initial upload: v2026.3.23 with HF Spaces deployment
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .env.example +322 -0
- .gitattributes +6 -0
- .github/ISSUE_TEMPLATE/bug_report.yml +144 -0
- .github/ISSUE_TEMPLATE/config.yml +11 -0
- .github/ISSUE_TEMPLATE/feature_request.yml +73 -0
- .github/ISSUE_TEMPLATE/setup_help.yml +100 -0
- .github/PULL_REQUEST_TEMPLATE.md +75 -0
- .github/workflows/deploy-site.yml +60 -0
- .github/workflows/docs-site-checks.yml +39 -0
- .github/workflows/supply-chain-audit.yml +192 -0
- .github/workflows/tests.yml +42 -0
- .gitignore +56 -0
- .gitmodules +3 -0
- .plans/openai-api-server.md +291 -0
- .plans/streaming-support.md +705 -0
- AGENTS.md +390 -0
- CONTRIBUTING.md +660 -0
- Dockerfile +91 -0
- EXPERIENCE.md +43 -0
- LICENSE +21 -0
- README.md +172 -7
- RELEASE_v0.2.0.md +383 -0
- RELEASE_v0.3.0.md +377 -0
- RELEASE_v0.4.0.md +400 -0
- acp_adapter/__init__.py +1 -0
- acp_adapter/__main__.py +5 -0
- acp_adapter/auth.py +24 -0
- acp_adapter/entry.py +85 -0
- acp_adapter/events.py +171 -0
- acp_adapter/permissions.py +80 -0
- acp_adapter/server.py +492 -0
- acp_adapter/session.py +459 -0
- acp_adapter/tools.py +215 -0
- acp_registry/agent.json +12 -0
- acp_registry/icon.svg +25 -0
- agent/__init__.py +6 -0
- agent/anthropic_adapter.py +1166 -0
- agent/auxiliary_client.py +1627 -0
- agent/context_compressor.py +677 -0
- agent/context_references.py +485 -0
- agent/copilot_acp_client.py +447 -0
- agent/display.py +708 -0
- agent/insights.py +792 -0
- agent/model_metadata.py +897 -0
- agent/models_dev.py +171 -0
- agent/prompt_builder.py +604 -0
- agent/prompt_caching.py +72 -0
- agent/redact.py +165 -0
- agent/skill_commands.py +282 -0
- agent/smart_model_routing.py +196 -0
.env.example
ADDED
|
@@ -0,0 +1,322 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Hermes Agent Environment Configuration
|
| 2 |
+
# Copy this file to .env and fill in your API keys
|
| 3 |
+
|
| 4 |
+
# =============================================================================
|
| 5 |
+
# LLM PROVIDER (OpenRouter)
|
| 6 |
+
# =============================================================================
|
| 7 |
+
# OpenRouter provides access to many models through one API
|
| 8 |
+
# All LLM calls go through OpenRouter - no direct provider keys needed
|
| 9 |
+
# Get your key at: https://openrouter.ai/keys
|
| 10 |
+
OPENROUTER_API_KEY=
|
| 11 |
+
|
| 12 |
+
# Default model to use (OpenRouter format: provider/model)
|
| 13 |
+
# Examples: anthropic/claude-opus-4.6, openai/gpt-4o, google/gemini-3-flash-preview, zhipuai/glm-4-plus
|
| 14 |
+
LLM_MODEL=anthropic/claude-opus-4.6
|
| 15 |
+
|
| 16 |
+
# =============================================================================
|
| 17 |
+
# LLM PROVIDER (z.ai / GLM)
|
| 18 |
+
# =============================================================================
|
| 19 |
+
# z.ai provides access to ZhipuAI GLM models (GLM-4-Plus, etc.)
|
| 20 |
+
# Get your key at: https://z.ai or https://open.bigmodel.cn
|
| 21 |
+
GLM_API_KEY=
|
| 22 |
+
# GLM_BASE_URL=https://api.z.ai/api/paas/v4 # Override default base URL
|
| 23 |
+
|
| 24 |
+
# =============================================================================
|
| 25 |
+
# LLM PROVIDER (Kimi / Moonshot)
|
| 26 |
+
# =============================================================================
|
| 27 |
+
# Kimi Code provides access to Moonshot AI coding models (kimi-k2.5, etc.)
|
| 28 |
+
# Get your key at: https://platform.kimi.ai (Kimi Code console)
|
| 29 |
+
# Keys prefixed sk-kimi- use the Kimi Code API (api.kimi.com) by default.
|
| 30 |
+
# Legacy keys from platform.moonshot.ai need KIMI_BASE_URL override below.
|
| 31 |
+
KIMI_API_KEY=
|
| 32 |
+
# KIMI_BASE_URL=https://api.kimi.com/coding/v1 # Default for sk-kimi- keys
|
| 33 |
+
# KIMI_BASE_URL=https://api.moonshot.ai/v1 # For legacy Moonshot keys
|
| 34 |
+
# KIMI_BASE_URL=https://api.moonshot.cn/v1 # For Moonshot China keys
|
| 35 |
+
|
| 36 |
+
# =============================================================================
|
| 37 |
+
# LLM PROVIDER (MiniMax)
|
| 38 |
+
# =============================================================================
|
| 39 |
+
# MiniMax provides access to MiniMax models (global endpoint)
|
| 40 |
+
# Get your key at: https://www.minimax.io
|
| 41 |
+
MINIMAX_API_KEY=
|
| 42 |
+
# MINIMAX_BASE_URL=https://api.minimax.io/v1 # Override default base URL
|
| 43 |
+
|
| 44 |
+
# MiniMax China endpoint (for users in mainland China)
|
| 45 |
+
MINIMAX_CN_API_KEY=
|
| 46 |
+
# MINIMAX_CN_BASE_URL=https://api.minimaxi.com/v1 # Override default base URL
|
| 47 |
+
|
| 48 |
+
# =============================================================================
|
| 49 |
+
# LLM PROVIDER (OpenCode Zen)
|
| 50 |
+
# =============================================================================
|
| 51 |
+
# OpenCode Zen provides curated, tested models (GPT, Claude, Gemini, MiniMax, GLM, Kimi)
|
| 52 |
+
# Pay-as-you-go pricing. Get your key at: https://opencode.ai/auth
|
| 53 |
+
OPENCODE_ZEN_API_KEY=
|
| 54 |
+
# OPENCODE_ZEN_BASE_URL=https://opencode.ai/zen/v1 # Override default base URL
|
| 55 |
+
|
| 56 |
+
# =============================================================================
|
| 57 |
+
# LLM PROVIDER (OpenCode Go)
|
| 58 |
+
# =============================================================================
|
| 59 |
+
# OpenCode Go provides access to open models (GLM-5, Kimi K2.5, MiniMax M2.5)
|
| 60 |
+
# $10/month subscription. Get your key at: https://opencode.ai/auth
|
| 61 |
+
OPENCODE_GO_API_KEY=
|
| 62 |
+
# OPENCODE_GO_BASE_URL=https://opencode.ai/zen/go/v1 # Override default base URL
|
| 63 |
+
|
| 64 |
+
# =============================================================================
|
| 65 |
+
# TOOL API KEYS
|
| 66 |
+
# =============================================================================
|
| 67 |
+
|
| 68 |
+
# Parallel API Key - AI-native web search and extract
|
| 69 |
+
# Get at: https://parallel.ai
|
| 70 |
+
PARALLEL_API_KEY=
|
| 71 |
+
|
| 72 |
+
# Firecrawl API Key - Web search, extract, and crawl
|
| 73 |
+
# Get at: https://firecrawl.dev/
|
| 74 |
+
FIRECRAWL_API_KEY=
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
# FAL.ai API Key - Image generation
|
| 78 |
+
# Get at: https://fal.ai/
|
| 79 |
+
FAL_KEY=
|
| 80 |
+
|
| 81 |
+
# Honcho - Cross-session AI-native user modeling (optional)
|
| 82 |
+
# Builds a persistent understanding of the user across sessions and tools.
|
| 83 |
+
# Get at: https://app.honcho.dev
|
| 84 |
+
# Also requires ~/.honcho/config.json with enabled=true (see README).
|
| 85 |
+
HONCHO_API_KEY=
|
| 86 |
+
|
| 87 |
+
# =============================================================================
|
| 88 |
+
# TERMINAL TOOL CONFIGURATION (mini-swe-agent backend)
|
| 89 |
+
# =============================================================================
|
| 90 |
+
# Backend type: "local", "singularity", "docker", "modal", or "ssh"
|
| 91 |
+
# Terminal backend is configured in ~/.hermes/config.yaml (terminal.backend).
|
| 92 |
+
# Use 'hermes setup' or 'hermes config set terminal.backend docker' to change.
|
| 93 |
+
# Supported: local, docker, singularity, modal, ssh
|
| 94 |
+
#
|
| 95 |
+
# Only override here if you need to force a backend without touching config.yaml:
|
| 96 |
+
# TERMINAL_ENV=local
|
| 97 |
+
|
| 98 |
+
# Container images (for singularity/docker/modal backends)
|
| 99 |
+
# TERMINAL_DOCKER_IMAGE=nikolaik/python-nodejs:python3.11-nodejs20
|
| 100 |
+
# TERMINAL_SINGULARITY_IMAGE=docker://nikolaik/python-nodejs:python3.11-nodejs20
|
| 101 |
+
TERMINAL_MODAL_IMAGE=nikolaik/python-nodejs:python3.11-nodejs20
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
# Working directory for terminal commands
|
| 105 |
+
# For local backend: "." means current directory (resolved automatically)
|
| 106 |
+
# For remote backends (ssh/docker/modal/singularity): use an absolute path
|
| 107 |
+
# INSIDE the target environment, or leave unset for the backend's default
|
| 108 |
+
# (/root for modal, / for docker, ~ for ssh). Do NOT use a host-local path.
|
| 109 |
+
# Usually managed by config.yaml (terminal.cwd) — uncomment to override
|
| 110 |
+
# TERMINAL_CWD=.
|
| 111 |
+
|
| 112 |
+
# Default command timeout in seconds
|
| 113 |
+
TERMINAL_TIMEOUT=60
|
| 114 |
+
|
| 115 |
+
# Cleanup inactive environments after this many seconds
|
| 116 |
+
TERMINAL_LIFETIME_SECONDS=300
|
| 117 |
+
|
| 118 |
+
# =============================================================================
|
| 119 |
+
# SSH REMOTE EXECUTION (for TERMINAL_ENV=ssh)
|
| 120 |
+
# =============================================================================
|
| 121 |
+
# Run terminal commands on a remote server via SSH.
|
| 122 |
+
# Agent code stays on your machine, commands execute remotely.
|
| 123 |
+
#
|
| 124 |
+
# SECURITY BENEFITS:
|
| 125 |
+
# - Agent cannot read your .env file (API keys protected)
|
| 126 |
+
# - Agent cannot modify its own code
|
| 127 |
+
# - Remote server acts as isolated sandbox
|
| 128 |
+
# - Can safely configure passwordless sudo on remote
|
| 129 |
+
#
|
| 130 |
+
# TERMINAL_SSH_HOST=192.168.1.100
|
| 131 |
+
# TERMINAL_SSH_USER=agent
|
| 132 |
+
# TERMINAL_SSH_PORT=22
|
| 133 |
+
# TERMINAL_SSH_KEY=~/.ssh/id_rsa
|
| 134 |
+
|
| 135 |
+
# =============================================================================
|
| 136 |
+
# SUDO SUPPORT (works with ALL terminal backends)
|
| 137 |
+
# =============================================================================
|
| 138 |
+
# If set, enables sudo commands by piping password via `sudo -S`.
|
| 139 |
+
# Works with: local, docker, singularity, modal, and ssh backends.
|
| 140 |
+
#
|
| 141 |
+
# SECURITY WARNING: Password stored in plaintext. Only use on trusted machines.
|
| 142 |
+
#
|
| 143 |
+
# ALTERNATIVES:
|
| 144 |
+
# - For SSH backend: Configure passwordless sudo on the remote server
|
| 145 |
+
# - For containers: Run as root inside the container (no sudo needed)
|
| 146 |
+
# - For local: Configure /etc/sudoers for specific commands
|
| 147 |
+
# - For CLI: Leave unset - you'll be prompted interactively with 45s timeout
|
| 148 |
+
#
|
| 149 |
+
# SUDO_PASSWORD=your_password_here
|
| 150 |
+
|
| 151 |
+
# =============================================================================
|
| 152 |
+
# MODAL CLOUD BACKEND (Optional - for TERMINAL_ENV=modal)
|
| 153 |
+
# =============================================================================
|
| 154 |
+
# Modal uses CLI authentication, not environment variables.
|
| 155 |
+
# Run: pip install modal && modal setup
|
| 156 |
+
# This will authenticate via browser and store credentials locally.
|
| 157 |
+
# No API key needed in .env - Modal handles auth automatically.
|
| 158 |
+
|
| 159 |
+
# =============================================================================
|
| 160 |
+
# BROWSER TOOL CONFIGURATION (agent-browser + Browserbase)
|
| 161 |
+
# =============================================================================
|
| 162 |
+
# Browser automation requires Browserbase cloud service for remote browser execution.
|
| 163 |
+
# This allows the agent to navigate websites, fill forms, and extract information.
|
| 164 |
+
#
|
| 165 |
+
# STEALTH MODES:
|
| 166 |
+
# - Basic Stealth: ALWAYS active (random fingerprints, auto CAPTCHA solving)
|
| 167 |
+
# - Advanced Stealth: Requires BROWSERBASE_ADVANCED_STEALTH=true (Scale Plan only)
|
| 168 |
+
|
| 169 |
+
# Browserbase API Key - Cloud browser execution
|
| 170 |
+
# Get at: https://browserbase.com/
|
| 171 |
+
BROWSERBASE_API_KEY=
|
| 172 |
+
|
| 173 |
+
# Browserbase Project ID - From your Browserbase dashboard
|
| 174 |
+
BROWSERBASE_PROJECT_ID=
|
| 175 |
+
|
| 176 |
+
# Enable residential proxies for better CAPTCHA solving (default: true)
|
| 177 |
+
# Routes traffic through residential IPs, significantly improves success rate
|
| 178 |
+
BROWSERBASE_PROXIES=true
|
| 179 |
+
|
| 180 |
+
# Enable advanced stealth mode (default: false, requires Scale Plan)
|
| 181 |
+
# Uses custom Chromium build to avoid bot detection altogether
|
| 182 |
+
BROWSERBASE_ADVANCED_STEALTH=false
|
| 183 |
+
|
| 184 |
+
# Browser session timeout in seconds (default: 300)
|
| 185 |
+
# Sessions are cleaned up after this duration of inactivity
|
| 186 |
+
BROWSER_SESSION_TIMEOUT=300
|
| 187 |
+
|
| 188 |
+
# Browser inactivity timeout - auto-cleanup inactive sessions (default: 120 = 2 min)
|
| 189 |
+
# Browser sessions are automatically closed after this period of no activity
|
| 190 |
+
BROWSER_INACTIVITY_TIMEOUT=120
|
| 191 |
+
|
| 192 |
+
# =============================================================================
|
| 193 |
+
# SESSION LOGGING
|
| 194 |
+
# =============================================================================
|
| 195 |
+
# Session trajectories are automatically saved to logs/ directory
|
| 196 |
+
# Format: logs/session_YYYYMMDD_HHMMSS_UUID.json
|
| 197 |
+
# Contains full conversation history in trajectory format for debugging/replay
|
| 198 |
+
|
| 199 |
+
# =============================================================================
|
| 200 |
+
# VOICE TRANSCRIPTION & OPENAI TTS
|
| 201 |
+
# =============================================================================
|
| 202 |
+
# Required for voice message transcription (Whisper) and OpenAI TTS voices.
|
| 203 |
+
# Uses OpenAI's API directly (not via OpenRouter).
|
| 204 |
+
# Named VOICE_TOOLS_OPENAI_KEY to avoid interference with OpenRouter.
|
| 205 |
+
# Get at: https://platform.openai.com/api-keys
|
| 206 |
+
VOICE_TOOLS_OPENAI_KEY=
|
| 207 |
+
|
| 208 |
+
# =============================================================================
|
| 209 |
+
# SLACK INTEGRATION
|
| 210 |
+
# =============================================================================
|
| 211 |
+
# Slack Bot Token - From Slack App settings (OAuth & Permissions)
|
| 212 |
+
# Get at: https://api.slack.com/apps
|
| 213 |
+
# SLACK_BOT_TOKEN=xoxb-...
|
| 214 |
+
|
| 215 |
+
# Slack App Token - For Socket Mode (App-Level Tokens in Slack App settings)
|
| 216 |
+
# SLACK_APP_TOKEN=xapp-...
|
| 217 |
+
|
| 218 |
+
# Slack allowed users (comma-separated Slack user IDs)
|
| 219 |
+
# SLACK_ALLOWED_USERS=
|
| 220 |
+
|
| 221 |
+
# WhatsApp (built-in Baileys bridge — run `hermes whatsapp` to pair)
|
| 222 |
+
# WHATSAPP_ENABLED=false
|
| 223 |
+
# WHATSAPP_ALLOWED_USERS=15551234567
|
| 224 |
+
|
| 225 |
+
# Email (IMAP/SMTP — send and receive emails as Hermes)
|
| 226 |
+
# For Gmail: enable 2FA → create App Password at https://myaccount.google.com/apppasswords
|
| 227 |
+
# EMAIL_ADDRESS=hermes@gmail.com
|
| 228 |
+
# EMAIL_PASSWORD=xxxx xxxx xxxx xxxx
|
| 229 |
+
# EMAIL_IMAP_HOST=imap.gmail.com
|
| 230 |
+
# EMAIL_IMAP_PORT=993
|
| 231 |
+
# EMAIL_SMTP_HOST=smtp.gmail.com
|
| 232 |
+
# EMAIL_SMTP_PORT=587
|
| 233 |
+
# EMAIL_POLL_INTERVAL=15
|
| 234 |
+
# EMAIL_ALLOWED_USERS=your@email.com
|
| 235 |
+
# EMAIL_HOME_ADDRESS=your@email.com
|
| 236 |
+
|
| 237 |
+
# Gateway-wide: allow ALL users without an allowlist (default: false = deny)
|
| 238 |
+
# Only set to true if you intentionally want open access.
|
| 239 |
+
# GATEWAY_ALLOW_ALL_USERS=false
|
| 240 |
+
|
| 241 |
+
# =============================================================================
|
| 242 |
+
# RESPONSE PACING
|
| 243 |
+
# =============================================================================
|
| 244 |
+
# Human-like delays between message chunks on messaging platforms.
|
| 245 |
+
# Makes the bot feel less robotic.
|
| 246 |
+
# HERMES_HUMAN_DELAY_MODE=off # off | natural | custom
|
| 247 |
+
# HERMES_HUMAN_DELAY_MIN_MS=800 # Min delay in ms (custom mode)
|
| 248 |
+
# HERMES_HUMAN_DELAY_MAX_MS=2500 # Max delay in ms (custom mode)
|
| 249 |
+
|
| 250 |
+
# =============================================================================
|
| 251 |
+
# DEBUG OPTIONS
|
| 252 |
+
# =============================================================================
|
| 253 |
+
WEB_TOOLS_DEBUG=false
|
| 254 |
+
VISION_TOOLS_DEBUG=false
|
| 255 |
+
MOA_TOOLS_DEBUG=false
|
| 256 |
+
IMAGE_TOOLS_DEBUG=false
|
| 257 |
+
|
| 258 |
+
# =============================================================================
|
| 259 |
+
# CONTEXT COMPRESSION (Auto-shrinks long conversations)
|
| 260 |
+
# =============================================================================
|
| 261 |
+
# When conversation approaches model's context limit, middle turns are
|
| 262 |
+
# automatically summarized to free up space.
|
| 263 |
+
#
|
| 264 |
+
# Context compression is configured in ~/.hermes/config.yaml under compression:
|
| 265 |
+
# CONTEXT_COMPRESSION_ENABLED=true # Enable auto-compression (default: true)
|
| 266 |
+
# CONTEXT_COMPRESSION_THRESHOLD=0.85 # Compress at 85% of context limit
|
| 267 |
+
# Model is set via compression.summary_model in config.yaml (default: google/gemini-3-flash-preview)
|
| 268 |
+
|
| 269 |
+
# =============================================================================
|
| 270 |
+
# RL TRAINING (Tinker + Atropos)
|
| 271 |
+
# =============================================================================
|
| 272 |
+
# Run reinforcement learning training on language models using the Tinker API.
|
| 273 |
+
# Requires the rl-server to be running (from tinker-atropos package).
|
| 274 |
+
|
| 275 |
+
# Tinker API Key - RL training service
|
| 276 |
+
# Get at: https://tinker-console.thinkingmachines.ai/keys
|
| 277 |
+
TINKER_API_KEY=
|
| 278 |
+
|
| 279 |
+
# Weights & Biases API Key - Experiment tracking and metrics
|
| 280 |
+
# Get at: https://wandb.ai/authorize
|
| 281 |
+
WANDB_API_KEY=
|
| 282 |
+
|
| 283 |
+
# RL API Server URL (default: http://localhost:8080)
|
| 284 |
+
# Change if running the rl-server on a different host/port
|
| 285 |
+
# RL_API_URL=http://localhost:8080
|
| 286 |
+
|
| 287 |
+
# =============================================================================
|
| 288 |
+
# SKILLS HUB (GitHub integration for skill search/install/publish)
|
| 289 |
+
# =============================================================================
|
| 290 |
+
|
| 291 |
+
# GitHub Personal Access Token — for higher API rate limits on skill search/install
|
| 292 |
+
# Get at: https://github.com/settings/tokens (Fine-grained recommended)
|
| 293 |
+
# GITHUB_TOKEN=ghp_xxxxxxxxxxxxxxxxxxxx
|
| 294 |
+
|
| 295 |
+
# GitHub App credentials (optional — for bot identity on PRs)
|
| 296 |
+
# GITHUB_APP_ID=
|
| 297 |
+
# GITHUB_APP_PRIVATE_KEY_PATH=
|
| 298 |
+
# GITHUB_APP_INSTALLATION_ID=
|
| 299 |
+
|
| 300 |
+
# Groq API key (free tier — used for Whisper STT in voice mode)
|
| 301 |
+
# GROQ_API_KEY=
|
| 302 |
+
|
| 303 |
+
# =============================================================================
|
| 304 |
+
# STT PROVIDER SELECTION
|
| 305 |
+
# =============================================================================
|
| 306 |
+
# Default STT provider is "local" (faster-whisper) — runs on your machine, no API key needed.
|
| 307 |
+
# Install with: pip install faster-whisper
|
| 308 |
+
# Model downloads automatically on first use (~150 MB for "base").
|
| 309 |
+
# To use cloud providers instead, set GROQ_API_KEY or VOICE_TOOLS_OPENAI_KEY above.
|
| 310 |
+
# Provider priority: local > groq > openai
|
| 311 |
+
# Configure in config.yaml: stt.provider: local | groq | openai
|
| 312 |
+
|
| 313 |
+
# =============================================================================
|
| 314 |
+
# STT ADVANCED OVERRIDES (optional)
|
| 315 |
+
# =============================================================================
|
| 316 |
+
# Override default STT models per provider (normally set via stt.model in config.yaml)
|
| 317 |
+
# STT_GROQ_MODEL=whisper-large-v3-turbo
|
| 318 |
+
# STT_OPENAI_MODEL=whisper-1
|
| 319 |
+
|
| 320 |
+
# Override STT provider endpoints (for proxies or self-hosted instances)
|
| 321 |
+
# GROQ_BASE_URL=https://api.groq.com/openai/v1
|
| 322 |
+
# STT_OPENAI_BASE_URL=https://api.openai.com/v1
|
.gitattributes
CHANGED
|
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
landingpage/icon-512.png filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
skills/research/ml-paper-writing/templates/colm2025/colm2025_conference.pdf filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
skills/research/ml-paper-writing/templates/iclr2026/iclr2026_conference.pdf filter=lfs diff=lfs merge=lfs -text
|
| 39 |
+
skills/research/ml-paper-writing/templates/icml2026/example_paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 40 |
+
tools/neutts_samples/jo.wav filter=lfs diff=lfs merge=lfs -text
|
| 41 |
+
website/static/img/logo.png filter=lfs diff=lfs merge=lfs -text
|
.github/ISSUE_TEMPLATE/bug_report.yml
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: "🐛 Bug Report"
|
| 2 |
+
description: Report a bug — something that's broken, crashes, or behaves incorrectly.
|
| 3 |
+
title: "[Bug]: "
|
| 4 |
+
labels: ["bug"]
|
| 5 |
+
body:
|
| 6 |
+
- type: markdown
|
| 7 |
+
attributes:
|
| 8 |
+
value: |
|
| 9 |
+
Thanks for reporting a bug! Please fill out the sections below so we can reproduce and fix it quickly.
|
| 10 |
+
|
| 11 |
+
**Before submitting**, please:
|
| 12 |
+
- [ ] Search [existing issues](https://github.com/NousResearch/hermes-agent/issues) to avoid duplicates
|
| 13 |
+
- [ ] Update to the latest version (`hermes update`) and confirm the bug still exists
|
| 14 |
+
|
| 15 |
+
- type: textarea
|
| 16 |
+
id: description
|
| 17 |
+
attributes:
|
| 18 |
+
label: Bug Description
|
| 19 |
+
description: A clear description of what's broken. Include error messages, tracebacks, or screenshots if relevant.
|
| 20 |
+
placeholder: |
|
| 21 |
+
What happened? What did you expect to happen instead?
|
| 22 |
+
validations:
|
| 23 |
+
required: true
|
| 24 |
+
|
| 25 |
+
- type: textarea
|
| 26 |
+
id: reproduction
|
| 27 |
+
attributes:
|
| 28 |
+
label: Steps to Reproduce
|
| 29 |
+
description: Minimal steps to trigger the bug. The more specific, the faster we can fix it.
|
| 30 |
+
placeholder: |
|
| 31 |
+
1. Run `hermes chat`
|
| 32 |
+
2. Send the message "..."
|
| 33 |
+
3. Agent calls tool X
|
| 34 |
+
4. Error appears: ...
|
| 35 |
+
validations:
|
| 36 |
+
required: true
|
| 37 |
+
|
| 38 |
+
- type: textarea
|
| 39 |
+
id: expected
|
| 40 |
+
attributes:
|
| 41 |
+
label: Expected Behavior
|
| 42 |
+
description: What should have happened instead?
|
| 43 |
+
validations:
|
| 44 |
+
required: true
|
| 45 |
+
|
| 46 |
+
- type: textarea
|
| 47 |
+
id: actual
|
| 48 |
+
attributes:
|
| 49 |
+
label: Actual Behavior
|
| 50 |
+
description: What actually happened? Include full error output if available.
|
| 51 |
+
validations:
|
| 52 |
+
required: true
|
| 53 |
+
|
| 54 |
+
- type: dropdown
|
| 55 |
+
id: component
|
| 56 |
+
attributes:
|
| 57 |
+
label: Affected Component
|
| 58 |
+
description: Which part of Hermes is affected?
|
| 59 |
+
multiple: true
|
| 60 |
+
options:
|
| 61 |
+
- CLI (interactive chat)
|
| 62 |
+
- Gateway (Telegram/Discord/Slack/WhatsApp)
|
| 63 |
+
- Setup / Installation
|
| 64 |
+
- Tools (terminal, file ops, web, code execution, etc.)
|
| 65 |
+
- Skills (skill loading, skill hub, skill guard)
|
| 66 |
+
- Agent Core (conversation loop, context compression, memory)
|
| 67 |
+
- Configuration (config.yaml, .env, hermes setup)
|
| 68 |
+
- Other
|
| 69 |
+
validations:
|
| 70 |
+
required: true
|
| 71 |
+
|
| 72 |
+
- type: dropdown
|
| 73 |
+
id: platform
|
| 74 |
+
attributes:
|
| 75 |
+
label: Messaging Platform (if gateway-related)
|
| 76 |
+
description: Which platform adapter is affected?
|
| 77 |
+
multiple: true
|
| 78 |
+
options:
|
| 79 |
+
- N/A (CLI only)
|
| 80 |
+
- Telegram
|
| 81 |
+
- Discord
|
| 82 |
+
- Slack
|
| 83 |
+
- WhatsApp
|
| 84 |
+
|
| 85 |
+
- type: input
|
| 86 |
+
id: os
|
| 87 |
+
attributes:
|
| 88 |
+
label: Operating System
|
| 89 |
+
description: e.g. Ubuntu 24.04, macOS 15.2, Windows 11
|
| 90 |
+
placeholder: Ubuntu 24.04
|
| 91 |
+
validations:
|
| 92 |
+
required: true
|
| 93 |
+
|
| 94 |
+
- type: input
|
| 95 |
+
id: python-version
|
| 96 |
+
attributes:
|
| 97 |
+
label: Python Version
|
| 98 |
+
description: Output of `python --version`
|
| 99 |
+
placeholder: "3.11.9"
|
| 100 |
+
validations:
|
| 101 |
+
required: true
|
| 102 |
+
|
| 103 |
+
- type: input
|
| 104 |
+
id: hermes-version
|
| 105 |
+
attributes:
|
| 106 |
+
label: Hermes Version
|
| 107 |
+
description: Output of `hermes version`
|
| 108 |
+
placeholder: "2.1.0"
|
| 109 |
+
validations:
|
| 110 |
+
required: true
|
| 111 |
+
|
| 112 |
+
- type: textarea
|
| 113 |
+
id: logs
|
| 114 |
+
attributes:
|
| 115 |
+
label: Relevant Logs / Traceback
|
| 116 |
+
description: Paste any error output, traceback, or log messages. This will be auto-formatted as code.
|
| 117 |
+
render: shell
|
| 118 |
+
|
| 119 |
+
- type: textarea
|
| 120 |
+
id: root-cause
|
| 121 |
+
attributes:
|
| 122 |
+
label: Root Cause Analysis (optional)
|
| 123 |
+
description: |
|
| 124 |
+
If you've dug into the code and identified the root cause, share it here.
|
| 125 |
+
Include file paths, line numbers, and code snippets if possible. This massively speeds up fixes.
|
| 126 |
+
placeholder: |
|
| 127 |
+
The bug is in `gateway/run.py` line 949. `len(history)` counts session_meta entries
|
| 128 |
+
but `agent_messages` was built from filtered history...
|
| 129 |
+
|
| 130 |
+
- type: textarea
|
| 131 |
+
id: proposed-fix
|
| 132 |
+
attributes:
|
| 133 |
+
label: Proposed Fix (optional)
|
| 134 |
+
description: If you have a fix in mind (or a PR ready), describe it here.
|
| 135 |
+
placeholder: |
|
| 136 |
+
Replace `.get()` with `.pop()` on line 289 of `gateway/platforms/base.py`
|
| 137 |
+
to actually clear the pending message after retrieval.
|
| 138 |
+
|
| 139 |
+
- type: checkboxes
|
| 140 |
+
id: pr-ready
|
| 141 |
+
attributes:
|
| 142 |
+
label: Are you willing to submit a PR for this?
|
| 143 |
+
options:
|
| 144 |
+
- label: I'd like to fix this myself and submit a PR
|
.github/ISSUE_TEMPLATE/config.yml
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
blank_issues_enabled: true
|
| 2 |
+
contact_links:
|
| 3 |
+
- name: 💬 Nous Research Discord
|
| 4 |
+
url: https://discord.gg/NousResearch
|
| 5 |
+
about: For quick questions, showcasing projects, sharing skills, and community chat.
|
| 6 |
+
- name: 📖 Documentation
|
| 7 |
+
url: https://github.com/NousResearch/hermes-agent/blob/main/README.md
|
| 8 |
+
about: Check the README and docs before opening an issue.
|
| 9 |
+
- name: 🤝 Contributing Guide
|
| 10 |
+
url: https://github.com/NousResearch/hermes-agent/blob/main/CONTRIBUTING.md
|
| 11 |
+
about: Read this before submitting a PR.
|
.github/ISSUE_TEMPLATE/feature_request.yml
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: "✨ Feature Request"
|
| 2 |
+
description: Suggest a new feature or improvement.
|
| 3 |
+
title: "[Feature]: "
|
| 4 |
+
labels: ["enhancement"]
|
| 5 |
+
body:
|
| 6 |
+
- type: markdown
|
| 7 |
+
attributes:
|
| 8 |
+
value: |
|
| 9 |
+
Thanks for the suggestion! Before submitting, please consider:
|
| 10 |
+
|
| 11 |
+
- **Is this a new skill?** Most capabilities should be [skills, not tools](https://github.com/NousResearch/hermes-agent/blob/main/CONTRIBUTING.md#should-it-be-a-skill-or-a-tool). If it's a specialized integration (crypto, NFT, niche SaaS), it belongs on the Skills Hub, not bundled.
|
| 12 |
+
- **Search [existing issues](https://github.com/NousResearch/hermes-agent/issues)** — someone may have already proposed this.
|
| 13 |
+
|
| 14 |
+
- type: textarea
|
| 15 |
+
id: problem
|
| 16 |
+
attributes:
|
| 17 |
+
label: Problem or Use Case
|
| 18 |
+
description: What problem does this solve? What are you trying to do that you can't today?
|
| 19 |
+
placeholder: |
|
| 20 |
+
I'm trying to use Hermes with [provider/platform/workflow] but currently
|
| 21 |
+
there's no way to...
|
| 22 |
+
validations:
|
| 23 |
+
required: true
|
| 24 |
+
|
| 25 |
+
- type: textarea
|
| 26 |
+
id: solution
|
| 27 |
+
attributes:
|
| 28 |
+
label: Proposed Solution
|
| 29 |
+
description: How do you think this should work? Be as specific as you can — CLI flags, config options, UI behavior.
|
| 30 |
+
placeholder: |
|
| 31 |
+
Add a `--foo` flag to `hermes chat` that enables...
|
| 32 |
+
Or: Add a config key `bar.baz` that controls...
|
| 33 |
+
validations:
|
| 34 |
+
required: true
|
| 35 |
+
|
| 36 |
+
- type: textarea
|
| 37 |
+
id: alternatives
|
| 38 |
+
attributes:
|
| 39 |
+
label: Alternatives Considered
|
| 40 |
+
description: What other approaches did you consider? Why is the proposed solution better?
|
| 41 |
+
|
| 42 |
+
- type: dropdown
|
| 43 |
+
id: type
|
| 44 |
+
attributes:
|
| 45 |
+
label: Feature Type
|
| 46 |
+
options:
|
| 47 |
+
- New tool
|
| 48 |
+
- New bundled skill
|
| 49 |
+
- CLI improvement
|
| 50 |
+
- Gateway / messaging improvement
|
| 51 |
+
- Configuration option
|
| 52 |
+
- Performance / reliability
|
| 53 |
+
- Developer experience (tests, docs, CI)
|
| 54 |
+
- Other
|
| 55 |
+
validations:
|
| 56 |
+
required: true
|
| 57 |
+
|
| 58 |
+
- type: dropdown
|
| 59 |
+
id: scope
|
| 60 |
+
attributes:
|
| 61 |
+
label: Scope
|
| 62 |
+
description: How big is this change?
|
| 63 |
+
options:
|
| 64 |
+
- Small (single file, < 50 lines)
|
| 65 |
+
- Medium (few files, < 300 lines)
|
| 66 |
+
- Large (new module or significant refactor)
|
| 67 |
+
|
| 68 |
+
- type: checkboxes
|
| 69 |
+
id: pr-ready
|
| 70 |
+
attributes:
|
| 71 |
+
label: Contribution
|
| 72 |
+
options:
|
| 73 |
+
- label: I'd like to implement this myself and submit a PR
|
.github/ISSUE_TEMPLATE/setup_help.yml
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: "🔧 Setup / Installation Help"
|
| 2 |
+
description: Having trouble installing or configuring Hermes? Ask here.
|
| 3 |
+
title: "[Setup]: "
|
| 4 |
+
labels: ["setup"]
|
| 5 |
+
body:
|
| 6 |
+
- type: markdown
|
| 7 |
+
attributes:
|
| 8 |
+
value: |
|
| 9 |
+
Sorry you're having trouble! Please fill out the details below so we can help.
|
| 10 |
+
|
| 11 |
+
**Quick checks first:**
|
| 12 |
+
- Run `hermes doctor` and include the output below
|
| 13 |
+
- Try `hermes update` to get the latest version
|
| 14 |
+
- Check the [README troubleshooting section](https://github.com/NousResearch/hermes-agent#troubleshooting)
|
| 15 |
+
- For general questions, consider the [Nous Research Discord](https://discord.gg/NousResearch) for faster help
|
| 16 |
+
|
| 17 |
+
- type: textarea
|
| 18 |
+
id: description
|
| 19 |
+
attributes:
|
| 20 |
+
label: What's Going Wrong?
|
| 21 |
+
description: Describe what you're trying to do and where it fails.
|
| 22 |
+
placeholder: |
|
| 23 |
+
I ran `hermes setup` and selected Nous Portal, but when I try to
|
| 24 |
+
start the gateway I get...
|
| 25 |
+
validations:
|
| 26 |
+
required: true
|
| 27 |
+
|
| 28 |
+
- type: textarea
|
| 29 |
+
id: steps
|
| 30 |
+
attributes:
|
| 31 |
+
label: Steps Taken
|
| 32 |
+
description: What did you do? Include the exact commands you ran.
|
| 33 |
+
placeholder: |
|
| 34 |
+
1. Ran the install script: `curl -fsSL ... | bash`
|
| 35 |
+
2. Ran `hermes setup` and chose "Quick setup"
|
| 36 |
+
3. Selected OpenRouter, entered API key
|
| 37 |
+
4. Ran `hermes chat` and got error...
|
| 38 |
+
validations:
|
| 39 |
+
required: true
|
| 40 |
+
|
| 41 |
+
- type: dropdown
|
| 42 |
+
id: install-method
|
| 43 |
+
attributes:
|
| 44 |
+
label: Installation Method
|
| 45 |
+
options:
|
| 46 |
+
- Install script (curl | bash)
|
| 47 |
+
- Manual clone + pip/uv install
|
| 48 |
+
- PowerShell installer (Windows)
|
| 49 |
+
- Docker
|
| 50 |
+
- Other
|
| 51 |
+
validations:
|
| 52 |
+
required: true
|
| 53 |
+
|
| 54 |
+
- type: input
|
| 55 |
+
id: os
|
| 56 |
+
attributes:
|
| 57 |
+
label: Operating System
|
| 58 |
+
placeholder: Ubuntu 24.04 / macOS 15.2 / Windows 11
|
| 59 |
+
validations:
|
| 60 |
+
required: true
|
| 61 |
+
|
| 62 |
+
- type: input
|
| 63 |
+
id: python-version
|
| 64 |
+
attributes:
|
| 65 |
+
label: Python Version
|
| 66 |
+
description: Output of `python --version` (or `python3 --version`)
|
| 67 |
+
placeholder: "3.11.9"
|
| 68 |
+
|
| 69 |
+
- type: input
|
| 70 |
+
id: hermes-version
|
| 71 |
+
attributes:
|
| 72 |
+
label: Hermes Version
|
| 73 |
+
description: Output of `hermes version` (if install got that far)
|
| 74 |
+
placeholder: "2.1.0"
|
| 75 |
+
|
| 76 |
+
- type: textarea
|
| 77 |
+
id: doctor-output
|
| 78 |
+
attributes:
|
| 79 |
+
label: Output of `hermes doctor`
|
| 80 |
+
description: Run `hermes doctor` and paste the full output. This will be auto-formatted.
|
| 81 |
+
render: shell
|
| 82 |
+
|
| 83 |
+
- type: textarea
|
| 84 |
+
id: error-output
|
| 85 |
+
attributes:
|
| 86 |
+
label: Full Error Output
|
| 87 |
+
description: Paste the complete error message or traceback. This will be auto-formatted.
|
| 88 |
+
render: shell
|
| 89 |
+
validations:
|
| 90 |
+
required: true
|
| 91 |
+
|
| 92 |
+
- type: textarea
|
| 93 |
+
id: tried
|
| 94 |
+
attributes:
|
| 95 |
+
label: What I've Already Tried
|
| 96 |
+
description: List any fixes or workarounds you've already attempted.
|
| 97 |
+
placeholder: |
|
| 98 |
+
- Ran `hermes update`
|
| 99 |
+
- Tried reinstalling with `pip install -e ".[all]"`
|
| 100 |
+
- Checked that OPENROUTER_API_KEY is set in ~/.hermes/.env
|
.github/PULL_REQUEST_TEMPLATE.md
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## What does this PR do?
|
| 2 |
+
|
| 3 |
+
<!-- Describe the change clearly. What problem does it solve? Why is this approach the right one? -->
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
## Related Issue
|
| 8 |
+
|
| 9 |
+
<!-- Link the issue this PR addresses. If no issue exists, consider creating one first. -->
|
| 10 |
+
|
| 11 |
+
Fixes #
|
| 12 |
+
|
| 13 |
+
## Type of Change
|
| 14 |
+
|
| 15 |
+
<!-- Check the one that applies. -->
|
| 16 |
+
|
| 17 |
+
- [ ] 🐛 Bug fix (non-breaking change that fixes an issue)
|
| 18 |
+
- [ ] ✨ New feature (non-breaking change that adds functionality)
|
| 19 |
+
- [ ] 🔒 Security fix
|
| 20 |
+
- [ ] 📝 Documentation update
|
| 21 |
+
- [ ] ✅ Tests (adding or improving test coverage)
|
| 22 |
+
- [ ] ♻️ Refactor (no behavior change)
|
| 23 |
+
- [ ] 🎯 New skill (bundled or hub)
|
| 24 |
+
|
| 25 |
+
## Changes Made
|
| 26 |
+
|
| 27 |
+
<!-- List the specific changes. Include file paths for code changes. -->
|
| 28 |
+
|
| 29 |
+
-
|
| 30 |
+
|
| 31 |
+
## How to Test
|
| 32 |
+
|
| 33 |
+
<!-- Steps to verify this change works. For bugs: reproduction steps + proof that the fix works. -->
|
| 34 |
+
|
| 35 |
+
1.
|
| 36 |
+
2.
|
| 37 |
+
3.
|
| 38 |
+
|
| 39 |
+
## Checklist
|
| 40 |
+
|
| 41 |
+
<!-- Complete these before requesting review. -->
|
| 42 |
+
|
| 43 |
+
### Code
|
| 44 |
+
|
| 45 |
+
- [ ] I've read the [Contributing Guide](https://github.com/NousResearch/hermes-agent/blob/main/CONTRIBUTING.md)
|
| 46 |
+
- [ ] My commit messages follow [Conventional Commits](https://www.conventionalcommits.org/) (`fix(scope):`, `feat(scope):`, etc.)
|
| 47 |
+
- [ ] I searched for [existing PRs](https://github.com/NousResearch/hermes-agent/pulls) to make sure this isn't a duplicate
|
| 48 |
+
- [ ] My PR contains **only** changes related to this fix/feature (no unrelated commits)
|
| 49 |
+
- [ ] I've run `pytest tests/ -q` and all tests pass
|
| 50 |
+
- [ ] I've added tests for my changes (required for bug fixes, strongly encouraged for features)
|
| 51 |
+
- [ ] I've tested on my platform: <!-- e.g. Ubuntu 24.04, macOS 15.2, Windows 11 -->
|
| 52 |
+
|
| 53 |
+
### Documentation & Housekeeping
|
| 54 |
+
|
| 55 |
+
<!-- Check all that apply. It's OK to check "N/A" if a category doesn't apply to your change. -->
|
| 56 |
+
|
| 57 |
+
- [ ] I've updated relevant documentation (README, `docs/`, docstrings) — or N/A
|
| 58 |
+
- [ ] I've updated `cli-config.yaml.example` if I added/changed config keys — or N/A
|
| 59 |
+
- [ ] I've updated `CONTRIBUTING.md` or `AGENTS.md` if I changed architecture or workflows — or N/A
|
| 60 |
+
- [ ] I've considered cross-platform impact (Windows, macOS) per the [compatibility guide](https://github.com/NousResearch/hermes-agent/blob/main/CONTRIBUTING.md#cross-platform-compatibility) — or N/A
|
| 61 |
+
- [ ] I've updated tool descriptions/schemas if I changed tool behavior — or N/A
|
| 62 |
+
|
| 63 |
+
## For New Skills
|
| 64 |
+
|
| 65 |
+
<!-- Only fill this out if you're adding a skill. Delete this section otherwise. -->
|
| 66 |
+
|
| 67 |
+
- [ ] This skill is **broadly useful** to most users (if bundled) — see [Contributing Guide](https://github.com/NousResearch/hermes-agent/blob/main/CONTRIBUTING.md#should-the-skill-be-bundled)
|
| 68 |
+
- [ ] SKILL.md follows the [standard format](https://github.com/NousResearch/hermes-agent/blob/main/CONTRIBUTING.md#skillmd-format) (frontmatter, trigger conditions, steps, pitfalls)
|
| 69 |
+
- [ ] No external dependencies that aren't already available (prefer stdlib, curl, existing Hermes tools)
|
| 70 |
+
- [ ] I've tested the skill end-to-end: `hermes --toolsets skills -q "Use the X skill to do Y"`
|
| 71 |
+
|
| 72 |
+
## Screenshots / Logs
|
| 73 |
+
|
| 74 |
+
<!-- If applicable, add screenshots or log output showing the fix/feature in action. -->
|
| 75 |
+
|
.github/workflows/deploy-site.yml
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: Deploy Site
|
| 2 |
+
|
| 3 |
+
on:
|
| 4 |
+
push:
|
| 5 |
+
branches: [main]
|
| 6 |
+
paths:
|
| 7 |
+
- 'website/**'
|
| 8 |
+
- 'landingpage/**'
|
| 9 |
+
- '.github/workflows/deploy-site.yml'
|
| 10 |
+
workflow_dispatch:
|
| 11 |
+
|
| 12 |
+
permissions:
|
| 13 |
+
pages: write
|
| 14 |
+
id-token: write
|
| 15 |
+
|
| 16 |
+
concurrency:
|
| 17 |
+
group: pages
|
| 18 |
+
cancel-in-progress: false
|
| 19 |
+
|
| 20 |
+
jobs:
|
| 21 |
+
build-and-deploy:
|
| 22 |
+
runs-on: ubuntu-latest
|
| 23 |
+
environment:
|
| 24 |
+
name: github-pages
|
| 25 |
+
url: ${{ steps.deploy.outputs.page_url }}
|
| 26 |
+
steps:
|
| 27 |
+
- uses: actions/checkout@v4
|
| 28 |
+
|
| 29 |
+
- uses: actions/setup-node@v4
|
| 30 |
+
with:
|
| 31 |
+
node-version: 20
|
| 32 |
+
cache: npm
|
| 33 |
+
cache-dependency-path: website/package-lock.json
|
| 34 |
+
|
| 35 |
+
- name: Install dependencies
|
| 36 |
+
run: npm ci
|
| 37 |
+
working-directory: website
|
| 38 |
+
|
| 39 |
+
- name: Build Docusaurus
|
| 40 |
+
run: npm run build
|
| 41 |
+
working-directory: website
|
| 42 |
+
|
| 43 |
+
- name: Stage deployment
|
| 44 |
+
run: |
|
| 45 |
+
mkdir -p _site/docs
|
| 46 |
+
# Landing page at root
|
| 47 |
+
cp -r landingpage/* _site/
|
| 48 |
+
# Docusaurus at /docs/
|
| 49 |
+
cp -r website/build/* _site/docs/
|
| 50 |
+
# CNAME so GitHub Pages keeps the custom domain between deploys
|
| 51 |
+
echo "hermes-agent.nousresearch.com" > _site/CNAME
|
| 52 |
+
|
| 53 |
+
- name: Upload artifact
|
| 54 |
+
uses: actions/upload-pages-artifact@v3
|
| 55 |
+
with:
|
| 56 |
+
path: _site
|
| 57 |
+
|
| 58 |
+
- name: Deploy to GitHub Pages
|
| 59 |
+
id: deploy
|
| 60 |
+
uses: actions/deploy-pages@v4
|
.github/workflows/docs-site-checks.yml
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: Docs Site Checks
|
| 2 |
+
|
| 3 |
+
on:
|
| 4 |
+
pull_request:
|
| 5 |
+
paths:
|
| 6 |
+
- 'website/**'
|
| 7 |
+
- '.github/workflows/docs-site-checks.yml'
|
| 8 |
+
workflow_dispatch:
|
| 9 |
+
|
| 10 |
+
jobs:
|
| 11 |
+
docs-site-checks:
|
| 12 |
+
runs-on: ubuntu-latest
|
| 13 |
+
steps:
|
| 14 |
+
- uses: actions/checkout@v4
|
| 15 |
+
|
| 16 |
+
- uses: actions/setup-node@v4
|
| 17 |
+
with:
|
| 18 |
+
node-version: 20
|
| 19 |
+
cache: npm
|
| 20 |
+
cache-dependency-path: website/package-lock.json
|
| 21 |
+
|
| 22 |
+
- name: Install website dependencies
|
| 23 |
+
run: npm ci
|
| 24 |
+
working-directory: website
|
| 25 |
+
|
| 26 |
+
- uses: actions/setup-python@v5
|
| 27 |
+
with:
|
| 28 |
+
python-version: '3.11'
|
| 29 |
+
|
| 30 |
+
- name: Install ascii-guard
|
| 31 |
+
run: python -m pip install ascii-guard
|
| 32 |
+
|
| 33 |
+
- name: Lint docs diagrams
|
| 34 |
+
run: npm run lint:diagrams
|
| 35 |
+
working-directory: website
|
| 36 |
+
|
| 37 |
+
- name: Build Docusaurus
|
| 38 |
+
run: npm run build
|
| 39 |
+
working-directory: website
|
.github/workflows/supply-chain-audit.yml
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: Supply Chain Audit
|
| 2 |
+
|
| 3 |
+
on:
|
| 4 |
+
pull_request:
|
| 5 |
+
types: [opened, synchronize, reopened]
|
| 6 |
+
|
| 7 |
+
permissions:
|
| 8 |
+
pull-requests: write
|
| 9 |
+
contents: read
|
| 10 |
+
|
| 11 |
+
jobs:
|
| 12 |
+
scan:
|
| 13 |
+
name: Scan PR for supply chain risks
|
| 14 |
+
runs-on: ubuntu-latest
|
| 15 |
+
steps:
|
| 16 |
+
- name: Checkout
|
| 17 |
+
uses: actions/checkout@v4
|
| 18 |
+
with:
|
| 19 |
+
fetch-depth: 0
|
| 20 |
+
|
| 21 |
+
- name: Scan diff for suspicious patterns
|
| 22 |
+
id: scan
|
| 23 |
+
env:
|
| 24 |
+
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
| 25 |
+
run: |
|
| 26 |
+
set -euo pipefail
|
| 27 |
+
|
| 28 |
+
BASE="${{ github.event.pull_request.base.sha }}"
|
| 29 |
+
HEAD="${{ github.event.pull_request.head.sha }}"
|
| 30 |
+
|
| 31 |
+
# Get the full diff (added lines only)
|
| 32 |
+
DIFF=$(git diff "$BASE".."$HEAD" -- . ':!uv.lock' ':!*.lock' ':!package-lock.json' ':!yarn.lock' || true)
|
| 33 |
+
|
| 34 |
+
FINDINGS=""
|
| 35 |
+
CRITICAL=false
|
| 36 |
+
|
| 37 |
+
# --- .pth files (auto-execute on Python startup) ---
|
| 38 |
+
PTH_FILES=$(git diff --name-only "$BASE".."$HEAD" | grep '\.pth$' || true)
|
| 39 |
+
if [ -n "$PTH_FILES" ]; then
|
| 40 |
+
CRITICAL=true
|
| 41 |
+
FINDINGS="${FINDINGS}
|
| 42 |
+
### 🚨 CRITICAL: .pth file added or modified
|
| 43 |
+
Python \`.pth\` files in \`site-packages/\` execute automatically when the interpreter starts — no import required. This is the exact mechanism used in the [litellm supply chain attack](https://github.com/BerriAI/litellm/issues/24512).
|
| 44 |
+
|
| 45 |
+
**Files:**
|
| 46 |
+
\`\`\`
|
| 47 |
+
${PTH_FILES}
|
| 48 |
+
\`\`\`
|
| 49 |
+
"
|
| 50 |
+
fi
|
| 51 |
+
|
| 52 |
+
# --- base64 + exec/eval combo (the litellm attack pattern) ---
|
| 53 |
+
B64_EXEC_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -iE 'base64\.(b64decode|decodebytes|urlsafe_b64decode)' | grep -iE 'exec\(|eval\(' | head -10 || true)
|
| 54 |
+
if [ -n "$B64_EXEC_HITS" ]; then
|
| 55 |
+
CRITICAL=true
|
| 56 |
+
FINDINGS="${FINDINGS}
|
| 57 |
+
### 🚨 CRITICAL: base64 decode + exec/eval combo
|
| 58 |
+
This is the exact pattern used in the [litellm supply chain attack](https://github.com/BerriAI/litellm/issues/24512) — base64-decoded strings passed to exec/eval to hide credential-stealing payloads.
|
| 59 |
+
|
| 60 |
+
**Matches:**
|
| 61 |
+
\`\`\`
|
| 62 |
+
${B64_EXEC_HITS}
|
| 63 |
+
\`\`\`
|
| 64 |
+
"
|
| 65 |
+
fi
|
| 66 |
+
|
| 67 |
+
# --- base64 decode/encode (alone — legitimate uses exist) ---
|
| 68 |
+
B64_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -iE 'base64\.(b64decode|b64encode|decodebytes|encodebytes|urlsafe_b64decode)|atob\(|btoa\(|Buffer\.from\(.*base64' | head -20 || true)
|
| 69 |
+
if [ -n "$B64_HITS" ]; then
|
| 70 |
+
FINDINGS="${FINDINGS}
|
| 71 |
+
### ⚠️ WARNING: base64 encoding/decoding detected
|
| 72 |
+
Base64 has legitimate uses (images, JWT, etc.) but is also commonly used to obfuscate malicious payloads. Verify the usage is appropriate.
|
| 73 |
+
|
| 74 |
+
**Matches (first 20):**
|
| 75 |
+
\`\`\`
|
| 76 |
+
${B64_HITS}
|
| 77 |
+
\`\`\`
|
| 78 |
+
"
|
| 79 |
+
fi
|
| 80 |
+
|
| 81 |
+
# --- exec/eval with string arguments ---
|
| 82 |
+
EXEC_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -E '(exec|eval)\s*\(' | grep -v '^\+\s*#' | grep -v 'test_\|mock\|assert\|# ' | head -20 || true)
|
| 83 |
+
if [ -n "$EXEC_HITS" ]; then
|
| 84 |
+
FINDINGS="${FINDINGS}
|
| 85 |
+
### ⚠️ WARNING: exec() or eval() usage
|
| 86 |
+
Dynamic code execution can hide malicious behavior, especially when combined with base64 or network fetches.
|
| 87 |
+
|
| 88 |
+
**Matches (first 20):**
|
| 89 |
+
\`\`\`
|
| 90 |
+
${EXEC_HITS}
|
| 91 |
+
\`\`\`
|
| 92 |
+
"
|
| 93 |
+
fi
|
| 94 |
+
|
| 95 |
+
# --- subprocess with encoded/obfuscated commands ---
|
| 96 |
+
PROC_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -E 'subprocess\.(Popen|call|run)\s*\(' | grep -iE 'base64|decode|encode|\\x|chr\(' | head -10 || true)
|
| 97 |
+
if [ -n "$PROC_HITS" ]; then
|
| 98 |
+
CRITICAL=true
|
| 99 |
+
FINDINGS="${FINDINGS}
|
| 100 |
+
### 🚨 CRITICAL: subprocess with encoded/obfuscated command
|
| 101 |
+
Subprocess calls with encoded arguments are a strong indicator of payload execution.
|
| 102 |
+
|
| 103 |
+
**Matches:**
|
| 104 |
+
\`\`\`
|
| 105 |
+
${PROC_HITS}
|
| 106 |
+
\`\`\`
|
| 107 |
+
"
|
| 108 |
+
fi
|
| 109 |
+
|
| 110 |
+
# --- Network calls to non-standard domains ---
|
| 111 |
+
EXFIL_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -iE 'requests\.(post|put)\(|httpx\.(post|put)\(|urllib\.request\.urlopen' | grep -v '^\+\s*#' | grep -v 'test_\|mock\|assert' | head -10 || true)
|
| 112 |
+
if [ -n "$EXFIL_HITS" ]; then
|
| 113 |
+
FINDINGS="${FINDINGS}
|
| 114 |
+
### ⚠️ WARNING: Outbound network calls (POST/PUT)
|
| 115 |
+
Outbound POST/PUT requests in new code could be data exfiltration. Verify the destination URLs are legitimate.
|
| 116 |
+
|
| 117 |
+
**Matches (first 10):**
|
| 118 |
+
\`\`\`
|
| 119 |
+
${EXFIL_HITS}
|
| 120 |
+
\`\`\`
|
| 121 |
+
"
|
| 122 |
+
fi
|
| 123 |
+
|
| 124 |
+
# --- setup.py / setup.cfg install hooks ---
|
| 125 |
+
SETUP_HITS=$(git diff --name-only "$BASE".."$HEAD" | grep -E '(setup\.py|setup\.cfg|__init__\.pth|sitecustomize\.py|usercustomize\.py)$' || true)
|
| 126 |
+
if [ -n "$SETUP_HITS" ]; then
|
| 127 |
+
FINDINGS="${FINDINGS}
|
| 128 |
+
### ⚠️ WARNING: Install hook files modified
|
| 129 |
+
These files can execute code during package installation or interpreter startup.
|
| 130 |
+
|
| 131 |
+
**Files:**
|
| 132 |
+
\`\`\`
|
| 133 |
+
${SETUP_HITS}
|
| 134 |
+
\`\`\`
|
| 135 |
+
"
|
| 136 |
+
fi
|
| 137 |
+
|
| 138 |
+
# --- Compile/marshal/pickle (code object injection) ---
|
| 139 |
+
MARSHAL_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -iE 'marshal\.loads|pickle\.loads|compile\(' | grep -v '^\+\s*#' | grep -v 'test_\|re\.compile\|ast\.compile' | head -10 || true)
|
| 140 |
+
if [ -n "$MARSHAL_HITS" ]; then
|
| 141 |
+
FINDINGS="${FINDINGS}
|
| 142 |
+
### ⚠️ WARNING: marshal/pickle/compile usage
|
| 143 |
+
These can deserialize or construct executable code objects.
|
| 144 |
+
|
| 145 |
+
**Matches:**
|
| 146 |
+
\`\`\`
|
| 147 |
+
${MARSHAL_HITS}
|
| 148 |
+
\`\`\`
|
| 149 |
+
"
|
| 150 |
+
fi
|
| 151 |
+
|
| 152 |
+
# --- Output results ---
|
| 153 |
+
if [ -n "$FINDINGS" ]; then
|
| 154 |
+
echo "found=true" >> "$GITHUB_OUTPUT"
|
| 155 |
+
if [ "$CRITICAL" = true ]; then
|
| 156 |
+
echo "critical=true" >> "$GITHUB_OUTPUT"
|
| 157 |
+
else
|
| 158 |
+
echo "critical=false" >> "$GITHUB_OUTPUT"
|
| 159 |
+
fi
|
| 160 |
+
# Write findings to a file (multiline env vars are fragile)
|
| 161 |
+
echo "$FINDINGS" > /tmp/findings.md
|
| 162 |
+
else
|
| 163 |
+
echo "found=false" >> "$GITHUB_OUTPUT"
|
| 164 |
+
echo "critical=false" >> "$GITHUB_OUTPUT"
|
| 165 |
+
fi
|
| 166 |
+
|
| 167 |
+
- name: Post warning comment
|
| 168 |
+
if: steps.scan.outputs.found == 'true'
|
| 169 |
+
env:
|
| 170 |
+
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
| 171 |
+
run: |
|
| 172 |
+
SEVERITY="⚠️ Supply Chain Risk Detected"
|
| 173 |
+
if [ "${{ steps.scan.outputs.critical }}" = "true" ]; then
|
| 174 |
+
SEVERITY="🚨 CRITICAL Supply Chain Risk Detected"
|
| 175 |
+
fi
|
| 176 |
+
|
| 177 |
+
BODY="## ${SEVERITY}
|
| 178 |
+
|
| 179 |
+
This PR contains patterns commonly associated with supply chain attacks. This does **not** mean the PR is malicious — but these patterns require careful human review before merging.
|
| 180 |
+
|
| 181 |
+
$(cat /tmp/findings.md)
|
| 182 |
+
|
| 183 |
+
---
|
| 184 |
+
*Automated scan triggered by [supply-chain-audit](/.github/workflows/supply-chain-audit.yml). If this is a false positive, a maintainer can approve after manual review.*"
|
| 185 |
+
|
| 186 |
+
gh pr comment "${{ github.event.pull_request.number }}" --body "$BODY"
|
| 187 |
+
|
| 188 |
+
- name: Fail on critical findings
|
| 189 |
+
if: steps.scan.outputs.critical == 'true'
|
| 190 |
+
run: |
|
| 191 |
+
echo "::error::CRITICAL supply chain risk patterns detected in this PR. See the PR comment for details."
|
| 192 |
+
exit 1
|
.github/workflows/tests.yml
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: Tests
|
| 2 |
+
|
| 3 |
+
on:
|
| 4 |
+
push:
|
| 5 |
+
branches: [main]
|
| 6 |
+
pull_request:
|
| 7 |
+
branches: [main]
|
| 8 |
+
|
| 9 |
+
# Cancel in-progress runs for the same PR/branch
|
| 10 |
+
concurrency:
|
| 11 |
+
group: tests-${{ github.ref }}
|
| 12 |
+
cancel-in-progress: true
|
| 13 |
+
|
| 14 |
+
jobs:
|
| 15 |
+
test:
|
| 16 |
+
runs-on: ubuntu-latest
|
| 17 |
+
timeout-minutes: 10
|
| 18 |
+
steps:
|
| 19 |
+
- name: Checkout code
|
| 20 |
+
uses: actions/checkout@v4
|
| 21 |
+
|
| 22 |
+
- name: Install uv
|
| 23 |
+
uses: astral-sh/setup-uv@v5
|
| 24 |
+
|
| 25 |
+
- name: Set up Python 3.11
|
| 26 |
+
run: uv python install 3.11
|
| 27 |
+
|
| 28 |
+
- name: Install dependencies
|
| 29 |
+
run: |
|
| 30 |
+
uv venv .venv --python 3.11
|
| 31 |
+
source .venv/bin/activate
|
| 32 |
+
uv pip install -e ".[all,dev]"
|
| 33 |
+
|
| 34 |
+
- name: Run tests
|
| 35 |
+
run: |
|
| 36 |
+
source .venv/bin/activate
|
| 37 |
+
python -m pytest tests/ -q --ignore=tests/integration --tb=short -n auto
|
| 38 |
+
env:
|
| 39 |
+
# Ensure tests don't accidentally call real APIs
|
| 40 |
+
OPENROUTER_API_KEY: ""
|
| 41 |
+
OPENAI_API_KEY: ""
|
| 42 |
+
NOUS_API_KEY: ""
|
.gitignore
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/venv/
|
| 2 |
+
/_pycache/
|
| 3 |
+
*.pyc*
|
| 4 |
+
__pycache__/
|
| 5 |
+
.venv/
|
| 6 |
+
.vscode/
|
| 7 |
+
.env
|
| 8 |
+
.env.local
|
| 9 |
+
.env.development.local
|
| 10 |
+
.env.test.local
|
| 11 |
+
.env.production.local
|
| 12 |
+
.env.development
|
| 13 |
+
.env.test
|
| 14 |
+
export*
|
| 15 |
+
__pycache__/model_tools.cpython-310.pyc
|
| 16 |
+
__pycache__/web_tools.cpython-310.pyc
|
| 17 |
+
logs/
|
| 18 |
+
data/
|
| 19 |
+
.pytest_cache/
|
| 20 |
+
tmp/
|
| 21 |
+
temp_vision_images/
|
| 22 |
+
hermes-*/*
|
| 23 |
+
examples/
|
| 24 |
+
tests/quick_test_dataset.jsonl
|
| 25 |
+
tests/sample_dataset.jsonl
|
| 26 |
+
run_datagen_kimik2-thinking.sh
|
| 27 |
+
run_datagen_megascience_glm4-6.sh
|
| 28 |
+
run_datagen_sonnet.sh
|
| 29 |
+
source-data/*
|
| 30 |
+
run_datagen_megascience_glm4-6.sh
|
| 31 |
+
data/*
|
| 32 |
+
node_modules/
|
| 33 |
+
browser-use/
|
| 34 |
+
agent-browser/
|
| 35 |
+
# Private keys
|
| 36 |
+
*.ppk
|
| 37 |
+
*.pem
|
| 38 |
+
privvy*
|
| 39 |
+
images/
|
| 40 |
+
__pycache__/
|
| 41 |
+
hermes_agent.egg-info/
|
| 42 |
+
wandb/
|
| 43 |
+
testlogs
|
| 44 |
+
|
| 45 |
+
# CLI config (may contain sensitive SSH paths)
|
| 46 |
+
cli-config.yaml
|
| 47 |
+
|
| 48 |
+
# Skills Hub state (lives in ~/.hermes/skills/.hub/ at runtime, but just in case)
|
| 49 |
+
skills/.hub/
|
| 50 |
+
ignored/
|
| 51 |
+
.worktrees/
|
| 52 |
+
environments/benchmarks/evals/
|
| 53 |
+
|
| 54 |
+
# Release script temp files
|
| 55 |
+
.release_notes.md
|
| 56 |
+
mini-swe-agent/
|
.gitmodules
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[submodule "tinker-atropos"]
|
| 2 |
+
path = tinker-atropos
|
| 3 |
+
url = https://github.com/nousresearch/tinker-atropos
|
.plans/openai-api-server.md
ADDED
|
@@ -0,0 +1,291 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# OpenAI-Compatible API Server for Hermes Agent
|
| 2 |
+
|
| 3 |
+
## Motivation
|
| 4 |
+
|
| 5 |
+
Every major chat frontend (Open WebUI 126k★, LobeChat 73k★, LibreChat 34k★,
|
| 6 |
+
AnythingLLM 56k★, NextChat 87k★, ChatBox 39k★, Jan 26k★, HF Chat-UI 8k★,
|
| 7 |
+
big-AGI 7k★) connects to backends via the OpenAI-compatible REST API with
|
| 8 |
+
SSE streaming. By exposing this endpoint, hermes-agent becomes instantly
|
| 9 |
+
usable as a backend for all of them — no custom adapters needed.
|
| 10 |
+
|
| 11 |
+
## What It Enables
|
| 12 |
+
|
| 13 |
+
```
|
| 14 |
+
┌──────────────────┐
|
| 15 |
+
│ Open WebUI │──┐
|
| 16 |
+
│ LobeChat │ │ POST /v1/chat/completions
|
| 17 |
+
│ LibreChat │ ├──► Authorization: Bearer <key> ┌─────────────────┐
|
| 18 |
+
│ AnythingLLM │ │ {"messages": [...]} │ hermes-agent │
|
| 19 |
+
│ NextChat │ │ │ gateway │
|
| 20 |
+
│ Any OAI client │──┘ ◄── SSE streaming response │ (API server) │
|
| 21 |
+
└──────────────────┘ └─────────────────┘
|
| 22 |
+
```
|
| 23 |
+
|
| 24 |
+
A user would:
|
| 25 |
+
1. Set `API_SERVER_ENABLED=true` in `~/.hermes/.env`
|
| 26 |
+
2. Run `hermes gateway` (API server starts alongside Telegram/Discord/etc.)
|
| 27 |
+
3. Point Open WebUI (or any frontend) at `http://localhost:8642/v1`
|
| 28 |
+
4. Chat with hermes-agent through any OpenAI-compatible UI
|
| 29 |
+
|
| 30 |
+
## Endpoints
|
| 31 |
+
|
| 32 |
+
| Method | Path | Purpose |
|
| 33 |
+
|--------|------|---------|
|
| 34 |
+
| POST | `/v1/chat/completions` | Chat with the agent (streaming + non-streaming) |
|
| 35 |
+
| GET | `/v1/models` | List available "models" (returns hermes-agent as a model) |
|
| 36 |
+
| GET | `/health` | Health check |
|
| 37 |
+
|
| 38 |
+
## Architecture
|
| 39 |
+
|
| 40 |
+
### Option A: Gateway Platform Adapter (recommended)
|
| 41 |
+
|
| 42 |
+
Create `gateway/platforms/api_server.py` as a new platform adapter that
|
| 43 |
+
extends `BasePlatformAdapter`. This is the cleanest approach because:
|
| 44 |
+
|
| 45 |
+
- Reuses all gateway infrastructure (session management, auth, context building)
|
| 46 |
+
- Runs in the same async loop as other adapters
|
| 47 |
+
- Gets message handling, interrupt support, and session persistence for free
|
| 48 |
+
- Follows the established pattern (like Telegram, Discord, etc.)
|
| 49 |
+
- Uses `aiohttp.web` (already a dependency) for the HTTP server
|
| 50 |
+
|
| 51 |
+
The adapter would start an `aiohttp.web.Application` server in `connect()`
|
| 52 |
+
and route incoming HTTP requests through the standard `handle_message()` pipeline.
|
| 53 |
+
|
| 54 |
+
### Option B: Standalone Component
|
| 55 |
+
|
| 56 |
+
A separate HTTP server class in `gateway/api_server.py` that creates its own
|
| 57 |
+
AIAgent instances directly. Simpler but duplicates session/auth logic.
|
| 58 |
+
|
| 59 |
+
**Recommendation: Option A** — fits the existing architecture, less code to
|
| 60 |
+
maintain, gets all gateway features for free.
|
| 61 |
+
|
| 62 |
+
## Request/Response Format
|
| 63 |
+
|
| 64 |
+
### Chat Completions (non-streaming)
|
| 65 |
+
|
| 66 |
+
```
|
| 67 |
+
POST /v1/chat/completions
|
| 68 |
+
Authorization: Bearer hermes-api-key-here
|
| 69 |
+
Content-Type: application/json
|
| 70 |
+
|
| 71 |
+
{
|
| 72 |
+
"model": "hermes-agent",
|
| 73 |
+
"messages": [
|
| 74 |
+
{"role": "system", "content": "You are a helpful assistant."},
|
| 75 |
+
{"role": "user", "content": "What files are in the current directory?"}
|
| 76 |
+
],
|
| 77 |
+
"stream": false,
|
| 78 |
+
"temperature": 0.7
|
| 79 |
+
}
|
| 80 |
+
```
|
| 81 |
+
|
| 82 |
+
Response:
|
| 83 |
+
```json
|
| 84 |
+
{
|
| 85 |
+
"id": "chatcmpl-abc123",
|
| 86 |
+
"object": "chat.completion",
|
| 87 |
+
"created": 1710000000,
|
| 88 |
+
"model": "hermes-agent",
|
| 89 |
+
"choices": [{
|
| 90 |
+
"index": 0,
|
| 91 |
+
"message": {
|
| 92 |
+
"role": "assistant",
|
| 93 |
+
"content": "Here are the files in the current directory:\n..."
|
| 94 |
+
},
|
| 95 |
+
"finish_reason": "stop"
|
| 96 |
+
}],
|
| 97 |
+
"usage": {
|
| 98 |
+
"prompt_tokens": 50,
|
| 99 |
+
"completion_tokens": 200,
|
| 100 |
+
"total_tokens": 250
|
| 101 |
+
}
|
| 102 |
+
}
|
| 103 |
+
```
|
| 104 |
+
|
| 105 |
+
### Chat Completions (streaming)
|
| 106 |
+
|
| 107 |
+
Same request with `"stream": true`. Response is SSE:
|
| 108 |
+
|
| 109 |
+
```
|
| 110 |
+
data: {"id":"chatcmpl-abc123","object":"chat.completion.chunk","choices":[{"index":0,"delta":{"role":"assistant"},"finish_reason":null}]}
|
| 111 |
+
|
| 112 |
+
data: {"id":"chatcmpl-abc123","object":"chat.completion.chunk","choices":[{"index":0,"delta":{"content":"Here "},"finish_reason":null}]}
|
| 113 |
+
|
| 114 |
+
data: {"id":"chatcmpl-abc123","object":"chat.completion.chunk","choices":[{"index":0,"delta":{"content":"are "},"finish_reason":null}]}
|
| 115 |
+
|
| 116 |
+
data: {"id":"chatcmpl-abc123","object":"chat.completion.chunk","choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}
|
| 117 |
+
|
| 118 |
+
data: [DONE]
|
| 119 |
+
```
|
| 120 |
+
|
| 121 |
+
### Models List
|
| 122 |
+
|
| 123 |
+
```
|
| 124 |
+
GET /v1/models
|
| 125 |
+
Authorization: Bearer hermes-api-key-here
|
| 126 |
+
```
|
| 127 |
+
|
| 128 |
+
Response:
|
| 129 |
+
```json
|
| 130 |
+
{
|
| 131 |
+
"object": "list",
|
| 132 |
+
"data": [{
|
| 133 |
+
"id": "hermes-agent",
|
| 134 |
+
"object": "model",
|
| 135 |
+
"created": 1710000000,
|
| 136 |
+
"owned_by": "hermes-agent"
|
| 137 |
+
}]
|
| 138 |
+
}
|
| 139 |
+
```
|
| 140 |
+
|
| 141 |
+
## Key Design Decisions
|
| 142 |
+
|
| 143 |
+
### 1. Session Management
|
| 144 |
+
|
| 145 |
+
The OpenAI API is stateless — each request includes the full conversation.
|
| 146 |
+
But hermes-agent sessions have persistent state (memory, skills, tool context).
|
| 147 |
+
|
| 148 |
+
**Approach: Hybrid**
|
| 149 |
+
- Default: Stateless. Each request is independent. The `messages` array IS
|
| 150 |
+
the conversation. No session persistence between requests.
|
| 151 |
+
- Opt-in persistent sessions via `X-Session-ID` header. When provided, the
|
| 152 |
+
server maintains session state across requests (conversation history,
|
| 153 |
+
memory context, tool state). This enables richer agent behavior.
|
| 154 |
+
- The session ID also enables interrupt support — a subsequent request with
|
| 155 |
+
the same session ID while one is running triggers an interrupt.
|
| 156 |
+
|
| 157 |
+
### 2. Streaming
|
| 158 |
+
|
| 159 |
+
The agent's `run_conversation()` is synchronous and returns the full response.
|
| 160 |
+
For real SSE streaming, we need to emit chunks as they're generated.
|
| 161 |
+
|
| 162 |
+
**Phase 1 (MVP):** Run agent in a thread, return the complete response as
|
| 163 |
+
a single SSE chunk + `[DONE]`. This works with all frontends — they just see
|
| 164 |
+
a fast single-chunk response. Not true streaming but functional.
|
| 165 |
+
|
| 166 |
+
**Phase 2:** Add a response callback to AIAgent that emits text chunks as the
|
| 167 |
+
LLM generates them. The API server captures these via a queue and streams them
|
| 168 |
+
as SSE events. This gives real token-by-token streaming.
|
| 169 |
+
|
| 170 |
+
**Phase 3:** Stream tool execution progress too — emit tool call/result events
|
| 171 |
+
as the agent works, giving frontends visibility into what the agent is doing.
|
| 172 |
+
|
| 173 |
+
### 3. Tool Transparency
|
| 174 |
+
|
| 175 |
+
Two modes:
|
| 176 |
+
- **Opaque (default):** Frontends see only the final response. Tool calls
|
| 177 |
+
happen server-side and are invisible. Best for general-purpose UIs.
|
| 178 |
+
- **Transparent (opt-in via header):** Tool calls are emitted as OpenAI-format
|
| 179 |
+
tool_call/tool_result messages in the stream. Useful for agent-aware frontends.
|
| 180 |
+
|
| 181 |
+
### 4. Authentication
|
| 182 |
+
|
| 183 |
+
- Bearer token via `Authorization: Bearer <key>` header
|
| 184 |
+
- Token configured via `API_SERVER_KEY` env var
|
| 185 |
+
- Optional: allow unauthenticated local-only access (127.0.0.1 bind)
|
| 186 |
+
- Follows the same pattern as other platform adapters
|
| 187 |
+
|
| 188 |
+
### 5. Model Mapping
|
| 189 |
+
|
| 190 |
+
Frontends send `"model": "hermes-agent"` (or whatever). The actual LLM model
|
| 191 |
+
used is configured server-side in config.yaml. The API server maps any
|
| 192 |
+
requested model name to the configured hermes-agent model.
|
| 193 |
+
|
| 194 |
+
Optionally, allow model passthrough: if the frontend sends
|
| 195 |
+
`"model": "anthropic/claude-sonnet-4"`, the agent uses that model. Controlled
|
| 196 |
+
by a config flag.
|
| 197 |
+
|
| 198 |
+
## Configuration
|
| 199 |
+
|
| 200 |
+
```yaml
|
| 201 |
+
# In config.yaml
|
| 202 |
+
api_server:
|
| 203 |
+
enabled: true
|
| 204 |
+
port: 8642
|
| 205 |
+
host: "127.0.0.1" # localhost only by default
|
| 206 |
+
key: "your-secret-key" # or via API_SERVER_KEY env var
|
| 207 |
+
allow_model_override: false # let clients choose the model
|
| 208 |
+
max_concurrent: 5 # max simultaneous requests
|
| 209 |
+
```
|
| 210 |
+
|
| 211 |
+
Environment variables:
|
| 212 |
+
```bash
|
| 213 |
+
API_SERVER_ENABLED=true
|
| 214 |
+
API_SERVER_PORT=8642
|
| 215 |
+
API_SERVER_HOST=127.0.0.1
|
| 216 |
+
API_SERVER_KEY=your-secret-key
|
| 217 |
+
```
|
| 218 |
+
|
| 219 |
+
## Implementation Plan
|
| 220 |
+
|
| 221 |
+
### Phase 1: MVP (non-streaming) — PR
|
| 222 |
+
|
| 223 |
+
1. `gateway/platforms/api_server.py` — new adapter
|
| 224 |
+
- aiohttp.web server with endpoints:
|
| 225 |
+
- `POST /v1/chat/completions` — Chat Completions API (universal compat)
|
| 226 |
+
- `POST /v1/responses` — Responses API (server-side state, tool preservation)
|
| 227 |
+
- `GET /v1/models` — list available models
|
| 228 |
+
- `GET /health` — health check
|
| 229 |
+
- Bearer token auth middleware
|
| 230 |
+
- Non-streaming responses (run agent, return full result)
|
| 231 |
+
- Chat Completions: stateless, messages array is the conversation
|
| 232 |
+
- Responses API: server-side conversation storage via previous_response_id
|
| 233 |
+
- Store full internal conversation (including tool calls) keyed by response ID
|
| 234 |
+
- On subsequent requests, reconstruct full context from stored chain
|
| 235 |
+
- Frontend system prompt layered on top of hermes-agent's core prompt
|
| 236 |
+
|
| 237 |
+
2. `gateway/config.py` — add `Platform.API_SERVER` enum + config
|
| 238 |
+
|
| 239 |
+
3. `gateway/run.py` — register adapter in `_create_adapter()`
|
| 240 |
+
|
| 241 |
+
4. Tests in `tests/gateway/test_api_server.py`
|
| 242 |
+
|
| 243 |
+
### Phase 2: SSE Streaming
|
| 244 |
+
|
| 245 |
+
1. Add response streaming to both endpoints
|
| 246 |
+
- Chat Completions: `choices[0].delta.content` SSE format
|
| 247 |
+
- Responses API: semantic events (response.output_text.delta, etc.)
|
| 248 |
+
- Run agent in thread, collect output via callback queue
|
| 249 |
+
- Handle client disconnect (cancel agent)
|
| 250 |
+
|
| 251 |
+
2. Add `stream_callback` parameter to `AIAgent.run_conversation()`
|
| 252 |
+
|
| 253 |
+
### Phase 3: Enhanced Features
|
| 254 |
+
|
| 255 |
+
1. Tool call transparency mode (opt-in)
|
| 256 |
+
2. Model passthrough/override
|
| 257 |
+
3. Concurrent request limiting
|
| 258 |
+
4. Usage tracking / rate limiting
|
| 259 |
+
5. CORS headers for browser-based frontends
|
| 260 |
+
6. GET /v1/responses/{id} — retrieve stored response
|
| 261 |
+
7. DELETE /v1/responses/{id} — delete stored response
|
| 262 |
+
|
| 263 |
+
## Files Changed
|
| 264 |
+
|
| 265 |
+
| File | Change |
|
| 266 |
+
|------|--------|
|
| 267 |
+
| `gateway/platforms/api_server.py` | NEW — main adapter (~300 lines) |
|
| 268 |
+
| `gateway/config.py` | Add Platform.API_SERVER + config (~20 lines) |
|
| 269 |
+
| `gateway/run.py` | Register adapter in _create_adapter() (~10 lines) |
|
| 270 |
+
| `tests/gateway/test_api_server.py` | NEW — tests (~200 lines) |
|
| 271 |
+
| `cli-config.yaml.example` | Add api_server section |
|
| 272 |
+
| `README.md` | Mention API server in platform list |
|
| 273 |
+
|
| 274 |
+
## Compatibility Matrix
|
| 275 |
+
|
| 276 |
+
Once implemented, hermes-agent works as a drop-in backend for:
|
| 277 |
+
|
| 278 |
+
| Frontend | Stars | How to Connect |
|
| 279 |
+
|----------|-------|---------------|
|
| 280 |
+
| Open WebUI | 126k | Settings → Connections → Add OpenAI API, URL: `http://localhost:8642/v1` |
|
| 281 |
+
| NextChat | 87k | BASE_URL env var |
|
| 282 |
+
| LobeChat | 73k | Custom provider endpoint |
|
| 283 |
+
| AnythingLLM | 56k | LLM Provider → Generic OpenAI |
|
| 284 |
+
| Oobabooga | 42k | Already a backend, not a frontend |
|
| 285 |
+
| ChatBox | 39k | API Host setting |
|
| 286 |
+
| LibreChat | 34k | librechat.yaml custom endpoint |
|
| 287 |
+
| Chatbot UI | 29k | Custom API endpoint |
|
| 288 |
+
| Jan | 26k | Remote model config |
|
| 289 |
+
| AionUI | 18k | Custom API endpoint |
|
| 290 |
+
| HF Chat-UI | 8k | OPENAI_BASE_URL env var |
|
| 291 |
+
| big-AGI | 7k | Custom endpoint |
|
.plans/streaming-support.md
ADDED
|
@@ -0,0 +1,705 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Streaming LLM Response Support for Hermes Agent
|
| 2 |
+
|
| 3 |
+
## Overview
|
| 4 |
+
|
| 5 |
+
Add token-by-token streaming of LLM responses across all platforms. When enabled,
|
| 6 |
+
users see the response typing out live instead of waiting for the full generation.
|
| 7 |
+
Streaming is opt-in via config, defaults to off, and all existing non-streaming
|
| 8 |
+
code paths remain intact as the default.
|
| 9 |
+
|
| 10 |
+
## Design Principles
|
| 11 |
+
|
| 12 |
+
1. **Feature-flagged**: `streaming.enabled: true` in config.yaml. Off by default.
|
| 13 |
+
When off, all existing code paths are unchanged — zero risk to current behavior.
|
| 14 |
+
2. **Callback-based**: A simple `stream_callback(text_delta: str | None)` function injected
|
| 15 |
+
into AIAgent. The agent doesn't know or care what the consumer does with tokens.
|
| 16 |
+
3. **Graceful degradation**: If the provider doesn't support streaming, or streaming
|
| 17 |
+
fails for any reason, silently fall back to the non-streaming path.
|
| 18 |
+
4. **Platform-agnostic core**: The streaming mechanism in AIAgent works the same
|
| 19 |
+
regardless of whether the consumer is CLI, Telegram, Discord, or the API server.
|
| 20 |
+
|
| 21 |
+
---
|
| 22 |
+
|
| 23 |
+
## Architecture
|
| 24 |
+
|
| 25 |
+
```
|
| 26 |
+
stream_callback(delta)
|
| 27 |
+
│
|
| 28 |
+
┌─────────────┐ ┌─────────────▼──────────────┐
|
| 29 |
+
│ LLM API │ │ queue.Queue() │
|
| 30 |
+
│ (stream) │───►│ thread-safe bridge between │
|
| 31 |
+
│ │ │ agent thread & consumer │
|
| 32 |
+
└─────────────┘ └─────────────┬──────────────┘
|
| 33 |
+
│
|
| 34 |
+
┌──────────────┼──────────────┐
|
| 35 |
+
│ │ │
|
| 36 |
+
┌─────▼─────┐ ┌─────▼─────┐ ┌─────▼─────┐
|
| 37 |
+
│ CLI │ │ Gateway │ │ API Server│
|
| 38 |
+
│ print to │ │ edit msg │ │ SSE event │
|
| 39 |
+
│ terminal │ │ on Tg/Dc │ │ to client │
|
| 40 |
+
└───────────┘ └───────────┘ └───────────┘
|
| 41 |
+
```
|
| 42 |
+
|
| 43 |
+
The agent runs in a thread. The callback puts tokens into a thread-safe queue.
|
| 44 |
+
Each consumer reads the queue in its own context (async task, main thread, etc.).
|
| 45 |
+
|
| 46 |
+
---
|
| 47 |
+
|
| 48 |
+
## Configuration
|
| 49 |
+
|
| 50 |
+
### config.yaml
|
| 51 |
+
|
| 52 |
+
```yaml
|
| 53 |
+
streaming:
|
| 54 |
+
enabled: false # Master switch. Default off.
|
| 55 |
+
# Per-platform overrides (optional):
|
| 56 |
+
# cli: true # Override for CLI only
|
| 57 |
+
# telegram: true # Override for Telegram only
|
| 58 |
+
# discord: false # Keep Discord non-streaming
|
| 59 |
+
# api_server: true # Override for API server
|
| 60 |
+
```
|
| 61 |
+
|
| 62 |
+
### Environment variables
|
| 63 |
+
|
| 64 |
+
```
|
| 65 |
+
HERMES_STREAMING_ENABLED=true # Master switch via env
|
| 66 |
+
```
|
| 67 |
+
|
| 68 |
+
### How the flag is read
|
| 69 |
+
|
| 70 |
+
- **CLI**: `load_cli_config()` reads `streaming.enabled`, sets env var. AIAgent
|
| 71 |
+
checks at init time.
|
| 72 |
+
- **Gateway**: `_run_agent()` reads config, decides whether to pass
|
| 73 |
+
`stream_callback` to the AIAgent constructor.
|
| 74 |
+
- **API server**: For Chat Completions `stream=true` requests, always uses streaming
|
| 75 |
+
regardless of config (the client is explicitly requesting it). For non-stream
|
| 76 |
+
requests, uses config.
|
| 77 |
+
|
| 78 |
+
### Precedence
|
| 79 |
+
|
| 80 |
+
1. API server: client's `stream` field overrides everything
|
| 81 |
+
2. Per-platform config override (e.g., `streaming.telegram: true`)
|
| 82 |
+
3. Master `streaming.enabled` flag
|
| 83 |
+
4. Default: off
|
| 84 |
+
|
| 85 |
+
---
|
| 86 |
+
|
| 87 |
+
## Implementation Plan
|
| 88 |
+
|
| 89 |
+
### Phase 1: Core streaming infrastructure in AIAgent
|
| 90 |
+
|
| 91 |
+
**File: run_agent.py**
|
| 92 |
+
|
| 93 |
+
#### 1a. Add stream_callback parameter to __init__ (~5 lines)
|
| 94 |
+
|
| 95 |
+
```python
|
| 96 |
+
def __init__(self, ..., stream_callback: Callable[[str | None], None] | None = None, ...):
|
| 97 |
+
self.stream_callback = stream_callback
|
| 98 |
+
```
|
| 99 |
+
|
| 100 |
+
No other init changes. The callback is optional — when None, everything
|
| 101 |
+
works exactly as before.
|
| 102 |
+
|
| 103 |
+
#### 1b. Add _run_streaming_chat_completion() method (~65 lines)
|
| 104 |
+
|
| 105 |
+
New method for Chat Completions API streaming:
|
| 106 |
+
|
| 107 |
+
```python
|
| 108 |
+
def _run_streaming_chat_completion(self, api_kwargs: dict):
|
| 109 |
+
"""Stream a chat completion, emitting text tokens via stream_callback.
|
| 110 |
+
|
| 111 |
+
Returns a fake response object compatible with the non-streaming code path.
|
| 112 |
+
Falls back to non-streaming on any error.
|
| 113 |
+
"""
|
| 114 |
+
stream_kwargs = dict(api_kwargs)
|
| 115 |
+
stream_kwargs["stream"] = True
|
| 116 |
+
stream_kwargs["stream_options"] = {"include_usage": True}
|
| 117 |
+
|
| 118 |
+
accumulated_content = []
|
| 119 |
+
accumulated_tool_calls = {} # index -> {id, name, arguments}
|
| 120 |
+
final_usage = None
|
| 121 |
+
|
| 122 |
+
try:
|
| 123 |
+
stream = self.client.chat.completions.create(**stream_kwargs)
|
| 124 |
+
|
| 125 |
+
for chunk in stream:
|
| 126 |
+
if not chunk.choices:
|
| 127 |
+
# Usage-only chunk (final)
|
| 128 |
+
if chunk.usage:
|
| 129 |
+
final_usage = chunk.usage
|
| 130 |
+
continue
|
| 131 |
+
|
| 132 |
+
delta = chunk.choices[0].delta
|
| 133 |
+
|
| 134 |
+
# Text content — emit via callback
|
| 135 |
+
if delta.content:
|
| 136 |
+
accumulated_content.append(delta.content)
|
| 137 |
+
if self.stream_callback:
|
| 138 |
+
try:
|
| 139 |
+
self.stream_callback(delta.content)
|
| 140 |
+
except Exception:
|
| 141 |
+
pass
|
| 142 |
+
|
| 143 |
+
# Tool call deltas — accumulate silently
|
| 144 |
+
if delta.tool_calls:
|
| 145 |
+
for tc_delta in delta.tool_calls:
|
| 146 |
+
idx = tc_delta.index
|
| 147 |
+
if idx not in accumulated_tool_calls:
|
| 148 |
+
accumulated_tool_calls[idx] = {
|
| 149 |
+
"id": tc_delta.id or "",
|
| 150 |
+
"name": "", "arguments": ""
|
| 151 |
+
}
|
| 152 |
+
if tc_delta.function:
|
| 153 |
+
if tc_delta.function.name:
|
| 154 |
+
accumulated_tool_calls[idx]["name"] = tc_delta.function.name
|
| 155 |
+
if tc_delta.function.arguments:
|
| 156 |
+
accumulated_tool_calls[idx]["arguments"] += tc_delta.function.arguments
|
| 157 |
+
|
| 158 |
+
# Build fake response compatible with existing code
|
| 159 |
+
tool_calls = []
|
| 160 |
+
for idx in sorted(accumulated_tool_calls):
|
| 161 |
+
tc = accumulated_tool_calls[idx]
|
| 162 |
+
if tc["name"]:
|
| 163 |
+
tool_calls.append(SimpleNamespace(
|
| 164 |
+
id=tc["id"], type="function",
|
| 165 |
+
function=SimpleNamespace(name=tc["name"], arguments=tc["arguments"]),
|
| 166 |
+
))
|
| 167 |
+
|
| 168 |
+
return SimpleNamespace(
|
| 169 |
+
choices=[SimpleNamespace(
|
| 170 |
+
message=SimpleNamespace(
|
| 171 |
+
content="".join(accumulated_content) or "",
|
| 172 |
+
tool_calls=tool_calls or None,
|
| 173 |
+
role="assistant",
|
| 174 |
+
),
|
| 175 |
+
finish_reason="tool_calls" if tool_calls else "stop",
|
| 176 |
+
)],
|
| 177 |
+
usage=final_usage,
|
| 178 |
+
model=self.model,
|
| 179 |
+
)
|
| 180 |
+
|
| 181 |
+
except Exception as e:
|
| 182 |
+
logger.debug("Streaming failed, falling back to non-streaming: %s", e)
|
| 183 |
+
return self.client.chat.completions.create(**api_kwargs)
|
| 184 |
+
```
|
| 185 |
+
|
| 186 |
+
#### 1c. Modify _run_codex_stream() for Responses API (~10 lines)
|
| 187 |
+
|
| 188 |
+
The method already iterates the stream. Add callback emission:
|
| 189 |
+
|
| 190 |
+
```python
|
| 191 |
+
def _run_codex_stream(self, api_kwargs: dict):
|
| 192 |
+
with self.client.responses.stream(**api_kwargs) as stream:
|
| 193 |
+
for event in stream:
|
| 194 |
+
# Emit text deltas if streaming callback is set
|
| 195 |
+
if self.stream_callback and hasattr(event, 'type'):
|
| 196 |
+
if event.type == 'response.output_text.delta':
|
| 197 |
+
try:
|
| 198 |
+
self.stream_callback(event.delta)
|
| 199 |
+
except Exception:
|
| 200 |
+
pass
|
| 201 |
+
return stream.get_final_response()
|
| 202 |
+
```
|
| 203 |
+
|
| 204 |
+
#### 1d. Modify _interruptible_api_call() (~5 lines)
|
| 205 |
+
|
| 206 |
+
Add the streaming branch:
|
| 207 |
+
|
| 208 |
+
```python
|
| 209 |
+
def _call():
|
| 210 |
+
try:
|
| 211 |
+
if self.api_mode == "codex_responses":
|
| 212 |
+
result["response"] = self._run_codex_stream(api_kwargs)
|
| 213 |
+
elif self.stream_callback is not None:
|
| 214 |
+
result["response"] = self._run_streaming_chat_completion(api_kwargs)
|
| 215 |
+
else:
|
| 216 |
+
result["response"] = self.client.chat.completions.create(**api_kwargs)
|
| 217 |
+
except Exception as e:
|
| 218 |
+
result["error"] = e
|
| 219 |
+
```
|
| 220 |
+
|
| 221 |
+
#### 1e. Signal end-of-stream to consumers (~5 lines)
|
| 222 |
+
|
| 223 |
+
After the API call returns, signal the callback that streaming is done
|
| 224 |
+
so consumers can finalize (remove cursor, close SSE, etc.):
|
| 225 |
+
|
| 226 |
+
```python
|
| 227 |
+
# In run_conversation(), after _interruptible_api_call returns:
|
| 228 |
+
if self.stream_callback:
|
| 229 |
+
try:
|
| 230 |
+
self.stream_callback(None) # None = end of stream signal
|
| 231 |
+
except Exception:
|
| 232 |
+
pass
|
| 233 |
+
```
|
| 234 |
+
|
| 235 |
+
Consumers check: `if delta is None: finalize()`
|
| 236 |
+
|
| 237 |
+
**Tests for Phase 1:** (~150 lines)
|
| 238 |
+
- Test _run_streaming_chat_completion with mocked stream
|
| 239 |
+
- Test fallback to non-streaming on error
|
| 240 |
+
- Test tool_call accumulation during streaming
|
| 241 |
+
- Test stream_callback receives correct deltas
|
| 242 |
+
- Test None signal at end of stream
|
| 243 |
+
- Test streaming disabled when callback is None
|
| 244 |
+
|
| 245 |
+
---
|
| 246 |
+
|
| 247 |
+
### Phase 2: Gateway consumers (Telegram, Discord, etc.)
|
| 248 |
+
|
| 249 |
+
**File: gateway/run.py**
|
| 250 |
+
|
| 251 |
+
#### 2a. Read streaming config (~15 lines)
|
| 252 |
+
|
| 253 |
+
In `_run_agent()`, before creating the AIAgent:
|
| 254 |
+
|
| 255 |
+
```python
|
| 256 |
+
# Read streaming config
|
| 257 |
+
_streaming_enabled = False
|
| 258 |
+
try:
|
| 259 |
+
# Check per-platform override first
|
| 260 |
+
platform_key = source.platform.value if source.platform else ""
|
| 261 |
+
_stream_cfg = {} # loaded from config.yaml streaming section
|
| 262 |
+
if _stream_cfg.get(platform_key) is not None:
|
| 263 |
+
_streaming_enabled = bool(_stream_cfg[platform_key])
|
| 264 |
+
else:
|
| 265 |
+
_streaming_enabled = bool(_stream_cfg.get("enabled", False))
|
| 266 |
+
except Exception:
|
| 267 |
+
pass
|
| 268 |
+
# Env var override
|
| 269 |
+
if os.getenv("HERMES_STREAMING_ENABLED", "").lower() in ("true", "1", "yes"):
|
| 270 |
+
_streaming_enabled = True
|
| 271 |
+
```
|
| 272 |
+
|
| 273 |
+
#### 2b. Set up queue + callback (~15 lines)
|
| 274 |
+
|
| 275 |
+
```python
|
| 276 |
+
_stream_q = None
|
| 277 |
+
_stream_done = None
|
| 278 |
+
_stream_msg_id = [None] # mutable ref for the async task
|
| 279 |
+
|
| 280 |
+
if _streaming_enabled:
|
| 281 |
+
import queue as _q
|
| 282 |
+
_stream_q = _q.Queue()
|
| 283 |
+
_stream_done = threading.Event()
|
| 284 |
+
|
| 285 |
+
def _on_token(delta):
|
| 286 |
+
if delta is None:
|
| 287 |
+
_stream_done.set()
|
| 288 |
+
else:
|
| 289 |
+
_stream_q.put(delta)
|
| 290 |
+
```
|
| 291 |
+
|
| 292 |
+
Pass `stream_callback=_on_token` to the AIAgent constructor.
|
| 293 |
+
|
| 294 |
+
#### 2c. Telegram/Discord stream preview task (~50 lines)
|
| 295 |
+
|
| 296 |
+
```python
|
| 297 |
+
async def stream_preview():
|
| 298 |
+
"""Progressively edit a message with streaming tokens."""
|
| 299 |
+
if not _stream_q:
|
| 300 |
+
return
|
| 301 |
+
adapter = self.adapters.get(source.platform)
|
| 302 |
+
if not adapter:
|
| 303 |
+
return
|
| 304 |
+
|
| 305 |
+
accumulated = []
|
| 306 |
+
token_count = 0
|
| 307 |
+
last_edit = 0.0
|
| 308 |
+
MIN_TOKENS = 20 # Don't show until enough context
|
| 309 |
+
EDIT_INTERVAL = 1.5 # Respect Telegram rate limits
|
| 310 |
+
|
| 311 |
+
try:
|
| 312 |
+
while not _stream_done.is_set():
|
| 313 |
+
try:
|
| 314 |
+
chunk = _stream_q.get(timeout=0.1)
|
| 315 |
+
accumulated.append(chunk)
|
| 316 |
+
token_count += 1
|
| 317 |
+
except _q.Empty:
|
| 318 |
+
continue
|
| 319 |
+
|
| 320 |
+
now = time.monotonic()
|
| 321 |
+
if token_count >= MIN_TOKENS and (now - last_edit) >= EDIT_INTERVAL:
|
| 322 |
+
preview = "".join(accumulated) + " ▌"
|
| 323 |
+
if _stream_msg_id[0] is None:
|
| 324 |
+
r = await adapter.send(
|
| 325 |
+
chat_id=source.chat_id,
|
| 326 |
+
content=preview,
|
| 327 |
+
metadata=_thread_metadata,
|
| 328 |
+
)
|
| 329 |
+
if r.success and r.message_id:
|
| 330 |
+
_stream_msg_id[0] = r.message_id
|
| 331 |
+
else:
|
| 332 |
+
await adapter.edit_message(
|
| 333 |
+
chat_id=source.chat_id,
|
| 334 |
+
message_id=_stream_msg_id[0],
|
| 335 |
+
content=preview,
|
| 336 |
+
)
|
| 337 |
+
last_edit = now
|
| 338 |
+
|
| 339 |
+
# Drain remaining tokens
|
| 340 |
+
while not _stream_q.empty():
|
| 341 |
+
accumulated.append(_stream_q.get_nowait())
|
| 342 |
+
|
| 343 |
+
# Final edit — remove cursor, show complete text
|
| 344 |
+
if _stream_msg_id[0] and accumulated:
|
| 345 |
+
await adapter.edit_message(
|
| 346 |
+
chat_id=source.chat_id,
|
| 347 |
+
message_id=_stream_msg_id[0],
|
| 348 |
+
content="".join(accumulated),
|
| 349 |
+
)
|
| 350 |
+
|
| 351 |
+
except asyncio.CancelledError:
|
| 352 |
+
# Clean up on cancel
|
| 353 |
+
if _stream_msg_id[0] and accumulated:
|
| 354 |
+
try:
|
| 355 |
+
await adapter.edit_message(
|
| 356 |
+
chat_id=source.chat_id,
|
| 357 |
+
message_id=_stream_msg_id[0],
|
| 358 |
+
content="".join(accumulated),
|
| 359 |
+
)
|
| 360 |
+
except Exception:
|
| 361 |
+
pass
|
| 362 |
+
except Exception as e:
|
| 363 |
+
logger.debug("stream_preview error: %s", e)
|
| 364 |
+
```
|
| 365 |
+
|
| 366 |
+
#### 2d. Skip final send if already streamed (~10 lines)
|
| 367 |
+
|
| 368 |
+
In `_process_message_background()` (base.py), after getting the response,
|
| 369 |
+
if streaming was active and `_stream_msg_id[0]` is set, the final response
|
| 370 |
+
was already delivered via progressive edits. Skip the normal `self.send()`
|
| 371 |
+
call to avoid duplicating the message.
|
| 372 |
+
|
| 373 |
+
This is the most delicate integration point — we need to communicate from
|
| 374 |
+
the gateway's `_run_agent` back to the base adapter's response sender that
|
| 375 |
+
the response was already delivered. Options:
|
| 376 |
+
|
| 377 |
+
- **Option A**: Return a special marker in the result dict:
|
| 378 |
+
`result["_streamed_msg_id"] = _stream_msg_id[0]`
|
| 379 |
+
The base adapter checks this and skips `send()`.
|
| 380 |
+
|
| 381 |
+
- **Option B**: Edit the already-sent message with the final response
|
| 382 |
+
(which may differ slightly from accumulated tokens due to think-block
|
| 383 |
+
stripping, etc.) and don't send a new one.
|
| 384 |
+
|
| 385 |
+
- **Option C**: The stream preview task handles the FULL final response
|
| 386 |
+
(including any post-processing), and the handler returns None to skip
|
| 387 |
+
the normal send path.
|
| 388 |
+
|
| 389 |
+
Recommended: **Option A** — cleanest separation. The result dict already
|
| 390 |
+
carries metadata; adding one more field is low-risk.
|
| 391 |
+
|
| 392 |
+
**Platform-specific considerations:**
|
| 393 |
+
|
| 394 |
+
| Platform | Edit support | Rate limits | Streaming approach |
|
| 395 |
+
|----------|-------------|-------------|-------------------|
|
| 396 |
+
| Telegram | ✅ edit_message_text | ~20 edits/min | Edit every 1.5s |
|
| 397 |
+
| Discord | ✅ message.edit | 5 edits/5s per message | Edit every 1.2s |
|
| 398 |
+
| Slack | ✅ chat.update | Tier 3 (~50/min) | Edit every 1.5s |
|
| 399 |
+
| WhatsApp | ❌ no edit support | N/A | Skip streaming, use normal path |
|
| 400 |
+
| HomeAssistant | ❌ no edit | N/A | Skip streaming |
|
| 401 |
+
| API Server | ✅ SSE native | No limit | Real SSE events |
|
| 402 |
+
|
| 403 |
+
WhatsApp and HomeAssistant fall back to non-streaming automatically because
|
| 404 |
+
they don't support message editing.
|
| 405 |
+
|
| 406 |
+
**Tests for Phase 2:** (~100 lines)
|
| 407 |
+
- Test stream_preview sends/edits correctly
|
| 408 |
+
- Test skip-final-send when streaming delivered
|
| 409 |
+
- Test WhatsApp/HA graceful fallback
|
| 410 |
+
- Test streaming disabled per-platform config
|
| 411 |
+
- Test thread_id metadata forwarded in stream messages
|
| 412 |
+
|
| 413 |
+
---
|
| 414 |
+
|
| 415 |
+
### Phase 3: CLI streaming
|
| 416 |
+
|
| 417 |
+
**File: cli.py**
|
| 418 |
+
|
| 419 |
+
#### 3a. Set up callback in the CLI chat loop (~20 lines)
|
| 420 |
+
|
| 421 |
+
In `_chat_once()` or wherever the agent is invoked:
|
| 422 |
+
|
| 423 |
+
```python
|
| 424 |
+
if streaming_enabled:
|
| 425 |
+
_stream_q = queue.Queue()
|
| 426 |
+
_stream_done = threading.Event()
|
| 427 |
+
|
| 428 |
+
def _cli_stream_callback(delta):
|
| 429 |
+
if delta is None:
|
| 430 |
+
_stream_done.set()
|
| 431 |
+
else:
|
| 432 |
+
_stream_q.put(delta)
|
| 433 |
+
|
| 434 |
+
agent.stream_callback = _cli_stream_callback
|
| 435 |
+
```
|
| 436 |
+
|
| 437 |
+
#### 3b. Token display thread/task (~30 lines)
|
| 438 |
+
|
| 439 |
+
Start a thread that reads the queue and prints tokens:
|
| 440 |
+
|
| 441 |
+
```python
|
| 442 |
+
def _stream_display():
|
| 443 |
+
"""Print tokens to terminal as they arrive."""
|
| 444 |
+
first_token = True
|
| 445 |
+
while not _stream_done.is_set():
|
| 446 |
+
try:
|
| 447 |
+
delta = _stream_q.get(timeout=0.1)
|
| 448 |
+
except queue.Empty:
|
| 449 |
+
continue
|
| 450 |
+
if first_token:
|
| 451 |
+
# Print response box top border
|
| 452 |
+
_cprint(f"\n{top}")
|
| 453 |
+
first_token = False
|
| 454 |
+
sys.stdout.write(delta)
|
| 455 |
+
sys.stdout.flush()
|
| 456 |
+
# Drain remaining
|
| 457 |
+
while not _stream_q.empty():
|
| 458 |
+
sys.stdout.write(_stream_q.get_nowait())
|
| 459 |
+
sys.stdout.flush()
|
| 460 |
+
# Print bottom border
|
| 461 |
+
_cprint(f"\n\n{bot}")
|
| 462 |
+
```
|
| 463 |
+
|
| 464 |
+
**Integration challenge: prompt_toolkit**
|
| 465 |
+
|
| 466 |
+
The CLI uses prompt_toolkit which controls the terminal. Writing directly
|
| 467 |
+
to stdout while prompt_toolkit is active can cause display corruption.
|
| 468 |
+
The existing KawaiiSpinner already solves this by using prompt_toolkit's
|
| 469 |
+
`patch_stdout` context. The streaming display would need to do the same.
|
| 470 |
+
|
| 471 |
+
Alternative: use `_cprint()` for each token chunk (routes through
|
| 472 |
+
prompt_toolkit's renderer). But this might be slow for individual tokens.
|
| 473 |
+
|
| 474 |
+
Recommended approach: accumulate tokens in small batches (e.g., every 50ms)
|
| 475 |
+
and `_cprint()` the batch. This balances display responsiveness with
|
| 476 |
+
prompt_toolkit compatibility.
|
| 477 |
+
|
| 478 |
+
**Tests for Phase 3:** (~50 lines)
|
| 479 |
+
- Test CLI streaming callback setup
|
| 480 |
+
- Test response box borders with streaming
|
| 481 |
+
- Test fallback when streaming disabled
|
| 482 |
+
|
| 483 |
+
---
|
| 484 |
+
|
| 485 |
+
### Phase 4: API Server real streaming
|
| 486 |
+
|
| 487 |
+
**File: gateway/platforms/api_server.py**
|
| 488 |
+
|
| 489 |
+
Replace the pseudo-streaming `_write_sse_chat_completion()` with real
|
| 490 |
+
token-by-token SSE when the agent supports it.
|
| 491 |
+
|
| 492 |
+
#### 4a. Wire streaming callback for stream=true requests (~20 lines)
|
| 493 |
+
|
| 494 |
+
```python
|
| 495 |
+
if stream:
|
| 496 |
+
_stream_q = queue.Queue()
|
| 497 |
+
|
| 498 |
+
def _api_stream_callback(delta):
|
| 499 |
+
_stream_q.put(delta) # None = done
|
| 500 |
+
|
| 501 |
+
# Pass callback to _run_agent
|
| 502 |
+
result, usage = await self._run_agent(
|
| 503 |
+
..., stream_callback=_api_stream_callback,
|
| 504 |
+
)
|
| 505 |
+
```
|
| 506 |
+
|
| 507 |
+
#### 4b. Real SSE writer (~40 lines)
|
| 508 |
+
|
| 509 |
+
```python
|
| 510 |
+
async def _write_real_sse(self, request, completion_id, model, stream_q):
|
| 511 |
+
response = web.StreamResponse(
|
| 512 |
+
headers={"Content-Type": "text/event-stream", "Cache-Control": "no-cache"},
|
| 513 |
+
)
|
| 514 |
+
await response.prepare(request)
|
| 515 |
+
|
| 516 |
+
# Role chunk
|
| 517 |
+
await response.write(...)
|
| 518 |
+
|
| 519 |
+
# Stream content chunks as they arrive
|
| 520 |
+
while True:
|
| 521 |
+
try:
|
| 522 |
+
delta = await asyncio.get_running_loop().run_in_executor(
|
| 523 |
+
None, lambda: stream_q.get(timeout=0.1)
|
| 524 |
+
)
|
| 525 |
+
except queue.Empty:
|
| 526 |
+
continue
|
| 527 |
+
|
| 528 |
+
if delta is None: # End of stream
|
| 529 |
+
break
|
| 530 |
+
|
| 531 |
+
chunk = {"id": completion_id, "object": "chat.completion.chunk", ...
|
| 532 |
+
"choices": [{"delta": {"content": delta}, ...}]}
|
| 533 |
+
await response.write(f"data: {json.dumps(chunk)}\n\n".encode())
|
| 534 |
+
|
| 535 |
+
# Finish + [DONE]
|
| 536 |
+
await response.write(...)
|
| 537 |
+
await response.write(b"data: [DONE]\n\n")
|
| 538 |
+
return response
|
| 539 |
+
```
|
| 540 |
+
|
| 541 |
+
**Challenge: concurrent execution**
|
| 542 |
+
|
| 543 |
+
The agent runs in a thread executor. SSE writing happens in the async event
|
| 544 |
+
loop. The queue bridges them. But `_run_agent()` currently awaits the full
|
| 545 |
+
result before returning. For real streaming, we need to start the agent in
|
| 546 |
+
the background and stream tokens while it runs:
|
| 547 |
+
|
| 548 |
+
```python
|
| 549 |
+
# Start agent in background
|
| 550 |
+
agent_task = asyncio.create_task(self._run_agent_async(...))
|
| 551 |
+
|
| 552 |
+
# Stream tokens while agent runs
|
| 553 |
+
await self._write_real_sse(request, ..., stream_q)
|
| 554 |
+
|
| 555 |
+
# Agent is done by now (stream_q received None)
|
| 556 |
+
result, usage = await agent_task
|
| 557 |
+
```
|
| 558 |
+
|
| 559 |
+
This requires splitting `_run_agent` into an async version that doesn't
|
| 560 |
+
block waiting for the result, or running it in a separate task.
|
| 561 |
+
|
| 562 |
+
**Responses API SSE format:**
|
| 563 |
+
|
| 564 |
+
For `/v1/responses` with `stream=true`, the SSE events are different:
|
| 565 |
+
|
| 566 |
+
```
|
| 567 |
+
event: response.output_text.delta
|
| 568 |
+
data: {"type":"response.output_text.delta","delta":"Hello"}
|
| 569 |
+
|
| 570 |
+
event: response.completed
|
| 571 |
+
data: {"type":"response.completed","response":{...}}
|
| 572 |
+
```
|
| 573 |
+
|
| 574 |
+
This needs a separate SSE writer that emits Responses API format events.
|
| 575 |
+
|
| 576 |
+
**Tests for Phase 4:** (~80 lines)
|
| 577 |
+
- Test real SSE streaming with mocked agent
|
| 578 |
+
- Test SSE event format (Chat Completions vs Responses)
|
| 579 |
+
- Test client disconnect during streaming
|
| 580 |
+
- Test fallback to pseudo-streaming when callback not available
|
| 581 |
+
|
| 582 |
+
---
|
| 583 |
+
|
| 584 |
+
## Integration Issues & Edge Cases
|
| 585 |
+
|
| 586 |
+
### 1. Tool calls during streaming
|
| 587 |
+
|
| 588 |
+
When the model returns tool calls instead of text, no text tokens are emitted.
|
| 589 |
+
The stream_callback is simply never called with text. After tools execute, the
|
| 590 |
+
next API call may produce the final text response — streaming picks up again.
|
| 591 |
+
|
| 592 |
+
The stream preview task needs to handle this: if no tokens arrive during a
|
| 593 |
+
tool-call round, don't send/edit any message. The tool progress messages
|
| 594 |
+
continue working as before.
|
| 595 |
+
|
| 596 |
+
### 2. Duplicate messages
|
| 597 |
+
|
| 598 |
+
The biggest risk: the agent sends the final response normally (via the
|
| 599 |
+
existing send path) AND the stream preview already showed it. The user
|
| 600 |
+
sees the response twice.
|
| 601 |
+
|
| 602 |
+
Prevention: when streaming is active and tokens were delivered, the final
|
| 603 |
+
response send must be suppressed. The `result["_streamed_msg_id"]` marker
|
| 604 |
+
tells the base adapter to skip its normal send.
|
| 605 |
+
|
| 606 |
+
### 3. Response post-processing
|
| 607 |
+
|
| 608 |
+
The final response may differ from the accumulated streamed tokens:
|
| 609 |
+
- Think block stripping (`<think>...</think>` removed)
|
| 610 |
+
- Trailing whitespace cleanup
|
| 611 |
+
- Tool result media tag appending
|
| 612 |
+
|
| 613 |
+
The stream preview shows raw tokens. The final edit should use the
|
| 614 |
+
post-processed version. This means the final edit (removing the cursor)
|
| 615 |
+
should use the post-processed `final_response`, not just the accumulated
|
| 616 |
+
stream text.
|
| 617 |
+
|
| 618 |
+
### 4. Context compression during streaming
|
| 619 |
+
|
| 620 |
+
If the agent triggers context compression mid-conversation, the streaming
|
| 621 |
+
tokens from BEFORE compression are from a different context than those
|
| 622 |
+
after. This isn't a problem in practice — compression happens between
|
| 623 |
+
API calls, not during streaming.
|
| 624 |
+
|
| 625 |
+
### 5. Interrupt during streaming
|
| 626 |
+
|
| 627 |
+
User sends a new message while streaming → interrupt. The stream is killed
|
| 628 |
+
(HTTP connection closed), accumulated tokens are shown as-is (no cursor),
|
| 629 |
+
and the interrupt message is processed normally. This is already handled by
|
| 630 |
+
`_interruptible_api_call` closing the client.
|
| 631 |
+
|
| 632 |
+
### 6. Multi-model / fallback
|
| 633 |
+
|
| 634 |
+
If the primary model fails and the agent falls back to a different model,
|
| 635 |
+
streaming state resets. The fallback call may or may not support streaming.
|
| 636 |
+
The graceful fallback in `_run_streaming_chat_completion` handles this.
|
| 637 |
+
|
| 638 |
+
### 7. Rate limiting on edits
|
| 639 |
+
|
| 640 |
+
Telegram: ~20 edits/minute (~1 every 3 seconds to be safe)
|
| 641 |
+
Discord: 5 edits per 5 seconds per message
|
| 642 |
+
Slack: ~50 API calls/minute
|
| 643 |
+
|
| 644 |
+
The 1.5s edit interval is conservative enough for all platforms. If we get
|
| 645 |
+
429 rate limit errors on edits, just skip that edit cycle and try next time.
|
| 646 |
+
|
| 647 |
+
---
|
| 648 |
+
|
| 649 |
+
## Files Changed Summary
|
| 650 |
+
|
| 651 |
+
| File | Phase | Changes |
|
| 652 |
+
|------|-------|---------|
|
| 653 |
+
| `run_agent.py` | 1 | +stream_callback param, +_run_streaming_chat_completion(), modify _run_codex_stream(), modify _interruptible_api_call() |
|
| 654 |
+
| `gateway/run.py` | 2 | +streaming config reader, +queue/callback setup, +stream_preview task, +skip-final-send logic |
|
| 655 |
+
| `gateway/platforms/base.py` | 2 | +check for _streamed_msg_id in response handler |
|
| 656 |
+
| `cli.py` | 3 | +streaming setup, +token display, +response box integration |
|
| 657 |
+
| `gateway/platforms/api_server.py` | 4 | +real SSE writer, +streaming callback wiring |
|
| 658 |
+
| `hermes_cli/config.py` | 1 | +streaming config defaults |
|
| 659 |
+
| `cli-config.yaml.example` | 1 | +streaming section |
|
| 660 |
+
| `tests/test_streaming.py` | 1-4 | NEW — ~380 lines of tests |
|
| 661 |
+
|
| 662 |
+
**Total new code**: ~500 lines across all phases
|
| 663 |
+
**Total test code**: ~380 lines
|
| 664 |
+
|
| 665 |
+
---
|
| 666 |
+
|
| 667 |
+
## Rollout Plan
|
| 668 |
+
|
| 669 |
+
1. **Phase 1** (core): Merge to main. Streaming disabled by default.
|
| 670 |
+
Zero impact on existing behavior. Can be tested with env var.
|
| 671 |
+
|
| 672 |
+
2. **Phase 2** (gateway): Merge to main. Test on Telegram manually.
|
| 673 |
+
Enable per-platform: `streaming.telegram: true` in config.
|
| 674 |
+
|
| 675 |
+
3. **Phase 3** (CLI): Merge to main. Test in terminal.
|
| 676 |
+
Enable: `streaming.cli: true` or `streaming.enabled: true`.
|
| 677 |
+
|
| 678 |
+
4. **Phase 4** (API server): Merge to main. Test with Open WebUI.
|
| 679 |
+
Auto-enabled when client sends `stream: true`.
|
| 680 |
+
|
| 681 |
+
Each phase is independently mergeable and testable. Streaming stays
|
| 682 |
+
off by default throughout. Once all phases are stable, consider
|
| 683 |
+
changing the default to enabled.
|
| 684 |
+
|
| 685 |
+
---
|
| 686 |
+
|
| 687 |
+
## Config Reference (final state)
|
| 688 |
+
|
| 689 |
+
```yaml
|
| 690 |
+
# config.yaml
|
| 691 |
+
streaming:
|
| 692 |
+
enabled: false # Master switch (default: off)
|
| 693 |
+
cli: true # Per-platform override
|
| 694 |
+
telegram: true
|
| 695 |
+
discord: true
|
| 696 |
+
slack: true
|
| 697 |
+
api_server: true # Allow API server streaming (used only when the client sends stream=true)
|
| 698 |
+
edit_interval: 1.5 # Seconds between message edits (default: 1.5)
|
| 699 |
+
min_tokens: 20 # Tokens before first display (default: 20)
|
| 700 |
+
```
|
| 701 |
+
|
| 702 |
+
```bash
|
| 703 |
+
# Environment variable override
|
| 704 |
+
HERMES_STREAMING_ENABLED=true
|
| 705 |
+
```
|
AGENTS.md
ADDED
|
@@ -0,0 +1,390 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Hermes Agent - Development Guide
|
| 2 |
+
|
| 3 |
+
Instructions for AI coding assistants and developers working on the hermes-agent codebase.
|
| 4 |
+
|
| 5 |
+
## Development Environment
|
| 6 |
+
|
| 7 |
+
```bash
|
| 8 |
+
source venv/bin/activate # ALWAYS activate before running Python
|
| 9 |
+
```
|
| 10 |
+
|
| 11 |
+
## Project Structure
|
| 12 |
+
|
| 13 |
+
```
|
| 14 |
+
hermes-agent/
|
| 15 |
+
├── run_agent.py # AIAgent class — core conversation loop
|
| 16 |
+
├── model_tools.py # Tool orchestration, _discover_tools(), handle_function_call()
|
| 17 |
+
├── toolsets.py # Toolset definitions, _HERMES_CORE_TOOLS list
|
| 18 |
+
├── cli.py # HermesCLI class — interactive CLI orchestrator
|
| 19 |
+
├── hermes_state.py # SessionDB — SQLite session store (FTS5 search)
|
| 20 |
+
├── agent/ # Agent internals
|
| 21 |
+
│ ├── prompt_builder.py # System prompt assembly
|
| 22 |
+
│ ├── context_compressor.py # Auto context compression
|
| 23 |
+
│ ├── prompt_caching.py # Anthropic prompt caching
|
| 24 |
+
│ ├── auxiliary_client.py # Auxiliary LLM client (vision, summarization)
|
| 25 |
+
│ ├── model_metadata.py # Model context lengths, token estimation
|
| 26 |
+
│ ├── models_dev.py # models.dev registry integration (provider-aware context)
|
| 27 |
+
│ ├── display.py # KawaiiSpinner, tool preview formatting
|
| 28 |
+
│ ├── skill_commands.py # Skill slash commands (shared CLI/gateway)
|
| 29 |
+
│ └── trajectory.py # Trajectory saving helpers
|
| 30 |
+
├── hermes_cli/ # CLI subcommands and setup
|
| 31 |
+
│ ├── main.py # Entry point — all `hermes` subcommands
|
| 32 |
+
│ ├── config.py # DEFAULT_CONFIG, OPTIONAL_ENV_VARS, migration
|
| 33 |
+
│ ├── commands.py # Slash command definitions + SlashCommandCompleter
|
| 34 |
+
│ ├── callbacks.py # Terminal callbacks (clarify, sudo, approval)
|
| 35 |
+
│ ├── setup.py # Interactive setup wizard
|
| 36 |
+
│ ├── skin_engine.py # Skin/theme engine — CLI visual customization
|
| 37 |
+
│ ├── skills_config.py # `hermes skills` — enable/disable skills per platform
|
| 38 |
+
│ ├── tools_config.py # `hermes tools` — enable/disable tools per platform
|
| 39 |
+
│ ├── skills_hub.py # `/skills` slash command (search, browse, install)
|
| 40 |
+
│ ├── models.py # Model catalog, provider model lists
|
| 41 |
+
│ ├── model_switch.py # Shared /model switch pipeline (CLI + gateway)
|
| 42 |
+
│ └── auth.py # Provider credential resolution
|
| 43 |
+
├── tools/ # Tool implementations (one file per tool)
|
| 44 |
+
│ ├── registry.py # Central tool registry (schemas, handlers, dispatch)
|
| 45 |
+
│ ├── approval.py # Dangerous command detection
|
| 46 |
+
│ ├── terminal_tool.py # Terminal orchestration
|
| 47 |
+
│ ├── process_registry.py # Background process management
|
| 48 |
+
│ ├── file_tools.py # File read/write/search/patch
|
| 49 |
+
│ ├── web_tools.py # Web search/extract (Parallel + Firecrawl)
|
| 50 |
+
│ ├── browser_tool.py # Browserbase browser automation
|
| 51 |
+
│ ├── code_execution_tool.py # execute_code sandbox
|
| 52 |
+
│ ├── delegate_tool.py # Subagent delegation
|
| 53 |
+
│ ├── mcp_tool.py # MCP client (~1050 lines)
|
| 54 |
+
│ └── environments/ # Terminal backends (local, docker, ssh, modal, daytona, singularity)
|
| 55 |
+
├── gateway/ # Messaging platform gateway
|
| 56 |
+
│ ├── run.py # Main loop, slash commands, message dispatch
|
| 57 |
+
│ ├── session.py # SessionStore — conversation persistence
|
| 58 |
+
│ └── platforms/ # Adapters: telegram, discord, slack, whatsapp, homeassistant, signal
|
| 59 |
+
├── acp_adapter/ # ACP server (VS Code / Zed / JetBrains integration)
|
| 60 |
+
├── cron/ # Scheduler (jobs.py, scheduler.py)
|
| 61 |
+
├── environments/ # RL training environments (Atropos)
|
| 62 |
+
├── tests/ # Pytest suite (~3000 tests)
|
| 63 |
+
└── batch_runner.py # Parallel batch processing
|
| 64 |
+
```
|
| 65 |
+
|
| 66 |
+
**User config:** `~/.hermes/config.yaml` (settings), `~/.hermes/.env` (API keys)
|
| 67 |
+
|
| 68 |
+
## File Dependency Chain
|
| 69 |
+
|
| 70 |
+
```
|
| 71 |
+
tools/registry.py (no deps — imported by all tool files)
|
| 72 |
+
↑
|
| 73 |
+
tools/*.py (each calls registry.register() at import time)
|
| 74 |
+
↑
|
| 75 |
+
model_tools.py (imports tools/registry + triggers tool discovery)
|
| 76 |
+
↑
|
| 77 |
+
run_agent.py, cli.py, batch_runner.py, environments/
|
| 78 |
+
```
|
| 79 |
+
|
| 80 |
+
---
|
| 81 |
+
|
| 82 |
+
## AIAgent Class (run_agent.py)
|
| 83 |
+
|
| 84 |
+
```python
|
| 85 |
+
class AIAgent:
|
| 86 |
+
def __init__(self,
|
| 87 |
+
model: str = "anthropic/claude-opus-4.6",
|
| 88 |
+
max_iterations: int = 90,
|
| 89 |
+
enabled_toolsets: list = None,
|
| 90 |
+
disabled_toolsets: list = None,
|
| 91 |
+
quiet_mode: bool = False,
|
| 92 |
+
save_trajectories: bool = False,
|
| 93 |
+
platform: str = None, # "cli", "telegram", etc.
|
| 94 |
+
session_id: str = None,
|
| 95 |
+
skip_context_files: bool = False,
|
| 96 |
+
skip_memory: bool = False,
|
| 97 |
+
# ... plus provider, api_mode, callbacks, routing params
|
| 98 |
+
): ...
|
| 99 |
+
|
| 100 |
+
def chat(self, message: str) -> str:
|
| 101 |
+
"""Simple interface — returns final response string."""
|
| 102 |
+
|
| 103 |
+
def run_conversation(self, user_message: str, system_message: str = None,
|
| 104 |
+
conversation_history: list = None, task_id: str = None) -> dict:
|
| 105 |
+
"""Full interface — returns dict with final_response + messages."""
|
| 106 |
+
```
|
| 107 |
+
|
| 108 |
+
### Agent Loop
|
| 109 |
+
|
| 110 |
+
The core loop is inside `run_conversation()` — entirely synchronous:
|
| 111 |
+
|
| 112 |
+
```python
|
| 113 |
+
while api_call_count < self.max_iterations and self.iteration_budget.remaining > 0:
|
| 114 |
+
response = client.chat.completions.create(model=model, messages=messages, tools=tool_schemas)
|
| 115 |
+
if response.tool_calls:
|
| 116 |
+
for tool_call in response.tool_calls:
|
| 117 |
+
result = handle_function_call(tool_call.name, tool_call.args, task_id)
|
| 118 |
+
messages.append(tool_result_message(result))
|
| 119 |
+
api_call_count += 1
|
| 120 |
+
else:
|
| 121 |
+
return response.content
|
| 122 |
+
```
|
| 123 |
+
|
| 124 |
+
Messages follow OpenAI format: `{"role": "system/user/assistant/tool", ...}`. Reasoning content is stored in `assistant_msg["reasoning"]`.
|
| 125 |
+
|
| 126 |
+
---
|
| 127 |
+
|
| 128 |
+
## CLI Architecture (cli.py)
|
| 129 |
+
|
| 130 |
+
- **Rich** for banner/panels, **prompt_toolkit** for input with autocomplete
|
| 131 |
+
- **KawaiiSpinner** (`agent/display.py`) — animated faces during API calls, `┊` activity feed for tool results
|
| 132 |
+
- `load_cli_config()` in cli.py merges hardcoded defaults + user config YAML
|
| 133 |
+
- **Skin engine** (`hermes_cli/skin_engine.py`) — data-driven CLI theming; initialized from `display.skin` config key at startup; skins customize banner colors, spinner faces/verbs/wings, tool prefix, response box, branding text
|
| 134 |
+
- `process_command()` is a method on `HermesCLI` — dispatches on canonical command name resolved via `resolve_command()` from the central registry
|
| 135 |
+
- Skill slash commands: `agent/skill_commands.py` scans `~/.hermes/skills/`, injects as **user message** (not system prompt) to preserve prompt caching
|
| 136 |
+
|
| 137 |
+
### Slash Command Registry (`hermes_cli/commands.py`)
|
| 138 |
+
|
| 139 |
+
All slash commands are defined in a central `COMMAND_REGISTRY` list of `CommandDef` objects. Every downstream consumer derives from this registry automatically:
|
| 140 |
+
|
| 141 |
+
- **CLI** — `process_command()` resolves aliases via `resolve_command()`, dispatches on canonical name
|
| 142 |
+
- **Gateway** — `GATEWAY_KNOWN_COMMANDS` frozenset for hook emission, `resolve_command()` for dispatch
|
| 143 |
+
- **Gateway help** — `gateway_help_lines()` generates `/help` output
|
| 144 |
+
- **Telegram** — `telegram_bot_commands()` generates the BotCommand menu
|
| 145 |
+
- **Slack** — `slack_subcommand_map()` generates `/hermes` subcommand routing
|
| 146 |
+
- **Autocomplete** — `COMMANDS` flat dict feeds `SlashCommandCompleter`
|
| 147 |
+
- **CLI help** — `COMMANDS_BY_CATEGORY` dict feeds `show_help()`
|
| 148 |
+
|
| 149 |
+
### Adding a Slash Command
|
| 150 |
+
|
| 151 |
+
1. Add a `CommandDef` entry to `COMMAND_REGISTRY` in `hermes_cli/commands.py`:
|
| 152 |
+
```python
|
| 153 |
+
CommandDef("mycommand", "Description of what it does", "Session",
|
| 154 |
+
aliases=("mc",), args_hint="[arg]"),
|
| 155 |
+
```
|
| 156 |
+
2. Add handler in `HermesCLI.process_command()` in `cli.py`:
|
| 157 |
+
```python
|
| 158 |
+
elif canonical == "mycommand":
|
| 159 |
+
self._handle_mycommand(cmd_original)
|
| 160 |
+
```
|
| 161 |
+
3. If the command is available in the gateway, add a handler in `gateway/run.py`:
|
| 162 |
+
```python
|
| 163 |
+
if canonical == "mycommand":
|
| 164 |
+
return await self._handle_mycommand(event)
|
| 165 |
+
```
|
| 166 |
+
4. For persistent settings, use `save_config_value()` in `cli.py`
|
| 167 |
+
|
| 168 |
+
**CommandDef fields:**
|
| 169 |
+
- `name` — canonical name without slash (e.g. `"background"`)
|
| 170 |
+
- `description` — human-readable description
|
| 171 |
+
- `category` — one of `"Session"`, `"Configuration"`, `"Tools & Skills"`, `"Info"`, `"Exit"`
|
| 172 |
+
- `aliases` — tuple of alternative names (e.g. `("bg",)`)
|
| 173 |
+
- `args_hint` — argument placeholder shown in help (e.g. `"<prompt>"`, `"[name]"`)
|
| 174 |
+
- `cli_only` — only available in the interactive CLI
|
| 175 |
+
- `gateway_only` — only available in messaging platforms
|
| 176 |
+
|
| 177 |
+
**Adding an alias** requires only adding it to the `aliases` tuple on the existing `CommandDef`. No other file changes needed — dispatch, help text, Telegram menu, Slack mapping, and autocomplete all update automatically.
|
| 178 |
+
|
| 179 |
+
---
|
| 180 |
+
|
| 181 |
+
## Adding New Tools
|
| 182 |
+
|
| 183 |
+
Requires changes in **3 files**:
|
| 184 |
+
|
| 185 |
+
**1. Create `tools/your_tool.py`:**
|
| 186 |
+
```python
|
| 187 |
+
import json, os
|
| 188 |
+
from tools.registry import registry
|
| 189 |
+
|
| 190 |
+
def check_requirements() -> bool:
|
| 191 |
+
return bool(os.getenv("EXAMPLE_API_KEY"))
|
| 192 |
+
|
| 193 |
+
def example_tool(param: str, task_id: str = None) -> str:
|
| 194 |
+
return json.dumps({"success": True, "data": "..."})
|
| 195 |
+
|
| 196 |
+
registry.register(
|
| 197 |
+
name="example_tool",
|
| 198 |
+
toolset="example",
|
| 199 |
+
schema={"name": "example_tool", "description": "...", "parameters": {...}},
|
| 200 |
+
handler=lambda args, **kw: example_tool(param=args.get("param", ""), task_id=kw.get("task_id")),
|
| 201 |
+
check_fn=check_requirements,
|
| 202 |
+
requires_env=["EXAMPLE_API_KEY"],
|
| 203 |
+
)
|
| 204 |
+
```
|
| 205 |
+
|
| 206 |
+
**2. Add import** in `model_tools.py` `_discover_tools()` list.
|
| 207 |
+
|
| 208 |
+
**3. Add to `toolsets.py`** — either `_HERMES_CORE_TOOLS` (all platforms) or a new toolset.
|
| 209 |
+
|
| 210 |
+
The registry handles schema collection, dispatch, availability checking, and error wrapping. All handlers MUST return a JSON string.
|
| 211 |
+
|
| 212 |
+
**Agent-level tools** (todo, memory): intercepted by `run_agent.py` before `handle_function_call()`. See `todo_tool.py` for the pattern.
|
| 213 |
+
|
| 214 |
+
---
|
| 215 |
+
|
| 216 |
+
## Adding Configuration
|
| 217 |
+
|
| 218 |
+
### config.yaml options:
|
| 219 |
+
1. Add to `DEFAULT_CONFIG` in `hermes_cli/config.py`
|
| 220 |
+
2. Bump `_config_version` (currently 5) to trigger migration for existing users
|
| 221 |
+
|
| 222 |
+
### .env variables:
|
| 223 |
+
1. Add to `OPTIONAL_ENV_VARS` in `hermes_cli/config.py` with metadata:
|
| 224 |
+
```python
|
| 225 |
+
"NEW_API_KEY": {
|
| 226 |
+
"description": "What it's for",
|
| 227 |
+
"prompt": "Display name",
|
| 228 |
+
"url": "https://...",
|
| 229 |
+
"password": True,
|
| 230 |
+
"category": "tool", # provider, tool, messaging, setting
|
| 231 |
+
},
|
| 232 |
+
```
|
| 233 |
+
|
| 234 |
+
### Config loaders (two separate systems):
|
| 235 |
+
|
| 236 |
+
| Loader | Used by | Location |
|
| 237 |
+
|--------|---------|----------|
|
| 238 |
+
| `load_cli_config()` | CLI mode | `cli.py` |
|
| 239 |
+
| `load_config()` | `hermes tools`, `hermes setup` | `hermes_cli/config.py` |
|
| 240 |
+
| Direct YAML load | Gateway | `gateway/run.py` |
|
| 241 |
+
|
| 242 |
+
---
|
| 243 |
+
|
| 244 |
+
## Skin/Theme System
|
| 245 |
+
|
| 246 |
+
The skin engine (`hermes_cli/skin_engine.py`) provides data-driven CLI visual customization. Skins are **pure data** — no code changes needed to add a new skin.
|
| 247 |
+
|
| 248 |
+
### Architecture
|
| 249 |
+
|
| 250 |
+
```
|
| 251 |
+
hermes_cli/skin_engine.py # SkinConfig dataclass, built-in skins, YAML loader
|
| 252 |
+
~/.hermes/skins/*.yaml # User-installed custom skins (drop-in)
|
| 253 |
+
```
|
| 254 |
+
|
| 255 |
+
- `init_skin_from_config()` — called at CLI startup, reads `display.skin` from config
|
| 256 |
+
- `get_active_skin()` — returns cached `SkinConfig` for the current skin
|
| 257 |
+
- `set_active_skin(name)` — switches skin at runtime (used by `/skin` command)
|
| 258 |
+
- `load_skin(name)` — loads from user skins first, then built-ins, then falls back to default
|
| 259 |
+
- Missing skin values inherit from the `default` skin automatically
|
| 260 |
+
|
| 261 |
+
### What skins customize
|
| 262 |
+
|
| 263 |
+
| Element | Skin Key | Used By |
|
| 264 |
+
|---------|----------|---------|
|
| 265 |
+
| Banner panel border | `colors.banner_border` | `banner.py` |
|
| 266 |
+
| Banner panel title | `colors.banner_title` | `banner.py` |
|
| 267 |
+
| Banner section headers | `colors.banner_accent` | `banner.py` |
|
| 268 |
+
| Banner dim text | `colors.banner_dim` | `banner.py` |
|
| 269 |
+
| Banner body text | `colors.banner_text` | `banner.py` |
|
| 270 |
+
| Response box border | `colors.response_border` | `cli.py` |
|
| 271 |
+
| Spinner faces (waiting) | `spinner.waiting_faces` | `display.py` |
|
| 272 |
+
| Spinner faces (thinking) | `spinner.thinking_faces` | `display.py` |
|
| 273 |
+
| Spinner verbs | `spinner.thinking_verbs` | `display.py` |
|
| 274 |
+
| Spinner wings (optional) | `spinner.wings` | `display.py` |
|
| 275 |
+
| Tool output prefix | `tool_prefix` | `display.py` |
|
| 276 |
+
| Per-tool emojis | `tool_emojis` | `display.py` → `get_tool_emoji()` |
|
| 277 |
+
| Agent name | `branding.agent_name` | `banner.py`, `cli.py` |
|
| 278 |
+
| Welcome message | `branding.welcome` | `cli.py` |
|
| 279 |
+
| Response box label | `branding.response_label` | `cli.py` |
|
| 280 |
+
| Prompt symbol | `branding.prompt_symbol` | `cli.py` |
|
| 281 |
+
|
| 282 |
+
### Built-in skins
|
| 283 |
+
|
| 284 |
+
- `default` — Classic Hermes gold/kawaii (the current look)
|
| 285 |
+
- `ares` — Crimson/bronze war-god theme with custom spinner wings
|
| 286 |
+
- `mono` — Clean grayscale monochrome
|
| 287 |
+
- `slate` — Cool blue developer-focused theme
|
| 288 |
+
|
| 289 |
+
### Adding a built-in skin
|
| 290 |
+
|
| 291 |
+
Add to `_BUILTIN_SKINS` dict in `hermes_cli/skin_engine.py`:
|
| 292 |
+
|
| 293 |
+
```python
|
| 294 |
+
"mytheme": {
|
| 295 |
+
"name": "mytheme",
|
| 296 |
+
"description": "Short description",
|
| 297 |
+
"colors": { ... },
|
| 298 |
+
"spinner": { ... },
|
| 299 |
+
"branding": { ... },
|
| 300 |
+
"tool_prefix": "┊",
|
| 301 |
+
},
|
| 302 |
+
```
|
| 303 |
+
|
| 304 |
+
### User skins (YAML)
|
| 305 |
+
|
| 306 |
+
Users create `~/.hermes/skins/<name>.yaml`:
|
| 307 |
+
|
| 308 |
+
```yaml
|
| 309 |
+
name: cyberpunk
|
| 310 |
+
description: Neon-soaked terminal theme
|
| 311 |
+
|
| 312 |
+
colors:
|
| 313 |
+
banner_border: "#FF00FF"
|
| 314 |
+
banner_title: "#00FFFF"
|
| 315 |
+
banner_accent: "#FF1493"
|
| 316 |
+
|
| 317 |
+
spinner:
|
| 318 |
+
thinking_verbs: ["jacking in", "decrypting", "uploading"]
|
| 319 |
+
wings:
|
| 320 |
+
- ["⟨⚡", "⚡⟩"]
|
| 321 |
+
|
| 322 |
+
branding:
|
| 323 |
+
agent_name: "Cyber Agent"
|
| 324 |
+
response_label: " ⚡ Cyber "
|
| 325 |
+
|
| 326 |
+
tool_prefix: "▏"
|
| 327 |
+
```
|
| 328 |
+
|
| 329 |
+
Activate with `/skin cyberpunk` or `display.skin: cyberpunk` in config.yaml.
|
| 330 |
+
|
| 331 |
+
---
|
| 332 |
+
|
| 333 |
+
## Important Policies
|
| 334 |
+
### Prompt Caching Must Not Break
|
| 335 |
+
|
| 336 |
+
Hermes-Agent ensures caching remains valid throughout a conversation. **Do NOT implement changes that would:**
|
| 337 |
+
- Alter past context mid-conversation
|
| 338 |
+
- Change toolsets mid-conversation
|
| 339 |
+
- Reload memories or rebuild system prompts mid-conversation
|
| 340 |
+
|
| 341 |
+
Cache-breaking forces dramatically higher costs. The ONLY time we alter context is during context compression.
|
| 342 |
+
|
| 343 |
+
### Working Directory Behavior
|
| 344 |
+
- **CLI**: Uses current directory (`.` → `os.getcwd()`)
|
| 345 |
+
- **Messaging**: Uses `MESSAGING_CWD` env var (default: home directory)
|
| 346 |
+
|
| 347 |
+
### Background Process Notifications (Gateway)
|
| 348 |
+
|
| 349 |
+
When `terminal(background=true, check_interval=...)` is used, the gateway runs a watcher that
|
| 350 |
+
pushes status updates to the user's chat. Control verbosity with `display.background_process_notifications`
|
| 351 |
+
in config.yaml (or `HERMES_BACKGROUND_NOTIFICATIONS` env var):
|
| 352 |
+
|
| 353 |
+
- `all` — running-output updates + final message (default)
|
| 354 |
+
- `result` — only the final completion message
|
| 355 |
+
- `error` — only the final message when exit code != 0
|
| 356 |
+
- `off` — no watcher messages at all
|
| 357 |
+
|
| 358 |
+
---
|
| 359 |
+
|
| 360 |
+
## Known Pitfalls
|
| 361 |
+
|
| 362 |
+
### DO NOT use `simple_term_menu` for interactive menus
|
| 363 |
+
Rendering bugs in tmux/iTerm2 — ghosting on scroll. Use `curses` (stdlib) instead. See `hermes_cli/tools_config.py` for the pattern.
|
| 364 |
+
|
| 365 |
+
### DO NOT use `\033[K` (ANSI erase-to-EOL) in spinner/display code
|
| 366 |
+
Leaks as literal `?[K` text under `prompt_toolkit`'s `patch_stdout`. Use space-padding: `f"\r{line}{' ' * pad}"`.
|
| 367 |
+
|
| 368 |
+
### `_last_resolved_tool_names` is a process-global in `model_tools.py`
|
| 369 |
+
`_run_single_child()` in `delegate_tool.py` saves and restores this global around subagent execution. If you add new code that reads this global, be aware it may be temporarily stale during child agent runs.
|
| 370 |
+
|
| 371 |
+
### DO NOT hardcode cross-tool references in schema descriptions
|
| 372 |
+
Tool schema descriptions must not mention tools from other toolsets by name (e.g., `browser_navigate` saying "prefer web_search"). Those tools may be unavailable (missing API keys, disabled toolset), causing the model to hallucinate calls to non-existent tools. If a cross-reference is needed, add it dynamically in `get_tool_definitions()` in `model_tools.py` — see the `browser_navigate` / `execute_code` post-processing blocks for the pattern.
|
| 373 |
+
|
| 374 |
+
### Tests must not write to `~/.hermes/`
|
| 375 |
+
The `_isolate_hermes_home` autouse fixture in `tests/conftest.py` redirects `HERMES_HOME` to a temp dir. Never hardcode `~/.hermes/` paths in tests.
|
| 376 |
+
|
| 377 |
+
---
|
| 378 |
+
|
| 379 |
+
## Testing
|
| 380 |
+
|
| 381 |
+
```bash
|
| 382 |
+
source venv/bin/activate
|
| 383 |
+
python -m pytest tests/ -q # Full suite (~3000 tests, ~3 min)
|
| 384 |
+
python -m pytest tests/test_model_tools.py -q # Toolset resolution
|
| 385 |
+
python -m pytest tests/test_cli_init.py -q # CLI config loading
|
| 386 |
+
python -m pytest tests/gateway/ -q # Gateway tests
|
| 387 |
+
python -m pytest tests/tools/ -q # Tool-level tests
|
| 388 |
+
```
|
| 389 |
+
|
| 390 |
+
Always run the full suite before pushing changes.
|
CONTRIBUTING.md
ADDED
|
@@ -0,0 +1,660 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Contributing to Hermes Agent
|
| 2 |
+
|
| 3 |
+
Thank you for contributing to Hermes Agent! This guide covers everything you need: setting up your dev environment, understanding the architecture, deciding what to build, and getting your PR merged.
|
| 4 |
+
|
| 5 |
+
---
|
| 6 |
+
|
| 7 |
+
## Contribution Priorities
|
| 8 |
+
|
| 9 |
+
We value contributions in this order:
|
| 10 |
+
|
| 11 |
+
1. **Bug fixes** — crashes, incorrect behavior, data loss. Always top priority.
|
| 12 |
+
2. **Cross-platform compatibility** — Windows, macOS, different Linux distros, different terminal emulators. We want Hermes to work everywhere.
|
| 13 |
+
3. **Security hardening** — shell injection, prompt injection, path traversal, privilege escalation. See [Security](#security-considerations).
|
| 14 |
+
4. **Performance and robustness** — retry logic, error handling, graceful degradation.
|
| 15 |
+
5. **New skills** — but only broadly useful ones. See [Should it be a Skill or a Tool?](#should-it-be-a-skill-or-a-tool)
|
| 16 |
+
6. **New tools** — rarely needed. Most capabilities should be skills. See below.
|
| 17 |
+
7. **Documentation** — fixes, clarifications, new examples.
|
| 18 |
+
|
| 19 |
+
---
|
| 20 |
+
|
| 21 |
+
## Should it be a Skill or a Tool?
|
| 22 |
+
|
| 23 |
+
This is the most common question for new contributors. The answer is almost always **skill**.
|
| 24 |
+
|
| 25 |
+
### Make it a Skill when:
|
| 26 |
+
|
| 27 |
+
- The capability can be expressed as instructions + shell commands + existing tools
|
| 28 |
+
- It wraps an external CLI or API that the agent can call via `terminal` or `web_extract`
|
| 29 |
+
- It doesn't need custom Python integration or API key management baked into the agent
|
| 30 |
+
- Examples: arXiv search, git workflows, Docker management, PDF processing, email via CLI tools
|
| 31 |
+
|
| 32 |
+
### Make it a Tool when:
|
| 33 |
+
|
| 34 |
+
- It requires end-to-end integration with API keys, auth flows, or multi-component configuration managed by the agent harness
|
| 35 |
+
- It needs custom processing logic that must execute precisely every time (not "best effort" from LLM interpretation)
|
| 36 |
+
- It handles binary data, streaming, or real-time events that can't go through the terminal
|
| 37 |
+
- Examples: browser automation (Browserbase session management), TTS (audio encoding + platform delivery), vision analysis (base64 image handling)
|
| 38 |
+
|
| 39 |
+
### Should the Skill be bundled?
|
| 40 |
+
|
| 41 |
+
Bundled skills (in `skills/`) ship with every Hermes install. They should be **broadly useful to most users**:
|
| 42 |
+
|
| 43 |
+
- Document handling, web research, common dev workflows, system administration
|
| 44 |
+
- Used regularly by a wide range of people
|
| 45 |
+
|
| 46 |
+
If your skill is official and useful but not universally needed (e.g., a paid service integration, a heavyweight dependency), put it in **`optional-skills/`** — it ships with the repo but isn't activated by default. Users can discover it via `hermes skills browse` (labeled "official") and install it with `hermes skills install` (no third-party warning, builtin trust).
|
| 47 |
+
|
| 48 |
+
If your skill is specialized, community-contributed, or niche, it's better suited for a **Skills Hub** — upload it to a skills registry and share it in the [Nous Research Discord](https://discord.gg/NousResearch). Users can install it with `hermes skills install`.
|
| 49 |
+
|
| 50 |
+
---
|
| 51 |
+
|
| 52 |
+
## Development Setup
|
| 53 |
+
|
| 54 |
+
### Prerequisites
|
| 55 |
+
|
| 56 |
+
| Requirement | Notes |
|
| 57 |
+
|-------------|-------|
|
| 58 |
+
| **Git** | With `--recurse-submodules` support |
|
| 59 |
+
| **Python 3.11+** | uv will install it if missing |
|
| 60 |
+
| **uv** | Fast Python package manager ([install](https://docs.astral.sh/uv/)) |
|
| 61 |
+
| **Node.js 18+** | Optional — needed for browser tools and WhatsApp bridge |
|
| 62 |
+
|
| 63 |
+
### Clone and install
|
| 64 |
+
|
| 65 |
+
```bash
|
| 66 |
+
git clone --recurse-submodules https://github.com/NousResearch/hermes-agent.git
|
| 67 |
+
cd hermes-agent
|
| 68 |
+
|
| 69 |
+
# Create venv with Python 3.11
|
| 70 |
+
uv venv venv --python 3.11
|
| 71 |
+
export VIRTUAL_ENV="$(pwd)/venv"
|
| 72 |
+
|
| 73 |
+
# Install with all extras (messaging, cron, CLI menus, dev tools)
|
| 74 |
+
uv pip install -e ".[all,dev]"
|
| 75 |
+
|
| 76 |
+
# Optional: RL training submodule
|
| 77 |
+
# git submodule update --init tinker-atropos && uv pip install -e "./tinker-atropos"
|
| 78 |
+
|
| 79 |
+
# Optional: browser tools
|
| 80 |
+
npm install
|
| 81 |
+
```
|
| 82 |
+
|
| 83 |
+
### Configure for development
|
| 84 |
+
|
| 85 |
+
```bash
|
| 86 |
+
mkdir -p ~/.hermes/{cron,sessions,logs,memories,skills}
|
| 87 |
+
cp cli-config.yaml.example ~/.hermes/config.yaml
|
| 88 |
+
touch ~/.hermes/.env
|
| 89 |
+
|
| 90 |
+
# Add at minimum an LLM provider key:
|
| 91 |
+
echo 'OPENROUTER_API_KEY=sk-or-v1-your-key' >> ~/.hermes/.env
|
| 92 |
+
```
|
| 93 |
+
|
| 94 |
+
### Run
|
| 95 |
+
|
| 96 |
+
```bash
|
| 97 |
+
# Symlink for global access
|
| 98 |
+
mkdir -p ~/.local/bin
|
| 99 |
+
ln -sf "$(pwd)/venv/bin/hermes" ~/.local/bin/hermes
|
| 100 |
+
|
| 101 |
+
# Verify
|
| 102 |
+
hermes doctor
|
| 103 |
+
hermes chat -q "Hello"
|
| 104 |
+
```
|
| 105 |
+
|
| 106 |
+
### Run tests
|
| 107 |
+
|
| 108 |
+
```bash
|
| 109 |
+
pytest tests/ -v
|
| 110 |
+
```
|
| 111 |
+
|
| 112 |
+
---
|
| 113 |
+
|
| 114 |
+
## Project Structure
|
| 115 |
+
|
| 116 |
+
```
|
| 117 |
+
hermes-agent/
|
| 118 |
+
├── run_agent.py # AIAgent class — core conversation loop, tool dispatch, session persistence
|
| 119 |
+
├── cli.py # HermesCLI class — interactive TUI, prompt_toolkit integration
|
| 120 |
+
├── model_tools.py # Tool orchestration (thin layer over tools/registry.py)
|
| 121 |
+
├── toolsets.py # Tool groupings and presets (hermes-cli, hermes-telegram, etc.)
|
| 122 |
+
├── hermes_state.py # SQLite session database with FTS5 full-text search, session titles
|
| 123 |
+
├── batch_runner.py # Parallel batch processing for trajectory generation
|
| 124 |
+
│
|
| 125 |
+
├── agent/ # Agent internals (extracted modules)
|
| 126 |
+
│ ├── prompt_builder.py # System prompt assembly (identity, skills, context files, memory)
|
| 127 |
+
│ ├── context_compressor.py # Auto-summarization when approaching context limits
|
| 128 |
+
│ ├── auxiliary_client.py # Resolves auxiliary OpenAI clients (summarization, vision)
|
| 129 |
+
│ ├── display.py # KawaiiSpinner, tool progress formatting
|
| 130 |
+
│ ├── model_metadata.py # Model context lengths, token estimation
|
| 131 |
+
│ └── trajectory.py # Trajectory saving helpers
|
| 132 |
+
│
|
| 133 |
+
├── hermes_cli/ # CLI command implementations
|
| 134 |
+
│ ├── main.py # Entry point, argument parsing, command dispatch
|
| 135 |
+
│ ├── config.py # Config management, migration, env var definitions
|
| 136 |
+
│ ├── setup.py # Interactive setup wizard
|
| 137 |
+
│ ├── auth.py # Provider resolution, OAuth, Nous Portal
|
| 138 |
+
│ ├── models.py # OpenRouter model selection lists
|
| 139 |
+
│ ├── banner.py # Welcome banner, ASCII art
|
| 140 |
+
│ ├── commands.py # Central slash command registry (CommandDef), autocomplete, gateway helpers
|
| 141 |
+
│ ├── callbacks.py # Interactive callbacks (clarify, sudo, approval)
|
| 142 |
+
│ ├── doctor.py # Diagnostics
|
| 143 |
+
│ ├── skills_hub.py # Skills Hub CLI + /skills slash command
|
| 144 |
+
│ └── skin_engine.py # Skin/theme engine — data-driven CLI visual customization
|
| 145 |
+
│
|
| 146 |
+
├── tools/ # Tool implementations (self-registering)
|
| 147 |
+
│ ├── registry.py # Central tool registry (schemas, handlers, dispatch)
|
| 148 |
+
│ ├── approval.py # Dangerous command detection + per-session approval
|
| 149 |
+
│ ├── terminal_tool.py # Terminal orchestration (sudo, env lifecycle, backends)
|
| 150 |
+
│ ├── file_operations.py # read_file, write_file, search, patch, etc.
|
| 151 |
+
│ ├── web_tools.py # web_search, web_extract (Parallel/Firecrawl + Gemini summarization)
|
| 152 |
+
│ ├── vision_tools.py # Image analysis via multimodal models
|
| 153 |
+
│ ├── delegate_tool.py # Subagent spawning and parallel task execution
|
| 154 |
+
│ ├── code_execution_tool.py # Sandboxed Python with RPC tool access
|
| 155 |
+
│ ├── session_search_tool.py # Search past conversations with FTS5 + summarization
|
| 156 |
+
│ ├── cronjob_tools.py # Scheduled task management
|
| 157 |
+
│ ├── skill_tools.py # Skill search, load, manage
|
| 158 |
+
│ └── environments/ # Terminal execution backends
|
| 159 |
+
│ ├── base.py # BaseEnvironment ABC
|
| 160 |
+
│ ├── local.py, docker.py, ssh.py, singularity.py, modal.py, daytona.py
|
| 161 |
+
│
|
| 162 |
+
├── gateway/ # Messaging gateway
|
| 163 |
+
│ ├── run.py # GatewayRunner — platform lifecycle, message routing, cron
|
| 164 |
+
│ ├── config.py # Platform configuration resolution
|
| 165 |
+
│ ├── session.py # Session store, context prompts, reset policies
|
| 166 |
+
│ └── platforms/ # Platform adapters
|
| 167 |
+
│ ├── telegram.py, discord_adapter.py, slack.py, whatsapp.py
|
| 168 |
+
│
|
| 169 |
+
├── scripts/ # Installer and bridge scripts
|
| 170 |
+
│ ├── install.sh # Linux/macOS installer
|
| 171 |
+
│ ├── install.ps1 # Windows PowerShell installer
|
| 172 |
+
│ └── whatsapp-bridge/ # Node.js WhatsApp bridge (Baileys)
|
| 173 |
+
│
|
| 174 |
+
├── skills/ # Bundled skills (copied to ~/.hermes/skills/ on install)
|
| 175 |
+
├── optional-skills/ # Official optional skills (discoverable via hub, not activated by default)
|
| 176 |
+
├── environments/ # RL training environments (Atropos integration)
|
| 177 |
+
├── tests/ # Test suite
|
| 178 |
+
├── website/ # Documentation site (hermes-agent.nousresearch.com)
|
| 179 |
+
│
|
| 180 |
+
├── cli-config.yaml.example # Example configuration (copied to ~/.hermes/config.yaml)
|
| 181 |
+
└── AGENTS.md # Development guide for AI coding assistants
|
| 182 |
+
```
|
| 183 |
+
|
| 184 |
+
### User configuration (stored in `~/.hermes/`)
|
| 185 |
+
|
| 186 |
+
| Path | Purpose |
|
| 187 |
+
|------|---------|
|
| 188 |
+
| `~/.hermes/config.yaml` | Settings (model, terminal, toolsets, compression, etc.) |
|
| 189 |
+
| `~/.hermes/.env` | API keys and secrets |
|
| 190 |
+
| `~/.hermes/auth.json` | OAuth credentials (Nous Portal) |
|
| 191 |
+
| `~/.hermes/skills/` | All active skills (bundled + hub-installed + agent-created) |
|
| 192 |
+
| `~/.hermes/memories/` | Persistent memory (MEMORY.md, USER.md) |
|
| 193 |
+
| `~/.hermes/state.db` | SQLite session database |
|
| 194 |
+
| `~/.hermes/sessions/` | JSON session logs |
|
| 195 |
+
| `~/.hermes/cron/` | Scheduled job data |
|
| 196 |
+
| `~/.hermes/whatsapp/session/` | WhatsApp bridge credentials |
|
| 197 |
+
|
| 198 |
+
---
|
| 199 |
+
|
| 200 |
+
## Architecture Overview
|
| 201 |
+
|
| 202 |
+
### Core Loop
|
| 203 |
+
|
| 204 |
+
```
|
| 205 |
+
User message → AIAgent._run_agent_loop()
|
| 206 |
+
├── Build system prompt (prompt_builder.py)
|
| 207 |
+
├── Build API kwargs (model, messages, tools, reasoning config)
|
| 208 |
+
├── Call LLM (OpenAI-compatible API)
|
| 209 |
+
├── If tool_calls in response:
|
| 210 |
+
│ ├── Execute each tool via registry dispatch
|
| 211 |
+
│ ├── Add tool results to conversation
|
| 212 |
+
│ └── Loop back to LLM call
|
| 213 |
+
├── If text response:
|
| 214 |
+
│ ├── Persist session to DB
|
| 215 |
+
│ └── Return final_response
|
| 216 |
+
└── Context compression if approaching token limit
|
| 217 |
+
```
|
| 218 |
+
|
| 219 |
+
### Key Design Patterns
|
| 220 |
+
|
| 221 |
+
- **Self-registering tools**: Each tool file calls `registry.register()` at import time. `model_tools.py` triggers discovery by importing all tool modules.
|
| 222 |
+
- **Toolset grouping**: Tools are grouped into toolsets (`web`, `terminal`, `file`, `browser`, etc.) that can be enabled/disabled per platform.
|
| 223 |
+
- **Session persistence**: All conversations are stored in SQLite (`hermes_state.py`) with full-text search and unique session titles. JSON logs go to `~/.hermes/sessions/`.
|
| 224 |
+
- **Ephemeral injection**: System prompts and prefill messages are injected at API call time, never persisted to the database or logs.
|
| 225 |
+
- **Provider abstraction**: The agent works with any OpenAI-compatible API. Provider resolution happens at init time (Nous Portal OAuth, OpenRouter API key, or custom endpoint).
|
| 226 |
+
- **Provider routing**: When using OpenRouter, `provider_routing` in config.yaml controls provider selection (sort by throughput/latency/price, allow/ignore specific providers, data retention policies). These are injected as `extra_body.provider` in API requests.
|
| 227 |
+
|
| 228 |
+
---
|
| 229 |
+
|
| 230 |
+
## Code Style
|
| 231 |
+
|
| 232 |
+
- **PEP 8** with practical exceptions (we don't enforce strict line length)
|
| 233 |
+
- **Comments**: Only when explaining non-obvious intent, trade-offs, or API quirks. Don't narrate what the code does — `# increment counter` adds nothing
|
| 234 |
+
- **Error handling**: Catch specific exceptions. Log with `logger.warning()`/`logger.error()` — use `exc_info=True` for unexpected errors so stack traces appear in logs
|
| 235 |
+
- **Cross-platform**: Never assume Unix. See [Cross-Platform Compatibility](#cross-platform-compatibility)
|
| 236 |
+
|
| 237 |
+
---
|
| 238 |
+
|
| 239 |
+
## Adding a New Tool
|
| 240 |
+
|
| 241 |
+
Before writing a tool, ask: [should this be a skill instead?](#should-it-be-a-skill-or-a-tool)
|
| 242 |
+
|
| 243 |
+
Tools self-register with the central registry. Each tool file co-locates its schema, handler, and registration:
|
| 244 |
+
|
| 245 |
+
```python
|
| 246 |
+
"""my_tool — Brief description of what this tool does."""
|
| 247 |
+
|
| 248 |
+
import json
|
| 249 |
+
from tools.registry import registry
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
def my_tool(param1: str, param2: int = 10, **kwargs) -> str:
|
| 253 |
+
"""Handler. Returns a string result (often JSON)."""
|
| 254 |
+
result = do_work(param1, param2)
|
| 255 |
+
return json.dumps(result)
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
MY_TOOL_SCHEMA = {
|
| 259 |
+
"type": "function",
|
| 260 |
+
"function": {
|
| 261 |
+
"name": "my_tool",
|
| 262 |
+
"description": "What this tool does and when the agent should use it.",
|
| 263 |
+
"parameters": {
|
| 264 |
+
"type": "object",
|
| 265 |
+
"properties": {
|
| 266 |
+
"param1": {"type": "string", "description": "What param1 is"},
|
| 267 |
+
"param2": {"type": "integer", "description": "What param2 is", "default": 10},
|
| 268 |
+
},
|
| 269 |
+
"required": ["param1"],
|
| 270 |
+
},
|
| 271 |
+
},
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
def _check_requirements() -> bool:
|
| 276 |
+
"""Return True if this tool's dependencies are available."""
|
| 277 |
+
return True
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
registry.register(
|
| 281 |
+
name="my_tool",
|
| 282 |
+
toolset="my_toolset",
|
| 283 |
+
schema=MY_TOOL_SCHEMA,
|
| 284 |
+
handler=lambda args, **kw: my_tool(**args, **kw),
|
| 285 |
+
check_fn=_check_requirements,
|
| 286 |
+
)
|
| 287 |
+
```
|
| 288 |
+
|
| 289 |
+
Then add the import to `model_tools.py` in the `_modules` list:
|
| 290 |
+
|
| 291 |
+
```python
|
| 292 |
+
_modules = [
|
| 293 |
+
# ... existing modules ...
|
| 294 |
+
"tools.my_tool",
|
| 295 |
+
]
|
| 296 |
+
```
|
| 297 |
+
|
| 298 |
+
If it's a new toolset, add it to `toolsets.py` and to the relevant platform presets.
|
| 299 |
+
|
| 300 |
+
---
|
| 301 |
+
|
| 302 |
+
## Adding a Skill
|
| 303 |
+
|
| 304 |
+
Bundled skills live in `skills/` organized by category. Official optional skills use the same structure in `optional-skills/`:
|
| 305 |
+
|
| 306 |
+
```
|
| 307 |
+
skills/
|
| 308 |
+
├── research/
|
| 309 |
+
│ └── arxiv/
|
| 310 |
+
│ ├── SKILL.md # Required: main instructions
|
| 311 |
+
│ └── scripts/ # Optional: helper scripts
|
| 312 |
+
│ └── search_arxiv.py
|
| 313 |
+
├── productivity/
|
| 314 |
+
│ └── ocr-and-documents/
|
| 315 |
+
│ ├── SKILL.md
|
| 316 |
+
│ ├── scripts/
|
| 317 |
+
│ └── references/
|
| 318 |
+
└── ...
|
| 319 |
+
```
|
| 320 |
+
|
| 321 |
+
### SKILL.md format
|
| 322 |
+
|
| 323 |
+
```markdown
|
| 324 |
+
---
|
| 325 |
+
name: my-skill
|
| 326 |
+
description: Brief description (shown in skill search results)
|
| 327 |
+
version: 1.0.0
|
| 328 |
+
author: Your Name
|
| 329 |
+
license: MIT
|
| 330 |
+
platforms: [macos, linux] # Optional — restrict to specific OS platforms
|
| 331 |
+
# Valid: macos, linux, windows
|
| 332 |
+
# Omit to load on all platforms (default)
|
| 333 |
+
required_environment_variables: # Optional — secure setup-on-load metadata
|
| 334 |
+
- name: MY_API_KEY
|
| 335 |
+
prompt: API key
|
| 336 |
+
help: Where to get it
|
| 337 |
+
required_for: full functionality
|
| 338 |
+
prerequisites: # Optional legacy runtime requirements
|
| 339 |
+
env_vars: [MY_API_KEY] # Backward-compatible alias for required env vars
|
| 340 |
+
commands: [curl, jq] # Advisory only; does not hide the skill
|
| 341 |
+
metadata:
|
| 342 |
+
hermes:
|
| 343 |
+
tags: [Category, Subcategory, Keywords]
|
| 344 |
+
related_skills: [other-skill-name]
|
| 345 |
+
fallback_for_toolsets: [web] # Optional — show only when toolset is unavailable
|
| 346 |
+
requires_toolsets: [terminal] # Optional — show only when toolset is available
|
| 347 |
+
---
|
| 348 |
+
|
| 349 |
+
# Skill Title
|
| 350 |
+
|
| 351 |
+
Brief intro.
|
| 352 |
+
|
| 353 |
+
## When to Use
|
| 354 |
+
Trigger conditions — when should the agent load this skill?
|
| 355 |
+
|
| 356 |
+
## Quick Reference
|
| 357 |
+
Table of common commands or API calls.
|
| 358 |
+
|
| 359 |
+
## Procedure
|
| 360 |
+
Step-by-step instructions the agent follows.
|
| 361 |
+
|
| 362 |
+
## Pitfalls
|
| 363 |
+
Known failure modes and how to handle them.
|
| 364 |
+
|
| 365 |
+
## Verification
|
| 366 |
+
How the agent confirms it worked.
|
| 367 |
+
```
|
| 368 |
+
|
| 369 |
+
### Platform-specific skills
|
| 370 |
+
|
| 371 |
+
Skills can declare which OS platforms they support via the `platforms` frontmatter field. Skills with this field are automatically hidden from the system prompt, `skills_list()`, and slash commands on incompatible platforms.
|
| 372 |
+
|
| 373 |
+
```yaml
|
| 374 |
+
platforms: [macos] # macOS only (e.g., iMessage, Apple Reminders)
|
| 375 |
+
platforms: [macos, linux] # macOS and Linux
|
| 376 |
+
platforms: [windows] # Windows only
|
| 377 |
+
```
|
| 378 |
+
|
| 379 |
+
If the field is omitted or empty, the skill loads on all platforms (backward compatible). See `skills/apple/` for examples of macOS-only skills.
|
| 380 |
+
|
| 381 |
+
### Conditional skill activation
|
| 382 |
+
|
| 383 |
+
Skills can declare conditions that control when they appear in the system prompt, based on which tools and toolsets are available in the current session. This is primarily used for **fallback skills** — alternatives that should only be shown when a primary tool is unavailable.
|
| 384 |
+
|
| 385 |
+
Four fields are supported under `metadata.hermes`:
|
| 386 |
+
|
| 387 |
+
```yaml
|
| 388 |
+
metadata:
|
| 389 |
+
hermes:
|
| 390 |
+
fallback_for_toolsets: [web] # Show ONLY when these toolsets are unavailable
|
| 391 |
+
requires_toolsets: [terminal] # Show ONLY when these toolsets are available
|
| 392 |
+
fallback_for_tools: [web_search] # Show ONLY when these specific tools are unavailable
|
| 393 |
+
requires_tools: [terminal] # Show ONLY when these specific tools are available
|
| 394 |
+
```
|
| 395 |
+
|
| 396 |
+
**Semantics:**
|
| 397 |
+
- `fallback_for_*`: The skill is a backup. It is **hidden** when the listed tools/toolsets are available, and **shown** when they are unavailable. Use this for free alternatives to premium tools.
|
| 398 |
+
- `requires_*`: The skill needs certain tools to function. It is **hidden** when the listed tools/toolsets are unavailable. Use this for skills that depend on specific capabilities (e.g., a skill that only makes sense with terminal access).
|
| 399 |
+
- If both are specified, both conditions must be satisfied for the skill to appear.
|
| 400 |
+
- If neither is specified, the skill is always shown (backward compatible).
|
| 401 |
+
|
| 402 |
+
**Examples:**
|
| 403 |
+
|
| 404 |
+
```yaml
|
| 405 |
+
# DuckDuckGo search — shown when Firecrawl (web toolset) is unavailable
|
| 406 |
+
metadata:
|
| 407 |
+
hermes:
|
| 408 |
+
fallback_for_toolsets: [web]
|
| 409 |
+
|
| 410 |
+
# Smart home skill — only useful when terminal is available
|
| 411 |
+
metadata:
|
| 412 |
+
hermes:
|
| 413 |
+
requires_toolsets: [terminal]
|
| 414 |
+
|
| 415 |
+
# Local browser fallback — shown when Browserbase is unavailable
|
| 416 |
+
metadata:
|
| 417 |
+
hermes:
|
| 418 |
+
fallback_for_toolsets: [browser]
|
| 419 |
+
```
|
| 420 |
+
|
| 421 |
+
The filtering happens at prompt build time in `agent/prompt_builder.py`. The `build_skills_system_prompt()` function receives the set of available tools and toolsets from the agent and uses `_skill_should_show()` to evaluate each skill's conditions.
|
| 422 |
+
|
| 423 |
+
### Skill setup metadata
|
| 424 |
+
|
| 425 |
+
Skills can declare secure setup-on-load metadata via the `required_environment_variables` frontmatter field. Missing values do not hide the skill from discovery; they trigger a CLI-only secure prompt when the skill is actually loaded.
|
| 426 |
+
|
| 427 |
+
```yaml
|
| 428 |
+
required_environment_variables:
|
| 429 |
+
- name: TENOR_API_KEY
|
| 430 |
+
prompt: Tenor API key
|
| 431 |
+
help: Get a key from https://developers.google.com/tenor
|
| 432 |
+
required_for: full functionality
|
| 433 |
+
```
|
| 434 |
+
|
| 435 |
+
The user may skip setup and keep loading the skill. Hermes only exposes metadata (`stored_as`, `skipped`, `validated`) to the model — never the secret value.
|
| 436 |
+
|
| 437 |
+
Legacy `prerequisites.env_vars` remains supported and is normalized into the new representation.
|
| 438 |
+
|
| 439 |
+
```yaml
|
| 440 |
+
prerequisites:
|
| 441 |
+
env_vars: [TENOR_API_KEY] # Legacy alias for required_environment_variables
|
| 442 |
+
commands: [curl, jq] # Advisory CLI checks
|
| 443 |
+
```
|
| 444 |
+
|
| 445 |
+
Gateway and messaging sessions never collect secrets in-band; they instruct the user to run `hermes setup` or update `~/.hermes/.env` locally.
|
| 446 |
+
|
| 447 |
+
**When to declare required environment variables:**
|
| 448 |
+
- The skill uses an API key or token that should be collected securely at load time
|
| 449 |
+
- The skill can still be useful if the user skips setup, but may degrade gracefully
|
| 450 |
+
|
| 451 |
+
**When to declare command prerequisites:**
|
| 452 |
+
- The skill relies on a CLI tool that may not be installed (e.g., `himalaya`, `openhue`, `ddgs`)
|
| 453 |
+
- Treat command checks as advisory guidance; they do not hide the skill at discovery time
|
| 454 |
+
|
| 455 |
+
See `skills/gifs/gif-search/` and `skills/email/himalaya/` for examples.
|
| 456 |
+
|
| 457 |
+
### Skill guidelines
|
| 458 |
+
|
| 459 |
+
- **No external dependencies unless absolutely necessary.** Prefer stdlib Python, curl, and existing Hermes tools (`web_extract`, `terminal`, `read_file`).
|
| 460 |
+
- **Progressive disclosure.** Put the most common workflow first. Edge cases and advanced usage go at the bottom.
|
| 461 |
+
- **Include helper scripts** for XML/JSON parsing or complex logic — don't expect the LLM to write parsers inline every time.
|
| 462 |
+
- **Test it.** Run `hermes --toolsets skills -q "Use the X skill to do Y"` and verify the agent follows the instructions correctly.
|
| 463 |
+
|
| 464 |
+
---
|
| 465 |
+
|
| 466 |
+
## Adding a Skin / Theme
|
| 467 |
+
|
| 468 |
+
Hermes uses a data-driven skin system — no code changes needed to add a new skin.
|
| 469 |
+
|
| 470 |
+
**Option A: User skin (YAML file)**
|
| 471 |
+
|
| 472 |
+
Create `~/.hermes/skins/<name>.yaml`:
|
| 473 |
+
|
| 474 |
+
```yaml
|
| 475 |
+
name: mytheme
|
| 476 |
+
description: Short description of the theme
|
| 477 |
+
|
| 478 |
+
colors:
|
| 479 |
+
banner_border: "#HEX" # Panel border color
|
| 480 |
+
banner_title: "#HEX" # Panel title color
|
| 481 |
+
banner_accent: "#HEX" # Section header color
|
| 482 |
+
banner_dim: "#HEX" # Muted/dim text color
|
| 483 |
+
banner_text: "#HEX" # Body text color
|
| 484 |
+
response_border: "#HEX" # Response box border
|
| 485 |
+
|
| 486 |
+
spinner:
|
| 487 |
+
waiting_faces: ["(⚔)", "(⛨)"]
|
| 488 |
+
thinking_faces: ["(⚔)", "(⌁)"]
|
| 489 |
+
thinking_verbs: ["forging", "plotting"]
|
| 490 |
+
wings: # Optional left/right decorations
|
| 491 |
+
- ["⟪⚔", "⚔⟫"]
|
| 492 |
+
|
| 493 |
+
branding:
|
| 494 |
+
agent_name: "My Agent"
|
| 495 |
+
welcome: "Welcome message"
|
| 496 |
+
response_label: " ⚔ Agent "
|
| 497 |
+
prompt_symbol: "⚔ ❯ "
|
| 498 |
+
|
| 499 |
+
tool_prefix: "╎" # Tool output line prefix
|
| 500 |
+
```
|
| 501 |
+
|
| 502 |
+
All fields are optional — missing values inherit from the default skin.
|
| 503 |
+
|
| 504 |
+
**Option B: Built-in skin**
|
| 505 |
+
|
| 506 |
+
Add to `_BUILTIN_SKINS` dict in `hermes_cli/skin_engine.py`. Use the same schema as above but as a Python dict. Built-in skins ship with the package and are always available.
|
| 507 |
+
|
| 508 |
+
**Activating:**
|
| 509 |
+
- CLI: `/skin mytheme` or set `display.skin: mytheme` in config.yaml
|
| 510 |
+
- Config: `display: { skin: mytheme }`
|
| 511 |
+
|
| 512 |
+
See `hermes_cli/skin_engine.py` for the full schema and existing skins as examples.
|
| 513 |
+
|
| 514 |
+
---
|
| 515 |
+
|
| 516 |
+
## Cross-Platform Compatibility
|
| 517 |
+
|
| 518 |
+
Hermes runs on Linux, macOS, and Windows. When writing code that touches the OS:
|
| 519 |
+
|
| 520 |
+
### Critical rules
|
| 521 |
+
|
| 522 |
+
1. **`termios` and `fcntl` are Unix-only.** Always catch both `ImportError` and `NotImplementedError`:
|
| 523 |
+
```python
|
| 524 |
+
try:
|
| 525 |
+
from simple_term_menu import TerminalMenu
|
| 526 |
+
menu = TerminalMenu(options)
|
| 527 |
+
idx = menu.show()
|
| 528 |
+
except (ImportError, NotImplementedError):
|
| 529 |
+
# Fallback: numbered menu for Windows
|
| 530 |
+
for i, opt in enumerate(options):
|
| 531 |
+
print(f" {i+1}. {opt}")
|
| 532 |
+
idx = int(input("Choice: ")) - 1
|
| 533 |
+
```
|
| 534 |
+
|
| 535 |
+
2. **File encoding.** Windows may save `.env` files in `cp1252` or other non-UTF-8 encodings. Always handle encoding errors — `latin-1` is a safe fallback because it decodes any byte sequence:
|
| 536 |
+
```python
|
| 537 |
+
try:
|
| 538 |
+
load_dotenv(env_path)
|
| 539 |
+
except UnicodeDecodeError:
|
| 540 |
+
load_dotenv(env_path, encoding="latin-1")
|
| 541 |
+
```
|
| 542 |
+
|
| 543 |
+
3. **Process management.** `os.setsid()`, `os.killpg()`, and signal handling differ on Windows. Use platform checks:
|
| 544 |
+
```python
|
| 545 |
+
import platform
|
| 546 |
+
if platform.system() != "Windows":
|
| 547 |
+
kwargs["preexec_fn"] = os.setsid
|
| 548 |
+
```
|
| 549 |
+
|
| 550 |
+
4. **Path separators.** Use `pathlib.Path` instead of string concatenation with `/`.
|
| 551 |
+
|
| 552 |
+
5. **Shell commands in installers.** If you change `scripts/install.sh`, check if the equivalent change is needed in `scripts/install.ps1`.
|
| 553 |
+
|
| 554 |
+
---
|
| 555 |
+
|
| 556 |
+
## Security Considerations
|
| 557 |
+
|
| 558 |
+
Hermes has terminal access. Security matters.
|
| 559 |
+
|
| 560 |
+
### Existing protections
|
| 561 |
+
|
| 562 |
+
| Layer | Implementation |
|
| 563 |
+
|-------|---------------|
|
| 564 |
+
| **Sudo password piping** | Uses `shlex.quote()` to prevent shell injection |
|
| 565 |
+
| **Dangerous command detection** | Regex patterns in `tools/approval.py` with user approval flow |
|
| 566 |
+
| **Cron prompt injection** | Scanner in `tools/cronjob_tools.py` blocks instruction-override patterns |
|
| 567 |
+
| **Write deny list** | Protected paths (`~/.ssh/authorized_keys`, `/etc/shadow`) resolved via `os.path.realpath()` to prevent symlink bypass |
|
| 568 |
+
| **Skills guard** | Security scanner for hub-installed skills (`tools/skills_guard.py`) |
|
| 569 |
+
| **Code execution sandbox** | `execute_code` child process runs with API keys stripped from environment |
|
| 570 |
+
| **Container hardening** | Docker: all capabilities dropped, no privilege escalation, PID limits, size-limited tmpfs |
|
| 571 |
+
|
| 572 |
+
### When contributing security-sensitive code
|
| 573 |
+
|
| 574 |
+
- **Always use `shlex.quote()`** when interpolating user input into shell commands
|
| 575 |
+
- **Resolve symlinks** with `os.path.realpath()` before path-based access control checks
|
| 576 |
+
- **Don't log secrets.** API keys, tokens, and passwords should never appear in log output
|
| 577 |
+
- **Catch broad exceptions** around tool execution so a single failure doesn't crash the agent loop
|
| 578 |
+
- **Test on all platforms** if your change touches file paths, process management, or shell commands
|
| 579 |
+
|
| 580 |
+
If your PR affects security, note it explicitly in the description.
|
| 581 |
+
|
| 582 |
+
---
|
| 583 |
+
|
| 584 |
+
## Pull Request Process
|
| 585 |
+
|
| 586 |
+
### Branch naming
|
| 587 |
+
|
| 588 |
+
```
|
| 589 |
+
fix/description # Bug fixes
|
| 590 |
+
feat/description # New features
|
| 591 |
+
docs/description # Documentation
|
| 592 |
+
test/description # Tests
|
| 593 |
+
refactor/description # Code restructuring
|
| 594 |
+
```
|
| 595 |
+
|
| 596 |
+
### Before submitting
|
| 597 |
+
|
| 598 |
+
1. **Run tests**: `pytest tests/ -v`
|
| 599 |
+
2. **Test manually**: Run `hermes` and exercise the code path you changed
|
| 600 |
+
3. **Check cross-platform impact**: If you touch file I/O, process management, or terminal handling, consider Windows and macOS
|
| 601 |
+
4. **Keep PRs focused**: One logical change per PR. Don't mix a bug fix with a refactor with a new feature.
|
| 602 |
+
|
| 603 |
+
### PR description
|
| 604 |
+
|
| 605 |
+
Include:
|
| 606 |
+
- **What** changed and **why**
|
| 607 |
+
- **How to test** it (reproduction steps for bugs, usage examples for features)
|
| 608 |
+
- **What platforms** you tested on
|
| 609 |
+
- Reference any related issues
|
| 610 |
+
|
| 611 |
+
### Commit messages
|
| 612 |
+
|
| 613 |
+
We use [Conventional Commits](https://www.conventionalcommits.org/):
|
| 614 |
+
|
| 615 |
+
```
|
| 616 |
+
<type>(<scope>): <description>
|
| 617 |
+
```
|
| 618 |
+
|
| 619 |
+
| Type | Use for |
|
| 620 |
+
|------|---------|
|
| 621 |
+
| `fix` | Bug fixes |
|
| 622 |
+
| `feat` | New features |
|
| 623 |
+
| `docs` | Documentation |
|
| 624 |
+
| `test` | Tests |
|
| 625 |
+
| `refactor` | Code restructuring (no behavior change) |
|
| 626 |
+
| `chore` | Build, CI, dependency updates |
|
| 627 |
+
|
| 628 |
+
Scopes: `cli`, `gateway`, `tools`, `skills`, `agent`, `install`, `whatsapp`, `security`, etc.
|
| 629 |
+
|
| 630 |
+
Examples:
|
| 631 |
+
```
|
| 632 |
+
fix(cli): prevent crash in save_config_value when model is a string
|
| 633 |
+
feat(gateway): add WhatsApp multi-user session isolation
|
| 634 |
+
fix(security): prevent shell injection in sudo password piping
|
| 635 |
+
test(tools): add unit tests for file_operations
|
| 636 |
+
```
|
| 637 |
+
|
| 638 |
+
---
|
| 639 |
+
|
| 640 |
+
## Reporting Issues
|
| 641 |
+
|
| 642 |
+
- Use [GitHub Issues](https://github.com/NousResearch/hermes-agent/issues)
|
| 643 |
+
- Include: OS, Python version, Hermes version (`hermes version`), full error traceback
|
| 644 |
+
- Include steps to reproduce
|
| 645 |
+
- Check existing issues before creating duplicates
|
| 646 |
+
- For security vulnerabilities, please report privately
|
| 647 |
+
|
| 648 |
+
---
|
| 649 |
+
|
| 650 |
+
## Community
|
| 651 |
+
|
| 652 |
+
- **Discord**: [discord.gg/NousResearch](https://discord.gg/NousResearch) — for questions, showcasing projects, and sharing skills
|
| 653 |
+
- **GitHub Discussions**: For design proposals and architecture discussions
|
| 654 |
+
- **Skills Hub**: Upload specialized skills to a registry and share them with the community
|
| 655 |
+
|
| 656 |
+
---
|
| 657 |
+
|
| 658 |
+
## License
|
| 659 |
+
|
| 660 |
+
By contributing, you agree that your contributions will be licensed under the [MIT License](LICENSE).
|
Dockerfile
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Hermes Agent - Hugging Face Spaces Dockerfile
|
| 2 |
+
FROM python:3.12-slim
|
| 3 |
+
|
| 4 |
+
RUN apt-get update && apt-get install -y --no-install-recommends \
|
| 5 |
+
curl \
|
| 6 |
+
ca-certificates \
|
| 7 |
+
gnupg \
|
| 8 |
+
git \
|
| 9 |
+
wget \
|
| 10 |
+
unzip \
|
| 11 |
+
inotify-tools \
|
| 12 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 13 |
+
|
| 14 |
+
RUN curl -LsSf https://astral.sh/uv/install.sh | sh
|
| 15 |
+
ENV PATH="/root/.local/bin:$PATH"
|
| 16 |
+
|
| 17 |
+
WORKDIR /app
|
| 18 |
+
|
| 19 |
+
COPY pyproject.toml ./
|
| 20 |
+
COPY requirements.txt ./
|
| 21 |
+
COPY README.md ./
|
| 22 |
+
|
| 23 |
+
RUN uv pip install --system \
|
| 24 |
+
"pydantic>=2.0" \
|
| 25 |
+
"httpx" \
|
| 26 |
+
"requests" \
|
| 27 |
+
"pyyaml" \
|
| 28 |
+
"jinja2" \
|
| 29 |
+
"rich" \
|
| 30 |
+
"tenacity" \
|
| 31 |
+
"python-dotenv" \
|
| 32 |
+
"fire" \
|
| 33 |
+
"openai" \
|
| 34 |
+
"anthropic>=0.39.0" \
|
| 35 |
+
"prompt_toolkit" \
|
| 36 |
+
"typer" \
|
| 37 |
+
"platformdirs" \
|
| 38 |
+
"python-telegram-bot>=20.0" \
|
| 39 |
+
"aiohttp>=3.9.0" \
|
| 40 |
+
"edge-tts" \
|
| 41 |
+
"faster-whisper>=1.0.0" \
|
| 42 |
+
"firecrawl-py" \
|
| 43 |
+
"parallel-web>=0.4.2" \
|
| 44 |
+
"fal-client" \
|
| 45 |
+
"croniter" \
|
| 46 |
+
"slack-bolt>=1.18.0" \
|
| 47 |
+
"slack-sdk>=3.27.0" \
|
| 48 |
+
"mcp>=1.2.0"
|
| 49 |
+
|
| 50 |
+
COPY agent/ ./agent/
|
| 51 |
+
COPY tools/ ./tools/
|
| 52 |
+
COPY gateway/ ./gateway/
|
| 53 |
+
COPY hermes_cli/ ./hermes_cli/
|
| 54 |
+
COPY cron/ ./cron/
|
| 55 |
+
COPY honcho_integration/ ./honcho_integration/
|
| 56 |
+
COPY acp_adapter/ ./acp_adapter/
|
| 57 |
+
COPY run_agent.py ./
|
| 58 |
+
COPY model_tools.py ./
|
| 59 |
+
COPY toolsets.py ./
|
| 60 |
+
COPY cli.py ./
|
| 61 |
+
COPY hermes_state.py ./
|
| 62 |
+
COPY hermes_constants.py ./
|
| 63 |
+
COPY hermes_time.py ./
|
| 64 |
+
COPY utils.py ./
|
| 65 |
+
COPY batch_runner.py ./
|
| 66 |
+
COPY trajectory_compressor.py ./
|
| 67 |
+
COPY toolset_distributions.py ./
|
| 68 |
+
COPY minisweagent_path.py ./
|
| 69 |
+
COPY cli-config.yaml.example ./
|
| 70 |
+
COPY .env.example ./
|
| 71 |
+
|
| 72 |
+
RUN git submodule update --init --recursive || true
|
| 73 |
+
|
| 74 |
+
RUN uv pip install --system -e .
|
| 75 |
+
|
| 76 |
+
COPY setup.sh /usr/local/bin/setup.sh
|
| 77 |
+
COPY hermes_run.py /usr/local/bin/hermes_run.py
|
| 78 |
+
RUN chmod +x /usr/local/bin/setup.sh /usr/local/bin/hermes_run.py
|
| 79 |
+
|
| 80 |
+
RUN mkdir -p /root/.hermes/workspace/skills
|
| 81 |
+
RUN mkdir -p /root/.hermes/workspace/memory
|
| 82 |
+
RUN mkdir -p /root/.hermes/sessions
|
| 83 |
+
|
| 84 |
+
ENV HERMES_HOME=/root/.hermes
|
| 85 |
+
ENV HERMES_WORKSPACE=/root/.hermes/workspace
|
| 86 |
+
ENV PYTHONUNBUFFERED=1
|
| 87 |
+
|
| 88 |
+
EXPOSE 7860
|
| 89 |
+
|
| 90 |
+
ENTRYPOINT ["/usr/local/bin/setup.sh"]
|
| 91 |
+
CMD ["python3", "-m", "gateway.run"]
|
EXPERIENCE.md
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Hermes-agent 部署经验总结
|
| 2 |
+
|
| 3 |
+
## 1. 删除 litellm 依赖
|
| 4 |
+
|
| 5 |
+
**问题**:PyPI 上 litellm 被隔离(quarantined),导致 Docker 构建失败
|
| 6 |
+
|
| 7 |
+
**解决**:删除 litellm 依赖,因为代码中实际没有使用它
|
| 8 |
+
|
| 9 |
+
需要修改的文件:
|
| 10 |
+
- `pyproject.toml`
|
| 11 |
+
- `requirements.txt`
|
| 12 |
+
- `Dockerfile`
|
| 13 |
+
|
| 14 |
+
## 2. 风控问题解决
|
| 15 |
+
|
| 16 |
+
### 2.1 终端命令审批(Dangerous Command Approval)
|
| 17 |
+
|
| 18 |
+
**问题**:执行 curl、rm 等命令时需要用户手动审批
|
| 19 |
+
|
| 20 |
+
**解决**:
|
| 21 |
+
1. `config.yaml` 中添加:
|
| 22 |
+
```yaml
|
| 23 |
+
approvals:
|
| 24 |
+
mode: "off"  # 注意:YAML 中未加引号的 off 会被解析为布尔值 false,需加引号确保为字符串
|
| 25 |
+
```
|
| 26 |
+
2. `.env` 中添加:
|
| 27 |
+
```
|
| 28 |
+
HERMES_YOLO_MODE=true
|
| 29 |
+
```
|
| 30 |
+
|
| 31 |
+
### 2.2 网页抓取 lookalike 域名检测
|
| 32 |
+
|
| 33 |
+
**问题**:Firecrawl API 会检测钓鱼/仿冒域名,阻止抓取
|
| 34 |
+
|
| 35 |
+
**解决**:将 web backend 改为 tavily
|
| 36 |
+
|
| 37 |
+
在 `config.yaml` 中添加:
|
| 38 |
+
```yaml
|
| 39 |
+
web:
|
| 40 |
+
backend: tavily
|
| 41 |
+
```
|
| 42 |
+
|
| 43 |
+
需要确保 `.env` 中有 `TAVILY_API_KEY` 环境变量
|
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2025 Nous Research
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
README.md
CHANGED
|
@@ -1,10 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
---
|
| 9 |
|
| 10 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<p align="center">
|
| 2 |
+
<img src="assets/banner.png" alt="Hermes Agent" width="100%">
|
| 3 |
+
</p>
|
| 4 |
+
|
| 5 |
+
# Hermes Agent ☤
|
| 6 |
+
|
| 7 |
+
<p align="center">
|
| 8 |
+
<a href="https://hermes-agent.nousresearch.com/docs/"><img src="https://img.shields.io/badge/Docs-hermes--agent.nousresearch.com-FFD700?style=for-the-badge" alt="Documentation"></a>
|
| 9 |
+
<a href="https://discord.gg/NousResearch"><img src="https://img.shields.io/badge/Discord-5865F2?style=for-the-badge&logo=discord&logoColor=white" alt="Discord"></a>
|
| 10 |
+
<a href="https://github.com/NousResearch/hermes-agent/blob/main/LICENSE"><img src="https://img.shields.io/badge/License-MIT-green?style=for-the-badge" alt="License: MIT"></a>
|
| 11 |
+
<a href="https://nousresearch.com"><img src="https://img.shields.io/badge/Built%20by-Nous%20Research-blueviolet?style=for-the-badge" alt="Built by Nous Research"></a>
|
| 12 |
+
</p>
|
| 13 |
+
|
| 14 |
+
**The self-improving AI agent built by [Nous Research](https://nousresearch.com).** It's the only agent with a built-in learning loop — it creates skills from experience, improves them during use, nudges itself to persist knowledge, searches its own past conversations, and builds a deepening model of who you are across sessions. Run it on a $5 VPS, a GPU cluster, or serverless infrastructure that costs nearly nothing when idle. It's not tied to your laptop — talk to it from Telegram while it works on a cloud VM.
|
| 15 |
+
|
| 16 |
+
Use any model you want — [Nous Portal](https://portal.nousresearch.com), [OpenRouter](https://openrouter.ai) (200+ models), [z.ai/GLM](https://z.ai), [Kimi/Moonshot](https://platform.moonshot.ai), [MiniMax](https://www.minimax.io), OpenAI, or your own endpoint. Switch with `hermes model` — no code changes, no lock-in.
|
| 17 |
+
|
| 18 |
+
<table>
|
| 19 |
+
<tr><td><b>A real terminal interface</b></td><td>Full TUI with multiline editing, slash-command autocomplete, conversation history, interrupt-and-redirect, and streaming tool output.</td></tr>
|
| 20 |
+
<tr><td><b>Lives where you do</b></td><td>Telegram, Discord, Slack, WhatsApp, Signal, and CLI — all from a single gateway process. Voice memo transcription, cross-platform conversation continuity.</td></tr>
|
| 21 |
+
<tr><td><b>A closed learning loop</b></td><td>Agent-curated memory with periodic nudges. Autonomous skill creation after complex tasks. Skills self-improve during use. FTS5 session search with LLM summarization for cross-session recall. <a href="https://github.com/plastic-labs/honcho">Honcho</a> dialectic user modeling. Compatible with the <a href="https://agentskills.io">agentskills.io</a> open standard.</td></tr>
|
| 22 |
+
<tr><td><b>Scheduled automations</b></td><td>Built-in cron scheduler with delivery to any platform. Daily reports, nightly backups, weekly audits — all in natural language, running unattended.</td></tr>
|
| 23 |
+
<tr><td><b>Delegates and parallelizes</b></td><td>Spawn isolated subagents for parallel workstreams. Write Python scripts that call tools via RPC, collapsing multi-step pipelines into zero-context-cost turns.</td></tr>
|
| 24 |
+
<tr><td><b>Runs anywhere, not just your laptop</b></td><td>Six terminal backends — local, Docker, SSH, Daytona, Singularity, and Modal. Daytona and Modal offer serverless persistence — your agent's environment hibernates when idle and wakes on demand, costing nearly nothing between sessions. Run it on a $5 VPS or a GPU cluster.</td></tr>
|
| 25 |
+
<tr><td><b>Research-ready</b></td><td>Batch trajectory generation, Atropos RL environments, trajectory compression for training the next generation of tool-calling models.</td></tr>
|
| 26 |
+
</table>
|
| 27 |
+
|
| 28 |
---
|
| 29 |
+
|
| 30 |
+
## Quick Install
|
| 31 |
+
|
| 32 |
+
```bash
|
| 33 |
+
curl -fsSL https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.sh | bash
|
| 34 |
+
```
|
| 35 |
+
|
| 36 |
+
Works on Linux, macOS, and WSL2. The installer handles everything — Python, Node.js, dependencies, and the `hermes` command. No prerequisites except git.
|
| 37 |
+
|
| 38 |
+
> **Windows:** Native Windows is not supported. Please install [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install) and run the command above.
|
| 39 |
+
|
| 40 |
+
After installation:
|
| 41 |
+
|
| 42 |
+
```bash
|
| 43 |
+
source ~/.bashrc # reload shell (or: source ~/.zshrc)
|
| 44 |
+
hermes # start chatting!
|
| 45 |
+
```
|
| 46 |
+
|
| 47 |
---
|
| 48 |
|
| 49 |
+
## Getting Started
|
| 50 |
+
|
| 51 |
+
```bash
|
| 52 |
+
hermes # Interactive CLI — start a conversation
|
| 53 |
+
hermes model # Choose your LLM provider and model
|
| 54 |
+
hermes tools # Configure which tools are enabled
|
| 55 |
+
hermes config set # Set individual config values
|
| 56 |
+
hermes gateway # Start the messaging gateway (Telegram, Discord, etc.)
|
| 57 |
+
hermes setup # Run the full setup wizard (configures everything at once)
|
| 58 |
+
hermes claw migrate # Migrate from OpenClaw (if coming from OpenClaw)
|
| 59 |
+
hermes update # Update to the latest version
|
| 60 |
+
hermes doctor # Diagnose any issues
|
| 61 |
+
```
|
| 62 |
+
|
| 63 |
+
📖 **[Full documentation →](https://hermes-agent.nousresearch.com/docs/)**
|
| 64 |
+
|
| 65 |
+
## CLI vs Messaging Quick Reference
|
| 66 |
+
|
| 67 |
+
Hermes has two entry points: start the terminal UI with `hermes`, or run the gateway and talk to it from Telegram, Discord, Slack, WhatsApp, Signal, or Email. Once you're in a conversation, many slash commands are shared across both interfaces.
|
| 68 |
+
|
| 69 |
+
| Action | CLI | Messaging platforms |
|
| 70 |
+
|---------|-----|---------------------|
|
| 71 |
+
| Start chatting | `hermes` | Run `hermes gateway setup` + `hermes gateway start`, then send the bot a message |
|
| 72 |
+
| Start fresh conversation | `/new` or `/reset` | `/new` or `/reset` |
|
| 73 |
+
| Change model | `/model [provider:model]` | `/model [provider:model]` |
|
| 74 |
+
| Set a personality | `/personality [name]` | `/personality [name]` |
|
| 75 |
+
| Retry or undo the last turn | `/retry`, `/undo` | `/retry`, `/undo` |
|
| 76 |
+
| Compress context / check usage | `/compress`, `/usage`, `/insights [--days N]` | `/compress`, `/usage`, `/insights [days]` |
|
| 77 |
+
| Browse skills | `/skills` or `/<skill-name>` | `/skills` or `/<skill-name>` |
|
| 78 |
+
| Interrupt current work | `Ctrl+C` or send a new message | `/stop` or send a new message |
|
| 79 |
+
| Platform-specific status | `/platforms` | `/status`, `/sethome` |
|
| 80 |
+
|
| 81 |
+
For the full command lists, see the [CLI guide](https://hermes-agent.nousresearch.com/docs/user-guide/cli) and the [Messaging Gateway guide](https://hermes-agent.nousresearch.com/docs/user-guide/messaging).
|
| 82 |
+
|
| 83 |
+
---
|
| 84 |
+
|
| 85 |
+
## Documentation
|
| 86 |
+
|
| 87 |
+
All documentation lives at **[hermes-agent.nousresearch.com/docs](https://hermes-agent.nousresearch.com/docs/)**:
|
| 88 |
+
|
| 89 |
+
| Section | What's Covered |
|
| 90 |
+
|---------|---------------|
|
| 91 |
+
| [Quickstart](https://hermes-agent.nousresearch.com/docs/getting-started/quickstart) | Install → setup → first conversation in 2 minutes |
|
| 92 |
+
| [CLI Usage](https://hermes-agent.nousresearch.com/docs/user-guide/cli) | Commands, keybindings, personalities, sessions |
|
| 93 |
+
| [Configuration](https://hermes-agent.nousresearch.com/docs/user-guide/configuration) | Config file, providers, models, all options |
|
| 94 |
+
| [Messaging Gateway](https://hermes-agent.nousresearch.com/docs/user-guide/messaging) | Telegram, Discord, Slack, WhatsApp, Signal, Home Assistant |
|
| 95 |
+
| [Security](https://hermes-agent.nousresearch.com/docs/user-guide/security) | Command approval, DM pairing, container isolation |
|
| 96 |
+
| [Tools & Toolsets](https://hermes-agent.nousresearch.com/docs/user-guide/features/tools) | 40+ tools, toolset system, terminal backends |
|
| 97 |
+
| [Skills System](https://hermes-agent.nousresearch.com/docs/user-guide/features/skills) | Procedural memory, Skills Hub, creating skills |
|
| 98 |
+
| [Memory](https://hermes-agent.nousresearch.com/docs/user-guide/features/memory) | Persistent memory, user profiles, best practices |
|
| 99 |
+
| [MCP Integration](https://hermes-agent.nousresearch.com/docs/user-guide/features/mcp) | Connect any MCP server for extended capabilities |
|
| 100 |
+
| [Cron Scheduling](https://hermes-agent.nousresearch.com/docs/user-guide/features/cron) | Scheduled tasks with platform delivery |
|
| 101 |
+
| [Context Files](https://hermes-agent.nousresearch.com/docs/user-guide/features/context-files) | Project context that shapes every conversation |
|
| 102 |
+
| [Architecture](https://hermes-agent.nousresearch.com/docs/developer-guide/architecture) | Project structure, agent loop, key classes |
|
| 103 |
+
| [Contributing](https://hermes-agent.nousresearch.com/docs/developer-guide/contributing) | Development setup, PR process, code style |
|
| 104 |
+
| [CLI Reference](https://hermes-agent.nousresearch.com/docs/reference/cli-commands) | All commands and flags |
|
| 105 |
+
| [Environment Variables](https://hermes-agent.nousresearch.com/docs/reference/environment-variables) | Complete env var reference |
|
| 106 |
+
|
| 107 |
+
---
|
| 108 |
+
|
| 109 |
+
## Migrating from OpenClaw
|
| 110 |
+
|
| 111 |
+
If you're coming from OpenClaw, Hermes can automatically import your settings, memories, skills, and API keys.
|
| 112 |
+
|
| 113 |
+
**During first-time setup:** The setup wizard (`hermes setup`) automatically detects `~/.openclaw` and offers to migrate before configuration begins.
|
| 114 |
+
|
| 115 |
+
**Anytime after install:**
|
| 116 |
+
|
| 117 |
+
```bash
|
| 118 |
+
hermes claw migrate # Interactive migration (full preset)
|
| 119 |
+
hermes claw migrate --dry-run # Preview what would be migrated
|
| 120 |
+
hermes claw migrate --preset user-data # Migrate without secrets
|
| 121 |
+
hermes claw migrate --overwrite # Overwrite existing conflicts
|
| 122 |
+
```
|
| 123 |
+
|
| 124 |
+
What gets imported:
|
| 125 |
+
- **SOUL.md** — persona file
|
| 126 |
+
- **Memories** — MEMORY.md and USER.md entries
|
| 127 |
+
- **Skills** — user-created skills → `~/.hermes/skills/openclaw-imports/`
|
| 128 |
+
- **Command allowlist** — approval patterns
|
| 129 |
+
- **Messaging settings** — platform configs, allowed users, working directory
|
| 130 |
+
- **API keys** — allowlisted secrets (Telegram, OpenRouter, OpenAI, Anthropic, ElevenLabs)
|
| 131 |
+
- **TTS assets** — workspace audio files
|
| 132 |
+
- **Workspace instructions** — AGENTS.md (with `--workspace-target`)
|
| 133 |
+
|
| 134 |
+
See `hermes claw migrate --help` for all options, or use the `openclaw-migration` skill for an interactive agent-guided migration with dry-run previews.
|
| 135 |
+
|
| 136 |
+
---
|
| 137 |
+
|
| 138 |
+
## Contributing
|
| 139 |
+
|
| 140 |
+
We welcome contributions! See the [Contributing Guide](https://hermes-agent.nousresearch.com/docs/developer-guide/contributing) for development setup, code style, and PR process.
|
| 141 |
+
|
| 142 |
+
Quick start for contributors:
|
| 143 |
+
|
| 144 |
+
```bash
|
| 145 |
+
git clone https://github.com/NousResearch/hermes-agent.git
|
| 146 |
+
cd hermes-agent
|
| 147 |
+
curl -LsSf https://astral.sh/uv/install.sh | sh
|
| 148 |
+
uv venv venv --python 3.11
|
| 149 |
+
source venv/bin/activate
|
| 150 |
+
uv pip install -e ".[all,dev]"
|
| 151 |
+
python -m pytest tests/ -q
|
| 152 |
+
```
|
| 153 |
+
|
| 154 |
+
> **RL Training (optional):** To work on the RL/Tinker-Atropos integration:
|
| 155 |
+
> ```bash
|
| 156 |
+
> git submodule update --init tinker-atropos
|
| 157 |
+
> uv pip install -e "./tinker-atropos"
|
| 158 |
+
> ```
|
| 159 |
+
|
| 160 |
+
---
|
| 161 |
+
|
| 162 |
+
## Community
|
| 163 |
+
|
| 164 |
+
- 💬 [Discord](https://discord.gg/NousResearch)
|
| 165 |
+
- 📚 [Skills Hub](https://agentskills.io)
|
| 166 |
+
- 🐛 [Issues](https://github.com/NousResearch/hermes-agent/issues)
|
| 167 |
+
- 💡 [Discussions](https://github.com/NousResearch/hermes-agent/discussions)
|
| 168 |
+
|
| 169 |
+
---
|
| 170 |
+
|
| 171 |
+
## License
|
| 172 |
+
|
| 173 |
+
MIT — see [LICENSE](LICENSE).
|
| 174 |
+
|
| 175 |
+
Built by [Nous Research](https://nousresearch.com).
|
RELEASE_v0.2.0.md
ADDED
|
@@ -0,0 +1,383 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Hermes Agent v0.2.0 (v2026.3.12)
|
| 2 |
+
|
| 3 |
+
**Release Date:** March 12, 2026
|
| 4 |
+
|
| 5 |
+
> First tagged release since v0.1.0 (the initial pre-public foundation). In just over two weeks, Hermes Agent went from a small internal project to a full-featured AI agent platform — thanks to an explosion of community contributions. This release covers **216 merged pull requests** from **63 contributors**, resolving **119 issues**.
|
| 6 |
+
|
| 7 |
+
---
|
| 8 |
+
|
| 9 |
+
## ✨ Highlights
|
| 10 |
+
|
| 11 |
+
- **Multi-Platform Messaging Gateway** — Telegram, Discord, Slack, WhatsApp, Signal, Email (IMAP/SMTP), and Home Assistant platforms with unified session management, media attachments, and per-platform tool configuration.
|
| 12 |
+
|
| 13 |
+
- **MCP (Model Context Protocol) Client** — Native MCP support with stdio and HTTP transports, reconnection, resource/prompt discovery, and sampling (server-initiated LLM requests). ([#291](https://github.com/NousResearch/hermes-agent/pull/291) — @0xbyt4, [#301](https://github.com/NousResearch/hermes-agent/pull/301), [#753](https://github.com/NousResearch/hermes-agent/pull/753))
|
| 14 |
+
|
| 15 |
+
- **Skills Ecosystem** — 70+ bundled and optional skills across 15+ categories with a Skills Hub for community discovery, per-platform enable/disable, conditional activation based on tool availability, and prerequisite validation. ([#743](https://github.com/NousResearch/hermes-agent/pull/743) — @teyrebaz33, [#785](https://github.com/NousResearch/hermes-agent/pull/785) — @teyrebaz33)
|
| 16 |
+
|
| 17 |
+
- **Centralized Provider Router** — Unified `call_llm()`/`async_call_llm()` API replaces scattered provider logic across vision, summarization, compression, and trajectory saving. All auxiliary consumers route through a single code path with automatic credential resolution. ([#1003](https://github.com/NousResearch/hermes-agent/pull/1003))
|
| 18 |
+
|
| 19 |
+
- **ACP Server** — VS Code, Zed, and JetBrains editor integration via the Agent Communication Protocol standard. ([#949](https://github.com/NousResearch/hermes-agent/pull/949))
|
| 20 |
+
|
| 21 |
+
- **CLI Skin/Theme Engine** — Data-driven visual customization: banners, spinners, colors, branding. 7 built-in skins + custom YAML skins.
|
| 22 |
+
|
| 23 |
+
- **Git Worktree Isolation** — `hermes -w` launches isolated agent sessions in git worktrees for safe parallel work on the same repo. ([#654](https://github.com/NousResearch/hermes-agent/pull/654))
|
| 24 |
+
|
| 25 |
+
- **Filesystem Checkpoints & Rollback** — Automatic snapshots before destructive operations with `/rollback` to restore. ([#824](https://github.com/NousResearch/hermes-agent/pull/824))
|
| 26 |
+
|
| 27 |
+
- **3,289 Tests** — From near-zero test coverage to a comprehensive test suite covering agent, gateway, tools, cron, and CLI.
|
| 28 |
+
|
| 29 |
+
---
|
| 30 |
+
|
| 31 |
+
## 🏗️ Core Agent & Architecture
|
| 32 |
+
|
| 33 |
+
### Provider & Model Support
|
| 34 |
+
- Centralized provider router with `resolve_provider_client()` + `call_llm()` API ([#1003](https://github.com/NousResearch/hermes-agent/pull/1003))
|
| 35 |
+
- Nous Portal as first-class provider in setup ([#644](https://github.com/NousResearch/hermes-agent/issues/644))
|
| 36 |
+
- OpenAI Codex (Responses API) with ChatGPT subscription support ([#43](https://github.com/NousResearch/hermes-agent/pull/43)) — @grp06
|
| 37 |
+
- Codex OAuth vision support + multimodal content adapter
|
| 38 |
+
- Validate `/model` against live API instead of hardcoded lists
|
| 39 |
+
- Self-hosted Firecrawl support ([#460](https://github.com/NousResearch/hermes-agent/pull/460)) — @caentzminger
|
| 40 |
+
- Kimi Code API support ([#635](https://github.com/NousResearch/hermes-agent/pull/635)) — @christomitov
|
| 41 |
+
- MiniMax model ID update ([#473](https://github.com/NousResearch/hermes-agent/pull/473)) — @tars90percent
|
| 42 |
+
- OpenRouter provider routing configuration (provider_preferences)
|
| 43 |
+
- Nous credential refresh on 401 errors ([#571](https://github.com/NousResearch/hermes-agent/pull/571), [#269](https://github.com/NousResearch/hermes-agent/pull/269)) — @rewbs
|
| 44 |
+
- z.ai/GLM, Kimi/Moonshot, MiniMax, Azure OpenAI as first-class providers
|
| 45 |
+
- Unified `/model` and `/provider` into single view
|
| 46 |
+
|
| 47 |
+
### Agent Loop & Conversation
|
| 48 |
+
- Simple fallback model for provider resilience ([#740](https://github.com/NousResearch/hermes-agent/pull/740))
|
| 49 |
+
- Shared iteration budget across parent + subagent delegation
|
| 50 |
+
- Iteration budget pressure via tool result injection
|
| 51 |
+
- Configurable subagent provider/model with full credential resolution
|
| 52 |
+
- Handle 413 payload-too-large via compression instead of aborting ([#153](https://github.com/NousResearch/hermes-agent/pull/153)) — @tekelala
|
| 53 |
+
- Retry with rebuilt payload after compression ([#616](https://github.com/NousResearch/hermes-agent/pull/616)) — @tripledoublev
|
| 54 |
+
- Auto-compress pathologically large gateway sessions ([#628](https://github.com/NousResearch/hermes-agent/issues/628))
|
| 55 |
+
- Tool call repair middleware — auto-lowercase and invalid tool handler
|
| 56 |
+
- Reasoning effort configuration and `/reasoning` command ([#921](https://github.com/NousResearch/hermes-agent/pull/921))
|
| 57 |
+
- Detect and block file re-read/search loops after context compression ([#705](https://github.com/NousResearch/hermes-agent/pull/705)) — @0xbyt4
|
| 58 |
+
|
| 59 |
+
### Session & Memory
|
| 60 |
+
- Session naming with unique titles, auto-lineage, rich listing, and resume by name ([#720](https://github.com/NousResearch/hermes-agent/pull/720))
|
| 61 |
+
- Interactive session browser with search filtering ([#733](https://github.com/NousResearch/hermes-agent/pull/733))
|
| 62 |
+
- Display previous messages when resuming a session ([#734](https://github.com/NousResearch/hermes-agent/pull/734))
|
| 63 |
+
- Honcho AI-native cross-session user modeling ([#38](https://github.com/NousResearch/hermes-agent/pull/38)) — @erosika
|
| 64 |
+
- Proactive async memory flush on session expiry
|
| 65 |
+
- Smart context length probing with persistent caching + banner display
|
| 66 |
+
- `/resume` command for switching to named sessions in gateway
|
| 67 |
+
- Session reset policy for messaging platforms
|
| 68 |
+
|
| 69 |
+
---
|
| 70 |
+
|
| 71 |
+
## 📱 Messaging Platforms (Gateway)
|
| 72 |
+
|
| 73 |
+
### Telegram
|
| 74 |
+
- Native file attachments: send_document + send_video
|
| 75 |
+
- Document file processing for PDF, text, and Office files — @tekelala
|
| 76 |
+
- Forum topic session isolation ([#766](https://github.com/NousResearch/hermes-agent/pull/766)) — @spanishflu-est1918
|
| 77 |
+
- Browser screenshot sharing via MEDIA: protocol ([#657](https://github.com/NousResearch/hermes-agent/pull/657))
|
| 78 |
+
- Location support for find-nearby skill
|
| 79 |
+
- TTS voice message accumulation fix ([#176](https://github.com/NousResearch/hermes-agent/pull/176)) — @Bartok9
|
| 80 |
+
- Improved error handling and logging ([#763](https://github.com/NousResearch/hermes-agent/pull/763)) — @aydnOktay
|
| 81 |
+
- Italic regex newline fix + 43 format tests ([#204](https://github.com/NousResearch/hermes-agent/pull/204)) — @0xbyt4
|
| 82 |
+
|
| 83 |
+
### Discord
|
| 84 |
+
- Channel topic included in session context ([#248](https://github.com/NousResearch/hermes-agent/pull/248)) — @Bartok9
|
| 85 |
+
- DISCORD_ALLOW_BOTS config for bot message filtering ([#758](https://github.com/NousResearch/hermes-agent/pull/758))
|
| 86 |
+
- Document and video support ([#784](https://github.com/NousResearch/hermes-agent/pull/784))
|
| 87 |
+
- Improved error handling and logging ([#761](https://github.com/NousResearch/hermes-agent/pull/761)) — @aydnOktay
|
| 88 |
+
|
| 89 |
+
### Slack
|
| 90 |
+
- App_mention 404 fix + document/video support ([#784](https://github.com/NousResearch/hermes-agent/pull/784))
|
| 91 |
+
- Structured logging replacing print statements — @aydnOktay
|
| 92 |
+
|
| 93 |
+
### WhatsApp
|
| 94 |
+
- Native media sending — images, videos, documents ([#292](https://github.com/NousResearch/hermes-agent/pull/292)) — @satelerd
|
| 95 |
+
- Multi-user session isolation ([#75](https://github.com/NousResearch/hermes-agent/pull/75)) — @satelerd
|
| 96 |
+
- Cross-platform port cleanup replacing Linux-only fuser ([#433](https://github.com/NousResearch/hermes-agent/pull/433)) — @Farukest
|
| 97 |
+
- DM interrupt key mismatch fix ([#350](https://github.com/NousResearch/hermes-agent/pull/350)) — @Farukest
|
| 98 |
+
|
| 99 |
+
### Signal
|
| 100 |
+
- Full Signal messenger gateway via signal-cli-rest-api ([#405](https://github.com/NousResearch/hermes-agent/issues/405))
|
| 101 |
+
- Media URL support in message events ([#871](https://github.com/NousResearch/hermes-agent/pull/871))
|
| 102 |
+
|
| 103 |
+
### Email (IMAP/SMTP)
|
| 104 |
+
- New email gateway platform — @0xbyt4
|
| 105 |
+
|
| 106 |
+
### Home Assistant
|
| 107 |
+
- REST tools + WebSocket gateway integration ([#184](https://github.com/NousResearch/hermes-agent/pull/184)) — @0xbyt4
|
| 108 |
+
- Service discovery and enhanced setup
|
| 109 |
+
- Toolset mapping fix ([#538](https://github.com/NousResearch/hermes-agent/pull/538)) — @Himess
|
| 110 |
+
|
| 111 |
+
### Gateway Core
|
| 112 |
+
- Expose subagent tool calls and thinking to users ([#186](https://github.com/NousResearch/hermes-agent/pull/186)) — @cutepawss
|
| 113 |
+
- Configurable background process watcher notifications ([#840](https://github.com/NousResearch/hermes-agent/pull/840))
|
| 114 |
+
- `edit_message()` for Telegram/Discord/Slack with fallback
|
| 115 |
+
- `/compress`, `/usage`, `/update` slash commands
|
| 116 |
+
- Eliminated 3x SQLite message duplication in gateway sessions ([#873](https://github.com/NousResearch/hermes-agent/pull/873))
|
| 117 |
+
- Stabilize system prompt across gateway turns for cache hits ([#754](https://github.com/NousResearch/hermes-agent/pull/754))
|
| 118 |
+
- MCP server shutdown on gateway exit ([#796](https://github.com/NousResearch/hermes-agent/pull/796)) — @0xbyt4
|
| 119 |
+
- Pass session_db to AIAgent, fixing session_search error ([#108](https://github.com/NousResearch/hermes-agent/pull/108)) — @Bartok9
|
| 120 |
+
- Persist transcript changes in /retry, /undo; fix /reset attribute ([#217](https://github.com/NousResearch/hermes-agent/pull/217)) — @Farukest
|
| 121 |
+
- UTF-8 encoding fix preventing Windows crashes ([#369](https://github.com/NousResearch/hermes-agent/pull/369)) — @ch3ronsa
|
| 122 |
+
|
| 123 |
+
---
|
| 124 |
+
|
| 125 |
+
## 🖥️ CLI & User Experience
|
| 126 |
+
|
| 127 |
+
### Interactive CLI
|
| 128 |
+
- Data-driven skin/theme engine — 7 built-in skins (default, ares, mono, slate, poseidon, sisyphus, charizard) + custom YAML skins
|
| 129 |
+
- `/personality` command with custom personality + disable support ([#773](https://github.com/NousResearch/hermes-agent/pull/773)) — @teyrebaz33
|
| 130 |
+
- User-defined quick commands that bypass the agent loop ([#746](https://github.com/NousResearch/hermes-agent/pull/746)) — @teyrebaz33
|
| 131 |
+
- `/reasoning` command for effort level and display toggle ([#921](https://github.com/NousResearch/hermes-agent/pull/921))
|
| 132 |
+
- `/verbose` slash command to toggle debug at runtime ([#94](https://github.com/NousResearch/hermes-agent/pull/94)) — @cesareth
|
| 133 |
+
- `/insights` command — usage analytics, cost estimation & activity patterns ([#552](https://github.com/NousResearch/hermes-agent/pull/552))
|
| 134 |
+
- `/background` command for managing background processes
|
| 135 |
+
- `/help` formatting with command categories
|
| 136 |
+
- Bell-on-complete — terminal bell when agent finishes ([#738](https://github.com/NousResearch/hermes-agent/pull/738))
|
| 137 |
+
- Up/down arrow history navigation
|
| 138 |
+
- Clipboard image paste (Alt+V / Ctrl+V)
|
| 139 |
+
- Loading indicators for slow slash commands ([#882](https://github.com/NousResearch/hermes-agent/pull/882))
|
| 140 |
+
- Spinner flickering fix under patch_stdout ([#91](https://github.com/NousResearch/hermes-agent/pull/91)) — @0xbyt4
|
| 141 |
+
- `--quiet/-Q` flag for programmatic single-query mode
|
| 142 |
+
- `--fuck-it-ship-it` flag to bypass all approval prompts ([#724](https://github.com/NousResearch/hermes-agent/pull/724)) — @dmahan93
|
| 143 |
+
- Tools summary flag ([#767](https://github.com/NousResearch/hermes-agent/pull/767)) — @luisv-1
|
| 144 |
+
- Terminal blinking fix on SSH ([#284](https://github.com/NousResearch/hermes-agent/pull/284)) — @ygd58
|
| 145 |
+
- Multi-line paste detection fix ([#84](https://github.com/NousResearch/hermes-agent/pull/84)) — @0xbyt4
|
| 146 |
+
|
| 147 |
+
### Setup & Configuration
|
| 148 |
+
- Modular setup wizard with section subcommands and tool-first UX
|
| 149 |
+
- Container resource configuration prompts
|
| 150 |
+
- Backend validation for required binaries
|
| 151 |
+
- Config migration system (currently v7)
|
| 152 |
+
- API keys properly routed to .env instead of config.yaml ([#469](https://github.com/NousResearch/hermes-agent/pull/469)) — @ygd58
|
| 153 |
+
- Atomic write for .env to prevent API key loss on crash ([#954](https://github.com/NousResearch/hermes-agent/pull/954))
|
| 154 |
+
- `hermes tools` — per-platform tool enable/disable with curses UI
|
| 155 |
+
- `hermes doctor` for health checks across all configured providers
|
| 156 |
+
- `hermes update` with auto-restart for gateway service
|
| 157 |
+
- Show update-available notice in CLI banner
|
| 158 |
+
- Multiple named custom providers
|
| 159 |
+
- Shell config detection improvement for PATH setup ([#317](https://github.com/NousResearch/hermes-agent/pull/317)) — @mehmetkr-31
|
| 160 |
+
- Consistent HERMES_HOME and .env path resolution ([#51](https://github.com/NousResearch/hermes-agent/pull/51), [#48](https://github.com/NousResearch/hermes-agent/pull/48)) — @deankerr
|
| 161 |
+
- Docker backend fix on macOS + subagent auth for Nous Portal ([#46](https://github.com/NousResearch/hermes-agent/pull/46)) — @rsavitt
|
| 162 |
+
|
| 163 |
+
---
|
| 164 |
+
|
| 165 |
+
## 🔧 Tool System
|
| 166 |
+
|
| 167 |
+
### MCP (Model Context Protocol)
|
| 168 |
+
- Native MCP client with stdio + HTTP transports ([#291](https://github.com/NousResearch/hermes-agent/pull/291) — @0xbyt4, [#301](https://github.com/NousResearch/hermes-agent/pull/301))
|
| 169 |
+
- Sampling support — server-initiated LLM requests ([#753](https://github.com/NousResearch/hermes-agent/pull/753))
|
| 170 |
+
- Resource and prompt discovery
|
| 171 |
+
- Automatic reconnection and security hardening
|
| 172 |
+
- Banner integration, `/reload-mcp` command
|
| 173 |
+
- `hermes tools` UI integration
|
| 174 |
+
|
| 175 |
+
### Browser
|
| 176 |
+
- Local browser backend — zero-cost headless Chromium (no Browserbase needed)
|
| 177 |
+
- Console/errors tool, annotated screenshots, auto-recording, dogfood QA skill ([#745](https://github.com/NousResearch/hermes-agent/pull/745))
|
| 178 |
+
- Screenshot sharing via MEDIA: on all messaging platforms ([#657](https://github.com/NousResearch/hermes-agent/pull/657))
|
| 179 |
+
|
| 180 |
+
### Terminal & Execution
|
| 181 |
+
- `execute_code` sandbox with json_parse, shell_quote, retry helpers
|
| 182 |
+
- Docker: custom volume mounts ([#158](https://github.com/NousResearch/hermes-agent/pull/158)) — @Indelwin
|
| 183 |
+
- Daytona cloud sandbox backend ([#451](https://github.com/NousResearch/hermes-agent/pull/451)) — @rovle
|
| 184 |
+
- SSH backend fix ([#59](https://github.com/NousResearch/hermes-agent/pull/59)) — @deankerr
|
| 185 |
+
- Shell noise filtering and login shell execution for environment consistency
|
| 186 |
+
- Head+tail truncation for execute_code stdout overflow
|
| 187 |
+
- Configurable background process notification modes
|
| 188 |
+
|
| 189 |
+
### File Operations
|
| 190 |
+
- Filesystem checkpoints and `/rollback` command ([#824](https://github.com/NousResearch/hermes-agent/pull/824))
|
| 191 |
+
- Structured tool result hints (next-action guidance) for patch and search_files ([#722](https://github.com/NousResearch/hermes-agent/issues/722))
|
| 192 |
+
- Docker volumes passed to sandbox container config ([#687](https://github.com/NousResearch/hermes-agent/pull/687)) — @manuelschipper
|
| 193 |
+
|
| 194 |
+
---
|
| 195 |
+
|
| 196 |
+
## 🧩 Skills Ecosystem
|
| 197 |
+
|
| 198 |
+
### Skills System
|
| 199 |
+
- Per-platform skill enable/disable ([#743](https://github.com/NousResearch/hermes-agent/pull/743)) — @teyrebaz33
|
| 200 |
+
- Conditional skill activation based on tool availability ([#785](https://github.com/NousResearch/hermes-agent/pull/785)) — @teyrebaz33
|
| 201 |
+
- Skill prerequisites — hide skills with unmet dependencies ([#659](https://github.com/NousResearch/hermes-agent/pull/659)) — @kshitijk4poor
|
| 202 |
+
- Optional skills — shipped but not activated by default
|
| 203 |
+
- `hermes skills browse` — paginated hub browsing
|
| 204 |
+
- Skills sub-category organization
|
| 205 |
+
- Platform-conditional skill loading
|
| 206 |
+
- Atomic skill file writes ([#551](https://github.com/NousResearch/hermes-agent/pull/551)) — @aydnOktay
|
| 207 |
+
- Skills sync data loss prevention ([#563](https://github.com/NousResearch/hermes-agent/pull/563)) — @0xbyt4
|
| 208 |
+
- Dynamic skill slash commands for CLI and gateway
|
| 209 |
+
|
| 210 |
+
### New Skills (selected)
|
| 211 |
+
- **ASCII Art** — pyfiglet (571 fonts), cowsay, image-to-ascii ([#209](https://github.com/NousResearch/hermes-agent/pull/209)) — @0xbyt4
|
| 212 |
+
- **ASCII Video** — Full production pipeline ([#854](https://github.com/NousResearch/hermes-agent/pull/854)) — @SHL0MS
|
| 213 |
+
- **DuckDuckGo Search** — Firecrawl fallback ([#267](https://github.com/NousResearch/hermes-agent/pull/267)) — @gamedevCloudy; DDGS API expansion ([#598](https://github.com/NousResearch/hermes-agent/pull/598)) — @areu01or00
|
| 214 |
+
- **Solana Blockchain** — Wallet balances, USD pricing, token names ([#212](https://github.com/NousResearch/hermes-agent/pull/212)) — @gizdusum
|
| 215 |
+
- **AgentMail** — Agent-owned email inboxes ([#330](https://github.com/NousResearch/hermes-agent/pull/330)) — @teyrebaz33
|
| 216 |
+
- **Polymarket** — Prediction market data (read-only) ([#629](https://github.com/NousResearch/hermes-agent/pull/629))
|
| 217 |
+
- **OpenClaw Migration** — Official migration tool ([#570](https://github.com/NousResearch/hermes-agent/pull/570)) — @unmodeled-tyler
|
| 218 |
+
- **Domain Intelligence** — Passive recon: subdomains, SSL, WHOIS, DNS ([#136](https://github.com/NousResearch/hermes-agent/pull/136)) — @FurkanL0
|
| 219 |
+
- **Superpowers** — Software development skills ([#137](https://github.com/NousResearch/hermes-agent/pull/137)) — @kaos35
|
| 220 |
+
- **Hermes-Atropos** — RL environment development skill ([#815](https://github.com/NousResearch/hermes-agent/pull/815))
|
| 221 |
+
- Plus: arXiv search, OCR/documents, Excalidraw diagrams, YouTube transcripts, GIF search, Pokémon player, Minecraft modpack server, OpenHue (Philips Hue), Google Workspace, Notion, PowerPoint, Obsidian, find-nearby, and 40+ MLOps skills
|
| 222 |
+
|
| 223 |
+
---
|
| 224 |
+
|
| 225 |
+
## 🔒 Security & Reliability
|
| 226 |
+
|
| 227 |
+
### Security Hardening
|
| 228 |
+
- Path traversal fix in skill_view — prevented reading arbitrary files ([#220](https://github.com/NousResearch/hermes-agent/issues/220)) — @Farukest
|
| 229 |
+
- Shell injection prevention in sudo password piping ([#65](https://github.com/NousResearch/hermes-agent/pull/65)) — @leonsgithub
|
| 230 |
+
- Dangerous command detection: multiline bypass fix ([#233](https://github.com/NousResearch/hermes-agent/pull/233)) — @Farukest; tee/process substitution patterns ([#280](https://github.com/NousResearch/hermes-agent/pull/280)) — @dogiladeveloper
|
| 231 |
+
- Symlink boundary check fix in skills_guard ([#386](https://github.com/NousResearch/hermes-agent/pull/386)) — @Farukest
|
| 232 |
+
- Symlink bypass fix in write deny list on macOS ([#61](https://github.com/NousResearch/hermes-agent/pull/61)) — @0xbyt4
|
| 233 |
+
- Multi-word prompt injection bypass prevention ([#192](https://github.com/NousResearch/hermes-agent/pull/192)) — @0xbyt4
|
| 234 |
+
- Cron prompt injection scanner bypass fix ([#63](https://github.com/NousResearch/hermes-agent/pull/63)) — @0xbyt4
|
| 235 |
+
- Enforce 0600/0700 file permissions on sensitive files ([#757](https://github.com/NousResearch/hermes-agent/pull/757))
|
| 236 |
+
- .env file permissions restricted to owner-only ([#529](https://github.com/NousResearch/hermes-agent/pull/529)) — @Himess
|
| 237 |
+
- `--force` flag properly blocked from overriding dangerous verdicts ([#388](https://github.com/NousResearch/hermes-agent/pull/388)) — @Farukest
|
| 238 |
+
- FTS5 query sanitization + DB connection leak fix ([#565](https://github.com/NousResearch/hermes-agent/pull/565)) — @0xbyt4
|
| 239 |
+
- Expand secret redaction patterns + config toggle to disable
|
| 240 |
+
- In-memory permanent allowlist to prevent data leak ([#600](https://github.com/NousResearch/hermes-agent/pull/600)) — @alireza78a
|
| 241 |
+
|
| 242 |
+
### Atomic Writes (data loss prevention)
|
| 243 |
+
- sessions.json ([#611](https://github.com/NousResearch/hermes-agent/pull/611)) — @alireza78a
|
| 244 |
+
- Cron jobs ([#146](https://github.com/NousResearch/hermes-agent/pull/146)) — @alireza78a
|
| 245 |
+
- .env config ([#954](https://github.com/NousResearch/hermes-agent/pull/954))
|
| 246 |
+
- Process checkpoints ([#298](https://github.com/NousResearch/hermes-agent/pull/298)) — @aydnOktay
|
| 247 |
+
- Batch runner ([#297](https://github.com/NousResearch/hermes-agent/pull/297)) — @aydnOktay
|
| 248 |
+
- Skill files ([#551](https://github.com/NousResearch/hermes-agent/pull/551)) — @aydnOktay
|
| 249 |
+
|
| 250 |
+
### Reliability
|
| 251 |
+
- Guard all print() against OSError for systemd/headless environments ([#963](https://github.com/NousResearch/hermes-agent/pull/963))
|
| 252 |
+
- Reset all retry counters at start of run_conversation ([#607](https://github.com/NousResearch/hermes-agent/pull/607)) — @0xbyt4
|
| 253 |
+
- Return deny on approval callback timeout instead of None ([#603](https://github.com/NousResearch/hermes-agent/pull/603)) — @0xbyt4
|
| 254 |
+
- Fix None message content crashes across codebase ([#277](https://github.com/NousResearch/hermes-agent/pull/277))
|
| 255 |
+
- Fix context overrun crash with local LLM backends ([#403](https://github.com/NousResearch/hermes-agent/pull/403)) — @ch3ronsa
|
| 256 |
+
- Prevent `_flush_sentinel` from leaking to external APIs ([#227](https://github.com/NousResearch/hermes-agent/pull/227)) — @Farukest
|
| 257 |
+
- Prevent conversation_history mutation in callers ([#229](https://github.com/NousResearch/hermes-agent/pull/229)) — @Farukest
|
| 258 |
+
- Fix systemd restart loop ([#614](https://github.com/NousResearch/hermes-agent/pull/614)) — @voidborne-d
|
| 259 |
+
- Close file handles and sockets to prevent fd leaks ([#568](https://github.com/NousResearch/hermes-agent/pull/568) — @alireza78a, [#296](https://github.com/NousResearch/hermes-agent/pull/296) — @alireza78a, [#709](https://github.com/NousResearch/hermes-agent/pull/709) — @memosr)
|
| 260 |
+
- Prevent data loss in clipboard PNG conversion ([#602](https://github.com/NousResearch/hermes-agent/pull/602)) — @0xbyt4
|
| 261 |
+
- Eliminate shell noise from terminal output ([#293](https://github.com/NousResearch/hermes-agent/pull/293)) — @0xbyt4
|
| 262 |
+
- Timezone-aware now() for prompt, cron, and execute_code ([#309](https://github.com/NousResearch/hermes-agent/pull/309)) — @areu01or00
|
| 263 |
+
|
| 264 |
+
### Windows Compatibility
|
| 265 |
+
- Guard POSIX-only process functions ([#219](https://github.com/NousResearch/hermes-agent/pull/219)) — @Farukest
|
| 266 |
+
- Windows native support via Git Bash + ZIP-based update fallback
|
| 267 |
+
- pywinpty for PTY support ([#457](https://github.com/NousResearch/hermes-agent/pull/457)) — @shitcoinsherpa
|
| 268 |
+
- Explicit UTF-8 encoding on all config/data file I/O ([#458](https://github.com/NousResearch/hermes-agent/pull/458)) — @shitcoinsherpa
|
| 269 |
+
- Windows-compatible path handling ([#354](https://github.com/NousResearch/hermes-agent/pull/354), [#390](https://github.com/NousResearch/hermes-agent/pull/390)) — @Farukest
|
| 270 |
+
- Regex-based search output parsing for drive-letter paths ([#533](https://github.com/NousResearch/hermes-agent/pull/533)) — @Himess
|
| 271 |
+
- Auth store file lock for Windows ([#455](https://github.com/NousResearch/hermes-agent/pull/455)) — @shitcoinsherpa
|
| 272 |
+
|
| 273 |
+
---
|
| 274 |
+
|
| 275 |
+
## 🐛 Notable Bug Fixes
|
| 276 |
+
|
| 277 |
+
- Fix DeepSeek V3 tool call parser silently dropping multi-line JSON arguments ([#444](https://github.com/NousResearch/hermes-agent/pull/444)) — @PercyDikec
|
| 278 |
+
- Fix gateway transcript losing 1 message per turn due to offset mismatch ([#395](https://github.com/NousResearch/hermes-agent/pull/395)) — @PercyDikec
|
| 279 |
+
- Fix /retry command silently discarding the agent's final response ([#441](https://github.com/NousResearch/hermes-agent/pull/441)) — @PercyDikec
|
| 280 |
+
- Fix max-iterations retry returning empty string after think-block stripping ([#438](https://github.com/NousResearch/hermes-agent/pull/438)) — @PercyDikec
|
| 281 |
+
- Fix max-iterations retry using hardcoded max_tokens ([#436](https://github.com/NousResearch/hermes-agent/pull/436)) — @Farukest
|
| 282 |
+
- Fix Codex status dict key mismatch ([#448](https://github.com/NousResearch/hermes-agent/pull/448)) and visibility filter ([#446](https://github.com/NousResearch/hermes-agent/pull/446)) — @PercyDikec
|
| 283 |
+
- Strip \<think\> blocks from final user-facing responses ([#174](https://github.com/NousResearch/hermes-agent/pull/174)) — @Bartok9
|
| 284 |
+
- Fix \<think\> block regex stripping visible content when model discusses tags literally ([#786](https://github.com/NousResearch/hermes-agent/issues/786))
|
| 285 |
+
- Fix Mistral 422 errors from leftover finish_reason in assistant messages ([#253](https://github.com/NousResearch/hermes-agent/pull/253)) — @Sertug17
|
| 286 |
+
- Fix OPENROUTER_API_KEY resolution order across all code paths ([#295](https://github.com/NousResearch/hermes-agent/pull/295)) — @0xbyt4
|
| 287 |
+
- Fix OPENAI_BASE_URL API key priority ([#420](https://github.com/NousResearch/hermes-agent/pull/420)) — @manuelschipper
|
| 288 |
+
- Fix Anthropic "prompt is too long" 400 error not detected as context length error ([#813](https://github.com/NousResearch/hermes-agent/issues/813))
|
| 289 |
+
- Fix SQLite session transcript accumulating duplicate messages — 3-4x token inflation ([#860](https://github.com/NousResearch/hermes-agent/issues/860))
|
| 290 |
+
- Fix setup wizard skipping API key prompts on first install ([#748](https://github.com/NousResearch/hermes-agent/pull/748))
|
| 291 |
+
- Fix setup wizard showing OpenRouter model list for Nous Portal ([#575](https://github.com/NousResearch/hermes-agent/pull/575)) — @PercyDikec
|
| 292 |
+
- Fix provider selection not persisting when switching via hermes model ([#881](https://github.com/NousResearch/hermes-agent/pull/881))
|
| 293 |
+
- Fix Docker backend failing when docker not in PATH on macOS ([#889](https://github.com/NousResearch/hermes-agent/pull/889))
|
| 294 |
+
- Fix ClawHub Skills Hub adapter for API endpoint changes ([#286](https://github.com/NousResearch/hermes-agent/pull/286)) — @BP602
|
| 295 |
+
- Fix Honcho auto-enable when API key is present ([#243](https://github.com/NousResearch/hermes-agent/pull/243)) — @Bartok9
|
| 296 |
+
- Fix duplicate 'skills' subparser crash on Python 3.11+ ([#898](https://github.com/NousResearch/hermes-agent/issues/898))
|
| 297 |
+
- Fix memory tool entry parsing when content contains section sign ([#162](https://github.com/NousResearch/hermes-agent/pull/162)) — @aydnOktay
|
| 298 |
+
- Fix piped install silently aborting when interactive prompts fail ([#72](https://github.com/NousResearch/hermes-agent/pull/72)) — @cutepawss
|
| 299 |
+
- Fix false positives in recursive delete detection ([#68](https://github.com/NousResearch/hermes-agent/pull/68)) — @cutepawss
|
| 300 |
+
- Fix Ruff lint warnings across codebase ([#608](https://github.com/NousResearch/hermes-agent/pull/608)) — @JackTheGit
|
| 301 |
+
- Fix Anthropic native base URL fail-fast ([#173](https://github.com/NousResearch/hermes-agent/pull/173)) — @adavyas
|
| 302 |
+
- Fix install.sh creating ~/.hermes before moving Node.js directory ([#53](https://github.com/NousResearch/hermes-agent/pull/53)) — @JoshuaMart
|
| 303 |
+
- Fix SystemExit traceback during atexit cleanup on Ctrl+C ([#55](https://github.com/NousResearch/hermes-agent/pull/55)) — @bierlingm
|
| 304 |
+
- Restore missing MIT license file ([#620](https://github.com/NousResearch/hermes-agent/pull/620)) — @stablegenius49
|
| 305 |
+
|
| 306 |
+
---
|
| 307 |
+
|
| 308 |
+
## 🧪 Testing
|
| 309 |
+
|
| 310 |
+
- **3,289 tests** across agent, gateway, tools, cron, and CLI
|
| 311 |
+
- Parallelized test suite with pytest-xdist ([#802](https://github.com/NousResearch/hermes-agent/pull/802)) — @OutThisLife
|
| 312 |
+
- Unit tests batch 1: 8 core modules ([#60](https://github.com/NousResearch/hermes-agent/pull/60)) — @0xbyt4
|
| 313 |
+
- Unit tests batch 2: 8 more modules ([#62](https://github.com/NousResearch/hermes-agent/pull/62)) — @0xbyt4
|
| 314 |
+
- Unit tests batch 3: 8 untested modules ([#191](https://github.com/NousResearch/hermes-agent/pull/191)) — @0xbyt4
|
| 315 |
+
- Unit tests batch 4: 5 security/logic-critical modules ([#193](https://github.com/NousResearch/hermes-agent/pull/193)) — @0xbyt4
|
| 316 |
+
- AIAgent (run_agent.py) unit tests ([#67](https://github.com/NousResearch/hermes-agent/pull/67)) — @0xbyt4
|
| 317 |
+
- Trajectory compressor tests ([#203](https://github.com/NousResearch/hermes-agent/pull/203)) — @0xbyt4
|
| 318 |
+
- Clarify tool tests ([#121](https://github.com/NousResearch/hermes-agent/pull/121)) — @Bartok9
|
| 319 |
+
- Telegram format tests — 43 tests for italic/bold/code rendering ([#204](https://github.com/NousResearch/hermes-agent/pull/204)) — @0xbyt4
|
| 320 |
+
- Vision tools type hints + 42 tests ([#792](https://github.com/NousResearch/hermes-agent/pull/792))
|
| 321 |
+
- Compressor tool-call boundary regression tests ([#648](https://github.com/NousResearch/hermes-agent/pull/648)) — @intertwine
|
| 322 |
+
- Test structure reorganization ([#34](https://github.com/NousResearch/hermes-agent/pull/34)) — @0xbyt4
|
| 323 |
+
- Shell noise elimination + fix 36 test failures ([#293](https://github.com/NousResearch/hermes-agent/pull/293)) — @0xbyt4
|
| 324 |
+
|
| 325 |
+
---
|
| 326 |
+
|
| 327 |
+
## 🔬 RL & Evaluation Environments
|
| 328 |
+
|
| 329 |
+
- WebResearchEnv — Multi-step web research RL environment ([#434](https://github.com/NousResearch/hermes-agent/pull/434)) — @jackx707
|
| 330 |
+
- Modal sandbox concurrency limits to avoid deadlocks ([#621](https://github.com/NousResearch/hermes-agent/pull/621)) — @voteblake
|
| 331 |
+
- Hermes-atropos-environments bundled skill ([#815](https://github.com/NousResearch/hermes-agent/pull/815))
|
| 332 |
+
- Local vLLM instance support for evaluation — @dmahan93
|
| 333 |
+
- YC-Bench long-horizon agent benchmark environment
|
| 334 |
+
- OpenThoughts-TBLite evaluation environment and scripts
|
| 335 |
+
|
| 336 |
+
---
|
| 337 |
+
|
| 338 |
+
## 📚 Documentation
|
| 339 |
+
|
| 340 |
+
- Full documentation website (Docusaurus) with 37+ pages
|
| 341 |
+
- Comprehensive platform setup guides for Telegram, Discord, Slack, WhatsApp, Signal, Email
|
| 342 |
+
- AGENTS.md — development guide for AI coding assistants
|
| 343 |
+
- CONTRIBUTING.md ([#117](https://github.com/NousResearch/hermes-agent/pull/117)) — @Bartok9
|
| 344 |
+
- Slash commands reference ([#142](https://github.com/NousResearch/hermes-agent/pull/142)) — @Bartok9
|
| 345 |
+
- Comprehensive AGENTS.md accuracy audit ([#732](https://github.com/NousResearch/hermes-agent/pull/732))
|
| 346 |
+
- Skin/theme system documentation
|
| 347 |
+
- MCP documentation and examples
|
| 348 |
+
- Docs accuracy audit — 35+ corrections
|
| 349 |
+
- Documentation typo fixes ([#825](https://github.com/NousResearch/hermes-agent/pull/825), [#439](https://github.com/NousResearch/hermes-agent/pull/439)) — @JackTheGit
|
| 350 |
+
- CLI config precedence and terminology standardization ([#166](https://github.com/NousResearch/hermes-agent/pull/166), [#167](https://github.com/NousResearch/hermes-agent/pull/167), [#168](https://github.com/NousResearch/hermes-agent/pull/168)) — @Jr-kenny
|
| 351 |
+
- Telegram token regex documentation ([#713](https://github.com/NousResearch/hermes-agent/pull/713)) — @VolodymyrBg
|
| 352 |
+
|
| 353 |
+
---
|
| 354 |
+
|
| 355 |
+
## 👥 Contributors
|
| 356 |
+
|
| 357 |
+
Thank you to the 63 contributors who made this release possible! In just over two weeks, the Hermes Agent community came together to ship an extraordinary amount of work.
|
| 358 |
+
|
| 359 |
+
### Core
|
| 360 |
+
- **@teknium1** — 43 PRs: Project lead, core architecture, provider router, sessions, skills, CLI, documentation
|
| 361 |
+
|
| 362 |
+
### Top Community Contributors
|
| 363 |
+
- **@0xbyt4** — 40 PRs: MCP client, Home Assistant, security fixes (symlink, prompt injection, cron), extensive test coverage (6 batches), ascii-art skill, shell noise elimination, skills sync, Telegram formatting, and dozens more
|
| 364 |
+
- **@Farukest** — 16 PRs: Security hardening (path traversal, dangerous command detection, symlink boundary), Windows compatibility (POSIX guards, path handling), WhatsApp fixes, max-iterations retry, gateway fixes
|
| 365 |
+
- **@aydnOktay** — 11 PRs: Atomic writes (process checkpoints, batch runner, skill files), error handling improvements across Telegram, Discord, code execution, transcription, TTS, and skills
|
| 366 |
+
- **@Bartok9** — 9 PRs: CONTRIBUTING.md, slash commands reference, Discord channel topics, think-block stripping, TTS fix, Honcho fix, session count fix, clarify tests
|
| 367 |
+
- **@PercyDikec** — 7 PRs: DeepSeek V3 parser fix, /retry response discard, gateway transcript offset, Codex status/visibility, max-iterations retry, setup wizard fix
|
| 368 |
+
- **@teyrebaz33** — 5 PRs: Skills enable/disable system, quick commands, personality customization, conditional skill activation
|
| 369 |
+
- **@alireza78a** — 5 PRs: Atomic writes (cron, sessions), fd leak prevention, security allowlist, code execution socket cleanup
|
| 370 |
+
- **@shitcoinsherpa** — 3 PRs: Windows support (pywinpty, UTF-8 encoding, auth store lock)
|
| 371 |
+
- **@Himess** — 3 PRs: Cron/HomeAssistant/Daytona fix, Windows drive-letter parsing, .env permissions
|
| 372 |
+
- **@satelerd** — 2 PRs: WhatsApp native media, multi-user session isolation
|
| 373 |
+
- **@rovle** — 1 PR: Daytona cloud sandbox backend (4 commits)
|
| 374 |
+
- **@erosika** — 1 PR: Honcho AI-native memory integration
|
| 375 |
+
- **@dmahan93** — 1 PR: --fuck-it-ship-it flag + RL environment work
|
| 376 |
+
- **@SHL0MS** — 1 PR: ASCII video skill
|
| 377 |
+
|
| 378 |
+
### All Contributors
|
| 379 |
+
@0xbyt4, @BP602, @Bartok9, @Farukest, @FurkanL0, @Himess, @Indelwin, @JackTheGit, @JoshuaMart, @Jr-kenny, @OutThisLife, @PercyDikec, @PeterFile, @SHL0MS, @Sertug17, @VencentSoliman, @VolodymyrBg, @adavyas, @alireza78a, @areu01or00, @aydnOktay, @batuhankocyigit, @bierlingm, @caentzminger, @cesareth, @ch3ronsa, @christomitov, @cutepawss, @deankerr, @dmahan93, @dogiladeveloper, @dragonkhoi, @erosika, @gamedevCloudy, @gizdusum, @grp06, @intertwine, @jackx707, @jdblackstar, @johnh4098, @kaos35, @kshitijk4poor, @leonsgithub, @luisv-1, @manuelschipper, @mehmetkr-31, @memosr, @rewbs, @rovle, @rsavitt, @satelerd, @spanishflu-est1918, @stablegenius49, @tars90percent, @tekelala, @teknium1, @teyrebaz33, @tripledoublev, @unmodeled-tyler, @voidborne-d, @voteblake, @ygd58
|
| 380 |
+
|
| 381 |
+
---
|
| 382 |
+
|
| 383 |
+
**Full Changelog**: [v0.1.0...v2026.3.12](https://github.com/NousResearch/hermes-agent/compare/v0.1.0...v2026.3.12)
|
RELEASE_v0.3.0.md
ADDED
|
@@ -0,0 +1,377 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Hermes Agent v0.3.0 (v2026.3.17)
|
| 2 |
+
|
| 3 |
+
**Release Date:** March 17, 2026
|
| 4 |
+
|
| 5 |
+
> The streaming, plugins, and provider release — unified real-time token delivery, first-class plugin architecture, rebuilt provider system with Vercel AI Gateway, native Anthropic provider, smart approvals, live Chrome CDP browser connect, ACP IDE integration, Honcho memory, voice mode, persistent shell, and 50+ bug fixes across every platform.
|
| 6 |
+
|
| 7 |
+
---
|
| 8 |
+
|
| 9 |
+
## ✨ Highlights
|
| 10 |
+
|
| 11 |
+
- **Unified Streaming Infrastructure** — Real-time token-by-token delivery in CLI and all gateway platforms. Responses stream as they're generated instead of arriving as a block. ([#1538](https://github.com/NousResearch/hermes-agent/pull/1538))
|
| 12 |
+
|
| 13 |
+
- **First-Class Plugin Architecture** — Drop Python files into `~/.hermes/plugins/` to extend Hermes with custom tools, commands, and hooks. No forking required. ([#1544](https://github.com/NousResearch/hermes-agent/pull/1544), [#1555](https://github.com/NousResearch/hermes-agent/pull/1555))
|
| 14 |
+
|
| 15 |
+
- **Native Anthropic Provider** — Direct Anthropic API calls with Claude Code credential auto-discovery, OAuth PKCE flows, and native prompt caching. No OpenRouter middleman needed. ([#1097](https://github.com/NousResearch/hermes-agent/pull/1097))
|
| 16 |
+
|
| 17 |
+
- **Smart Approvals + /stop Command** — Codex-inspired approval system that learns which commands are safe and remembers your preferences. `/stop` kills the current agent run immediately. ([#1543](https://github.com/NousResearch/hermes-agent/pull/1543))
|
| 18 |
+
|
| 19 |
+
- **Honcho Memory Integration** — Async memory writes, configurable recall modes, session title integration, and multi-user isolation in gateway mode. By @erosika. ([#736](https://github.com/NousResearch/hermes-agent/pull/736))
|
| 20 |
+
|
| 21 |
+
- **Voice Mode** — Push-to-talk in CLI, voice notes in Telegram/Discord, Discord voice channel support, and local Whisper transcription via faster-whisper. ([#1299](https://github.com/NousResearch/hermes-agent/pull/1299), [#1185](https://github.com/NousResearch/hermes-agent/pull/1185), [#1429](https://github.com/NousResearch/hermes-agent/pull/1429))
|
| 22 |
+
|
| 23 |
+
- **Concurrent Tool Execution** — Multiple independent tool calls now run in parallel via ThreadPoolExecutor, significantly reducing latency for multi-tool turns. ([#1152](https://github.com/NousResearch/hermes-agent/pull/1152))
|
| 24 |
+
|
| 25 |
+
- **PII Redaction** — When `privacy.redact_pii` is enabled, personally identifiable information is automatically scrubbed before sending context to LLM providers. ([#1542](https://github.com/NousResearch/hermes-agent/pull/1542))
|
| 26 |
+
|
| 27 |
+
- **`/browser connect` via CDP** — Attach browser tools to a live Chrome instance through Chrome DevTools Protocol. Debug, inspect, and interact with pages you already have open. ([#1549](https://github.com/NousResearch/hermes-agent/pull/1549))
|
| 28 |
+
|
| 29 |
+
- **Vercel AI Gateway Provider** — Route Hermes through Vercel's AI Gateway for access to their model catalog and infrastructure. ([#1628](https://github.com/NousResearch/hermes-agent/pull/1628))
|
| 30 |
+
|
| 31 |
+
- **Centralized Provider Router** — Rebuilt provider system with `call_llm` API, unified `/model` command, auto-detect provider on model switch, and direct endpoint overrides for auxiliary/delegation clients. ([#1003](https://github.com/NousResearch/hermes-agent/pull/1003), [#1506](https://github.com/NousResearch/hermes-agent/pull/1506), [#1375](https://github.com/NousResearch/hermes-agent/pull/1375))
|
| 32 |
+
|
| 33 |
+
- **ACP Server (IDE Integration)** — VS Code, Zed, and JetBrains can now connect to Hermes as an agent backend, with full slash command support. ([#1254](https://github.com/NousResearch/hermes-agent/pull/1254), [#1532](https://github.com/NousResearch/hermes-agent/pull/1532))
|
| 34 |
+
|
| 35 |
+
- **Persistent Shell Mode** — Local and SSH terminal backends can maintain shell state across tool calls — cd, env vars, and aliases persist. By @alt-glitch. ([#1067](https://github.com/NousResearch/hermes-agent/pull/1067), [#1483](https://github.com/NousResearch/hermes-agent/pull/1483))
|
| 36 |
+
|
| 37 |
+
- **Agentic On-Policy Distillation (OPD)** — New RL training environment for distilling agent policies, expanding the Atropos training ecosystem. ([#1149](https://github.com/NousResearch/hermes-agent/pull/1149))
|
| 38 |
+
|
| 39 |
+
---
|
| 40 |
+
|
| 41 |
+
## 🏗️ Core Agent & Architecture
|
| 42 |
+
|
| 43 |
+
### Provider & Model Support
|
| 44 |
+
- **Centralized provider router** with `call_llm` API and unified `/model` command — switch models and providers seamlessly ([#1003](https://github.com/NousResearch/hermes-agent/pull/1003))
|
| 45 |
+
- **Vercel AI Gateway** provider support ([#1628](https://github.com/NousResearch/hermes-agent/pull/1628))
|
| 46 |
+
- **Auto-detect provider** when switching models via `/model` ([#1506](https://github.com/NousResearch/hermes-agent/pull/1506))
|
| 47 |
+
- **Direct endpoint overrides** for auxiliary and delegation clients — point vision/subagent calls at specific endpoints ([#1375](https://github.com/NousResearch/hermes-agent/pull/1375))
|
| 48 |
+
- **Native Anthropic auxiliary vision** — use Claude's native vision API instead of routing through OpenAI-compatible endpoints ([#1377](https://github.com/NousResearch/hermes-agent/pull/1377))
|
| 49 |
+
- Anthropic OAuth flow improvements — auto-run `claude setup-token`, reauthentication, PKCE state persistence, identity fingerprinting ([#1132](https://github.com/NousResearch/hermes-agent/pull/1132), [#1360](https://github.com/NousResearch/hermes-agent/pull/1360), [#1396](https://github.com/NousResearch/hermes-agent/pull/1396), [#1597](https://github.com/NousResearch/hermes-agent/pull/1597))
|
| 50 |
+
- Fix adaptive thinking without `budget_tokens` for Claude 4.6 models — by @ASRagab ([#1128](https://github.com/NousResearch/hermes-agent/pull/1128))
|
| 51 |
+
- Fix Anthropic cache markers through adapter — by @brandtcormorant ([#1216](https://github.com/NousResearch/hermes-agent/pull/1216))
|
| 52 |
+
- Retry Anthropic 429/529 errors and surface details to users — by @0xbyt4 ([#1585](https://github.com/NousResearch/hermes-agent/pull/1585))
|
| 53 |
+
- Fix Anthropic adapter max_tokens, fallback crash, proxy base_url — by @0xbyt4 ([#1121](https://github.com/NousResearch/hermes-agent/pull/1121))
|
| 54 |
+
- Fix DeepSeek V3 parser dropping multiple parallel tool calls — by @mr-emmett-one ([#1365](https://github.com/NousResearch/hermes-agent/pull/1365), [#1300](https://github.com/NousResearch/hermes-agent/pull/1300))
|
| 55 |
+
- Accept unlisted models with warning instead of rejecting ([#1047](https://github.com/NousResearch/hermes-agent/pull/1047), [#1102](https://github.com/NousResearch/hermes-agent/pull/1102))
|
| 56 |
+
- Skip reasoning params for unsupported OpenRouter models ([#1485](https://github.com/NousResearch/hermes-agent/pull/1485))
|
| 57 |
+
- MiniMax Anthropic API compatibility fix ([#1623](https://github.com/NousResearch/hermes-agent/pull/1623))
|
| 58 |
+
- Custom endpoint `/models` verification and `/v1` base URL suggestion ([#1480](https://github.com/NousResearch/hermes-agent/pull/1480))
|
| 59 |
+
- Resolve delegation providers from `custom_providers` config ([#1328](https://github.com/NousResearch/hermes-agent/pull/1328))
|
| 60 |
+
- Kimi model additions and User-Agent fix ([#1039](https://github.com/NousResearch/hermes-agent/pull/1039))
|
| 61 |
+
- Strip `call_id`/`response_item_id` for Mistral compatibility ([#1058](https://github.com/NousResearch/hermes-agent/pull/1058))
|
| 62 |
+
|
| 63 |
+
### Agent Loop & Conversation
|
| 64 |
+
- **Anthropic Context Editing API** support ([#1147](https://github.com/NousResearch/hermes-agent/pull/1147))
|
| 65 |
+
- Improved context compaction handoff summaries — compressor now preserves more actionable state ([#1273](https://github.com/NousResearch/hermes-agent/pull/1273))
|
| 66 |
+
- Sync session_id after mid-run context compression ([#1160](https://github.com/NousResearch/hermes-agent/pull/1160))
|
| 67 |
+
- Session hygiene threshold tuned to 50% for more proactive compression ([#1096](https://github.com/NousResearch/hermes-agent/pull/1096), [#1161](https://github.com/NousResearch/hermes-agent/pull/1161))
|
| 68 |
+
- Include session ID in system prompt via `--pass-session-id` flag ([#1040](https://github.com/NousResearch/hermes-agent/pull/1040))
|
| 69 |
+
- Prevent closed OpenAI client reuse across retries ([#1391](https://github.com/NousResearch/hermes-agent/pull/1391))
|
| 70 |
+
- Sanitize chat payloads and provider precedence ([#1253](https://github.com/NousResearch/hermes-agent/pull/1253))
|
| 71 |
+
- Handle dict tool call arguments from Codex and local backends ([#1393](https://github.com/NousResearch/hermes-agent/pull/1393), [#1440](https://github.com/NousResearch/hermes-agent/pull/1440))
|
| 72 |
+
|
| 73 |
+
### Memory & Sessions
|
| 74 |
+
- **Improve memory prioritization** — user preferences and corrections weighted above procedural knowledge ([#1548](https://github.com/NousResearch/hermes-agent/pull/1548))
|
| 75 |
+
- Tighter memory and session recall guidance in system prompts ([#1329](https://github.com/NousResearch/hermes-agent/pull/1329))
|
| 76 |
+
- Persist CLI token counts to session DB for `/insights` ([#1498](https://github.com/NousResearch/hermes-agent/pull/1498))
|
| 77 |
+
- Keep Honcho recall out of the cached system prefix ([#1201](https://github.com/NousResearch/hermes-agent/pull/1201))
|
| 78 |
+
- Correct `seed_ai_identity` to use `session.add_messages()` ([#1475](https://github.com/NousResearch/hermes-agent/pull/1475))
|
| 79 |
+
- Isolate Honcho session routing for multi-user gateway ([#1500](https://github.com/NousResearch/hermes-agent/pull/1500))
|
| 80 |
+
|
| 81 |
+
---
|
| 82 |
+
|
| 83 |
+
## 📱 Messaging Platforms (Gateway)
|
| 84 |
+
|
| 85 |
+
### Gateway Core
|
| 86 |
+
- **System gateway service mode** — run as a system-level systemd service, not just user-level ([#1371](https://github.com/NousResearch/hermes-agent/pull/1371))
|
| 87 |
+
- **Gateway install scope prompts** — choose user vs system scope during setup ([#1374](https://github.com/NousResearch/hermes-agent/pull/1374))
|
| 88 |
+
- **Reasoning hot reload** — change reasoning settings without restarting the gateway ([#1275](https://github.com/NousResearch/hermes-agent/pull/1275))
|
| 89 |
+
- Default group sessions to per-user isolation — no more shared state across users in group chats ([#1495](https://github.com/NousResearch/hermes-agent/pull/1495), [#1417](https://github.com/NousResearch/hermes-agent/pull/1417))
|
| 90 |
+
- Harden gateway restart recovery ([#1310](https://github.com/NousResearch/hermes-agent/pull/1310))
|
| 91 |
+
- Cancel active runs during shutdown ([#1427](https://github.com/NousResearch/hermes-agent/pull/1427))
|
| 92 |
+
- SSL certificate auto-detection for NixOS and non-standard systems ([#1494](https://github.com/NousResearch/hermes-agent/pull/1494))
|
| 93 |
+
- Auto-detect D-Bus session bus for `systemctl --user` on headless servers ([#1601](https://github.com/NousResearch/hermes-agent/pull/1601))
|
| 94 |
+
- Auto-enable systemd linger during gateway install on headless servers ([#1334](https://github.com/NousResearch/hermes-agent/pull/1334))
|
| 95 |
+
- Fall back to module entrypoint when `hermes` is not on PATH ([#1355](https://github.com/NousResearch/hermes-agent/pull/1355))
|
| 96 |
+
- Fix dual gateways on macOS launchd after `hermes update` ([#1567](https://github.com/NousResearch/hermes-agent/pull/1567))
|
| 97 |
+
- Remove recursive ExecStop from systemd units ([#1530](https://github.com/NousResearch/hermes-agent/pull/1530))
|
| 98 |
+
- Prevent logging handler accumulation in gateway mode ([#1251](https://github.com/NousResearch/hermes-agent/pull/1251))
|
| 99 |
+
- Restart on retryable startup failures — by @jplew ([#1517](https://github.com/NousResearch/hermes-agent/pull/1517))
|
| 100 |
+
- Backfill model on gateway sessions after agent runs ([#1306](https://github.com/NousResearch/hermes-agent/pull/1306))
|
| 101 |
+
- PID-based gateway kill and deferred config write ([#1499](https://github.com/NousResearch/hermes-agent/pull/1499))
|
| 102 |
+
|
| 103 |
+
### Telegram
|
| 104 |
+
- Buffer media groups to prevent self-interruption from photo bursts ([#1341](https://github.com/NousResearch/hermes-agent/pull/1341), [#1422](https://github.com/NousResearch/hermes-agent/pull/1422))
|
| 105 |
+
- Retry on transient TLS failures during connect and send ([#1535](https://github.com/NousResearch/hermes-agent/pull/1535))
|
| 106 |
+
- Harden polling conflict handling ([#1339](https://github.com/NousResearch/hermes-agent/pull/1339))
|
| 107 |
+
- Escape chunk indicators and inline code in MarkdownV2 ([#1478](https://github.com/NousResearch/hermes-agent/pull/1478), [#1626](https://github.com/NousResearch/hermes-agent/pull/1626))
|
| 108 |
+
- Check updater/app state before disconnect ([#1389](https://github.com/NousResearch/hermes-agent/pull/1389))
|
| 109 |
+
|
| 110 |
+
### Discord
|
| 111 |
+
- `/thread` command with `auto_thread` config and media metadata fixes ([#1178](https://github.com/NousResearch/hermes-agent/pull/1178))
|
| 112 |
+
- Auto-thread on @mention, skip mention text in bot threads ([#1438](https://github.com/NousResearch/hermes-agent/pull/1438))
|
| 113 |
+
- Retry without reply reference for system messages ([#1385](https://github.com/NousResearch/hermes-agent/pull/1385))
|
| 114 |
+
- Preserve native document and video attachment support ([#1392](https://github.com/NousResearch/hermes-agent/pull/1392))
|
| 115 |
+
- Defer discord adapter annotations to avoid optional import crashes ([#1314](https://github.com/NousResearch/hermes-agent/pull/1314))
|
| 116 |
+
|
| 117 |
+
### Slack
|
| 118 |
+
- Thread handling overhaul — progress messages, responses, and session isolation all respect threads ([#1103](https://github.com/NousResearch/hermes-agent/pull/1103))
|
| 119 |
+
- Formatting, reactions, user resolution, and command improvements ([#1106](https://github.com/NousResearch/hermes-agent/pull/1106))
|
| 120 |
+
- Fix MAX_MESSAGE_LENGTH 3900 → 39000 ([#1117](https://github.com/NousResearch/hermes-agent/pull/1117))
|
| 121 |
+
- File upload fallback preserves thread context — by @0xbyt4 ([#1122](https://github.com/NousResearch/hermes-agent/pull/1122))
|
| 122 |
+
- Improve setup guidance ([#1387](https://github.com/NousResearch/hermes-agent/pull/1387))
|
| 123 |
+
|
| 124 |
+
### Email
|
| 125 |
+
- Fix IMAP UID tracking and SMTP TLS verification ([#1305](https://github.com/NousResearch/hermes-agent/pull/1305))
|
| 126 |
+
- Add `skip_attachments` option via config.yaml ([#1536](https://github.com/NousResearch/hermes-agent/pull/1536))
|
| 127 |
+
|
| 128 |
+
### Home Assistant
|
| 129 |
+
- Event filtering closed by default ([#1169](https://github.com/NousResearch/hermes-agent/pull/1169))
|
| 130 |
+
|
| 131 |
+
---
|
| 132 |
+
|
| 133 |
+
## 🖥️ CLI & User Experience
|
| 134 |
+
|
| 135 |
+
### Interactive CLI
|
| 136 |
+
- **Persistent CLI status bar** — always-visible model, provider, and token counts ([#1522](https://github.com/NousResearch/hermes-agent/pull/1522))
|
| 137 |
+
- **File path autocomplete** in the input prompt ([#1545](https://github.com/NousResearch/hermes-agent/pull/1545))
|
| 138 |
+
- **`/plan` command** — generate implementation plans from specs ([#1372](https://github.com/NousResearch/hermes-agent/pull/1372), [#1381](https://github.com/NousResearch/hermes-agent/pull/1381))
|
| 139 |
+
- **Major `/rollback` improvements** — richer checkpoint history, clearer UX ([#1505](https://github.com/NousResearch/hermes-agent/pull/1505))
|
| 140 |
+
- **Preload CLI skills on launch** — skills are ready before the first prompt ([#1359](https://github.com/NousResearch/hermes-agent/pull/1359))
|
| 141 |
+
- **Centralized slash command registry** — all commands defined once, consumed everywhere ([#1603](https://github.com/NousResearch/hermes-agent/pull/1603))
|
| 142 |
+
- `/bg` alias for `/background` ([#1590](https://github.com/NousResearch/hermes-agent/pull/1590))
|
| 143 |
+
- Prefix matching for slash commands — `/mod` resolves to `/model` ([#1320](https://github.com/NousResearch/hermes-agent/pull/1320))
|
| 144 |
+
- `/new`, `/reset`, `/clear` now start genuinely fresh sessions ([#1237](https://github.com/NousResearch/hermes-agent/pull/1237))
|
| 145 |
+
- Accept session ID prefixes for session actions ([#1425](https://github.com/NousResearch/hermes-agent/pull/1425))
|
| 146 |
+
- TUI prompt and accent output now respect active skin ([#1282](https://github.com/NousResearch/hermes-agent/pull/1282))
|
| 147 |
+
- Centralize tool emoji metadata in registry + skin integration ([#1484](https://github.com/NousResearch/hermes-agent/pull/1484))
|
| 148 |
+
- "View full command" option added to dangerous command approval — by @teknium1, based on a community design ([#887](https://github.com/NousResearch/hermes-agent/pull/887))
|
| 149 |
+
- Non-blocking startup update check and banner deduplication ([#1386](https://github.com/NousResearch/hermes-agent/pull/1386))
|
| 150 |
+
- `/reasoning` command output ordering and inline think extraction fixes ([#1031](https://github.com/NousResearch/hermes-agent/pull/1031))
|
| 151 |
+
- Verbose mode shows full untruncated output ([#1472](https://github.com/NousResearch/hermes-agent/pull/1472))
|
| 152 |
+
- Fix `/status` to report live state and tokens ([#1476](https://github.com/NousResearch/hermes-agent/pull/1476))
|
| 153 |
+
- Seed a default global SOUL.md ([#1311](https://github.com/NousResearch/hermes-agent/pull/1311))
|
| 154 |
+
|
| 155 |
+
### Setup & Configuration
|
| 156 |
+
- **OpenClaw migration** during first-time setup — by @kshitijk4poor ([#981](https://github.com/NousResearch/hermes-agent/pull/981))
|
| 157 |
+
- `hermes claw migrate` command + migration docs ([#1059](https://github.com/NousResearch/hermes-agent/pull/1059))
|
| 158 |
+
- Smart vision setup that respects the user's chosen provider ([#1323](https://github.com/NousResearch/hermes-agent/pull/1323))
|
| 159 |
+
- Handle headless setup flows end-to-end ([#1274](https://github.com/NousResearch/hermes-agent/pull/1274))
|
| 160 |
+
- Prefer curses over `simple_term_menu` in setup.py ([#1487](https://github.com/NousResearch/hermes-agent/pull/1487))
|
| 161 |
+
- Show effective model and provider in `/status` ([#1284](https://github.com/NousResearch/hermes-agent/pull/1284))
|
| 162 |
+
- Config set examples use placeholder syntax ([#1322](https://github.com/NousResearch/hermes-agent/pull/1322))
|
| 163 |
+
- Reload .env over stale shell overrides ([#1434](https://github.com/NousResearch/hermes-agent/pull/1434))
|
| 164 |
+
- Fix is_coding_plan NameError crash — by @0xbyt4 ([#1123](https://github.com/NousResearch/hermes-agent/pull/1123))
|
| 165 |
+
- Add missing packages to setuptools config — by @alt-glitch ([#912](https://github.com/NousResearch/hermes-agent/pull/912))
|
| 166 |
+
- Installer: clarify why sudo is needed at every prompt ([#1602](https://github.com/NousResearch/hermes-agent/pull/1602))
|
| 167 |
+
|
| 168 |
+
---
|
| 169 |
+
|
| 170 |
+
## 🔧 Tool System
|
| 171 |
+
|
| 172 |
+
### Terminal & Execution
|
| 173 |
+
- **Persistent shell mode** for local and SSH backends — maintain shell state across tool calls — by @alt-glitch ([#1067](https://github.com/NousResearch/hermes-agent/pull/1067), [#1483](https://github.com/NousResearch/hermes-agent/pull/1483))
|
| 174 |
+
- **Tirith pre-exec command scanning** — security layer that analyzes commands before execution ([#1256](https://github.com/NousResearch/hermes-agent/pull/1256))
|
| 175 |
+
- Strip Hermes provider env vars from all subprocess environments ([#1157](https://github.com/NousResearch/hermes-agent/pull/1157), [#1172](https://github.com/NousResearch/hermes-agent/pull/1172), [#1399](https://github.com/NousResearch/hermes-agent/pull/1399), [#1419](https://github.com/NousResearch/hermes-agent/pull/1419)) — initial fix by @eren-karakus0
|
| 176 |
+
- SSH preflight check ([#1486](https://github.com/NousResearch/hermes-agent/pull/1486))
|
| 177 |
+
- Docker backend: make cwd workspace mount explicit opt-in ([#1534](https://github.com/NousResearch/hermes-agent/pull/1534))
|
| 178 |
+
- Add project root to PYTHONPATH in execute_code sandbox ([#1383](https://github.com/NousResearch/hermes-agent/pull/1383))
|
| 179 |
+
- Eliminate execute_code progress spam on gateway platforms ([#1098](https://github.com/NousResearch/hermes-agent/pull/1098))
|
| 180 |
+
- Clearer docker backend preflight errors ([#1276](https://github.com/NousResearch/hermes-agent/pull/1276))
|
| 181 |
+
|
| 182 |
+
### Browser
|
| 183 |
+
- **`/browser connect`** — attach browser tools to a live Chrome instance via CDP ([#1549](https://github.com/NousResearch/hermes-agent/pull/1549))
|
| 184 |
+
- Improve browser cleanup, local browser PATH setup, and screenshot recovery ([#1333](https://github.com/NousResearch/hermes-agent/pull/1333))
|
| 185 |
+
|
| 186 |
+
### MCP
|
| 187 |
+
- **Selective tool loading** with utility policies — filter which MCP tools are available ([#1302](https://github.com/NousResearch/hermes-agent/pull/1302))
|
| 188 |
+
- Auto-reload MCP tools when `mcp_servers` config changes without restart ([#1474](https://github.com/NousResearch/hermes-agent/pull/1474))
|
| 189 |
+
- Resolve npx stdio connection failures ([#1291](https://github.com/NousResearch/hermes-agent/pull/1291))
|
| 190 |
+
- Preserve MCP toolsets when saving platform tool config ([#1421](https://github.com/NousResearch/hermes-agent/pull/1421))
|
| 191 |
+
|
| 192 |
+
### Vision
|
| 193 |
+
- Unify vision backend gating ([#1367](https://github.com/NousResearch/hermes-agent/pull/1367))
|
| 194 |
+
- Surface actual error reason instead of generic message ([#1338](https://github.com/NousResearch/hermes-agent/pull/1338))
|
| 195 |
+
- Make Claude image handling work end-to-end ([#1408](https://github.com/NousResearch/hermes-agent/pull/1408))
|
| 196 |
+
|
| 197 |
+
### Cron
|
| 198 |
+
- **Compress cron management into one tool** — single `cronjob` tool replaces multiple commands ([#1343](https://github.com/NousResearch/hermes-agent/pull/1343))
|
| 199 |
+
- Suppress duplicate cron sends to auto-delivery targets ([#1357](https://github.com/NousResearch/hermes-agent/pull/1357))
|
| 200 |
+
- Persist cron sessions to SQLite ([#1255](https://github.com/NousResearch/hermes-agent/pull/1255))
|
| 201 |
+
- Per-job runtime overrides (provider, model, base_url) ([#1398](https://github.com/NousResearch/hermes-agent/pull/1398))
|
| 202 |
+
- Atomic write in `save_job_output` to prevent data loss on crash ([#1173](https://github.com/NousResearch/hermes-agent/pull/1173))
|
| 203 |
+
- Preserve thread context for `deliver=origin` ([#1437](https://github.com/NousResearch/hermes-agent/pull/1437))
|
| 204 |
+
|
| 205 |
+
### Patch Tool
|
| 206 |
+
- Avoid corrupting pipe chars in V4A patch apply ([#1286](https://github.com/NousResearch/hermes-agent/pull/1286))
|
| 207 |
+
- Permissive `block_anchor` thresholds and unicode normalization ([#1539](https://github.com/NousResearch/hermes-agent/pull/1539))
|
| 208 |
+
|
| 209 |
+
### Delegation
|
| 210 |
+
- Add observability metadata to subagent results (model, tokens, duration, tool trace) ([#1175](https://github.com/NousResearch/hermes-agent/pull/1175))
|
| 211 |
+
|
| 212 |
+
---
|
| 213 |
+
|
| 214 |
+
## 🧩 Skills Ecosystem
|
| 215 |
+
|
| 216 |
+
### Skills System
|
| 217 |
+
- **Integrate skills.sh** as a hub source alongside ClawHub ([#1303](https://github.com/NousResearch/hermes-agent/pull/1303))
|
| 218 |
+
- Secure skill env setup on load ([#1153](https://github.com/NousResearch/hermes-agent/pull/1153))
|
| 219 |
+
- Honor policy table for dangerous verdicts ([#1330](https://github.com/NousResearch/hermes-agent/pull/1330))
|
| 220 |
+
- Harden ClawHub skill search exact matches ([#1400](https://github.com/NousResearch/hermes-agent/pull/1400))
|
| 221 |
+
- Fix ClawHub skill install — use `/download` ZIP endpoint ([#1060](https://github.com/NousResearch/hermes-agent/pull/1060))
|
| 222 |
+
- Avoid mislabeling local skills as builtin — by @arceus77-7 ([#862](https://github.com/NousResearch/hermes-agent/pull/862))
|
| 223 |
+
|
| 224 |
+
### New Skills
|
| 225 |
+
- **Linear** project management ([#1230](https://github.com/NousResearch/hermes-agent/pull/1230))
|
| 226 |
+
- **X/Twitter** via x-cli ([#1285](https://github.com/NousResearch/hermes-agent/pull/1285))
|
| 227 |
+
- **Telephony** — Twilio, SMS, and AI calls ([#1289](https://github.com/NousResearch/hermes-agent/pull/1289))
|
| 228 |
+
- **1Password** — by @arceus77-7 ([#883](https://github.com/NousResearch/hermes-agent/pull/883), [#1179](https://github.com/NousResearch/hermes-agent/pull/1179))
|
| 229 |
+
- **NeuroSkill BCI** integration ([#1135](https://github.com/NousResearch/hermes-agent/pull/1135))
|
| 230 |
+
- **Blender MCP** for 3D modeling ([#1531](https://github.com/NousResearch/hermes-agent/pull/1531))
|
| 231 |
+
- **OSS Security Forensics** ([#1482](https://github.com/NousResearch/hermes-agent/pull/1482))
|
| 232 |
+
- **Parallel CLI** research skill ([#1301](https://github.com/NousResearch/hermes-agent/pull/1301))
|
| 233 |
+
- **OpenCode** CLI skill ([#1174](https://github.com/NousResearch/hermes-agent/pull/1174))
|
| 234 |
+
- **ASCII Video** skill refactored — by @SHL0MS ([#1213](https://github.com/NousResearch/hermes-agent/pull/1213), [#1598](https://github.com/NousResearch/hermes-agent/pull/1598))
|
| 235 |
+
|
| 236 |
+
---
|
| 237 |
+
|
| 238 |
+
## 🎙️ Voice Mode
|
| 239 |
+
|
| 240 |
+
- Voice mode foundation — push-to-talk CLI, Telegram/Discord voice notes ([#1299](https://github.com/NousResearch/hermes-agent/pull/1299))
|
| 241 |
+
- Free local Whisper transcription via faster-whisper ([#1185](https://github.com/NousResearch/hermes-agent/pull/1185))
|
| 242 |
+
- Discord voice channel reliability fixes ([#1429](https://github.com/NousResearch/hermes-agent/pull/1429))
|
| 243 |
+
- Restore local STT fallback for gateway voice notes ([#1490](https://github.com/NousResearch/hermes-agent/pull/1490))
|
| 244 |
+
- Honor `stt.enabled: false` across gateway transcription ([#1394](https://github.com/NousResearch/hermes-agent/pull/1394))
|
| 245 |
+
- Fix bogus incapability message on Telegram voice notes (Issue [#1033](https://github.com/NousResearch/hermes-agent/issues/1033))
|
| 246 |
+
|
| 247 |
+
---
|
| 248 |
+
|
| 249 |
+
## 🔌 ACP (IDE Integration)
|
| 250 |
+
|
| 251 |
+
- Restore ACP server implementation ([#1254](https://github.com/NousResearch/hermes-agent/pull/1254))
|
| 252 |
+
- Support slash commands in ACP adapter ([#1532](https://github.com/NousResearch/hermes-agent/pull/1532))
|
| 253 |
+
|
| 254 |
+
---
|
| 255 |
+
|
| 256 |
+
## 🧪 RL Training
|
| 257 |
+
|
| 258 |
+
- **Agentic On-Policy Distillation (OPD)** environment — new RL training environment for agent policy distillation ([#1149](https://github.com/NousResearch/hermes-agent/pull/1149))
|
| 259 |
+
- Make tinker-atropos RL training fully optional ([#1062](https://github.com/NousResearch/hermes-agent/pull/1062))
|
| 260 |
+
|
| 261 |
+
---
|
| 262 |
+
|
| 263 |
+
## 🔒 Security & Reliability
|
| 264 |
+
|
| 265 |
+
### Security Hardening
|
| 266 |
+
- **Tirith pre-exec command scanning** — static analysis of terminal commands before execution ([#1256](https://github.com/NousResearch/hermes-agent/pull/1256))
|
| 267 |
+
- **PII redaction** when `privacy.redact_pii` is enabled ([#1542](https://github.com/NousResearch/hermes-agent/pull/1542))
|
| 268 |
+
- Strip Hermes provider/gateway/tool env vars from all subprocess environments ([#1157](https://github.com/NousResearch/hermes-agent/pull/1157), [#1172](https://github.com/NousResearch/hermes-agent/pull/1172), [#1399](https://github.com/NousResearch/hermes-agent/pull/1399), [#1419](https://github.com/NousResearch/hermes-agent/pull/1419))
|
| 269 |
+
- Docker cwd workspace mount now explicit opt-in — never auto-mount host directories ([#1534](https://github.com/NousResearch/hermes-agent/pull/1534))
|
| 270 |
+
- Escape parens and braces in fork bomb regex pattern ([#1397](https://github.com/NousResearch/hermes-agent/pull/1397))
|
| 271 |
+
- Harden `.worktreeinclude` path containment ([#1388](https://github.com/NousResearch/hermes-agent/pull/1388))
|
| 272 |
+
- Use description as `pattern_key` to prevent approval collisions ([#1395](https://github.com/NousResearch/hermes-agent/pull/1395))
|
| 273 |
+
|
| 274 |
+
### Reliability
|
| 275 |
+
- Guard init-time stdio writes ([#1271](https://github.com/NousResearch/hermes-agent/pull/1271))
|
| 276 |
+
- Session log writes reuse shared atomic JSON helper ([#1280](https://github.com/NousResearch/hermes-agent/pull/1280))
|
| 277 |
+
- Atomic temp cleanup protected on interrupts ([#1401](https://github.com/NousResearch/hermes-agent/pull/1401))
|
| 278 |
+
|
| 279 |
+
---
|
| 280 |
+
|
| 281 |
+
## 🐛 Notable Bug Fixes
|
| 282 |
+
|
| 283 |
+
- **`/status` always showing 0 tokens** — now reports live state (Issue [#1465](https://github.com/NousResearch/hermes-agent/issues/1465), [#1476](https://github.com/NousResearch/hermes-agent/pull/1476))
|
| 284 |
+
- **Custom model endpoints not working** — restored config-saved endpoint resolution (Issue [#1460](https://github.com/NousResearch/hermes-agent/issues/1460), [#1373](https://github.com/NousResearch/hermes-agent/pull/1373))
|
| 285 |
+
- **MCP tools not visible until restart** — auto-reload on config change (Issue [#1036](https://github.com/NousResearch/hermes-agent/issues/1036), [#1474](https://github.com/NousResearch/hermes-agent/pull/1474))
|
| 286 |
+
- **`hermes tools` removing MCP tools** — preserve MCP toolsets when saving (Issue [#1247](https://github.com/NousResearch/hermes-agent/issues/1247), [#1421](https://github.com/NousResearch/hermes-agent/pull/1421))
|
| 287 |
+
- **Terminal subprocesses inheriting `OPENAI_BASE_URL`** breaking external tools (Issue [#1002](https://github.com/NousResearch/hermes-agent/issues/1002), [#1399](https://github.com/NousResearch/hermes-agent/pull/1399))
|
| 288 |
+
- **Background process lost on gateway restart** — improved recovery (Issue [#1144](https://github.com/NousResearch/hermes-agent/issues/1144))
|
| 289 |
+
- **Cron jobs not persisting state** — now stored in SQLite (Issue [#1416](https://github.com/NousResearch/hermes-agent/issues/1416), [#1255](https://github.com/NousResearch/hermes-agent/pull/1255))
|
| 290 |
+
- **Cronjob `deliver: origin` not preserving thread context** (Issue [#1219](https://github.com/NousResearch/hermes-agent/issues/1219), [#1437](https://github.com/NousResearch/hermes-agent/pull/1437))
|
| 291 |
+
- **Gateway systemd service failing to auto-restart** when browser processes orphaned (Issue [#1617](https://github.com/NousResearch/hermes-agent/issues/1617))
|
| 292 |
+
- **`/background` completion report cut off in Telegram** (Issue [#1443](https://github.com/NousResearch/hermes-agent/issues/1443))
|
| 293 |
+
- **Model switching not taking effect** (Issue [#1244](https://github.com/NousResearch/hermes-agent/issues/1244), [#1183](https://github.com/NousResearch/hermes-agent/pull/1183))
|
| 294 |
+
- **`hermes doctor` reporting cronjob as unavailable** (Issue [#878](https://github.com/NousResearch/hermes-agent/issues/878), [#1180](https://github.com/NousResearch/hermes-agent/pull/1180))
|
| 295 |
+
- **WhatsApp bridge messages not received** from mobile (Issue [#1142](https://github.com/NousResearch/hermes-agent/issues/1142))
|
| 296 |
+
- **Setup wizard hanging on headless SSH** (Issue [#905](https://github.com/NousResearch/hermes-agent/issues/905), [#1274](https://github.com/NousResearch/hermes-agent/pull/1274))
|
| 297 |
+
- **Log handler accumulation** degrading gateway performance (Issue [#990](https://github.com/NousResearch/hermes-agent/issues/990), [#1251](https://github.com/NousResearch/hermes-agent/pull/1251))
|
| 298 |
+
- **Gateway NULL model in DB** (Issue [#987](https://github.com/NousResearch/hermes-agent/issues/987), [#1306](https://github.com/NousResearch/hermes-agent/pull/1306))
|
| 299 |
+
- **Strict endpoints rejecting replayed tool_calls** (Issue [#893](https://github.com/NousResearch/hermes-agent/issues/893))
|
| 300 |
+
- **Remaining hardcoded `~/.hermes` paths** — all now respect `HERMES_HOME` (Issue [#892](https://github.com/NousResearch/hermes-agent/issues/892), [#1233](https://github.com/NousResearch/hermes-agent/pull/1233))
|
| 301 |
+
- **Delegate tool not working with custom inference providers** (Issue [#1011](https://github.com/NousResearch/hermes-agent/issues/1011), [#1328](https://github.com/NousResearch/hermes-agent/pull/1328))
|
| 302 |
+
- **Skills Guard blocking official skills** (Issue [#1006](https://github.com/NousResearch/hermes-agent/issues/1006), [#1330](https://github.com/NousResearch/hermes-agent/pull/1330))
|
| 303 |
+
- **Setup writing provider before model selection** (Issue [#1182](https://github.com/NousResearch/hermes-agent/issues/1182))
|
| 304 |
+
- **`GatewayConfig.get()` AttributeError** crashing all message handling (Issue [#1158](https://github.com/NousResearch/hermes-agent/issues/1158), [#1287](https://github.com/NousResearch/hermes-agent/pull/1287))
|
| 305 |
+
- **`/update` hard-failing with "command not found"** (Issue [#1049](https://github.com/NousResearch/hermes-agent/issues/1049))
|
| 306 |
+
- **Image analysis failing silently** (Issue [#1034](https://github.com/NousResearch/hermes-agent/issues/1034), [#1338](https://github.com/NousResearch/hermes-agent/pull/1338))
|
| 307 |
+
- **API `BadRequestError` from `'dict'` object has no attribute `'strip'`** (Issue [#1071](https://github.com/NousResearch/hermes-agent/issues/1071))
|
| 308 |
+
- **Slash commands requiring exact full name** — now uses prefix matching (Issue [#928](https://github.com/NousResearch/hermes-agent/issues/928), [#1320](https://github.com/NousResearch/hermes-agent/pull/1320))
|
| 309 |
+
- **Gateway stops responding when terminal is closed on headless** (Issue [#1005](https://github.com/NousResearch/hermes-agent/issues/1005))
|
| 310 |
+
|
| 311 |
+
---
|
| 312 |
+
|
| 313 |
+
## 🧪 Testing
|
| 314 |
+
|
| 315 |
+
- Cover empty cached Anthropic tool-call turns ([#1222](https://github.com/NousResearch/hermes-agent/pull/1222))
|
| 316 |
+
- Fix stale CI assumptions in parser and quick-command coverage ([#1236](https://github.com/NousResearch/hermes-agent/pull/1236))
|
| 317 |
+
- Fix gateway async tests without implicit event loop ([#1278](https://github.com/NousResearch/hermes-agent/pull/1278))
|
| 318 |
+
- Make gateway async tests xdist-safe ([#1281](https://github.com/NousResearch/hermes-agent/pull/1281))
|
| 319 |
+
- Cross-timezone naive timestamp regression for cron ([#1319](https://github.com/NousResearch/hermes-agent/pull/1319))
|
| 320 |
+
- Isolate codex provider tests from local env ([#1335](https://github.com/NousResearch/hermes-agent/pull/1335))
|
| 321 |
+
- Lock retry replacement semantics ([#1379](https://github.com/NousResearch/hermes-agent/pull/1379))
|
| 322 |
+
- Improve error logging in session search tool — by @aydnOktay ([#1533](https://github.com/NousResearch/hermes-agent/pull/1533))
|
| 323 |
+
|
| 324 |
+
---
|
| 325 |
+
|
| 326 |
+
## 📚 Documentation
|
| 327 |
+
|
| 328 |
+
- Comprehensive SOUL.md guide ([#1315](https://github.com/NousResearch/hermes-agent/pull/1315))
|
| 329 |
+
- Voice mode documentation ([#1316](https://github.com/NousResearch/hermes-agent/pull/1316), [#1362](https://github.com/NousResearch/hermes-agent/pull/1362))
|
| 330 |
+
- Provider contribution guide ([#1361](https://github.com/NousResearch/hermes-agent/pull/1361))
|
| 331 |
+
- ACP and internal systems implementation guides ([#1259](https://github.com/NousResearch/hermes-agent/pull/1259))
|
| 332 |
+
- Expand Docusaurus coverage across CLI, tools, skills, and skins ([#1232](https://github.com/NousResearch/hermes-agent/pull/1232))
|
| 333 |
+
- Terminal backend and Windows troubleshooting ([#1297](https://github.com/NousResearch/hermes-agent/pull/1297))
|
| 334 |
+
- Skills hub reference section ([#1317](https://github.com/NousResearch/hermes-agent/pull/1317))
|
| 335 |
+
- Checkpoint, /rollback, and git worktrees guide ([#1493](https://github.com/NousResearch/hermes-agent/pull/1493), [#1524](https://github.com/NousResearch/hermes-agent/pull/1524))
|
| 336 |
+
- CLI status bar and /usage reference ([#1523](https://github.com/NousResearch/hermes-agent/pull/1523))
|
| 337 |
+
- Fallback providers + /background command docs ([#1430](https://github.com/NousResearch/hermes-agent/pull/1430))
|
| 338 |
+
- Gateway service scopes docs ([#1378](https://github.com/NousResearch/hermes-agent/pull/1378))
|
| 339 |
+
- Slack thread reply behavior docs ([#1407](https://github.com/NousResearch/hermes-agent/pull/1407))
|
| 340 |
+
- Redesigned landing page with Nous blue palette — by @austinpickett ([#974](https://github.com/NousResearch/hermes-agent/pull/974))
|
| 341 |
+
- Fix several documentation typos — by @JackTheGit ([#953](https://github.com/NousResearch/hermes-agent/pull/953))
|
| 342 |
+
- Stabilize website diagrams ([#1405](https://github.com/NousResearch/hermes-agent/pull/1405))
|
| 343 |
+
- CLI vs messaging quick reference in README ([#1491](https://github.com/NousResearch/hermes-agent/pull/1491))
|
| 344 |
+
- Add search to Docusaurus ([#1053](https://github.com/NousResearch/hermes-agent/pull/1053))
|
| 345 |
+
- Home Assistant integration docs ([#1170](https://github.com/NousResearch/hermes-agent/pull/1170))
|
| 346 |
+
|
| 347 |
+
---
|
| 348 |
+
|
| 349 |
+
## 👥 Contributors
|
| 350 |
+
|
| 351 |
+
### Core
|
| 352 |
+
- **@teknium1** — 220+ PRs spanning every area of the codebase
|
| 353 |
+
|
| 354 |
+
### Top Community Contributors
|
| 355 |
+
|
| 356 |
+
- **@0xbyt4** (4 PRs) — Anthropic adapter fixes (max_tokens, fallback crash, 429/529 retry), Slack file upload thread context, setup NameError fix
|
| 357 |
+
- **@erosika** (1 PR) — Honcho memory integration: async writes, memory modes, session title integration
|
| 358 |
+
- **@SHL0MS** (2 PRs) — ASCII video skill design patterns and refactoring
|
| 359 |
+
- **@alt-glitch** (2 PRs) — Persistent shell mode for local/SSH backends, setuptools packaging fix
|
| 360 |
+
- **@arceus77-7** (2 PRs) — 1Password skill, fix skills list mislabeling
|
| 361 |
+
- **@kshitijk4poor** (1 PR) — OpenClaw migration during setup wizard
|
| 362 |
+
- **@ASRagab** (1 PR) — Fix adaptive thinking for Claude 4.6 models
|
| 363 |
+
- **@eren-karakus0** (1 PR) — Strip Hermes provider env vars from subprocess environment
|
| 364 |
+
- **@mr-emmett-one** (1 PR) — Fix DeepSeek V3 parser multi-tool call support
|
| 365 |
+
- **@jplew** (1 PR) — Gateway restart on retryable startup failures
|
| 366 |
+
- **@brandtcormorant** (1 PR) — Fix Anthropic cache control for empty text blocks
|
| 367 |
+
- **@aydnOktay** (1 PR) — Improve error logging in session search tool
|
| 368 |
+
- **@austinpickett** (1 PR) — Landing page redesign with Nous blue palette
|
| 369 |
+
- **@JackTheGit** (1 PR) — Documentation typo fixes
|
| 370 |
+
|
| 371 |
+
### All Contributors
|
| 372 |
+
|
| 373 |
+
@0xbyt4, @alt-glitch, @arceus77-7, @ASRagab, @austinpickett, @aydnOktay, @brandtcormorant, @eren-karakus0, @erosika, @JackTheGit, @jplew, @kshitijk4poor, @mr-emmett-one, @SHL0MS, @teknium1
|
| 374 |
+
|
| 375 |
+
---
|
| 376 |
+
|
| 377 |
+
**Full Changelog**: [v2026.3.12...v2026.3.17](https://github.com/NousResearch/hermes-agent/compare/v2026.3.12...v2026.3.17)
|
RELEASE_v0.4.0.md
ADDED
|
@@ -0,0 +1,400 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Hermes Agent v0.4.0 (v2026.3.23)
|
| 2 |
+
|
| 3 |
+
**Release Date:** March 23, 2026
|
| 4 |
+
|
| 5 |
+
> The platform expansion release — OpenAI-compatible API server, 6 new messaging adapters, 4 new inference providers, MCP server management with OAuth 2.1, @ context references, gateway prompt caching, streaming enabled by default, and a sweeping reliability pass with 200+ bug fixes.
|
| 6 |
+
|
| 7 |
+
---
|
| 8 |
+
|
| 9 |
+
## ✨ Highlights
|
| 10 |
+
|
| 11 |
+
- **OpenAI-compatible API server** — Expose Hermes as an `/v1/chat/completions` endpoint with a new `/api/jobs` REST API for cron job management, hardened with input limits, field whitelists, SQLite-backed response persistence, and CORS origin protection ([#1756](https://github.com/NousResearch/hermes-agent/pull/1756), [#2450](https://github.com/NousResearch/hermes-agent/pull/2450), [#2456](https://github.com/NousResearch/hermes-agent/pull/2456), [#2451](https://github.com/NousResearch/hermes-agent/pull/2451), [#2472](https://github.com/NousResearch/hermes-agent/pull/2472))
|
| 12 |
+
|
| 13 |
+
- **6 new messaging platform adapters** — Signal, DingTalk, SMS (Twilio), Mattermost, Matrix, and Webhook adapters join Telegram, Discord, and WhatsApp. Gateway auto-reconnects failed platforms with exponential backoff ([#2206](https://github.com/NousResearch/hermes-agent/pull/2206), [#1685](https://github.com/NousResearch/hermes-agent/pull/1685), [#1688](https://github.com/NousResearch/hermes-agent/pull/1688), [#1683](https://github.com/NousResearch/hermes-agent/pull/1683), [#2166](https://github.com/NousResearch/hermes-agent/pull/2166), [#2584](https://github.com/NousResearch/hermes-agent/pull/2584))
|
| 14 |
+
|
| 15 |
+
- **@ context references** — Claude Code-style `@file` and `@url` context injection with tab completions in the CLI ([#2343](https://github.com/NousResearch/hermes-agent/pull/2343), [#2482](https://github.com/NousResearch/hermes-agent/pull/2482))
|
| 16 |
+
|
| 17 |
+
- **4 new inference providers** — GitHub Copilot (OAuth + token validation), Alibaba Cloud / DashScope, Kilo Code, and OpenCode Zen/Go ([#1924](https://github.com/NousResearch/hermes-agent/pull/1924), [#1879](https://github.com/NousResearch/hermes-agent/pull/1879) by @mchzimm, [#1673](https://github.com/NousResearch/hermes-agent/pull/1673), [#1666](https://github.com/NousResearch/hermes-agent/pull/1666), [#1650](https://github.com/NousResearch/hermes-agent/pull/1650))
|
| 18 |
+
|
| 19 |
+
- **MCP server management CLI** — `hermes mcp` commands for installing, configuring, and authenticating MCP servers with full OAuth 2.1 PKCE flow ([#2465](https://github.com/NousResearch/hermes-agent/pull/2465))
|
| 20 |
+
|
| 21 |
+
- **Gateway prompt caching** — Cache AIAgent instances per session, preserving Anthropic prompt cache across turns for dramatic cost reduction on long conversations ([#2282](https://github.com/NousResearch/hermes-agent/pull/2282), [#2284](https://github.com/NousResearch/hermes-agent/pull/2284), [#2361](https://github.com/NousResearch/hermes-agent/pull/2361))
|
| 22 |
+
|
| 23 |
+
- **Context compression overhaul** — Structured summaries with iterative updates, token-budget tail protection, configurable summary endpoint, and fallback model support ([#2323](https://github.com/NousResearch/hermes-agent/pull/2323), [#1727](https://github.com/NousResearch/hermes-agent/pull/1727), [#2224](https://github.com/NousResearch/hermes-agent/pull/2224))
|
| 24 |
+
|
| 25 |
+
- **Streaming enabled by default** — CLI streaming on by default with proper spinner/tool progress display during streaming mode, plus extensive linebreak and concatenation fixes ([#2340](https://github.com/NousResearch/hermes-agent/pull/2340), [#2161](https://github.com/NousResearch/hermes-agent/pull/2161), [#2258](https://github.com/NousResearch/hermes-agent/pull/2258))
|
| 26 |
+
|
| 27 |
+
---
|
| 28 |
+
|
| 29 |
+
## 🖥️ CLI & User Experience
|
| 30 |
+
|
| 31 |
+
### New Commands & Interactions
|
| 32 |
+
- **@ context completions** — Tab-completable `@file`/`@url` references that inject file content or web pages into the conversation ([#2482](https://github.com/NousResearch/hermes-agent/pull/2482), [#2343](https://github.com/NousResearch/hermes-agent/pull/2343))
|
| 33 |
+
- **`/statusbar`** — Toggle a persistent config bar showing model + provider info in the prompt ([#2240](https://github.com/NousResearch/hermes-agent/pull/2240), [#1917](https://github.com/NousResearch/hermes-agent/pull/1917))
|
| 34 |
+
- **`/queue`** — Queue prompts for the agent without interrupting the current run ([#2191](https://github.com/NousResearch/hermes-agent/pull/2191), [#2469](https://github.com/NousResearch/hermes-agent/pull/2469))
|
| 35 |
+
- **`/permission`** — Switch approval mode dynamically during a session ([#2207](https://github.com/NousResearch/hermes-agent/pull/2207))
|
| 36 |
+
- **`/browser`** — Interactive browser sessions from the CLI ([#2273](https://github.com/NousResearch/hermes-agent/pull/2273), [#1814](https://github.com/NousResearch/hermes-agent/pull/1814))
|
| 37 |
+
- **`/cost`** — Live pricing and usage tracking in gateway mode ([#2180](https://github.com/NousResearch/hermes-agent/pull/2180))
|
| 38 |
+
- **`/approve` and `/deny`** — Replaced bare text approval in gateway with explicit commands ([#2002](https://github.com/NousResearch/hermes-agent/pull/2002))
|
| 39 |
+
|
| 40 |
+
### Streaming & Display
|
| 41 |
+
- Streaming enabled by default in CLI ([#2340](https://github.com/NousResearch/hermes-agent/pull/2340))
|
| 42 |
+
- Show spinners and tool progress during streaming mode ([#2161](https://github.com/NousResearch/hermes-agent/pull/2161))
|
| 43 |
+
- Show reasoning/thinking blocks when `show_reasoning` enabled ([#2118](https://github.com/NousResearch/hermes-agent/pull/2118))
|
| 44 |
+
- Context pressure warnings for CLI and gateway ([#2159](https://github.com/NousResearch/hermes-agent/pull/2159))
|
| 45 |
+
- Fix: streaming chunks concatenated without whitespace ([#2258](https://github.com/NousResearch/hermes-agent/pull/2258))
|
| 46 |
+
- Fix: iteration boundary linebreak prevents stream concatenation ([#2413](https://github.com/NousResearch/hermes-agent/pull/2413))
|
| 47 |
+
- Fix: defer streaming linebreak to prevent blank line stacking ([#2473](https://github.com/NousResearch/hermes-agent/pull/2473))
|
| 48 |
+
- Fix: suppress spinner animation in non-TTY environments ([#2216](https://github.com/NousResearch/hermes-agent/pull/2216))
|
| 49 |
+
- Fix: display provider and endpoint in API error messages ([#2266](https://github.com/NousResearch/hermes-agent/pull/2266))
|
| 50 |
+
- Fix: resolve garbled ANSI escape codes in status printouts ([#2448](https://github.com/NousResearch/hermes-agent/pull/2448))
|
| 51 |
+
- Fix: update gold ANSI color to true-color format ([#2246](https://github.com/NousResearch/hermes-agent/pull/2246))
|
| 52 |
+
- Fix: normalize toolset labels and use skin colors in banner ([#1912](https://github.com/NousResearch/hermes-agent/pull/1912))
|
| 53 |
+
|
| 54 |
+
### CLI Polish
|
| 55 |
+
- Fix: prevent 'Press ENTER to continue...' on exit ([#2555](https://github.com/NousResearch/hermes-agent/pull/2555))
|
| 56 |
+
- Fix: flush stdout during agent loop to prevent macOS display freeze ([#1654](https://github.com/NousResearch/hermes-agent/pull/1654))
|
| 57 |
+
- Fix: show human-readable error when `hermes setup` hits permissions error ([#2196](https://github.com/NousResearch/hermes-agent/pull/2196))
|
| 58 |
+
- Fix: `/stop` command crash + UnboundLocalError in streaming media delivery ([#2463](https://github.com/NousResearch/hermes-agent/pull/2463))
|
| 59 |
+
- Fix: allow custom/local endpoints without API key ([#2556](https://github.com/NousResearch/hermes-agent/pull/2556))
|
| 60 |
+
- Fix: Kitty keyboard protocol Shift+Enter for Ghostty/WezTerm (attempted + reverted due to prompt_toolkit crash) ([#2345](https://github.com/NousResearch/hermes-agent/pull/2345), [#2349](https://github.com/NousResearch/hermes-agent/pull/2349))
|
| 61 |
+
|
| 62 |
+
### Configuration
|
| 63 |
+
- **`${ENV_VAR}` substitution** in config.yaml ([#2684](https://github.com/NousResearch/hermes-agent/pull/2684))
|
| 64 |
+
- **Real-time config reload** — config.yaml changes apply without restart ([#2210](https://github.com/NousResearch/hermes-agent/pull/2210))
|
| 65 |
+
- **`custom_models.yaml`** for user-managed model additions ([#2214](https://github.com/NousResearch/hermes-agent/pull/2214))
|
| 66 |
+
- **Priority-based context file selection** + CLAUDE.md support ([#2301](https://github.com/NousResearch/hermes-agent/pull/2301))
|
| 67 |
+
- **Merge nested YAML sections** instead of replacing on config update ([#2213](https://github.com/NousResearch/hermes-agent/pull/2213))
|
| 68 |
+
- Fix: config.yaml provider key overrides env var silently ([#2272](https://github.com/NousResearch/hermes-agent/pull/2272))
|
| 69 |
+
- Fix: log warning instead of silently swallowing config.yaml errors ([#2683](https://github.com/NousResearch/hermes-agent/pull/2683))
|
| 70 |
+
- Fix: disabled toolsets re-enable themselves after `hermes tools` ([#2268](https://github.com/NousResearch/hermes-agent/pull/2268))
|
| 71 |
+
- Fix: platform default toolsets silently override tool deselection ([#2624](https://github.com/NousResearch/hermes-agent/pull/2624))
|
| 72 |
+
- Fix: honor bare YAML `approvals.mode: off` ([#2620](https://github.com/NousResearch/hermes-agent/pull/2620))
|
| 73 |
+
- Fix: `hermes update` now uses `.[all]` extras with fallback ([#1728](https://github.com/NousResearch/hermes-agent/pull/1728))
|
| 74 |
+
- Fix: `hermes update` prompt before resetting working tree on stash conflicts ([#2390](https://github.com/NousResearch/hermes-agent/pull/2390))
|
| 75 |
+
- Fix: use `git pull --rebase` in update/install to avoid divergent branch error ([#2274](https://github.com/NousResearch/hermes-agent/pull/2274))
|
| 76 |
+
- Fix: add zprofile fallback and create zshrc on fresh macOS installs ([#2320](https://github.com/NousResearch/hermes-agent/pull/2320))
|
| 77 |
+
- Fix: remove `ANTHROPIC_BASE_URL` env var to avoid collisions ([#1675](https://github.com/NousResearch/hermes-agent/pull/1675))
|
| 78 |
+
- Fix: don't ask IMAP password if already in keyring or env ([#2212](https://github.com/NousResearch/hermes-agent/pull/2212))
|
| 79 |
+
- Fix: OpenCode Zen/Go show OpenRouter models instead of their own ([#2277](https://github.com/NousResearch/hermes-agent/pull/2277))
|
| 80 |
+
|
| 81 |
+
---
|
| 82 |
+
|
| 83 |
+
## 🏗️ Core Agent & Architecture
|
| 84 |
+
|
| 85 |
+
### New Providers
|
| 86 |
+
- **GitHub Copilot** — Full OAuth authentication, API routing, token validation, and 400k context. ([#1924](https://github.com/NousResearch/hermes-agent/pull/1924), [#1896](https://github.com/NousResearch/hermes-agent/pull/1896), [#1879](https://github.com/NousResearch/hermes-agent/pull/1879) by @mchzimm, [#2507](https://github.com/NousResearch/hermes-agent/pull/2507))
|
| 87 |
+
- **Alibaba Cloud / DashScope** — Full integration with DashScope v1 runtime, model dot preservation, and 401 auth fixes ([#1673](https://github.com/NousResearch/hermes-agent/pull/1673), [#2332](https://github.com/NousResearch/hermes-agent/pull/2332), [#2459](https://github.com/NousResearch/hermes-agent/pull/2459))
|
| 88 |
+
- **Kilo Code** — First-class inference provider ([#1666](https://github.com/NousResearch/hermes-agent/pull/1666))
|
| 89 |
+
- **OpenCode Zen and OpenCode Go** — New provider backends ([#1650](https://github.com/NousResearch/hermes-agent/pull/1650), [#2393](https://github.com/NousResearch/hermes-agent/pull/2393) by @0xbyt4)
|
| 90 |
+
- **NeuTTS** — Local TTS provider backend with built-in setup flow, replacing the old optional skill ([#1657](https://github.com/NousResearch/hermes-agent/pull/1657), [#1664](https://github.com/NousResearch/hermes-agent/pull/1664))
|
| 91 |
+
|
| 92 |
+
### Provider Improvements
|
| 93 |
+
- **Eager fallback** to backup model on rate-limit errors ([#1730](https://github.com/NousResearch/hermes-agent/pull/1730))
|
| 94 |
+
- **Endpoint metadata** for custom model context and pricing; query local servers for actual context window size ([#1906](https://github.com/NousResearch/hermes-agent/pull/1906), [#2091](https://github.com/NousResearch/hermes-agent/pull/2091) by @dusterbloom)
|
| 95 |
+
- **Context length detection overhaul** — models.dev integration, provider-aware resolution, fuzzy matching for custom endpoints, `/v1/props` for llama.cpp ([#2158](https://github.com/NousResearch/hermes-agent/pull/2158), [#2051](https://github.com/NousResearch/hermes-agent/pull/2051), [#2403](https://github.com/NousResearch/hermes-agent/pull/2403))
|
| 96 |
+
- **Model catalog updates** — gpt-5.4-mini, gpt-5.4-nano, healer-alpha, haiku-4.5, minimax-m2.7, claude 4.6 at 1M context ([#1913](https://github.com/NousResearch/hermes-agent/pull/1913), [#1915](https://github.com/NousResearch/hermes-agent/pull/1915), [#1900](https://github.com/NousResearch/hermes-agent/pull/1900), [#2155](https://github.com/NousResearch/hermes-agent/pull/2155), [#2474](https://github.com/NousResearch/hermes-agent/pull/2474))
|
| 97 |
+
- **Custom endpoint improvements** — `model.base_url` in config.yaml, `api_mode` override for responses API, allow endpoints without API key, fail fast on missing keys ([#2330](https://github.com/NousResearch/hermes-agent/pull/2330), [#1651](https://github.com/NousResearch/hermes-agent/pull/1651), [#2556](https://github.com/NousResearch/hermes-agent/pull/2556), [#2445](https://github.com/NousResearch/hermes-agent/pull/2445), [#1994](https://github.com/NousResearch/hermes-agent/pull/1994), [#1998](https://github.com/NousResearch/hermes-agent/pull/1998))
|
| 98 |
+
- Inject model and provider into system prompt ([#1929](https://github.com/NousResearch/hermes-agent/pull/1929))
|
| 99 |
+
- Tie `api_mode` to provider config instead of env var ([#1656](https://github.com/NousResearch/hermes-agent/pull/1656))
|
| 100 |
+
- Fix: prevent Anthropic token leaking to third-party `anthropic_messages` providers ([#2389](https://github.com/NousResearch/hermes-agent/pull/2389))
|
| 101 |
+
- Fix: prevent Anthropic fallback from inheriting non-Anthropic `base_url` ([#2388](https://github.com/NousResearch/hermes-agent/pull/2388))
|
| 102 |
+
- Fix: `auxiliary_is_nous` flag never resets — leaked Nous tags to other providers ([#1713](https://github.com/NousResearch/hermes-agent/pull/1713))
|
| 103 |
+
- Fix: Anthropic `tool_choice 'none'` still allowed tool calls ([#1714](https://github.com/NousResearch/hermes-agent/pull/1714))
|
| 104 |
+
- Fix: Mistral parser nested JSON fallback extraction ([#2335](https://github.com/NousResearch/hermes-agent/pull/2335))
|
| 105 |
+
- Fix: MiniMax 401 auth resolved by defaulting to `anthropic_messages` ([#2103](https://github.com/NousResearch/hermes-agent/pull/2103))
|
| 106 |
+
- Fix: case-insensitive model family matching ([#2350](https://github.com/NousResearch/hermes-agent/pull/2350))
|
| 107 |
+
- Fix: ignore placeholder provider keys in activation checks ([#2358](https://github.com/NousResearch/hermes-agent/pull/2358))
|
| 108 |
+
- Fix: Preserve Ollama model:tag colons in context length detection ([#2149](https://github.com/NousResearch/hermes-agent/pull/2149))
|
| 109 |
+
- Fix: recognize Claude Code OAuth credentials in startup gate ([#1663](https://github.com/NousResearch/hermes-agent/pull/1663))
|
| 110 |
+
- Fix: detect Claude Code version dynamically for OAuth user-agent ([#1670](https://github.com/NousResearch/hermes-agent/pull/1670))
|
| 111 |
+
- Fix: OAuth flag stale after refresh/fallback ([#1890](https://github.com/NousResearch/hermes-agent/pull/1890))
|
| 112 |
+
- Fix: auxiliary client skips expired Codex JWT ([#2397](https://github.com/NousResearch/hermes-agent/pull/2397))
|
| 113 |
+
|
| 114 |
+
### Agent Loop
|
| 115 |
+
- **Gateway prompt caching** — Cache AIAgent per session, keep assistant turns, fix session restore ([#2282](https://github.com/NousResearch/hermes-agent/pull/2282), [#2284](https://github.com/NousResearch/hermes-agent/pull/2284), [#2361](https://github.com/NousResearch/hermes-agent/pull/2361))
|
| 116 |
+
- **Context compression overhaul** — Structured summaries, iterative updates, token-budget tail protection, configurable `summary_base_url` ([#2323](https://github.com/NousResearch/hermes-agent/pull/2323), [#1727](https://github.com/NousResearch/hermes-agent/pull/1727), [#2224](https://github.com/NousResearch/hermes-agent/pull/2224))
|
| 117 |
+
- **Pre-call sanitization and post-call tool guardrails** ([#1732](https://github.com/NousResearch/hermes-agent/pull/1732))
|
| 118 |
+
- **Auto-recover** from provider-rejected `tool_choice` by retrying without ([#2174](https://github.com/NousResearch/hermes-agent/pull/2174))
|
| 119 |
+
- **Background memory/skill review** replaces inline nudges ([#2235](https://github.com/NousResearch/hermes-agent/pull/2235))
|
| 120 |
+
- **SOUL.md as primary agent identity** instead of hardcoded default ([#1922](https://github.com/NousResearch/hermes-agent/pull/1922))
|
| 121 |
+
- Fix: prevent silent tool result loss during context compression ([#1993](https://github.com/NousResearch/hermes-agent/pull/1993))
|
| 122 |
+
- Fix: handle empty/null function arguments in tool call recovery ([#2163](https://github.com/NousResearch/hermes-agent/pull/2163))
|
| 123 |
+
- Fix: handle API refusal responses gracefully instead of crashing ([#2156](https://github.com/NousResearch/hermes-agent/pull/2156))
|
| 124 |
+
- Fix: prevent stuck agent loop on malformed tool calls ([#2114](https://github.com/NousResearch/hermes-agent/pull/2114))
|
| 125 |
+
- Fix: return JSON parse error to model instead of dispatching with empty args ([#2342](https://github.com/NousResearch/hermes-agent/pull/2342))
|
| 126 |
+
- Fix: consecutive assistant message merge drops content on mixed types ([#1703](https://github.com/NousResearch/hermes-agent/pull/1703))
|
| 127 |
+
- Fix: message role alternation violations in JSON recovery and error handler ([#1722](https://github.com/NousResearch/hermes-agent/pull/1722))
|
| 128 |
+
- Fix: `compression_attempts` resets each iteration — allowed unlimited compressions ([#1723](https://github.com/NousResearch/hermes-agent/pull/1723))
|
| 129 |
+
- Fix: `length_continue_retries` never resets — later truncations got fewer retries ([#1717](https://github.com/NousResearch/hermes-agent/pull/1717))
|
| 130 |
+
- Fix: compressor summary role violated consecutive-role constraint ([#1720](https://github.com/NousResearch/hermes-agent/pull/1720), [#1743](https://github.com/NousResearch/hermes-agent/pull/1743))
|
| 131 |
+
- Fix: remove hardcoded `gemini-3-flash-preview` as default summary model ([#2464](https://github.com/NousResearch/hermes-agent/pull/2464))
|
| 132 |
+
- Fix: correctly handle empty tool results ([#2201](https://github.com/NousResearch/hermes-agent/pull/2201))
|
| 133 |
+
- Fix: crash on None entry in `tool_calls` list ([#2209](https://github.com/NousResearch/hermes-agent/pull/2209) by @0xbyt4, [#2316](https://github.com/NousResearch/hermes-agent/pull/2316))
|
| 134 |
+
- Fix: per-thread persistent event loops in worker threads ([#2214](https://github.com/NousResearch/hermes-agent/pull/2214) by @jquesnelle)
|
| 135 |
+
- Fix: prevent 'event loop already running' when async tools run in parallel ([#2207](https://github.com/NousResearch/hermes-agent/pull/2207))
|
| 136 |
+
- Fix: strip ANSI at the source — clean terminal output before it reaches the model ([#2115](https://github.com/NousResearch/hermes-agent/pull/2115))
|
| 137 |
+
- Fix: skip top-level `cache_control` on role:tool for OpenRouter ([#2391](https://github.com/NousResearch/hermes-agent/pull/2391))
|
| 138 |
+
- Fix: delegate tool — save parent tool names before child construction mutates global ([#2083](https://github.com/NousResearch/hermes-agent/pull/2083) by @ygd58, [#1894](https://github.com/NousResearch/hermes-agent/pull/1894))
|
| 139 |
+
- Fix: only strip last assistant message if empty string ([#2326](https://github.com/NousResearch/hermes-agent/pull/2326))
|
| 140 |
+
|
| 141 |
+
### Session & Memory
|
| 142 |
+
- **Session search** and management slash commands ([#2198](https://github.com/NousResearch/hermes-agent/pull/2198))
|
| 143 |
+
- **Auto session titles** and `.hermes.md` project config ([#1712](https://github.com/NousResearch/hermes-agent/pull/1712))
|
| 144 |
+
- Fix: concurrent memory writes silently drop entries — added file locking ([#1726](https://github.com/NousResearch/hermes-agent/pull/1726))
|
| 145 |
+
- Fix: search all sources by default in `session_search` ([#1892](https://github.com/NousResearch/hermes-agent/pull/1892))
|
| 146 |
+
- Fix: handle hyphenated FTS5 queries and preserve quoted literals ([#1776](https://github.com/NousResearch/hermes-agent/pull/1776))
|
| 147 |
+
- Fix: skip corrupt lines in `load_transcript` instead of crashing ([#1744](https://github.com/NousResearch/hermes-agent/pull/1744))
|
| 148 |
+
- Fix: normalize session keys to prevent case-sensitive duplicates ([#2157](https://github.com/NousResearch/hermes-agent/pull/2157))
|
| 149 |
+
- Fix: prevent `session_search` crash when no sessions exist ([#2194](https://github.com/NousResearch/hermes-agent/pull/2194))
|
| 150 |
+
- Fix: reset token counters on new session for accurate usage display ([#2101](https://github.com/NousResearch/hermes-agent/pull/2101) by @InB4DevOps)
|
| 151 |
+
- Fix: prevent stale memory overwrites by flush agent ([#2687](https://github.com/NousResearch/hermes-agent/pull/2687))
|
| 152 |
+
- Fix: remove synthetic error message injection, fix session resume after repeated failures ([#2303](https://github.com/NousResearch/hermes-agent/pull/2303))
|
| 153 |
+
- Fix: quiet mode with `--resume` now passes conversation_history ([#2357](https://github.com/NousResearch/hermes-agent/pull/2357))
|
| 154 |
+
- Fix: unify resume logic in batch mode ([#2331](https://github.com/NousResearch/hermes-agent/pull/2331))
|
| 155 |
+
|
| 156 |
+
### Honcho Memory
|
| 157 |
+
- Honcho config fixes and @ context reference integration ([#2343](https://github.com/NousResearch/hermes-agent/pull/2343))
|
| 158 |
+
- Self-hosted / Docker configuration documentation ([#2475](https://github.com/NousResearch/hermes-agent/pull/2475))
|
| 159 |
+
|
| 160 |
+
---
|
| 161 |
+
|
| 162 |
+
## 📱 Messaging Platforms (Gateway)
|
| 163 |
+
|
| 164 |
+
### New Platform Adapters
|
| 165 |
+
- **Signal Messenger** — Full adapter with attachment handling, group message filtering, and Note to Self echo-back protection ([#2206](https://github.com/NousResearch/hermes-agent/pull/2206), [#2400](https://github.com/NousResearch/hermes-agent/pull/2400), [#2297](https://github.com/NousResearch/hermes-agent/pull/2297), [#2156](https://github.com/NousResearch/hermes-agent/pull/2156))
|
| 166 |
+
- **DingTalk** — Adapter with gateway wiring and setup docs ([#1685](https://github.com/NousResearch/hermes-agent/pull/1685), [#1690](https://github.com/NousResearch/hermes-agent/pull/1690), [#1692](https://github.com/NousResearch/hermes-agent/pull/1692))
|
| 167 |
+
- **SMS (Twilio)** ([#1688](https://github.com/NousResearch/hermes-agent/pull/1688))
|
| 168 |
+
- **Mattermost** — With @-mention-only channel filter ([#1683](https://github.com/NousResearch/hermes-agent/pull/1683), [#2443](https://github.com/NousResearch/hermes-agent/pull/2443))
|
| 169 |
+
- **Matrix** — With vision support and image caching ([#1683](https://github.com/NousResearch/hermes-agent/pull/1683), [#2520](https://github.com/NousResearch/hermes-agent/pull/2520))
|
| 170 |
+
- **Webhook** — Platform adapter for external event triggers ([#2166](https://github.com/NousResearch/hermes-agent/pull/2166))
|
| 171 |
+
- **OpenAI-compatible API server** — `/v1/chat/completions` endpoint with `/api/jobs` cron management ([#1756](https://github.com/NousResearch/hermes-agent/pull/1756), [#2450](https://github.com/NousResearch/hermes-agent/pull/2450), [#2456](https://github.com/NousResearch/hermes-agent/pull/2456))
|
| 172 |
+
|
| 173 |
+
### Telegram Improvements
|
| 174 |
+
- MarkdownV2 support — strikethrough, spoiler, blockquotes, escape parentheses/braces/backslashes/backticks ([#2199](https://github.com/NousResearch/hermes-agent/pull/2199), [#2200](https://github.com/NousResearch/hermes-agent/pull/2200) by @llbn, [#2386](https://github.com/NousResearch/hermes-agent/pull/2386))
|
| 175 |
+
- Auto-detect HTML tags and use `parse_mode=HTML` ([#1709](https://github.com/NousResearch/hermes-agent/pull/1709))
|
| 176 |
+
- Telegram group vision support + thread-based sessions ([#2153](https://github.com/NousResearch/hermes-agent/pull/2153))
|
| 177 |
+
- Auto-reconnect polling after network interruption ([#2517](https://github.com/NousResearch/hermes-agent/pull/2517))
|
| 178 |
+
- Aggregate split text messages before dispatching ([#1674](https://github.com/NousResearch/hermes-agent/pull/1674))
|
| 179 |
+
- Fix: streaming config bridge, not-modified, flood control ([#1782](https://github.com/NousResearch/hermes-agent/pull/1782), [#1783](https://github.com/NousResearch/hermes-agent/pull/1783))
|
| 180 |
+
- Fix: edited_message event crashes ([#2074](https://github.com/NousResearch/hermes-agent/pull/2074))
|
| 181 |
+
- Fix: retry 409 polling conflicts before giving up ([#2312](https://github.com/NousResearch/hermes-agent/pull/2312))
|
| 182 |
+
- Fix: topic delivery via `platform:chat_id:thread_id` format ([#2455](https://github.com/NousResearch/hermes-agent/pull/2455))
|
| 183 |
+
|
| 184 |
+
### Discord Improvements
|
| 185 |
+
- Document caching and text-file injection ([#2503](https://github.com/NousResearch/hermes-agent/pull/2503))
|
| 186 |
+
- Persistent typing indicator for DMs ([#2468](https://github.com/NousResearch/hermes-agent/pull/2468))
|
| 187 |
+
- Discord DM vision — inline images + attachment analysis ([#2186](https://github.com/NousResearch/hermes-agent/pull/2186))
|
| 188 |
+
- Persist thread participation across gateway restarts ([#1661](https://github.com/NousResearch/hermes-agent/pull/1661))
|
| 189 |
+
- Fix: gateway crash on non-ASCII guild names ([#2302](https://github.com/NousResearch/hermes-agent/pull/2302))
|
| 190 |
+
- Fix: thread permission errors ([#2073](https://github.com/NousResearch/hermes-agent/pull/2073))
|
| 191 |
+
- Fix: slash event routing in threads ([#2460](https://github.com/NousResearch/hermes-agent/pull/2460))
|
| 192 |
+
- Fix: remove bugged followup messages + `/ask` command ([#1836](https://github.com/NousResearch/hermes-agent/pull/1836))
|
| 193 |
+
- Fix: graceful WebSocket reconnection ([#2127](https://github.com/NousResearch/hermes-agent/pull/2127))
|
| 194 |
+
- Fix: voice channel TTS when streaming enabled ([#2322](https://github.com/NousResearch/hermes-agent/pull/2322))
|
| 195 |
+
|
| 196 |
+
### WhatsApp & Other Adapters
|
| 197 |
+
- WhatsApp: outbound `send_message` routing ([#1769](https://github.com/NousResearch/hermes-agent/pull/1769) by @sai-samarth), LID format self-chat ([#1667](https://github.com/NousResearch/hermes-agent/pull/1667)), `reply_prefix` config fix ([#1923](https://github.com/NousResearch/hermes-agent/pull/1923)), restart on bridge child exit ([#2334](https://github.com/NousResearch/hermes-agent/pull/2334)), image/bridge improvements ([#2181](https://github.com/NousResearch/hermes-agent/pull/2181))
|
| 198 |
+
- Matrix: correct `reply_to_message_id` parameter ([#1895](https://github.com/NousResearch/hermes-agent/pull/1895)), bare media types fix ([#1736](https://github.com/NousResearch/hermes-agent/pull/1736))
|
| 199 |
+
- Mattermost: MIME types for media attachments ([#2329](https://github.com/NousResearch/hermes-agent/pull/2329))
|
| 200 |
+
|
| 201 |
+
### Gateway Core
|
| 202 |
+
- **Auto-reconnect** failed platforms with exponential backoff ([#2584](https://github.com/NousResearch/hermes-agent/pull/2584))
|
| 203 |
+
- **Notify users when session auto-resets** ([#2519](https://github.com/NousResearch/hermes-agent/pull/2519))
|
| 204 |
+
- **Reply-to message context** for out-of-session replies ([#1662](https://github.com/NousResearch/hermes-agent/pull/1662))
|
| 205 |
+
- **Ignore unauthorized DMs** config option ([#1919](https://github.com/NousResearch/hermes-agent/pull/1919))
|
| 206 |
+
- Fix: `/reset` in thread-mode resets global session instead of thread ([#2254](https://github.com/NousResearch/hermes-agent/pull/2254))
|
| 207 |
+
- Fix: deliver MEDIA: files after streaming responses ([#2382](https://github.com/NousResearch/hermes-agent/pull/2382))
|
| 208 |
+
- Fix: cap interrupt recursion depth to prevent resource exhaustion ([#1659](https://github.com/NousResearch/hermes-agent/pull/1659))
|
| 209 |
+
- Fix: detect stopped processes and release stale locks on `--replace` ([#2406](https://github.com/NousResearch/hermes-agent/pull/2406), [#1908](https://github.com/NousResearch/hermes-agent/pull/1908))
|
| 210 |
+
- Fix: PID-based wait with force-kill for gateway restart ([#1902](https://github.com/NousResearch/hermes-agent/pull/1902))
|
| 211 |
+
- Fix: prevent `--replace` mode from killing the caller process ([#2185](https://github.com/NousResearch/hermes-agent/pull/2185))
|
| 212 |
+
- Fix: `/model` shows active fallback model instead of config default ([#1660](https://github.com/NousResearch/hermes-agent/pull/1660))
|
| 213 |
+
- Fix: `/title` command fails when session doesn't exist in SQLite yet ([#2379](https://github.com/NousResearch/hermes-agent/pull/2379) by @ten-jampa)
|
| 214 |
+
- Fix: process `/queue`'d messages after agent completion ([#2469](https://github.com/NousResearch/hermes-agent/pull/2469))
|
| 215 |
+
- Fix: strip orphaned `tool_results` + let `/reset` bypass running agent ([#2180](https://github.com/NousResearch/hermes-agent/pull/2180))
|
| 216 |
+
- Fix: prevent agents from starting gateway outside systemd management ([#2617](https://github.com/NousResearch/hermes-agent/pull/2617))
|
| 217 |
+
- Fix: prevent systemd restart storm on gateway connection failure ([#2327](https://github.com/NousResearch/hermes-agent/pull/2327))
|
| 218 |
+
- Fix: include resolved node path in systemd unit ([#1767](https://github.com/NousResearch/hermes-agent/pull/1767) by @sai-samarth)
|
| 219 |
+
- Fix: send error details to user in gateway outer exception handler ([#1966](https://github.com/NousResearch/hermes-agent/pull/1966))
|
| 220 |
+
- Fix: improve error handling for 429 usage limits and 500 context overflow ([#1839](https://github.com/NousResearch/hermes-agent/pull/1839))
|
| 221 |
+
- Fix: add all missing platform allowlist env vars to startup warning check ([#2628](https://github.com/NousResearch/hermes-agent/pull/2628))
|
| 222 |
+
- Fix: media delivery fails for file paths containing spaces ([#2621](https://github.com/NousResearch/hermes-agent/pull/2621))
|
| 223 |
+
- Fix: duplicate session-key collision in multi-platform gateway ([#2171](https://github.com/NousResearch/hermes-agent/pull/2171))
|
| 224 |
+
- Fix: Matrix and Mattermost never report as connected ([#1711](https://github.com/NousResearch/hermes-agent/pull/1711))
|
| 225 |
+
- Fix: PII redaction config never read — missing yaml import ([#1701](https://github.com/NousResearch/hermes-agent/pull/1701))
|
| 226 |
+
- Fix: NameError on skill slash commands ([#1697](https://github.com/NousResearch/hermes-agent/pull/1697))
|
| 227 |
+
- Fix: persist watcher metadata in checkpoint for crash recovery ([#1706](https://github.com/NousResearch/hermes-agent/pull/1706))
|
| 228 |
+
- Fix: pass `message_thread_id` in send_image_file, send_document, send_video ([#2339](https://github.com/NousResearch/hermes-agent/pull/2339))
|
| 229 |
+
- Fix: media-group aggregation on rapid successive photo messages ([#2160](https://github.com/NousResearch/hermes-agent/pull/2160))
|
| 230 |
+
|
| 231 |
+
---
|
| 232 |
+
|
| 233 |
+
## 🔧 Tool System
|
| 234 |
+
|
| 235 |
+
### MCP Enhancements
|
| 236 |
+
- **MCP server management CLI** + OAuth 2.1 PKCE auth ([#2465](https://github.com/NousResearch/hermes-agent/pull/2465))
|
| 237 |
+
- **Expose MCP servers as standalone toolsets** ([#1907](https://github.com/NousResearch/hermes-agent/pull/1907))
|
| 238 |
+
- **Interactive MCP tool configuration** in `hermes tools` ([#1694](https://github.com/NousResearch/hermes-agent/pull/1694))
|
| 239 |
+
- Fix: MCP-OAuth port mismatch, path traversal, and shared handler state ([#2552](https://github.com/NousResearch/hermes-agent/pull/2552))
|
| 240 |
+
- Fix: preserve MCP tool registrations across session resets ([#2124](https://github.com/NousResearch/hermes-agent/pull/2124))
|
| 241 |
+
- Fix: concurrent file access crash + duplicate MCP registration ([#2154](https://github.com/NousResearch/hermes-agent/pull/2154))
|
| 242 |
+
- Fix: normalise MCP schemas + expand session list columns ([#2102](https://github.com/NousResearch/hermes-agent/pull/2102))
|
| 243 |
+
- Fix: `tool_choice` `mcp_` prefix handling ([#1775](https://github.com/NousResearch/hermes-agent/pull/1775))
|
| 244 |
+
|
| 245 |
+
### Web Tool Backends
|
| 246 |
+
- **Tavily** as web search/extract/crawl backend ([#1731](https://github.com/NousResearch/hermes-agent/pull/1731))
|
| 247 |
+
- **Parallel** as alternative web search/extract backend ([#1696](https://github.com/NousResearch/hermes-agent/pull/1696))
|
| 248 |
+
- **Configurable web backend** — Firecrawl/BeautifulSoup/Playwright selection ([#2256](https://github.com/NousResearch/hermes-agent/pull/2256))
|
| 249 |
+
- Fix: whitespace-only env vars bypass web backend detection ([#2341](https://github.com/NousResearch/hermes-agent/pull/2341))
|
| 250 |
+
|
| 251 |
+
### New Tools
|
| 252 |
+
- **IMAP email** reading and sending ([#2173](https://github.com/NousResearch/hermes-agent/pull/2173))
|
| 253 |
+
- **STT (speech-to-text)** tool using Whisper API ([#2072](https://github.com/NousResearch/hermes-agent/pull/2072))
|
| 254 |
+
- **Route-aware pricing estimates** ([#1695](https://github.com/NousResearch/hermes-agent/pull/1695))
|
| 255 |
+
|
| 256 |
+
### Tool Improvements
|
| 257 |
+
- TTS: `base_url` support for OpenAI TTS provider ([#2064](https://github.com/NousResearch/hermes-agent/pull/2064) by @hanai)
|
| 258 |
+
- Vision: configurable timeout, tilde expansion in file paths, DM vision with multi-image and base64 fallback ([#2480](https://github.com/NousResearch/hermes-agent/pull/2480), [#2585](https://github.com/NousResearch/hermes-agent/pull/2585), [#2211](https://github.com/NousResearch/hermes-agent/pull/2211))
|
| 259 |
+
- Browser: race condition fix in session creation ([#1721](https://github.com/NousResearch/hermes-agent/pull/1721)), TypeError on unexpected LLM params ([#1735](https://github.com/NousResearch/hermes-agent/pull/1735))
|
| 260 |
+
- File tools: strip ANSI escape codes from write_file and patch content ([#2532](https://github.com/NousResearch/hermes-agent/pull/2532)), include pagination args in repeated search key ([#1824](https://github.com/NousResearch/hermes-agent/pull/1824) by @cutepawss), improve fuzzy matching accuracy + position calculation refactor ([#2096](https://github.com/NousResearch/hermes-agent/pull/2096), [#1681](https://github.com/NousResearch/hermes-agent/pull/1681))
|
| 261 |
+
- Code execution: resource leak and double socket close fix ([#2381](https://github.com/NousResearch/hermes-agent/pull/2381))
|
| 262 |
+
- Delegate: thread safety for concurrent subagent delegation ([#1672](https://github.com/NousResearch/hermes-agent/pull/1672)), preserve parent agent's tool list after delegation ([#1778](https://github.com/NousResearch/hermes-agent/pull/1778))
|
| 263 |
+
- Fix: make concurrent tool batching path-aware for file mutations ([#1914](https://github.com/NousResearch/hermes-agent/pull/1914))
|
| 264 |
+
- Fix: chunk long messages in `send_message_tool` before platform dispatch ([#1646](https://github.com/NousResearch/hermes-agent/pull/1646))
|
| 265 |
+
- Fix: add missing 'messaging' toolset ([#1718](https://github.com/NousResearch/hermes-agent/pull/1718))
|
| 266 |
+
- Fix: prevent unavailable tool names from leaking into model schemas ([#2072](https://github.com/NousResearch/hermes-agent/pull/2072))
|
| 267 |
+
- Fix: pass visited set by reference to prevent diamond dependency duplication ([#2311](https://github.com/NousResearch/hermes-agent/pull/2311))
|
| 268 |
+
- Fix: Daytona sandbox lookup migrated from `find_one` to `get/list` ([#2063](https://github.com/NousResearch/hermes-agent/pull/2063) by @rovle)
|
| 269 |
+
|
| 270 |
+
---
|
| 271 |
+
|
| 272 |
+
## 🧩 Skills Ecosystem
|
| 273 |
+
|
| 274 |
+
### Skills System Improvements
|
| 275 |
+
- **Agent-created skills** — Caution-level findings allowed, dangerous skills ask instead of block ([#1840](https://github.com/NousResearch/hermes-agent/pull/1840), [#2446](https://github.com/NousResearch/hermes-agent/pull/2446))
|
| 276 |
+
- **`--yes` flag** to bypass confirmation in `/skills install` and uninstall ([#1647](https://github.com/NousResearch/hermes-agent/pull/1647))
|
| 277 |
+
- **Disabled skills respected** across banner, system prompt, and slash commands ([#1897](https://github.com/NousResearch/hermes-agent/pull/1897))
|
| 278 |
+
- Fix: skills custom_tools import crash + sandbox file_tools integration ([#2239](https://github.com/NousResearch/hermes-agent/pull/2239))
|
| 279 |
+
- Fix: agent-created skills with pip requirements crash on install ([#2145](https://github.com/NousResearch/hermes-agent/pull/2145))
|
| 280 |
+
- Fix: race condition in `Skills.__init__` when `hub.yaml` missing ([#2242](https://github.com/NousResearch/hermes-agent/pull/2242))
|
| 281 |
+
- Fix: validate skill metadata before install and block duplicates ([#2241](https://github.com/NousResearch/hermes-agent/pull/2241))
|
| 282 |
+
- Fix: skills hub inspect/resolve — 4 bugs in inspect, redirects, discovery, tap list ([#2447](https://github.com/NousResearch/hermes-agent/pull/2447))
|
| 283 |
+
- Fix: agent-created skills keep working after session reset ([#2121](https://github.com/NousResearch/hermes-agent/pull/2121))
|
| 284 |
+
|
| 285 |
+
### New Skills
|
| 286 |
+
- **OCR-and-documents** — PDF/DOCX/XLS/PPTX/image OCR with optional GPU ([#2236](https://github.com/NousResearch/hermes-agent/pull/2236), [#2461](https://github.com/NousResearch/hermes-agent/pull/2461))
|
| 287 |
+
- **Huggingface-hub** bundled skill ([#1921](https://github.com/NousResearch/hermes-agent/pull/1921))
|
| 288 |
+
- **Sherlock OSINT** username search ([#1671](https://github.com/NousResearch/hermes-agent/pull/1671))
|
| 289 |
+
- **Meme-generation** — Image generator with Pillow ([#2344](https://github.com/NousResearch/hermes-agent/pull/2344))
|
| 290 |
+
- **Bioinformatics** gateway skill — index to 400+ bio skills ([#2387](https://github.com/NousResearch/hermes-agent/pull/2387))
|
| 291 |
+
- **Inference.sh** skill (terminal-based) ([#1686](https://github.com/NousResearch/hermes-agent/pull/1686))
|
| 292 |
+
- **Base blockchain** optional skill ([#1643](https://github.com/NousResearch/hermes-agent/pull/1643))
|
| 293 |
+
- **3D-model-viewer** optional skill ([#2226](https://github.com/NousResearch/hermes-agent/pull/2226))
|
| 294 |
+
- **FastMCP** optional skill ([#2113](https://github.com/NousResearch/hermes-agent/pull/2113))
|
| 295 |
+
- **Hermes-agent-setup** skill ([#1905](https://github.com/NousResearch/hermes-agent/pull/1905))
|
| 296 |
+
|
| 297 |
+
---
|
| 298 |
+
|
| 299 |
+
## 🔌 Plugin System Enhancements
|
| 300 |
+
|
| 301 |
+
- **TUI extension hooks** — Build custom CLIs on top of Hermes ([#2333](https://github.com/NousResearch/hermes-agent/pull/2333))
|
| 302 |
+
- **`hermes plugins install/remove/list`** commands ([#2337](https://github.com/NousResearch/hermes-agent/pull/2337))
|
| 303 |
+
- **Slash command registration** for plugins ([#2359](https://github.com/NousResearch/hermes-agent/pull/2359))
|
| 304 |
+
- **`session:end` lifecycle event** hook ([#1725](https://github.com/NousResearch/hermes-agent/pull/1725))
|
| 305 |
+
- Fix: require opt-in for project plugin discovery ([#2215](https://github.com/NousResearch/hermes-agent/pull/2215))
|
| 306 |
+
|
| 307 |
+
---
|
| 308 |
+
|
| 309 |
+
## 🔒 Security & Reliability
|
| 310 |
+
|
| 311 |
+
### Security
|
| 312 |
+
- **SSRF protection** for vision_tools and web_tools ([#2679](https://github.com/NousResearch/hermes-agent/pull/2679))
|
| 313 |
+
- **Shell injection prevention** in `_expand_path` via `~user` path suffix ([#2685](https://github.com/NousResearch/hermes-agent/pull/2685))
|
| 314 |
+
- **Block untrusted browser-origin** API server access ([#2451](https://github.com/NousResearch/hermes-agent/pull/2451))
|
| 315 |
+
- **Block sandbox backend creds** from subprocess env ([#1658](https://github.com/NousResearch/hermes-agent/pull/1658))
|
| 316 |
+
- **Block @ references** from reading secrets outside workspace ([#2601](https://github.com/NousResearch/hermes-agent/pull/2601) by @Gutslabs)
|
| 317 |
+
- **Malicious code pattern pre-exec scanner** for terminal_tool ([#2245](https://github.com/NousResearch/hermes-agent/pull/2245))
|
| 318 |
+
- **Harden terminal safety** and sandbox file writes ([#1653](https://github.com/NousResearch/hermes-agent/pull/1653))
|
| 319 |
+
- **PKCE verifier leak** fix + OAuth refresh Content-Type ([#1775](https://github.com/NousResearch/hermes-agent/pull/1775))
|
| 320 |
+
- **Eliminate SQL string formatting** in `execute()` calls ([#2061](https://github.com/NousResearch/hermes-agent/pull/2061) by @dusterbloom)
|
| 321 |
+
- **Harden jobs API** — input limits, field whitelist, startup check ([#2456](https://github.com/NousResearch/hermes-agent/pull/2456))
|
| 322 |
+
|
| 323 |
+
### Reliability
|
| 324 |
+
- Thread locks on 4 SessionDB methods ([#1704](https://github.com/NousResearch/hermes-agent/pull/1704))
|
| 325 |
+
- File locking for concurrent memory writes ([#1726](https://github.com/NousResearch/hermes-agent/pull/1726))
|
| 326 |
+
- Handle OpenRouter errors gracefully ([#2112](https://github.com/NousResearch/hermes-agent/pull/2112))
|
| 327 |
+
- Guard print() calls against OSError ([#1668](https://github.com/NousResearch/hermes-agent/pull/1668))
|
| 328 |
+
- Safely handle non-string inputs in redacting formatter ([#2392](https://github.com/NousResearch/hermes-agent/pull/2392), [#1700](https://github.com/NousResearch/hermes-agent/pull/1700))
|
| 329 |
+
- ACP: preserve session provider on model switch, persist sessions to disk ([#2380](https://github.com/NousResearch/hermes-agent/pull/2380), [#2071](https://github.com/NousResearch/hermes-agent/pull/2071))
|
| 330 |
+
- API server: persist ResponseStore to SQLite across restarts ([#2472](https://github.com/NousResearch/hermes-agent/pull/2472))
|
| 331 |
+
- Fix: `fetch_nous_models` always TypeError from positional args ([#1699](https://github.com/NousResearch/hermes-agent/pull/1699))
|
| 332 |
+
- Fix: resolve merge conflict markers in cli.py breaking startup ([#2347](https://github.com/NousResearch/hermes-agent/pull/2347))
|
| 333 |
+
- Fix: `minisweagent_path.py` missing from wheel ([#2098](https://github.com/NousResearch/hermes-agent/pull/2098) by @JiwaniZakir)
|
| 334 |
+
|
| 335 |
+
### Cron System
|
| 336 |
+
- **`[SILENT]` response** — cron agents can suppress delivery ([#1833](https://github.com/NousResearch/hermes-agent/pull/1833))
|
| 337 |
+
- **Scale missed-job grace window** with schedule frequency ([#2449](https://github.com/NousResearch/hermes-agent/pull/2449))
|
| 338 |
+
- **Recover recent one-shot jobs** ([#1918](https://github.com/NousResearch/hermes-agent/pull/1918))
|
| 339 |
+
- Fix: normalize `repeat<=0` to None — jobs deleted after first run when LLM passes -1 ([#2612](https://github.com/NousResearch/hermes-agent/pull/2612) by @Mibayy)
|
| 340 |
+
- Fix: Matrix added to scheduler delivery platform_map ([#2167](https://github.com/NousResearch/hermes-agent/pull/2167) by @buntingszn)
|
| 341 |
+
- Fix: naive ISO timestamps without timezone — jobs fire at wrong time ([#1729](https://github.com/NousResearch/hermes-agent/pull/1729))
|
| 342 |
+
- Fix: `get_due_jobs` reads `jobs.json` twice — race condition ([#1716](https://github.com/NousResearch/hermes-agent/pull/1716))
|
| 343 |
+
- Fix: silent jobs return empty response for delivery skip ([#2442](https://github.com/NousResearch/hermes-agent/pull/2442))
|
| 344 |
+
- Fix: stop injecting cron outputs into gateway session history ([#2313](https://github.com/NousResearch/hermes-agent/pull/2313))
|
| 345 |
+
- Fix: close abandoned coroutine when `asyncio.run()` raises RuntimeError ([#2317](https://github.com/NousResearch/hermes-agent/pull/2317))
|
| 346 |
+
|
| 347 |
+
---
|
| 348 |
+
|
| 349 |
+
## 🧪 Testing
|
| 350 |
+
|
| 351 |
+
- Resolve all consistently failing tests ([#2488](https://github.com/NousResearch/hermes-agent/pull/2488))
|
| 352 |
+
- Replace `FakePath` with `monkeypatch` for Python 3.12 compat ([#2444](https://github.com/NousResearch/hermes-agent/pull/2444))
|
| 353 |
+
- Align Hermes setup and full-suite expectations ([#1710](https://github.com/NousResearch/hermes-agent/pull/1710))
|
| 354 |
+
|
| 355 |
+
---
|
| 356 |
+
|
| 357 |
+
## 📚 Documentation
|
| 358 |
+
|
| 359 |
+
- Comprehensive docs update for recent features ([#1693](https://github.com/NousResearch/hermes-agent/pull/1693), [#2183](https://github.com/NousResearch/hermes-agent/pull/2183))
|
| 360 |
+
- Alibaba Cloud and DingTalk setup guides ([#1687](https://github.com/NousResearch/hermes-agent/pull/1687), [#1692](https://github.com/NousResearch/hermes-agent/pull/1692))
|
| 361 |
+
- Detailed skills documentation ([#2244](https://github.com/NousResearch/hermes-agent/pull/2244))
|
| 362 |
+
- Honcho self-hosted / Docker configuration ([#2475](https://github.com/NousResearch/hermes-agent/pull/2475))
|
| 363 |
+
- Context length detection FAQ and quickstart references ([#2179](https://github.com/NousResearch/hermes-agent/pull/2179))
|
| 364 |
+
- Fix docs inconsistencies across reference and user guides ([#1995](https://github.com/NousResearch/hermes-agent/pull/1995))
|
| 365 |
+
- Fix MCP install commands — use uv, not bare pip ([#1909](https://github.com/NousResearch/hermes-agent/pull/1909))
|
| 366 |
+
- Replace ASCII diagrams with Mermaid/lists ([#2402](https://github.com/NousResearch/hermes-agent/pull/2402))
|
| 367 |
+
- Gemini OAuth provider implementation plan ([#2467](https://github.com/NousResearch/hermes-agent/pull/2467))
|
| 368 |
+
- Discord Server Members Intent marked as required ([#2330](https://github.com/NousResearch/hermes-agent/pull/2330))
|
| 369 |
+
- Fix MDX build error in api-server.md ([#1787](https://github.com/NousResearch/hermes-agent/pull/1787))
|
| 370 |
+
- Align venv path to match installer ([#2114](https://github.com/NousResearch/hermes-agent/pull/2114))
|
| 371 |
+
- New skills added to hub index ([#2281](https://github.com/NousResearch/hermes-agent/pull/2281))
|
| 372 |
+
|
| 373 |
+
---
|
| 374 |
+
|
| 375 |
+
## 👥 Contributors
|
| 376 |
+
|
| 377 |
+
### Core
|
| 378 |
+
- **@teknium1** (Teknium) — 280 PRs
|
| 379 |
+
|
| 380 |
+
### Community Contributors
|
| 381 |
+
- **@mchzimm** (to_the_max) — GitHub Copilot provider integration ([#1879](https://github.com/NousResearch/hermes-agent/pull/1879))
|
| 382 |
+
- **@jquesnelle** (Jeffrey Quesnelle) — Per-thread persistent event loops fix ([#2214](https://github.com/NousResearch/hermes-agent/pull/2214))
|
| 383 |
+
- **@llbn** (lbn) — Telegram MarkdownV2 strikethrough, spoiler, blockquotes, and escape fixes ([#2199](https://github.com/NousResearch/hermes-agent/pull/2199), [#2200](https://github.com/NousResearch/hermes-agent/pull/2200))
|
| 384 |
+
- **@dusterbloom** — SQL injection prevention + local server context window querying ([#2061](https://github.com/NousResearch/hermes-agent/pull/2061), [#2091](https://github.com/NousResearch/hermes-agent/pull/2091))
|
| 385 |
+
- **@0xbyt4** — Anthropic tool_calls None guard + OpenCode-Go provider config fix ([#2209](https://github.com/NousResearch/hermes-agent/pull/2209), [#2393](https://github.com/NousResearch/hermes-agent/pull/2393))
|
| 386 |
+
- **@sai-samarth** (Saisamarth) — WhatsApp send_message routing + systemd node path ([#1769](https://github.com/NousResearch/hermes-agent/pull/1769), [#1767](https://github.com/NousResearch/hermes-agent/pull/1767))
|
| 387 |
+
- **@Gutslabs** (Guts) — Block @ references from reading secrets ([#2601](https://github.com/NousResearch/hermes-agent/pull/2601))
|
| 388 |
+
- **@Mibayy** (Mibay) — Cron job repeat normalization ([#2612](https://github.com/NousResearch/hermes-agent/pull/2612))
|
| 389 |
+
- **@ten-jampa** (Tenzin Jampa) — Gateway /title command fix ([#2379](https://github.com/NousResearch/hermes-agent/pull/2379))
|
| 390 |
+
- **@cutepawss** (lila) — File tools search pagination fix ([#1824](https://github.com/NousResearch/hermes-agent/pull/1824))
|
| 391 |
+
- **@hanai** (Hanai) — OpenAI TTS base_url support ([#2064](https://github.com/NousResearch/hermes-agent/pull/2064))
|
| 392 |
+
- **@rovle** (Lovre Pešut) — Daytona sandbox API migration ([#2063](https://github.com/NousResearch/hermes-agent/pull/2063))
|
| 393 |
+
- **@buntingszn** (bunting szn) — Matrix cron delivery support ([#2167](https://github.com/NousResearch/hermes-agent/pull/2167))
|
| 394 |
+
- **@InB4DevOps** — Token counter reset on new session ([#2101](https://github.com/NousResearch/hermes-agent/pull/2101))
|
| 395 |
+
- **@JiwaniZakir** (Zakir Jiwani) — Missing file in wheel fix ([#2098](https://github.com/NousResearch/hermes-agent/pull/2098))
|
| 396 |
+
- **@ygd58** (buray) — Delegate tool parent tool names fix ([#2083](https://github.com/NousResearch/hermes-agent/pull/2083))
|
| 397 |
+
|
| 398 |
+
---
|
| 399 |
+
|
| 400 |
+
**Full Changelog**: [v2026.3.17...v2026.3.23](https://github.com/NousResearch/hermes-agent/compare/v2026.3.17...v2026.3.23)
|
acp_adapter/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
"""ACP (Agent Communication Protocol) adapter for hermes-agent."""
|
acp_adapter/__main__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Allow running the ACP adapter as ``python -m acp_adapter``."""
|
| 2 |
+
|
| 3 |
+
from .entry import main
|
| 4 |
+
|
| 5 |
+
main()
|
acp_adapter/auth.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""ACP auth helpers — detect the currently configured Hermes provider."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from typing import Optional
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def detect_provider() -> Optional[str]:
    """Resolve the active Hermes runtime provider, or None if unavailable."""
    try:
        from hermes_cli.runtime_provider import resolve_runtime_provider

        runtime = resolve_runtime_provider()
        key = runtime.get("api_key")
        name = runtime.get("provider")
        # Both credentials must be present as non-blank strings before we
        # report a provider.
        key_ok = isinstance(key, str) and bool(key.strip())
        name_ok = isinstance(name, str) and bool(name.strip())
        if key_ok and name_ok:
            return name.strip().lower()
    except Exception:
        # Best-effort: a missing hermes_cli install or a broken runtime
        # config simply means "no provider configured".
        pass
    return None
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def has_provider() -> bool:
    """Return True if Hermes can resolve any runtime provider credentials."""
    provider = detect_provider()
    return provider is not None
|
acp_adapter/entry.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""CLI entry point for the hermes-agent ACP adapter.
|
| 2 |
+
|
| 3 |
+
Loads environment variables from ``~/.hermes/.env``, configures logging
|
| 4 |
+
to write to stderr (so stdout is reserved for ACP JSON-RPC transport),
|
| 5 |
+
and starts the ACP agent server.
|
| 6 |
+
|
| 7 |
+
Usage::
|
| 8 |
+
|
| 9 |
+
python -m acp_adapter.entry
|
| 10 |
+
# or
|
| 11 |
+
hermes acp
|
| 12 |
+
# or
|
| 13 |
+
hermes-acp
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
import asyncio
|
| 17 |
+
import logging
|
| 18 |
+
import os
|
| 19 |
+
import sys
|
| 20 |
+
from pathlib import Path
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def _setup_logging() -> None:
|
| 24 |
+
"""Route all logging to stderr so stdout stays clean for ACP stdio."""
|
| 25 |
+
handler = logging.StreamHandler(sys.stderr)
|
| 26 |
+
handler.setFormatter(
|
| 27 |
+
logging.Formatter(
|
| 28 |
+
"%(asctime)s [%(levelname)s] %(name)s: %(message)s",
|
| 29 |
+
datefmt="%Y-%m-%d %H:%M:%S",
|
| 30 |
+
)
|
| 31 |
+
)
|
| 32 |
+
root = logging.getLogger()
|
| 33 |
+
root.handlers.clear()
|
| 34 |
+
root.addHandler(handler)
|
| 35 |
+
root.setLevel(logging.INFO)
|
| 36 |
+
|
| 37 |
+
# Quiet down noisy libraries
|
| 38 |
+
logging.getLogger("httpx").setLevel(logging.WARNING)
|
| 39 |
+
logging.getLogger("httpcore").setLevel(logging.WARNING)
|
| 40 |
+
logging.getLogger("openai").setLevel(logging.WARNING)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def _load_env() -> None:
    """Load .env from HERMES_HOME (default ``~/.hermes``)."""
    from hermes_cli.env_loader import load_hermes_dotenv

    log = logging.getLogger(__name__)
    hermes_home = Path(os.getenv("HERMES_HOME", Path.home() / ".hermes"))
    env_files = load_hermes_dotenv(hermes_home=hermes_home)
    if not env_files:
        log.info("No .env found at %s, using system env", hermes_home / ".env")
        return
    for env_file in env_files:
        log.info("Loaded env from %s", env_file)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def main() -> None:
    """Entry point: load env, configure logging, run the ACP agent."""
    _setup_logging()
    _load_env()

    logger = logging.getLogger(__name__)
    logger.info("Starting hermes-agent ACP adapter")

    # Code deeper in the stack does ``from run_agent import AIAgent``, which
    # resolves relative to the project root — make sure it is importable.
    project_root = str(Path(__file__).resolve().parent.parent)
    if project_root not in sys.path:
        sys.path.insert(0, project_root)

    # Imported late so sys.path (above) and env vars are already in place.
    import acp
    from .server import HermesACPAgent

    try:
        asyncio.run(acp.run_agent(HermesACPAgent()))
    except KeyboardInterrupt:
        logger.info("Shutting down (KeyboardInterrupt)")
    except Exception:
        logger.exception("ACP agent crashed")
        sys.exit(1)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
if __name__ == "__main__":
|
| 85 |
+
main()
|
acp_adapter/events.py
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Callback factories for bridging AIAgent events to ACP notifications.
|
| 2 |
+
|
| 3 |
+
Each factory returns a callable with the signature that AIAgent expects
|
| 4 |
+
for its callbacks. Internally, the callbacks push ACP session updates
|
| 5 |
+
to the client via ``conn.session_update()`` using
|
| 6 |
+
``asyncio.run_coroutine_threadsafe()`` (since AIAgent runs in a worker
|
| 7 |
+
thread while the event loop lives on the main thread).
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import asyncio
|
| 11 |
+
import json
|
| 12 |
+
import logging
|
| 13 |
+
from collections import defaultdict, deque
|
| 14 |
+
from typing import Any, Callable, Deque, Dict
|
| 15 |
+
|
| 16 |
+
import acp
|
| 17 |
+
|
| 18 |
+
from .tools import (
|
| 19 |
+
build_tool_complete,
|
| 20 |
+
build_tool_start,
|
| 21 |
+
make_tool_call_id,
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
logger = logging.getLogger(__name__)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def _send_update(
    conn: acp.Client,
    session_id: str,
    loop: asyncio.AbstractEventLoop,
    update: Any,
) -> None:
    """Deliver one ACP session update from the agent worker thread.

    Schedules ``conn.session_update`` on *loop* (the main-thread event loop)
    and blocks the calling worker thread for up to 5 seconds until delivery —
    so despite being best-effort, updates are sent synchronously and in order.
    Failures are logged at DEBUG and swallowed: a slow or disconnected client
    must never crash the agent run.
    """
    try:
        future = asyncio.run_coroutine_threadsafe(
            conn.session_update(session_id, update), loop
        )
        # Block briefly so updates stay ordered and errors surface here.
        future.result(timeout=5)
    except Exception:
        logger.debug("Failed to send ACP update", exc_info=True)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
# ------------------------------------------------------------------
|
| 44 |
+
# Tool progress callback
|
| 45 |
+
# ------------------------------------------------------------------
|
| 46 |
+
|
| 47 |
+
def make_tool_progress_cb(
    conn: acp.Client,
    session_id: str,
    loop: asyncio.AbstractEventLoop,
    tool_call_ids: Dict[str, Deque[str]],
) -> Callable:
    """Create a ``tool_progress_callback`` for AIAgent.

    Signature expected by AIAgent::

        tool_progress_callback(name: str, preview: str, args: dict)

    Emits ``ToolCallStart`` for each tool invocation and tracks IDs in a FIFO
    queue per tool name so duplicate/parallel same-name calls still complete
    against the correct ACP tool call.  The queue is shared (by reference)
    with the matching step callback, which pops ids on completion.
    """

    def _tool_progress(name: str, preview: str, args: Any = None) -> None:
        # AIAgent may hand us the raw JSON argument string; best-effort decode
        # it, falling back to wrapping the raw text so nothing is lost.
        if isinstance(args, str):
            try:
                args = json.loads(args)
            except (json.JSONDecodeError, TypeError):
                args = {"raw": args}
        if not isinstance(args, dict):
            args = {}

        tc_id = make_tool_call_id()
        queue = tool_call_ids.get(name)
        if queue is None:
            queue = deque()
            tool_call_ids[name] = queue
        elif isinstance(queue, str):
            # Defensive: a bare id string was stored (legacy shape) — upgrade
            # it to a single-element deque before appending.
            queue = deque([queue])
            tool_call_ids[name] = queue
        queue.append(tc_id)

        update = build_tool_start(tc_id, name, args)
        _send_update(conn, session_id, loop, update)

    return _tool_progress
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
# ------------------------------------------------------------------
|
| 90 |
+
# Thinking callback
|
| 91 |
+
# ------------------------------------------------------------------
|
| 92 |
+
|
| 93 |
+
def make_thinking_cb(
    conn: acp.Client,
    session_id: str,
    loop: asyncio.AbstractEventLoop,
) -> Callable:
    """Build AIAgent's ``thinking_callback``: forwards each non-empty chunk
    of reasoning text to the editor as an agent-thought session update."""

    def _thinking(chunk: str) -> None:
        if not chunk:
            return
        _send_update(conn, session_id, loop, acp.update_agent_thought_text(chunk))

    return _thinking
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
# ------------------------------------------------------------------
|
| 110 |
+
# Step callback
|
| 111 |
+
# ------------------------------------------------------------------
|
| 112 |
+
|
| 113 |
+
def make_step_cb(
    conn: acp.Client,
    session_id: str,
    loop: asyncio.AbstractEventLoop,
    tool_call_ids: Dict[str, Deque[str]],
) -> Callable:
    """Create a ``step_callback`` for AIAgent.

    Signature expected by AIAgent::

        step_callback(api_call_count: int, prev_tools: list)

    For each tool reported in *prev_tools*, pops the oldest pending ACP
    tool-call id from the shared ``tool_call_ids`` queue (pushed by the
    tool-progress callback) and emits a matching completion update.
    """

    def _step(api_call_count: int, prev_tools: Any = None) -> None:
        if prev_tools and isinstance(prev_tools, list):
            for tool_info in prev_tools:
                tool_name = None
                result = None

                # Entries may be dicts (name/result under varying keys) or
                # bare tool-name strings — normalize both shapes.
                if isinstance(tool_info, dict):
                    tool_name = tool_info.get("name") or tool_info.get("function_name")
                    result = tool_info.get("result") or tool_info.get("output")
                elif isinstance(tool_info, str):
                    tool_name = tool_info

                queue = tool_call_ids.get(tool_name or "")
                if isinstance(queue, str):
                    # Defensive upgrade of a legacy bare-string entry.
                    # NOTE(review): if tool_name is None this writes the key
                    # ``None`` into tool_call_ids — confirm that is intended.
                    queue = deque([queue])
                    tool_call_ids[tool_name] = queue
                if tool_name and queue:
                    # FIFO: complete the oldest outstanding call for this tool.
                    tc_id = queue.popleft()
                    update = build_tool_complete(
                        tc_id, tool_name, result=str(result) if result is not None else None
                    )
                    _send_update(conn, session_id, loop, update)
                    if not queue:
                        # Drop drained queues so the dict does not accumulate.
                        tool_call_ids.pop(tool_name, None)

    return _step
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
# ------------------------------------------------------------------
|
| 155 |
+
# Agent message callback
|
| 156 |
+
# ------------------------------------------------------------------
|
| 157 |
+
|
| 158 |
+
def make_message_cb(
    conn: acp.Client,
    session_id: str,
    loop: asyncio.AbstractEventLoop,
) -> Callable:
    """Build a callback that relays agent response text to the editor,
    dropping empty chunks."""

    def _message(chunk: str) -> None:
        if not chunk:
            return
        _send_update(conn, session_id, loop, acp.update_agent_message_text(chunk))

    return _message
|
acp_adapter/permissions.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""ACP permission bridging — maps ACP approval requests to hermes approval callbacks."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import asyncio
|
| 6 |
+
import logging
|
| 7 |
+
from concurrent.futures import TimeoutError as FutureTimeout
|
| 8 |
+
from typing import Any, Callable, Optional
|
| 9 |
+
|
| 10 |
+
from acp.schema import (
|
| 11 |
+
AllowedOutcome,
|
| 12 |
+
DeniedOutcome,
|
| 13 |
+
PermissionOption,
|
| 14 |
+
RequestPermissionRequest,
|
| 15 |
+
SelectedPermissionOutcome,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
logger = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
# Maps ACP PermissionOptionKind -> hermes approval result strings:
# "once" allows this single call, "always" allows and remembers, and
# "deny" refuses (both reject kinds collapse to a plain deny).
_KIND_TO_HERMES = {
    "allow_once": "once",
    "allow_always": "always",
    "reject_once": "deny",
    "reject_always": "deny",
}
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def make_approval_callback(
    request_permission_fn: Callable,
    loop: asyncio.AbstractEventLoop,
    session_id: str,
    timeout: float = 60.0,
) -> Callable[[str, str], str]:
    """
    Return a hermes-compatible ``approval_callback(command, description) -> str``
    that bridges to the ACP client's ``request_permission`` call.

    The returned callback runs on the agent worker thread and blocks until
    the editor answers (or *timeout* elapses), mapping the selected ACP
    option kind to the hermes approval vocabulary ("once"/"always"/"deny").
    Any failure fails closed with "deny".

    Args:
        request_permission_fn: The ACP connection's ``request_permission`` coroutine.
        loop: The event loop on which the ACP connection lives.
        session_id: Current ACP session id.
        timeout: Seconds to wait for a response before auto-denying.
    """
    # Hoisted out of the per-call path; previously re-imported on every approval.
    import acp as _acp

    def _callback(command: str, description: str) -> str:
        options = [
            PermissionOption(option_id="allow_once", kind="allow_once", name="Allow once"),
            PermissionOption(option_id="allow_always", kind="allow_always", name="Allow always"),
            PermissionOption(option_id="deny", kind="reject_once", name="Deny"),
        ]
        tool_call = _acp.start_tool_call("perm-check", command, kind="execute")

        coro = request_permission_fn(
            session_id=session_id,
            tool_call=tool_call,
            options=options,
        )

        try:
            future = asyncio.run_coroutine_threadsafe(coro, loop)
            response = future.result(timeout=timeout)
        except Exception as exc:
            # concurrent.futures.TimeoutError is an Exception subclass, so a
            # single clause covers both timeouts and transport failures
            # (the old ``except (FutureTimeout, Exception)`` tuple was
            # redundant).  Fail closed.
            logger.warning("Permission request timed out or failed: %s", exc)
            return "deny"

        outcome = response.outcome
        if not isinstance(outcome, AllowedOutcome):
            # Denied, cancelled, or anything unexpected: refuse.
            return "deny"

        # Map the selected option back to its kind, then to hermes vocabulary.
        for opt in options:
            if opt.option_id == outcome.option_id:
                return _KIND_TO_HERMES.get(opt.kind, "deny")
        return "once"  # fallback for unknown option_id

    return _callback
|
acp_adapter/server.py
ADDED
|
@@ -0,0 +1,492 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""ACP agent server — exposes Hermes Agent via the Agent Client Protocol."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import asyncio
|
| 6 |
+
import logging
|
| 7 |
+
from collections import defaultdict, deque
|
| 8 |
+
from concurrent.futures import ThreadPoolExecutor
|
| 9 |
+
from typing import Any, Deque, Optional
|
| 10 |
+
|
| 11 |
+
import acp
|
| 12 |
+
from acp.schema import (
|
| 13 |
+
AgentCapabilities,
|
| 14 |
+
AuthenticateResponse,
|
| 15 |
+
AuthMethod,
|
| 16 |
+
ClientCapabilities,
|
| 17 |
+
EmbeddedResourceContentBlock,
|
| 18 |
+
ForkSessionResponse,
|
| 19 |
+
ImageContentBlock,
|
| 20 |
+
AudioContentBlock,
|
| 21 |
+
Implementation,
|
| 22 |
+
InitializeResponse,
|
| 23 |
+
ListSessionsResponse,
|
| 24 |
+
LoadSessionResponse,
|
| 25 |
+
NewSessionResponse,
|
| 26 |
+
PromptResponse,
|
| 27 |
+
ResumeSessionResponse,
|
| 28 |
+
ResourceContentBlock,
|
| 29 |
+
SessionCapabilities,
|
| 30 |
+
SessionForkCapabilities,
|
| 31 |
+
SessionListCapabilities,
|
| 32 |
+
SessionInfo,
|
| 33 |
+
TextContentBlock,
|
| 34 |
+
Usage,
|
| 35 |
+
)
|
| 36 |
+
|
| 37 |
+
from acp_adapter.auth import detect_provider, has_provider
|
| 38 |
+
from acp_adapter.events import (
|
| 39 |
+
make_message_cb,
|
| 40 |
+
make_step_cb,
|
| 41 |
+
make_thinking_cb,
|
| 42 |
+
make_tool_progress_cb,
|
| 43 |
+
)
|
| 44 |
+
from acp_adapter.permissions import make_approval_callback
|
| 45 |
+
from acp_adapter.session import SessionManager, SessionState
|
| 46 |
+
|
| 47 |
+
logger = logging.getLogger(__name__)
|
| 48 |
+
|
| 49 |
+
# Resolve the reported agent version; hermes_cli may be absent in
# stripped-down installs, in which case advertise a stub version.
try:
    from hermes_cli import __version__ as HERMES_VERSION
except Exception:
    HERMES_VERSION = "0.0.0"

# Thread pool for running AIAgent (synchronous) in parallel.
_executor = ThreadPoolExecutor(max_workers=4, thread_name_prefix="acp-agent")
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def _extract_text(
    prompt: list[
        TextContentBlock
        | ImageContentBlock
        | AudioContentBlock
        | ResourceContentBlock
        | EmbeddedResourceContentBlock
    ],
) -> str:
    """Join the plain-text pieces of ACP content blocks with newlines.

    Text blocks contribute their ``text`` verbatim; other block types with a
    ``text`` attribute are stringified; blocks without one are skipped.
    """

    def _text_of(item: Any) -> str | None:
        if isinstance(item, TextContentBlock):
            return item.text
        if hasattr(item, "text"):
            return str(item.text)
        return None  # non-text blocks are ignored for now

    return "\n".join(t for t in map(_text_of, prompt) if t is not None)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
class HermesACPAgent(acp.Agent):
|
| 79 |
+
"""ACP Agent implementation wrapping Hermes AIAgent."""
|
| 80 |
+
|
| 81 |
+
def __init__(self, session_manager: SessionManager | None = None):
|
| 82 |
+
super().__init__()
|
| 83 |
+
self.session_manager = session_manager or SessionManager()
|
| 84 |
+
self._conn: Optional[acp.Client] = None
|
| 85 |
+
|
| 86 |
+
# ---- Connection lifecycle -----------------------------------------------
|
| 87 |
+
|
| 88 |
+
def on_connect(self, conn: acp.Client) -> None:
|
| 89 |
+
"""Store the client connection for sending session updates."""
|
| 90 |
+
self._conn = conn
|
| 91 |
+
logger.info("ACP client connected")
|
| 92 |
+
|
| 93 |
+
# ---- ACP lifecycle ------------------------------------------------------
|
| 94 |
+
|
| 95 |
+
async def initialize(
|
| 96 |
+
self,
|
| 97 |
+
protocol_version: int,
|
| 98 |
+
client_capabilities: ClientCapabilities | None = None,
|
| 99 |
+
client_info: Implementation | None = None,
|
| 100 |
+
**kwargs: Any,
|
| 101 |
+
) -> InitializeResponse:
|
| 102 |
+
provider = detect_provider()
|
| 103 |
+
auth_methods = None
|
| 104 |
+
if provider:
|
| 105 |
+
auth_methods = [
|
| 106 |
+
AuthMethod(
|
| 107 |
+
id=provider,
|
| 108 |
+
name=f"{provider} runtime credentials",
|
| 109 |
+
description=f"Authenticate Hermes using the currently configured {provider} runtime credentials.",
|
| 110 |
+
)
|
| 111 |
+
]
|
| 112 |
+
|
| 113 |
+
client_name = client_info.name if client_info else "unknown"
|
| 114 |
+
logger.info("Initialize from %s (protocol v%s)", client_name, protocol_version)
|
| 115 |
+
|
| 116 |
+
return InitializeResponse(
|
| 117 |
+
protocol_version=acp.PROTOCOL_VERSION,
|
| 118 |
+
agent_info=Implementation(name="hermes-agent", version=HERMES_VERSION),
|
| 119 |
+
agent_capabilities=AgentCapabilities(
|
| 120 |
+
session_capabilities=SessionCapabilities(
|
| 121 |
+
fork=SessionForkCapabilities(),
|
| 122 |
+
list=SessionListCapabilities(),
|
| 123 |
+
),
|
| 124 |
+
),
|
| 125 |
+
auth_methods=auth_methods,
|
| 126 |
+
)
|
| 127 |
+
|
| 128 |
+
async def authenticate(self, method_id: str, **kwargs: Any) -> AuthenticateResponse | None:
|
| 129 |
+
if has_provider():
|
| 130 |
+
return AuthenticateResponse()
|
| 131 |
+
return None
|
| 132 |
+
|
| 133 |
+
# ---- Session management -------------------------------------------------
|
| 134 |
+
|
| 135 |
+
async def new_session(
|
| 136 |
+
self,
|
| 137 |
+
cwd: str,
|
| 138 |
+
mcp_servers: list | None = None,
|
| 139 |
+
**kwargs: Any,
|
| 140 |
+
) -> NewSessionResponse:
|
| 141 |
+
state = self.session_manager.create_session(cwd=cwd)
|
| 142 |
+
logger.info("New session %s (cwd=%s)", state.session_id, cwd)
|
| 143 |
+
return NewSessionResponse(session_id=state.session_id)
|
| 144 |
+
|
| 145 |
+
async def load_session(
|
| 146 |
+
self,
|
| 147 |
+
cwd: str,
|
| 148 |
+
session_id: str,
|
| 149 |
+
mcp_servers: list | None = None,
|
| 150 |
+
**kwargs: Any,
|
| 151 |
+
) -> LoadSessionResponse | None:
|
| 152 |
+
state = self.session_manager.update_cwd(session_id, cwd)
|
| 153 |
+
if state is None:
|
| 154 |
+
logger.warning("load_session: session %s not found", session_id)
|
| 155 |
+
return None
|
| 156 |
+
logger.info("Loaded session %s", session_id)
|
| 157 |
+
return LoadSessionResponse()
|
| 158 |
+
|
| 159 |
+
async def resume_session(
|
| 160 |
+
self,
|
| 161 |
+
cwd: str,
|
| 162 |
+
session_id: str,
|
| 163 |
+
mcp_servers: list | None = None,
|
| 164 |
+
**kwargs: Any,
|
| 165 |
+
) -> ResumeSessionResponse:
|
| 166 |
+
state = self.session_manager.update_cwd(session_id, cwd)
|
| 167 |
+
if state is None:
|
| 168 |
+
logger.warning("resume_session: session %s not found, creating new", session_id)
|
| 169 |
+
state = self.session_manager.create_session(cwd=cwd)
|
| 170 |
+
logger.info("Resumed session %s", state.session_id)
|
| 171 |
+
return ResumeSessionResponse()
|
| 172 |
+
|
| 173 |
+
async def cancel(self, session_id: str, **kwargs: Any) -> None:
|
| 174 |
+
state = self.session_manager.get_session(session_id)
|
| 175 |
+
if state and state.cancel_event:
|
| 176 |
+
state.cancel_event.set()
|
| 177 |
+
try:
|
| 178 |
+
if getattr(state, "agent", None) and hasattr(state.agent, "interrupt"):
|
| 179 |
+
state.agent.interrupt()
|
| 180 |
+
except Exception:
|
| 181 |
+
logger.debug("Failed to interrupt ACP session %s", session_id, exc_info=True)
|
| 182 |
+
logger.info("Cancelled session %s", session_id)
|
| 183 |
+
|
| 184 |
+
async def fork_session(
|
| 185 |
+
self,
|
| 186 |
+
cwd: str,
|
| 187 |
+
session_id: str,
|
| 188 |
+
mcp_servers: list | None = None,
|
| 189 |
+
**kwargs: Any,
|
| 190 |
+
) -> ForkSessionResponse:
|
| 191 |
+
state = self.session_manager.fork_session(session_id, cwd=cwd)
|
| 192 |
+
new_id = state.session_id if state else ""
|
| 193 |
+
logger.info("Forked session %s -> %s", session_id, new_id)
|
| 194 |
+
return ForkSessionResponse(session_id=new_id)
|
| 195 |
+
|
| 196 |
+
async def list_sessions(
|
| 197 |
+
self,
|
| 198 |
+
cursor: str | None = None,
|
| 199 |
+
cwd: str | None = None,
|
| 200 |
+
**kwargs: Any,
|
| 201 |
+
) -> ListSessionsResponse:
|
| 202 |
+
infos = self.session_manager.list_sessions()
|
| 203 |
+
sessions = [
|
| 204 |
+
SessionInfo(session_id=s["session_id"], cwd=s["cwd"])
|
| 205 |
+
for s in infos
|
| 206 |
+
]
|
| 207 |
+
return ListSessionsResponse(sessions=sessions)
|
| 208 |
+
|
| 209 |
+
# ---- Prompt (core) ------------------------------------------------------
|
| 210 |
+
|
| 211 |
+
    async def prompt(
        self,
        prompt: list[
            TextContentBlock
            | ImageContentBlock
            | AudioContentBlock
            | ResourceContentBlock
            | EmbeddedResourceContentBlock
        ],
        session_id: str,
        **kwargs: Any,
    ) -> PromptResponse:
        """Run Hermes on the user's prompt and stream events back to the editor.

        The synchronous AIAgent runs on a worker thread (``_executor``) while
        this coroutine stays on the event loop; the callback factories bridge
        agent events back here via ``run_coroutine_threadsafe``.
        """
        state = self.session_manager.get_session(session_id)
        if state is None:
            logger.error("prompt: session %s not found", session_id)
            return PromptResponse(stop_reason="refusal")

        user_text = _extract_text(prompt).strip()
        if not user_text:
            # Nothing actionable — treat as an empty, completed turn.
            return PromptResponse(stop_reason="end_turn")

        # Intercept slash commands — handle locally without calling the LLM.
        # A None result means "not a known command": fall through to the model.
        if user_text.startswith("/"):
            response_text = self._handle_slash_command(user_text, state)
            if response_text is not None:
                if self._conn:
                    update = acp.update_agent_message_text(response_text)
                    await self._conn.session_update(session_id, update)
                return PromptResponse(stop_reason="end_turn")

        logger.info("Prompt on session %s: %s", session_id, user_text[:100])

        conn = self._conn
        loop = asyncio.get_running_loop()

        # Clear any cancellation left over from a previous turn.
        if state.cancel_event:
            state.cancel_event.clear()

        # Shared between the tool-progress callback (pushes ids) and the step
        # callback (pops them on completion).
        tool_call_ids: dict[str, Deque[str]] = defaultdict(deque)
        previous_approval_cb = None

        if conn:
            tool_progress_cb = make_tool_progress_cb(conn, session_id, loop, tool_call_ids)
            thinking_cb = make_thinking_cb(conn, session_id, loop)
            step_cb = make_step_cb(conn, session_id, loop, tool_call_ids)
            message_cb = make_message_cb(conn, session_id, loop)
            approval_cb = make_approval_callback(conn.request_permission, loop, session_id)
        else:
            # Headless (no client attached): run silently with no callbacks.
            tool_progress_cb = None
            thinking_cb = None
            step_cb = None
            message_cb = None
            approval_cb = None

        agent = state.agent
        agent.tool_progress_callback = tool_progress_cb
        agent.thinking_callback = thinking_cb
        agent.step_callback = step_cb
        agent.message_callback = message_cb

        # Route terminal-command approvals through the ACP permission dialog
        # for the duration of this turn; the previous callback is restored in
        # _run_agent's finally block.
        # NOTE(review): the terminal tool's approval callback is module-global,
        # so concurrent prompts on different sessions would race — confirm.
        if approval_cb:
            try:
                from tools import terminal_tool as _terminal_tool
                previous_approval_cb = getattr(_terminal_tool, "_approval_callback", None)
                _terminal_tool.set_approval_callback(approval_cb)
            except Exception:
                logger.debug("Could not set ACP approval callback", exc_info=True)

        def _run_agent() -> dict:
            # Runs on a worker thread; must never raise (the error is folded
            # into a normal result dict instead).
            try:
                result = agent.run_conversation(
                    user_message=user_text,
                    conversation_history=state.history,
                    task_id=session_id,
                )
                return result
            except Exception as e:
                logger.exception("Agent error in session %s", session_id)
                return {"final_response": f"Error: {e}", "messages": state.history}
            finally:
                # Always restore the approval callback we displaced above.
                if approval_cb:
                    try:
                        from tools import terminal_tool as _terminal_tool
                        _terminal_tool.set_approval_callback(previous_approval_cb)
                    except Exception:
                        logger.debug("Could not restore approval callback", exc_info=True)

        try:
            result = await loop.run_in_executor(_executor, _run_agent)
        except Exception:
            logger.exception("Executor error for session %s", session_id)
            return PromptResponse(stop_reason="end_turn")

        if result.get("messages"):
            state.history = result["messages"]
            # Persist updated history so sessions survive process restarts.
            self.session_manager.save_session(session_id)

        # Relay the agent's final answer as one more message update.
        final_response = result.get("final_response", "")
        if final_response and conn:
            update = acp.update_agent_message_text(final_response)
            await conn.session_update(session_id, update)

        # Translate the agent's usage dict (OpenAI-style keys) to ACP Usage.
        usage = None
        usage_data = result.get("usage")
        if usage_data and isinstance(usage_data, dict):
            usage = Usage(
                input_tokens=usage_data.get("prompt_tokens", 0),
                output_tokens=usage_data.get("completion_tokens", 0),
                total_tokens=usage_data.get("total_tokens", 0),
                thought_tokens=usage_data.get("reasoning_tokens"),
                cached_read_tokens=usage_data.get("cached_tokens"),
            )

        stop_reason = "cancelled" if state.cancel_event and state.cancel_event.is_set() else "end_turn"
        return PromptResponse(stop_reason=stop_reason, usage=usage)
|
| 328 |
+
|
| 329 |
+
# ---- Slash commands (headless) -------------------------------------------
|
| 330 |
+
|
| 331 |
+
    # Command name -> human-readable description, rendered by /help.
    # Dispatch itself lives in _handle_slash_command; keep the two in sync.
    _SLASH_COMMANDS = {
        "help": "Show available commands",
        "model": "Show or change current model",
        "tools": "List available tools",
        "context": "Show conversation context info",
        "reset": "Clear conversation history",
        "compact": "Compress conversation context",
        "version": "Show Hermes version",
    }
|
| 340 |
+
|
| 341 |
+
def _handle_slash_command(self, text: str, state: SessionState) -> str | None:
|
| 342 |
+
"""Dispatch a slash command and return the response text.
|
| 343 |
+
|
| 344 |
+
Returns ``None`` for unrecognized commands so they fall through
|
| 345 |
+
to the LLM (the user may have typed ``/something`` as prose).
|
| 346 |
+
"""
|
| 347 |
+
parts = text.split(maxsplit=1)
|
| 348 |
+
cmd = parts[0].lstrip("/").lower()
|
| 349 |
+
args = parts[1].strip() if len(parts) > 1 else ""
|
| 350 |
+
|
| 351 |
+
handler = {
|
| 352 |
+
"help": self._cmd_help,
|
| 353 |
+
"model": self._cmd_model,
|
| 354 |
+
"tools": self._cmd_tools,
|
| 355 |
+
"context": self._cmd_context,
|
| 356 |
+
"reset": self._cmd_reset,
|
| 357 |
+
"compact": self._cmd_compact,
|
| 358 |
+
"version": self._cmd_version,
|
| 359 |
+
}.get(cmd)
|
| 360 |
+
|
| 361 |
+
if handler is None:
|
| 362 |
+
return None # not a known command — let the LLM handle it
|
| 363 |
+
|
| 364 |
+
try:
|
| 365 |
+
return handler(args, state)
|
| 366 |
+
except Exception as e:
|
| 367 |
+
logger.error("Slash command /%s error: %s", cmd, e, exc_info=True)
|
| 368 |
+
return f"Error executing /{cmd}: {e}"
|
| 369 |
+
|
| 370 |
+
def _cmd_help(self, args: str, state: SessionState) -> str:
|
| 371 |
+
lines = ["Available commands:", ""]
|
| 372 |
+
for cmd, desc in self._SLASH_COMMANDS.items():
|
| 373 |
+
lines.append(f" /{cmd:10s} {desc}")
|
| 374 |
+
lines.append("")
|
| 375 |
+
lines.append("Unrecognized /commands are sent to the model as normal messages.")
|
| 376 |
+
return "\n".join(lines)
|
| 377 |
+
|
| 378 |
+
def _cmd_model(self, args: str, state: SessionState) -> str:
    """Show the session's current model (no args) or switch to a new one.

    With arguments, the text is treated as a model identifier: the provider
    is auto-detected where possible, a fresh agent is built for the session,
    and the session is re-persisted so the switch survives restarts.
    """
    if not args:
        # Read path: prefer the session's own record, fall back to the agent.
        model = state.model or getattr(state.agent, "model", "unknown")
        provider = getattr(state.agent, "provider", None) or "auto"
        return f"Current model: {model}\nProvider: {provider}"

    new_model = args.strip()
    target_provider = None
    current_provider = getattr(state.agent, "provider", None) or "openrouter"

    # Auto-detect provider for the requested model. parse_model_input may
    # rewrite the model string (e.g. strip a provider prefix); if it leaves
    # the provider unchanged, a second heuristic pass tries to detect one.
    # Detection failure is non-fatal — the raw model string is used as-is.
    try:
        from hermes_cli.models import parse_model_input, detect_provider_for_model
        target_provider, new_model = parse_model_input(new_model, current_provider)
        if target_provider == current_provider:
            detected = detect_provider_for_model(new_model, current_provider)
            if detected:
                target_provider, new_model = detected
    except Exception:
        logger.debug("Provider detection failed, using model as-is", exc_info=True)

    # Rebuild the agent for the new model/provider, then persist so the
    # switch is recorded even if the process dies before the next prompt.
    state.model = new_model
    state.agent = self.session_manager._make_agent(
        session_id=state.session_id,
        cwd=state.cwd,
        model=new_model,
        requested_provider=target_provider or current_provider,
    )
    self.session_manager.save_session(state.session_id)
    # The freshly built agent's provider wins for display; fall back to what
    # detection (or the previous agent) suggested.
    provider_label = getattr(state.agent, "provider", None) or target_provider or current_provider
    logger.info("Session %s: model switched to %s", state.session_id, new_model)
    return f"Model switched to: {new_model}\nProvider: {provider_label}"
|
| 410 |
+
|
| 411 |
+
def _cmd_tools(self, args: str, state: SessionState) -> str:
|
| 412 |
+
try:
|
| 413 |
+
from model_tools import get_tool_definitions
|
| 414 |
+
toolsets = getattr(state.agent, "enabled_toolsets", None) or ["hermes-acp"]
|
| 415 |
+
tools = get_tool_definitions(enabled_toolsets=toolsets, quiet_mode=True)
|
| 416 |
+
if not tools:
|
| 417 |
+
return "No tools available."
|
| 418 |
+
lines = [f"Available tools ({len(tools)}):"]
|
| 419 |
+
for t in tools:
|
| 420 |
+
name = t.get("function", {}).get("name", "?")
|
| 421 |
+
desc = t.get("function", {}).get("description", "")
|
| 422 |
+
# Truncate long descriptions
|
| 423 |
+
if len(desc) > 80:
|
| 424 |
+
desc = desc[:77] + "..."
|
| 425 |
+
lines.append(f" {name}: {desc}")
|
| 426 |
+
return "\n".join(lines)
|
| 427 |
+
except Exception as e:
|
| 428 |
+
return f"Could not list tools: {e}"
|
| 429 |
+
|
| 430 |
+
def _cmd_context(self, args: str, state: SessionState) -> str:
|
| 431 |
+
n_messages = len(state.history)
|
| 432 |
+
if n_messages == 0:
|
| 433 |
+
return "Conversation is empty (no messages yet)."
|
| 434 |
+
# Count by role
|
| 435 |
+
roles: dict[str, int] = {}
|
| 436 |
+
for msg in state.history:
|
| 437 |
+
role = msg.get("role", "unknown")
|
| 438 |
+
roles[role] = roles.get(role, 0) + 1
|
| 439 |
+
lines = [
|
| 440 |
+
f"Conversation: {n_messages} messages",
|
| 441 |
+
f" user: {roles.get('user', 0)}, assistant: {roles.get('assistant', 0)}, "
|
| 442 |
+
f"tool: {roles.get('tool', 0)}, system: {roles.get('system', 0)}",
|
| 443 |
+
]
|
| 444 |
+
model = state.model or getattr(state.agent, "model", "")
|
| 445 |
+
if model:
|
| 446 |
+
lines.append(f"Model: {model}")
|
| 447 |
+
return "\n".join(lines)
|
| 448 |
+
|
| 449 |
+
def _cmd_reset(self, args: str, state: SessionState) -> str:
|
| 450 |
+
state.history.clear()
|
| 451 |
+
self.session_manager.save_session(state.session_id)
|
| 452 |
+
return "Conversation history cleared."
|
| 453 |
+
|
| 454 |
+
def _cmd_compact(self, args: str, state: SessionState) -> str:
|
| 455 |
+
if not state.history:
|
| 456 |
+
return "Nothing to compress — conversation is empty."
|
| 457 |
+
try:
|
| 458 |
+
agent = state.agent
|
| 459 |
+
if hasattr(agent, "compress_context"):
|
| 460 |
+
agent.compress_context(state.history)
|
| 461 |
+
self.session_manager.save_session(state.session_id)
|
| 462 |
+
return f"Context compressed. Messages: {len(state.history)}"
|
| 463 |
+
return "Context compression not available for this agent."
|
| 464 |
+
except Exception as e:
|
| 465 |
+
return f"Compression failed: {e}"
|
| 466 |
+
|
| 467 |
+
def _cmd_version(self, args: str, state: SessionState) -> str:
    """Report the running Hermes version string."""
    return "Hermes Agent v" + format(HERMES_VERSION)
|
| 469 |
+
|
| 470 |
+
# ---- Model switching (ACP protocol method) -------------------------------
|
| 471 |
+
|
| 472 |
+
async def set_session_model(
    self, model_id: str, session_id: str, **kwargs: Any
):
    """Switch the model for a session (called by ACP protocol).

    Preserves the previous agent's provider/base_url/api_mode when
    rebuilding, then persists the session. Silently a no-op when the
    session id is unknown. Always returns ``None``.
    """
    state = self.session_manager.get_session(session_id)
    if state is None:
        return None
    state.model = model_id
    prev_provider = getattr(state.agent, "provider", None)
    prev_base_url = getattr(state.agent, "base_url", None)
    prev_api_mode = getattr(state.agent, "api_mode", None)
    state.agent = self.session_manager._make_agent(
        session_id=session_id,
        cwd=state.cwd,
        model=model_id,
        requested_provider=prev_provider,
        base_url=prev_base_url,
        api_mode=prev_api_mode,
    )
    self.session_manager.save_session(session_id)
    logger.info("Session %s: model switched to %s", session_id, model_id)
    return None
|
acp_adapter/session.py
ADDED
|
@@ -0,0 +1,459 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""ACP session manager — maps ACP sessions to Hermes AIAgent instances.
|
| 2 |
+
|
| 3 |
+
Sessions are persisted to the shared SessionDB (``~/.hermes/state.db``) so they
|
| 4 |
+
survive process restarts and appear in ``session_search``. When the editor
|
| 5 |
+
reconnects after idle/restart, the ``load_session`` / ``resume_session`` calls
|
| 6 |
+
find the persisted session in the database and restore the full conversation
|
| 7 |
+
history.
|
| 8 |
+
"""
|
| 9 |
+
from __future__ import annotations
|
| 10 |
+
|
| 11 |
+
import copy
|
| 12 |
+
import json
|
| 13 |
+
import logging
|
| 14 |
+
import uuid
|
| 15 |
+
from dataclasses import dataclass, field
|
| 16 |
+
from threading import Lock
|
| 17 |
+
from typing import Any, Dict, List, Optional
|
| 18 |
+
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def _register_task_cwd(task_id: str, cwd: str) -> None:
|
| 23 |
+
"""Bind a task/session id to the editor's working directory for tools."""
|
| 24 |
+
if not task_id:
|
| 25 |
+
return
|
| 26 |
+
try:
|
| 27 |
+
from tools.terminal_tool import register_task_env_overrides
|
| 28 |
+
register_task_env_overrides(task_id, {"cwd": cwd})
|
| 29 |
+
except Exception:
|
| 30 |
+
logger.debug("Failed to register ACP task cwd override", exc_info=True)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def _clear_task_cwd(task_id: str) -> None:
|
| 34 |
+
"""Remove task-specific cwd overrides for an ACP session."""
|
| 35 |
+
if not task_id:
|
| 36 |
+
return
|
| 37 |
+
try:
|
| 38 |
+
from tools.terminal_tool import clear_task_env_overrides
|
| 39 |
+
clear_task_env_overrides(task_id)
|
| 40 |
+
except Exception:
|
| 41 |
+
logger.debug("Failed to clear ACP task cwd override", exc_info=True)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
@dataclass
class SessionState:
    """Tracks per-session state for an ACP-managed Hermes agent."""

    # Unique id for this ACP session (a uuid4 string assigned on creation).
    session_id: str
    # The backing AIAgent instance (typed Any to avoid importing run_agent here).
    agent: Any  # AIAgent instance
    # Working directory the editor bound this session to.
    cwd: str = "."
    # Current model identifier; empty string when the agent default is in use.
    model: str = ""
    # Full conversation history as role/content message dicts; persisted to
    # the SessionDB by SessionManager.
    history: List[Dict[str, Any]] = field(default_factory=list)
    # threading.Event set when the editor cancels an in-flight prompt.
    cancel_event: Any = None  # threading.Event
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class SessionManager:
    """Thread-safe manager for ACP sessions backed by Hermes AIAgent instances.

    Sessions are held in-memory for fast access **and** persisted to the
    shared SessionDB so they survive process restarts and are searchable
    via ``session_search``.
    """

    def __init__(self, agent_factory=None, db=None):
        """
        Args:
            agent_factory: Optional callable that creates an AIAgent-like object.
                           Used by tests. When omitted, a real AIAgent is created
                           using the current Hermes runtime provider configuration.
            db: Optional SessionDB instance. When omitted, the default
                SessionDB (``~/.hermes/state.db``) is lazily created.
        """
        self._sessions: Dict[str, SessionState] = {}
        self._lock = Lock()
        self._agent_factory = agent_factory
        self._db_instance = db  # None → lazy-init on first use

    # ---- public API ---------------------------------------------------------

    def create_session(self, cwd: str = ".") -> SessionState:
        """Create a new session with a unique ID and a fresh AIAgent."""
        import threading

        session_id = str(uuid.uuid4())
        agent = self._make_agent(session_id=session_id, cwd=cwd)
        state = SessionState(
            session_id=session_id,
            agent=agent,
            cwd=cwd,
            model=getattr(agent, "model", "") or "",
            cancel_event=threading.Event(),
        )
        with self._lock:
            self._sessions[session_id] = state
        _register_task_cwd(session_id, cwd)
        self._persist(state)
        logger.info("Created ACP session %s (cwd=%s)", session_id, cwd)
        return state

    def get_session(self, session_id: str) -> Optional[SessionState]:
        """Return the session for *session_id*, or ``None``.

        If the session is not in memory but exists in the database (e.g. after
        a process restart), it is transparently restored.
        """
        with self._lock:
            state = self._sessions.get(session_id)
        if state is not None:
            return state
        # Attempt to restore from database (acquires the lock itself).
        return self._restore(session_id)

    def remove_session(self, session_id: str) -> bool:
        """Remove a session from memory and database. Returns True if it existed."""
        with self._lock:
            existed = self._sessions.pop(session_id, None) is not None
        db_existed = self._delete_persisted(session_id)
        if existed or db_existed:
            _clear_task_cwd(session_id)
        return existed or db_existed

    def fork_session(self, session_id: str, cwd: str = ".") -> Optional[SessionState]:
        """Deep-copy a session's history into a new session."""
        import threading

        original = self.get_session(session_id)  # checks DB too
        if original is None:
            return None

        new_id = str(uuid.uuid4())
        agent = self._make_agent(
            session_id=new_id,
            cwd=cwd,
            model=original.model or None,
        )
        state = SessionState(
            session_id=new_id,
            agent=agent,
            cwd=cwd,
            model=getattr(agent, "model", original.model) or original.model,
            history=copy.deepcopy(original.history),
            cancel_event=threading.Event(),
        )
        with self._lock:
            self._sessions[new_id] = state
        _register_task_cwd(new_id, cwd)
        self._persist(state)
        logger.info("Forked ACP session %s -> %s", session_id, new_id)
        return state

    def list_sessions(self) -> List[Dict[str, Any]]:
        """Return lightweight info dicts for all sessions (memory + database)."""
        # Collect in-memory sessions first.
        with self._lock:
            seen_ids = set(self._sessions.keys())
            results = [
                {
                    "session_id": s.session_id,
                    "cwd": s.cwd,
                    "model": s.model,
                    "history_len": len(s.history),
                }
                for s in self._sessions.values()
            ]

        # Merge any persisted sessions not currently in memory.
        db = self._get_db()
        if db is not None:
            try:
                rows = db.search_sessions(source="acp", limit=1000)
                for row in rows:
                    sid = row["id"]
                    if sid in seen_ids:
                        continue
                    # Extract cwd from model_config JSON.
                    cwd = "."
                    mc = row.get("model_config")
                    if mc:
                        try:
                            cwd = json.loads(mc).get("cwd", ".")
                        except (json.JSONDecodeError, TypeError):
                            pass
                    results.append({
                        "session_id": sid,
                        "cwd": cwd,
                        "model": row.get("model") or "",
                        "history_len": row.get("message_count") or 0,
                    })
            except Exception:
                logger.debug("Failed to list ACP sessions from DB", exc_info=True)

        return results

    def update_cwd(self, session_id: str, cwd: str) -> Optional[SessionState]:
        """Update the working directory for a session and its tool overrides."""
        state = self.get_session(session_id)  # checks DB too
        if state is None:
            return None
        state.cwd = cwd
        _register_task_cwd(session_id, cwd)
        self._persist(state)
        return state

    def cleanup(self) -> None:
        """Remove all sessions (memory and database) and clear task-specific cwd overrides."""
        with self._lock:
            session_ids = list(self._sessions.keys())
            self._sessions.clear()
        for session_id in session_ids:
            _clear_task_cwd(session_id)
            self._delete_persisted(session_id)
        # Also remove any DB-only ACP sessions not currently in memory.
        db = self._get_db()
        if db is not None:
            try:
                rows = db.search_sessions(source="acp", limit=10000)
                for row in rows:
                    sid = row["id"]
                    _clear_task_cwd(sid)
                    db.delete_session(sid)
            except Exception:
                logger.debug("Failed to cleanup ACP sessions from DB", exc_info=True)

    def save_session(self, session_id: str) -> None:
        """Persist the current state of a session to the database.

        Called by the server after prompt completion, slash commands that
        mutate history, and model switches.
        """
        with self._lock:
            state = self._sessions.get(session_id)
            if state is not None:
                self._persist(state)

    # ---- persistence via SessionDB ------------------------------------------

    def _get_db(self):
        """Lazily initialise and return the SessionDB instance.

        Returns ``None`` if the DB is unavailable (e.g. import error in a
        minimal test environment).

        Note: we resolve ``HERMES_HOME`` dynamically rather than relying on
        the module-level ``DEFAULT_DB_PATH`` constant, because that constant
        is evaluated at import time and won't reflect env-var changes made
        later (e.g. by the test fixture ``_isolate_hermes_home``).
        """
        if self._db_instance is not None:
            return self._db_instance
        try:
            import os
            from pathlib import Path
            from hermes_state import SessionDB
            hermes_home = Path(os.getenv("HERMES_HOME", Path.home() / ".hermes"))
            self._db_instance = SessionDB(db_path=hermes_home / "state.db")
            return self._db_instance
        except Exception:
            logger.debug("SessionDB unavailable for ACP persistence", exc_info=True)
            return None

    def _persist(self, state: SessionState) -> None:
        """Write session state to the database.

        Creates the session record if it doesn't exist, then replaces all
        stored messages with the current in-memory history.
        """
        db = self._get_db()
        if db is None:
            return

        # Ensure model is a plain string (not a MagicMock or other proxy).
        model_str = str(state.model) if state.model else None
        # session_meta is what _restore() reads back: cwd plus the provider
        # binding needed to recreate an equivalent agent after a restart.
        session_meta = {"cwd": state.cwd}
        provider = getattr(state.agent, "provider", None)
        base_url = getattr(state.agent, "base_url", None)
        api_mode = getattr(state.agent, "api_mode", None)
        if isinstance(provider, str) and provider.strip():
            session_meta["provider"] = provider.strip()
        if isinstance(base_url, str) and base_url.strip():
            session_meta["base_url"] = base_url.strip()
        if isinstance(api_mode, str) and api_mode.strip():
            session_meta["api_mode"] = api_mode.strip()
        cwd_json = json.dumps(session_meta)

        try:
            # Ensure the session record exists.
            existing = db.get_session(state.session_id)
            if existing is None:
                # BUGFIX: previously only {"cwd": ...} was stored here, which
                # dropped provider/base_url/api_mode until the next update —
                # a session persisted once and restored after a restart lost
                # its provider binding. Store the full metadata, matching the
                # update branch below and what _restore() expects.
                db.create_session(
                    session_id=state.session_id,
                    source="acp",
                    model=model_str,
                    model_config=session_meta,
                )
            else:
                # Update model_config (contains cwd) if changed.
                try:
                    with db._lock:
                        db._conn.execute(
                            "UPDATE sessions SET model_config = ?, model = COALESCE(?, model) WHERE id = ?",
                            (cwd_json, model_str, state.session_id),
                        )
                        db._conn.commit()
                except Exception:
                    logger.debug("Failed to update ACP session metadata", exc_info=True)

            # Replace stored messages with current history.
            db.clear_messages(state.session_id)
            for msg in state.history:
                db.append_message(
                    session_id=state.session_id,
                    role=msg.get("role", "user"),
                    content=msg.get("content"),
                    tool_name=msg.get("tool_name") or msg.get("name"),
                    tool_calls=msg.get("tool_calls"),
                    tool_call_id=msg.get("tool_call_id"),
                )
        except Exception:
            logger.warning("Failed to persist ACP session %s", state.session_id, exc_info=True)

    def _restore(self, session_id: str) -> Optional[SessionState]:
        """Load a session from the database into memory, recreating the AIAgent."""
        import threading

        db = self._get_db()
        if db is None:
            return None

        try:
            row = db.get_session(session_id)
        except Exception:
            logger.debug("Failed to query DB for ACP session %s", session_id, exc_info=True)
            return None

        if row is None:
            return None

        # Only restore ACP sessions.
        if row.get("source") != "acp":
            return None

        # Extract cwd and provider binding from model_config (the columns are
        # a fallback for records written before model_config carried them).
        cwd = "."
        requested_provider = row.get("billing_provider")
        restored_base_url = row.get("billing_base_url")
        restored_api_mode = None
        mc = row.get("model_config")
        if mc:
            try:
                meta = json.loads(mc)
                if isinstance(meta, dict):
                    cwd = meta.get("cwd", ".")
                    requested_provider = meta.get("provider") or requested_provider
                    restored_base_url = meta.get("base_url") or restored_base_url
                    restored_api_mode = meta.get("api_mode") or restored_api_mode
            except (json.JSONDecodeError, TypeError):
                pass

        model = row.get("model") or None

        # Load conversation history.
        try:
            history = db.get_messages_as_conversation(session_id)
        except Exception:
            logger.warning("Failed to load messages for ACP session %s", session_id, exc_info=True)
            history = []

        try:
            agent = self._make_agent(
                session_id=session_id,
                cwd=cwd,
                model=model,
                requested_provider=requested_provider,
                base_url=restored_base_url,
                api_mode=restored_api_mode,
            )
        except Exception:
            logger.warning("Failed to recreate agent for ACP session %s", session_id, exc_info=True)
            return None

        state = SessionState(
            session_id=session_id,
            agent=agent,
            cwd=cwd,
            model=model or getattr(agent, "model", "") or "",
            history=history,
            cancel_event=threading.Event(),
        )
        with self._lock:
            self._sessions[session_id] = state
        _register_task_cwd(session_id, cwd)
        logger.info("Restored ACP session %s from DB (%d messages)", session_id, len(history))
        return state

    def _delete_persisted(self, session_id: str) -> bool:
        """Delete a session from the database. Returns True if it existed."""
        db = self._get_db()
        if db is None:
            return False
        try:
            return db.delete_session(session_id)
        except Exception:
            logger.debug("Failed to delete ACP session %s from DB", session_id, exc_info=True)
            return False

    # ---- internal -----------------------------------------------------------

    def _make_agent(
        self,
        *,
        session_id: str,
        cwd: str,
        model: str | None = None,
        requested_provider: str | None = None,
        base_url: str | None = None,
        api_mode: str | None = None,
    ):
        """Build an AIAgent for a session, honouring the configured provider.

        Uses the injected ``agent_factory`` when present (tests); otherwise
        resolves the Hermes runtime provider and constructs a real AIAgent.
        """
        if self._agent_factory is not None:
            return self._agent_factory()

        from run_agent import AIAgent
        from hermes_cli.config import load_config
        from hermes_cli.runtime_provider import resolve_runtime_provider

        config = load_config()
        model_cfg = config.get("model")
        default_model = "anthropic/claude-opus-4.6"
        config_provider = None
        if isinstance(model_cfg, dict):
            default_model = str(model_cfg.get("default") or default_model)
            config_provider = model_cfg.get("provider")
        elif isinstance(model_cfg, str) and model_cfg.strip():
            default_model = model_cfg.strip()

        kwargs = {
            "platform": "acp",
            "enabled_toolsets": ["hermes-acp"],
            "quiet_mode": True,
            "session_id": session_id,
            "model": model or default_model,
        }

        # Explicit per-session overrides (base_url/api_mode) win over what the
        # runtime resolver reports; a resolver failure falls back to AIAgent's
        # own defaults.
        try:
            runtime = resolve_runtime_provider(requested=requested_provider or config_provider)
            kwargs.update(
                {
                    "provider": runtime.get("provider"),
                    "api_mode": api_mode or runtime.get("api_mode"),
                    "base_url": base_url or runtime.get("base_url"),
                    "api_key": runtime.get("api_key"),
                    "command": runtime.get("command"),
                    "args": list(runtime.get("args") or []),
                }
            )
        except Exception:
            logger.debug("ACP session falling back to default provider resolution", exc_info=True)

        _register_task_cwd(session_id, cwd)
        return AIAgent(**kwargs)
|
acp_adapter/tools.py
ADDED
|
@@ -0,0 +1,215 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""ACP tool-call helpers for mapping hermes tools to ACP ToolKind and building content."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import uuid
|
| 6 |
+
from typing import Any, Dict, List, Optional
|
| 7 |
+
|
| 8 |
+
import acp
|
| 9 |
+
from acp.schema import (
|
| 10 |
+
ToolCallLocation,
|
| 11 |
+
ToolCallStart,
|
| 12 |
+
ToolCallProgress,
|
| 13 |
+
ToolKind,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
# ---------------------------------------------------------------------------
|
| 17 |
+
# Map hermes tool names -> ACP ToolKind
|
| 18 |
+
# ---------------------------------------------------------------------------
|
| 19 |
+
|
| 20 |
+
# Maps each hermes tool name to the nearest ACP ``ToolKind`` category so the
# editor can render an appropriate icon/affordance for the call. Tools not
# listed here fall back to "other" (see get_tool_kind).
TOOL_KIND_MAP: Dict[str, ToolKind] = {
    # File operations
    "read_file": "read",
    "write_file": "edit",
    "patch": "edit",
    "search_files": "search",
    # Terminal / execution
    "terminal": "execute",
    "process": "execute",
    "execute_code": "execute",
    # Web / fetch
    "web_search": "fetch",
    "web_extract": "fetch",
    # Browser
    "browser_navigate": "fetch",
    "browser_click": "execute",
    "browser_type": "execute",
    "browser_snapshot": "read",
    "browser_vision": "read",
    "browser_scroll": "execute",
    "browser_press": "execute",
    "browser_back": "execute",
    "browser_close": "execute",
    "browser_get_images": "read",
    # Agent internals
    "delegate_task": "execute",
    "vision_analyze": "read",
    "image_generate": "execute",
    "text_to_speech": "execute",
    # Thinking / meta
    "_thinking": "think",
}
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def get_tool_kind(tool_name: str) -> ToolKind:
    """Return the ACP ToolKind for a hermes tool, defaulting to 'other'."""
    try:
        return TOOL_KIND_MAP[tool_name]
    except KeyError:
        return "other"
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def make_tool_call_id() -> str:
    """Generate a unique tool call ID of the form ``tc-<12 hex chars>``."""
    return "tc-" + uuid.uuid4().hex[:12]
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def build_tool_title(tool_name: str, args: Dict[str, Any]) -> str:
    """Build a short human-readable title describing a tool invocation."""
    if tool_name == "terminal":
        command = args.get("command", "")
        if len(command) > 80:
            command = command[:77] + "..."
        return f"terminal: {command}"
    if tool_name == "read_file":
        return f"read: {args.get('path', '?')}"
    if tool_name == "write_file":
        return f"write: {args.get('path', '?')}"
    if tool_name == "patch":
        return f"patch ({args.get('mode', 'replace')}): {args.get('path', '?')}"
    if tool_name == "search_files":
        return f"search: {args.get('pattern', '?')}"
    if tool_name == "web_search":
        return f"web search: {args.get('query', '?')}"
    if tool_name == "web_extract":
        targets = args.get("urls", [])
        if not targets:
            return "web extract"
        suffix = f" (+{len(targets) - 1})" if len(targets) > 1 else ""
        return f"extract: {targets[0]}{suffix}"
    if tool_name == "delegate_task":
        goal = args.get("goal", "")
        if goal and len(goal) > 60:
            goal = goal[:57] + "..."
        return f"delegate: {goal}" if goal else "delegate task"
    if tool_name == "execute_code":
        return "execute code"
    if tool_name == "vision_analyze":
        return f"analyze image: {args.get('question', '?')[:50]}"
    # Unknown tools fall back to their raw name.
    return tool_name
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
# ---------------------------------------------------------------------------
|
| 101 |
+
# Build ACP content objects for tool-call events
|
| 102 |
+
# ---------------------------------------------------------------------------
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def build_tool_start(
    tool_call_id: str,
    tool_name: str,
    arguments: Dict[str, Any],
) -> ToolCallStart:
    """Create a ToolCallStart event describing a hermes tool invocation.

    Each known tool gets tailored content (a diff for edits, the command
    line for terminal, a short status line for reads/searches); any other
    tool falls back to pretty-printed JSON of its arguments.
    """
    kind = get_tool_kind(tool_name)
    title = build_tool_title(tool_name, arguments)
    locations = extract_locations(arguments)

    if tool_name == "patch":
        if arguments.get("mode", "replace") == "replace":
            # Replace mode carries an old/new string pair — render as a diff.
            content = [
                acp.tool_diff_content(
                    path=arguments.get("path", ""),
                    new_text=arguments.get("new_string", ""),
                    old_text=arguments.get("old_string", ""),
                )
            ]
        else:
            # Patch mode — show the patch content as text
            content = [acp.tool_content(acp.text_block(arguments.get("patch", "")))]
    elif tool_name == "write_file":
        content = [
            acp.tool_diff_content(
                path=arguments.get("path", ""),
                new_text=arguments.get("content", ""),
            )
        ]
    elif tool_name == "terminal":
        content = [acp.tool_content(acp.text_block(f"$ {arguments.get('command', '')}"))]
    elif tool_name == "read_file":
        content = [acp.tool_content(acp.text_block(f"Reading {arguments.get('path', '')}"))]
    elif tool_name == "search_files":
        pattern = arguments.get("pattern", "")
        target = arguments.get("target", "content")
        content = [acp.tool_content(acp.text_block(f"Searching for '{pattern}' ({target})"))]
    else:
        # Generic fallback: dump the arguments as JSON (stringify anything
        # non-serializable; fall back to repr on failure).
        import json
        try:
            args_text = json.dumps(arguments, indent=2, default=str)
        except (TypeError, ValueError):
            args_text = str(arguments)
        content = [acp.tool_content(acp.text_block(args_text))]

    return acp.start_tool_call(
        tool_call_id, title, kind=kind, content=content, locations=locations,
        raw_input=arguments,
    )
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def build_tool_complete(
    tool_call_id: str,
    tool_name: str,
    result: Optional[str] = None,
) -> ToolCallProgress:
    """Create a ToolCallUpdate (progress) event marking a tool call completed.

    The displayed text is capped at 5000 characters for the UI; the full
    untruncated result is still attached as raw_output.
    """
    kind = get_tool_kind(tool_name)

    display = result or ""
    if len(display) > 5000:
        # Keep the UI payload bounded; note the original size in the tail.
        display = display[:4900] + f"\n... ({len(result)} chars total, truncated)"

    return acp.update_tool_call(
        tool_call_id,
        kind=kind,
        status="completed",
        content=[acp.tool_content(acp.text_block(display))],
        raw_output=result,
    )
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
# ---------------------------------------------------------------------------
|
| 202 |
+
# Location extraction
|
| 203 |
+
# ---------------------------------------------------------------------------
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def extract_locations(
    arguments: Dict[str, Any],
) -> List[ToolCallLocation]:
    """Extract file-system locations from tool arguments.

    Returns at most one location: the "path" argument, paired with a line
    number taken from "offset" (read_file style) or "line" when present.
    """
    path = arguments.get("path")
    if not path:
        return []
    line = arguments.get("offset") or arguments.get("line")
    return [ToolCallLocation(path=path, line=line)]
|
acp_registry/agent.json
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"schema_version": 1,
|
| 3 |
+
"name": "hermes-agent",
|
| 4 |
+
"display_name": "Hermes Agent",
|
| 5 |
+
"description": "AI agent by Nous Research with 90+ tools, persistent memory, and multi-platform support",
|
| 6 |
+
"icon": "icon.svg",
|
| 7 |
+
"distribution": {
|
| 8 |
+
"type": "command",
|
| 9 |
+
"command": "hermes",
|
| 10 |
+
"args": ["acp"]
|
| 11 |
+
}
|
| 12 |
+
}
|
acp_registry/icon.svg
ADDED
|
|
agent/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Agent internals -- extracted modules from run_agent.py.
|
| 2 |
+
|
| 3 |
+
These modules contain pure utility functions and self-contained classes
|
| 4 |
+
that were previously embedded in the 3,600-line run_agent.py. Extracting
|
| 5 |
+
them makes run_agent.py focused on the AIAgent orchestrator class.
|
| 6 |
+
"""
|
agent/anthropic_adapter.py
ADDED
|
@@ -0,0 +1,1166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Anthropic Messages API adapter for Hermes Agent.
|
| 2 |
+
|
| 3 |
+
Translates between Hermes's internal OpenAI-style message format and
|
| 4 |
+
Anthropic's Messages API. Follows the same pattern as the codex_responses
|
| 5 |
+
adapter — all provider-specific logic is isolated here.
|
| 6 |
+
|
| 7 |
+
Auth supports:
|
| 8 |
+
- Regular API keys (sk-ant-api*) → x-api-key header
|
| 9 |
+
- OAuth setup-tokens (sk-ant-oat*) → Bearer auth + beta header
|
| 10 |
+
- Claude Code credentials (~/.claude.json or ~/.claude/.credentials.json) → Bearer auth
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import json
|
| 14 |
+
import logging
|
| 15 |
+
import os
|
| 16 |
+
from pathlib import Path
|
| 17 |
+
from types import SimpleNamespace
|
| 18 |
+
from typing import Any, Dict, List, Optional, Tuple
|
| 19 |
+
|
| 20 |
+
try:
|
| 21 |
+
import anthropic as _anthropic_sdk
|
| 22 |
+
except ImportError:
|
| 23 |
+
_anthropic_sdk = None # type: ignore[assignment]
|
| 24 |
+
|
| 25 |
+
logger = logging.getLogger(__name__)
|
| 26 |
+
|
| 27 |
+
THINKING_BUDGET = {"xhigh": 32000, "high": 16000, "medium": 8000, "low": 4000}
|
| 28 |
+
ADAPTIVE_EFFORT_MAP = {
|
| 29 |
+
"xhigh": "max",
|
| 30 |
+
"high": "high",
|
| 31 |
+
"medium": "medium",
|
| 32 |
+
"low": "low",
|
| 33 |
+
"minimal": "low",
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def _supports_adaptive_thinking(model: str) -> bool:
|
| 38 |
+
"""Return True for Claude 4.6 models that support adaptive thinking."""
|
| 39 |
+
return any(v in model for v in ("4-6", "4.6"))
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# Beta headers for enhanced features (sent with ALL auth types)
|
| 43 |
+
_COMMON_BETAS = [
|
| 44 |
+
"interleaved-thinking-2025-05-14",
|
| 45 |
+
"fine-grained-tool-streaming-2025-05-14",
|
| 46 |
+
]
|
| 47 |
+
|
| 48 |
+
# Additional beta headers required for OAuth/subscription auth.
|
| 49 |
+
# Matches what Claude Code (and pi-ai / OpenCode) send.
|
| 50 |
+
_OAUTH_ONLY_BETAS = [
|
| 51 |
+
"claude-code-20250219",
|
| 52 |
+
"oauth-2025-04-20",
|
| 53 |
+
]
|
| 54 |
+
|
| 55 |
+
# Claude Code identity — required for OAuth requests to be routed correctly.
|
| 56 |
+
# Without these, Anthropic's infrastructure intermittently 500s OAuth traffic.
|
| 57 |
+
# The version must stay reasonably current — Anthropic rejects OAuth requests
|
| 58 |
+
# when the spoofed user-agent version is too far behind the actual release.
|
| 59 |
+
_CLAUDE_CODE_VERSION_FALLBACK = "2.1.74"
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def _detect_claude_code_version() -> str:
    """Detect the installed Claude Code version, fall back to a static constant.

    Anthropic's OAuth infrastructure validates the user-agent version and may
    reject requests with a version that's too old. Detecting dynamically means
    users who keep Claude Code updated never hit stale-version 400s.

    Returns:
        A dotted version string such as "2.1.74" (from `claude --version`),
        or _CLAUDE_CODE_VERSION_FALLBACK when no CLI is found.
    """
    import subprocess as _sp

    for cmd in ("claude", "claude-code"):
        try:
            result = _sp.run(
                [cmd, "--version"],
                capture_output=True, text=True, timeout=5,
            )
        except (OSError, _sp.SubprocessError, ValueError):
            # Narrowed from a bare `except Exception`: missing binary
            # (OSError), timeout or other subprocess failure
            # (SubprocessError), and decode errors (ValueError). Anything
            # else would be a real bug and should surface.
            continue
        if result.returncode == 0 and result.stdout.strip():
            # Output is like "2.1.74 (Claude Code)" or just "2.1.74"
            version = result.stdout.strip().split()[0]
            if version and version[0].isdigit():
                return version
    return _CLAUDE_CODE_VERSION_FALLBACK
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
_CLAUDE_CODE_VERSION = _detect_claude_code_version()
|
| 88 |
+
_CLAUDE_CODE_SYSTEM_PREFIX = "You are Claude Code, Anthropic's official CLI for Claude."
|
| 89 |
+
_MCP_TOOL_PREFIX = "mcp_"
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def _is_oauth_token(key: str) -> bool:
|
| 93 |
+
"""Check if the key is an OAuth/setup token (not a regular Console API key).
|
| 94 |
+
|
| 95 |
+
Regular API keys start with 'sk-ant-api'. Everything else (setup-tokens
|
| 96 |
+
starting with 'sk-ant-oat', managed keys, JWTs, etc.) needs Bearer auth.
|
| 97 |
+
"""
|
| 98 |
+
if not key:
|
| 99 |
+
return False
|
| 100 |
+
# Regular Console API keys use x-api-key header
|
| 101 |
+
if key.startswith("sk-ant-api"):
|
| 102 |
+
return False
|
| 103 |
+
# Everything else (setup-tokens, managed keys, JWTs) uses Bearer auth
|
| 104 |
+
return True
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def build_anthropic_client(api_key: str, base_url: Optional[str] = None):
    """Create an Anthropic client, auto-detecting setup-tokens vs API keys.

    Args:
        api_key: A regular Console API key (sk-ant-api*) or an OAuth/setup
            token; the auth scheme is chosen automatically via _is_oauth_token.
        base_url: Optional override for the API endpoint. (Annotation fixed
            from the implicit-Optional `str = None`.)

    Returns:
        An anthropic.Anthropic instance.

    Raises:
        ImportError: If the 'anthropic' package is not installed.
    """
    if _anthropic_sdk is None:
        raise ImportError(
            "The 'anthropic' package is required for the Anthropic provider. "
            "Install it with: pip install 'anthropic>=0.39.0'"
        )
    from httpx import Timeout

    # 900s overall / 10s connect timeout for all requests.
    kwargs = {
        "timeout": Timeout(timeout=900.0, connect=10.0),
    }
    if base_url:
        kwargs["base_url"] = base_url

    if _is_oauth_token(api_key):
        # OAuth access token / setup-token → Bearer auth + Claude Code identity.
        # Anthropic routes OAuth requests based on user-agent and headers;
        # without Claude Code's fingerprint, requests get intermittent 500s.
        all_betas = _COMMON_BETAS + _OAUTH_ONLY_BETAS
        kwargs["auth_token"] = api_key
        kwargs["default_headers"] = {
            "anthropic-beta": ",".join(all_betas),
            "user-agent": f"claude-cli/{_CLAUDE_CODE_VERSION} (external, cli)",
            "x-app": "cli",
        }
    else:
        # Regular API key → x-api-key header + common betas
        kwargs["api_key"] = api_key
        if _COMMON_BETAS:
            kwargs["default_headers"] = {"anthropic-beta": ",".join(_COMMON_BETAS)}

    return _anthropic_sdk.Anthropic(**kwargs)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def read_claude_code_credentials() -> Optional[Dict[str, Any]]:
    """Read refreshable Claude Code OAuth credentials from ~/.claude/.credentials.json.

    This intentionally excludes ~/.claude.json primaryApiKey. Opencode's
    subscription flow is OAuth/setup-token based with refreshable credentials,
    and native direct Anthropic provider usage should follow that path rather
    than auto-detecting Claude's first-party managed key.

    Returns dict with {accessToken, refreshToken?, expiresAt?} or None.
    """
    cred_path = Path.home() / ".claude" / ".credentials.json"
    if not cred_path.exists():
        return None

    try:
        raw = json.loads(cred_path.read_text(encoding="utf-8"))
    except (json.JSONDecodeError, OSError, IOError) as e:
        logger.debug("Failed to read ~/.claude/.credentials.json: %s", e)
        return None

    oauth = raw.get("claudeAiOauth")
    if not (oauth and isinstance(oauth, dict)):
        return None
    token = oauth.get("accessToken", "")
    if not token:
        return None
    return {
        "accessToken": token,
        "refreshToken": oauth.get("refreshToken", ""),
        "expiresAt": oauth.get("expiresAt", 0),
        "source": "claude_code_credentials_file",
    }
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def read_claude_managed_key() -> Optional[str]:
    """Read Claude's native managed key from ~/.claude.json for diagnostics only."""
    config_path = Path.home() / ".claude.json"
    if not config_path.exists():
        return None

    try:
        payload = json.loads(config_path.read_text(encoding="utf-8"))
    except (json.JSONDecodeError, OSError, IOError) as e:
        logger.debug("Failed to read ~/.claude.json: %s", e)
        return None

    key = payload.get("primaryApiKey", "")
    if isinstance(key, str) and key.strip():
        return key.strip()
    return None
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def is_claude_code_token_valid(creds: Dict[str, Any]) -> bool:
    """Check if Claude Code credentials have a non-expired access token.

    Credentials without an "expiresAt" (managed keys) are valid whenever an
    access token is present; otherwise the expiry is compared against the
    current time with a 60-second safety buffer.
    """
    import time

    expiry_ms = creds.get("expiresAt", 0)
    if not expiry_ms:
        # No expiry set (managed keys) — valid if token is present
        return bool(creds.get("accessToken"))

    # expiresAt is milliseconds since epoch; keep 60s of slack.
    grace_ms = 60_000
    return int(time.time() * 1000) < expiry_ms - grace_ms
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def _refresh_oauth_token(creds: Dict[str, Any]) -> Optional[str]:
    """Attempt to refresh an expired Claude Code OAuth token.

    Uses the same token endpoint and client_id as Claude Code / OpenCode.
    Only works for credentials that have a refresh token (from claude /login
    or claude setup-token with OAuth flow).

    Args:
        creds: Credential dict (as returned by read_claude_code_credentials);
            only the "refreshToken" key is consumed here.

    Returns:
        The new access token, or None if refresh fails — no refresh token,
        a network/HTTP error, or a response with no access_token.
    """
    import urllib.parse
    import urllib.request

    refresh_token = creds.get("refreshToken", "")
    if not refresh_token:
        logger.debug("No refresh token available — cannot refresh")
        return None

    # Client ID used by Claude Code's OAuth flow
    CLIENT_ID = "9d1c250a-e61b-44d9-88ed-5944d1962f5e"

    # Standard OAuth2 refresh_token grant, form-encoded.
    data = urllib.parse.urlencode({
        "grant_type": "refresh_token",
        "refresh_token": refresh_token,
        "client_id": CLIENT_ID,
    }).encode()

    req = urllib.request.Request(
        "https://console.anthropic.com/v1/oauth/token",
        data=data,
        headers={
            "Content-Type": "application/x-www-form-urlencoded",
            # Present the Claude Code CLI user-agent, matching the version
            # detected at import time.
            "User-Agent": f"claude-cli/{_CLAUDE_CODE_VERSION} (external, cli)",
        },
        method="POST",
    )

    try:
        with urllib.request.urlopen(req, timeout=10) as resp:
            result = json.loads(resp.read().decode())
            new_access = result.get("access_token", "")
            # The server may rotate the refresh token; keep the old one if not.
            new_refresh = result.get("refresh_token", refresh_token)
            expires_in = result.get("expires_in", 3600)  # seconds

            if new_access:
                import time
                new_expires_ms = int(time.time() * 1000) + (expires_in * 1000)
                # Write refreshed credentials back to ~/.claude/.credentials.json
                _write_claude_code_credentials(new_access, new_refresh, new_expires_ms)
                logger.debug("Successfully refreshed Claude Code OAuth token")
                return new_access
            # No access_token in the response: fall through to the final
            # `return None` below.
    except Exception as e:
        # Best-effort refresh: any failure (network, HTTP, JSON) just logs
        # and reports "no token" to the caller.
        logger.debug("Failed to refresh Claude Code token: %s", e)

    return None
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
def _write_claude_code_credentials(access_token: str, refresh_token: str, expires_at_ms: int) -> None:
    """Write refreshed credentials back to ~/.claude/.credentials.json.

    Best-effort: any filesystem error is logged at debug level and swallowed,
    since a failed write only means the next run refreshes again.
    """
    cred_path = Path.home() / ".claude" / ".credentials.json"
    try:
        # Preserve any unrelated fields already stored in the file.
        payload = {}
        if cred_path.exists():
            payload = json.loads(cred_path.read_text(encoding="utf-8"))

        payload["claudeAiOauth"] = {
            "accessToken": access_token,
            "refreshToken": refresh_token,
            "expiresAt": expires_at_ms,
        }

        cred_path.parent.mkdir(parents=True, exist_ok=True)
        cred_path.write_text(json.dumps(payload, indent=2), encoding="utf-8")
        # Credentials file: restrict to owner read/write.
        cred_path.chmod(0o600)
    except (OSError, IOError) as e:
        logger.debug("Failed to write refreshed credentials: %s", e)
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def _resolve_claude_code_token_from_credentials(creds: Optional[Dict[str, Any]] = None) -> Optional[str]:
    """Resolve a token from Claude Code credential files, refreshing if needed."""
    creds = creds or read_claude_code_credentials()
    if not creds:
        return None

    if is_claude_code_token_valid(creds):
        logger.debug("Using Claude Code credentials (auto-detected)")
        return creds["accessToken"]

    # Token present but expired — attempt an OAuth refresh.
    logger.debug("Claude Code credentials expired — attempting refresh")
    refreshed = _refresh_oauth_token(creds)
    if refreshed:
        return refreshed
    logger.debug("Token refresh failed — re-run 'claude setup-token' to reauthenticate")
    return None
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
def _prefer_refreshable_claude_code_token(env_token: str, creds: Optional[Dict[str, Any]]) -> Optional[str]:
    """Prefer Claude Code creds when a persisted env OAuth token would shadow refresh.

    Hermes historically persisted setup tokens into ANTHROPIC_TOKEN. That makes
    later refresh impossible because the static env token wins before we ever
    inspect Claude Code's refreshable credential file. If we have a refreshable
    Claude Code credential record, prefer it over the static env OAuth token.
    """
    eligible = (
        bool(env_token)
        and _is_oauth_token(env_token)
        and isinstance(creds, dict)
        and bool(creds.get("refreshToken"))
    )
    if not eligible:
        return None

    resolved = _resolve_claude_code_token_from_credentials(creds)
    if not resolved or resolved == env_token:
        return None
    logger.debug(
        "Preferring Claude Code credential file over static env OAuth token so refresh can proceed"
    )
    return resolved
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
def get_anthropic_token_source(token: Optional[str] = None) -> str:
    """Best-effort source classification for an Anthropic credential token.

    Compares the token against each known source in priority order and
    returns a short label ("anthropic_token_env", "claude_json_primary_api_key",
    ...), "none" for an empty token, or "unknown" when no source matches.
    """
    candidate = (token or "").strip()
    if not candidate:
        return "none"

    for env_var, label in (
        ("ANTHROPIC_TOKEN", "anthropic_token_env"),
        ("CLAUDE_CODE_OAUTH_TOKEN", "claude_code_oauth_token_env"),
    ):
        value = os.getenv(env_var, "").strip()
        if value and value == candidate:
            return label

    creds = read_claude_code_credentials()
    if creds and creds.get("accessToken") == candidate:
        return str(creds.get("source") or "claude_code_credentials")

    managed_key = read_claude_managed_key()
    if managed_key and managed_key == candidate:
        return "claude_json_primary_api_key"

    api_key = os.getenv("ANTHROPIC_API_KEY", "").strip()
    if api_key and api_key == candidate:
        return "anthropic_api_key_env"

    return "unknown"
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
def resolve_anthropic_token() -> Optional[str]:
    """Resolve an Anthropic token from all available sources.

    Priority:
    1. ANTHROPIC_TOKEN env var (OAuth/setup token saved by Hermes)
    2. CLAUDE_CODE_OAUTH_TOKEN env var
    3. Hermes-managed OAuth credentials (~/.hermes/.anthropic_oauth.json)
       — refreshed automatically when expired
    4. Claude Code credential file (~/.claude/.credentials.json)
       — with automatic refresh if expired and a refresh token is available
    5. ANTHROPIC_API_KEY env var (regular API key, or legacy fallback)

    For steps 1–2, a refreshable Claude Code credential record is preferred
    over a static env OAuth token so refresh can still proceed
    (see _prefer_refreshable_claude_code_token).

    Returns the token string or None.
    """
    creds = read_claude_code_credentials()

    # 1. Hermes-managed OAuth/setup token env var
    token = os.getenv("ANTHROPIC_TOKEN", "").strip()
    if token:
        preferred = _prefer_refreshable_claude_code_token(token, creds)
        if preferred:
            return preferred
        return token

    # 2. CLAUDE_CODE_OAUTH_TOKEN (used by Claude Code for setup-tokens)
    cc_token = os.getenv("CLAUDE_CODE_OAUTH_TOKEN", "").strip()
    if cc_token:
        preferred = _prefer_refreshable_claude_code_token(cc_token, creds)
        if preferred:
            return preferred
        return cc_token

    # 3. Hermes-managed OAuth credentials (~/.hermes/.anthropic_oauth.json)
    hermes_creds = read_hermes_oauth_credentials()
    if hermes_creds:
        if is_claude_code_token_valid(hermes_creds):
            logger.debug("Using Hermes-managed OAuth credentials")
            return hermes_creds["accessToken"]
        # Expired — try refresh
        logger.debug("Hermes OAuth token expired — attempting refresh")
        refreshed = refresh_hermes_oauth_token()
        if refreshed:
            return refreshed

    # 4. Claude Code credential file
    resolved_claude_token = _resolve_claude_code_token_from_credentials(creds)
    if resolved_claude_token:
        return resolved_claude_token

    # 5. Regular API key, or a legacy OAuth token saved in ANTHROPIC_API_KEY.
    # This remains as a compatibility fallback for pre-migration Hermes configs.
    api_key = os.getenv("ANTHROPIC_API_KEY", "").strip()
    if api_key:
        return api_key

    return None
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
def run_oauth_setup_token() -> Optional[str]:
    """Run 'claude setup-token' interactively and return the resulting token.

    Checks multiple sources after the subprocess completes:
    1. Claude Code credential files (may be written by the subprocess)
    2. CLAUDE_CODE_OAUTH_TOKEN / ANTHROPIC_TOKEN env vars

    Returns the token string, or None if no credentials were obtained.
    Raises FileNotFoundError if the 'claude' CLI is not installed.
    """
    import shutil
    import subprocess

    claude_path = shutil.which("claude")
    if claude_path is None:
        raise FileNotFoundError(
            "The 'claude' CLI is not installed. "
            "Install it with: npm install -g @anthropic-ai/claude-code"
        )

    # Inherit stdin/stdout/stderr so the user can complete the flow.
    try:
        subprocess.run([claude_path, "setup-token"])
    except (KeyboardInterrupt, EOFError):
        return None

    # The subprocess may have written credentials to Claude Code's files.
    creds = read_claude_code_credentials()
    if creds and is_claude_code_token_valid(creds):
        return creds["accessToken"]

    # Otherwise fall back to env vars the flow may have populated.
    for env_var in ("CLAUDE_CODE_OAUTH_TOKEN", "ANTHROPIC_TOKEN"):
        candidate = os.getenv(env_var, "").strip()
        if candidate:
            return candidate

    return None
|
| 443 |
+
|
| 444 |
+
|
| 445 |
+
# ── Hermes-native PKCE OAuth flow ────────────────────────────────────────
|
| 446 |
+
# Mirrors the flow used by Claude Code, pi-ai, and OpenCode.
|
| 447 |
+
# Stores credentials in ~/.hermes/.anthropic_oauth.json (our own file).
|
| 448 |
+
|
| 449 |
+
_OAUTH_CLIENT_ID = "9d1c250a-e61b-44d9-88ed-5944d1962f5e"
|
| 450 |
+
_OAUTH_TOKEN_URL = "https://console.anthropic.com/v1/oauth/token"
|
| 451 |
+
_OAUTH_REDIRECT_URI = "https://console.anthropic.com/oauth/code/callback"
|
| 452 |
+
_OAUTH_SCOPES = "org:create_api_key user:profile user:inference"
|
| 453 |
+
_HERMES_OAUTH_FILE = Path(os.getenv("HERMES_HOME", str(Path.home() / ".hermes"))) / ".anthropic_oauth.json"
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
def _generate_pkce() -> tuple:
|
| 457 |
+
"""Generate PKCE code_verifier and code_challenge (S256)."""
|
| 458 |
+
import base64
|
| 459 |
+
import hashlib
|
| 460 |
+
import secrets
|
| 461 |
+
|
| 462 |
+
verifier = base64.urlsafe_b64encode(secrets.token_bytes(32)).rstrip(b"=").decode()
|
| 463 |
+
challenge = base64.urlsafe_b64encode(
|
| 464 |
+
hashlib.sha256(verifier.encode()).digest()
|
| 465 |
+
).rstrip(b"=").decode()
|
| 466 |
+
return verifier, challenge
|
| 467 |
+
|
| 468 |
+
|
| 469 |
+
def run_hermes_oauth_login() -> Optional[str]:
    """Run Hermes-native OAuth PKCE flow for Claude Pro/Max subscription.

    Opens a browser to claude.ai for authorization, prompts for the code,
    exchanges it for tokens, and stores them in ~/.hermes/.anthropic_oauth.json.

    Returns the access token on success, None on failure (user aborted,
    empty code, token exchange failed, or no access token in the response).
    """
    import time
    import webbrowser

    verifier, challenge = _generate_pkce()

    # Build authorization URL.
    # NOTE(review): "state" carries the PKCE verifier here and is echoed back
    # to the token endpoint below — presumably matching the Claude Code /
    # OpenCode flow; confirm against those clients.
    params = {
        "code": "true",
        "client_id": _OAUTH_CLIENT_ID,
        "response_type": "code",
        "redirect_uri": _OAUTH_REDIRECT_URI,
        "scope": _OAUTH_SCOPES,
        "code_challenge": challenge,
        "code_challenge_method": "S256",
        "state": verifier,
    }
    from urllib.parse import urlencode
    auth_url = f"https://claude.ai/oauth/authorize?{urlencode(params)}"

    print()
    print("Authorize Hermes with your Claude Pro/Max subscription.")
    print()
    print("╭─ Claude Pro/Max Authorization ────────────────────╮")
    print("│ │")
    print("│ Open this link in your browser: │")
    print("╰───────────────────────────────────────────────────╯")
    print()
    print(f" {auth_url}")
    print()

    # Try to open browser automatically (works on desktop, silently fails on headless/SSH)
    try:
        webbrowser.open(auth_url)
        print(" (Browser opened automatically)")
    except Exception:
        pass

    print()
    print("After authorizing, you'll see a code. Paste it below.")
    print()
    try:
        auth_code = input("Authorization code: ").strip()
    except (KeyboardInterrupt, EOFError):
        # User cancelled at the prompt — no credentials obtained.
        return None

    if not auth_code:
        print("No code entered.")
        return None

    # Split code#state format — the callback page shows "code#state"; a bare
    # code (no '#') yields an empty state.
    splits = auth_code.split("#")
    code = splits[0]
    state = splits[1] if len(splits) > 1 else ""

    # Exchange code for tokens
    try:
        import urllib.request
        exchange_data = json.dumps({
            "grant_type": "authorization_code",
            "client_id": _OAUTH_CLIENT_ID,
            "code": code,
            "state": state,
            "redirect_uri": _OAUTH_REDIRECT_URI,
            "code_verifier": verifier,
        }).encode()

        req = urllib.request.Request(
            _OAUTH_TOKEN_URL,
            data=exchange_data,
            headers={
                "Content-Type": "application/json",
                # Presents as the Claude Code CLI — NOTE(review): presumably
                # required by the endpoint; confirm.
                "User-Agent": f"claude-cli/{_CLAUDE_CODE_VERSION} (external, cli)",
            },
            method="POST",
        )

        with urllib.request.urlopen(req, timeout=15) as resp:
            result = json.loads(resp.read().decode())
    except Exception as e:
        # Broad catch is deliberate: any network/HTTP/JSON failure is reported
        # to the user and treated as "no credentials".
        print(f"Token exchange failed: {e}")
        return None

    access_token = result.get("access_token", "")
    refresh_token = result.get("refresh_token", "")
    expires_in = result.get("expires_in", 3600)

    if not access_token:
        print("No access token in response.")
        return None

    # Store credentials (expiry tracked in epoch milliseconds).
    expires_at_ms = int(time.time() * 1000) + (expires_in * 1000)
    _save_hermes_oauth_credentials(access_token, refresh_token, expires_at_ms)

    # Also write to Claude Code's credential file for backward compat
    _write_claude_code_credentials(access_token, refresh_token, expires_at_ms)

    print("Authentication successful!")
    return access_token
|
| 576 |
+
|
| 577 |
+
|
| 578 |
+
def _save_hermes_oauth_credentials(access_token: str, refresh_token: str, expires_at_ms: int) -> None:
    """Persist OAuth credentials to ~/.hermes/.anthropic_oauth.json (best effort).

    Failures are logged at debug level and swallowed — callers treat the
    on-disk cache as optional.
    """
    payload = json.dumps(
        {
            "accessToken": access_token,
            "refreshToken": refresh_token,
            "expiresAt": expires_at_ms,
        },
        indent=2,
    )
    try:
        _HERMES_OAUTH_FILE.parent.mkdir(parents=True, exist_ok=True)
        _HERMES_OAUTH_FILE.write_text(payload, encoding="utf-8")
        # Owner read/write only — the file holds bearer tokens.
        _HERMES_OAUTH_FILE.chmod(0o600)
    except (OSError, IOError) as e:
        logger.debug("Failed to save Hermes OAuth credentials: %s", e)
|
| 591 |
+
|
| 592 |
+
|
| 593 |
+
def read_hermes_oauth_credentials() -> Optional[Dict[str, Any]]:
    """Load Hermes-managed OAuth credentials from ~/.hermes/.anthropic_oauth.json.

    Returns the parsed dict when it contains a non-empty "accessToken",
    otherwise None (missing file, unreadable, bad JSON, or no token).
    """
    if not _HERMES_OAUTH_FILE.exists():
        return None
    try:
        creds = json.loads(_HERMES_OAUTH_FILE.read_text(encoding="utf-8"))
        if creds.get("accessToken"):
            return creds
    except (json.JSONDecodeError, OSError, IOError) as e:
        logger.debug("Failed to read Hermes OAuth credentials: %s", e)
    return None
|
| 603 |
+
|
| 604 |
+
|
| 605 |
+
def refresh_hermes_oauth_token() -> Optional[str]:
    """Refresh the Hermes-managed OAuth token using the stored refresh token.

    Returns the new access token, or None if refresh fails (no stored
    refresh token, network/HTTP error, or no access token in the response).
    On success the new credentials are written back to both the Hermes and
    Claude Code credential files.
    """
    import time
    import urllib.request

    creds = read_hermes_oauth_credentials()
    if not creds or not creds.get("refreshToken"):
        # Nothing to refresh with — caller must run the login flow instead.
        return None

    try:
        data = json.dumps({
            "grant_type": "refresh_token",
            "refresh_token": creds["refreshToken"],
            "client_id": _OAUTH_CLIENT_ID,
        }).encode()

        req = urllib.request.Request(
            _OAUTH_TOKEN_URL,
            data=data,
            headers={
                "Content-Type": "application/json",
                # Presents as the Claude Code CLI — NOTE(review): presumably
                # expected by the token endpoint; confirm.
                "User-Agent": f"claude-cli/{_CLAUDE_CODE_VERSION} (external, cli)",
            },
            method="POST",
        )

        with urllib.request.urlopen(req, timeout=10) as resp:
            result = json.loads(resp.read().decode())

        new_access = result.get("access_token", "")
        # The server may rotate the refresh token; keep the old one if not.
        new_refresh = result.get("refresh_token", creds["refreshToken"])
        expires_in = result.get("expires_in", 3600)

        if new_access:
            # Expiry tracked in epoch milliseconds, matching the saved format.
            new_expires_ms = int(time.time() * 1000) + (expires_in * 1000)
            _save_hermes_oauth_credentials(new_access, new_refresh, new_expires_ms)
            # Also update Claude Code's credential file
            _write_claude_code_credentials(new_access, new_refresh, new_expires_ms)
            logger.debug("Successfully refreshed Hermes OAuth token")
            return new_access
    except Exception as e:
        # Best-effort refresh: any failure is logged and reported as None.
        logger.debug("Failed to refresh Hermes OAuth token: %s", e)

    return None
|
| 652 |
+
|
| 653 |
+
|
| 654 |
+
# ---------------------------------------------------------------------------
|
| 655 |
+
# Message / tool / response format conversion
|
| 656 |
+
# ---------------------------------------------------------------------------
|
| 657 |
+
|
| 658 |
+
|
| 659 |
+
def normalize_model_name(model: str, preserve_dots: bool = False) -> str:
    """Normalize a model identifier for the Anthropic API.

    Drops a leading 'anthropic/' vendor prefix (OpenRouter style, matched
    case-insensitively) and rewrites dotted version separators to hyphens
    (claude-opus-4.6 → claude-opus-4-6). Pass preserve_dots=True to keep
    dots intact, e.g. for Alibaba/DashScope names like qwen3.5-plus.
    """
    prefix = "anthropic/"
    if model.lower().startswith(prefix):
        model = model[len(prefix):]
    # OpenRouter version numbers use dots; Anthropic's canonical IDs use hyphens.
    return model if preserve_dots else model.replace(".", "-")
|
| 675 |
+
|
| 676 |
+
|
| 677 |
+
def _sanitize_tool_id(tool_id: str) -> str:
|
| 678 |
+
"""Sanitize a tool call ID for the Anthropic API.
|
| 679 |
+
|
| 680 |
+
Anthropic requires IDs matching [a-zA-Z0-9_-]. Replace invalid
|
| 681 |
+
characters with underscores and ensure non-empty.
|
| 682 |
+
"""
|
| 683 |
+
import re
|
| 684 |
+
if not tool_id:
|
| 685 |
+
return "tool_0"
|
| 686 |
+
sanitized = re.sub(r"[^a-zA-Z0-9_-]", "_", tool_id)
|
| 687 |
+
return sanitized or "tool_0"
|
| 688 |
+
|
| 689 |
+
|
| 690 |
+
def _convert_openai_image_part_to_anthropic(part: Dict[str, Any]) -> Optional[Dict[str, Any]]:
|
| 691 |
+
"""Convert an OpenAI-style image block to Anthropic's image source format."""
|
| 692 |
+
image_data = part.get("image_url", {})
|
| 693 |
+
url = image_data.get("url", "") if isinstance(image_data, dict) else str(image_data)
|
| 694 |
+
if not isinstance(url, str) or not url.strip():
|
| 695 |
+
return None
|
| 696 |
+
url = url.strip()
|
| 697 |
+
|
| 698 |
+
if url.startswith("data:"):
|
| 699 |
+
header, sep, data = url.partition(",")
|
| 700 |
+
if sep and ";base64" in header:
|
| 701 |
+
media_type = header[5:].split(";", 1)[0] or "image/png"
|
| 702 |
+
return {
|
| 703 |
+
"type": "image",
|
| 704 |
+
"source": {
|
| 705 |
+
"type": "base64",
|
| 706 |
+
"media_type": media_type,
|
| 707 |
+
"data": data,
|
| 708 |
+
},
|
| 709 |
+
}
|
| 710 |
+
|
| 711 |
+
if url.startswith("http://") or url.startswith("https://"):
|
| 712 |
+
return {
|
| 713 |
+
"type": "image",
|
| 714 |
+
"source": {
|
| 715 |
+
"type": "url",
|
| 716 |
+
"url": url,
|
| 717 |
+
},
|
| 718 |
+
}
|
| 719 |
+
|
| 720 |
+
return None
|
| 721 |
+
|
| 722 |
+
|
| 723 |
+
def _convert_user_content_part_to_anthropic(part: Any) -> Optional[Dict[str, Any]]:
|
| 724 |
+
if isinstance(part, dict):
|
| 725 |
+
ptype = part.get("type")
|
| 726 |
+
if ptype == "text":
|
| 727 |
+
block = {"type": "text", "text": part.get("text", "")}
|
| 728 |
+
if isinstance(part.get("cache_control"), dict):
|
| 729 |
+
block["cache_control"] = dict(part["cache_control"])
|
| 730 |
+
return block
|
| 731 |
+
if ptype == "image_url":
|
| 732 |
+
return _convert_openai_image_part_to_anthropic(part)
|
| 733 |
+
if ptype == "image" and part.get("source"):
|
| 734 |
+
return dict(part)
|
| 735 |
+
if ptype == "image" and part.get("data"):
|
| 736 |
+
media_type = part.get("mimeType") or part.get("media_type") or "image/png"
|
| 737 |
+
return {
|
| 738 |
+
"type": "image",
|
| 739 |
+
"source": {
|
| 740 |
+
"type": "base64",
|
| 741 |
+
"media_type": media_type,
|
| 742 |
+
"data": part.get("data", ""),
|
| 743 |
+
},
|
| 744 |
+
}
|
| 745 |
+
if ptype == "tool_result":
|
| 746 |
+
return dict(part)
|
| 747 |
+
elif part is not None:
|
| 748 |
+
return {"type": "text", "text": str(part)}
|
| 749 |
+
return None
|
| 750 |
+
|
| 751 |
+
|
| 752 |
+
def convert_tools_to_anthropic(tools: List[Dict]) -> List[Dict]:
    """Translate OpenAI function-tool definitions into Anthropic tool specs."""
    if not tools:
        return []
    return [
        {
            "name": spec.get("name", ""),
            "description": spec.get("description", ""),
            "input_schema": spec.get("parameters", {"type": "object", "properties": {}}),
        }
        for spec in (entry.get("function", {}) for entry in tools)
    ]
|
| 765 |
+
|
| 766 |
+
|
| 767 |
+
def _image_source_from_openai_url(url: str) -> Dict[str, str]:
|
| 768 |
+
"""Convert an OpenAI-style image URL/data URL into Anthropic image source."""
|
| 769 |
+
url = str(url or "").strip()
|
| 770 |
+
if not url:
|
| 771 |
+
return {"type": "url", "url": ""}
|
| 772 |
+
|
| 773 |
+
if url.startswith("data:"):
|
| 774 |
+
header, _, data = url.partition(",")
|
| 775 |
+
media_type = "image/jpeg"
|
| 776 |
+
if header.startswith("data:"):
|
| 777 |
+
mime_part = header[len("data:"):].split(";", 1)[0].strip()
|
| 778 |
+
if mime_part.startswith("image/"):
|
| 779 |
+
media_type = mime_part
|
| 780 |
+
return {
|
| 781 |
+
"type": "base64",
|
| 782 |
+
"media_type": media_type,
|
| 783 |
+
"data": data,
|
| 784 |
+
}
|
| 785 |
+
|
| 786 |
+
return {"type": "url", "url": url}
|
| 787 |
+
|
| 788 |
+
|
| 789 |
+
def _convert_content_part_to_anthropic(part: Any) -> Optional[Dict[str, Any]]:
|
| 790 |
+
"""Convert a single OpenAI-style content part to Anthropic format."""
|
| 791 |
+
if part is None:
|
| 792 |
+
return None
|
| 793 |
+
if isinstance(part, str):
|
| 794 |
+
return {"type": "text", "text": part}
|
| 795 |
+
if not isinstance(part, dict):
|
| 796 |
+
return {"type": "text", "text": str(part)}
|
| 797 |
+
|
| 798 |
+
ptype = part.get("type")
|
| 799 |
+
|
| 800 |
+
if ptype == "input_text":
|
| 801 |
+
block: Dict[str, Any] = {"type": "text", "text": part.get("text", "")}
|
| 802 |
+
elif ptype in {"image_url", "input_image"}:
|
| 803 |
+
image_value = part.get("image_url", {})
|
| 804 |
+
url = image_value.get("url", "") if isinstance(image_value, dict) else str(image_value or "")
|
| 805 |
+
block = {"type": "image", "source": _image_source_from_openai_url(url)}
|
| 806 |
+
else:
|
| 807 |
+
block = dict(part)
|
| 808 |
+
|
| 809 |
+
if isinstance(part.get("cache_control"), dict) and "cache_control" not in block:
|
| 810 |
+
block["cache_control"] = dict(part["cache_control"])
|
| 811 |
+
return block
|
| 812 |
+
|
| 813 |
+
|
| 814 |
+
def _convert_content_to_anthropic(content: Any) -> Any:
|
| 815 |
+
"""Convert OpenAI-style multimodal content arrays to Anthropic blocks."""
|
| 816 |
+
if not isinstance(content, list):
|
| 817 |
+
return content
|
| 818 |
+
|
| 819 |
+
converted = []
|
| 820 |
+
for part in content:
|
| 821 |
+
block = _convert_content_part_to_anthropic(part)
|
| 822 |
+
if block is not None:
|
| 823 |
+
converted.append(block)
|
| 824 |
+
return converted
|
| 825 |
+
|
| 826 |
+
|
| 827 |
+
def convert_messages_to_anthropic(
    messages: List[Dict],
) -> Tuple[Optional[Any], List[Dict]]:
    """Convert OpenAI-format messages to Anthropic format.

    Returns (system_prompt, anthropic_messages).

    System messages are extracted since Anthropic takes them as a separate
    param. system_prompt is a string, or a list of content blocks when any
    system block carries a cache_control marker.

    The converted history is also repaired so Anthropic's API accepts it:
    orphaned tool_use / tool_result blocks are stripped (compression or
    truncation can sever the pair), and consecutive same-role messages are
    merged to satisfy strict role alternation.
    """
    system = None
    result = []

    for m in messages:
        role = m.get("role", "user")
        content = m.get("content", "")

        if role == "system":
            if isinstance(content, list):
                # Preserve cache_control markers on content blocks
                has_cache = any(
                    p.get("cache_control") for p in content if isinstance(p, dict)
                )
                if has_cache:
                    system = [p for p in content if isinstance(p, dict)]
                else:
                    # Fix: guard against raw strings mixed into the list —
                    # calling .get() on a non-dict raised AttributeError.
                    system = "\n".join(
                        p["text"]
                        for p in content
                        if isinstance(p, dict) and p.get("type") == "text"
                    )
            else:
                system = content
            continue

        if role == "assistant":
            blocks = []
            if content:
                if isinstance(content, list):
                    converted_content = _convert_content_to_anthropic(content)
                    if isinstance(converted_content, list):
                        blocks.extend(converted_content)
                else:
                    blocks.append({"type": "text", "text": str(content)})
            for tc in m.get("tool_calls", []):
                if not tc or not isinstance(tc, dict):
                    continue
                fn = tc.get("function", {})
                args = fn.get("arguments", "{}")
                try:
                    parsed_args = json.loads(args) if isinstance(args, str) else args
                except (json.JSONDecodeError, ValueError):
                    # Malformed argument JSON degrades to an empty input dict.
                    parsed_args = {}
                blocks.append({
                    "type": "tool_use",
                    "id": _sanitize_tool_id(tc.get("id", "")),
                    "name": fn.get("name", ""),
                    "input": parsed_args,
                })
            # Anthropic rejects empty assistant content
            effective = blocks or content
            if not effective or effective == "":
                effective = [{"type": "text", "text": "(empty)"}]
            result.append({"role": "assistant", "content": effective})
            continue

        if role == "tool":
            # Sanitize tool_use_id and ensure non-empty content
            result_content = content if isinstance(content, str) else json.dumps(content)
            if not result_content:
                result_content = "(no output)"
            tool_result = {
                "type": "tool_result",
                "tool_use_id": _sanitize_tool_id(m.get("tool_call_id", "")),
                "content": result_content,
            }
            if isinstance(m.get("cache_control"), dict):
                tool_result["cache_control"] = dict(m["cache_control"])
            # Merge consecutive tool results into one user message
            if (
                result
                and result[-1]["role"] == "user"
                and isinstance(result[-1]["content"], list)
                and result[-1]["content"]
                and result[-1]["content"][0].get("type") == "tool_result"
            ):
                result[-1]["content"].append(tool_result)
            else:
                result.append({"role": "user", "content": [tool_result]})
            continue

        # Regular user message
        if isinstance(content, list):
            converted_blocks = _convert_content_to_anthropic(content)
            result.append({
                "role": "user",
                "content": converted_blocks or [{"type": "text", "text": ""}],
            })
        else:
            result.append({"role": "user", "content": content})

    # Strip orphaned tool_use blocks (no matching tool_result follows)
    tool_result_ids = set()
    for m in result:
        if m["role"] == "user" and isinstance(m["content"], list):
            for block in m["content"]:
                if block.get("type") == "tool_result":
                    tool_result_ids.add(block.get("tool_use_id"))
    for m in result:
        if m["role"] == "assistant" and isinstance(m["content"], list):
            m["content"] = [
                b
                for b in m["content"]
                if b.get("type") != "tool_use" or b.get("id") in tool_result_ids
            ]
            if not m["content"]:
                m["content"] = [{"type": "text", "text": "(tool call removed)"}]

    # Strip orphaned tool_result blocks (no matching tool_use precedes them).
    # This is the mirror of the above: context compression or session truncation
    # can remove an assistant message containing a tool_use while leaving the
    # subsequent tool_result intact. Anthropic rejects these with a 400.
    tool_use_ids = set()
    for m in result:
        if m["role"] == "assistant" and isinstance(m["content"], list):
            for block in m["content"]:
                if block.get("type") == "tool_use":
                    tool_use_ids.add(block.get("id"))
    for m in result:
        if m["role"] == "user" and isinstance(m["content"], list):
            m["content"] = [
                b
                for b in m["content"]
                if b.get("type") != "tool_result" or b.get("tool_use_id") in tool_use_ids
            ]
            if not m["content"]:
                m["content"] = [{"type": "text", "text": "(tool result removed)"}]

    # Enforce strict role alternation (Anthropic rejects consecutive same-role messages)
    fixed = []
    for m in result:
        if fixed and fixed[-1]["role"] == m["role"]:
            if m["role"] == "user":
                # Merge consecutive user messages
                prev_content = fixed[-1]["content"]
                curr_content = m["content"]
                if isinstance(prev_content, str) and isinstance(curr_content, str):
                    fixed[-1]["content"] = prev_content + "\n" + curr_content
                elif isinstance(prev_content, list) and isinstance(curr_content, list):
                    fixed[-1]["content"] = prev_content + curr_content
                else:
                    # Mixed types — wrap string in list
                    if isinstance(prev_content, str):
                        prev_content = [{"type": "text", "text": prev_content}]
                    if isinstance(curr_content, str):
                        curr_content = [{"type": "text", "text": curr_content}]
                    fixed[-1]["content"] = prev_content + curr_content
            else:
                # Consecutive assistant messages — merge text content
                prev_blocks = fixed[-1]["content"]
                curr_blocks = m["content"]
                if isinstance(prev_blocks, list) and isinstance(curr_blocks, list):
                    fixed[-1]["content"] = prev_blocks + curr_blocks
                elif isinstance(prev_blocks, str) and isinstance(curr_blocks, str):
                    fixed[-1]["content"] = prev_blocks + "\n" + curr_blocks
                else:
                    # Mixed types — normalize both to list and merge
                    if isinstance(prev_blocks, str):
                        prev_blocks = [{"type": "text", "text": prev_blocks}]
                    if isinstance(curr_blocks, str):
                        curr_blocks = [{"type": "text", "text": curr_blocks}]
                    fixed[-1]["content"] = prev_blocks + curr_blocks
        else:
            fixed.append(m)
    result = fixed

    return system, result
|
| 1001 |
+
|
| 1002 |
+
|
| 1003 |
+
def build_anthropic_kwargs(
    model: str,
    messages: List[Dict],
    tools: Optional[List[Dict]],
    max_tokens: Optional[int],
    reasoning_config: Optional[Dict[str, Any]],
    tool_choice: Optional[str] = None,
    is_oauth: bool = False,
    preserve_dots: bool = False,
) -> Dict[str, Any]:
    """Build kwargs for anthropic.messages.create().

    When *is_oauth* is True, applies Claude Code compatibility transforms:
    system prompt prefix, tool name prefixing, and prompt sanitization.

    When *preserve_dots* is True, model name dots are not converted to hyphens
    (for Alibaba/DashScope anthropic-compatible endpoints: qwen3.5-plus).
    """
    system, anthropic_messages = convert_messages_to_anthropic(messages)
    anthropic_tools = convert_tools_to_anthropic(tools) if tools else []

    model = normalize_model_name(model, preserve_dots=preserve_dots)
    # Default output budget when the caller does not supply one.
    effective_max_tokens = max_tokens or 16384

    # ── OAuth: Claude Code identity ──────────────────────────────────
    if is_oauth:
        # 1. Prepend Claude Code system prompt identity
        cc_block = {"type": "text", "text": _CLAUDE_CODE_SYSTEM_PREFIX}
        if isinstance(system, list):
            system = [cc_block] + system
        elif isinstance(system, str) and system:
            system = [cc_block, {"type": "text", "text": system}]
        else:
            # No system prompt at all — the identity block stands alone.
            system = [cc_block]

        # 2. Sanitize system prompt — replace product name references
        # to avoid Anthropic's server-side content filters.
        # (system is guaranteed to be a list of blocks at this point.)
        for block in system:
            if isinstance(block, dict) and block.get("type") == "text":
                text = block.get("text", "")
                text = text.replace("Hermes Agent", "Claude Code")
                text = text.replace("Hermes agent", "Claude Code")
                text = text.replace("hermes-agent", "claude-code")
                text = text.replace("Nous Research", "Anthropic")
                block["text"] = text

        # 3. Prefix tool names with mcp_ (Claude Code convention)
        if anthropic_tools:
            for tool in anthropic_tools:
                if "name" in tool:
                    tool["name"] = _MCP_TOOL_PREFIX + tool["name"]

        # 4. Prefix tool names in message history (tool_use and tool_result blocks)
        # The startswith guard keeps this idempotent across retries.
        for msg in anthropic_messages:
            content = msg.get("content")
            if isinstance(content, list):
                for block in content:
                    if isinstance(block, dict):
                        if block.get("type") == "tool_use" and "name" in block:
                            if not block["name"].startswith(_MCP_TOOL_PREFIX):
                                block["name"] = _MCP_TOOL_PREFIX + block["name"]
                        elif block.get("type") == "tool_result" and "tool_use_id" in block:
                            pass  # tool_result uses ID, not name

    kwargs: Dict[str, Any] = {
        "model": model,
        "messages": anthropic_messages,
        "max_tokens": effective_max_tokens,
    }

    if system:
        kwargs["system"] = system

    if anthropic_tools:
        kwargs["tools"] = anthropic_tools
        # Map OpenAI tool_choice to Anthropic format
        if tool_choice == "auto" or tool_choice is None:
            kwargs["tool_choice"] = {"type": "auto"}
        elif tool_choice == "required":
            kwargs["tool_choice"] = {"type": "any"}
        elif tool_choice == "none":
            # Anthropic has no tool_choice "none" — omit tools entirely to prevent use
            kwargs.pop("tools", None)
        elif isinstance(tool_choice, str):
            # Specific tool name
            kwargs["tool_choice"] = {"type": "tool", "name": tool_choice}

    # Map reasoning_config to Anthropic's thinking parameter.
    # Claude 4.6 models use adaptive thinking + output_config.effort.
    # Older models use manual thinking with budget_tokens.
    # Haiku models do NOT support extended thinking at all — skip entirely.
    if reasoning_config and isinstance(reasoning_config, dict):
        if reasoning_config.get("enabled") is not False and "haiku" not in model.lower():
            effort = str(reasoning_config.get("effort", "medium")).lower()
            # Fall back to an 8000-token budget for unknown effort levels.
            budget = THINKING_BUDGET.get(effort, 8000)
            if _supports_adaptive_thinking(model):
                kwargs["thinking"] = {"type": "adaptive"}
                kwargs["output_config"] = {
                    "effort": ADAPTIVE_EFFORT_MAP.get(effort, "medium")
                }
            else:
                kwargs["thinking"] = {"type": "enabled", "budget_tokens": budget}
                # Anthropic requires temperature=1 when thinking is enabled on older models
                kwargs["temperature"] = 1
                # Ensure the output budget leaves headroom beyond the thinking budget.
                kwargs["max_tokens"] = max(effective_max_tokens, budget + 4096)

    return kwargs
|
| 1110 |
+
|
| 1111 |
+
|
| 1112 |
+
def normalize_anthropic_response(
    response,
    strip_tool_prefix: bool = False,
) -> Tuple[SimpleNamespace, str]:
    """Adapt an Anthropic Messages response to the shape AIAgent expects.

    Returns (assistant_message, finish_reason). The message namespace carries
    .content, .tool_calls, and .reasoning (with reasoning_content and
    reasoning_details set to None). When *strip_tool_prefix* is True, the
    ``mcp_`` prefix added for OAuth Claude Code compatibility is removed
    from tool-call names.
    """
    texts: list = []
    thoughts: list = []
    calls: list = []

    for item in response.content:
        kind = item.type
        if kind == "text":
            texts.append(item.text)
        elif kind == "thinking":
            thoughts.append(item.thinking)
        elif kind == "tool_use":
            tool_name = item.name
            # Only consult _MCP_TOOL_PREFIX when stripping was requested.
            if strip_tool_prefix and tool_name.startswith(_MCP_TOOL_PREFIX):
                tool_name = tool_name[len(_MCP_TOOL_PREFIX):]
            calls.append(
                SimpleNamespace(
                    id=item.id,
                    type="function",
                    function=SimpleNamespace(
                        name=tool_name,
                        arguments=json.dumps(item.input),
                    ),
                )
            )

    # Translate Anthropic stop_reason into OpenAI finish_reason vocabulary.
    finish_map = {
        "end_turn": "stop",
        "tool_use": "tool_calls",
        "max_tokens": "length",
        "stop_sequence": "stop",
    }

    message = SimpleNamespace(
        content="\n".join(texts) if texts else None,
        tool_calls=calls or None,
        reasoning="\n\n".join(thoughts) if thoughts else None,
        reasoning_content=None,
        reasoning_details=None,
    )
    return message, finish_map.get(response.stop_reason, "stop")
|
agent/auxiliary_client.py
ADDED
|
@@ -0,0 +1,1627 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Shared auxiliary client router for side tasks.
|
| 2 |
+
|
| 3 |
+
Provides a single resolution chain so every consumer (context compression,
|
| 4 |
+
session search, web extraction, vision analysis, browser vision) picks up
|
| 5 |
+
the best available backend without duplicating fallback logic.
|
| 6 |
+
|
| 7 |
+
Resolution order for text tasks (auto mode):
|
| 8 |
+
1. OpenRouter (OPENROUTER_API_KEY)
|
| 9 |
+
2. Nous Portal (~/.hermes/auth.json active provider)
|
| 10 |
+
3. Custom endpoint (OPENAI_BASE_URL + OPENAI_API_KEY)
|
| 11 |
+
4. Codex OAuth (Responses API via chatgpt.com with gpt-5.3-codex,
|
| 12 |
+
wrapped to look like a chat.completions client)
|
| 13 |
+
5. Native Anthropic
|
| 14 |
+
6. Direct API-key providers (z.ai/GLM, Kimi/Moonshot, MiniMax, MiniMax-CN)
|
| 15 |
+
7. None
|
| 16 |
+
|
| 17 |
+
Resolution order for vision/multimodal tasks (auto mode):
|
| 18 |
+
1. Selected main provider, if it is one of the supported vision backends below
|
| 19 |
+
2. OpenRouter
|
| 20 |
+
3. Nous Portal
|
| 21 |
+
4. Codex OAuth (gpt-5.3-codex supports vision via Responses API)
|
| 22 |
+
5. Native Anthropic
|
| 23 |
+
6. Custom endpoint (for local vision models: Qwen-VL, LLaVA, Pixtral, etc.)
|
| 24 |
+
7. None
|
| 25 |
+
|
| 26 |
+
Per-task provider overrides (e.g. AUXILIARY_VISION_PROVIDER,
|
| 27 |
+
CONTEXT_COMPRESSION_PROVIDER) can force a specific provider for each task.
|
| 28 |
+
Default "auto" follows the chains above.
|
| 29 |
+
|
| 30 |
+
Per-task model overrides (e.g. AUXILIARY_VISION_MODEL,
|
| 31 |
+
AUXILIARY_WEB_EXTRACT_MODEL) let callers use a different model slug
|
| 32 |
+
than the provider's default.
|
| 33 |
+
|
| 34 |
+
Per-task direct endpoint overrides (e.g. AUXILIARY_VISION_BASE_URL,
|
| 35 |
+
AUXILIARY_VISION_API_KEY) let callers route a specific auxiliary task to a
|
| 36 |
+
custom OpenAI-compatible endpoint without touching the main model settings.
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
import json
|
| 40 |
+
import logging
|
| 41 |
+
import os
|
| 42 |
+
import threading
|
| 43 |
+
import time
|
| 44 |
+
from pathlib import Path
|
| 45 |
+
from types import SimpleNamespace
|
| 46 |
+
from typing import Any, Dict, List, Optional, Tuple
|
| 47 |
+
|
| 48 |
+
from openai import OpenAI
|
| 49 |
+
|
| 50 |
+
from hermes_cli.config import get_hermes_home
|
| 51 |
+
from hermes_constants import OPENROUTER_BASE_URL
|
| 52 |
+
|
| 53 |
+
logger = logging.getLogger(__name__)
|
| 54 |
+
|
| 55 |
+
# Default auxiliary models for direct API-key providers (cheap/fast for side tasks)
# Keyed by Hermes provider id; the value is the model slug requested from that
# provider when it backs an auxiliary task.
_API_KEY_PROVIDER_AUX_MODELS: Dict[str, str] = {
    "zai": "glm-4.5-flash",
    "kimi-coding": "kimi-k2-turbo-preview",
    "minimax": "MiniMax-M2.7-highspeed",
    "minimax-cn": "MiniMax-M2.7-highspeed",
    "anthropic": "claude-haiku-4-5-20251001",
    "ai-gateway": "google/gemini-3-flash",
    "opencode-zen": "gemini-3-flash",
    "opencode-go": "glm-5",
    "kilocode": "google/gemini-3-flash-preview",
}

# OpenRouter app attribution headers
# Sent on every OpenRouter request so usage is attributed to Hermes Agent.
_OR_HEADERS = {
    "HTTP-Referer": "https://hermes-agent.nousresearch.com",
    "X-OpenRouter-Title": "Hermes Agent",
    "X-OpenRouter-Categories": "productivity,cli-agent",
}

# Nous Portal extra_body for product attribution.
# Callers should pass this as extra_body in chat.completions.create()
# when the auxiliary client is backed by Nous Portal.
NOUS_EXTRA_BODY = {"tags": ["product=hermes-agent"]}

# Set at resolve time — True if the auxiliary client points to Nous Portal
auxiliary_is_nous: bool = False

# Default auxiliary models per provider
_OPENROUTER_MODEL = "google/gemini-3-flash-preview"
_NOUS_MODEL = "gemini-3-flash"
_NOUS_DEFAULT_BASE_URL = "https://inference-api.nousresearch.com/v1"
_ANTHROPIC_DEFAULT_BASE_URL = "https://api.anthropic.com"
# Credential store shared with the Hermes CLI (active provider + tokens).
_AUTH_JSON_PATH = get_hermes_home() / "auth.json"

# Codex fallback: uses the Responses API (the only endpoint the Codex
# OAuth token can access) with a fast model for auxiliary tasks.
# ChatGPT-backed Codex accounts currently reject gpt-5.3-codex for these
# auxiliary flows, while gpt-5.2-codex remains broadly available and supports
# vision via Responses.
_CODEX_AUX_MODEL = "gpt-5.2-codex"
_CODEX_AUX_BASE_URL = "https://chatgpt.com/backend-api/codex"
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
# ── Codex Responses → chat.completions adapter ─────────────────────────────
|
| 100 |
+
# All auxiliary consumers call client.chat.completions.create(**kwargs) and
|
| 101 |
+
# read response.choices[0].message.content. This adapter translates those
|
| 102 |
+
# calls to the Codex Responses API so callers don't need any changes.
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def _convert_content_for_responses(content: Any) -> Any:
|
| 106 |
+
"""Convert chat.completions content to Responses API format.
|
| 107 |
+
|
| 108 |
+
chat.completions uses:
|
| 109 |
+
{"type": "text", "text": "..."}
|
| 110 |
+
{"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}}
|
| 111 |
+
|
| 112 |
+
Responses API uses:
|
| 113 |
+
{"type": "input_text", "text": "..."}
|
| 114 |
+
{"type": "input_image", "image_url": "data:image/png;base64,..."}
|
| 115 |
+
|
| 116 |
+
If content is a plain string, it's returned as-is (the Responses API
|
| 117 |
+
accepts strings directly for text-only messages).
|
| 118 |
+
"""
|
| 119 |
+
if isinstance(content, str):
|
| 120 |
+
return content
|
| 121 |
+
if not isinstance(content, list):
|
| 122 |
+
return str(content) if content else ""
|
| 123 |
+
|
| 124 |
+
converted: List[Dict[str, Any]] = []
|
| 125 |
+
for part in content:
|
| 126 |
+
if not isinstance(part, dict):
|
| 127 |
+
continue
|
| 128 |
+
ptype = part.get("type", "")
|
| 129 |
+
if ptype == "text":
|
| 130 |
+
converted.append({"type": "input_text", "text": part.get("text", "")})
|
| 131 |
+
elif ptype == "image_url":
|
| 132 |
+
# chat.completions nests the URL: {"image_url": {"url": "..."}}
|
| 133 |
+
image_data = part.get("image_url", {})
|
| 134 |
+
url = image_data.get("url", "") if isinstance(image_data, dict) else str(image_data)
|
| 135 |
+
entry: Dict[str, Any] = {"type": "input_image", "image_url": url}
|
| 136 |
+
# Preserve detail if specified
|
| 137 |
+
detail = image_data.get("detail") if isinstance(image_data, dict) else None
|
| 138 |
+
if detail:
|
| 139 |
+
entry["detail"] = detail
|
| 140 |
+
converted.append(entry)
|
| 141 |
+
elif ptype in ("input_text", "input_image"):
|
| 142 |
+
# Already in Responses format — pass through
|
| 143 |
+
converted.append(part)
|
| 144 |
+
else:
|
| 145 |
+
# Unknown content type — try to preserve as text
|
| 146 |
+
text = part.get("text", "")
|
| 147 |
+
if text:
|
| 148 |
+
converted.append({"type": "input_text", "text": text})
|
| 149 |
+
|
| 150 |
+
return converted or ""
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
class _CodexCompletionsAdapter:
    """Drop-in shim that accepts chat.completions.create() kwargs and
    routes them through the Codex Responses streaming API.

    Consumers keep calling ``create(model=..., messages=..., tools=...)``
    and reading ``response.choices[0].message.content`` /
    ``.message.tool_calls`` exactly as they would with chat.completions.
    """

    def __init__(self, real_client: OpenAI, model: str):
        # real_client: OpenAI client pointed at the Codex backend.
        # model: default model slug when the caller omits "model".
        self._client = real_client
        self._model = model

    def create(self, **kwargs) -> Any:
        """Run one chat.completions-style request via the Responses API.

        Honored kwargs: ``messages``, ``model``, ``tools``. Sampling
        parameters (temperature, max_tokens, ...) are intentionally dropped —
        the Codex endpoint (chatgpt.com/backend-api/codex) rejects them with
        a 400. Returns a SimpleNamespace shaped like a ChatCompletion.
        Re-raises any failure from the underlying Responses call.
        """
        messages = kwargs.get("messages", [])
        model = kwargs.get("model", self._model)

        # Separate system/instructions from conversation messages.
        # Convert chat.completions multimodal content blocks to Responses
        # API format (input_text / input_image instead of text / image_url).
        instructions = "You are a helpful assistant."
        input_msgs: List[Dict[str, Any]] = []
        for msg in messages:
            role = msg.get("role", "user")
            content = msg.get("content") or ""
            if role == "system":
                instructions = content if isinstance(content, str) else str(content)
            else:
                input_msgs.append({
                    "role": role,
                    "content": _convert_content_for_responses(content),
                })

        resp_kwargs: Dict[str, Any] = {
            "model": model,
            "instructions": instructions,
            # Responses requires at least one input message.
            "input": input_msgs or [{"role": "user", "content": ""}],
            "store": False,
        }

        # Tools support for flush_memories and similar callers — Responses
        # expects flattened function tools, not the chat.completions nesting.
        tools = kwargs.get("tools")
        if tools:
            converted = []
            for t in tools:
                fn = t.get("function", {}) if isinstance(t, dict) else {}
                name = fn.get("name")
                if not name:
                    continue
                converted.append({
                    "type": "function",
                    "name": name,
                    "description": fn.get("description", ""),
                    "parameters": fn.get("parameters", {}),
                })
            if converted:
                resp_kwargs["tools"] = converted

        # Stream and collect the response.
        text_parts: List[str] = []
        tool_calls_raw: List[Any] = []
        usage = None

        try:
            with self._client.responses.stream(**resp_kwargs) as stream:
                for _event in stream:
                    pass  # drain events; only the final response is needed
                final = stream.get_final_response()

                # Extract text and tool calls from the Responses output.
                for item in getattr(final, "output", []):
                    item_type = getattr(item, "type", None)
                    if item_type == "message":
                        for part in getattr(item, "content", []):
                            ptype = getattr(part, "type", None)
                            if ptype in ("output_text", "text"):
                                text_parts.append(getattr(part, "text", ""))
                    elif item_type == "function_call":
                        tool_calls_raw.append(SimpleNamespace(
                            id=getattr(item, "call_id", ""),
                            type="function",
                            function=SimpleNamespace(
                                name=getattr(item, "name", ""),
                                arguments=getattr(item, "arguments", "{}"),
                            ),
                        ))

                resp_usage = getattr(final, "usage", None)
                if resp_usage:
                    # Map Responses usage names to chat.completions names.
                    usage = SimpleNamespace(
                        prompt_tokens=getattr(resp_usage, "input_tokens", 0),
                        completion_tokens=getattr(resp_usage, "output_tokens", 0),
                        total_tokens=getattr(resp_usage, "total_tokens", 0),
                    )
        except Exception as exc:
            logger.debug("Codex auxiliary Responses API call failed: %s", exc)
            raise

        content = "".join(text_parts).strip() or None

        # Build a response that looks like chat.completions.
        message = SimpleNamespace(
            role="assistant",
            content=content,
            tool_calls=tool_calls_raw or None,
        )
        choice = SimpleNamespace(
            index=0,
            message=message,
            finish_reason="stop" if not tool_calls_raw else "tool_calls",
        )
        return SimpleNamespace(
            choices=[choice],
            model=model,
            usage=usage,
        )
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
class _CodexChatShim:
    """Wraps the adapter to provide client.chat.completions.create()."""

    def __init__(self, adapter: _CodexCompletionsAdapter):
        # Exposed so callers can reach adapter.create() via the familiar
        # attribute path: client.chat.completions.create(...)
        self.completions = adapter
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
class CodexAuxiliaryClient:
    """OpenAI-client-compatible wrapper that routes through Codex Responses API.

    Consumers can call client.chat.completions.create(**kwargs) as normal.
    Also exposes .api_key and .base_url for introspection by async wrappers.
    """

    def __init__(self, real_client: OpenAI, model: str):
        # Keep a handle on the real client so close() can release it.
        self._real_client = real_client
        adapter = _CodexCompletionsAdapter(real_client, model)
        self.chat = _CodexChatShim(adapter)
        # Mirror the OpenAI client surface for callers that introspect these.
        self.api_key = real_client.api_key
        self.base_url = real_client.base_url

    def close(self):
        # Release the underlying HTTP client's resources.
        self._real_client.close()
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
class _AsyncCodexCompletionsAdapter:
    """Async version of the Codex Responses adapter.

    Wraps the sync adapter via asyncio.to_thread() so async consumers
    (web_tools, session_search) can await it as normal.
    """

    def __init__(self, sync_adapter: _CodexCompletionsAdapter):
        self._sync = sync_adapter

    async def create(self, **kwargs) -> Any:
        # Local import keeps asyncio out of the hot sync import path.
        import asyncio
        # Run the blocking sync adapter off the event-loop thread.
        return await asyncio.to_thread(self._sync.create, **kwargs)
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
class _AsyncCodexChatShim:
    """Provides the client.chat.completions attribute path for the async adapter."""

    def __init__(self, adapter: _AsyncCodexCompletionsAdapter):
        self.completions = adapter
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
class AsyncCodexAuxiliaryClient:
    """Async-compatible wrapper matching AsyncOpenAI.chat.completions.create()."""

    def __init__(self, sync_wrapper: "CodexAuxiliaryClient"):
        # Reuse the sync wrapper's adapter; only the calling convention changes.
        sync_adapter = sync_wrapper.chat.completions
        async_adapter = _AsyncCodexCompletionsAdapter(sync_adapter)
        self.chat = _AsyncCodexChatShim(async_adapter)
        # Mirror introspection attributes from the sync wrapper.
        self.api_key = sync_wrapper.api_key
        self.base_url = sync_wrapper.base_url
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
class _AnthropicCompletionsAdapter:
    """OpenAI-client-compatible adapter for Anthropic Messages API."""

    def __init__(self, real_client: Any, model: str, is_oauth: bool = False):
        # real_client: Anthropic SDK client (typed Any to avoid a hard import).
        self._client = real_client
        # model: default model slug when the caller omits "model".
        self._model = model
        # is_oauth: forwarded to build_anthropic_kwargs, which shapes the
        # request differently for OAuth-authenticated accounts.
        self._is_oauth = is_oauth

    def create(self, **kwargs) -> Any:
        """Translate chat.completions.create() kwargs into messages.create()
        and reshape the result to look like a ChatCompletion
        (choices[0].message / finish_reason / usage)."""
        # Imported lazily so this module loads without the Anthropic adapter.
        from agent.anthropic_adapter import build_anthropic_kwargs, normalize_anthropic_response

        messages = kwargs.get("messages", [])
        model = kwargs.get("model", self._model)
        tools = kwargs.get("tools")
        tool_choice = kwargs.get("tool_choice")
        # Anthropic requires max_tokens; honor either OpenAI spelling,
        # falling back to a modest default for side tasks.
        max_tokens = kwargs.get("max_tokens") or kwargs.get("max_completion_tokens") or 2000
        temperature = kwargs.get("temperature")

        # Normalize OpenAI-style tool_choice (string or nested dict) to the
        # simple form build_anthropic_kwargs expects: a function name or one
        # of "auto" / "required" / "none". Anything else stays None.
        normalized_tool_choice = None
        if isinstance(tool_choice, str):
            normalized_tool_choice = tool_choice
        elif isinstance(tool_choice, dict):
            choice_type = str(tool_choice.get("type", "")).lower()
            if choice_type == "function":
                normalized_tool_choice = tool_choice.get("function", {}).get("name")
            elif choice_type in {"auto", "required", "none"}:
                normalized_tool_choice = choice_type

        anthropic_kwargs = build_anthropic_kwargs(
            model=model,
            messages=messages,
            tools=tools,
            max_tokens=max_tokens,
            reasoning_config=None,
            tool_choice=normalized_tool_choice,
            is_oauth=self._is_oauth,
        )
        if temperature is not None:
            anthropic_kwargs["temperature"] = temperature

        response = self._client.messages.create(**anthropic_kwargs)
        assistant_message, finish_reason = normalize_anthropic_response(response)

        # Map Anthropic usage field names to chat.completions names; derive
        # total_tokens when the SDK doesn't provide it.
        usage = None
        if hasattr(response, "usage") and response.usage:
            prompt_tokens = getattr(response.usage, "input_tokens", 0) or 0
            completion_tokens = getattr(response.usage, "output_tokens", 0) or 0
            total_tokens = getattr(response.usage, "total_tokens", 0) or (prompt_tokens + completion_tokens)
            usage = SimpleNamespace(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=total_tokens,
            )

        choice = SimpleNamespace(
            index=0,
            message=assistant_message,
            finish_reason=finish_reason,
        )
        return SimpleNamespace(
            choices=[choice],
            model=model,
            usage=usage,
        )
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
class _AnthropicChatShim:
    """Provides the client.chat.completions attribute path for the Anthropic adapter."""

    def __init__(self, adapter: _AnthropicCompletionsAdapter):
        self.completions = adapter
|
| 395 |
+
|
| 396 |
+
|
| 397 |
+
class AnthropicAuxiliaryClient:
    """OpenAI-client-compatible wrapper over a native Anthropic client.

    Exposes .chat.completions.create() plus the .api_key / .base_url
    attributes that async wrappers introspect.
    """

    def __init__(self, real_client: Any, model: str, api_key: str, base_url: str, is_oauth: bool = False):
        # Hold the underlying client so close() can release it later.
        self._real_client = real_client
        self.chat = _AnthropicChatShim(
            _AnthropicCompletionsAdapter(real_client, model, is_oauth=is_oauth)
        )
        self.api_key = api_key
        self.base_url = base_url

    def close(self):
        """Release the underlying client if it exposes a close() method."""
        closer = getattr(self._real_client, "close", None)
        if callable(closer):
            closer()
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
class _AsyncAnthropicCompletionsAdapter:
    """Async wrapper over the sync Anthropic adapter (runs it on a thread)."""

    def __init__(self, sync_adapter: _AnthropicCompletionsAdapter):
        self._sync = sync_adapter

    async def create(self, **kwargs) -> Any:
        # Local import keeps asyncio out of the hot sync import path.
        import asyncio
        # Run the blocking sync adapter off the event-loop thread.
        return await asyncio.to_thread(self._sync.create, **kwargs)
|
| 420 |
+
|
| 421 |
+
|
| 422 |
+
class _AsyncAnthropicChatShim:
    """Provides the client.chat.completions attribute path for the async Anthropic adapter."""

    def __init__(self, adapter: _AsyncAnthropicCompletionsAdapter):
        self.completions = adapter
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
class AsyncAnthropicAuxiliaryClient:
    """Async-compatible wrapper over AnthropicAuxiliaryClient.

    Mirrors AsyncOpenAI's chat.completions.create() surface by delegating
    to the sync adapter on a worker thread.
    """

    def __init__(self, sync_wrapper: "AnthropicAuxiliaryClient"):
        # Reuse the sync wrapper's adapter; only the calling convention changes.
        sync_adapter = sync_wrapper.chat.completions
        async_adapter = _AsyncAnthropicCompletionsAdapter(sync_adapter)
        self.chat = _AsyncAnthropicChatShim(async_adapter)
        # Mirror introspection attributes from the sync wrapper.
        self.api_key = sync_wrapper.api_key
        self.base_url = sync_wrapper.base_url
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
def _read_nous_auth() -> Optional[dict]:
    """Read and validate ~/.hermes/auth.json for an active Nous provider.

    Returns the Nous provider state dict when Nous is the active provider
    and carries usable credentials (agent_key or access_token); otherwise
    None. Any read/parse failure is logged at debug level and treated as
    "not configured".
    """
    try:
        if not _AUTH_JSON_PATH.is_file():
            return None
        auth_state = json.loads(_AUTH_JSON_PATH.read_text())
        if auth_state.get("active_provider") != "nous":
            return None
        nous_state = auth_state.get("providers", {}).get("nous", {})
        # Usable only with at least an agent_key or an access_token.
        has_credentials = bool(nous_state.get("agent_key") or nous_state.get("access_token"))
        return nous_state if has_credentials else None
    except Exception as exc:
        logger.debug("Could not read Nous auth: %s", exc)
        return None
|
| 456 |
+
|
| 457 |
+
|
| 458 |
+
def _nous_api_key(provider: dict) -> str:
|
| 459 |
+
"""Extract the best API key from a Nous provider state dict."""
|
| 460 |
+
return provider.get("agent_key") or provider.get("access_token", "")
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
def _nous_base_url() -> str:
    """Resolve the Nous inference base URL; env override wins over default."""
    override = os.getenv("NOUS_INFERENCE_BASE_URL")
    return _NOUS_DEFAULT_BASE_URL if override is None else override
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
def _read_codex_access_token() -> Optional[str]:
    """Read a valid, non-expired Codex OAuth access token from Hermes auth store.

    Returns the stripped token string, or None when the store is missing,
    the token is empty/non-string, or the token is a JWT whose ``exp`` claim
    has passed. Any failure reading the store is logged and treated as None.
    """
    try:
        from hermes_cli.auth import _read_codex_tokens
        data = _read_codex_tokens()
        tokens = data.get("tokens", {})
        access_token = tokens.get("access_token")
        if not isinstance(access_token, str) or not access_token.strip():
            return None

        # Check JWT expiry — expired tokens block the auto chain and
        # prevent fallback to working providers (e.g. Anthropic).
        try:
            import base64
            # Second dot-separated segment is the JWT claims payload.
            payload = access_token.split(".")[1]
            # Restore the base64 padding the JWT encoding strips.
            payload += "=" * (-len(payload) % 4)
            claims = json.loads(base64.urlsafe_b64decode(payload))
            exp = claims.get("exp", 0)
            if exp and time.time() > exp:
                logger.debug("Codex access token expired (exp=%s), skipping", exp)
                return None
        except Exception:
            pass  # Non-JWT token or decode error — use as-is

        return access_token.strip()
    except Exception as exc:
        logger.debug("Could not read Codex auth for auxiliary client: %s", exc)
        return None
|
| 496 |
+
|
| 497 |
+
|
| 498 |
+
def _resolve_api_key_provider() -> Tuple[Optional[OpenAI], Optional[str]]:
    """Try each API-key provider in PROVIDER_REGISTRY order.

    Returns (client, model) for the first provider with usable runtime
    credentials, or (None, None) if none are configured.
    """
    try:
        from hermes_cli.auth import PROVIDER_REGISTRY, resolve_api_key_provider_credentials
    except ImportError:
        logger.debug("Could not import PROVIDER_REGISTRY for API-key fallback")
        return None, None

    for provider_id, pconfig in PROVIDER_REGISTRY.items():
        if pconfig.auth_type != "api_key":
            continue
        if provider_id == "anthropic":
            # Anthropic needs the native Messages adapter, not a raw OpenAI client.
            return _try_anthropic()

        creds = resolve_api_key_provider_credentials(provider_id)
        api_key = str(creds.get("api_key", "")).strip()
        if not api_key:
            continue

        # Per-provider base URL override wins; fall back to the registry default.
        base_url = str(creds.get("base_url", "")).strip().rstrip("/") or pconfig.inference_base_url
        model = _API_KEY_PROVIDER_AUX_MODELS.get(provider_id, "default")
        logger.debug("Auxiliary text client: %s (%s)", pconfig.name, model)
        extra = {}
        if "api.kimi.com" in base_url.lower():
            # Kimi requires a CLI-style User-Agent on API requests.
            extra["default_headers"] = {"User-Agent": "KimiCLI/1.0"}
        elif "api.githubcopilot.com" in base_url.lower():
            from hermes_cli.models import copilot_default_headers

            extra["default_headers"] = copilot_default_headers()
        return OpenAI(api_key=api_key, base_url=base_url, **extra), model

    return None, None
|
| 534 |
+
|
| 535 |
+
|
| 536 |
+
# ── Provider resolution helpers ─────────────────────────────────────────────
|
| 537 |
+
|
| 538 |
+
def _get_auxiliary_provider(task: str = "") -> str:
|
| 539 |
+
"""Read the provider override for a specific auxiliary task.
|
| 540 |
+
|
| 541 |
+
Checks AUXILIARY_{TASK}_PROVIDER first (e.g. AUXILIARY_VISION_PROVIDER),
|
| 542 |
+
then CONTEXT_{TASK}_PROVIDER (for the compression section's summary_provider),
|
| 543 |
+
then falls back to "auto". Returns one of: "auto", "openrouter", "nous", "main".
|
| 544 |
+
"""
|
| 545 |
+
if task:
|
| 546 |
+
for prefix in ("AUXILIARY_", "CONTEXT_"):
|
| 547 |
+
val = os.getenv(f"{prefix}{task.upper()}_PROVIDER", "").strip().lower()
|
| 548 |
+
if val and val != "auto":
|
| 549 |
+
return val
|
| 550 |
+
return "auto"
|
| 551 |
+
|
| 552 |
+
|
| 553 |
+
def _get_auxiliary_env_override(task: str, suffix: str) -> Optional[str]:
    """Return the first non-empty AUXILIARY_*/CONTEXT_* override for a task."""
    if not task:
        return None
    key = f"{task.upper()}_{suffix}"
    for env_name in (f"AUXILIARY_{key}", f"CONTEXT_{key}"):
        value = os.getenv(env_name, "").strip()
        if value:
            return value
    return None
|
| 562 |
+
|
| 563 |
+
|
| 564 |
+
def _try_openrouter() -> Tuple[Optional[OpenAI], Optional[str]]:
    """Build an OpenRouter-backed auxiliary client when the API key is set."""
    key = os.getenv("OPENROUTER_API_KEY")
    if not key:
        return None, None
    logger.debug("Auxiliary client: OpenRouter")
    client = OpenAI(
        api_key=key,
        base_url=OPENROUTER_BASE_URL,
        default_headers=_OR_HEADERS,
    )
    return client, _OPENROUTER_MODEL
|
| 571 |
+
|
| 572 |
+
|
| 573 |
+
def _try_nous() -> Tuple[Optional[OpenAI], Optional[str]]:
    """Build a Nous Portal client when OAuth credentials are available."""
    auth = _read_nous_auth()
    if not auth:
        return None, None
    # Record that Nous won so get_auxiliary_extra_body() adds portal tags.
    global auxiliary_is_nous
    auxiliary_is_nous = True
    logger.debug("Auxiliary client: Nous Portal")
    client = OpenAI(api_key=_nous_api_key(auth), base_url=_nous_base_url())
    return client, _NOUS_MODEL
|
| 584 |
+
|
| 585 |
+
|
| 586 |
+
def _read_main_model() -> str:
    """Read the user's configured main model from env/config.

    Checks OPENAI_MODEL, then HERMES_MODEL, then LLM_MODEL, then the
    config.yaml ``model``/``model.default`` setting, so the auxiliary
    client can reuse the main agent's model when no dedicated auxiliary
    model exists. Returns "" when nothing is configured.
    """
    for env_var in ("OPENAI_MODEL", "HERMES_MODEL", "LLM_MODEL"):
        env_value = os.getenv(env_var)
        if env_value:
            return env_value.strip()
    try:
        from hermes_cli.config import load_config

        model_cfg = load_config().get("model", {})
    except Exception:
        return ""
    if isinstance(model_cfg, str) and model_cfg.strip():
        return model_cfg.strip()
    if isinstance(model_cfg, dict):
        default = model_cfg.get("default", "")
        if isinstance(default, str) and default.strip():
            return default.strip()
    return ""
|
| 609 |
+
|
| 610 |
+
|
| 611 |
+
def _resolve_custom_runtime() -> Tuple[Optional[str], Optional[str]]:
    """Resolve the active custom/main endpoint exactly as the main CLI does.

    Handles both env-driven OPENAI_BASE_URL setups and config-saved custom
    endpoints whose base URL lives in config.yaml rather than the live
    environment. Returns (base_url, api_key) or (None, None).
    """
    try:
        from hermes_cli.runtime_provider import resolve_runtime_provider

        runtime = resolve_runtime_provider(requested="custom")
    except Exception as exc:
        logger.debug("Auxiliary client: custom runtime resolution failed: %s", exc)
        return None, None

    base = runtime.get("base_url")
    key = runtime.get("api_key")
    if not (isinstance(base, str) and base.strip()):
        return None, None
    if not (isinstance(key, str) and key.strip()):
        return None, None

    base = base.strip().rstrip("/")
    # requested='custom' falls back to OpenRouter when no custom endpoint is
    # configured. Treat that as "no custom endpoint" for auxiliary routing.
    if "openrouter.ai" in base.lower():
        return None, None

    return base, key.strip()
|
| 640 |
+
|
| 641 |
+
|
| 642 |
+
def _current_custom_base_url() -> str:
    """Return the active custom endpoint base URL, or '' when none is set."""
    base, _key = _resolve_custom_runtime()
    return base if base else ""
|
| 645 |
+
|
| 646 |
+
|
| 647 |
+
def _try_custom_endpoint() -> Tuple[Optional[OpenAI], Optional[str]]:
    """Build a client against the user's custom OpenAI-compatible endpoint."""
    base, key = _resolve_custom_runtime()
    if not (base and key):
        return None, None
    aux_model = _read_main_model() or "gpt-4o-mini"
    logger.debug("Auxiliary client: custom endpoint (%s)", aux_model)
    return OpenAI(api_key=key, base_url=base), aux_model
|
| 654 |
+
|
| 655 |
+
|
| 656 |
+
def _try_codex() -> Tuple[Optional[Any], Optional[str]]:
    """Build a Codex-backed auxiliary client from an OAuth access token.

    The returned client is wrapped in CodexAuxiliaryClient so callers can
    keep using the Chat Completions interface.
    """
    token = _read_codex_access_token()
    if not token:
        return None, None
    logger.debug("Auxiliary client: Codex OAuth (%s via Responses API)", _CODEX_AUX_MODEL)
    inner = OpenAI(api_key=token, base_url=_CODEX_AUX_BASE_URL)
    return CodexAuxiliaryClient(inner, _CODEX_AUX_MODEL), _CODEX_AUX_MODEL
|
| 663 |
+
|
| 664 |
+
|
| 665 |
+
def _try_anthropic() -> Tuple[Optional[Any], Optional[str]]:
    """Build a native Anthropic auxiliary client when credentials exist.

    Returns (None, None) when the adapter module or a token is missing.
    """
    try:
        from agent.anthropic_adapter import build_anthropic_client, resolve_anthropic_token
    except ImportError:
        return None, None

    token = resolve_anthropic_token()
    if not token:
        return None, None

    # Allow base URL override from config.yaml model.base_url, but only
    # when the configured provider is anthropic — otherwise a non-Anthropic
    # base_url (e.g. Codex endpoint) would leak into Anthropic requests.
    base_url = _ANTHROPIC_DEFAULT_BASE_URL
    try:
        from hermes_cli.config import load_config

        model_section = load_config().get("model")
        if isinstance(model_section, dict):
            configured_provider = str(model_section.get("provider") or "").strip().lower()
            if configured_provider == "anthropic":
                override = (model_section.get("base_url") or "").strip().rstrip("/")
                if override:
                    base_url = override
    except Exception:
        pass

    from agent.anthropic_adapter import _is_oauth_token

    oauth = _is_oauth_token(token)
    aux_model = _API_KEY_PROVIDER_AUX_MODELS.get("anthropic", "claude-haiku-4-5-20251001")
    logger.debug("Auxiliary client: Anthropic native (%s) at %s (oauth=%s)", aux_model, base_url, oauth)
    inner = build_anthropic_client(token, base_url)
    return AnthropicAuxiliaryClient(inner, aux_model, token, base_url, is_oauth=oauth), aux_model
|
| 698 |
+
|
| 699 |
+
|
| 700 |
+
def _resolve_forced_provider(forced: str) -> Tuple[Optional[OpenAI], Optional[str]]:
    """Resolve a specific forced provider. Returns (None, None) if creds missing."""
    # Simple one-backend cases share a resolve-then-warn shape.
    simple_backends = {
        "openrouter": (
            _try_openrouter,
            "auxiliary.provider=openrouter but OPENROUTER_API_KEY not set",
        ),
        "nous": (
            _try_nous,
            "auxiliary.provider=nous but Nous Portal not configured (run: hermes login)",
        ),
        "codex": (
            _try_codex,
            "auxiliary.provider=codex but no Codex OAuth token found (run: hermes model)",
        ),
    }
    if forced in simple_backends:
        resolver, missing_msg = simple_backends[forced]
        client, model = resolver()
        if client is None:
            logger.warning(missing_msg)
        return client, model

    if forced == "main":
        # "main" = skip OpenRouter/Nous, use the main chat model's credentials.
        for candidate_fn in (_try_custom_endpoint, _try_codex, _resolve_api_key_provider):
            client, model = candidate_fn()
            if client is not None:
                return client, model
        logger.warning("auxiliary.provider=main but no main endpoint credentials found")
        return None, None

    # Unknown provider name — fall through to auto
    logger.warning("Unknown auxiliary.provider=%r, falling back to auto", forced)
    return None, None
|
| 732 |
+
|
| 733 |
+
|
| 734 |
+
def _resolve_auto() -> Tuple[Optional[OpenAI], Optional[str]]:
    """Full auto-detection chain: OpenRouter → Nous → custom → Codex → API-key → None."""
    global auxiliary_is_nous
    auxiliary_is_nous = False  # Reset — _try_nous() will set True if it wins
    detection_chain = (
        _try_openrouter,
        _try_nous,
        _try_custom_endpoint,
        _try_codex,
        _resolve_api_key_provider,
    )
    for detect in detection_chain:
        client, model = detect()
        if client is not None:
            return client, model
    logger.debug("Auxiliary client: none available")
    return None, None
|
| 745 |
+
|
| 746 |
+
|
| 747 |
+
# ── Centralized Provider Router ─────────────────────────────────────────────
|
| 748 |
+
#
|
| 749 |
+
# resolve_provider_client() is the single entry point for creating a properly
|
| 750 |
+
# configured client given a (provider, model) pair. It handles auth lookup,
|
| 751 |
+
# base URL resolution, provider-specific headers, and API format differences
|
| 752 |
+
# (Chat Completions vs Responses API for Codex).
|
| 753 |
+
#
|
| 754 |
+
# All auxiliary consumer code should go through this or the public helpers
|
| 755 |
+
# below — never look up auth env vars ad-hoc.
|
| 756 |
+
|
| 757 |
+
|
| 758 |
+
def _to_async_client(sync_client, model: str):
    """Convert a sync client to its async counterpart, preserving Codex routing."""
    from openai import AsyncOpenAI

    # Adapter-wrapped clients get their dedicated async wrappers.
    if isinstance(sync_client, CodexAuxiliaryClient):
        return AsyncCodexAuxiliaryClient(sync_client), model
    if isinstance(sync_client, AnthropicAuxiliaryClient):
        return AsyncAnthropicAuxiliaryClient(sync_client), model

    base = str(sync_client.base_url)
    base_folded = base.lower()
    kwargs = {"api_key": sync_client.api_key, "base_url": base}
    # Re-apply the same provider-specific headers the sync client used.
    if "openrouter" in base_folded:
        kwargs["default_headers"] = dict(_OR_HEADERS)
    elif "api.githubcopilot.com" in base_folded:
        from hermes_cli.models import copilot_default_headers

        kwargs["default_headers"] = copilot_default_headers()
    elif "api.kimi.com" in base_folded:
        kwargs["default_headers"] = {"User-Agent": "KimiCLI/1.0"}
    return AsyncOpenAI(**kwargs), model
|
| 781 |
+
|
| 782 |
+
|
| 783 |
+
def resolve_provider_client(
    provider: str,
    model: Optional[str] = None,
    async_mode: bool = False,
    raw_codex: bool = False,
    explicit_base_url: Optional[str] = None,
    explicit_api_key: Optional[str] = None,
) -> Tuple[Optional[Any], Optional[str]]:
    """Central router: given a provider name and optional model, return a
    configured client with the correct auth, base URL, and API format.

    The returned client always exposes ``.chat.completions.create()`` — for
    Codex/Responses API providers, an adapter handles the translation
    transparently.

    Args:
        provider: Provider identifier. One of:
            "openrouter", "nous", "openai-codex" (or "codex"),
            "zai", "kimi-coding", "minimax", "minimax-cn",
            "custom" (OPENAI_BASE_URL + OPENAI_API_KEY),
            "auto" (full auto-detection chain).
        model: Model slug override. If None, uses the provider's default
            auxiliary model.
        async_mode: If True, return an async-compatible client.
        raw_codex: If True, return a raw OpenAI client for Codex providers
            instead of wrapping in CodexAuxiliaryClient. Use this when
            the caller needs direct access to responses.stream() (e.g.,
            the main agent loop).
        explicit_base_url: Optional direct OpenAI-compatible endpoint.
        explicit_api_key: Optional API key paired with explicit_base_url.

    Returns:
        (client, resolved_model) or (None, None) if auth is unavailable.
    """
    # Normalise aliases
    provider = (provider or "auto").strip().lower()
    if provider == "codex":
        provider = "openai-codex"
    if provider == "main":
        provider = "custom"

    # ── Auto: try all providers in priority order ────────────────────
    if provider == "auto":
        client, resolved = _resolve_auto()
        if client is None:
            return None, None
        # When auto-detection lands on a non-OpenRouter provider (e.g. a
        # local server), an OpenRouter-formatted model override like
        # "google/gemini-3-flash-preview" won't work. Drop it and use
        # the provider's own default model instead.
        if model and "/" in model and resolved and "/" not in resolved:
            logger.debug(
                "Dropping OpenRouter-format model %r for non-OpenRouter "
                "auxiliary provider (using %r instead)", model, resolved)
            model = None
        final_model = model or resolved
        return (_to_async_client(client, final_model) if async_mode
                else (client, final_model))

    # ── OpenRouter ───────────────────────────────────────────────────
    if provider == "openrouter":
        client, default = _try_openrouter()
        if client is None:
            logger.warning("resolve_provider_client: openrouter requested "
                           "but OPENROUTER_API_KEY not set")
            return None, None
        final_model = model or default
        return (_to_async_client(client, final_model) if async_mode
                else (client, final_model))

    # ── Nous Portal (OAuth) ──────────────────────────────────────────
    if provider == "nous":
        client, default = _try_nous()
        if client is None:
            logger.warning("resolve_provider_client: nous requested "
                           "but Nous Portal not configured (run: hermes login)")
            return None, None
        final_model = model or default
        return (_to_async_client(client, final_model) if async_mode
                else (client, final_model))

    # ── OpenAI Codex (OAuth → Responses API) ─────────────────────────
    if provider == "openai-codex":
        if raw_codex:
            # Return the raw OpenAI client for callers that need direct
            # access to responses.stream() (e.g., the main agent loop).
            codex_token = _read_codex_access_token()
            if not codex_token:
                logger.warning("resolve_provider_client: openai-codex requested "
                               "but no Codex OAuth token found (run: hermes model)")
                return None, None
            final_model = model or _CODEX_AUX_MODEL
            raw_client = OpenAI(api_key=codex_token, base_url=_CODEX_AUX_BASE_URL)
            # NOTE: raw clients are never converted to async here — the raw
            # path is sync-only by design of its callers.
            return (raw_client, final_model)
        # Standard path: wrap in CodexAuxiliaryClient adapter
        client, default = _try_codex()
        if client is None:
            logger.warning("resolve_provider_client: openai-codex requested "
                           "but no Codex OAuth token found (run: hermes model)")
            return None, None
        final_model = model or default
        return (_to_async_client(client, final_model) if async_mode
                else (client, final_model))

    # ── Custom endpoint (OPENAI_BASE_URL + OPENAI_API_KEY) ───────────
    if provider == "custom":
        if explicit_base_url:
            # Direct endpoint override: use the caller-supplied base URL,
            # falling back to OPENAI_API_KEY for the key.
            custom_base = explicit_base_url.strip()
            custom_key = (
                (explicit_api_key or "").strip()
                or os.getenv("OPENAI_API_KEY", "").strip()
            )
            if not custom_base or not custom_key:
                logger.warning(
                    "resolve_provider_client: explicit custom endpoint requested "
                    "but no API key was found (set explicit_api_key or OPENAI_API_KEY)"
                )
                return None, None
            final_model = model or _read_main_model() or "gpt-4o-mini"
            client = OpenAI(api_key=custom_key, base_url=custom_base)
            return (_to_async_client(client, final_model) if async_mode
                    else (client, final_model))
        # Try custom first, then codex, then API-key providers
        for try_fn in (_try_custom_endpoint, _try_codex,
                       _resolve_api_key_provider):
            client, default = try_fn()
            if client is not None:
                final_model = model or default
                return (_to_async_client(client, final_model) if async_mode
                        else (client, final_model))
        logger.warning("resolve_provider_client: custom/main requested "
                       "but no endpoint credentials found")
        return None, None

    # ── API-key providers from PROVIDER_REGISTRY ─────────────────────
    try:
        from hermes_cli.auth import PROVIDER_REGISTRY, resolve_api_key_provider_credentials
    except ImportError:
        logger.debug("hermes_cli.auth not available for provider %s", provider)
        return None, None

    pconfig = PROVIDER_REGISTRY.get(provider)
    if pconfig is None:
        logger.warning("resolve_provider_client: unknown provider %r", provider)
        return None, None

    if pconfig.auth_type == "api_key":
        # Anthropic uses its own native adapter rather than the generic
        # OpenAI-compatible path.
        if provider == "anthropic":
            client, default_model = _try_anthropic()
            if client is None:
                logger.warning("resolve_provider_client: anthropic requested but no Anthropic credentials found")
                return None, None
            final_model = model or default_model
            return (_to_async_client(client, final_model) if async_mode else (client, final_model))

        creds = resolve_api_key_provider_credentials(provider)
        api_key = str(creds.get("api_key", "")).strip()
        if not api_key:
            tried_sources = list(pconfig.api_key_env_vars)
            if provider == "copilot":
                tried_sources.append("gh auth token")
            logger.warning("resolve_provider_client: provider %s has no API "
                           "key configured (tried: %s)",
                           provider, ", ".join(tried_sources))
            return None, None

        # Credential base_url wins over the registry's inference_base_url.
        base_url = str(creds.get("base_url", "")).strip().rstrip("/") or pconfig.inference_base_url

        default_model = _API_KEY_PROVIDER_AUX_MODELS.get(provider, "")
        final_model = model or default_model

        # Provider-specific headers
        headers = {}
        if "api.kimi.com" in base_url.lower():
            headers["User-Agent"] = "KimiCLI/1.0"
        elif "api.githubcopilot.com" in base_url.lower():
            from hermes_cli.models import copilot_default_headers

            headers.update(copilot_default_headers())

        client = OpenAI(api_key=api_key, base_url=base_url,
                        **({"default_headers": headers} if headers else {}))
        logger.debug("resolve_provider_client: %s (%s)", provider, final_model)
        return (_to_async_client(client, final_model) if async_mode
                else (client, final_model))

    elif pconfig.auth_type in ("oauth_device_code", "oauth_external"):
        # OAuth providers — route through their specific try functions
        # NOTE(review): "nous" and "openai-codex" are already handled by the
        # earlier branches above, so these recursive calls look unreachable
        # from this path — confirm before removing.
        if provider == "nous":
            return resolve_provider_client("nous", model, async_mode)
        if provider == "openai-codex":
            return resolve_provider_client("openai-codex", model, async_mode)
        # Other OAuth providers not directly supported
        logger.warning("resolve_provider_client: OAuth provider %s not "
                       "directly supported, try 'auto'", provider)
        return None, None

    logger.warning("resolve_provider_client: unhandled auth_type %s for %s",
                   pconfig.auth_type, provider)
    return None, None
|
| 983 |
+
|
| 984 |
+
|
| 985 |
+
# ── Public API ──────────────────────────────────────────────────────────────
|
| 986 |
+
|
| 987 |
+
def get_text_auxiliary_client(task: str = "") -> Tuple[Optional[OpenAI], Optional[str]]:
    """Return (client, default_model_slug) for text-only auxiliary tasks.

    Args:
        task: Optional task name ("compression", "web_extract") used to
            look up a task-specific provider override.

    Callers may override the returned model with a per-task env var
    (e.g. CONTEXT_COMPRESSION_MODEL, AUXILIARY_WEB_EXTRACT_MODEL).
    """
    task_provider, task_model, task_base_url, task_api_key = _resolve_task_provider_model(task or None)
    return resolve_provider_client(
        task_provider,
        model=task_model,
        explicit_base_url=task_base_url,
        explicit_api_key=task_api_key,
    )
|
| 1004 |
+
|
| 1005 |
+
|
| 1006 |
+
def get_async_text_auxiliary_client(task: str = ""):
    """Return (async_client, model_slug) for async consumers.

    Standard providers yield (AsyncOpenAI, model); Codex yields
    (AsyncCodexAuxiliaryClient, model), which wraps the Responses API.
    Yields (None, None) when no provider is available.
    """
    task_provider, task_model, task_base_url, task_api_key = _resolve_task_provider_model(task or None)
    return resolve_provider_client(
        task_provider,
        model=task_model,
        async_mode=True,
        explicit_base_url=task_base_url,
        explicit_api_key=task_api_key,
    )
|
| 1021 |
+
|
| 1022 |
+
|
| 1023 |
+
# Conservative auto-selection order for vision tasks. Auto mode walks this
# tuple front-to-back (see get_available_vision_backends()); only backends
# treated as known-good for multimodal requests are listed.
_VISION_AUTO_PROVIDER_ORDER = (
    "openrouter",
    "nous",
    "openai-codex",
    "anthropic",
    "custom",
)
|
| 1030 |
+
|
| 1031 |
+
|
| 1032 |
+
def _normalize_vision_provider(provider: Optional[str]) -> str:
    """Canonicalise a vision provider name (maps aliases to router ids)."""
    normalized = (provider or "auto").strip().lower()
    aliases = {"codex": "openai-codex", "main": "custom"}
    return aliases.get(normalized, normalized)
|
| 1039 |
+
|
| 1040 |
+
|
| 1041 |
+
def _resolve_strict_vision_backend(provider: str) -> Tuple[Optional[Any], Optional[str]]:
    """Resolve a known-good vision backend by name; (None, None) otherwise."""
    resolvers = {
        "openrouter": _try_openrouter,
        "nous": _try_nous,
        "openai-codex": _try_codex,
        "anthropic": _try_anthropic,
        "custom": _try_custom_endpoint,
    }
    resolver = resolvers.get(_normalize_vision_provider(provider))
    if resolver is None:
        return None, None
    return resolver()
|
| 1054 |
+
|
| 1055 |
+
|
| 1056 |
+
def _strict_vision_backend_available(provider: str) -> bool:
    """Return True when the given vision backend has usable credentials now."""
    backend_client, _model = _resolve_strict_vision_backend(provider)
    return backend_client is not None
|
| 1058 |
+
|
| 1059 |
+
|
| 1060 |
+
def _preferred_main_vision_provider() -> Optional[str]:
    """Return the selected main provider when it is also a supported vision backend."""
    try:
        from hermes_cli.config import load_config

        model_section = load_config().get("model", {})
        if isinstance(model_section, dict):
            candidate = _normalize_vision_provider(model_section.get("provider", ""))
            if candidate in _VISION_AUTO_PROVIDER_ORDER:
                return candidate
    except Exception:
        # Best effort: an unreadable config simply means no preference.
        pass
    return None
|
| 1074 |
+
|
| 1075 |
+
|
| 1076 |
+
def get_available_vision_backends() -> List[str]:
    """Return the currently available vision backends in auto-selection order.

    This is the single source of truth for setup, tool gating, and runtime
    auto-routing of vision tasks. The selected main provider is preferred when
    it is also a known-good vision backend; otherwise Hermes falls back through
    the standard conservative order.
    """
    candidates = list(_VISION_AUTO_PROVIDER_ORDER)
    favourite = _preferred_main_vision_provider()
    if favourite in candidates:
        # Promote the main provider to the front; keep the rest in order.
        candidates = [favourite] + [c for c in candidates if c != favourite]
    return [c for c in candidates if _strict_vision_backend_available(c)]
|
| 1090 |
+
|
| 1091 |
+
|
| 1092 |
+
def resolve_vision_provider_client(
    provider: Optional[str] = None,
    model: Optional[str] = None,
    *,
    base_url: Optional[str] = None,
    api_key: Optional[str] = None,
    async_mode: bool = False,
) -> Tuple[Optional[str], Optional[Any], Optional[str]]:
    """Resolve the client actually used for vision tasks.

    Direct endpoint overrides take precedence over provider selection. Explicit
    provider overrides still use the generic provider router for non-standard
    backends, so users can intentionally force experimental providers. Auto mode
    stays conservative and only tries vision backends known to work today.

    Returns:
        (resolved_provider, client, model) — client/model are None when the
        chosen backend has no usable credentials; all three are None when
        auto mode finds nothing.
    """
    requested, resolved_model, resolved_base_url, resolved_api_key = _resolve_task_provider_model(
        "vision", provider, model, base_url, api_key
    )
    requested = _normalize_vision_provider(requested)

    def _finalize(resolved_provider: str, sync_client: Any, default_model: Optional[str]):
        # Shared tail: apply the model override and optionally convert the
        # sync client to its async counterpart.
        if sync_client is None:
            return resolved_provider, None, None
        final_model = resolved_model or default_model
        if async_mode:
            async_client, async_model = _to_async_client(sync_client, final_model)
            return resolved_provider, async_client, async_model
        return resolved_provider, sync_client, final_model

    # Direct endpoint override wins over any provider selection.
    if resolved_base_url:
        client, final_model = resolve_provider_client(
            "custom",
            model=resolved_model,
            async_mode=async_mode,
            explicit_base_url=resolved_base_url,
            explicit_api_key=resolved_api_key,
        )
        if client is None:
            return "custom", None, None
        return "custom", client, final_model

    # Auto mode: only walk backends known to handle vision requests.
    if requested == "auto":
        for candidate in get_available_vision_backends():
            sync_client, default_model = _resolve_strict_vision_backend(candidate)
            if sync_client is not None:
                return _finalize(candidate, sync_client, default_model)
        logger.debug("Auxiliary vision client: none available")
        return None, None, None

    # Explicit request for a known-good vision backend.
    if requested in _VISION_AUTO_PROVIDER_ORDER:
        sync_client, default_model = _resolve_strict_vision_backend(requested)
        return _finalize(requested, sync_client, default_model)

    # Anything else: intentionally forced non-standard backend — use the
    # generic (cached) provider router.
    client, final_model = _get_cached_client(requested, resolved_model, async_mode)
    if client is None:
        return requested, None, None
    return requested, client, final_model
|
| 1149 |
+
|
| 1150 |
+
|
| 1151 |
+
def get_vision_auxiliary_client() -> Tuple[Optional[OpenAI], Optional[str]]:
    """Return (client, default_model_slug) for vision/multimodal auxiliary tasks."""
    _provider, vision_client, vision_model = resolve_vision_provider_client(async_mode=False)
    return vision_client, vision_model
|
| 1155 |
+
|
| 1156 |
+
|
| 1157 |
+
def get_async_vision_auxiliary_client():
    """Return (async_client, model_slug) for async vision consumers."""
    _provider, vision_client, vision_model = resolve_vision_provider_client(async_mode=True)
    return vision_client, vision_model
|
| 1161 |
+
|
| 1162 |
+
|
| 1163 |
+
def get_auxiliary_extra_body() -> dict:
    """Return extra_body kwargs for auxiliary API calls.

    Adds Nous Portal product tags when the auxiliary client is backed by
    Nous Portal; otherwise returns an empty dict.
    """
    if not auxiliary_is_nous:
        return {}
    return dict(NOUS_EXTRA_BODY)
|
| 1170 |
+
|
| 1171 |
+
|
| 1172 |
+
def auxiliary_max_tokens_param(value: int) -> dict:
    """Return the correct max tokens kwarg for the auxiliary client's provider.

    OpenRouter and local models use 'max_tokens'. Direct OpenAI with newer
    models (gpt-4o, o-series, gpt-5+) requires 'max_completion_tokens'.
    The Codex adapter translates max_tokens internally, so we use max_tokens
    for it as well.
    """
    custom_base = _current_custom_base_url()
    # 'max_completion_tokens' only applies when the auxiliary client is a
    # direct OpenAI custom endpoint (no OpenRouter key, no Nous auth).
    direct_openai = (
        not os.getenv("OPENROUTER_API_KEY")
        and _read_nous_auth() is None
        and "api.openai.com" in custom_base.lower()
    )
    if direct_openai:
        return {"max_completion_tokens": value}
    return {"max_tokens": value}
|
| 1188 |
+
|
| 1189 |
+
|
| 1190 |
+
# ── Centralized LLM Call API ────────────────────────────────────────────────
|
| 1191 |
+
#
|
| 1192 |
+
# call_llm() and async_call_llm() own the full request lifecycle:
|
| 1193 |
+
# 1. Resolve provider + model from task config (or explicit args)
|
| 1194 |
+
# 2. Get or create a cached client for that provider
|
| 1195 |
+
# 3. Format request args for the provider + model (max_tokens handling, etc.)
|
| 1196 |
+
# 4. Make the API call
|
| 1197 |
+
# 5. Return the response
|
| 1198 |
+
#
|
| 1199 |
+
# Every auxiliary LLM consumer should use these instead of manually
|
| 1200 |
+
# constructing clients and calling .chat.completions.create().
|
| 1201 |
+
|
| 1202 |
+
# Client cache: (provider, async_mode, base_url, api_key) -> cached entry.
# NOTE(review): entries are unpacked as 3-tuples in _get_cached_client —
# (client, default_model, owning_event_loop_or_None) — the loop element lets
# stale async clients (whose loop has closed) be discarded; confirm against
# the code that stores entries.
_client_cache: Dict[tuple, tuple] = {}
# Guards all reads/writes of _client_cache, including shutdown.
_client_cache_lock = threading.Lock()
|
| 1205 |
+
|
| 1206 |
+
|
| 1207 |
+
def _force_close_async_httpx(client: Any) -> None:
    """Mark the httpx AsyncClient inside an AsyncOpenAI client as closed.

    Stops ``AsyncHttpxClientWrapper.__del__`` from scheduling ``aclose()``
    on a (possibly closed) event loop, which otherwise surfaces as
    ``RuntimeError: Event loop is closed`` → prompt_toolkit's
    "Press ENTER to continue..." handler.

    Deliberately skips the full async close path — the OS reclaims the
    connections when the process exits.
    """
    try:
        from httpx._client import ClientState

        transport = getattr(client, "_client", None)
        if transport is None:
            return
        if not getattr(transport, "is_closed", True):
            transport._state = ClientState.CLOSED
    except Exception:
        pass
|
| 1225 |
+
|
| 1226 |
+
|
| 1227 |
+
def shutdown_cached_clients() -> None:
    """Close all cached clients (sync and async) to prevent event-loop errors.

    Call this during CLI shutdown, *before* the event loop is closed, to
    avoid ``AsyncHttpxClientWrapper.__del__`` raising on a dead loop.
    """
    import inspect

    with _client_cache_lock:
        for entry in list(_client_cache.values()):
            cached = entry[0]
            if cached is None:
                continue
            # Neutralize any async httpx transport first so __del__ cannot
            # schedule aclose() on a dead event loop.
            _force_close_async_httpx(cached)
            # Sync clients: close the httpx connection pool cleanly.
            # Async clients: skip — their close() is a coroutine and the
            # transport was already marked closed above.
            try:
                closer = getattr(cached, "close", None)
                if closer and not inspect.iscoroutinefunction(closer):
                    closer()
            except Exception:
                pass
        _client_cache.clear()
|
| 1252 |
+
|
| 1253 |
+
|
| 1254 |
+
def _get_cached_client(
    provider: str,
    model: Optional[str] = None,
    async_mode: bool = False,
    base_url: Optional[str] = None,
    api_key: Optional[str] = None,
) -> Tuple[Optional[Any], Optional[str]]:
    """Get or create a cached client for the given provider.

    Cache values are ``(client, default_model, bound_loop)``, where
    ``bound_loop`` is the event loop an async client was created on
    (``None`` for sync clients). Stale async clients whose loop has been
    closed are evicted and rebuilt.

    Returns:
        ``(client, model)`` — ``client`` may be ``None`` when
        ``resolve_provider_client`` finds no credentials; ``model`` falls
        back to the provider's default when no explicit model was given.
    """
    cache_key = (provider, async_mode, base_url or "", api_key or "")
    with _client_cache_lock:
        if cache_key in _client_cache:
            cached_client, cached_default, cached_loop = _client_cache[cache_key]
            if async_mode:
                # Async clients are bound to the event loop that created them.
                # A cached async client whose loop has been closed will raise
                # "Event loop is closed" when httpx tries to clean up its
                # transport. Discard the stale client and create a fresh one.
                if cached_loop is not None and cached_loop.is_closed():
                    _force_close_async_httpx(cached_client)
                    del _client_cache[cache_key]
                else:
                    return cached_client, model or cached_default
            else:
                return cached_client, model or cached_default
    # Build outside the lock (client construction may be slow; holding the
    # lock here would serialize all callers behind one network handshake).
    client, default_model = resolve_provider_client(
        provider,
        model,
        async_mode,
        explicit_base_url=base_url,
        explicit_api_key=api_key,
    )
    if client is not None:
        # For async clients, remember which loop they were created on so we
        # can detect stale entries later.
        bound_loop = None
        if async_mode:
            try:
                import asyncio as _aio
                # NOTE(review): get_event_loop() is deprecated outside a
                # running loop on 3.10+; presumably this always runs inside
                # a loop — confirm before switching to get_running_loop().
                bound_loop = _aio.get_event_loop()
            except RuntimeError:
                pass
        with _client_cache_lock:
            # Re-check under the lock: another thread may have built and
            # cached a client while we were outside it; prefer the cached one.
            if cache_key not in _client_cache:
                _client_cache[cache_key] = (client, default_model, bound_loop)
            else:
                client, default_model, _ = _client_cache[cache_key]
    return client, model or default_model
|
| 1302 |
+
|
| 1303 |
+
|
| 1304 |
+
def _resolve_task_provider_model(
|
| 1305 |
+
task: str = None,
|
| 1306 |
+
provider: str = None,
|
| 1307 |
+
model: str = None,
|
| 1308 |
+
base_url: str = None,
|
| 1309 |
+
api_key: str = None,
|
| 1310 |
+
) -> Tuple[str, Optional[str], Optional[str], Optional[str]]:
|
| 1311 |
+
"""Determine provider + model for a call.
|
| 1312 |
+
|
| 1313 |
+
Priority:
|
| 1314 |
+
1. Explicit provider/model/base_url/api_key args (always win)
|
| 1315 |
+
2. Env var overrides (AUXILIARY_{TASK}_*, CONTEXT_{TASK}_*)
|
| 1316 |
+
3. Config file (auxiliary.{task}.* or compression.*)
|
| 1317 |
+
4. "auto" (full auto-detection chain)
|
| 1318 |
+
|
| 1319 |
+
Returns (provider, model, base_url, api_key) where model may be None
|
| 1320 |
+
(use provider default). When base_url is set, provider is forced to
|
| 1321 |
+
"custom" and the task uses that direct endpoint.
|
| 1322 |
+
"""
|
| 1323 |
+
config = {}
|
| 1324 |
+
cfg_provider = None
|
| 1325 |
+
cfg_model = None
|
| 1326 |
+
cfg_base_url = None
|
| 1327 |
+
cfg_api_key = None
|
| 1328 |
+
|
| 1329 |
+
if task:
|
| 1330 |
+
try:
|
| 1331 |
+
from hermes_cli.config import load_config
|
| 1332 |
+
config = load_config()
|
| 1333 |
+
except ImportError:
|
| 1334 |
+
config = {}
|
| 1335 |
+
|
| 1336 |
+
aux = config.get("auxiliary", {}) if isinstance(config, dict) else {}
|
| 1337 |
+
task_config = aux.get(task, {}) if isinstance(aux, dict) else {}
|
| 1338 |
+
if not isinstance(task_config, dict):
|
| 1339 |
+
task_config = {}
|
| 1340 |
+
cfg_provider = str(task_config.get("provider", "")).strip() or None
|
| 1341 |
+
cfg_model = str(task_config.get("model", "")).strip() or None
|
| 1342 |
+
cfg_base_url = str(task_config.get("base_url", "")).strip() or None
|
| 1343 |
+
cfg_api_key = str(task_config.get("api_key", "")).strip() or None
|
| 1344 |
+
|
| 1345 |
+
# Backwards compat: compression section has its own keys.
|
| 1346 |
+
# The auxiliary.compression defaults to provider="auto", so treat
|
| 1347 |
+
# both None and "auto" as "not explicitly configured".
|
| 1348 |
+
if task == "compression" and (not cfg_provider or cfg_provider == "auto"):
|
| 1349 |
+
comp = config.get("compression", {}) if isinstance(config, dict) else {}
|
| 1350 |
+
if isinstance(comp, dict):
|
| 1351 |
+
cfg_provider = comp.get("summary_provider", "").strip() or None
|
| 1352 |
+
cfg_model = cfg_model or comp.get("summary_model", "").strip() or None
|
| 1353 |
+
_sbu = comp.get("summary_base_url") or ""
|
| 1354 |
+
cfg_base_url = cfg_base_url or _sbu.strip() or None
|
| 1355 |
+
|
| 1356 |
+
env_model = _get_auxiliary_env_override(task, "MODEL") if task else None
|
| 1357 |
+
resolved_model = model or env_model or cfg_model
|
| 1358 |
+
|
| 1359 |
+
if base_url:
|
| 1360 |
+
return "custom", resolved_model, base_url, api_key
|
| 1361 |
+
if provider:
|
| 1362 |
+
return provider, resolved_model, base_url, api_key
|
| 1363 |
+
|
| 1364 |
+
if task:
|
| 1365 |
+
env_base_url = _get_auxiliary_env_override(task, "BASE_URL")
|
| 1366 |
+
env_api_key = _get_auxiliary_env_override(task, "API_KEY")
|
| 1367 |
+
if env_base_url:
|
| 1368 |
+
return "custom", resolved_model, env_base_url, env_api_key or cfg_api_key
|
| 1369 |
+
|
| 1370 |
+
env_provider = _get_auxiliary_provider(task)
|
| 1371 |
+
if env_provider != "auto":
|
| 1372 |
+
return env_provider, resolved_model, None, None
|
| 1373 |
+
|
| 1374 |
+
if cfg_base_url:
|
| 1375 |
+
return "custom", resolved_model, cfg_base_url, cfg_api_key
|
| 1376 |
+
if cfg_provider and cfg_provider != "auto":
|
| 1377 |
+
return cfg_provider, resolved_model, None, None
|
| 1378 |
+
return "auto", resolved_model, None, None
|
| 1379 |
+
|
| 1380 |
+
return "auto", resolved_model, None, None
|
| 1381 |
+
|
| 1382 |
+
|
| 1383 |
+
def _build_call_kwargs(
|
| 1384 |
+
provider: str,
|
| 1385 |
+
model: str,
|
| 1386 |
+
messages: list,
|
| 1387 |
+
temperature: Optional[float] = None,
|
| 1388 |
+
max_tokens: Optional[int] = None,
|
| 1389 |
+
tools: Optional[list] = None,
|
| 1390 |
+
timeout: float = 30.0,
|
| 1391 |
+
extra_body: Optional[dict] = None,
|
| 1392 |
+
base_url: Optional[str] = None,
|
| 1393 |
+
) -> dict:
|
| 1394 |
+
"""Build kwargs for .chat.completions.create() with model/provider adjustments."""
|
| 1395 |
+
kwargs: Dict[str, Any] = {
|
| 1396 |
+
"model": model,
|
| 1397 |
+
"messages": messages,
|
| 1398 |
+
"timeout": timeout,
|
| 1399 |
+
}
|
| 1400 |
+
|
| 1401 |
+
if temperature is not None:
|
| 1402 |
+
kwargs["temperature"] = temperature
|
| 1403 |
+
|
| 1404 |
+
if max_tokens is not None:
|
| 1405 |
+
# Codex adapter handles max_tokens internally; OpenRouter/Nous use max_tokens.
|
| 1406 |
+
# Direct OpenAI api.openai.com with newer models needs max_completion_tokens.
|
| 1407 |
+
if provider == "custom":
|
| 1408 |
+
custom_base = base_url or _current_custom_base_url()
|
| 1409 |
+
if "api.openai.com" in custom_base.lower():
|
| 1410 |
+
kwargs["max_completion_tokens"] = max_tokens
|
| 1411 |
+
else:
|
| 1412 |
+
kwargs["max_tokens"] = max_tokens
|
| 1413 |
+
else:
|
| 1414 |
+
kwargs["max_tokens"] = max_tokens
|
| 1415 |
+
|
| 1416 |
+
if tools:
|
| 1417 |
+
kwargs["tools"] = tools
|
| 1418 |
+
|
| 1419 |
+
# Provider-specific extra_body
|
| 1420 |
+
merged_extra = dict(extra_body or {})
|
| 1421 |
+
if provider == "nous" or auxiliary_is_nous:
|
| 1422 |
+
merged_extra.setdefault("tags", []).extend(["product=hermes-agent"])
|
| 1423 |
+
if merged_extra:
|
| 1424 |
+
kwargs["extra_body"] = merged_extra
|
| 1425 |
+
|
| 1426 |
+
return kwargs
|
| 1427 |
+
|
| 1428 |
+
|
| 1429 |
+
def call_llm(
    task: str = None,
    *,
    provider: str = None,
    model: str = None,
    base_url: str = None,
    api_key: str = None,
    messages: list,
    temperature: float = None,
    max_tokens: int = None,
    tools: list = None,
    timeout: float = 30.0,
    extra_body: dict = None,
) -> Any:
    """Centralized synchronous LLM call.

    Resolves provider + model (from task config, explicit args, or auto-detect),
    handles auth, request formatting, and model-specific arg adjustments.

    Args:
        task: Auxiliary task name ("compression", "vision", "web_extract",
            "session_search", "skills_hub", "mcp", "flush_memories").
            Reads provider:model from config/env. Ignored if provider is set.
        provider: Explicit provider override.
        model: Explicit model override.
        base_url: Explicit endpoint override (forces the "custom" provider).
        api_key: Explicit API key override.
        messages: Chat messages list.
        temperature: Sampling temperature (None = provider default).
        max_tokens: Max output tokens (handles max_tokens vs max_completion_tokens).
        tools: Tool definitions (for function calling).
        timeout: Request timeout in seconds.
        extra_body: Additional request body fields.

    Returns:
        Response object with .choices[0].message.content

    Raises:
        RuntimeError: If no provider is configured.
    """
    resolved_provider, resolved_model, resolved_base_url, resolved_api_key = _resolve_task_provider_model(
        task, provider, model, base_url, api_key)

    if task == "vision":
        # Vision has its own provider-resolution chain (image-capable backends).
        effective_provider, client, final_model = resolve_vision_provider_client(
            provider=provider,
            model=model,
            base_url=base_url,
            api_key=api_key,
            async_mode=False,
        )
        if client is None and resolved_provider != "auto" and not resolved_base_url:
            logger.warning(
                "Vision provider %s unavailable, falling back to auto vision backends",
                resolved_provider,
            )
            effective_provider, client, final_model = resolve_vision_provider_client(
                provider="auto",
                model=resolved_model,
                async_mode=False,
            )
        if client is None:
            raise RuntimeError(
                f"No LLM provider configured for task={task} provider={resolved_provider}. "
                f"Run: hermes setup"
            )
        resolved_provider = effective_provider or resolved_provider
    else:
        client, final_model = _get_cached_client(
            resolved_provider,
            resolved_model,
            base_url=resolved_base_url,
            api_key=resolved_api_key,
        )
        if client is None:
            # When the user explicitly chose a non-OpenRouter provider but no
            # credentials were found, fail fast instead of silently routing
            # through OpenRouter (which causes confusing 404s).
            _explicit = (resolved_provider or "").strip().lower()
            if _explicit and _explicit not in ("auto", "openrouter", "custom"):
                raise RuntimeError(
                    f"Provider '{_explicit}' is set in config.yaml but no API key "
                    f"was found. Set the {_explicit.upper()}_API_KEY environment "
                    f"variable, or switch to a different provider with `hermes model`."
                )
            # For auto/custom, fall back to OpenRouter
            if not resolved_base_url:
                logger.warning("Provider %s unavailable, falling back to openrouter",
                               resolved_provider)
                client, final_model = _get_cached_client(
                    "openrouter", resolved_model or _OPENROUTER_MODEL)
            if client is None:
                raise RuntimeError(
                    f"No LLM provider configured for task={task} provider={resolved_provider}. "
                    f"Run: hermes setup")

    kwargs = _build_call_kwargs(
        resolved_provider, final_model, messages,
        temperature=temperature, max_tokens=max_tokens,
        tools=tools, timeout=timeout, extra_body=extra_body,
        base_url=resolved_base_url)

    # Retry once with max_completion_tokens if the provider rejects max_tokens.
    try:
        return client.chat.completions.create(**kwargs)
    except Exception as first_err:
        err_str = str(first_err)
        # BUGFIX: only retry when a max_tokens value was actually supplied
        # and the request did not already use max_completion_tokens —
        # otherwise the retry would send max_completion_tokens=None or
        # repeat an identical (already-failing) request.
        if (max_tokens is not None
                and "max_completion_tokens" not in kwargs
                and ("max_tokens" in err_str or "unsupported_parameter" in err_str)):
            kwargs.pop("max_tokens", None)
            kwargs["max_completion_tokens"] = max_tokens
            return client.chat.completions.create(**kwargs)
        raise
|
| 1539 |
+
|
| 1540 |
+
|
| 1541 |
+
async def async_call_llm(
    task: str = None,
    *,
    provider: str = None,
    model: str = None,
    base_url: str = None,
    api_key: str = None,
    messages: list,
    temperature: float = None,
    max_tokens: int = None,
    tools: list = None,
    timeout: float = 30.0,
    extra_body: dict = None,
) -> Any:
    """Centralized asynchronous LLM call.

    Same as call_llm() but async. See call_llm() for full documentation.
    """
    resolved_provider, resolved_model, resolved_base_url, resolved_api_key = _resolve_task_provider_model(
        task, provider, model, base_url, api_key)

    if task == "vision":
        # Vision has its own provider-resolution chain (image-capable backends).
        effective_provider, client, final_model = resolve_vision_provider_client(
            provider=provider,
            model=model,
            base_url=base_url,
            api_key=api_key,
            async_mode=True,
        )
        if client is None and resolved_provider != "auto" and not resolved_base_url:
            logger.warning(
                "Vision provider %s unavailable, falling back to auto vision backends",
                resolved_provider,
            )
            effective_provider, client, final_model = resolve_vision_provider_client(
                provider="auto",
                model=resolved_model,
                async_mode=True,
            )
        if client is None:
            raise RuntimeError(
                f"No LLM provider configured for task={task} provider={resolved_provider}. "
                f"Run: hermes setup"
            )
        resolved_provider = effective_provider or resolved_provider
    else:
        client, final_model = _get_cached_client(
            resolved_provider,
            resolved_model,
            async_mode=True,
            base_url=resolved_base_url,
            api_key=resolved_api_key,
        )
        if client is None:
            # Explicit non-OpenRouter provider with no credentials: fail fast
            # instead of silently routing through OpenRouter.
            _explicit = (resolved_provider or "").strip().lower()
            if _explicit and _explicit not in ("auto", "openrouter", "custom"):
                raise RuntimeError(
                    f"Provider '{_explicit}' is set in config.yaml but no API key "
                    f"was found. Set the {_explicit.upper()}_API_KEY environment "
                    f"variable, or switch to a different provider with `hermes model`."
                )
            if not resolved_base_url:
                logger.warning("Provider %s unavailable, falling back to openrouter",
                               resolved_provider)
                client, final_model = _get_cached_client(
                    "openrouter", resolved_model or _OPENROUTER_MODEL,
                    async_mode=True)
            if client is None:
                raise RuntimeError(
                    f"No LLM provider configured for task={task} provider={resolved_provider}. "
                    f"Run: hermes setup")

    kwargs = _build_call_kwargs(
        resolved_provider, final_model, messages,
        temperature=temperature, max_tokens=max_tokens,
        tools=tools, timeout=timeout, extra_body=extra_body,
        base_url=resolved_base_url)

    # Retry once with max_completion_tokens if the provider rejects max_tokens.
    try:
        return await client.chat.completions.create(**kwargs)
    except Exception as first_err:
        err_str = str(first_err)
        # BUGFIX: only retry when a max_tokens value was actually supplied
        # and the request did not already use max_completion_tokens —
        # otherwise the retry would send max_completion_tokens=None or
        # repeat an identical (already-failing) request.
        if (max_tokens is not None
                and "max_completion_tokens" not in kwargs
                and ("max_tokens" in err_str or "unsupported_parameter" in err_str)):
            kwargs.pop("max_tokens", None)
            kwargs["max_completion_tokens"] = max_tokens
            return await client.chat.completions.create(**kwargs)
        raise
|
agent/context_compressor.py
ADDED
|
@@ -0,0 +1,677 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Automatic context window compression for long conversations.
|
| 2 |
+
|
| 3 |
+
Self-contained class with its own OpenAI client for summarization.
|
| 4 |
+
Uses auxiliary model (cheap/fast) to summarize middle turns while
|
| 5 |
+
protecting head and tail context.
|
| 6 |
+
|
| 7 |
+
Improvements over v1:
|
| 8 |
+
- Structured summary template (Goal, Progress, Decisions, Files, Next Steps)
|
| 9 |
+
- Iterative summary updates (preserves info across multiple compactions)
|
| 10 |
+
- Token-budget tail protection instead of fixed message count
|
| 11 |
+
- Tool output pruning before LLM summarization (cheap pre-pass)
|
| 12 |
+
- Scaled summary budget (proportional to compressed content)
|
| 13 |
+
- Richer tool call/result detail in summarizer input
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
import logging
|
| 17 |
+
import os
|
| 18 |
+
from typing import Any, Dict, List, Optional
|
| 19 |
+
|
| 20 |
+
from agent.auxiliary_client import call_llm
|
| 21 |
+
from agent.model_metadata import (
|
| 22 |
+
get_model_context_length,
|
| 23 |
+
estimate_messages_tokens_rough,
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
# Module-level logger for compression diagnostics.
logger = logging.getLogger(__name__)

# Marker prepended to the synthetic message that carries a compaction
# summary, telling the model the described work was already completed.
SUMMARY_PREFIX = (
    "[CONTEXT COMPACTION] Earlier turns in this conversation were compacted "
    "to save context space. The summary below describes work that was "
    "already completed, and the current session state may still reflect "
    "that work (for example, files may already be changed). Use the summary "
    "and the current state to continue from where things left off, and "
    "avoid repeating work:"
)
# Prefix emitted by the v1 compressor; presumably still recognized when
# re-loading older sessions — confirm against callers.
LEGACY_SUMMARY_PREFIX = "[CONTEXT SUMMARY]:"

# Minimum tokens for the summary output
_MIN_SUMMARY_TOKENS = 2000
# Proportion of compressed content to allocate for summary
_SUMMARY_RATIO = 0.20
# Absolute ceiling for summary tokens (even on very large context windows)
_SUMMARY_TOKENS_CEILING = 12_000

# Placeholder used when pruning old tool results
_PRUNED_TOOL_PLACEHOLDER = "[Old tool output cleared to save context space]"

# Chars per token rough estimate (not referenced by the methods visible
# here; presumably consumed elsewhere in this module — verify before removal)
_CHARS_PER_TOKEN = 4
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class ContextCompressor:
|
| 53 |
+
"""Compresses conversation context when approaching the model's context limit.
|
| 54 |
+
|
| 55 |
+
Algorithm:
|
| 56 |
+
1. Prune old tool results (cheap, no LLM call)
|
| 57 |
+
2. Protect head messages (system prompt + first exchange)
|
| 58 |
+
3. Protect tail messages by token budget (most recent ~20K tokens)
|
| 59 |
+
4. Summarize middle turns with structured LLM prompt
|
| 60 |
+
5. On subsequent compactions, iteratively update the previous summary
|
| 61 |
+
"""
|
| 62 |
+
|
| 63 |
+
def __init__(
|
| 64 |
+
self,
|
| 65 |
+
model: str,
|
| 66 |
+
threshold_percent: float = 0.50,
|
| 67 |
+
protect_first_n: int = 3,
|
| 68 |
+
protect_last_n: int = 20,
|
| 69 |
+
summary_target_ratio: float = 0.20,
|
| 70 |
+
quiet_mode: bool = False,
|
| 71 |
+
summary_model_override: str = None,
|
| 72 |
+
base_url: str = "",
|
| 73 |
+
api_key: str = "",
|
| 74 |
+
config_context_length: int | None = None,
|
| 75 |
+
provider: str = "",
|
| 76 |
+
):
|
| 77 |
+
self.model = model
|
| 78 |
+
self.base_url = base_url
|
| 79 |
+
self.api_key = api_key
|
| 80 |
+
self.provider = provider
|
| 81 |
+
self.threshold_percent = threshold_percent
|
| 82 |
+
self.protect_first_n = protect_first_n
|
| 83 |
+
self.protect_last_n = protect_last_n
|
| 84 |
+
self.summary_target_ratio = max(0.10, min(summary_target_ratio, 0.80))
|
| 85 |
+
self.quiet_mode = quiet_mode
|
| 86 |
+
|
| 87 |
+
self.context_length = get_model_context_length(
|
| 88 |
+
model, base_url=base_url, api_key=api_key,
|
| 89 |
+
config_context_length=config_context_length,
|
| 90 |
+
provider=provider,
|
| 91 |
+
)
|
| 92 |
+
self.threshold_tokens = int(self.context_length * threshold_percent)
|
| 93 |
+
self.compression_count = 0
|
| 94 |
+
|
| 95 |
+
# Derive token budgets: ratio is relative to the threshold, not total context
|
| 96 |
+
target_tokens = int(self.threshold_tokens * self.summary_target_ratio)
|
| 97 |
+
self.tail_token_budget = target_tokens
|
| 98 |
+
self.max_summary_tokens = min(
|
| 99 |
+
int(self.context_length * 0.05), _SUMMARY_TOKENS_CEILING,
|
| 100 |
+
)
|
| 101 |
+
|
| 102 |
+
if not quiet_mode:
|
| 103 |
+
logger.info(
|
| 104 |
+
"Context compressor initialized: model=%s context_length=%d "
|
| 105 |
+
"threshold=%d (%.0f%%) target_ratio=%.0f%% tail_budget=%d "
|
| 106 |
+
"provider=%s base_url=%s",
|
| 107 |
+
model, self.context_length, self.threshold_tokens,
|
| 108 |
+
threshold_percent * 100, self.summary_target_ratio * 100,
|
| 109 |
+
self.tail_token_budget,
|
| 110 |
+
provider or "none", base_url or "none",
|
| 111 |
+
)
|
| 112 |
+
self._context_probed = False # True after a step-down from context error
|
| 113 |
+
|
| 114 |
+
self.last_prompt_tokens = 0
|
| 115 |
+
self.last_completion_tokens = 0
|
| 116 |
+
self.last_total_tokens = 0
|
| 117 |
+
|
| 118 |
+
self.summary_model = summary_model_override or ""
|
| 119 |
+
|
| 120 |
+
# Stores the previous compaction summary for iterative updates
|
| 121 |
+
self._previous_summary: Optional[str] = None
|
| 122 |
+
|
| 123 |
+
def update_from_response(self, usage: Dict[str, Any]):
|
| 124 |
+
"""Update tracked token usage from API response."""
|
| 125 |
+
self.last_prompt_tokens = usage.get("prompt_tokens", 0)
|
| 126 |
+
self.last_completion_tokens = usage.get("completion_tokens", 0)
|
| 127 |
+
self.last_total_tokens = usage.get("total_tokens", 0)
|
| 128 |
+
|
| 129 |
+
def should_compress(self, prompt_tokens: int = None) -> bool:
|
| 130 |
+
"""Check if context exceeds the compression threshold."""
|
| 131 |
+
tokens = prompt_tokens if prompt_tokens is not None else self.last_prompt_tokens
|
| 132 |
+
return tokens >= self.threshold_tokens
|
| 133 |
+
|
| 134 |
+
def should_compress_preflight(self, messages: List[Dict[str, Any]]) -> bool:
|
| 135 |
+
"""Quick pre-flight check using rough estimate (before API call)."""
|
| 136 |
+
rough_estimate = estimate_messages_tokens_rough(messages)
|
| 137 |
+
return rough_estimate >= self.threshold_tokens
|
| 138 |
+
|
| 139 |
+
def get_status(self) -> Dict[str, Any]:
|
| 140 |
+
"""Get current compression status for display/logging."""
|
| 141 |
+
return {
|
| 142 |
+
"last_prompt_tokens": self.last_prompt_tokens,
|
| 143 |
+
"threshold_tokens": self.threshold_tokens,
|
| 144 |
+
"context_length": self.context_length,
|
| 145 |
+
"usage_percent": (self.last_prompt_tokens / self.context_length * 100) if self.context_length else 0,
|
| 146 |
+
"compression_count": self.compression_count,
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
# ------------------------------------------------------------------
|
| 150 |
+
# Tool output pruning (cheap pre-pass, no LLM call)
|
| 151 |
+
# ------------------------------------------------------------------
|
| 152 |
+
|
| 153 |
+
def _prune_old_tool_results(
|
| 154 |
+
self, messages: List[Dict[str, Any]], protect_tail_count: int,
|
| 155 |
+
) -> tuple[List[Dict[str, Any]], int]:
|
| 156 |
+
"""Replace old tool result contents with a short placeholder.
|
| 157 |
+
|
| 158 |
+
Walks backward from the end, protecting the most recent
|
| 159 |
+
``protect_tail_count`` messages. Older tool results get their
|
| 160 |
+
content replaced with a placeholder string.
|
| 161 |
+
|
| 162 |
+
Returns (pruned_messages, pruned_count).
|
| 163 |
+
"""
|
| 164 |
+
if not messages:
|
| 165 |
+
return messages, 0
|
| 166 |
+
|
| 167 |
+
result = [m.copy() for m in messages]
|
| 168 |
+
pruned = 0
|
| 169 |
+
prune_boundary = len(result) - protect_tail_count
|
| 170 |
+
|
| 171 |
+
for i in range(prune_boundary):
|
| 172 |
+
msg = result[i]
|
| 173 |
+
if msg.get("role") != "tool":
|
| 174 |
+
continue
|
| 175 |
+
content = msg.get("content", "")
|
| 176 |
+
if not content or content == _PRUNED_TOOL_PLACEHOLDER:
|
| 177 |
+
continue
|
| 178 |
+
# Only prune if the content is substantial (>200 chars)
|
| 179 |
+
if len(content) > 200:
|
| 180 |
+
result[i] = {**msg, "content": _PRUNED_TOOL_PLACEHOLDER}
|
| 181 |
+
pruned += 1
|
| 182 |
+
|
| 183 |
+
return result, pruned
|
| 184 |
+
|
| 185 |
+
# ------------------------------------------------------------------
|
| 186 |
+
# Summarization
|
| 187 |
+
# ------------------------------------------------------------------
|
| 188 |
+
|
| 189 |
+
def _compute_summary_budget(self, turns_to_summarize: List[Dict[str, Any]]) -> int:
|
| 190 |
+
"""Scale summary token budget with the amount of content being compressed.
|
| 191 |
+
|
| 192 |
+
The maximum scales with the model's context window (5% of context,
|
| 193 |
+
capped at ``_SUMMARY_TOKENS_CEILING``) so large-context models get
|
| 194 |
+
richer summaries instead of being hard-capped at 8K tokens.
|
| 195 |
+
"""
|
| 196 |
+
content_tokens = estimate_messages_tokens_rough(turns_to_summarize)
|
| 197 |
+
budget = int(content_tokens * _SUMMARY_RATIO)
|
| 198 |
+
return max(_MIN_SUMMARY_TOKENS, min(budget, self.max_summary_tokens))
|
| 199 |
+
|
| 200 |
+
def _serialize_for_summary(self, turns: List[Dict[str, Any]]) -> str:
|
| 201 |
+
"""Serialize conversation turns into labeled text for the summarizer.
|
| 202 |
+
|
| 203 |
+
Includes tool call arguments and result content (up to 3000 chars
|
| 204 |
+
per message) so the summarizer can preserve specific details like
|
| 205 |
+
file paths, commands, and outputs.
|
| 206 |
+
"""
|
| 207 |
+
parts = []
|
| 208 |
+
for msg in turns:
|
| 209 |
+
role = msg.get("role", "unknown")
|
| 210 |
+
content = msg.get("content") or ""
|
| 211 |
+
|
| 212 |
+
# Tool results: keep more content than before (3000 chars)
|
| 213 |
+
if role == "tool":
|
| 214 |
+
tool_id = msg.get("tool_call_id", "")
|
| 215 |
+
if len(content) > 3000:
|
| 216 |
+
content = content[:2000] + "\n...[truncated]...\n" + content[-800:]
|
| 217 |
+
parts.append(f"[TOOL RESULT {tool_id}]: {content}")
|
| 218 |
+
continue
|
| 219 |
+
|
| 220 |
+
# Assistant messages: include tool call names AND arguments
|
| 221 |
+
if role == "assistant":
|
| 222 |
+
if len(content) > 3000:
|
| 223 |
+
content = content[:2000] + "\n...[truncated]...\n" + content[-800:]
|
| 224 |
+
tool_calls = msg.get("tool_calls", [])
|
| 225 |
+
if tool_calls:
|
| 226 |
+
tc_parts = []
|
| 227 |
+
for tc in tool_calls:
|
| 228 |
+
if isinstance(tc, dict):
|
| 229 |
+
fn = tc.get("function", {})
|
| 230 |
+
name = fn.get("name", "?")
|
| 231 |
+
args = fn.get("arguments", "")
|
| 232 |
+
# Truncate long arguments but keep enough for context
|
| 233 |
+
if len(args) > 500:
|
| 234 |
+
args = args[:400] + "..."
|
| 235 |
+
tc_parts.append(f" {name}({args})")
|
| 236 |
+
else:
|
| 237 |
+
fn = getattr(tc, "function", None)
|
| 238 |
+
name = getattr(fn, "name", "?") if fn else "?"
|
| 239 |
+
tc_parts.append(f" {name}(...)")
|
| 240 |
+
content += "\n[Tool calls:\n" + "\n".join(tc_parts) + "\n]"
|
| 241 |
+
parts.append(f"[ASSISTANT]: {content}")
|
| 242 |
+
continue
|
| 243 |
+
|
| 244 |
+
# User and other roles
|
| 245 |
+
if len(content) > 3000:
|
| 246 |
+
content = content[:2000] + "\n...[truncated]...\n" + content[-800:]
|
| 247 |
+
parts.append(f"[{role.upper()}]: {content}")
|
| 248 |
+
|
| 249 |
+
return "\n\n".join(parts)
|
| 250 |
+
|
| 251 |
+
def _generate_summary(self, turns_to_summarize: List[Dict[str, Any]]) -> Optional[str]:
    """Generate a structured summary of conversation turns.

    Uses a structured template (Goal, Progress, Decisions, Files, Next Steps)
    inspired by Pi-mono and OpenCode. When a previous summary exists,
    generates an iterative update instead of summarizing from scratch.

    Args:
        turns_to_summarize: The middle conversation turns being compacted.

    Returns:
        The prefixed summary text, or ``None`` if all attempts fail —
        the caller should drop the middle turns without a summary rather
        than inject a useless placeholder.
    """
    summary_budget = self._compute_summary_budget(turns_to_summarize)
    content_to_summarize = self._serialize_for_summary(turns_to_summarize)

    if self._previous_summary:
        # Iterative update: preserve existing info, add new progress
        prompt = f"""You are updating a context compaction summary. A previous compaction produced the summary below. New conversation turns have occurred since then and need to be incorporated.

PREVIOUS SUMMARY:
{self._previous_summary}

NEW TURNS TO INCORPORATE:
{content_to_summarize}

Update the summary using this exact structure. PRESERVE all existing information that is still relevant. ADD new progress. Move items from "In Progress" to "Done" when completed. Remove information only if it is clearly obsolete.

## Goal
[What the user is trying to accomplish — preserve from previous summary, update if goal evolved]

## Constraints & Preferences
[User preferences, coding style, constraints, important decisions — accumulate across compactions]

## Progress
### Done
[Completed work — include specific file paths, commands run, results obtained]
### In Progress
[Work currently underway]
### Blocked
[Any blockers or issues encountered]

## Key Decisions
[Important technical decisions and why they were made]

## Relevant Files
[Files read, modified, or created — with brief note on each. Accumulate across compactions.]

## Next Steps
[What needs to happen next to continue the work]

## Critical Context
[Any specific values, error messages, configuration details, or data that would be lost without explicit preservation]

Target ~{summary_budget} tokens. Be specific — include file paths, command outputs, error messages, and concrete values rather than vague descriptions.

Write only the summary body. Do not include any preamble or prefix."""
    else:
        # First compaction: summarize from scratch
        prompt = f"""Create a structured handoff summary for a later assistant that will continue this conversation after earlier turns are compacted.

TURNS TO SUMMARIZE:
{content_to_summarize}

Use this exact structure:

## Goal
[What the user is trying to accomplish]

## Constraints & Preferences
[User preferences, coding style, constraints, important decisions]

## Progress
### Done
[Completed work — include specific file paths, commands run, results obtained]
### In Progress
[Work currently underway]
### Blocked
[Any blockers or issues encountered]

## Key Decisions
[Important technical decisions and why they were made]

## Relevant Files
[Files read, modified, or created — with brief note on each]

## Next Steps
[What needs to happen next to continue the work]

## Critical Context
[Any specific values, error messages, configuration details, or data that would be lost without explicit preservation]

Target ~{summary_budget} tokens. Be specific — include file paths, command outputs, error messages, and concrete values rather than vague descriptions. The goal is to prevent the next assistant from repeating work or losing important details.

Write only the summary body. Do not include any preamble or prefix."""

    try:
        call_kwargs = {
            "task": "compression",
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.3,
            # Leave headroom above the target so the model is not cut off.
            "max_tokens": summary_budget * 2,
            "timeout": 45.0,
        }
        if self.summary_model:
            call_kwargs["model"] = self.summary_model
        response = call_llm(**call_kwargs)
        content = response.choices[0].message.content
        # Handle cases where content is not a string (e.g., dict from llama.cpp)
        if not isinstance(content, str):
            content = str(content) if content else ""
        summary = content.strip()
        # Store for iterative updates on next compaction
        self._previous_summary = summary
        return self._with_summary_prefix(summary)
    except RuntimeError:
        # Fixed inconsistency: use the module-level ``logger`` (as every
        # sibling method does) instead of the root ``logging`` logger.
        logger.warning("Context compression: no provider available for "
                       "summary. Middle turns will be dropped without summary.")
        return None
    except Exception as e:
        logger.warning("Failed to generate context summary: %s", e)
        return None
|
| 371 |
+
|
| 372 |
+
@staticmethod
def _with_summary_prefix(summary: str) -> str:
    """Normalize summary text to the current compaction handoff format.

    Strips a legacy or current prefix if the model echoed one, then
    re-attaches the canonical ``SUMMARY_PREFIX``.
    """
    body = (summary or "").strip()
    for known_prefix in (LEGACY_SUMMARY_PREFIX, SUMMARY_PREFIX):
        if body.startswith(known_prefix):
            body = body[len(known_prefix):].lstrip()
            break
    if not body:
        return SUMMARY_PREFIX
    return f"{SUMMARY_PREFIX}\n{body}"
|
| 381 |
+
|
| 382 |
+
# ------------------------------------------------------------------
|
| 383 |
+
# Tool-call / tool-result pair integrity helpers
|
| 384 |
+
# ------------------------------------------------------------------
|
| 385 |
+
|
| 386 |
+
@staticmethod
|
| 387 |
+
def _get_tool_call_id(tc) -> str:
|
| 388 |
+
"""Extract the call ID from a tool_call entry (dict or SimpleNamespace)."""
|
| 389 |
+
if isinstance(tc, dict):
|
| 390 |
+
return tc.get("id", "")
|
| 391 |
+
return getattr(tc, "id", "") or ""
|
| 392 |
+
|
| 393 |
+
def _sanitize_tool_pairs(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
| 394 |
+
"""Fix orphaned tool_call / tool_result pairs after compression.
|
| 395 |
+
|
| 396 |
+
Two failure modes:
|
| 397 |
+
1. A tool *result* references a call_id whose assistant tool_call was
|
| 398 |
+
removed (summarized/truncated). The API rejects this with
|
| 399 |
+
"No tool call found for function call output with call_id ...".
|
| 400 |
+
2. An assistant message has tool_calls whose results were dropped.
|
| 401 |
+
The API rejects this because every tool_call must be followed by
|
| 402 |
+
a tool result with the matching call_id.
|
| 403 |
+
|
| 404 |
+
This method removes orphaned results and inserts stub results for
|
| 405 |
+
orphaned calls so the message list is always well-formed.
|
| 406 |
+
"""
|
| 407 |
+
surviving_call_ids: set = set()
|
| 408 |
+
for msg in messages:
|
| 409 |
+
if msg.get("role") == "assistant":
|
| 410 |
+
for tc in msg.get("tool_calls") or []:
|
| 411 |
+
cid = self._get_tool_call_id(tc)
|
| 412 |
+
if cid:
|
| 413 |
+
surviving_call_ids.add(cid)
|
| 414 |
+
|
| 415 |
+
result_call_ids: set = set()
|
| 416 |
+
for msg in messages:
|
| 417 |
+
if msg.get("role") == "tool":
|
| 418 |
+
cid = msg.get("tool_call_id")
|
| 419 |
+
if cid:
|
| 420 |
+
result_call_ids.add(cid)
|
| 421 |
+
|
| 422 |
+
# 1. Remove tool results whose call_id has no matching assistant tool_call
|
| 423 |
+
orphaned_results = result_call_ids - surviving_call_ids
|
| 424 |
+
if orphaned_results:
|
| 425 |
+
messages = [
|
| 426 |
+
m for m in messages
|
| 427 |
+
if not (m.get("role") == "tool" and m.get("tool_call_id") in orphaned_results)
|
| 428 |
+
]
|
| 429 |
+
if not self.quiet_mode:
|
| 430 |
+
logger.info("Compression sanitizer: removed %d orphaned tool result(s)", len(orphaned_results))
|
| 431 |
+
|
| 432 |
+
# 2. Add stub results for assistant tool_calls whose results were dropped
|
| 433 |
+
missing_results = surviving_call_ids - result_call_ids
|
| 434 |
+
if missing_results:
|
| 435 |
+
patched: List[Dict[str, Any]] = []
|
| 436 |
+
for msg in messages:
|
| 437 |
+
patched.append(msg)
|
| 438 |
+
if msg.get("role") == "assistant":
|
| 439 |
+
for tc in msg.get("tool_calls") or []:
|
| 440 |
+
cid = self._get_tool_call_id(tc)
|
| 441 |
+
if cid in missing_results:
|
| 442 |
+
patched.append({
|
| 443 |
+
"role": "tool",
|
| 444 |
+
"content": "[Result from earlier conversation — see context summary above]",
|
| 445 |
+
"tool_call_id": cid,
|
| 446 |
+
})
|
| 447 |
+
messages = patched
|
| 448 |
+
if not self.quiet_mode:
|
| 449 |
+
logger.info("Compression sanitizer: added %d stub tool result(s)", len(missing_results))
|
| 450 |
+
|
| 451 |
+
return messages
|
| 452 |
+
|
| 453 |
+
def _align_boundary_forward(self, messages: List[Dict[str, Any]], idx: int) -> int:
|
| 454 |
+
"""Push a compress-start boundary forward past any orphan tool results.
|
| 455 |
+
|
| 456 |
+
If ``messages[idx]`` is a tool result, slide forward until we hit a
|
| 457 |
+
non-tool message so we don't start the summarised region mid-group.
|
| 458 |
+
"""
|
| 459 |
+
while idx < len(messages) and messages[idx].get("role") == "tool":
|
| 460 |
+
idx += 1
|
| 461 |
+
return idx
|
| 462 |
+
|
| 463 |
+
def _align_boundary_backward(self, messages: List[Dict[str, Any]], idx: int) -> int:
|
| 464 |
+
"""Pull a compress-end boundary backward to avoid splitting a
|
| 465 |
+
tool_call / result group.
|
| 466 |
+
|
| 467 |
+
If the boundary falls in the middle of a tool-result group (i.e.
|
| 468 |
+
there are consecutive tool messages before ``idx``), walk backward
|
| 469 |
+
past all of them to find the parent assistant message. If found,
|
| 470 |
+
move the boundary before the assistant so the entire
|
| 471 |
+
assistant + tool_results group is included in the summarised region
|
| 472 |
+
rather than being split (which causes silent data loss when
|
| 473 |
+
``_sanitize_tool_pairs`` removes the orphaned tail results).
|
| 474 |
+
"""
|
| 475 |
+
if idx <= 0 or idx >= len(messages):
|
| 476 |
+
return idx
|
| 477 |
+
# Walk backward past consecutive tool results
|
| 478 |
+
check = idx - 1
|
| 479 |
+
while check >= 0 and messages[check].get("role") == "tool":
|
| 480 |
+
check -= 1
|
| 481 |
+
# If we landed on the parent assistant with tool_calls, pull the
|
| 482 |
+
# boundary before it so the whole group gets summarised together.
|
| 483 |
+
if check >= 0 and messages[check].get("role") == "assistant" and messages[check].get("tool_calls"):
|
| 484 |
+
idx = check
|
| 485 |
+
return idx
|
| 486 |
+
|
| 487 |
+
# ------------------------------------------------------------------
|
| 488 |
+
# Tail protection by token budget
|
| 489 |
+
# ------------------------------------------------------------------
|
| 490 |
+
|
| 491 |
+
def _find_tail_cut_by_tokens(
    self, messages: List[Dict[str, Any]], head_end: int,
    token_budget: int | None = None,
) -> int:
    """Walk backward from the end of messages, accumulating tokens until
    the budget is reached. Returns the index where the tail starts.

    ``token_budget`` defaults to ``self.tail_token_budget`` which is
    derived from ``summary_target_ratio * context_length``, so it
    scales automatically with the model's context window.

    Never cuts inside a tool_call/result group. Falls back to the old
    ``protect_last_n`` if the budget would protect fewer messages.
    """
    if token_budget is None:
        token_budget = self.tail_token_budget
    n = len(messages)
    min_tail = self.protect_last_n
    accumulated = 0
    cut_idx = n  # start from beyond the end

    # Accumulate rough per-message token estimates from newest to oldest.
    for i in range(n - 1, head_end - 1, -1):
        msg = messages[i]
        content = msg.get("content") or ""
        msg_tokens = len(content) // _CHARS_PER_TOKEN + 10  # +10 for role/metadata
        # Include tool call arguments in estimate
        for tc in msg.get("tool_calls") or []:
            if isinstance(tc, dict):
                args = tc.get("function", {}).get("arguments", "")
                msg_tokens += len(args) // _CHARS_PER_TOKEN
        # Stop once the budget is exhausted, but only after the minimum
        # number of tail messages is already protected.
        if accumulated + msg_tokens > token_budget and (n - i) >= min_tail:
            break
        accumulated += msg_tokens
        cut_idx = i

    # Ensure we protect at least protect_last_n messages
    fallback_cut = n - min_tail
    if cut_idx > fallback_cut:
        cut_idx = fallback_cut

    # If the token budget would protect everything (small conversations),
    # fall back to the fixed protect_last_n approach so compression can
    # still remove middle turns.
    if cut_idx <= head_end:
        cut_idx = fallback_cut

    # Align to avoid splitting tool groups
    cut_idx = self._align_boundary_backward(messages, cut_idx)

    # Never return a cut at or before the head: at least one message must
    # remain compressible.
    return max(cut_idx, head_end + 1)
|
| 541 |
+
|
| 542 |
+
# ------------------------------------------------------------------
|
| 543 |
+
# Main compression entry point
|
| 544 |
+
# ------------------------------------------------------------------
|
| 545 |
+
|
| 546 |
+
def compress(self, messages: List[Dict[str, Any]], current_tokens: int | None = None) -> List[Dict[str, Any]]:
    """Compress conversation messages by summarizing middle turns.

    Algorithm:
    1. Prune old tool results (cheap pre-pass, no LLM call)
    2. Protect head messages (system prompt + first exchange)
    3. Find tail boundary by token budget (~20K tokens of recent context)
    4. Summarize middle turns with structured LLM prompt
    5. On re-compression, iteratively update the previous summary

    After compression, orphaned tool_call / tool_result pairs are cleaned
    up so the API never receives mismatched IDs.

    Args:
        messages: The full conversation message list.
        current_tokens: Known prompt token count for logging; when falsy,
            falls back to ``self.last_prompt_tokens`` or a rough estimate.

    Returns:
        The (possibly) compressed message list; the input list is returned
        unchanged when there are too few messages to compress.
    """
    n_messages = len(messages)
    # Too small to compress: head + tail protection would cover everything.
    if n_messages <= self.protect_first_n + self.protect_last_n + 1:
        if not self.quiet_mode:
            logger.warning(
                "Cannot compress: only %d messages (need > %d)",
                n_messages,
                self.protect_first_n + self.protect_last_n + 1,
            )
        return messages

    display_tokens = current_tokens if current_tokens else self.last_prompt_tokens or estimate_messages_tokens_rough(messages)

    # Phase 1: Prune old tool results (cheap, no LLM call)
    messages, pruned_count = self._prune_old_tool_results(
        messages, protect_tail_count=self.protect_last_n * 3,
    )
    if pruned_count and not self.quiet_mode:
        logger.info("Pre-compression: pruned %d old tool result(s)", pruned_count)

    # Phase 2: Determine boundaries
    compress_start = self.protect_first_n
    compress_start = self._align_boundary_forward(messages, compress_start)

    # Use token-budget tail protection instead of fixed message count
    compress_end = self._find_tail_cut_by_tokens(messages, compress_start)

    # Boundaries crossed: nothing left between head and tail to summarize.
    if compress_start >= compress_end:
        return messages

    turns_to_summarize = messages[compress_start:compress_end]

    if not self.quiet_mode:
        logger.info(
            "Context compression triggered (%d tokens >= %d threshold)",
            display_tokens,
            self.threshold_tokens,
        )
        logger.info(
            "Model context limit: %d tokens (%.0f%% = %d)",
            self.context_length,
            self.threshold_percent * 100,
            self.threshold_tokens,
        )
        tail_msgs = n_messages - compress_end
        logger.info(
            "Summarizing turns %d-%d (%d turns), protecting %d head + %d tail messages",
            compress_start + 1,
            compress_end,
            len(turns_to_summarize),
            compress_start,
            tail_msgs,
        )

    # Phase 3: Generate structured summary
    summary = self._generate_summary(turns_to_summarize)

    # Phase 4: Assemble compressed message list
    compressed = []
    for i in range(compress_start):
        msg = messages[i].copy()
        # On the very first compaction, annotate the system prompt once so
        # the model knows a handoff summary follows.
        if i == 0 and msg.get("role") == "system" and self.compression_count == 0:
            msg["content"] = (
                (msg.get("content") or "")
                + "\n\n[Note: Some earlier conversation turns have been compacted into a handoff summary to preserve context space. The current session state may still reflect earlier work, so build on that summary and state rather than re-doing work.]"
            )
        compressed.append(msg)

    _merge_summary_into_tail = False
    if summary:
        last_head_role = messages[compress_start - 1].get("role", "user") if compress_start > 0 else "user"
        first_tail_role = messages[compress_end].get("role", "user") if compress_end < n_messages else "user"
        # Pick a role that avoids consecutive same-role with both neighbors.
        # Priority: avoid colliding with head (already committed), then tail.
        if last_head_role in ("assistant", "tool"):
            summary_role = "user"
        else:
            summary_role = "assistant"
        # If the chosen role collides with the tail AND flipping wouldn't
        # collide with the head, flip it.
        if summary_role == first_tail_role:
            flipped = "assistant" if summary_role == "user" else "user"
            if flipped != last_head_role:
                summary_role = flipped
            else:
                # Both roles would create consecutive same-role messages
                # (e.g. head=assistant, tail=user — neither role works).
                # Merge the summary into the first tail message instead
                # of inserting a standalone message that breaks alternation.
                _merge_summary_into_tail = True
        if not _merge_summary_into_tail:
            compressed.append({"role": summary_role, "content": summary})
    else:
        if not self.quiet_mode:
            logger.warning("No summary model available — middle turns dropped without summary")

    # Append the protected tail, merging the summary into its first
    # message when standalone insertion would break role alternation.
    for i in range(compress_end, n_messages):
        msg = messages[i].copy()
        if _merge_summary_into_tail and i == compress_end:
            original = msg.get("content") or ""
            msg["content"] = summary + "\n\n" + original
            _merge_summary_into_tail = False
        compressed.append(msg)

    self.compression_count += 1

    # Always repair tool_call/result pairing before handing back to the API.
    compressed = self._sanitize_tool_pairs(compressed)

    if not self.quiet_mode:
        new_estimate = estimate_messages_tokens_rough(compressed)
        saved_estimate = display_tokens - new_estimate
        logger.info(
            "Compressed: %d -> %d messages (~%d tokens saved)",
            n_messages,
            len(compressed),
            saved_estimate,
        )
        logger.info("Compression #%d complete", self.compression_count)

    return compressed
|
agent/context_references.py
ADDED
|
@@ -0,0 +1,485 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import asyncio
|
| 4 |
+
import inspect
|
| 5 |
+
import json
|
| 6 |
+
import mimetypes
|
| 7 |
+
import os
|
| 8 |
+
import re
|
| 9 |
+
import subprocess
|
| 10 |
+
from dataclasses import dataclass, field
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
from typing import Awaitable, Callable
|
| 13 |
+
|
| 14 |
+
from agent.model_metadata import estimate_tokens_rough
|
| 15 |
+
|
| 16 |
+
# Matches @-references in user messages:
#   @diff / @staged                      (bare git shortcuts, no value)
#   @file:PATH  @folder:PATH  @git:N  @url:URL
# The (?<![\w/]) lookbehind prevents matching inside words or paths
# (e.g. "a@file:..." or "dir/@file:...").
REFERENCE_PATTERN = re.compile(
    r"(?<![\w/])@(?:(?P<simple>diff|staged)\b|(?P<kind>file|folder|git|url):(?P<value>\S+))"
)
# Punctuation stripped from the end of a captured reference value so that
# "@file:a.py," in a sentence still resolves to "a.py".
TRAILING_PUNCTUATION = ",.;!?"
# Home-directory folders whose contents must never be injected as context.
_SENSITIVE_HOME_DIRS = (".ssh", ".aws", ".gnupg", ".kube")
# Project-internal directories that are likewise off limits.
_SENSITIVE_HERMES_DIRS = (Path("skills") / ".hub",)
# Individual credential/shell-config files under $HOME that are blocked.
_SENSITIVE_HOME_FILES = (
    Path(".ssh") / "authorized_keys",
    Path(".ssh") / "id_rsa",
    Path(".ssh") / "id_ed25519",
    Path(".ssh") / "config",
    Path(".bashrc"),
    Path(".zshrc"),
    Path(".profile"),
    Path(".bash_profile"),
    Path(".zprofile"),
    Path(".netrc"),
    Path(".pgpass"),
    Path(".npmrc"),
    Path(".pypirc"),
)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@dataclass(frozen=True)
class ContextReference:
    """One parsed @-reference extracted from a user message."""

    raw: str  # exact matched text, e.g. "@file:src/app.py:10-20"
    kind: str  # "file", "folder", "git", "url", "diff", or "staged"
    target: str  # path / URL / count portion ("" for @diff and @staged)
    start: int  # start offset of the match within the message
    end: int  # end offset (exclusive) of the match within the message
    line_start: int | None = None  # first line of an @file line range, if given
    line_end: int | None = None  # last line of an @file line range, if given
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
@dataclass
class ContextReferenceResult:
    """Outcome of expanding the @-references found in one message."""

    message: str  # final text: references stripped, warnings/context appended
    original_message: str  # the message exactly as the user wrote it
    references: list[ContextReference] = field(default_factory=list)  # parsed refs
    warnings: list[str] = field(default_factory=list)  # per-reference problems
    injected_tokens: int = 0  # rough token count of all attached context blocks
    expanded: bool = False  # True when any context or warning was added
    blocked: bool = False  # True when injection was refused (hard token limit)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def parse_context_references(message: str) -> list[ContextReference]:
    """Extract every @-reference from *message*, in order of appearance.

    ``@file:`` targets may carry an optional ``:START`` or ``:START-END``
    suffix, which is split out into ``line_start`` / ``line_end``.
    """
    if not message:
        return []

    found: list[ContextReference] = []
    for match in REFERENCE_PATTERN.finditer(message):
        span_start, span_end = match.start(), match.end()

        simple = match.group("simple")
        if simple:
            # @diff / @staged carry no target of their own.
            found.append(
                ContextReference(
                    raw=match.group(0),
                    kind=simple,
                    target="",
                    start=span_start,
                    end=span_end,
                )
            )
            continue

        kind = match.group("kind")
        target = _strip_trailing_punctuation(match.group("value") or "")
        line_start = line_end = None

        if kind == "file":
            # Split an optional trailing ":N" / ":N-M" line range off the path.
            range_match = re.match(r"^(?P<path>.+?):(?P<start>\d+)(?:-(?P<end>\d+))?$", target)
            if range_match:
                target = range_match.group("path")
                line_start = int(range_match.group("start"))
                line_end = int(range_match.group("end") or range_match.group("start"))

        found.append(
            ContextReference(
                raw=match.group(0),
                kind=kind,
                target=target,
                start=span_start,
                end=span_end,
                line_start=line_start,
                line_end=line_end,
            )
        )

    return found
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def preprocess_context_references(
    message: str,
    *,
    cwd: str | Path,
    context_length: int,
    url_fetcher: Callable[[str], str | Awaitable[str]] | None = None,
    allowed_root: str | Path | None = None,
) -> ContextReferenceResult:
    """Synchronous wrapper around :func:`preprocess_context_references_async`.

    Safe both from plain synchronous code (no event loop) and from a
    thread whose loop is already running (e.g. the gateway): in the
    latter case the coroutine is run to completion on a throwaway worker
    thread so ``asyncio.run`` is never invoked inside a running loop.
    """
    coro = preprocess_context_references_async(
        message,
        cwd=cwd,
        context_length=context_length,
        url_fetcher=url_fetcher,
        allowed_root=allowed_root,
    )
    try:
        running = asyncio.get_running_loop()
    except RuntimeError:
        running = None
    if running is None or not running.is_running():
        return asyncio.run(coro)
    # A loop is already running on this thread — drive the coroutine on a
    # dedicated worker thread instead.
    import concurrent.futures
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
        return pool.submit(asyncio.run, coro).result()
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
async def preprocess_context_references_async(
    message: str,
    *,
    cwd: str | Path,
    context_length: int,
    url_fetcher: Callable[[str], str | Awaitable[str]] | None = None,
    allowed_root: str | Path | None = None,
) -> ContextReferenceResult:
    """Expand @-references in *message* and attach the gathered context.

    Each reference is expanded via ``_expand_reference``; failures become
    warnings rather than exceptions. Total injected context is bounded:
    above 25% of ``context_length`` a warning is appended, above 50% the
    injection is refused entirely (``blocked=True``) and the original
    message is returned untouched.
    """
    refs = parse_context_references(message)
    if not refs:
        # Nothing to expand — return the message unchanged.
        return ContextReferenceResult(message=message, original_message=message)

    cwd_path = Path(cwd).expanduser().resolve()
    # Default to the current working directory so @ references cannot escape
    # the active workspace unless a caller explicitly widens the root.
    allowed_root_path = (
        Path(allowed_root).expanduser().resolve() if allowed_root is not None else cwd_path
    )
    warnings: list[str] = []
    blocks: list[str] = []
    injected_tokens = 0

    for ref in refs:
        # Each expansion yields (warning, block); either may be None.
        warning, block = await _expand_reference(
            ref,
            cwd_path,
            url_fetcher=url_fetcher,
            allowed_root=allowed_root_path,
        )
        if warning:
            warnings.append(warning)
        if block:
            blocks.append(block)
            injected_tokens += estimate_tokens_rough(block)

    # Token budget: hard refusal at 50% of context, soft warning at 25%.
    hard_limit = max(1, int(context_length * 0.50))
    soft_limit = max(1, int(context_length * 0.25))
    if injected_tokens > hard_limit:
        warnings.append(
            f"@ context injection refused: {injected_tokens} tokens exceeds the 50% hard limit ({hard_limit})."
        )
        return ContextReferenceResult(
            message=message,
            original_message=message,
            references=refs,
            warnings=warnings,
            injected_tokens=injected_tokens,
            expanded=False,
            blocked=True,
        )

    if injected_tokens > soft_limit:
        warnings.append(
            f"@ context injection warning: {injected_tokens} tokens exceeds the 25% soft limit ({soft_limit})."
        )

    # Strip the @-tokens from the message, then append warnings and the
    # attached context blocks below it.
    stripped = _remove_reference_tokens(message, refs)
    final = stripped
    if warnings:
        final = f"{final}\n\n--- Context Warnings ---\n" + "\n".join(f"- {warning}" for warning in warnings)
    if blocks:
        final = f"{final}\n\n--- Attached Context ---\n\n" + "\n\n".join(blocks)

    return ContextReferenceResult(
        message=final.strip(),
        original_message=message,
        references=refs,
        warnings=warnings,
        injected_tokens=injected_tokens,
        expanded=bool(blocks or warnings),
        blocked=False,
    )
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
async def _expand_reference(
    ref: ContextReference,
    cwd: Path,
    *,
    url_fetcher: Callable[[str], str | Awaitable[str]] | None = None,
    allowed_root: Path | None = None,
) -> tuple[str | None, str | None]:
    """Expand one @-reference into a ``(warning, context_block)`` pair.

    Exactly one element of the result is non-None: a human-readable warning
    when expansion fails, or the rendered context block on success.  Any
    exception raised during expansion is converted into a warning instead of
    propagating to the caller.
    """
    kind = ref.kind
    try:
        if kind == "file":
            return _expand_file_reference(ref, cwd, allowed_root=allowed_root)
        elif kind == "folder":
            return _expand_folder_reference(ref, cwd, allowed_root=allowed_root)
        elif kind == "diff":
            return _expand_git_reference(ref, cwd, ["diff"], "git diff")
        elif kind == "staged":
            return _expand_git_reference(ref, cwd, ["diff", "--staged"], "git diff --staged")
        elif kind == "git":
            # Clamp the requested history depth to the 1..10 commit range.
            depth = max(1, min(int(ref.target or "1"), 10))
            return _expand_git_reference(ref, cwd, ["log", f"-{depth}", "-p"], f"git log -{depth} -p")
        elif kind == "url":
            fetched = await _fetch_url_content(ref.target, url_fetcher=url_fetcher)
            if not fetched:
                return f"{ref.raw}: no content extracted", None
            return None, f"🌐 {ref.raw} ({estimate_tokens_rough(fetched)} tokens)\n{fetched}"
    except Exception as exc:
        return f"{ref.raw}: {exc}", None

    return f"{ref.raw}: unsupported reference type", None
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
def _expand_file_reference(
    ref: ContextReference,
    cwd: Path,
    *,
    allowed_root: Path | None = None,
) -> tuple[str | None, str | None]:
    """Read a referenced file (optionally a line range) into a fenced code block.

    Returns ``(warning, None)`` when the path is missing, not a file, or
    binary; otherwise ``(None, block)`` with the file body wrapped in a
    markdown code fence.
    """
    target = _resolve_path(cwd, ref.target, allowed_root=allowed_root)
    _ensure_reference_path_allowed(target)
    if not target.exists():
        return f"{ref.raw}: file not found", None
    if not target.is_file():
        return f"{ref.raw}: path is not a file", None
    if _is_binary_file(target):
        return f"{ref.raw}: binary files are not supported", None

    body = target.read_text(encoding="utf-8")
    if ref.line_start is not None:
        # Narrow to the requested 1-based, inclusive line range.
        all_lines = body.splitlines()
        begin = max(ref.line_start - 1, 0)
        finish = min(ref.line_end or ref.line_start, len(all_lines))
        body = "\n".join(all_lines[begin:finish])

    fence = _code_fence_language(target)
    return None, f"📄 {ref.raw} ({estimate_tokens_rough(body)} tokens)\n```{fence}\n{body}\n```"
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def _expand_folder_reference(
    ref: ContextReference,
    cwd: Path,
    *,
    allowed_root: Path | None = None,
) -> tuple[str | None, str | None]:
    """Render a referenced directory as an indented listing block.

    Returns ``(warning, None)`` for missing/non-directory paths, otherwise
    ``(None, block)`` with the listing produced by `_build_folder_listing`.
    """
    target = _resolve_path(cwd, ref.target, allowed_root=allowed_root)
    _ensure_reference_path_allowed(target)
    if not target.exists():
        return f"{ref.raw}: folder not found", None
    if not target.is_dir():
        return f"{ref.raw}: path is not a folder", None

    tree = _build_folder_listing(target, cwd)
    return None, f"📁 {ref.raw} ({estimate_tokens_rough(tree)} tokens)\n{tree}"
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def _expand_git_reference(
    ref: ContextReference,
    cwd: Path,
    args: list[str],
    label: str,
) -> tuple[str | None, str | None]:
    """Run ``git <args>`` in *cwd* and wrap its output in a ``diff`` code fence.

    A non-zero exit turns the (trimmed) stderr into a warning; an empty stdout
    becomes the placeholder ``(no output)``.
    """
    completed = subprocess.run(
        ["git", *args],
        cwd=cwd,
        capture_output=True,
        text=True,
    )
    if completed.returncode != 0:
        reason = (completed.stderr or "").strip() or "git command failed"
        return f"{ref.raw}: {reason}", None
    body = completed.stdout.strip() or "(no output)"
    return None, f"🧾 {label} ({estimate_tokens_rough(body)} tokens)\n```diff\n{body}\n```"
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
async def _fetch_url_content(
|
| 305 |
+
url: str,
|
| 306 |
+
*,
|
| 307 |
+
url_fetcher: Callable[[str], str | Awaitable[str]] | None = None,
|
| 308 |
+
) -> str:
|
| 309 |
+
fetcher = url_fetcher or _default_url_fetcher
|
| 310 |
+
content = fetcher(url)
|
| 311 |
+
if inspect.isawaitable(content):
|
| 312 |
+
content = await content
|
| 313 |
+
return str(content or "").strip()
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
async def _default_url_fetcher(url: str) -> str:
    """Fetch *url* via the project's web-extract tool and return its markdown text."""
    # Imported lazily so the web tooling is only loaded when a URL reference is used.
    from tools.web_tools import web_extract_tool

    raw = await web_extract_tool([url], format="markdown", use_llm_processing=True)
    parsed = json.loads(raw)
    documents = parsed.get("data", {}).get("documents", [])
    if not documents:
        return ""
    first = documents[0]
    return str(first.get("content") or first.get("raw_content") or "").strip()
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
def _resolve_path(cwd: Path, target: str, *, allowed_root: Path | None = None) -> Path:
|
| 329 |
+
path = Path(os.path.expanduser(target))
|
| 330 |
+
if not path.is_absolute():
|
| 331 |
+
path = cwd / path
|
| 332 |
+
resolved = path.resolve()
|
| 333 |
+
if allowed_root is not None:
|
| 334 |
+
try:
|
| 335 |
+
resolved.relative_to(allowed_root)
|
| 336 |
+
except ValueError as exc:
|
| 337 |
+
raise ValueError("path is outside the allowed workspace") from exc
|
| 338 |
+
return resolved
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
def _ensure_reference_path_allowed(path: Path) -> None:
    """Reject *path* if it points at a sensitive credential or Hermes-internal file.

    Args:
        path: An already-resolved absolute path (callers resolve via
            `_resolve_path` before invoking this check).

    Raises:
        ValueError: If *path* equals a blocked file or lives inside a blocked
            directory.
    """
    home = Path(os.path.expanduser("~")).resolve()
    # Hermes config directory defaults to ~/.hermes but may be relocated via
    # the HERMES_HOME environment variable.
    hermes_home = Path(
        os.getenv("HERMES_HOME", str(home / ".hermes"))
    ).expanduser().resolve()

    # Exact files that must never be attached (module-level lists enumerate
    # sensitive files/dirs under $HOME — e.g. credentials; see constants above).
    blocked_exact = {home / rel for rel in _SENSITIVE_HOME_FILES}
    blocked_exact.add(hermes_home / ".env")
    # Whole directories whose contents are off limits.
    blocked_dirs = [home / rel for rel in _SENSITIVE_HOME_DIRS]
    blocked_dirs.extend(hermes_home / rel for rel in _SENSITIVE_HERMES_DIRS)

    if path in blocked_exact:
        raise ValueError("path is a sensitive credential file and cannot be attached")

    for blocked_dir in blocked_dirs:
        try:
            # relative_to succeeds only when path is inside blocked_dir.
            path.relative_to(blocked_dir)
        except ValueError:
            continue
        raise ValueError("path is a sensitive credential or internal Hermes path and cannot be attached")
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
def _strip_trailing_punctuation(value: str) -> str:
    """Trim trailing punctuation, then peel off unbalanced closing brackets.

    A closing bracket is removed only while the string contains more closers
    than openers of that kind (e.g. the final ``)`` of ``see @file)``).
    """
    pairs = {")": "(", "]": "[", "}": "{"}
    result = value.rstrip(TRAILING_PUNCTUATION)
    while result and result[-1] in pairs:
        closer = result[-1]
        if result.count(closer) <= result.count(pairs[closer]):
            break
        result = result[:-1]
    return result
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def _remove_reference_tokens(message: str, refs: list[ContextReference]) -> str:
|
| 376 |
+
pieces: list[str] = []
|
| 377 |
+
cursor = 0
|
| 378 |
+
for ref in refs:
|
| 379 |
+
pieces.append(message[cursor:ref.start])
|
| 380 |
+
cursor = ref.end
|
| 381 |
+
pieces.append(message[cursor:])
|
| 382 |
+
text = "".join(pieces)
|
| 383 |
+
text = re.sub(r"\s{2,}", " ", text)
|
| 384 |
+
text = re.sub(r"\s+([,.;:!?])", r"\1", text)
|
| 385 |
+
return text.strip()
|
| 386 |
+
|
| 387 |
+
|
| 388 |
+
def _is_binary_file(path: Path) -> bool:
|
| 389 |
+
mime, _ = mimetypes.guess_type(path.name)
|
| 390 |
+
if mime and not mime.startswith("text/") and not any(
|
| 391 |
+
path.name.endswith(ext) for ext in (".py", ".md", ".txt", ".json", ".yaml", ".yml", ".toml", ".js", ".ts")
|
| 392 |
+
):
|
| 393 |
+
return True
|
| 394 |
+
chunk = path.read_bytes()[:4096]
|
| 395 |
+
return b"\x00" in chunk
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
def _build_folder_listing(path: Path, cwd: Path, limit: int = 200) -> str:
    """Render an indented bullet listing of *path* relative to *cwd*.

    At most *limit* entries are shown; a trailing ``- ...`` row marks
    truncation when the cap was reached.
    """
    root_rel = path.relative_to(cwd)
    rows = [f"{root_rel}/"]
    entries = _iter_visible_entries(path, cwd, limit=limit)
    base_depth = len(root_rel.parts)
    for entry in entries:
        # Indentation is proportional to how deep the entry sits below path.
        depth = len(entry.relative_to(cwd).parts) - base_depth - 1
        pad = " " * max(depth, 0)
        if entry.is_dir():
            rows.append(f"{pad}- {entry.name}/")
        else:
            rows.append(f"{pad}- {entry.name} ({_file_metadata(entry)})")
    if len(entries) >= limit:
        rows.append("- ...")
    return "\n".join(rows)
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
def _iter_visible_entries(path: Path, cwd: Path, limit: int) -> list[Path]:
    """Collect up to *limit* files/directories under *path*.

    Prefers ``rg --files`` (which honours .gitignore) and reconstructs the
    intermediate directories from its file list; falls back to an ``os.walk``
    that skips hidden entries and ``__pycache__`` when ripgrep is unavailable.

    Returns:
        Paths under *path*; the rg branch sorts directories before files,
        the walk branch returns entries in traversal order.
    """
    rg_entries = _rg_files(path, cwd, limit=limit)
    if rg_entries is not None:
        output: list[Path] = []
        seen_dirs: set[Path] = set()
        for rel in rg_entries:
            full = cwd / rel
            # Re-create each ancestor directory of the file so the listing can
            # show folder structure; skip cwd itself, directories already
            # emitted, and ancestors that are not inside the requested path.
            for parent in full.parents:
                if parent == cwd or parent in seen_dirs or path not in {parent, *parent.parents}:
                    continue
                seen_dirs.add(parent)
                output.append(parent)
            output.append(full)
        # De-duplicate, drop paths that vanished since rg ran, and order
        # directories ahead of files (then lexicographically).
        return sorted({p for p in output if p.exists()}, key=lambda p: (not p.is_dir(), str(p)))

    output = []
    for root, dirs, files in os.walk(path):
        # Prune hidden directories and __pycache__ in place so os.walk does
        # not descend into them; sort for deterministic traversal.
        dirs[:] = sorted(d for d in dirs if not d.startswith(".") and d != "__pycache__")
        files = sorted(f for f in files if not f.startswith("."))
        root_path = Path(root)
        for d in dirs:
            output.append(root_path / d)
            if len(output) >= limit:
                # Stop as soon as the cap is hit, mid-directory if necessary.
                return output
        for f in files:
            output.append(root_path / f)
            if len(output) >= limit:
                return output
    return output
|
| 443 |
+
|
| 444 |
+
|
| 445 |
+
def _rg_files(path: Path, cwd: Path, limit: int) -> list[Path] | None:
|
| 446 |
+
try:
|
| 447 |
+
result = subprocess.run(
|
| 448 |
+
["rg", "--files", str(path.relative_to(cwd))],
|
| 449 |
+
cwd=cwd,
|
| 450 |
+
capture_output=True,
|
| 451 |
+
text=True,
|
| 452 |
+
)
|
| 453 |
+
except FileNotFoundError:
|
| 454 |
+
return None
|
| 455 |
+
if result.returncode != 0:
|
| 456 |
+
return None
|
| 457 |
+
files = [Path(line.strip()) for line in result.stdout.splitlines() if line.strip()]
|
| 458 |
+
return files[:limit]
|
| 459 |
+
|
| 460 |
+
|
| 461 |
+
def _file_metadata(path: Path) -> str:
    """Short size descriptor for a listing row: line count for text, bytes otherwise."""
    if _is_binary_file(path):
        return f"{path.stat().st_size} bytes"
    try:
        text = path.read_text(encoding="utf-8")
    except Exception:
        # Undecodable despite the binary check — fall back to the byte size.
        return f"{path.stat().st_size} bytes"
    return f"{text.count(chr(10)) + 1} lines"
|
| 469 |
+
|
| 470 |
+
|
| 471 |
+
def _code_fence_language(path: Path) -> str:
|
| 472 |
+
mapping = {
|
| 473 |
+
".py": "python",
|
| 474 |
+
".js": "javascript",
|
| 475 |
+
".ts": "typescript",
|
| 476 |
+
".tsx": "tsx",
|
| 477 |
+
".jsx": "jsx",
|
| 478 |
+
".json": "json",
|
| 479 |
+
".md": "markdown",
|
| 480 |
+
".sh": "bash",
|
| 481 |
+
".yml": "yaml",
|
| 482 |
+
".yaml": "yaml",
|
| 483 |
+
".toml": "toml",
|
| 484 |
+
}
|
| 485 |
+
return mapping.get(path.suffix.lower(), "")
|
agent/copilot_acp_client.py
ADDED
|
@@ -0,0 +1,447 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""OpenAI-compatible shim that forwards Hermes requests to `copilot --acp`.
|
| 2 |
+
|
| 3 |
+
This adapter lets Hermes treat the GitHub Copilot ACP server as a chat-style
|
| 4 |
+
backend. Each request starts a short-lived ACP session, sends the formatted
|
| 5 |
+
conversation as a single prompt, collects text chunks, and converts the result
|
| 6 |
+
back into the minimal shape Hermes expects from an OpenAI client.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from __future__ import annotations
|
| 10 |
+
|
| 11 |
+
import json
|
| 12 |
+
import os
|
| 13 |
+
import queue
|
| 14 |
+
import shlex
|
| 15 |
+
import subprocess
|
| 16 |
+
import threading
|
| 17 |
+
import time
|
| 18 |
+
from collections import deque
|
| 19 |
+
from pathlib import Path
|
| 20 |
+
from types import SimpleNamespace
|
| 21 |
+
from typing import Any
|
| 22 |
+
|
| 23 |
+
ACP_MARKER_BASE_URL = "acp://copilot"
|
| 24 |
+
_DEFAULT_TIMEOUT_SECONDS = 900.0
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def _resolve_command() -> str:
|
| 28 |
+
return (
|
| 29 |
+
os.getenv("HERMES_COPILOT_ACP_COMMAND", "").strip()
|
| 30 |
+
or os.getenv("COPILOT_CLI_PATH", "").strip()
|
| 31 |
+
or "copilot"
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def _resolve_args() -> list[str]:
|
| 36 |
+
raw = os.getenv("HERMES_COPILOT_ACP_ARGS", "").strip()
|
| 37 |
+
if not raw:
|
| 38 |
+
return ["--acp", "--stdio"]
|
| 39 |
+
return shlex.split(raw)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def _jsonrpc_error(message_id: Any, code: int, message: str) -> dict[str, Any]:
|
| 43 |
+
return {
|
| 44 |
+
"jsonrpc": "2.0",
|
| 45 |
+
"id": message_id,
|
| 46 |
+
"error": {
|
| 47 |
+
"code": code,
|
| 48 |
+
"message": message,
|
| 49 |
+
},
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def _format_messages_as_prompt(messages: list[dict[str, Any]], model: str | None = None) -> str:
    """Flatten an OpenAI-style message list into a single ACP prompt string.

    Args:
        messages: Chat messages; non-dict entries and messages whose content
            renders to empty text are skipped.
        model: Optional model name, surfaced to the agent as a hint line.

    Returns:
        Instruction preamble, optional labelled transcript, and a closing
        instruction, joined with blank lines.
    """
    sections: list[str] = [
        "You are being used as the active ACP agent backend for Hermes.",
        "Use your own ACP capabilities and respond directly in natural language.",
        "Do not emit OpenAI tool-call JSON.",
    ]
    if model:
        sections.append(f"Hermes requested model hint: {model}")

    labels = {
        "system": "System",
        "user": "User",
        "assistant": "Assistant",
        "tool": "Tool",
        "context": "Context",
    }
    transcript: list[str] = []
    for message in messages:
        if not isinstance(message, dict):
            continue
        role = str(message.get("role") or "unknown").strip().lower()
        # Fold anything outside the four recognised roles into "context".
        # (The original code had a redundant `if role == "tool": role = "tool"`
        # self-assignment; a single membership test is equivalent.)
        if role not in ("system", "user", "assistant", "tool"):
            role = "context"

        rendered = _render_message_content(message.get("content"))
        if not rendered:
            continue

        transcript.append(f"{labels.get(role, role.title())}:\n{rendered}")

    if transcript:
        sections.append("Conversation transcript:\n\n" + "\n\n".join(transcript))

    sections.append("Continue the conversation from the latest user request.")
    return "\n\n".join(section.strip() for section in sections if section and section.strip())
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def _render_message_content(content: Any) -> str:
|
| 94 |
+
if content is None:
|
| 95 |
+
return ""
|
| 96 |
+
if isinstance(content, str):
|
| 97 |
+
return content.strip()
|
| 98 |
+
if isinstance(content, dict):
|
| 99 |
+
if "text" in content:
|
| 100 |
+
return str(content.get("text") or "").strip()
|
| 101 |
+
if "content" in content and isinstance(content.get("content"), str):
|
| 102 |
+
return str(content.get("content") or "").strip()
|
| 103 |
+
return json.dumps(content, ensure_ascii=True)
|
| 104 |
+
if isinstance(content, list):
|
| 105 |
+
parts: list[str] = []
|
| 106 |
+
for item in content:
|
| 107 |
+
if isinstance(item, str):
|
| 108 |
+
parts.append(item)
|
| 109 |
+
elif isinstance(item, dict):
|
| 110 |
+
text = item.get("text")
|
| 111 |
+
if isinstance(text, str) and text.strip():
|
| 112 |
+
parts.append(text.strip())
|
| 113 |
+
return "\n".join(parts).strip()
|
| 114 |
+
return str(content).strip()
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def _ensure_path_within_cwd(path_text: str, cwd: str) -> Path:
|
| 118 |
+
candidate = Path(path_text)
|
| 119 |
+
if not candidate.is_absolute():
|
| 120 |
+
raise PermissionError("ACP file-system paths must be absolute.")
|
| 121 |
+
resolved = candidate.resolve()
|
| 122 |
+
root = Path(cwd).resolve()
|
| 123 |
+
try:
|
| 124 |
+
resolved.relative_to(root)
|
| 125 |
+
except ValueError as exc:
|
| 126 |
+
raise PermissionError(f"Path '{resolved}' is outside the session cwd '{root}'.") from exc
|
| 127 |
+
return resolved
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
class _ACPChatCompletions:
|
| 131 |
+
def __init__(self, client: "CopilotACPClient"):
|
| 132 |
+
self._client = client
|
| 133 |
+
|
| 134 |
+
def create(self, **kwargs: Any) -> Any:
|
| 135 |
+
return self._client._create_chat_completion(**kwargs)
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
class _ACPChatNamespace:
    """Mirrors the OpenAI SDK's ``client.chat`` namespace (completions only)."""

    def __init__(self, client: "CopilotACPClient"):
        # Only the completions endpoint exists in this shim.
        self.completions = _ACPChatCompletions(client)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
class CopilotACPClient:
    """Minimal OpenAI-client-compatible facade for Copilot ACP.

    Each chat-completion request spawns a short-lived ``copilot --acp``
    subprocess, speaks newline-delimited JSON-RPC over its stdin/stdout,
    collects the streamed text/thought chunks, and returns an object shaped
    like an OpenAI chat-completion response (SimpleNamespace attributes).
    """

    def __init__(
        self,
        *,
        api_key: str | None = None,
        base_url: str | None = None,
        default_headers: dict[str, str] | None = None,
        acp_command: str | None = None,
        acp_args: list[str] | None = None,
        acp_cwd: str | None = None,
        command: str | None = None,
        args: list[str] | None = None,
        **_: Any,
    ):
        # api_key/base_url/default_headers exist only for OpenAI-client
        # signature compatibility; they are not used to authenticate.
        self.api_key = api_key or "copilot-acp"
        self.base_url = base_url or ACP_MARKER_BASE_URL
        self._default_headers = dict(default_headers or {})
        # `command`/`args` are accepted as aliases of `acp_command`/`acp_args`.
        self._acp_command = acp_command or command or _resolve_command()
        self._acp_args = list(acp_args or args or _resolve_args())
        self._acp_cwd = str(Path(acp_cwd or os.getcwd()).resolve())
        self.chat = _ACPChatNamespace(self)
        self.is_closed = False
        # At most one ACP subprocess is tracked at a time; guarded by the lock.
        self._active_process: subprocess.Popen[str] | None = None
        self._active_process_lock = threading.Lock()

    def close(self) -> None:
        """Mark the client closed and terminate any active ACP subprocess."""
        proc: subprocess.Popen[str] | None
        with self._active_process_lock:
            proc = self._active_process
            self._active_process = None
            self.is_closed = True
        if proc is None:
            return
        try:
            # Ask politely first, then escalate to SIGKILL if it lingers.
            proc.terminate()
            proc.wait(timeout=2)
        except Exception:
            try:
                proc.kill()
            except Exception:
                pass

    def _create_chat_completion(
        self,
        *,
        model: str | None = None,
        messages: list[dict[str, Any]] | None = None,
        timeout: float | None = None,
        **_: Any,
    ) -> Any:
        """Run one prompt through ACP and wrap the result in an OpenAI-like shape.

        Token usage is reported as zeros (ACP does not expose counts) and the
        finish reason is always "stop"; tool calls are never emitted.
        """
        prompt_text = _format_messages_as_prompt(messages or [], model=model)
        response_text, reasoning_text = self._run_prompt(
            prompt_text,
            timeout_seconds=float(timeout or _DEFAULT_TIMEOUT_SECONDS),
        )

        usage = SimpleNamespace(
            prompt_tokens=0,
            completion_tokens=0,
            total_tokens=0,
            prompt_tokens_details=SimpleNamespace(cached_tokens=0),
        )
        assistant_message = SimpleNamespace(
            content=response_text,
            tool_calls=[],
            reasoning=reasoning_text or None,
            reasoning_content=reasoning_text or None,
            reasoning_details=None,
        )
        choice = SimpleNamespace(message=assistant_message, finish_reason="stop")
        return SimpleNamespace(
            choices=[choice],
            usage=usage,
            model=model or "copilot-acp",
        )

    def _run_prompt(self, prompt_text: str, *, timeout_seconds: float) -> tuple[str, str]:
        """Drive one full ACP exchange: initialize, new session, prompt.

        Returns:
            ``(response_text, reasoning_text)`` assembled from streamed chunks.

        Raises:
            RuntimeError: When the process cannot start, exits early, or a
                JSON-RPC request fails.
            TimeoutError: When a request gets no reply within *timeout_seconds*.
        """
        try:
            proc = subprocess.Popen(
                [self._acp_command] + self._acp_args,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
                bufsize=1,
                cwd=self._acp_cwd,
            )
        except FileNotFoundError as exc:
            raise RuntimeError(
                f"Could not start Copilot ACP command '{self._acp_command}'. "
                "Install GitHub Copilot CLI or set HERMES_COPILOT_ACP_COMMAND/COPILOT_CLI_PATH."
            ) from exc

        if proc.stdin is None or proc.stdout is None:
            proc.kill()
            raise RuntimeError("Copilot ACP process did not expose stdin/stdout pipes.")

        self.is_closed = False
        with self._active_process_lock:
            self._active_process = proc

        # All stdout JSON-RPC messages flow through this queue; stderr is kept
        # as a bounded tail for diagnostics.
        inbox: queue.Queue[dict[str, Any]] = queue.Queue()
        stderr_tail: deque[str] = deque(maxlen=40)

        def _stdout_reader() -> None:
            # One JSON message per line; unparseable lines are preserved raw.
            for line in proc.stdout:
                try:
                    inbox.put(json.loads(line))
                except Exception:
                    inbox.put({"raw": line.rstrip("\n")})

        def _stderr_reader() -> None:
            if proc.stderr is None:
                return
            for line in proc.stderr:
                stderr_tail.append(line.rstrip("\n"))

        # Daemon threads so a stuck subprocess cannot block interpreter exit.
        out_thread = threading.Thread(target=_stdout_reader, daemon=True)
        err_thread = threading.Thread(target=_stderr_reader, daemon=True)
        out_thread.start()
        err_thread.start()

        next_id = 0

        def _request(method: str, params: dict[str, Any], *, text_parts: list[str] | None = None, reasoning_parts: list[str] | None = None) -> Any:
            # Send one JSON-RPC request and pump the inbox until its matching
            # response arrives, servicing server-initiated messages meanwhile.
            nonlocal next_id
            next_id += 1
            request_id = next_id
            payload = {
                "jsonrpc": "2.0",
                "id": request_id,
                "method": method,
                "params": params,
            }
            proc.stdin.write(json.dumps(payload) + "\n")
            proc.stdin.flush()

            deadline = time.time() + timeout_seconds
            while time.time() < deadline:
                if proc.poll() is not None:
                    # Process died; fall through to the error reporting below.
                    break
                try:
                    msg = inbox.get(timeout=0.1)
                except queue.Empty:
                    continue

                # Server->client notifications/requests (session updates,
                # permission prompts, fs access) are handled inline.
                if self._handle_server_message(
                    msg,
                    process=proc,
                    cwd=self._acp_cwd,
                    text_parts=text_parts,
                    reasoning_parts=reasoning_parts,
                ):
                    continue

                if msg.get("id") != request_id:
                    continue
                if "error" in msg:
                    err = msg.get("error") or {}
                    raise RuntimeError(
                        f"Copilot ACP {method} failed: {err.get('message') or err}"
                    )
                return msg.get("result")

            stderr_text = "\n".join(stderr_tail).strip()
            if proc.poll() is not None and stderr_text:
                raise RuntimeError(f"Copilot ACP process exited early: {stderr_text}")
            raise TimeoutError(f"Timed out waiting for Copilot ACP response to {method}.")

        try:
            # Handshake: advertise client fs capabilities and identity.
            _request(
                "initialize",
                {
                    "protocolVersion": 1,
                    "clientCapabilities": {
                        "fs": {
                            "readTextFile": True,
                            "writeTextFile": True,
                        }
                    },
                    "clientInfo": {
                        "name": "hermes-agent",
                        "title": "Hermes Agent",
                        "version": "0.0.0",
                    },
                },
            )
            session = _request(
                "session/new",
                {
                    "cwd": self._acp_cwd,
                    "mcpServers": [],
                },
            ) or {}
            session_id = str(session.get("sessionId") or "").strip()
            if not session_id:
                raise RuntimeError("Copilot ACP did not return a sessionId.")

            # Chunk accumulators filled by _handle_server_message during the
            # prompt request.
            text_parts: list[str] = []
            reasoning_parts: list[str] = []
            _request(
                "session/prompt",
                {
                    "sessionId": session_id,
                    "prompt": [
                        {
                            "type": "text",
                            "text": prompt_text,
                        }
                    ],
                },
                text_parts=text_parts,
                reasoning_parts=reasoning_parts,
            )
            return "".join(text_parts), "".join(reasoning_parts)
        finally:
            # Sessions are single-use: always tear the subprocess down.
            self.close()

    def _handle_server_message(
        self,
        msg: dict[str, Any],
        *,
        process: subprocess.Popen[str],
        cwd: str,
        text_parts: list[str] | None,
        reasoning_parts: list[str] | None,
    ) -> bool:
        """Service a server-initiated ACP message.

        Returns:
            True when *msg* was consumed here (notification or server request),
            False when it is a response the caller should match against its own
            request id.
        """
        method = msg.get("method")
        if not isinstance(method, str):
            # No method => it is a response, not a server-initiated message.
            return False

        if method == "session/update":
            # Streaming notification: collect message/thought text chunks.
            params = msg.get("params") or {}
            update = params.get("update") or {}
            kind = str(update.get("sessionUpdate") or "").strip()
            content = update.get("content") or {}
            chunk_text = ""
            if isinstance(content, dict):
                chunk_text = str(content.get("text") or "")
            if kind == "agent_message_chunk" and chunk_text and text_parts is not None:
                text_parts.append(chunk_text)
            elif kind == "agent_thought_chunk" and chunk_text and reasoning_parts is not None:
                reasoning_parts.append(chunk_text)
            return True

        if process.stdin is None:
            # Cannot answer server requests without a writable pipe; swallow.
            return True

        message_id = msg.get("id")
        params = msg.get("params") or {}

        if method == "session/request_permission":
            # Auto-approve each permission request for this single-shot run.
            response = {
                "jsonrpc": "2.0",
                "id": message_id,
                "result": {
                    "outcome": {
                        "outcome": "allow_once",
                    }
                },
            }
        elif method == "fs/read_text_file":
            try:
                # Reads are confined to the session cwd.
                path = _ensure_path_within_cwd(str(params.get("path") or ""), cwd)
                content = path.read_text() if path.exists() else ""
                line = params.get("line")
                limit = params.get("limit")
                if isinstance(line, int) and line > 1:
                    # Optional 1-based line offset plus line-count limit.
                    lines = content.splitlines(keepends=True)
                    start = line - 1
                    end = start + limit if isinstance(limit, int) and limit > 0 else None
                    content = "".join(lines[start:end])
                response = {
                    "jsonrpc": "2.0",
                    "id": message_id,
                    "result": {
                        "content": content,
                    },
                }
            except Exception as exc:
                response = _jsonrpc_error(message_id, -32602, str(exc))
        elif method == "fs/write_text_file":
            try:
                # Writes are likewise confined to the session cwd.
                path = _ensure_path_within_cwd(str(params.get("path") or ""), cwd)
                path.parent.mkdir(parents=True, exist_ok=True)
                path.write_text(str(params.get("content") or ""))
                response = {
                    "jsonrpc": "2.0",
                    "id": message_id,
                    "result": None,
                }
            except Exception as exc:
                response = _jsonrpc_error(message_id, -32602, str(exc))
        else:
            response = _jsonrpc_error(
                message_id,
                -32601,
                f"ACP client method '{method}' is not supported by Hermes yet.",
            )

        process.stdin.write(json.dumps(response) + "\n")
        process.stdin.flush()
        return True
|
agent/display.py
ADDED
|
@@ -0,0 +1,708 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""CLI presentation -- spinner, kawaii faces, tool preview formatting.
|
| 2 |
+
|
| 3 |
+
Pure display functions and classes with no AIAgent dependency.
|
| 4 |
+
Used by AIAgent._execute_tool_calls for CLI feedback.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import json
|
| 8 |
+
import logging
|
| 9 |
+
import os
|
| 10 |
+
import sys
|
| 11 |
+
import threading
|
| 12 |
+
import time
|
| 13 |
+
|
| 14 |
+
# ANSI escape codes for coloring tool failure indicators
_RED = "\033[31m"  # red foreground — failed tool lines
_RESET = "\033[0m"  # reset all attributes

# Module-level logger shared by the parse-failure debug paths below.
logger = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
# =========================================================================
|
| 22 |
+
# Skin-aware helpers (lazy import to avoid circular deps)
|
| 23 |
+
# =========================================================================
|
| 24 |
+
|
| 25 |
+
def _get_skin():
|
| 26 |
+
"""Get the active skin config, or None if not available."""
|
| 27 |
+
try:
|
| 28 |
+
from hermes_cli.skin_engine import get_active_skin
|
| 29 |
+
return get_active_skin()
|
| 30 |
+
except Exception:
|
| 31 |
+
return None
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def get_skin_faces(key: str, default: list) -> list:
    """Return the spinner face list *key* from the active skin, else *default*.

    Falls back to *default* both when no skin is loaded and when the skin
    has no (non-empty) list registered under *key*.
    """
    active = _get_skin()
    if not active:
        return default
    return active.get_spinner_list(key) or default
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def get_skin_verbs() -> list:
    """Return thinking verbs from the active skin, else the built-in pool."""
    active = _get_skin()
    custom = active.get_spinner_list("thinking_verbs") if active else None
    return custom or KawaiiSpinner.THINKING_VERBS
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def get_skin_tool_prefix() -> str:
    """Return the tool-output prefix character, skin-provided or the default '┊'."""
    active = _get_skin()
    return active.tool_prefix if active else "┊"
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def get_tool_emoji(tool_name: str, default: str = "⚡") -> str:
    """Get the display emoji for a tool.

    Resolution order:
    1. Active skin's ``tool_emojis`` overrides (if a skin is loaded)
    2. Tool registry's per-tool ``emoji`` field
    3. *default* fallback
    """
    # 1. Skin override wins when present and non-empty.
    active = _get_skin()
    if active and active.tool_emojis:
        from_skin = active.tool_emojis.get(tool_name)
        if from_skin:
            return from_skin
    # 2. Registry default (best-effort — registry may not be importable).
    try:
        from tools.registry import registry
        from_registry = registry.get_emoji(tool_name, default="")
    except Exception:
        from_registry = ""
    if from_registry:
        return from_registry
    # 3. Hardcoded fallback
    return default
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
# =========================================================================
|
| 89 |
+
# Tool preview (one-line summary of a tool call's primary argument)
|
| 90 |
+
# =========================================================================
|
| 91 |
+
|
| 92 |
+
def _oneline(text: str) -> str:
|
| 93 |
+
"""Collapse whitespace (including newlines) to single spaces."""
|
| 94 |
+
return " ".join(text.split())
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def build_tool_preview(tool_name: str, args: dict, max_len: int = 40) -> str | None:
    """Build a short preview of a tool call's primary argument for display.

    Special-cases several tools (process, todo, session_search, memory,
    send_message, rl_*) with richer summaries; everything else falls back to
    a truncated one-line rendering of the tool's primary argument.

    Args:
        tool_name: Registry name of the tool being invoked.
        args: The tool-call arguments dict (may be empty).
        max_len: Maximum preview length for the generic fallback path.

    Returns:
        A short one-line string, or None when no meaningful preview exists.
    """
    if not args:
        return None
    # Map of tool name -> the argument that best summarizes the call.
    primary_args = {
        "terminal": "command", "web_search": "query", "web_extract": "urls",
        "read_file": "path", "write_file": "path", "patch": "path",
        "search_files": "pattern", "browser_navigate": "url",
        "browser_click": "ref", "browser_type": "text",
        "image_generate": "prompt", "text_to_speech": "text",
        "vision_analyze": "question", "mixture_of_agents": "user_prompt",
        "skill_view": "name", "skills_list": "category",
        "cronjob": "action",
        "execute_code": "code", "delegate_task": "goal",
        "clarify": "question", "skill_manage": "name",
    }

    # process: "<action> <session-id> "<data>" <timeout>s" — only the parts present.
    if tool_name == "process":
        action = args.get("action", "")
        sid = args.get("session_id", "")
        data = args.get("data", "")
        timeout_val = args.get("timeout")
        parts = [action]
        if sid:
            parts.append(sid[:16])
        if data:
            parts.append(f'"{_oneline(data[:20])}"')
        if timeout_val and action == "wait":
            parts.append(f"{timeout_val}s")
        return " ".join(parts) if parts else None

    # todo: distinguish read (no todos arg) from merge-update vs full re-plan.
    if tool_name == "todo":
        todos_arg = args.get("todos")
        merge = args.get("merge", False)
        if todos_arg is None:
            return "reading task list"
        elif merge:
            return f"updating {len(todos_arg)} task(s)"
        else:
            return f"planning {len(todos_arg)} task(s)"

    if tool_name == "session_search":
        query = _oneline(args.get("query", ""))
        return f"recall: \"{query[:25]}{'...' if len(query) > 25 else ''}\""

    # memory: prefix encodes the operation (+ add, ~ replace, - remove).
    if tool_name == "memory":
        action = args.get("action", "")
        target = args.get("target", "")
        if action == "add":
            content = _oneline(args.get("content", ""))
            return f"+{target}: \"{content[:25]}{'...' if len(content) > 25 else ''}\""
        elif action == "replace":
            return f"~{target}: \"{_oneline(args.get('old_text', '')[:20])}\""
        elif action == "remove":
            return f"-{target}: \"{_oneline(args.get('old_text', '')[:20])}\""
        return action

    if tool_name == "send_message":
        target = args.get("target", "?")
        msg = _oneline(args.get("message", ""))
        if len(msg) > 20:
            msg = msg[:17] + "..."
        return f"to {target}: \"{msg}\""

    # rl_*: fixed per-tool labels; unknown rl_ tools yield None via dict.get.
    if tool_name.startswith("rl_"):
        rl_previews = {
            "rl_list_environments": "listing envs",
            "rl_select_environment": args.get("name", ""),
            "rl_get_current_config": "reading config",
            "rl_edit_config": f"{args.get('field', '')}={args.get('value', '')}",
            "rl_start_training": "starting",
            "rl_check_status": args.get("run_id", "")[:16],
            "rl_stop_training": f"stopping {args.get('run_id', '')[:16]}",
            "rl_get_results": args.get("run_id", "")[:16],
            "rl_list_runs": "listing runs",
            "rl_test_inference": f"{args.get('num_steps', 3)} steps",
        }
        return rl_previews.get(tool_name)

    # Generic fallback: known primary arg first, then a list of common keys.
    key = primary_args.get(tool_name)
    if not key:
        for fallback_key in ("query", "text", "command", "path", "name", "prompt", "code", "goal"):
            if fallback_key in args:
                key = fallback_key
                break

    if not key or key not in args:
        return None

    value = args[key]
    # List-valued args (e.g. web_extract urls) preview only their first element.
    if isinstance(value, list):
        value = value[0] if value else ""

    preview = _oneline(str(value))
    if not preview:
        return None
    if len(preview) > max_len:
        preview = preview[:max_len - 3] + "..."
    return preview
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
# =========================================================================
|
| 199 |
+
# KawaiiSpinner
|
| 200 |
+
# =========================================================================
|
| 201 |
+
|
| 202 |
+
class KawaiiSpinner:
    """Animated spinner with kawaii faces for CLI feedback during tool execution.

    Runs the animation on a daemon thread and is usable as a context manager.
    Writes through the stdout captured at construction time, so it keeps
    working even if sys.stdout is later redirected by child agents.
    """

    # Frame sets selectable via the ``spinner_type`` constructor argument.
    SPINNERS = {
        'dots': ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'],
        'bounce': ['⠁', '⠂', '⠄', '⡀', '⢀', '⠠', '⠐', '⠈'],
        'grow': ['▁', '▂', '▃', '▄', '▅', '▆', '▇', '█', '▇', '▆', '▅', '▄', '▃', '▂'],
        'arrows': ['←', '↖', '↑', '↗', '→', '↘', '↓', '↙'],
        'star': ['✶', '✷', '✸', '✹', '✺', '✹', '✸', '✷'],
        'moon': ['🌑', '🌒', '🌓', '🌔', '🌕', '🌖', '🌗', '🌘'],
        'pulse': ['◜', '◠', '◝', '◞', '◡', '◟'],
        'brain': ['🧠', '💭', '💡', '✨', '💫', '🌟', '💡', '💭'],
        'sparkle': ['⁺', '˚', '*', '✧', '✦', '✧', '*', '˚'],
    }

    # Faces shown while idling/waiting.
    KAWAII_WAITING = [
        "(。◕‿◕。)", "(◕‿◕✿)", "٩(◕‿◕。)۶", "(✿◠‿◠)", "( ˘▽˘)っ",
        "♪(´ε` )", "(◕ᴗ◕✿)", "ヾ(^∇^)", "(≧◡≦)", "(★ω★)",
    ]

    # Faces shown during model "thinking" phases.
    KAWAII_THINKING = [
        "(。•́︿•̀。)", "(◔_◔)", "(¬‿¬)", "( •_•)>⌐■-■", "(⌐■_■)",
        "(´・_・`)", "◉_◉", "(°ロ°)", "( ˘⌣˘)♡", "ヽ(>∀<☆)☆",
        "٩(๑❛ᴗ❛๑)۶", "(⊙_⊙)", "(¬_¬)", "( ͡° ͜ʖ ͡°)", "ಠ_ಠ",
    ]

    # Default verb pool; get_skin_verbs() may substitute skin-provided ones.
    THINKING_VERBS = [
        "pondering", "contemplating", "musing", "cogitating", "ruminating",
        "deliberating", "mulling", "reflecting", "processing", "reasoning",
        "analyzing", "computing", "synthesizing", "formulating", "brainstorming",
    ]

    def __init__(self, message: str = "", spinner_type: str = 'dots'):
        """Create a spinner; unknown *spinner_type* falls back to 'dots'."""
        self.message = message
        self.spinner_frames = self.SPINNERS.get(spinner_type, self.SPINNERS['dots'])
        self.running = False
        self.thread = None
        self.frame_idx = 0
        self.start_time = None
        self.last_line_len = 0
        self._last_flush_time = 0.0  # Rate-limit flushes for patch_stdout compat
        # Capture stdout NOW, before any redirect_stdout(devnull) from
        # child agents can replace sys.stdout with a black hole.
        self._out = sys.stdout

    def _write(self, text: str, end: str = '\n', flush: bool = False):
        """Write to the stdout captured at spinner creation time."""
        try:
            self._out.write(text + end)
            if flush:
                self._out.flush()
        except (ValueError, OSError):
            # Captured stream may already be closed — drop the output silently.
            pass

    def _animate(self):
        """Animation loop body; runs on the daemon thread started by start()."""
        # When stdout is not a real terminal (e.g. Docker, systemd, pipe),
        # skip the animation entirely — it creates massive log bloat.
        # Just log the start once and let stop() log the completion.
        if not hasattr(self._out, 'isatty') or not self._out.isatty():
            self._write(f" [tool] {self.message}", flush=True)
            while self.running:
                time.sleep(0.5)
            return

        # Cache skin wings at start (avoid per-frame imports)
        skin = _get_skin()
        wings = skin.get_spinner_wings() if skin else []

        while self.running:
            # Env-var escape hatch lets callers freeze the animation.
            if os.getenv("HERMES_SPINNER_PAUSE"):
                time.sleep(0.1)
                continue
            frame = self.spinner_frames[self.frame_idx % len(self.spinner_frames)]
            elapsed = time.time() - self.start_time
            if wings:
                left, right = wings[self.frame_idx % len(wings)]
                line = f" {left} {frame} {self.message} {right} ({elapsed:.1f}s)"
            else:
                line = f" {frame} {self.message} ({elapsed:.1f}s)"
            # Pad with spaces so a shorter frame fully overwrites the prior one.
            pad = max(self.last_line_len - len(line), 0)
            # Rate-limit flush() calls to avoid spinner spam under
            # prompt_toolkit's patch_stdout. Each flush() pushes a queue
            # item that may trigger a separate run_in_terminal() call; if
            # items are processed one-at-a-time the \r overwrite is lost
            # and every frame appears on its own line. By flushing at
            # most every 0.4s we guarantee multiple \r-frames are batched
            # into a single write, so the terminal collapses them correctly.
            now = time.time()
            should_flush = (now - self._last_flush_time) >= 0.4
            self._write(f"\r{line}{' ' * pad}", end='', flush=should_flush)
            if should_flush:
                self._last_flush_time = now
            self.last_line_len = len(line)
            self.frame_idx += 1
            time.sleep(0.12)

    def start(self):
        """Begin animating on a daemon thread; no-op if already running."""
        if self.running:
            return
        self.running = True
        self.start_time = time.time()
        self.thread = threading.Thread(target=self._animate, daemon=True)
        self.thread.start()

    def update_text(self, new_message: str):
        """Swap the spinner label; the next animation tick picks it up."""
        self.message = new_message

    def print_above(self, text: str):
        """Print a line above the spinner without disrupting animation.

        Clears the current spinner line, prints the text, and lets the
        next animation tick redraw the spinner on the line below.
        Thread-safe: uses the captured stdout reference (self._out).
        Works inside redirect_stdout(devnull) because _write bypasses
        sys.stdout and writes to the stdout captured at spinner creation.
        """
        if not self.running:
            self._write(f" {text}", flush=True)
            return
        # Clear spinner line with spaces (not \033[K) to avoid garbled escape
        # codes when prompt_toolkit's patch_stdout is active — same approach
        # as stop(). Then print text; spinner redraws on next tick.
        blanks = ' ' * max(self.last_line_len + 5, 40)
        self._write(f"\r{blanks}\r {text}", flush=True)

    def stop(self, final_message: str | None = None):
        """Stop the animation, clear the spinner line, and optionally print
        *final_message* (with elapsed time appended in non-TTY mode)."""
        self.running = False
        if self.thread:
            # Bounded join — never block shutdown on a stuck animation tick.
            self.thread.join(timeout=0.5)

        is_tty = hasattr(self._out, 'isatty') and self._out.isatty()
        if is_tty:
            # Clear the spinner line with spaces instead of \033[K to avoid
            # garbled escape codes when prompt_toolkit's patch_stdout is active.
            blanks = ' ' * max(self.last_line_len + 5, 40)
            self._write(f"\r{blanks}\r", end='', flush=True)
        if final_message:
            elapsed = f" ({time.time() - self.start_time:.1f}s)" if self.start_time else ""
            if is_tty:
                self._write(f" {final_message}", flush=True)
            else:
                self._write(f" [done] {final_message}{elapsed}", flush=True)

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Do not suppress exceptions from the with-block.
        self.stop()
        return False
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
# =========================================================================
|
| 355 |
+
# Kawaii face arrays (used by AIAgent._execute_tool_calls for spinner text)
|
| 356 |
+
# =========================================================================
|
| 357 |
+
|
| 358 |
+
# Kawaii faces grouped by tool category, consumed by AIAgent._execute_tool_calls
# when composing spinner labels. Fixes vs. the original:
#   * KAWAII_SKILL contained U+FFFD replacement characters ("♪(๑ᴖ◡ᴖ\ufffd)♪")
#     from a mis-encoded paste — restored from the intact copy in KAWAII_READ.
#   * Literal "\(" sequences are invalid string escapes (SyntaxWarning on
#     modern Python); written as "\\(" which yields the identical string.
KAWAII_SEARCH = [
    "♪(´ε` )", "(。◕‿◕。)", "ヾ(^∇^)", "(◕ᴗ◕✿)", "( ˘▽˘)っ",
    "٩(◕‿◕。)۶", "(✿◠‿◠)", "♪~(´ε` )", "(ノ´ヮ`)ノ*:・゚✧", "\\(◎o◎)/",
]
KAWAII_READ = [
    "φ(゜▽゜*)♪", "( ˘▽˘)っ", "(⌐■_■)", "٩(。•́‿•̀。)۶", "(◕‿◕✿)",
    "ヾ(@⌒ー⌒@)ノ", "(✧ω✧)", "♪(๑ᴖ◡ᴖ๑)♪", "(≧◡≦)", "( ´ ▽ ` )ノ",
]
KAWAII_TERMINAL = [
    "ヽ(>∀<☆)ノ", "(ノ°∀°)ノ", "٩(^ᴗ^)۶", "ヾ(⌐■_■)ノ♪", "(•̀ᴗ•́)و",
    "┗(^0^)┓", "(`・ω・´)", "\\( ̄▽ ̄)/", "(ง •̀_•́)ง", "ヽ(´▽`)/",
]
KAWAII_BROWSER = [
    "(ノ°∀°)ノ", "(☞゚ヮ゚)☞", "( ͡° ͜ʖ ͡°)", "┌( ಠ_ಠ)┘", "(⊙_⊙)?",
    "ヾ(•ω•`)o", "( ̄ω ̄)", "( ˇωˇ )", "(ᵔᴥᵔ)", "\\(◎o◎)/",
]
KAWAII_CREATE = [
    "✧*。٩(ˊᗜˋ*)و✧", "(ノ◕ヮ◕)ノ*:・゚✧", "ヽ(>∀<☆)ノ", "٩(♡ε♡)۶", "(◕‿◕)♡",
    "✿◕ ‿ ◕✿", "(*≧▽≦)", "ヾ(^-^)ノ", "(☆▽☆)", "°˖✧◝(⁰▿⁰)◜✧˖°",
]
KAWAII_SKILL = [
    "ヾ(@⌒ー⌒@)ノ", "(๑˃ᴗ˂)ﻭ", "٩(◕‿◕。)۶", "(✿╹◡╹)", "ヽ(・∀・)ノ",
    "(ノ´ヮ`)ノ*:・゚✧", "♪(๑ᴖ◡ᴖ๑)♪", "(◠‿◠)", "٩(ˊᗜˋ*)و", "(^▽^)",
    "ヾ(^∇^)", "(★ω★)/", "٩(。•́‿•̀。)۶", "(◕ᴗ◕✿)", "\\(◎o◎)/",
    "(✧ω✧)", "ヽ(>∀<☆)ノ", "( ˘▽˘)っ", "(≧◡≦) ♡", "ヾ( ̄▽ ̄)",
]
KAWAII_THINK = [
    "(っ°Д°;)っ", "(;′⌒`)", "(・_・ヾ", "( ´_ゝ`)", "( ̄ヘ ̄)",
    "(。-`ω´-)", "( ˘︹˘ )", "(¬_¬)", "ヽ(ー_ー )ノ", "(;一_一)",
]
KAWAII_GENERIC = [
    "♪(´ε` )", "(◕‿◕✿)", "ヾ(^∇^)", "٩(◕‿◕。)۶", "(✿◠‿◠)",
    "(ノ´ヮ`)ノ*:・゚✧", "ヽ(>∀<☆)ノ", "(☆▽☆)", "( ˘▽˘)っ", "(≧◡≦)",
]
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
# =========================================================================
|
| 395 |
+
# Cute tool message (completion line that replaces the spinner)
|
| 396 |
+
# =========================================================================
|
| 397 |
+
|
| 398 |
+
def _detect_tool_failure(tool_name: str, result: str | None) -> tuple[bool, str]:
|
| 399 |
+
"""Inspect a tool result string for signs of failure.
|
| 400 |
+
|
| 401 |
+
Returns ``(is_failure, suffix)`` where *suffix* is an informational tag
|
| 402 |
+
like ``" [exit 1]"`` for terminal failures, or ``" [error]"`` for generic
|
| 403 |
+
failures. On success, returns ``(False, "")``.
|
| 404 |
+
"""
|
| 405 |
+
if result is None:
|
| 406 |
+
return False, ""
|
| 407 |
+
|
| 408 |
+
if tool_name == "terminal":
|
| 409 |
+
try:
|
| 410 |
+
data = json.loads(result)
|
| 411 |
+
exit_code = data.get("exit_code")
|
| 412 |
+
if exit_code is not None and exit_code != 0:
|
| 413 |
+
return True, f" [exit {exit_code}]"
|
| 414 |
+
except (json.JSONDecodeError, TypeError, AttributeError):
|
| 415 |
+
logger.debug("Could not parse terminal result as JSON for exit code check")
|
| 416 |
+
return False, ""
|
| 417 |
+
|
| 418 |
+
# Memory-specific: distinguish "full" from real errors
|
| 419 |
+
if tool_name == "memory":
|
| 420 |
+
try:
|
| 421 |
+
data = json.loads(result)
|
| 422 |
+
if data.get("success") is False and "exceed the limit" in data.get("error", ""):
|
| 423 |
+
return True, " [full]"
|
| 424 |
+
except (json.JSONDecodeError, TypeError, AttributeError):
|
| 425 |
+
logger.debug("Could not parse memory result as JSON for capacity check")
|
| 426 |
+
|
| 427 |
+
# Generic heuristic for non-terminal tools
|
| 428 |
+
lower = result[:500].lower()
|
| 429 |
+
if '"error"' in lower or '"failed"' in lower or result.startswith("Error"):
|
| 430 |
+
return True, " [error]"
|
| 431 |
+
|
| 432 |
+
return False, ""
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
def get_cute_tool_message(
    tool_name: str, args: dict, duration: float, result: str | None = None,
) -> str:
    """Generate a formatted tool completion line for CLI quiet mode.

    Format: ``| {emoji} {verb:9} {detail} {duration}``

    When *result* is provided the line is checked for failure indicators.
    Failed tool calls get a red prefix and an informational suffix.

    Args:
        tool_name: Registry name of the tool that just ran.
        args: The tool-call arguments used for the run.
        duration: Wall-clock seconds the call took.
        result: Raw result string, used only for failure detection.
    """
    dur = f"{duration:.1f}s"
    is_failure, failure_suffix = _detect_tool_failure(tool_name, result)
    skin_prefix = get_skin_tool_prefix()

    # Truncate *s* to n chars with a "..." tail.
    def _trunc(s, n=40):
        s = str(s)
        return (s[:n-3] + "...") if len(s) > n else s

    # Truncate a filesystem path keeping the (more informative) tail.
    def _path(p, n=35):
        p = str(p)
        return ("..." + p[-(n-3):]) if len(p) > n else p

    def _wrap(line: str) -> str:
        """Apply skin tool prefix and failure suffix."""
        # Only the first "┊" is the prefix — later ones could be user data.
        if skin_prefix != "┊":
            line = line.replace("┊", skin_prefix, 1)
        if not is_failure:
            return line
        return f"{line}{failure_suffix}"

    # --- Web tools -------------------------------------------------------
    if tool_name == "web_search":
        return _wrap(f"┊ 🔍 search {_trunc(args.get('query', ''), 42)} {dur}")
    if tool_name == "web_extract":
        urls = args.get("urls", [])
        if urls:
            url = urls[0] if isinstance(urls, list) else str(urls)
            # Reduce the URL to its host for a compact display.
            domain = url.replace("https://", "").replace("http://", "").split("/")[0]
            extra = f" +{len(urls)-1}" if len(urls) > 1 else ""
            return _wrap(f"┊ 📄 fetch {_trunc(domain, 35)}{extra} {dur}")
        return _wrap(f"┊ 📄 fetch pages {dur}")
    if tool_name == "web_crawl":
        url = args.get("url", "")
        domain = url.replace("https://", "").replace("http://", "").split("/")[0]
        return _wrap(f"┊ 🕸️ crawl {_trunc(domain, 35)} {dur}")
    # --- Shell / process tools -------------------------------------------
    if tool_name == "terminal":
        return _wrap(f"┊ 💻 $ {_trunc(args.get('command', ''), 42)} {dur}")
    if tool_name == "process":
        action = args.get("action", "?")
        sid = args.get("session_id", "")[:12]
        labels = {"list": "ls processes", "poll": f"poll {sid}", "log": f"log {sid}",
                  "wait": f"wait {sid}", "kill": f"kill {sid}", "write": f"write {sid}", "submit": f"submit {sid}"}
        return _wrap(f"┊ ⚙️ proc {labels.get(action, f'{action} {sid}')} {dur}")
    # --- File tools ------------------------------------------------------
    if tool_name == "read_file":
        return _wrap(f"┊ 📖 read {_path(args.get('path', ''))} {dur}")
    if tool_name == "write_file":
        return _wrap(f"┊ ✍️ write {_path(args.get('path', ''))} {dur}")
    if tool_name == "patch":
        return _wrap(f"┊ 🔧 patch {_path(args.get('path', ''))} {dur}")
    if tool_name == "search_files":
        pattern = _trunc(args.get("pattern", ""), 35)
        target = args.get("target", "content")
        verb = "find" if target == "files" else "grep"
        return _wrap(f"┊ 🔎 {verb:9} {pattern} {dur}")
    # --- Browser tools ---------------------------------------------------
    if tool_name == "browser_navigate":
        url = args.get("url", "")
        domain = url.replace("https://", "").replace("http://", "").split("/")[0]
        return _wrap(f"┊ 🌐 navigate {_trunc(domain, 35)} {dur}")
    if tool_name == "browser_snapshot":
        mode = "full" if args.get("full") else "compact"
        return _wrap(f"┊ 📸 snapshot {mode} {dur}")
    if tool_name == "browser_click":
        return _wrap(f"┊ 👆 click {args.get('ref', '?')} {dur}")
    if tool_name == "browser_type":
        return _wrap(f"┊ ⌨️ type \"{_trunc(args.get('text', ''), 30)}\" {dur}")
    if tool_name == "browser_scroll":
        d = args.get("direction", "down")
        arrow = {"down": "↓", "up": "↑", "right": "→", "left": "←"}.get(d, "↓")
        return _wrap(f"┊ {arrow} scroll {d} {dur}")
    if tool_name == "browser_back":
        return _wrap(f"┊ ◀️ back {dur}")
    if tool_name == "browser_press":
        return _wrap(f"┊ ⌨️ press {args.get('key', '?')} {dur}")
    if tool_name == "browser_close":
        return _wrap(f"┊ 🚪 close browser {dur}")
    if tool_name == "browser_get_images":
        return _wrap(f"┊ 🖼️ images extracting {dur}")
    if tool_name == "browser_vision":
        return _wrap(f"┊ 👁️ vision analyzing page {dur}")
    # --- Planning / memory tools -----------------------------------------
    if tool_name == "todo":
        todos_arg = args.get("todos")
        merge = args.get("merge", False)
        if todos_arg is None:
            return _wrap(f"┊ 📋 plan reading tasks {dur}")
        elif merge:
            return _wrap(f"┊ 📋 plan update {len(todos_arg)} task(s) {dur}")
        else:
            return _wrap(f"┊ 📋 plan {len(todos_arg)} task(s) {dur}")
    if tool_name == "session_search":
        return _wrap(f"┊ 🔍 recall \"{_trunc(args.get('query', ''), 35)}\" {dur}")
    if tool_name == "memory":
        action = args.get("action", "?")
        target = args.get("target", "")
        if action == "add":
            return _wrap(f"┊ 🧠 memory +{target}: \"{_trunc(args.get('content', ''), 30)}\" {dur}")
        elif action == "replace":
            return _wrap(f"┊ 🧠 memory ~{target}: \"{_trunc(args.get('old_text', ''), 20)}\" {dur}")
        elif action == "remove":
            return _wrap(f"┊ 🧠 memory -{target}: \"{_trunc(args.get('old_text', ''), 20)}\" {dur}")
        return _wrap(f"┊ 🧠 memory {action} {dur}")
    # --- Skills / media / messaging --------------------------------------
    if tool_name == "skills_list":
        return _wrap(f"┊ 📚 skills list {args.get('category', 'all')} {dur}")
    if tool_name == "skill_view":
        return _wrap(f"┊ 📚 skill {_trunc(args.get('name', ''), 30)} {dur}")
    if tool_name == "image_generate":
        return _wrap(f"┊ 🎨 create {_trunc(args.get('prompt', ''), 35)} {dur}")
    if tool_name == "text_to_speech":
        return _wrap(f"┊ 🔊 speak {_trunc(args.get('text', ''), 30)} {dur}")
    if tool_name == "vision_analyze":
        return _wrap(f"┊ 👁️ vision {_trunc(args.get('question', ''), 30)} {dur}")
    if tool_name == "mixture_of_agents":
        return _wrap(f"┊ 🧠 reason {_trunc(args.get('user_prompt', ''), 30)} {dur}")
    if tool_name == "send_message":
        return _wrap(f"┊ 📨 send {args.get('target', '?')}: \"{_trunc(args.get('message', ''), 25)}\" {dur}")
    if tool_name == "cronjob":
        action = args.get("action", "?")
        if action == "create":
            # Prefer explicit name, then first skill, then the raw prompt.
            skills = args.get("skills") or ([] if not args.get("skill") else [args.get("skill")])
            label = args.get("name") or (skills[0] if skills else None) or args.get("prompt", "task")
            return _wrap(f"┊ ⏰ cron create {_trunc(label, 24)} {dur}")
        if action == "list":
            return _wrap(f"┊ ⏰ cron listing {dur}")
        return _wrap(f"┊ ⏰ cron {action} {args.get('job_id', '')} {dur}")
    if tool_name.startswith("rl_"):
        rl = {
            "rl_list_environments": "list envs", "rl_select_environment": f"select {args.get('name', '')}",
            "rl_get_current_config": "get config", "rl_edit_config": f"set {args.get('field', '?')}",
            "rl_start_training": "start training", "rl_check_status": f"status {args.get('run_id', '?')[:12]}",
            "rl_stop_training": f"stop {args.get('run_id', '?')[:12]}", "rl_get_results": f"results {args.get('run_id', '?')[:12]}",
            "rl_list_runs": "list runs", "rl_test_inference": "test inference",
        }
        return _wrap(f"┊ 🧪 rl {rl.get(tool_name, tool_name.replace('rl_', ''))} {dur}")
    if tool_name == "execute_code":
        code = args.get("code", "")
        first_line = code.strip().split("\n")[0] if code.strip() else ""
        return _wrap(f"┊ 🐍 exec {_trunc(first_line, 35)} {dur}")
    if tool_name == "delegate_task":
        tasks = args.get("tasks")
        if tasks and isinstance(tasks, list):
            return _wrap(f"┊ 🔀 delegate {len(tasks)} parallel tasks {dur}")
        return _wrap(f"┊ 🔀 delegate {_trunc(args.get('goal', ''), 35)} {dur}")

    # Unknown tools: generic lightning bolt with the best-effort preview.
    preview = build_tool_preview(tool_name, args) or ""
    return _wrap(f"┊ ⚡ {tool_name[:9]:9} {_trunc(preview, 35)} {dur}")
|
| 588 |
+
|
| 589 |
+
|
| 590 |
+
# =========================================================================
|
| 591 |
+
# Honcho session line (one-liner with clickable OSC 8 hyperlink)
|
| 592 |
+
# =========================================================================
|
| 593 |
+
|
| 594 |
+
_DIM = "\033[2m"  # ANSI dim/faint
_SKY_BLUE = "\033[38;5;117m"  # 256-color pale sky blue (session-name accent)
_ANSI_RESET = "\033[0m"  # reset all attributes
|
| 597 |
+
|
| 598 |
+
|
| 599 |
+
def honcho_session_url(workspace: str, session_name: str) -> str:
    """Build a Honcho app URL for a session.

    Both path components are percent-encoded with no safe characters so
    slashes and spaces in names cannot break the query string.
    """
    from urllib.parse import quote
    encoded_ws = quote(workspace, safe='')
    encoded_session = quote(session_name, safe='')
    query = f"workspace={encoded_ws}&view=sessions&session={encoded_session}"
    return f"https://app.honcho.dev/explore?{query}"
|
| 608 |
+
|
| 609 |
+
|
| 610 |
+
def _osc8_link(url: str, text: str) -> str:
|
| 611 |
+
"""OSC 8 terminal hyperlink (clickable in iTerm2, Ghostty, WezTerm, etc.)."""
|
| 612 |
+
return f"\033]8;;{url}\033\\{text}\033]8;;\033\\"
|
| 613 |
+
|
| 614 |
+
|
| 615 |
+
def honcho_session_line(workspace: str, session_name: str) -> str:
    """One-line session indicator: `Honcho session: <clickable name>`."""
    colored_name = f"{_SKY_BLUE}{session_name}{_ANSI_RESET}"
    link = _osc8_link(honcho_session_url(workspace, session_name), colored_name)
    return f"{_DIM}Honcho session:{_ANSI_RESET} {link}"
|
| 620 |
+
|
| 621 |
+
|
| 622 |
+
def write_tty(text: str) -> None:
    """Write *text* directly to the controlling terminal (/dev/tty).

    Bypasses sys.stdout so the text stays visible even when stdout is
    captured or redirected. Falls back to stdout when no controlling
    terminal is available (e.g. cron, CI, detached processes).
    """
    try:
        fd = os.open("/dev/tty", os.O_WRONLY)
        try:
            os.write(fd, text.encode("utf-8"))
        finally:
            # Always release the descriptor — the original leaked it when
            # os.write itself raised.
            os.close(fd)
    except OSError:
        sys.stdout.write(text)
        sys.stdout.flush()
|
| 631 |
+
|
| 632 |
+
|
| 633 |
+
# =========================================================================
|
| 634 |
+
# Context pressure display (CLI user-facing warnings)
|
| 635 |
+
# =========================================================================
|
| 636 |
+
|
| 637 |
+
# ANSI color codes for context pressure tiers
_CYAN = "\033[36m"  # NOTE(review): unused by format_context_pressure below — possibly reserved for a lower tier; confirm
_YELLOW = "\033[33m"
_BOLD = "\033[1m"
_DIM_ANSI = "\033[2m"

# Bar characters for the progress meter
_BAR_FILLED = "▰"
_BAR_EMPTY = "▱"
_BAR_WIDTH = 20  # number of meter cells
|
| 647 |
+
|
| 648 |
+
|
| 649 |
+
def format_context_pressure(
    compaction_progress: float,
    threshold_tokens: int,
    threshold_percent: float,
    compression_enabled: bool = True,
) -> str:
    """Build a formatted context pressure line for CLI display.

    The bar and percentage show progress toward the compaction threshold,
    NOT the raw context window. 100% = compaction fires.

    Args:
        compaction_progress: How close to compaction (0.0–1.0, 1.0 = fires).
        threshold_tokens: Compaction threshold in tokens.
        threshold_percent: Compaction threshold as a fraction of context window.
        compression_enabled: Whether auto-compression is active.
    """
    percent_label = int(compaction_progress * 100)
    # Clamp the bar so overshoot past the threshold never overflows the gauge.
    cells = min(int(compaction_progress * _BAR_WIDTH), _BAR_WIDTH)
    gauge = _BAR_FILLED * cells + _BAR_EMPTY * (_BAR_WIDTH - cells)

    if threshold_tokens >= 1000:
        threshold_k = f"{threshold_tokens // 1000}k"
    else:
        threshold_k = str(threshold_tokens)
    threshold_pct_int = int(threshold_percent * 100)

    hint = "compaction approaching" if compression_enabled else "no auto-compaction"
    color = f"{_BOLD}{_YELLOW}"
    icon = "⚠"

    head = f" {color}{icon} context {gauge} {percent_label}% to compaction{_ANSI_RESET}"
    tail = f" {_DIM_ANSI}{threshold_k} threshold ({threshold_pct_int}%) · {hint}{_ANSI_RESET}"
    return head + tail
|
| 684 |
+
|
| 685 |
+
|
| 686 |
+
def format_context_pressure_gateway(
    compaction_progress: float,
    threshold_percent: float,
    compression_enabled: bool = True,
) -> str:
    """Build a plain-text context pressure notification for messaging platforms.

    No ANSI — just Unicode and plain text suitable for Telegram/Discord/etc.
    The percentage shows progress toward the compaction threshold.
    """
    percent_label = int(compaction_progress * 100)
    # Clamp the bar so overshoot past the threshold never overflows the gauge.
    cells = min(int(compaction_progress * _BAR_WIDTH), _BAR_WIDTH)
    gauge = _BAR_FILLED * cells + _BAR_EMPTY * (_BAR_WIDTH - cells)
    threshold_pct_int = int(threshold_percent * 100)

    if compression_enabled:
        hint = f"Context compaction approaching (threshold: {threshold_pct_int}% of window)."
    else:
        hint = "Auto-compaction is disabled — context may be truncated."

    return f"⚠️ Context: {gauge} {percent_label}% to compaction\n{hint}"
|
agent/insights.py
ADDED
|
@@ -0,0 +1,792 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Session Insights Engine for Hermes Agent.
|
| 3 |
+
|
| 4 |
+
Analyzes historical session data from the SQLite state database to produce
|
| 5 |
+
comprehensive usage insights — token consumption, cost estimates, tool usage
|
| 6 |
+
patterns, activity trends, model/platform breakdowns, and session metrics.
|
| 7 |
+
|
| 8 |
+
Inspired by Claude Code's /insights command, adapted for Hermes Agent's
|
| 9 |
+
multi-platform architecture with additional cost estimation and platform
|
| 10 |
+
breakdown capabilities.
|
| 11 |
+
|
| 12 |
+
Usage:
|
| 13 |
+
from agent.insights import InsightsEngine
|
| 14 |
+
engine = InsightsEngine(db)
|
| 15 |
+
report = engine.generate(days=30)
|
| 16 |
+
print(engine.format_terminal(report))
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
import json
|
| 20 |
+
import time
|
| 21 |
+
from collections import Counter, defaultdict
|
| 22 |
+
from datetime import datetime
|
| 23 |
+
from typing import Any, Dict, List
|
| 24 |
+
|
| 25 |
+
from agent.usage_pricing import (
|
| 26 |
+
CanonicalUsage,
|
| 27 |
+
DEFAULT_PRICING,
|
| 28 |
+
estimate_usage_cost,
|
| 29 |
+
format_duration_compact,
|
| 30 |
+
get_pricing,
|
| 31 |
+
has_known_pricing,
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
# Module-local alias for the imported constant — presumably kept so older
# call sites/tests can reference the underscored name; TODO confirm.
_DEFAULT_PRICING = DEFAULT_PRICING
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def _has_known_pricing(model_name: str, provider: str | None = None, base_url: str | None = None) -> bool:
    """Check if a model has known pricing (vs unknown/custom endpoint).

    Thin module-local wrapper over ``agent.usage_pricing.has_known_pricing``;
    presumably kept for backwards compatibility with existing call sites.

    Args:
        model_name: Model identifier (may include a provider prefix).
        provider: Optional billing provider hint.
        base_url: Optional API base URL hint (detects custom endpoints).
    """
    return has_known_pricing(model_name, provider=provider, base_url=base_url)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def _get_pricing(model_name: str) -> Dict[str, float]:
    """Look up pricing for a model. Uses fuzzy matching on model name.

    Returns _DEFAULT_PRICING (zero cost) for unknown/custom models —
    we can't assume costs for self-hosted endpoints, local inference, etc.
    """
    # Delegates entirely to agent.usage_pricing.get_pricing; kept as a
    # module-local wrapper (presumably for backwards compatibility).
    return get_pricing(model_name)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def _estimate_cost(
    session_or_model: Dict[str, Any] | str,
    input_tokens: int = 0,
    output_tokens: int = 0,
    *,
    cache_read_tokens: int = 0,
    cache_write_tokens: int = 0,
    provider: str | None = None,
    base_url: str | None = None,
) -> tuple[float, str]:
    """Estimate the USD cost for a session row or a model/token tuple.

    Args:
        session_or_model: Either a session row dict — token counts and
            billing hints are read from the dict — or a bare model name
            string, in which case the explicit token keyword arguments apply.
        input_tokens / output_tokens / cache_read_tokens / cache_write_tokens:
            Token counts; used only in the model-name-string form.
        provider / base_url: Billing hints; in the dict form these are
            replaced by the row's ``billing_provider`` / ``billing_base_url``.

    Returns:
        ``(amount_usd, status)`` where status is the cost-status string from
        ``estimate_usage_cost`` (callers compare against "included" and
        "unknown"; exact set defined in agent.usage_pricing).
    """
    if isinstance(session_or_model, dict):
        session = session_or_model
        model = session.get("model") or ""
        # "or 0" guards against NULL token columns coming back from the DB.
        usage = CanonicalUsage(
            input_tokens=session.get("input_tokens") or 0,
            output_tokens=session.get("output_tokens") or 0,
            cache_read_tokens=session.get("cache_read_tokens") or 0,
            cache_write_tokens=session.get("cache_write_tokens") or 0,
        )
        # Row-level billing metadata wins over any passed-in hints.
        provider = session.get("billing_provider")
        base_url = session.get("billing_base_url")
    else:
        model = session_or_model or ""
        usage = CanonicalUsage(
            input_tokens=input_tokens,
            output_tokens=output_tokens,
            cache_read_tokens=cache_read_tokens,
            cache_write_tokens=cache_write_tokens,
        )
    result = estimate_usage_cost(
        model,
        usage,
        provider=provider,
        base_url=base_url,
    )
    # amount_usd may be None for unknown pricing — normalize to 0.0.
    return float(result.amount_usd or 0.0), result.status
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def _format_duration(seconds: float) -> str:
    """Format seconds into a human-readable duration string.

    Thin wrapper over ``agent.usage_pricing.format_duration_compact``,
    kept as a module-local name for the formatters in this file.
    """
    return format_duration_compact(seconds)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def _bar_chart(values: List[int], max_width: int = 20) -> List[str]:
|
| 96 |
+
"""Create simple horizontal bar chart strings from values."""
|
| 97 |
+
peak = max(values) if values else 1
|
| 98 |
+
if peak == 0:
|
| 99 |
+
return ["" for _ in values]
|
| 100 |
+
return ["█" * max(1, int(v / peak * max_width)) if v > 0 else "" for v in values]
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class InsightsEngine:
|
| 104 |
+
"""
|
| 105 |
+
Analyzes session history and produces usage insights.
|
| 106 |
+
|
| 107 |
+
Works directly with a SessionDB instance (or raw sqlite3 connection)
|
| 108 |
+
to query session and message data.
|
| 109 |
+
"""
|
| 110 |
+
|
| 111 |
+
    def __init__(self, db):
        """
        Initialize with a SessionDB instance.

        Args:
            db: A SessionDB instance (from hermes_state.py)
        """
        self.db = db
        # NOTE(review): reaches into SessionDB's private connection attribute;
        # all queries below assume `db._conn` is a sqlite3 connection whose
        # rows support dict() conversion (row_factory=sqlite3.Row) — confirm.
        self._conn = db._conn
|
| 120 |
+
|
| 121 |
+
def generate(self, days: int = 30, source: str = None) -> Dict[str, Any]:
|
| 122 |
+
"""
|
| 123 |
+
Generate a complete insights report.
|
| 124 |
+
|
| 125 |
+
Args:
|
| 126 |
+
days: Number of days to look back (default: 30)
|
| 127 |
+
source: Optional filter by source platform
|
| 128 |
+
|
| 129 |
+
Returns:
|
| 130 |
+
Dict with all computed insights
|
| 131 |
+
"""
|
| 132 |
+
cutoff = time.time() - (days * 86400)
|
| 133 |
+
|
| 134 |
+
# Gather raw data
|
| 135 |
+
sessions = self._get_sessions(cutoff, source)
|
| 136 |
+
tool_usage = self._get_tool_usage(cutoff, source)
|
| 137 |
+
message_stats = self._get_message_stats(cutoff, source)
|
| 138 |
+
|
| 139 |
+
if not sessions:
|
| 140 |
+
return {
|
| 141 |
+
"days": days,
|
| 142 |
+
"source_filter": source,
|
| 143 |
+
"empty": True,
|
| 144 |
+
"overview": {},
|
| 145 |
+
"models": [],
|
| 146 |
+
"platforms": [],
|
| 147 |
+
"tools": [],
|
| 148 |
+
"activity": {},
|
| 149 |
+
"top_sessions": [],
|
| 150 |
+
}
|
| 151 |
+
|
| 152 |
+
# Compute insights
|
| 153 |
+
overview = self._compute_overview(sessions, message_stats)
|
| 154 |
+
models = self._compute_model_breakdown(sessions)
|
| 155 |
+
platforms = self._compute_platform_breakdown(sessions)
|
| 156 |
+
tools = self._compute_tool_breakdown(tool_usage)
|
| 157 |
+
activity = self._compute_activity_patterns(sessions)
|
| 158 |
+
top_sessions = self._compute_top_sessions(sessions)
|
| 159 |
+
|
| 160 |
+
return {
|
| 161 |
+
"days": days,
|
| 162 |
+
"source_filter": source,
|
| 163 |
+
"empty": False,
|
| 164 |
+
"generated_at": time.time(),
|
| 165 |
+
"overview": overview,
|
| 166 |
+
"models": models,
|
| 167 |
+
"platforms": platforms,
|
| 168 |
+
"tools": tools,
|
| 169 |
+
"activity": activity,
|
| 170 |
+
"top_sessions": top_sessions,
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
# =========================================================================
|
| 174 |
+
# Data gathering (SQL queries)
|
| 175 |
+
# =========================================================================
|
| 176 |
+
|
| 177 |
+
    # Columns we actually need (skip system_prompt, model_config blobs)
    _SESSION_COLS = ("id, source, model, started_at, ended_at, "
                     "message_count, tool_call_count, input_tokens, output_tokens, "
                     "cache_read_tokens, cache_write_tokens, billing_provider, "
                     "billing_base_url, billing_mode, estimated_cost_usd, "
                     "actual_cost_usd, cost_status, cost_source")

    # Pre-computed query strings — f-string evaluated once at class definition,
    # not at runtime, so no user-controlled value can alter the query structure.
    _GET_SESSIONS_WITH_SOURCE = (
        f"SELECT {_SESSION_COLS} FROM sessions"
        " WHERE started_at >= ? AND source = ?"
        " ORDER BY started_at DESC"
    )
    _GET_SESSIONS_ALL = (
        f"SELECT {_SESSION_COLS} FROM sessions"
        " WHERE started_at >= ?"
        " ORDER BY started_at DESC"
    )

    def _get_sessions(self, cutoff: float, source: str = None) -> List[Dict]:
        """Fetch sessions within the time window.

        Args:
            cutoff: Minimum ``started_at`` timestamp (epoch seconds).
            source: Optional exact-match platform filter.

        Returns:
            Session rows as plain dicts, newest first.
        """
        if source:
            cursor = self._conn.execute(self._GET_SESSIONS_WITH_SOURCE, (cutoff, source))
        else:
            cursor = self._conn.execute(self._GET_SESSIONS_ALL, (cutoff,))
        # dict(row) assumes the connection's row_factory yields mapping rows
        # (e.g. sqlite3.Row) — TODO confirm in SessionDB.
        return [dict(row) for row in cursor.fetchall()]
|
| 204 |
+
|
| 205 |
+
    def _get_tool_usage(self, cutoff: float, source: str = None) -> List[Dict]:
        """Get tool call counts from messages.

        Uses two sources:
        1. tool_name column on 'tool' role messages (set by gateway)
        2. tool_calls JSON on 'assistant' role messages (covers CLI where
           tool_name is not populated on tool responses)

        Args:
            cutoff: Minimum session ``started_at`` timestamp (epoch seconds).
            source: Optional exact-match platform filter.

        Returns:
            List of ``{"tool_name": str, "count": int}`` dicts, most-used first.
        """
        tool_counts = Counter()

        # Source 1: explicit tool_name on tool response messages
        if source:
            cursor = self._conn.execute(
                """SELECT m.tool_name, COUNT(*) as count
                   FROM messages m
                   JOIN sessions s ON s.id = m.session_id
                   WHERE s.started_at >= ? AND s.source = ?
                   AND m.role = 'tool' AND m.tool_name IS NOT NULL
                   GROUP BY m.tool_name
                   ORDER BY count DESC""",
                (cutoff, source),
            )
        else:
            cursor = self._conn.execute(
                """SELECT m.tool_name, COUNT(*) as count
                   FROM messages m
                   JOIN sessions s ON s.id = m.session_id
                   WHERE s.started_at >= ?
                   AND m.role = 'tool' AND m.tool_name IS NOT NULL
                   GROUP BY m.tool_name
                   ORDER BY count DESC""",
                (cutoff,),
            )
        for row in cursor.fetchall():
            tool_counts[row["tool_name"]] += row["count"]

        # Source 2: extract from tool_calls JSON on assistant messages
        # (covers CLI sessions where tool_name is NULL on tool responses)
        if source:
            cursor2 = self._conn.execute(
                """SELECT m.tool_calls
                   FROM messages m
                   JOIN sessions s ON s.id = m.session_id
                   WHERE s.started_at >= ? AND s.source = ?
                   AND m.role = 'assistant' AND m.tool_calls IS NOT NULL""",
                (cutoff, source),
            )
        else:
            cursor2 = self._conn.execute(
                """SELECT m.tool_calls
                   FROM messages m
                   JOIN sessions s ON s.id = m.session_id
                   WHERE s.started_at >= ?
                   AND m.role = 'assistant' AND m.tool_calls IS NOT NULL""",
                (cutoff,),
            )

        tool_calls_counts = Counter()
        for row in cursor2.fetchall():
            try:
                calls = row["tool_calls"]
                # Column may hold JSON text or an already-decoded value.
                if isinstance(calls, str):
                    calls = json.loads(calls)
                if isinstance(calls, list):
                    for call in calls:
                        # OpenAI-style shape: {"function": {"name": ...}}
                        func = call.get("function", {}) if isinstance(call, dict) else {}
                        name = func.get("name")
                        if name:
                            tool_calls_counts[name] += 1
            except (json.JSONDecodeError, TypeError, AttributeError):
                # One malformed row must not sink the whole report.
                continue

        # Merge: prefer tool_name source, supplement with tool_calls source
        # for tools not already counted
        if not tool_counts and tool_calls_counts:
            # No tool_name data at all — use tool_calls exclusively
            tool_counts = tool_calls_counts
        elif tool_counts and tool_calls_counts:
            # Both sources have data — use whichever has the higher count per tool
            # (they may overlap, so take the max to avoid double-counting)
            all_tools = set(tool_counts) | set(tool_calls_counts)
            merged = Counter()
            for tool in all_tools:
                merged[tool] = max(tool_counts.get(tool, 0), tool_calls_counts.get(tool, 0))
            tool_counts = merged

        # Convert to the expected format
        return [
            {"tool_name": name, "count": count}
            for name, count in tool_counts.most_common()
        ]
|
| 296 |
+
|
| 297 |
+
def _get_message_stats(self, cutoff: float, source: str = None) -> Dict:
|
| 298 |
+
"""Get aggregate message statistics."""
|
| 299 |
+
if source:
|
| 300 |
+
cursor = self._conn.execute(
|
| 301 |
+
"""SELECT
|
| 302 |
+
COUNT(*) as total_messages,
|
| 303 |
+
SUM(CASE WHEN m.role = 'user' THEN 1 ELSE 0 END) as user_messages,
|
| 304 |
+
SUM(CASE WHEN m.role = 'assistant' THEN 1 ELSE 0 END) as assistant_messages,
|
| 305 |
+
SUM(CASE WHEN m.role = 'tool' THEN 1 ELSE 0 END) as tool_messages
|
| 306 |
+
FROM messages m
|
| 307 |
+
JOIN sessions s ON s.id = m.session_id
|
| 308 |
+
WHERE s.started_at >= ? AND s.source = ?""",
|
| 309 |
+
(cutoff, source),
|
| 310 |
+
)
|
| 311 |
+
else:
|
| 312 |
+
cursor = self._conn.execute(
|
| 313 |
+
"""SELECT
|
| 314 |
+
COUNT(*) as total_messages,
|
| 315 |
+
SUM(CASE WHEN m.role = 'user' THEN 1 ELSE 0 END) as user_messages,
|
| 316 |
+
SUM(CASE WHEN m.role = 'assistant' THEN 1 ELSE 0 END) as assistant_messages,
|
| 317 |
+
SUM(CASE WHEN m.role = 'tool' THEN 1 ELSE 0 END) as tool_messages
|
| 318 |
+
FROM messages m
|
| 319 |
+
JOIN sessions s ON s.id = m.session_id
|
| 320 |
+
WHERE s.started_at >= ?""",
|
| 321 |
+
(cutoff,),
|
| 322 |
+
)
|
| 323 |
+
row = cursor.fetchone()
|
| 324 |
+
return dict(row) if row else {
|
| 325 |
+
"total_messages": 0, "user_messages": 0,
|
| 326 |
+
"assistant_messages": 0, "tool_messages": 0,
|
| 327 |
+
}
|
| 328 |
+
|
| 329 |
+
# =========================================================================
|
| 330 |
+
# Computation
|
| 331 |
+
# =========================================================================
|
| 332 |
+
|
| 333 |
+
    def _compute_overview(self, sessions: List[Dict], message_stats: Dict) -> Dict:
        """Compute high-level overview statistics.

        Args:
            sessions: Session row dicts from _get_sessions.
            message_stats: Aggregate counts from _get_message_stats.

        Returns:
            Dict of totals, averages, cost figures, date range, and
            pricing-coverage breakdown.
        """
        # "or 0" throughout guards against NULL token columns from the DB.
        total_input = sum(s.get("input_tokens") or 0 for s in sessions)
        total_output = sum(s.get("output_tokens") or 0 for s in sessions)
        total_cache_read = sum(s.get("cache_read_tokens") or 0 for s in sessions)
        total_cache_write = sum(s.get("cache_write_tokens") or 0 for s in sessions)
        total_tokens = total_input + total_output + total_cache_read + total_cache_write
        total_tool_calls = sum(s.get("tool_call_count") or 0 for s in sessions)
        total_messages = sum(s.get("message_count") or 0 for s in sessions)

        # Cost estimation (weighted by model)
        total_cost = 0.0
        actual_cost = 0.0
        models_with_pricing = set()
        models_without_pricing = set()
        unknown_cost_sessions = 0
        included_cost_sessions = 0
        for s in sessions:
            model = s.get("model") or ""
            estimated, status = _estimate_cost(s)
            total_cost += estimated
            actual_cost += s.get("actual_cost_usd") or 0.0
            # Short display name: strip provider prefix ("openai/gpt-4" -> "gpt-4").
            display = model.split("/")[-1] if "/" in model else (model or "unknown")
            if status == "included":
                included_cost_sessions += 1
            elif status == "unknown":
                unknown_cost_sessions += 1
            if _has_known_pricing(model, s.get("billing_provider"), s.get("billing_base_url")):
                models_with_pricing.add(display)
            else:
                models_without_pricing.add(display)

        # Session duration stats (guard against negative durations from clock drift)
        durations = []
        for s in sessions:
            start = s.get("started_at")
            end = s.get("ended_at")
            if start and end and end > start:
                durations.append(end - start)

        total_hours = sum(durations) / 3600 if durations else 0
        avg_duration = sum(durations) / len(durations) if durations else 0

        # Earliest and latest session
        started_timestamps = [s["started_at"] for s in sessions if s.get("started_at")]
        date_range_start = min(started_timestamps) if started_timestamps else None
        date_range_end = max(started_timestamps) if started_timestamps else None

        return {
            "total_sessions": len(sessions),
            "total_messages": total_messages,
            "total_tool_calls": total_tool_calls,
            "total_input_tokens": total_input,
            "total_output_tokens": total_output,
            "total_cache_read_tokens": total_cache_read,
            "total_cache_write_tokens": total_cache_write,
            "total_tokens": total_tokens,
            "estimated_cost": total_cost,
            "actual_cost": actual_cost,
            "total_hours": total_hours,
            "avg_session_duration": avg_duration,
            "avg_messages_per_session": total_messages / len(sessions) if sessions else 0,
            "avg_tokens_per_session": total_tokens / len(sessions) if sessions else 0,
            "user_messages": message_stats.get("user_messages") or 0,
            "assistant_messages": message_stats.get("assistant_messages") or 0,
            "tool_messages": message_stats.get("tool_messages") or 0,
            "date_range_start": date_range_start,
            "date_range_end": date_range_end,
            "models_with_pricing": sorted(models_with_pricing),
            "models_without_pricing": sorted(models_without_pricing),
            "unknown_cost_sessions": unknown_cost_sessions,
            "included_cost_sessions": included_cost_sessions,
        }
|
| 406 |
+
|
| 407 |
+
    def _compute_model_breakdown(self, sessions: List[Dict]) -> List[Dict]:
        """Break down usage by model.

        Returns one entry per display model name with token/cost aggregates,
        sorted by total tokens (session count as tiebreaker).
        """
        model_data = defaultdict(lambda: {
            "sessions": 0, "input_tokens": 0, "output_tokens": 0,
            "cache_read_tokens": 0, "cache_write_tokens": 0,
            "total_tokens": 0, "tool_calls": 0, "cost": 0.0,
        })

        for s in sessions:
            model = s.get("model") or "unknown"
            # Normalize: strip provider prefix for display
            display_model = model.split("/")[-1] if "/" in model else model
            d = model_data[display_model]
            d["sessions"] += 1
            # "or 0" guards against NULL token columns from the DB.
            inp = s.get("input_tokens") or 0
            out = s.get("output_tokens") or 0
            cache_read = s.get("cache_read_tokens") or 0
            cache_write = s.get("cache_write_tokens") or 0
            d["input_tokens"] += inp
            d["output_tokens"] += out
            d["cache_read_tokens"] += cache_read
            d["cache_write_tokens"] += cache_write
            d["total_tokens"] += inp + out + cache_read + cache_write
            d["tool_calls"] += s.get("tool_call_count") or 0
            estimate, status = _estimate_cost(s)
            d["cost"] += estimate
            # NOTE(review): has_pricing / cost_status are overwritten on every
            # session, so the LAST session for a display name wins; if sessions
            # under one display name mix providers these flags may mislead —
            # confirm whether they should be aggregated instead.
            d["has_pricing"] = _has_known_pricing(model, s.get("billing_provider"), s.get("billing_base_url"))
            d["cost_status"] = status

        result = [
            {"model": model, **data}
            for model, data in model_data.items()
        ]
        # Sort by tokens first, fall back to session count when tokens are 0
        result.sort(key=lambda x: (x["total_tokens"], x["sessions"]), reverse=True)
        return result
|
| 443 |
+
|
| 444 |
+
def _compute_platform_breakdown(self, sessions: List[Dict]) -> List[Dict]:
|
| 445 |
+
"""Break down usage by platform/source."""
|
| 446 |
+
platform_data = defaultdict(lambda: {
|
| 447 |
+
"sessions": 0, "messages": 0, "input_tokens": 0,
|
| 448 |
+
"output_tokens": 0, "cache_read_tokens": 0,
|
| 449 |
+
"cache_write_tokens": 0, "total_tokens": 0, "tool_calls": 0,
|
| 450 |
+
})
|
| 451 |
+
|
| 452 |
+
for s in sessions:
|
| 453 |
+
source = s.get("source") or "unknown"
|
| 454 |
+
d = platform_data[source]
|
| 455 |
+
d["sessions"] += 1
|
| 456 |
+
d["messages"] += s.get("message_count") or 0
|
| 457 |
+
inp = s.get("input_tokens") or 0
|
| 458 |
+
out = s.get("output_tokens") or 0
|
| 459 |
+
cache_read = s.get("cache_read_tokens") or 0
|
| 460 |
+
cache_write = s.get("cache_write_tokens") or 0
|
| 461 |
+
d["input_tokens"] += inp
|
| 462 |
+
d["output_tokens"] += out
|
| 463 |
+
d["cache_read_tokens"] += cache_read
|
| 464 |
+
d["cache_write_tokens"] += cache_write
|
| 465 |
+
d["total_tokens"] += inp + out + cache_read + cache_write
|
| 466 |
+
d["tool_calls"] += s.get("tool_call_count") or 0
|
| 467 |
+
|
| 468 |
+
result = [
|
| 469 |
+
{"platform": platform, **data}
|
| 470 |
+
for platform, data in platform_data.items()
|
| 471 |
+
]
|
| 472 |
+
result.sort(key=lambda x: x["sessions"], reverse=True)
|
| 473 |
+
return result
|
| 474 |
+
|
| 475 |
+
def _compute_tool_breakdown(self, tool_usage: List[Dict]) -> List[Dict]:
|
| 476 |
+
"""Process tool usage data into a ranked list with percentages."""
|
| 477 |
+
total_calls = sum(t["count"] for t in tool_usage) if tool_usage else 0
|
| 478 |
+
result = []
|
| 479 |
+
for t in tool_usage:
|
| 480 |
+
pct = (t["count"] / total_calls * 100) if total_calls else 0
|
| 481 |
+
result.append({
|
| 482 |
+
"tool": t["tool_name"],
|
| 483 |
+
"count": t["count"],
|
| 484 |
+
"percentage": pct,
|
| 485 |
+
})
|
| 486 |
+
return result
|
| 487 |
+
|
| 488 |
+
def _compute_activity_patterns(self, sessions: List[Dict]) -> Dict:
|
| 489 |
+
"""Analyze activity patterns by day of week and hour."""
|
| 490 |
+
day_counts = Counter() # 0=Monday ... 6=Sunday
|
| 491 |
+
hour_counts = Counter()
|
| 492 |
+
daily_counts = Counter() # date string -> count
|
| 493 |
+
|
| 494 |
+
for s in sessions:
|
| 495 |
+
ts = s.get("started_at")
|
| 496 |
+
if not ts:
|
| 497 |
+
continue
|
| 498 |
+
dt = datetime.fromtimestamp(ts)
|
| 499 |
+
day_counts[dt.weekday()] += 1
|
| 500 |
+
hour_counts[dt.hour] += 1
|
| 501 |
+
daily_counts[dt.strftime("%Y-%m-%d")] += 1
|
| 502 |
+
|
| 503 |
+
day_names = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
|
| 504 |
+
day_breakdown = [
|
| 505 |
+
{"day": day_names[i], "count": day_counts.get(i, 0)}
|
| 506 |
+
for i in range(7)
|
| 507 |
+
]
|
| 508 |
+
|
| 509 |
+
hour_breakdown = [
|
| 510 |
+
{"hour": i, "count": hour_counts.get(i, 0)}
|
| 511 |
+
for i in range(24)
|
| 512 |
+
]
|
| 513 |
+
|
| 514 |
+
# Busiest day and hour
|
| 515 |
+
busiest_day = max(day_breakdown, key=lambda x: x["count"]) if day_breakdown else None
|
| 516 |
+
busiest_hour = max(hour_breakdown, key=lambda x: x["count"]) if hour_breakdown else None
|
| 517 |
+
|
| 518 |
+
# Active days (days with at least one session)
|
| 519 |
+
active_days = len(daily_counts)
|
| 520 |
+
|
| 521 |
+
# Streak calculation
|
| 522 |
+
if daily_counts:
|
| 523 |
+
all_dates = sorted(daily_counts.keys())
|
| 524 |
+
current_streak = 1
|
| 525 |
+
max_streak = 1
|
| 526 |
+
for i in range(1, len(all_dates)):
|
| 527 |
+
d1 = datetime.strptime(all_dates[i - 1], "%Y-%m-%d")
|
| 528 |
+
d2 = datetime.strptime(all_dates[i], "%Y-%m-%d")
|
| 529 |
+
if (d2 - d1).days == 1:
|
| 530 |
+
current_streak += 1
|
| 531 |
+
max_streak = max(max_streak, current_streak)
|
| 532 |
+
else:
|
| 533 |
+
current_streak = 1
|
| 534 |
+
else:
|
| 535 |
+
max_streak = 0
|
| 536 |
+
|
| 537 |
+
return {
|
| 538 |
+
"by_day": day_breakdown,
|
| 539 |
+
"by_hour": hour_breakdown,
|
| 540 |
+
"busiest_day": busiest_day,
|
| 541 |
+
"busiest_hour": busiest_hour,
|
| 542 |
+
"active_days": active_days,
|
| 543 |
+
"max_streak": max_streak,
|
| 544 |
+
}
|
| 545 |
+
|
| 546 |
+
def _compute_top_sessions(self, sessions: List[Dict]) -> List[Dict]:
|
| 547 |
+
"""Find notable sessions (longest, most messages, most tokens)."""
|
| 548 |
+
top = []
|
| 549 |
+
|
| 550 |
+
# Longest by duration
|
| 551 |
+
sessions_with_duration = [
|
| 552 |
+
s for s in sessions
|
| 553 |
+
if s.get("started_at") and s.get("ended_at")
|
| 554 |
+
]
|
| 555 |
+
if sessions_with_duration:
|
| 556 |
+
longest = max(
|
| 557 |
+
sessions_with_duration,
|
| 558 |
+
key=lambda s: (s["ended_at"] - s["started_at"]),
|
| 559 |
+
)
|
| 560 |
+
dur = longest["ended_at"] - longest["started_at"]
|
| 561 |
+
top.append({
|
| 562 |
+
"label": "Longest session",
|
| 563 |
+
"session_id": longest["id"][:16],
|
| 564 |
+
"value": _format_duration(dur),
|
| 565 |
+
"date": datetime.fromtimestamp(longest["started_at"]).strftime("%b %d"),
|
| 566 |
+
})
|
| 567 |
+
|
| 568 |
+
# Most messages
|
| 569 |
+
most_msgs = max(sessions, key=lambda s: s.get("message_count") or 0)
|
| 570 |
+
if (most_msgs.get("message_count") or 0) > 0:
|
| 571 |
+
top.append({
|
| 572 |
+
"label": "Most messages",
|
| 573 |
+
"session_id": most_msgs["id"][:16],
|
| 574 |
+
"value": f"{most_msgs['message_count']} msgs",
|
| 575 |
+
"date": datetime.fromtimestamp(most_msgs["started_at"]).strftime("%b %d") if most_msgs.get("started_at") else "?",
|
| 576 |
+
})
|
| 577 |
+
|
| 578 |
+
# Most tokens
|
| 579 |
+
most_tokens = max(
|
| 580 |
+
sessions,
|
| 581 |
+
key=lambda s: (s.get("input_tokens") or 0) + (s.get("output_tokens") or 0),
|
| 582 |
+
)
|
| 583 |
+
token_total = (most_tokens.get("input_tokens") or 0) + (most_tokens.get("output_tokens") or 0)
|
| 584 |
+
if token_total > 0:
|
| 585 |
+
top.append({
|
| 586 |
+
"label": "Most tokens",
|
| 587 |
+
"session_id": most_tokens["id"][:16],
|
| 588 |
+
"value": f"{token_total:,} tokens",
|
| 589 |
+
"date": datetime.fromtimestamp(most_tokens["started_at"]).strftime("%b %d") if most_tokens.get("started_at") else "?",
|
| 590 |
+
})
|
| 591 |
+
|
| 592 |
+
# Most tool calls
|
| 593 |
+
most_tools = max(sessions, key=lambda s: s.get("tool_call_count") or 0)
|
| 594 |
+
if (most_tools.get("tool_call_count") or 0) > 0:
|
| 595 |
+
top.append({
|
| 596 |
+
"label": "Most tool calls",
|
| 597 |
+
"session_id": most_tools["id"][:16],
|
| 598 |
+
"value": f"{most_tools['tool_call_count']} calls",
|
| 599 |
+
"date": datetime.fromtimestamp(most_tools["started_at"]).strftime("%b %d") if most_tools.get("started_at") else "?",
|
| 600 |
+
})
|
| 601 |
+
|
| 602 |
+
return top
|
| 603 |
+
|
| 604 |
+
# =========================================================================
|
| 605 |
+
# Formatting
|
| 606 |
+
# =========================================================================
|
| 607 |
+
|
| 608 |
+
    def format_terminal(self, report: Dict) -> str:
        """Format the insights report for terminal display (CLI).

        Renders a boxed header, an overview section, then optional sections
        (models, platforms, tools, activity, notable sessions) that are
        emitted only when the report contains data for them.

        Args:
            report: Output of the report-building step; either an "empty"
                marker dict or the full dict with ``overview``, ``models``,
                ``platforms``, ``tools``, ``activity`` and ``top_sessions``.

        Returns:
            A multi-line string ready to print.
        """
        # Empty report: short one-line message instead of the full layout.
        if report.get("empty"):
            days = report.get("days", 30)
            src = f" (source: {report['source_filter']})" if report.get("source_filter") else ""
            return f" No sessions found in the last {days} days{src}."

        lines = []
        o = report["overview"]
        days = report["days"]
        src_filter = report.get("source_filter")

        # Header — box is 58 chars wide inside the borders; the period label
        # is centered by splitting the remaining padding left/right.
        lines.append("")
        lines.append(" ╔══════════════════════════════════════════════════════════╗")
        lines.append(" ║ 📊 Hermes Insights ║")
        period_label = f"Last {days} days"
        if src_filter:
            period_label += f" ({src_filter})"
        padding = 58 - len(period_label) - 2
        left_pad = padding // 2
        right_pad = padding - left_pad
        lines.append(f" ║{' ' * left_pad} {period_label} {' ' * right_pad}║")
        lines.append(" ╚══════════════════════════════════════════════════════════╝")
        lines.append("")

        # Date range
        if o.get("date_range_start") and o.get("date_range_end"):
            start_str = datetime.fromtimestamp(o["date_range_start"]).strftime("%b %d, %Y")
            end_str = datetime.fromtimestamp(o["date_range_end"]).strftime("%b %d, %Y")
            lines.append(f" Period: {start_str} — {end_str}")
            lines.append("")

        # Overview — two columns per line, left column padded to 12 chars.
        lines.append(" 📋 Overview")
        lines.append(" " + "─" * 56)
        lines.append(f" Sessions: {o['total_sessions']:<12} Messages: {o['total_messages']:,}")
        lines.append(f" Tool calls: {o['total_tool_calls']:<12,} User messages: {o['user_messages']:,}")
        lines.append(f" Input tokens: {o['total_input_tokens']:<12,} Output tokens: {o['total_output_tokens']:,}")
        cost_str = f"${o['estimated_cost']:.2f}"
        # "*" marks a cost total that excludes models with no known pricing.
        if o.get("models_without_pricing"):
            cost_str += " *"
        lines.append(f" Total tokens: {o['total_tokens']:<12,} Est. cost: {cost_str}")
        if o["total_hours"] > 0:
            lines.append(f" Active time: ~{_format_duration(o['total_hours'] * 3600):<11} Avg session: ~{_format_duration(o['avg_session_duration'])}")
        lines.append(f" Avg msgs/session: {o['avg_messages_per_session']:.1f}")
        lines.append("")

        # Model breakdown
        if report["models"]:
            lines.append(" 🤖 Models Used")
            lines.append(" " + "─" * 56)
            lines.append(f" {'Model':<30} {'Sessions':>8} {'Tokens':>12} {'Cost':>8}")
            for m in report["models"]:
                model_name = m["model"][:28]
                if m.get("has_pricing"):
                    cost_cell = f"${m['cost']:>6.2f}"
                else:
                    cost_cell = " N/A"
                lines.append(f" {model_name:<30} {m['sessions']:>8} {m['total_tokens']:>12,} {cost_cell}")
            if o.get("models_without_pricing"):
                lines.append(f" * Cost N/A for custom/self-hosted models")
            lines.append("")

        # Platform breakdown — skipped when the only platform is the CLI.
        if len(report["platforms"]) > 1 or (report["platforms"] and report["platforms"][0]["platform"] != "cli"):
            lines.append(" 📱 Platforms")
            lines.append(" " + "─" * 56)
            lines.append(f" {'Platform':<14} {'Sessions':>8} {'Messages':>10} {'Tokens':>14}")
            for p in report["platforms"]:
                lines.append(f" {p['platform']:<14} {p['sessions']:>8} {p['messages']:>10,} {p['total_tokens']:>14,}")
            lines.append("")

        # Tool usage
        if report["tools"]:
            lines.append(" 🔧 Top Tools")
            lines.append(" " + "─" * 56)
            lines.append(f" {'Tool':<28} {'Calls':>8} {'%':>8}")
            for t in report["tools"][:15]:  # Top 15
                lines.append(f" {t['tool']:<28} {t['count']:>8,} {t['percentage']:>7.1f}%")
            if len(report["tools"]) > 15:
                lines.append(f" ... and {len(report['tools']) - 15} more tools")
            lines.append("")

        # Activity patterns
        act = report.get("activity", {})
        if act.get("by_day"):
            lines.append(" 📅 Activity Patterns")
            lines.append(" " + "─" * 56)

            # Day of week chart — _bar_chart scales counts to 15-char bars.
            day_values = [d["count"] for d in act["by_day"]]
            bars = _bar_chart(day_values, max_width=15)
            for i, d in enumerate(act["by_day"]):
                bar = bars[i]
                lines.append(f" {d['day']} {bar:<15} {d['count']}")

            lines.append("")

            # Peak hours (show top 5 busiest hours)
            busy_hours = sorted(act["by_hour"], key=lambda x: x["count"], reverse=True)
            busy_hours = [h for h in busy_hours if h["count"] > 0][:5]
            if busy_hours:
                hour_strs = []
                for h in busy_hours:
                    hr = h["hour"]
                    ampm = "AM" if hr < 12 else "PM"
                    # 12-hour clock: 0 → 12AM, 12 → 12PM.
                    display_hr = hr % 12 or 12
                    hour_strs.append(f"{display_hr}{ampm} ({h['count']})")
                lines.append(f" Peak hours: {', '.join(hour_strs)}")

            if act.get("active_days"):
                lines.append(f" Active days: {act['active_days']}")
            # Streaks of a single day are not interesting — only show > 1.
            if act.get("max_streak") and act["max_streak"] > 1:
                lines.append(f" Best streak: {act['max_streak']} consecutive days")
            lines.append("")

        # Notable sessions
        if report.get("top_sessions"):
            lines.append(" 🏆 Notable Sessions")
            lines.append(" " + "─" * 56)
            for ts in report["top_sessions"]:
                lines.append(f" {ts['label']:<20} {ts['value']:<18} ({ts['date']}, {ts['session_id']})")
            lines.append("")

        return "\n".join(lines)
|
| 734 |
+
|
| 735 |
+
def format_gateway(self, report: Dict) -> str:
|
| 736 |
+
"""Format the insights report for gateway/messaging (shorter)."""
|
| 737 |
+
if report.get("empty"):
|
| 738 |
+
days = report.get("days", 30)
|
| 739 |
+
return f"No sessions found in the last {days} days."
|
| 740 |
+
|
| 741 |
+
lines = []
|
| 742 |
+
o = report["overview"]
|
| 743 |
+
days = report["days"]
|
| 744 |
+
|
| 745 |
+
lines.append(f"📊 **Hermes Insights** — Last {days} days\n")
|
| 746 |
+
|
| 747 |
+
# Overview
|
| 748 |
+
lines.append(f"**Sessions:** {o['total_sessions']} | **Messages:** {o['total_messages']:,} | **Tool calls:** {o['total_tool_calls']:,}")
|
| 749 |
+
lines.append(f"**Tokens:** {o['total_tokens']:,} (in: {o['total_input_tokens']:,} / out: {o['total_output_tokens']:,})")
|
| 750 |
+
cost_note = ""
|
| 751 |
+
if o.get("models_without_pricing"):
|
| 752 |
+
cost_note = " _(excludes custom/self-hosted models)_"
|
| 753 |
+
lines.append(f"**Est. cost:** ${o['estimated_cost']:.2f}{cost_note}")
|
| 754 |
+
if o["total_hours"] > 0:
|
| 755 |
+
lines.append(f"**Active time:** ~{_format_duration(o['total_hours'] * 3600)} | **Avg session:** ~{_format_duration(o['avg_session_duration'])}")
|
| 756 |
+
lines.append("")
|
| 757 |
+
|
| 758 |
+
# Models (top 5)
|
| 759 |
+
if report["models"]:
|
| 760 |
+
lines.append("**🤖 Models:**")
|
| 761 |
+
for m in report["models"][:5]:
|
| 762 |
+
cost_str = f"${m['cost']:.2f}" if m.get("has_pricing") else "N/A"
|
| 763 |
+
lines.append(f" {m['model'][:25]} — {m['sessions']} sessions, {m['total_tokens']:,} tokens, {cost_str}")
|
| 764 |
+
lines.append("")
|
| 765 |
+
|
| 766 |
+
# Platforms (if multi-platform)
|
| 767 |
+
if len(report["platforms"]) > 1:
|
| 768 |
+
lines.append("**📱 Platforms:**")
|
| 769 |
+
for p in report["platforms"]:
|
| 770 |
+
lines.append(f" {p['platform']} — {p['sessions']} sessions, {p['messages']:,} msgs")
|
| 771 |
+
lines.append("")
|
| 772 |
+
|
| 773 |
+
# Tools (top 8)
|
| 774 |
+
if report["tools"]:
|
| 775 |
+
lines.append("**🔧 Top Tools:**")
|
| 776 |
+
for t in report["tools"][:8]:
|
| 777 |
+
lines.append(f" {t['tool']} — {t['count']:,} calls ({t['percentage']:.1f}%)")
|
| 778 |
+
lines.append("")
|
| 779 |
+
|
| 780 |
+
# Activity summary
|
| 781 |
+
act = report.get("activity", {})
|
| 782 |
+
if act.get("busiest_day") and act.get("busiest_hour"):
|
| 783 |
+
hr = act["busiest_hour"]["hour"]
|
| 784 |
+
ampm = "AM" if hr < 12 else "PM"
|
| 785 |
+
display_hr = hr % 12 or 12
|
| 786 |
+
lines.append(f"**📅 Busiest:** {act['busiest_day']['day']}s ({act['busiest_day']['count']} sessions), {display_hr}{ampm} ({act['busiest_hour']['count']} sessions)")
|
| 787 |
+
if act.get("active_days"):
|
| 788 |
+
lines.append(f"**Active days:** {act['active_days']}", )
|
| 789 |
+
if act.get("max_streak", 0) > 1:
|
| 790 |
+
lines.append(f"**Best streak:** {act['max_streak']} consecutive days")
|
| 791 |
+
|
| 792 |
+
return "\n".join(lines)
|
agent/model_metadata.py
ADDED
|
@@ -0,0 +1,897 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Model metadata, context lengths, and token estimation utilities.
|
| 2 |
+
|
| 3 |
+
Pure utility functions with no AIAgent dependency. Used by ContextCompressor
|
| 4 |
+
and run_agent.py for pre-flight context checks.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import logging
|
| 8 |
+
import os
|
| 9 |
+
import re
|
| 10 |
+
import time
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
from typing import Any, Dict, List, Optional
|
| 13 |
+
from urllib.parse import urlparse
|
| 14 |
+
|
| 15 |
+
import requests
|
| 16 |
+
import yaml
|
| 17 |
+
|
| 18 |
+
from hermes_constants import OPENROUTER_MODELS_URL
|
| 19 |
+
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
|
| 22 |
+
# Provider names that can appear as a "provider:" prefix before a model ID.
|
| 23 |
+
# Only these are stripped — Ollama-style "model:tag" colons (e.g. "qwen3.5:27b")
|
| 24 |
+
# are preserved so the full model name reaches cache lookups and server queries.
|
| 25 |
+
_PROVIDER_PREFIXES: frozenset[str] = frozenset({
|
| 26 |
+
"openrouter", "nous", "openai-codex", "copilot", "copilot-acp",
|
| 27 |
+
"zai", "kimi-coding", "minimax", "minimax-cn", "anthropic", "deepseek",
|
| 28 |
+
"opencode-zen", "opencode-go", "ai-gateway", "kilocode", "alibaba",
|
| 29 |
+
"custom", "local",
|
| 30 |
+
# Common aliases
|
| 31 |
+
"glm", "z-ai", "z.ai", "zhipu", "github", "github-copilot",
|
| 32 |
+
"github-models", "kimi", "moonshot", "claude", "deep-seek",
|
| 33 |
+
"opencode", "zen", "go", "vercel", "kilo", "dashscope", "aliyun", "qwen",
|
| 34 |
+
})
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
_OLLAMA_TAG_PATTERN = re.compile(
|
| 38 |
+
r"^(\d+\.?\d*b|latest|stable|q\d|fp?\d|instruct|chat|coder|vision|text)",
|
| 39 |
+
re.IGNORECASE,
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def _strip_provider_prefix(model: str) -> str:
|
| 44 |
+
"""Strip a recognised provider prefix from a model string.
|
| 45 |
+
|
| 46 |
+
``"local:my-model"`` → ``"my-model"``
|
| 47 |
+
``"qwen3.5:27b"`` → ``"qwen3.5:27b"`` (unchanged — not a provider prefix)
|
| 48 |
+
``"qwen:0.5b"`` → ``"qwen:0.5b"`` (unchanged — Ollama model:tag)
|
| 49 |
+
``"deepseek:latest"``→ ``"deepseek:latest"``(unchanged — Ollama model:tag)
|
| 50 |
+
"""
|
| 51 |
+
if ":" not in model or model.startswith("http"):
|
| 52 |
+
return model
|
| 53 |
+
prefix, suffix = model.split(":", 1)
|
| 54 |
+
prefix_lower = prefix.strip().lower()
|
| 55 |
+
if prefix_lower in _PROVIDER_PREFIXES:
|
| 56 |
+
# Don't strip if suffix looks like an Ollama tag (e.g. "7b", "latest", "q4_0")
|
| 57 |
+
if _OLLAMA_TAG_PATTERN.match(suffix.strip()):
|
| 58 |
+
return model
|
| 59 |
+
return suffix
|
| 60 |
+
return model
|
| 61 |
+
|
| 62 |
+
# In-memory cache of the OpenRouter catalogue: model id -> metadata entry.
_model_metadata_cache: Dict[str, Dict[str, Any]] = {}
_model_metadata_cache_time: float = 0  # time.time() of the last successful fetch
_MODEL_CACHE_TTL = 3600  # seconds (1 hour)
# Per-endpoint /models caches, keyed by base URL.
_endpoint_model_metadata_cache: Dict[str, Dict[str, Dict[str, Any]]] = {}
_endpoint_model_metadata_cache_time: Dict[str, float] = {}
_ENDPOINT_MODEL_CACHE_TTL = 300  # seconds (5 minutes)
|
| 68 |
+
|
| 69 |
+
# Descending tiers for context length probing when the model is unknown.
# We start at 128K (a safe default for most modern models) and step down
# on context-length errors until one works.
CONTEXT_PROBE_TIERS = [
    128_000,
    64_000,
    32_000,
    16_000,
    8_000,
]

# Default context length when no detection method succeeds.
DEFAULT_FALLBACK_CONTEXT = CONTEXT_PROBE_TIERS[0]  # 128K, the first probe tier
|
| 82 |
+
|
| 83 |
+
# Thin fallback defaults — only broad model family patterns.
# These fire only when provider is unknown AND models.dev/OpenRouter/Anthropic
# all miss. Replaced the previous 80+ entry dict.
# For provider-specific context lengths, models.dev is the primary source.
DEFAULT_CONTEXT_LENGTHS = {
    # Anthropic Claude 4.6 (1M context) — bare IDs only to avoid
    # fuzzy-match collisions (e.g. "anthropic/claude-sonnet-4" is a
    # substring of "anthropic/claude-sonnet-4.6").
    # OpenRouter-prefixed models resolve via OpenRouter live API or models.dev.
    "claude-opus-4-6": 1000000,
    "claude-sonnet-4-6": 1000000,
    "claude-opus-4.6": 1000000,
    "claude-sonnet-4.6": 1000000,
    # Catch-all for older Claude models (must sort after specific entries)
    "claude": 200000,
    # OpenAI
    "gpt-4.1": 1047576,
    "gpt-5": 128000,
    "gpt-4": 128000,
    # Google
    "gemini": 1048576,
    # DeepSeek
    "deepseek": 128000,
    # Meta
    "llama": 131072,
    # Qwen
    "qwen": 131072,
    # MiniMax
    "minimax": 204800,
    # GLM
    "glm": 202752,
    # Kimi
    "kimi": 262144,
}
|
| 117 |
+
|
| 118 |
+
# Keys that may carry a model's context-window size in a /models payload.
# Matched case-insensitively anywhere in the nested payload — see
# _extract_first_int for the traversal rules.
_CONTEXT_LENGTH_KEYS = (
    "context_length",
    "context_window",
    "max_context_length",
    "max_position_embeddings",
    "max_model_len",
    "max_input_tokens",
    "max_sequence_length",
    "max_seq_len",
    "n_ctx_train",
    "n_ctx",
)

# Keys that may carry a model's maximum completion/output token limit.
_MAX_COMPLETION_KEYS = (
    "max_completion_tokens",
    "max_output_tokens",
    "max_tokens",
)

# Local server hostnames / address patterns
_LOCAL_HOSTS = ("localhost", "127.0.0.1", "::1", "0.0.0.0")
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def _normalize_base_url(base_url: str) -> str:
|
| 142 |
+
return (base_url or "").strip().rstrip("/")
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def _is_openrouter_base_url(base_url: str) -> bool:
|
| 146 |
+
return "openrouter.ai" in _normalize_base_url(base_url).lower()
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def _is_custom_endpoint(base_url: str) -> bool:
|
| 150 |
+
normalized = _normalize_base_url(base_url)
|
| 151 |
+
return bool(normalized) and not _is_openrouter_base_url(normalized)
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
# Host fragment -> models.dev provider name. Fragments are substring-matched
# against the URL host by _infer_provider_from_url, so subdomains and ports
# still resolve.
_URL_TO_PROVIDER: Dict[str, str] = {
    "api.openai.com": "openai",
    "chatgpt.com": "openai",
    "api.anthropic.com": "anthropic",
    "api.z.ai": "zai",
    "api.moonshot.ai": "kimi-coding",
    "api.kimi.com": "kimi-coding",
    "api.minimax": "minimax",
    "dashscope.aliyuncs.com": "alibaba",
    "dashscope-intl.aliyuncs.com": "alibaba",
    "openrouter.ai": "openrouter",
    "inference-api.nousresearch.com": "nous",
    "api.deepseek.com": "deepseek",
    "api.githubcopilot.com": "copilot",
    "models.github.ai": "copilot",
}
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def _infer_provider_from_url(base_url: str) -> Optional[str]:
    """Infer the models.dev provider name from a base URL.

    This allows context length resolution via models.dev for custom endpoints
    like DashScope (Alibaba), Z.AI, Kimi, etc. without requiring the user to
    explicitly set the provider name in config. Returns ``None`` when the
    host matches no entry in ``_URL_TO_PROVIDER``.
    """
    normalized = _normalize_base_url(base_url)
    if not normalized:
        return None
    # urlparse only fills netloc when a scheme is present; default to https.
    url = normalized if "://" in normalized else f"https://{normalized}"
    parsed = urlparse(url)
    host = parsed.netloc.lower() or parsed.path.lower()
    # Substring match so subdomains and ports still resolve.
    return next(
        (provider for fragment, provider in _URL_TO_PROVIDER.items() if fragment in host),
        None,
    )
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def _is_known_provider_base_url(base_url: str) -> bool:
    """True when the base URL maps to a provider known to models.dev."""
    provider = _infer_provider_from_url(base_url)
    return provider is not None
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def is_local_endpoint(base_url: str) -> bool:
    """Return True if base_url points to a local machine (localhost / RFC-1918 / WSL)."""
    normalized = _normalize_base_url(base_url)
    if not normalized:
        return False
    # urlparse needs a scheme to populate .hostname; assume http when absent.
    url = normalized if "://" in normalized else f"http://{normalized}"
    try:
        parsed = urlparse(url)
        host = parsed.hostname or ""
    except Exception:
        return False
    # Fast path: well-known local hostnames/addresses.
    if host in _LOCAL_HOSTS:
        return True
    # RFC-1918 private ranges and link-local
    import ipaddress
    try:
        addr = ipaddress.ip_address(host)
        return addr.is_private or addr.is_loopback or addr.is_link_local
    except ValueError:
        pass
    # Bare IP that looks like a private range (e.g. 172.26.x.x for WSL)
    # NOTE(review): only reached when ipaddress rejected the literal (e.g.
    # leading-zero octets); canonical private IPs already returned above.
    parts = host.split(".")
    if len(parts) == 4:
        try:
            first, second = int(parts[0]), int(parts[1])
            if first == 10:
                return True
            if first == 172 and 16 <= second <= 31:
                return True
            if first == 192 and second == 168:
                return True
        except ValueError:
            pass
    return False
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
def detect_local_server_type(base_url: str) -> Optional[str]:
    """Detect which local server is running at base_url by probing known endpoints.

    Returns one of: "ollama", "lm-studio", "vllm", "llamacpp", or None.

    Probes run most-specific first; every request shares a 2-second timeout
    and any network error simply falls through to the next probe, so this
    never raises.
    """
    import httpx

    normalized = _normalize_base_url(base_url)
    server_url = normalized
    # Probes target server-root paths, so drop an OpenAI-style "/v1" suffix.
    if server_url.endswith("/v1"):
        server_url = server_url[:-3]

    try:
        with httpx.Client(timeout=2.0) as client:
            # LM Studio exposes /api/v1/models — check first (most specific)
            try:
                r = client.get(f"{server_url}/api/v1/models")
                if r.status_code == 200:
                    return "lm-studio"
            except Exception:
                pass
            # Ollama exposes /api/tags and responds with {"models": [...]}
            # LM Studio returns {"error": "Unexpected endpoint"} with status 200
            # on this path, so we must verify the response contains "models".
            try:
                r = client.get(f"{server_url}/api/tags")
                if r.status_code == 200:
                    try:
                        data = r.json()
                        if "models" in data:
                            return "ollama"
                    except Exception:
                        pass
            except Exception:
                pass
            # llama.cpp exposes /v1/props (older builds used /props without the /v1 prefix)
            try:
                r = client.get(f"{server_url}/v1/props")
                if r.status_code != 200:
                    r = client.get(f"{server_url}/props")  # fallback for older builds
                if r.status_code == 200 and "default_generation_settings" in r.text:
                    return "llamacpp"
            except Exception:
                pass
            # vLLM: /version
            try:
                r = client.get(f"{server_url}/version")
                if r.status_code == 200:
                    data = r.json()
                    if "version" in data:
                        return "vllm"
            except Exception:
                pass
    except Exception:
        pass

    return None
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
def _iter_nested_dicts(value: Any):
|
| 290 |
+
if isinstance(value, dict):
|
| 291 |
+
yield value
|
| 292 |
+
for nested in value.values():
|
| 293 |
+
yield from _iter_nested_dicts(nested)
|
| 294 |
+
elif isinstance(value, list):
|
| 295 |
+
for item in value:
|
| 296 |
+
yield from _iter_nested_dicts(item)
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
def _coerce_reasonable_int(value: Any, minimum: int = 1024, maximum: int = 10_000_000) -> Optional[int]:
|
| 300 |
+
try:
|
| 301 |
+
if isinstance(value, bool):
|
| 302 |
+
return None
|
| 303 |
+
if isinstance(value, str):
|
| 304 |
+
value = value.strip().replace(",", "")
|
| 305 |
+
result = int(value)
|
| 306 |
+
except (TypeError, ValueError):
|
| 307 |
+
return None
|
| 308 |
+
if minimum <= result <= maximum:
|
| 309 |
+
return result
|
| 310 |
+
return None
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
def _extract_first_int(payload: Dict[str, Any], keys: tuple[str, ...]) -> Optional[int]:
    """Return the first plausible int found under any of *keys* in *payload*.

    Walks every nested dict (via _iter_nested_dicts), matches key names
    case-insensitively, and validates candidates with _coerce_reasonable_int.
    """
    wanted = {key.lower() for key in keys}
    for mapping in _iter_nested_dicts(payload):
        for key, value in mapping.items():
            if str(key).lower() in wanted:
                coerced = _coerce_reasonable_int(value)
                if coerced is not None:
                    return coerced
    return None
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
def _extract_context_length(payload: Dict[str, Any]) -> Optional[int]:
    # First plausible context-window size found anywhere in the payload,
    # under any of the known key spellings (case-insensitive).
    return _extract_first_int(payload, _CONTEXT_LENGTH_KEYS)
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
def _extract_max_completion_tokens(payload: Dict[str, Any]) -> Optional[int]:
    # First plausible max-output/completion token limit found in the payload.
    return _extract_first_int(payload, _MAX_COMPLETION_KEYS)
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
def _extract_pricing(payload: Dict[str, Any]) -> Dict[str, Any]:
    """Pull a normalized pricing dict out of an arbitrary /models payload.

    Scans nested dicts for any of the provider-specific pricing key aliases
    and returns a dict with canonical keys (``prompt``, ``completion``,
    ``request``, ``cache_read``, ``cache_write``). The first nested dict
    containing at least one alias wins; for each canonical key the first
    alias with a non-empty value is used. Returns ``{}`` when nothing
    pricing-like is found.
    """
    # Canonical key -> provider-specific aliases, in priority order.
    alias_map = {
        "prompt": ("prompt", "input", "input_cost_per_token", "prompt_token_cost"),
        "completion": ("completion", "output", "output_cost_per_token", "completion_token_cost"),
        "request": ("request", "request_cost"),
        "cache_read": ("cache_read", "cached_prompt", "input_cache_read", "cache_read_cost_per_token"),
        "cache_write": ("cache_write", "cache_creation", "input_cache_write", "cache_write_cost_per_token"),
    }
    for mapping in _iter_nested_dicts(payload):
        # Lowercase keys once so alias lookups are case-insensitive.
        normalized = {str(key).lower(): value for key, value in mapping.items()}
        # Skip dicts that contain no pricing alias at all.
        if not any(any(alias in normalized for alias in aliases) for aliases in alias_map.values()):
            continue
        pricing: Dict[str, Any] = {}
        for target, aliases in alias_map.items():
            for alias in aliases:
                if alias in normalized and normalized[alias] not in (None, ""):
                    pricing[target] = normalized[alias]
                    break
        if pricing:
            return pricing
    return {}
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
def _add_model_aliases(cache: Dict[str, Dict[str, Any]], model_id: str, entry: Dict[str, Any]) -> None:
|
| 357 |
+
cache[model_id] = entry
|
| 358 |
+
if "/" in model_id:
|
| 359 |
+
bare_model = model_id.split("/", 1)[1]
|
| 360 |
+
cache.setdefault(bare_model, entry)
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
def fetch_model_metadata(force_refresh: bool = False) -> Dict[str, Dict[str, Any]]:
    """Fetch model metadata from OpenRouter (cached for 1 hour).

    Args:
        force_refresh: Bypass the in-memory cache and re-query OpenRouter.

    Returns:
        Mapping of model id — plus bare-id and canonical-slug aliases — to
        metadata dicts with keys ``context_length``, ``max_completion_tokens``,
        ``name`` and ``pricing``. On network failure, returns the previous
        cache contents (possibly ``{}``) instead of raising.
    """
    global _model_metadata_cache, _model_metadata_cache_time

    # Serve from the in-memory cache while it is still fresh.
    if not force_refresh and _model_metadata_cache and (time.time() - _model_metadata_cache_time) < _MODEL_CACHE_TTL:
        return _model_metadata_cache

    try:
        response = requests.get(OPENROUTER_MODELS_URL, timeout=10)
        response.raise_for_status()
        data = response.json()

        cache: Dict[str, Dict[str, Any]] = {}
        for model in data.get("data", []):
            model_id = model.get("id", "")
            entry = {
                "context_length": model.get("context_length", 128000),
                "max_completion_tokens": model.get("top_provider", {}).get("max_completion_tokens", 4096),
                "name": model.get("name", model_id),
                "pricing": model.get("pricing", {}),
            }
            _add_model_aliases(cache, model_id, entry)
            # Some models are also addressable via a canonical slug; alias it too.
            canonical = model.get("canonical_slug", "")
            if canonical and canonical != model_id:
                _add_model_aliases(cache, canonical, entry)

        _model_metadata_cache = cache
        _model_metadata_cache_time = time.time()
        logger.debug("Fetched metadata for %s models from OpenRouter", len(cache))
        return cache

    except Exception as e:
        # Fix: use the module logger with lazy %-args (was root-logger
        # `logging.warning(f"...")`), consistent with the rest of this module.
        logger.warning("Failed to fetch model metadata from OpenRouter: %s", e)
        return _model_metadata_cache or {}
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
def fetch_endpoint_model_metadata(
    base_url: str,
    api_key: str = "",
    force_refresh: bool = False,
) -> Dict[str, Dict[str, Any]]:
    """Fetch model metadata from an OpenAI-compatible ``/models`` endpoint.

    This is used for explicit custom endpoints where hardcoded global model-name
    defaults are unreliable. Results are cached in memory per base URL.

    Tries the normalized URL and its ``/v1`` (or non-``/v1``) variant; the first
    candidate that responds successfully wins. A failed fetch is cached as an
    empty dict so the endpoint is not hammered on every lookup.
    """
    normalized = _normalize_base_url(base_url)
    # OpenRouter has its own richer metadata path; skip it here.
    if not normalized or _is_openrouter_base_url(normalized):
        return {}

    # Per-URL in-memory cache with TTL.
    if not force_refresh:
        cached = _endpoint_model_metadata_cache.get(normalized)
        cached_at = _endpoint_model_metadata_cache_time.get(normalized, 0)
        if cached is not None and (time.time() - cached_at) < _ENDPOINT_MODEL_CACHE_TTL:
            return cached

    # Build candidate base URLs: as given, plus the "/v1"-toggled variant.
    candidates = [normalized]
    if normalized.endswith("/v1"):
        alternate = normalized[:-3].rstrip("/")
    else:
        alternate = normalized + "/v1"
    if alternate and alternate not in candidates:
        candidates.append(alternate)

    headers = {"Authorization": f"Bearer {api_key}"} if api_key else {}
    last_error: Optional[Exception] = None

    for candidate in candidates:
        url = candidate.rstrip("/") + "/models"
        try:
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()
            payload = response.json()
            cache: Dict[str, Dict[str, Any]] = {}
            for model in payload.get("data", []):
                if not isinstance(model, dict):
                    continue
                model_id = model.get("id")
                if not model_id:
                    continue
                # Only include keys the endpoint actually reported; absent
                # values fall through to later resolution steps.
                entry: Dict[str, Any] = {"name": model.get("name", model_id)}
                context_length = _extract_context_length(model)
                if context_length is not None:
                    entry["context_length"] = context_length
                max_completion_tokens = _extract_max_completion_tokens(model)
                if max_completion_tokens is not None:
                    entry["max_completion_tokens"] = max_completion_tokens
                pricing = _extract_pricing(model)
                if pricing:
                    entry["pricing"] = pricing
                _add_model_aliases(cache, model_id, entry)

            # If this is a llama.cpp server, query /props for actual allocated context
            is_llamacpp = any(
                m.get("owned_by") == "llamacpp"
                for m in payload.get("data", []) if isinstance(m, dict)
            )
            if is_llamacpp:
                try:
                    # Try /v1/props first (current llama.cpp); fall back to /props for older builds
                    # NOTE(review): .replace("/v1", "") strips every "/v1"
                    # substring, not just a trailing path segment — confirm
                    # this is safe for URLs with "/v1" elsewhere in the path.
                    base = candidate.rstrip("/").replace("/v1", "")
                    props_resp = requests.get(base + "/v1/props", headers=headers, timeout=5)
                    if not props_resp.ok:
                        props_resp = requests.get(base + "/props", headers=headers, timeout=5)
                    if props_resp.ok:
                        props = props_resp.json()
                        gen_settings = props.get("default_generation_settings", {})
                        n_ctx = gen_settings.get("n_ctx")
                        model_alias = props.get("model_alias", "")
                        # Override with the runtime-allocated context, which may
                        # be smaller than the model's theoretical maximum.
                        if n_ctx and model_alias and model_alias in cache:
                            cache[model_alias]["context_length"] = n_ctx
                except Exception:
                    # Best-effort enrichment only; keep the /models data.
                    pass

            _endpoint_model_metadata_cache[normalized] = cache
            _endpoint_model_metadata_cache_time[normalized] = time.time()
            return cache
        except Exception as exc:
            # Remember the failure and try the next candidate URL.
            last_error = exc

    if last_error:
        logger.debug("Failed to fetch model metadata from %s/models: %s", normalized, last_error)
    # Negative-cache the failure so we don't retry on every call within the TTL.
    _endpoint_model_metadata_cache[normalized] = {}
    _endpoint_model_metadata_cache_time[normalized] = time.time()
    return {}
|
| 488 |
+
|
| 489 |
+
|
| 490 |
+
def _get_context_cache_path() -> Path:
|
| 491 |
+
"""Return path to the persistent context length cache file."""
|
| 492 |
+
hermes_home = Path(os.environ.get("HERMES_HOME", Path.home() / ".hermes"))
|
| 493 |
+
return hermes_home / "context_length_cache.yaml"
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
def _load_context_cache() -> Dict[str, int]:
    """Read the persisted ``model@base_url -> context_length`` mapping.

    Returns ``{}`` when the cache file is missing or unreadable.
    """
    cache_file = _get_context_cache_path()
    if not cache_file.exists():
        return {}
    try:
        with open(cache_file) as handle:
            loaded = yaml.safe_load(handle) or {}
        return loaded.get("context_lengths", {})
    except Exception as exc:
        logger.debug("Failed to load context length cache: %s", exc)
        return {}
|
| 508 |
+
|
| 509 |
+
|
| 510 |
+
def save_context_length(model: str, base_url: str, length: int) -> None:
    """Persist a discovered context length for a model+provider combo.

    Cache key is ``model@base_url`` so the same model name served from
    different providers can have different limits. Write failures are
    logged at debug level and otherwise ignored (best-effort cache).
    """
    cache_key = f"{model}@{base_url}"
    stored = _load_context_cache()
    if stored.get(cache_key) == length:
        # Value already on disk — avoid a pointless rewrite.
        return
    stored[cache_key] = length
    cache_file = _get_context_cache_path()
    try:
        cache_file.parent.mkdir(parents=True, exist_ok=True)
        with open(cache_file, "w") as handle:
            yaml.dump({"context_lengths": stored}, handle, default_flow_style=False)
        logger.info("Cached context length %s -> %s tokens", cache_key, f"{length:,}")
    except Exception as exc:
        logger.debug("Failed to save context length cache: %s", exc)
|
| 529 |
+
|
| 530 |
+
|
| 531 |
+
def get_cached_context_length(model: str, base_url: str) -> Optional[int]:
    """Look up a previously discovered context length for model+provider.

    Returns None when no entry exists for the ``model@base_url`` key.
    """
    return _load_context_cache().get(f"{model}@{base_url}")
|
| 536 |
+
|
| 537 |
+
|
| 538 |
+
def get_next_probe_tier(current_length: int) -> Optional[int]:
    """Return the next lower probe tier, or None if already at minimum.

    Scans ``CONTEXT_PROBE_TIERS`` in declaration order and yields the first
    tier strictly below *current_length*.
    """
    return next((tier for tier in CONTEXT_PROBE_TIERS if tier < current_length), None)
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
def parse_context_limit_from_error(error_msg: str) -> Optional[int]:
    """Try to extract the actual context limit from an API error message.

    Many providers include the limit in their error text, e.g.:
    - "maximum context length is 32768 tokens"
    - "context_length_exceeded: 131072"
    - "Maximum context size 32768 exceeded"
    - "model's max context length is 65536"

    Returns the parsed limit, or None when nothing plausible is found.
    """
    lowered = error_msg.lower()
    # Ordered patterns: numbers adjacent to context/limit keywords.
    patterns = (
        r'(?:max(?:imum)?|limit)\s*(?:context\s*)?(?:length|size|window)?\s*(?:is|of|:)?\s*(\d{4,})',
        r'context\s*(?:length|size|window)\s*(?:is|of|:)?\s*(\d{4,})',
        r'(\d{4,})\s*(?:token)?\s*(?:context|limit)',
        r'>\s*(\d{4,})\s*(?:max|limit|token)',  # "250000 tokens > 200000 maximum"
        r'(\d{4,})\s*(?:max(?:imum)?)\b',  # "200000 maximum"
    )
    for pattern in patterns:
        found = re.search(pattern, lowered)
        if found is None:
            continue
        candidate = int(found.group(1))
        # Sanity check: must be a reasonable context length.
        if 1024 <= candidate <= 10_000_000:
            return candidate
    return None
|
| 572 |
+
|
| 573 |
+
|
| 574 |
+
def _model_id_matches(candidate_id: str, lookup_model: str) -> bool:
|
| 575 |
+
"""Return True if *candidate_id* (from server) matches *lookup_model* (configured).
|
| 576 |
+
|
| 577 |
+
Supports two forms:
|
| 578 |
+
- Exact match: "nvidia-nemotron-super-49b-v1" == "nvidia-nemotron-super-49b-v1"
|
| 579 |
+
- Slug match: "nvidia/nvidia-nemotron-super-49b-v1" matches "nvidia-nemotron-super-49b-v1"
|
| 580 |
+
(the part after the last "/" equals lookup_model)
|
| 581 |
+
|
| 582 |
+
This covers LM Studio's native API which stores models as "publisher/slug"
|
| 583 |
+
while users typically configure only the slug after the "local:" prefix.
|
| 584 |
+
"""
|
| 585 |
+
if candidate_id == lookup_model:
|
| 586 |
+
return True
|
| 587 |
+
# Slug match: basename of candidate equals the lookup name
|
| 588 |
+
if "/" in candidate_id and candidate_id.rsplit("/", 1)[1] == lookup_model:
|
| 589 |
+
return True
|
| 590 |
+
return False
|
| 591 |
+
|
| 592 |
+
|
| 593 |
+
def _query_local_context_length(model: str, base_url: str) -> Optional[int]:
    """Query a local server for the model's context length.

    Probes server-specific APIs first (Ollama ``/api/show``, LM Studio
    native ``/api/v1/models``), then falls back to generic OpenAI-compatible
    routes (``/v1/models/{model}``, then the ``/v1/models`` listing).

    Returns the context length in tokens, or None when it cannot be
    determined (any network/parse error is swallowed).
    """
    import httpx

    # Strip recognised provider prefix (e.g., "local:model-name" → "model-name").
    # Ollama "model:tag" colons (e.g. "qwen3.5:27b") are intentionally preserved.
    model = _strip_provider_prefix(model)

    # Strip /v1 suffix to get the server root
    server_url = base_url.rstrip("/")
    if server_url.endswith("/v1"):
        server_url = server_url[:-3]

    # Best-effort server-type detection; None means "unknown, use generic routes".
    try:
        server_type = detect_local_server_type(base_url)
    except Exception:
        server_type = None

    try:
        with httpx.Client(timeout=3.0) as client:
            # Ollama: /api/show returns model details with context info
            if server_type == "ollama":
                resp = client.post(f"{server_url}/api/show", json={"name": model})
                if resp.status_code == 200:
                    data = resp.json()
                    # Check model_info for context length
                    model_info = data.get("model_info", {})
                    for key, value in model_info.items():
                        # Key is architecture-prefixed (substring match, not equality).
                        if "context_length" in key and isinstance(value, (int, float)):
                            return int(value)
                    # Check parameters string for num_ctx
                    params = data.get("parameters", "")
                    if "num_ctx" in params:
                        for line in params.split("\n"):
                            if "num_ctx" in line:
                                parts = line.strip().split()
                                if len(parts) >= 2:
                                    try:
                                        # Last whitespace-separated token is the value.
                                        return int(parts[-1])
                                    except ValueError:
                                        pass

            # LM Studio native API: /api/v1/models returns max_context_length.
            # This is more reliable than the OpenAI-compat /v1/models which
            # doesn't include context window information for LM Studio servers.
            # Use _model_id_matches for fuzzy matching: LM Studio stores models as
            # "publisher/slug" but users configure only "slug" after "local:" prefix.
            if server_type == "lm-studio":
                resp = client.get(f"{server_url}/api/v1/models")
                if resp.status_code == 200:
                    data = resp.json()
                    for m in data.get("models", []):
                        if _model_id_matches(m.get("key", ""), model) or _model_id_matches(m.get("id", ""), model):
                            # Prefer loaded instance context (actual runtime value)
                            for inst in m.get("loaded_instances", []):
                                cfg = inst.get("config", {})
                                ctx = cfg.get("context_length")
                                if ctx and isinstance(ctx, (int, float)):
                                    return int(ctx)
                            # Fall back to max_context_length (theoretical model max)
                            ctx = m.get("max_context_length") or m.get("context_length")
                            if ctx and isinstance(ctx, (int, float)):
                                return int(ctx)

            # LM Studio / vLLM / llama.cpp: try /v1/models/{model}
            resp = client.get(f"{server_url}/v1/models/{model}")
            if resp.status_code == 200:
                data = resp.json()
                # vLLM returns max_model_len
                ctx = data.get("max_model_len") or data.get("context_length") or data.get("max_tokens")
                if ctx and isinstance(ctx, (int, float)):
                    return int(ctx)

            # Try /v1/models and find the model in the list.
            # Use _model_id_matches to handle "publisher/slug" vs bare "slug".
            resp = client.get(f"{server_url}/v1/models")
            if resp.status_code == 200:
                data = resp.json()
                models_list = data.get("data", [])
                for m in models_list:
                    if _model_id_matches(m.get("id", ""), model):
                        ctx = m.get("max_model_len") or m.get("context_length") or m.get("max_tokens")
                        if ctx and isinstance(ctx, (int, float)):
                            return int(ctx)
    except Exception:
        # Any failure means "couldn't determine"; caller falls back to defaults.
        pass

    return None
|
| 681 |
+
|
| 682 |
+
|
| 683 |
+
def _normalize_model_version(model: str) -> str:
|
| 684 |
+
"""Normalize version separators for matching.
|
| 685 |
+
|
| 686 |
+
Nous uses dashes: claude-opus-4-6, claude-sonnet-4-5
|
| 687 |
+
OpenRouter uses dots: claude-opus-4.6, claude-sonnet-4.5
|
| 688 |
+
Normalize both to dashes for comparison.
|
| 689 |
+
"""
|
| 690 |
+
return model.replace(".", "-")
|
| 691 |
+
|
| 692 |
+
|
| 693 |
+
def _query_anthropic_context_length(model: str, base_url: str, api_key: str) -> Optional[int]:
    """Query Anthropic's /v1/models endpoint for context length.

    Only works with regular ANTHROPIC_API_KEY (sk-ant-api*).
    OAuth tokens (sk-ant-oat*) from Claude Code return 401.

    Returns the ``max_input_tokens`` reported for *model*, or None when the
    key is an OAuth token, the request fails, or the model is not listed.
    """
    if not api_key or api_key.startswith("sk-ant-oat"):
        return None  # OAuth tokens can't access /v1/models
    try:
        # Normalise to the API root: drop a trailing "/v1" before re-adding it.
        base = base_url.rstrip("/")
        if base.endswith("/v1"):
            base = base[:-3]
        # limit=1000 avoids pagination for the current model catalogue.
        url = f"{base}/v1/models?limit=1000"
        headers = {
            "x-api-key": api_key,
            "anthropic-version": "2023-06-01",
        }
        resp = requests.get(url, headers=headers, timeout=10)
        if resp.status_code != 200:
            return None
        data = resp.json()
        for m in data.get("data", []):
            if m.get("id") == model:
                ctx = m.get("max_input_tokens")
                if isinstance(ctx, int) and ctx > 0:
                    return ctx
    except Exception as e:
        logger.debug("Anthropic /v1/models query failed: %s", e)
    return None
|
| 722 |
+
|
| 723 |
+
|
| 724 |
+
def _resolve_nous_context_length(model: str) -> Optional[int]:
    """Resolve Nous Portal model context length via OpenRouter metadata.

    Nous model IDs are bare (e.g. 'claude-opus-4-6') while OpenRouter uses
    prefixed IDs (e.g. 'anthropic/claude-opus-4.6'). Try suffix matching
    with version normalization (dot↔dash).

    Match order: exact id, then bare-suffix equality (case-insensitive,
    version-normalized), then word-boundary prefix match. Returns None when
    nothing matches.
    """
    metadata = fetch_model_metadata()  # OpenRouter cache
    # Exact match first
    if model in metadata:
        return metadata[model].get("context_length")

    normalized = _normalize_model_version(model).lower()

    # Suffix equality: compare against the part after the provider prefix.
    for or_id, entry in metadata.items():
        bare = or_id.split("/", 1)[1] if "/" in or_id else or_id
        if bare.lower() == model.lower() or _normalize_model_version(bare).lower() == normalized:
            return entry.get("context_length")

    # Partial prefix match for cases like gemini-3-flash → gemini-3-flash-preview
    # Require match to be at a word boundary (followed by -, :, or end of string)
    model_lower = model.lower()
    for or_id, entry in metadata.items():
        bare = or_id.split("/", 1)[1] if "/" in or_id else or_id
        # Try both raw and version-normalized spellings of the bare id.
        for candidate, query in [(bare.lower(), model_lower), (_normalize_model_version(bare).lower(), normalized)]:
            if candidate.startswith(query) and (
                len(candidate) == len(query) or candidate[len(query)] in "-:."
            ):
                return entry.get("context_length")

    return None
|
| 755 |
+
|
| 756 |
+
|
| 757 |
+
def get_model_context_length(
    model: str,
    base_url: str = "",
    api_key: str = "",
    config_context_length: int | None = None,
    provider: str = "",
) -> int:
    """Get the context length (in tokens) for a model.

    Resolution order (matches the numbered steps in the body):
    0. Explicit config override (model.context_length or custom_providers per-model)
    1. Persistent cache (previously discovered via probing)
    2. Active endpoint metadata (/models for explicit custom endpoints)
    3. Local server query, then probe-down default (unknown providers only)
    4. Anthropic /v1/models API (API-key users only, not OAuth)
    5. Provider-aware lookups: Nous suffix-match, then models.dev registry
    6. OpenRouter live API metadata
    7. Thin hardcoded defaults (broad family patterns)
    8. Local server query as last resort
    9. Default fallback (128K)
    """
    # 0. Explicit config override — user knows best
    if config_context_length is not None and isinstance(config_context_length, int) and config_context_length > 0:
        return config_context_length

    # Normalise provider-prefixed model names (e.g. "local:model-name" →
    # "model-name") so cache lookups and server queries use the bare ID that
    # local servers actually know about. Ollama "model:tag" colons are preserved.
    model = _strip_provider_prefix(model)

    # 1. Check persistent cache (model+provider)
    if base_url:
        cached = get_cached_context_length(model, base_url)
        if cached is not None:
            return cached

    # 2. Active endpoint metadata for truly custom/unknown endpoints.
    # Known providers (Copilot, OpenAI, Anthropic, etc.) skip this — their
    # /models endpoint may report a provider-imposed limit (e.g. Copilot
    # returns 128k) instead of the model's full context (400k). models.dev
    # has the correct per-provider values and is checked at step 5.
    if _is_custom_endpoint(base_url) and not _is_known_provider_base_url(base_url):
        endpoint_metadata = fetch_endpoint_model_metadata(base_url, api_key=api_key)
        matched = endpoint_metadata.get(model)
        if not matched:
            # Single-model servers: if only one model is loaded, use it
            if len(endpoint_metadata) == 1:
                matched = next(iter(endpoint_metadata.values()))
            else:
                # Fuzzy match: substring in either direction
                for key, entry in endpoint_metadata.items():
                    if model in key or key in model:
                        matched = entry
                        break
        if matched:
            context_length = matched.get("context_length")
            if isinstance(context_length, int):
                return context_length
    # NOTE(review): this block short-circuits all later steps for
    # unknown-provider URLs — confirm _is_known_provider_base_url treats an
    # empty base_url as "known" so default-provider lookups still run.
    if not _is_known_provider_base_url(base_url):
        # 3. Try querying local server directly
        if is_local_endpoint(base_url):
            local_ctx = _query_local_context_length(model, base_url)
            if local_ctx and local_ctx > 0:
                save_context_length(model, base_url, local_ctx)
                return local_ctx
        logger.info(
            "Could not detect context length for model %r at %s — "
            "defaulting to %s tokens (probe-down). Set model.context_length "
            "in config.yaml to override.",
            model, base_url, f"{DEFAULT_FALLBACK_CONTEXT:,}",
        )
        return DEFAULT_FALLBACK_CONTEXT

    # 4. Anthropic /v1/models API (only for regular API keys, not OAuth)
    if provider == "anthropic" or (
        base_url and "api.anthropic.com" in base_url
    ):
        ctx = _query_anthropic_context_length(model, base_url or "https://api.anthropic.com", api_key)
        if ctx:
            return ctx

    # 5. Provider-aware lookups (before generic OpenRouter cache)
    # These are provider-specific and take priority over the generic OR cache,
    # since the same model can have different context limits per provider
    # (e.g. claude-opus-4.6 is 1M on Anthropic but 128K on GitHub Copilot).
    # If provider is generic (openrouter/custom/empty), try to infer from URL.
    effective_provider = provider
    if not effective_provider or effective_provider in ("openrouter", "custom"):
        if base_url:
            inferred = _infer_provider_from_url(base_url)
            if inferred:
                effective_provider = inferred

    if effective_provider == "nous":
        ctx = _resolve_nous_context_length(model)
        if ctx:
            return ctx
    if effective_provider:
        from agent.models_dev import lookup_models_dev_context
        ctx = lookup_models_dev_context(effective_provider, model)
        if ctx:
            return ctx

    # 6. OpenRouter live API metadata (provider-unaware fallback)
    metadata = fetch_model_metadata()
    if model in metadata:
        return metadata[model].get("context_length", 128000)

    # 7. Hardcoded defaults (fuzzy match — longest key first for specificity)
    # Only check `default_model in model` (is the key a substring of the input).
    # The reverse (`model in default_model`) causes shorter names like
    # "claude-sonnet-4" to incorrectly match "claude-sonnet-4-6" and return 1M.
    model_lower = model.lower()
    for default_model, length in sorted(
        DEFAULT_CONTEXT_LENGTHS.items(), key=lambda x: len(x[0]), reverse=True
    ):
        if default_model in model_lower:
            return length

    # 8. Query local server as last resort
    if base_url and is_local_endpoint(base_url):
        local_ctx = _query_local_context_length(model, base_url)
        if local_ctx and local_ctx > 0:
            save_context_length(model, base_url, local_ctx)
            return local_ctx

    # 9. Default fallback — 128K
    return DEFAULT_FALLBACK_CONTEXT
|
| 885 |
+
|
| 886 |
+
|
| 887 |
+
def estimate_tokens_rough(text: str) -> int:
    """Rough token estimate (~4 chars/token) for pre-flight checks."""
    return len(text) // 4 if text else 0
|
| 892 |
+
|
| 893 |
+
|
| 894 |
+
def estimate_messages_tokens_rough(messages: List[Dict[str, Any]]) -> int:
    """Rough token estimate for a message list (pre-flight only).

    Uses the ~4 chars/token heuristic over each message's repr.
    """
    char_count = 0
    for message in messages:
        char_count += len(str(message))
    return char_count // 4
|
agent/models_dev.py
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Models.dev registry integration for provider-aware context length detection.
|
| 2 |
+
|
| 3 |
+
Fetches model metadata from https://models.dev/api.json — a community-maintained
|
| 4 |
+
database of 3800+ models across 100+ providers, including per-provider context
|
| 5 |
+
windows, pricing, and capabilities.
|
| 6 |
+
|
| 7 |
+
Data is cached in memory (1hr TTL) and on disk (~/.hermes/models_dev_cache.json)
|
| 8 |
+
to avoid cold-start network latency.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import json
|
| 12 |
+
import logging
|
| 13 |
+
import os
|
| 14 |
+
import time
|
| 15 |
+
from pathlib import Path
|
| 16 |
+
from typing import Any, Dict, Optional
|
| 17 |
+
|
| 18 |
+
import requests
|
| 19 |
+
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
|
| 22 |
+
# Public models.dev registry endpoint: one JSON dump of all providers/models.
MODELS_DEV_URL = "https://models.dev/api.json"
_MODELS_DEV_CACHE_TTL = 3600  # 1 hour in-memory

# In-memory cache
_models_dev_cache: Dict[str, Any] = {}
_models_dev_cache_time: float = 0  # epoch seconds of last successful refresh

# Provider ID mapping: Hermes provider names → models.dev provider IDs
# Providers absent from this map are skipped by lookup_models_dev_context.
PROVIDER_TO_MODELS_DEV: Dict[str, str] = {
    "openrouter": "openrouter",
    "anthropic": "anthropic",
    "zai": "zai",
    "kimi-coding": "kimi-for-coding",
    "minimax": "minimax",
    "minimax-cn": "minimax-cn",
    "deepseek": "deepseek",
    "alibaba": "alibaba",
    "copilot": "github-copilot",
    "ai-gateway": "vercel",
    "opencode-zen": "opencode",
    "opencode-go": "opencode-go",
    "kilocode": "kilo",
}
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def _get_cache_path() -> Path:
|
| 48 |
+
"""Return path to disk cache file."""
|
| 49 |
+
env_val = os.environ.get("HERMES_HOME", "")
|
| 50 |
+
hermes_home = Path(env_val) if env_val else Path.home() / ".hermes"
|
| 51 |
+
return hermes_home / "models_dev_cache.json"
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def _load_disk_cache() -> Dict[str, Any]:
    """Load models.dev data from the disk cache, returning {} on any failure."""
    try:
        cache_path = _get_cache_path()
        if cache_path.exists():
            return json.loads(cache_path.read_text(encoding="utf-8"))
    except Exception as exc:
        logger.debug("Failed to load models.dev disk cache: %s", exc)
    return {}
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def _save_disk_cache(data: Dict[str, Any]) -> None:
    """Best-effort write of models.dev data to the disk cache."""
    try:
        cache_path = _get_cache_path()
        cache_path.parent.mkdir(parents=True, exist_ok=True)
        serialized = json.dumps(data, separators=(",", ":"))
        cache_path.write_text(serialized, encoding="utf-8")
    except Exception as exc:
        logger.debug("Failed to save models.dev disk cache: %s", exc)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def fetch_models_dev(force_refresh: bool = False) -> Dict[str, Any]:
    """Fetch models.dev registry. In-memory cache (1hr) + disk fallback.

    Returns the full registry dict keyed by provider ID, or empty dict on failure.

    Args:
        force_refresh: Skip the in-memory cache and always hit the network.
    """
    global _models_dev_cache, _models_dev_cache_time

    # Check in-memory cache
    if (
        not force_refresh
        and _models_dev_cache
        and (time.time() - _models_dev_cache_time) < _MODELS_DEV_CACHE_TTL
    ):
        return _models_dev_cache

    # Try network fetch
    try:
        response = requests.get(MODELS_DEV_URL, timeout=15)
        response.raise_for_status()
        data = response.json()
        # Guard against an empty/unexpected payload clobbering a good cache.
        if isinstance(data, dict) and len(data) > 0:
            _models_dev_cache = data
            _models_dev_cache_time = time.time()
            _save_disk_cache(data)
            logger.debug(
                "Fetched models.dev registry: %d providers, %d total models",
                len(data),
                sum(len(p.get("models", {})) for p in data.values() if isinstance(p, dict)),
            )
            return data
    except Exception as e:
        logger.debug("Failed to fetch models.dev: %s", e)

    # Fall back to disk cache — use a short TTL (5 min) so we retry
    # the network fetch soon instead of serving stale data for a full hour.
    if not _models_dev_cache:
        _models_dev_cache = _load_disk_cache()
        if _models_dev_cache:
            # Back-date the timestamp so only ~300s of TTL remain.
            _models_dev_cache_time = time.time() - _MODELS_DEV_CACHE_TTL + 300
            logger.debug("Loaded models.dev from disk cache (%d providers)", len(_models_dev_cache))

    return _models_dev_cache
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def lookup_models_dev_context(provider: str, model: str) -> Optional[int]:
    """Look up context_length for a provider+model combo in models.dev.

    Returns the context window in tokens, or None when the provider is not
    mapped, the model is unknown, or the entry has no usable (non-zero)
    context value. Matching is exact first, then case-insensitive.
    """
    registry_id = PROVIDER_TO_MODELS_DEV.get(provider)
    if not registry_id:
        return None

    registry = fetch_models_dev()
    provider_entry = registry.get(registry_id)
    if not isinstance(provider_entry, dict):
        return None

    model_table = provider_entry.get("models", {})
    if not isinstance(model_table, dict):
        return None

    # Exact id match first.
    direct = model_table.get(model)
    if direct:
        ctx = _extract_context(direct)
        if ctx:
            return ctx

    # Fall back to a case-insensitive scan of the model table.
    wanted = model.lower()
    for model_id, model_entry in model_table.items():
        if model_id.lower() != wanted:
            continue
        ctx = _extract_context(model_entry)
        if ctx:
            return ctx

    return None
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def _extract_context(entry: Dict[str, Any]) -> Optional[int]:
|
| 159 |
+
"""Extract context_length from a models.dev model entry.
|
| 160 |
+
|
| 161 |
+
Returns None for invalid/zero values (some audio/image models have context=0).
|
| 162 |
+
"""
|
| 163 |
+
if not isinstance(entry, dict):
|
| 164 |
+
return None
|
| 165 |
+
limit = entry.get("limit")
|
| 166 |
+
if not isinstance(limit, dict):
|
| 167 |
+
return None
|
| 168 |
+
ctx = limit.get("context")
|
| 169 |
+
if isinstance(ctx, (int, float)) and ctx > 0:
|
| 170 |
+
return int(ctx)
|
| 171 |
+
return None
|
agent/prompt_builder.py
ADDED
|
@@ -0,0 +1,604 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""System prompt assembly -- identity, platform hints, skills index, context files.
|
| 2 |
+
|
| 3 |
+
All functions are stateless. AIAgent._build_system_prompt() calls these to
|
| 4 |
+
assemble pieces, then combines them with memory and ephemeral prompts.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import logging
|
| 8 |
+
import os
|
| 9 |
+
import re
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
from typing import Optional
|
| 12 |
+
|
| 13 |
+
logger = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
# ---------------------------------------------------------------------------
|
| 16 |
+
# Context file scanning — detect prompt injection in AGENTS.md, .cursorrules,
|
| 17 |
+
# SOUL.md before they get injected into the system prompt.
|
| 18 |
+
# ---------------------------------------------------------------------------
|
| 19 |
+
|
| 20 |
+
_CONTEXT_THREAT_PATTERNS = [
|
| 21 |
+
(r'ignore\s+(previous|all|above|prior)\s+instructions', "prompt_injection"),
|
| 22 |
+
(r'do\s+not\s+tell\s+the\s+user', "deception_hide"),
|
| 23 |
+
(r'system\s+prompt\s+override', "sys_prompt_override"),
|
| 24 |
+
(r'disregard\s+(your|all|any)\s+(instructions|rules|guidelines)', "disregard_rules"),
|
| 25 |
+
(r'act\s+as\s+(if|though)\s+you\s+(have\s+no|don\'t\s+have)\s+(restrictions|limits|rules)', "bypass_restrictions"),
|
| 26 |
+
(r'<!--[^>]*(?:ignore|override|system|secret|hidden)[^>]*-->', "html_comment_injection"),
|
| 27 |
+
(r'<\s*div\s+style\s*=\s*["\'].*display\s*:\s*none', "hidden_div"),
|
| 28 |
+
(r'translate\s+.*\s+into\s+.*\s+and\s+(execute|run|eval)', "translate_execute"),
|
| 29 |
+
(r'curl\s+[^\n]*\$\{?\w*(KEY|TOKEN|SECRET|PASSWORD|CREDENTIAL|API)', "exfil_curl"),
|
| 30 |
+
(r'cat\s+[^\n]*(\.env|credentials|\.netrc|\.pgpass)', "read_secrets"),
|
| 31 |
+
]
|
| 32 |
+
|
| 33 |
+
_CONTEXT_INVISIBLE_CHARS = {
|
| 34 |
+
'\u200b', '\u200c', '\u200d', '\u2060', '\ufeff',
|
| 35 |
+
'\u202a', '\u202b', '\u202c', '\u202d', '\u202e',
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def _scan_context_content(content: str, filename: str) -> str:
    """Scan context file content for prompt-injection indicators.

    Checks *content* against the invisible-unicode set and the threat
    regexes defined above. Clean content is returned unchanged; suspicious
    content is replaced wholesale with a block notice so it never reaches
    the system prompt.

    Args:
        content: Raw text of the context file.
        filename: Display name used in the log message and block notice.

    Returns:
        The original content, or a ``[BLOCKED: ...]`` placeholder.
    """
    findings = []

    # Zero-width / bidi-control characters can smuggle hidden instructions.
    for char in _CONTEXT_INVISIBLE_CHARS:
        if char in content:
            findings.append(f"invisible unicode U+{ord(char):04X}")

    # Known injection phrasings and exfiltration command shapes.
    for pattern, pid in _CONTEXT_THREAT_PATTERNS:
        if re.search(pattern, content, re.IGNORECASE):
            findings.append(pid)

    if findings:
        logger.warning("Context file %s blocked: %s", filename, ", ".join(findings))
        # BUG FIX: the notice previously hard-coded "(unknown)" where the
        # file name belongs; interpolate *filename* so the model and the
        # logs agree on which file was dropped.
        return (
            f"[BLOCKED: {filename} contained potential prompt injection "
            f"({', '.join(findings)}). Content not loaded.]"
        )

    return content
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def _find_git_root(start: Path) -> Optional[Path]:
|
| 61 |
+
"""Walk *start* and its parents looking for a ``.git`` directory.
|
| 62 |
+
|
| 63 |
+
Returns the directory containing ``.git``, or ``None`` if we hit the
|
| 64 |
+
filesystem root without finding one.
|
| 65 |
+
"""
|
| 66 |
+
current = start.resolve()
|
| 67 |
+
for parent in [current, *current.parents]:
|
| 68 |
+
if (parent / ".git").exists():
|
| 69 |
+
return parent
|
| 70 |
+
return None
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
_HERMES_MD_NAMES = (".hermes.md", "HERMES.md")
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def _find_hermes_md(cwd: Path) -> Optional[Path]:
    """Return the nearest ``.hermes.md`` / ``HERMES.md``, or ``None``.

    Walks upward from *cwd*, checking each directory for the candidate
    names in priority order. The git root itself is still inspected, but
    the walk stops there; with no git root it continues all the way to
    the filesystem root.
    """
    boundary = _find_git_root(cwd)
    base = cwd.resolve()

    for folder in (base, *base.parents):
        for name in _HERMES_MD_NAMES:
            candidate = folder / name
            if candidate.is_file():
                return candidate
        # The git root is the last directory we consider.
        if boundary and folder == boundary:
            break
    return None
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def _strip_yaml_frontmatter(content: str) -> str:
|
| 98 |
+
"""Remove optional YAML frontmatter (``---`` delimited) from *content*.
|
| 99 |
+
|
| 100 |
+
The frontmatter may contain structured config (model overrides, tool
|
| 101 |
+
settings) that will be handled separately in a future PR. For now we
|
| 102 |
+
strip it so only the human-readable markdown body is injected into the
|
| 103 |
+
system prompt.
|
| 104 |
+
"""
|
| 105 |
+
if content.startswith("---"):
|
| 106 |
+
end = content.find("\n---", 3)
|
| 107 |
+
if end != -1:
|
| 108 |
+
# Skip past the closing --- and any trailing newline
|
| 109 |
+
body = content[end + 4:].lstrip("\n")
|
| 110 |
+
return body if body else content
|
| 111 |
+
return content
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
# =========================================================================
|
| 115 |
+
# Constants
|
| 116 |
+
# =========================================================================
|
| 117 |
+
|
| 118 |
+
DEFAULT_AGENT_IDENTITY = (
|
| 119 |
+
"You are Hermes Agent, an intelligent AI assistant created by Nous Research. "
|
| 120 |
+
"You are helpful, knowledgeable, and direct. You assist users with a wide "
|
| 121 |
+
"range of tasks including answering questions, writing and editing code, "
|
| 122 |
+
"analyzing information, creative work, and executing actions via your tools. "
|
| 123 |
+
"You communicate clearly, admit uncertainty when appropriate, and prioritize "
|
| 124 |
+
"being genuinely useful over being verbose unless otherwise directed below. "
|
| 125 |
+
"Be targeted and efficient in your exploration and investigations."
|
| 126 |
+
)
|
| 127 |
+
|
| 128 |
+
MEMORY_GUIDANCE = (
|
| 129 |
+
"You have persistent memory across sessions. Save durable facts using the memory "
|
| 130 |
+
"tool: user preferences, environment details, tool quirks, and stable conventions. "
|
| 131 |
+
"Memory is injected into every turn, so keep it compact and focused on facts that "
|
| 132 |
+
"will still matter later.\n"
|
| 133 |
+
"Prioritize what reduces future user steering — the most valuable memory is one "
|
| 134 |
+
"that prevents the user from having to correct or remind you again. "
|
| 135 |
+
"User preferences and recurring corrections matter more than procedural task details.\n"
|
| 136 |
+
"Do NOT save task progress, session outcomes, completed-work logs, or temporary TODO "
|
| 137 |
+
"state to memory; use session_search to recall those from past transcripts. "
|
| 138 |
+
"If you've discovered a new way to do something, solved a problem that could be "
|
| 139 |
+
"necessary later, save it as a skill with the skill tool."
|
| 140 |
+
)
|
| 141 |
+
|
| 142 |
+
SESSION_SEARCH_GUIDANCE = (
|
| 143 |
+
"When the user references something from a past conversation or you suspect "
|
| 144 |
+
"relevant cross-session context exists, use session_search to recall it before "
|
| 145 |
+
"asking them to repeat themselves."
|
| 146 |
+
)
|
| 147 |
+
|
| 148 |
+
SKILLS_GUIDANCE = (
|
| 149 |
+
"After completing a complex task (5+ tool calls), fixing a tricky error, "
|
| 150 |
+
"or discovering a non-trivial workflow, save the approach as a "
|
| 151 |
+
"skill with skill_manage so you can reuse it next time.\n"
|
| 152 |
+
"When using a skill and finding it outdated, incomplete, or wrong, "
|
| 153 |
+
"patch it immediately with skill_manage(action='patch') — don't wait to be asked. "
|
| 154 |
+
"Skills that aren't maintained become liabilities."
|
| 155 |
+
)
|
| 156 |
+
|
| 157 |
+
PLATFORM_HINTS = {
|
| 158 |
+
"whatsapp": (
|
| 159 |
+
"You are on a text messaging communication platform, WhatsApp. "
|
| 160 |
+
"Please do not use markdown as it does not render. "
|
| 161 |
+
"You can send media files natively: to deliver a file to the user, "
|
| 162 |
+
"include MEDIA:/absolute/path/to/file in your response. The file "
|
| 163 |
+
"will be sent as a native WhatsApp attachment — images (.jpg, .png, "
|
| 164 |
+
".webp) appear as photos, videos (.mp4, .mov) play inline, and other "
|
| 165 |
+
"files arrive as downloadable documents. You can also include image "
|
| 166 |
+
"URLs in markdown format  and they will be sent as photos."
|
| 167 |
+
),
|
| 168 |
+
"telegram": (
|
| 169 |
+
"You are on a text messaging communication platform, Telegram. "
|
| 170 |
+
"Please do not use markdown as it does not render. "
|
| 171 |
+
"You can send media files natively: to deliver a file to the user, "
|
| 172 |
+
"include MEDIA:/absolute/path/to/file in your response. Images "
|
| 173 |
+
"(.png, .jpg, .webp) appear as photos, audio (.ogg) sends as voice "
|
| 174 |
+
"bubbles, and videos (.mp4) play inline. You can also include image "
|
| 175 |
+
"URLs in markdown format  and they will be sent as native photos."
|
| 176 |
+
),
|
| 177 |
+
"discord": (
|
| 178 |
+
"You are in a Discord server or group chat communicating with your user. "
|
| 179 |
+
"You can send media files natively: include MEDIA:/absolute/path/to/file "
|
| 180 |
+
"in your response. Images (.png, .jpg, .webp) are sent as photo "
|
| 181 |
+
"attachments, audio as file attachments. You can also include image URLs "
|
| 182 |
+
"in markdown format  and they will be sent as attachments."
|
| 183 |
+
),
|
| 184 |
+
"slack": (
|
| 185 |
+
"You are in a Slack workspace communicating with your user. "
|
| 186 |
+
"You can send media files natively: include MEDIA:/absolute/path/to/file "
|
| 187 |
+
"in your response. Images (.png, .jpg, .webp) are uploaded as photo "
|
| 188 |
+
"attachments, audio as file attachments. You can also include image URLs "
|
| 189 |
+
"in markdown format  and they will be uploaded as attachments."
|
| 190 |
+
),
|
| 191 |
+
"signal": (
|
| 192 |
+
"You are on a text messaging communication platform, Signal. "
|
| 193 |
+
"Please do not use markdown as it does not render. "
|
| 194 |
+
"You can send media files natively: to deliver a file to the user, "
|
| 195 |
+
"include MEDIA:/absolute/path/to/file in your response. Images "
|
| 196 |
+
"(.png, .jpg, .webp) appear as photos, audio as attachments, and other "
|
| 197 |
+
"files arrive as downloadable documents. You can also include image "
|
| 198 |
+
"URLs in markdown format  and they will be sent as photos."
|
| 199 |
+
),
|
| 200 |
+
"email": (
|
| 201 |
+
"You are communicating via email. Write clear, well-structured responses "
|
| 202 |
+
"suitable for email. Use plain text formatting (no markdown). "
|
| 203 |
+
"Keep responses concise but complete. You can send file attachments — "
|
| 204 |
+
"include MEDIA:/absolute/path/to/file in your response. The subject line "
|
| 205 |
+
"is preserved for threading. Do not include greetings or sign-offs unless "
|
| 206 |
+
"contextually appropriate."
|
| 207 |
+
),
|
| 208 |
+
"cron": (
|
| 209 |
+
"You are running as a scheduled cron job. There is no user present — you "
|
| 210 |
+
"cannot ask questions, request clarification, or wait for follow-up. Execute "
|
| 211 |
+
"the task fully and autonomously, making reasonable decisions where needed. "
|
| 212 |
+
"Your final response is automatically delivered to the job's configured "
|
| 213 |
+
"destination — put the primary content directly in your response."
|
| 214 |
+
),
|
| 215 |
+
"cli": (
|
| 216 |
+
"You are a CLI AI Agent. Try not to use markdown but simple text "
|
| 217 |
+
"renderable inside a terminal."
|
| 218 |
+
),
|
| 219 |
+
"sms": (
|
| 220 |
+
"You are communicating via SMS. Keep responses concise and use plain text "
|
| 221 |
+
"only — no markdown, no formatting. SMS messages are limited to ~1600 "
|
| 222 |
+
"characters, so be brief and direct."
|
| 223 |
+
),
|
| 224 |
+
}
|
| 225 |
+
|
| 226 |
+
CONTEXT_FILE_MAX_CHARS = 20_000
|
| 227 |
+
CONTEXT_TRUNCATE_HEAD_RATIO = 0.7
|
| 228 |
+
CONTEXT_TRUNCATE_TAIL_RATIO = 0.2
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
# =========================================================================
|
| 232 |
+
# Skills index
|
| 233 |
+
# =========================================================================
|
| 234 |
+
|
| 235 |
+
def _parse_skill_file(skill_file: Path) -> tuple[bool, dict, str]:
    """Read a SKILL.md once; return (is_compatible, frontmatter, description).

    Only the first 2000 chars are read — frontmatter lives at the top of
    the file. Skills whose frontmatter declares an incompatible OS
    platform come back as (False, {}, ""). On any read/parse error we
    return (True, {}, "") so the skill is still listed rather than
    silently hidden.
    """
    try:
        from tools.skills_tool import _parse_frontmatter, skill_matches_platform

        header = skill_file.read_text(encoding="utf-8")[:2000]
        meta, _ = _parse_frontmatter(header)

        if not skill_matches_platform(meta):
            return False, {}, ""

        # Normalize the description: strip surrounding quotes/whitespace
        # and cap it so the index stays compact.
        summary = ""
        raw_summary = meta.get("description", "")
        if raw_summary:
            summary = str(raw_summary).strip().strip("'\"")
            if len(summary) > 60:
                summary = summary[:57] + "..."

        return True, meta, summary
    except Exception as e:
        logger.debug("Failed to parse skill file %s: %s", skill_file, e)
        return True, {}, ""
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
def _read_skill_conditions(skill_file: Path) -> dict:
    """Pull conditional-activation fields out of SKILL.md frontmatter.

    Reads ``metadata.hermes.{fallback_for_toolsets, requires_toolsets,
    fallback_for_tools, requires_tools}`` from the first 2000 chars of
    the file. Any read/parse failure yields an empty dict, which callers
    treat as "no conditions".
    """
    try:
        from tools.skills_tool import _parse_frontmatter

        header = skill_file.read_text(encoding="utf-8")[:2000]
        meta, _ = _parse_frontmatter(header)
        hermes_cfg = meta.get("metadata", {}).get("hermes", {})
        keys = (
            "fallback_for_toolsets",
            "requires_toolsets",
            "fallback_for_tools",
            "requires_tools",
        )
        return {key: hermes_cfg.get(key, []) for key in keys}
    except Exception as e:
        logger.debug("Failed to read skill conditions from %s: %s", skill_file, e)
        return {}
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
def _skill_should_show(
|
| 282 |
+
conditions: dict,
|
| 283 |
+
available_tools: "set[str] | None",
|
| 284 |
+
available_toolsets: "set[str] | None",
|
| 285 |
+
) -> bool:
|
| 286 |
+
"""Return False if the skill's conditional activation rules exclude it."""
|
| 287 |
+
if available_tools is None and available_toolsets is None:
|
| 288 |
+
return True # No filtering info — show everything (backward compat)
|
| 289 |
+
|
| 290 |
+
at = available_tools or set()
|
| 291 |
+
ats = available_toolsets or set()
|
| 292 |
+
|
| 293 |
+
# fallback_for: hide when the primary tool/toolset IS available
|
| 294 |
+
for ts in conditions.get("fallback_for_toolsets", []):
|
| 295 |
+
if ts in ats:
|
| 296 |
+
return False
|
| 297 |
+
for t in conditions.get("fallback_for_tools", []):
|
| 298 |
+
if t in at:
|
| 299 |
+
return False
|
| 300 |
+
|
| 301 |
+
# requires: hide when a required tool/toolset is NOT available
|
| 302 |
+
for ts in conditions.get("requires_toolsets", []):
|
| 303 |
+
if ts not in ats:
|
| 304 |
+
return False
|
| 305 |
+
for t in conditions.get("requires_tools", []):
|
| 306 |
+
if t not in at:
|
| 307 |
+
return False
|
| 308 |
+
|
| 309 |
+
return True
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def build_skills_system_prompt(
    available_tools: "set[str] | None" = None,
    available_toolsets: "set[str] | None" = None,
) -> str:
    """Build a compact skill index for the system prompt.

    Scans ~/.hermes/skills/ for SKILL.md files grouped by category.
    Includes per-skill descriptions from frontmatter so the model can
    match skills by meaning, not just name.
    Filters out skills incompatible with the current OS platform.

    Args:
        available_tools: Tool names currently enabled, used for the
            conditional-activation filter; None disables that filter.
        available_toolsets: Toolset names currently enabled; None
            disables that filter.

    Returns:
        A "## Skills (mandatory)" prompt section with an
        <available_skills> index, or "" when no skills apply.
    """
    hermes_home = Path(os.getenv("HERMES_HOME", Path.home() / ".hermes"))
    skills_dir = hermes_home / "skills"

    if not skills_dir.exists():
        return ""

    # Collect skills with descriptions, grouped by category.
    # Each entry: (skill_name, description)
    # Supports sub-categories: skills/mlops/training/axolotl/SKILL.md
    #   -> category "mlops/training", skill "axolotl"
    # Load disabled skill names once for the entire scan
    try:
        from tools.skills_tool import _get_disabled_skill_names
        disabled = _get_disabled_skill_names()
    except Exception:
        # Best-effort: if the config can't be read, show all skills.
        disabled = set()

    skills_by_category: dict[str, list[tuple[str, str]]] = {}
    for skill_file in skills_dir.rglob("SKILL.md"):
        is_compatible, frontmatter, desc = _parse_skill_file(skill_file)
        if not is_compatible:
            continue
        # Derive category and skill name from the path layout:
        # <skills_dir>/<category...>/<skill>/SKILL.md
        rel_path = skill_file.relative_to(skills_dir)
        parts = rel_path.parts
        if len(parts) >= 2:
            skill_name = parts[-2]
            category = "/".join(parts[:-2]) if len(parts) > 2 else parts[0]
        else:
            # SKILL.md sitting directly in skills_dir (no category folder).
            category = "general"
            skill_name = skill_file.parent.name
        # Respect user's disabled skills config
        fm_name = frontmatter.get("name", skill_name)
        if fm_name in disabled or skill_name in disabled:
            continue
        # Skip skills whose conditional activation rules exclude them
        conditions = _read_skill_conditions(skill_file)
        if not _skill_should_show(conditions, available_tools, available_toolsets):
            continue
        skills_by_category.setdefault(category, []).append((skill_name, desc))

    if not skills_by_category:
        return ""

    # Read category-level descriptions from DESCRIPTION.md
    # Checks both the exact category path and parent directories
    category_descriptions: dict[str, str] = {}
    for category in skills_by_category:
        cat_path = Path(category)
        desc_file = skills_dir / cat_path / "DESCRIPTION.md"
        if desc_file.exists():
            try:
                content = desc_file.read_text(encoding="utf-8")
                # Pull the description: value out of the frontmatter block.
                match = re.search(r"^---\s*\n.*?description:\s*(.+?)\s*\n.*?^---", content, re.MULTILINE | re.DOTALL)
                if match:
                    category_descriptions[category] = match.group(1).strip()
            except Exception as e:
                logger.debug("Could not read skill description %s: %s", desc_file, e)

    # Render the index: one line per category, indented skill entries below.
    index_lines: list[str] = []
    for category in sorted(skills_by_category.keys()):
        cat_desc = category_descriptions.get(category, "")
        if cat_desc:
            index_lines.append(f" {category}: {cat_desc}")
        else:
            index_lines.append(f" {category}:")
        # Deduplicate and sort skills within each category
        seen = set()
        for name, desc in sorted(skills_by_category[category], key=lambda x: x[0]):
            if name in seen:
                continue
            seen.add(name)
            if desc:
                index_lines.append(f" - {name}: {desc}")
            else:
                index_lines.append(f" - {name}")

    return (
        "## Skills (mandatory)\n"
        "Before replying, scan the skills below. If one clearly matches your task, "
        "load it with skill_view(name) and follow its instructions. "
        "If a skill has issues, fix it with skill_manage(action='patch').\n"
        "After difficult/iterative tasks, offer to save as a skill. "
        "If a skill you loaded was missing steps, had wrong commands, or needed "
        "pitfalls you discovered, update it before finishing.\n"
        "\n"
        "<available_skills>\n"
        + "\n".join(index_lines) + "\n"
        "</available_skills>\n"
        "\n"
        "If none match, proceed normally without loading a skill."
    )
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
# =========================================================================
|
| 417 |
+
# Context files (SOUL.md, AGENTS.md, .cursorrules)
|
| 418 |
+
# =========================================================================
|
| 419 |
+
|
| 420 |
+
def _truncate_content(content: str, filename: str, max_chars: int = CONTEXT_FILE_MAX_CHARS) -> str:
    """Cap *content* at *max_chars* using head/tail truncation.

    Keeps roughly the first 70% and last 20% of the budget (the remaining
    10% is headroom for the marker) and splices an explanatory marker in
    between. Content within the budget is returned unchanged.

    Args:
        content: Text to cap.
        filename: Display name used in the truncation marker.
        max_chars: Overall character budget (defaults to the module cap).

    Returns:
        The original content, or head + marker + tail when over budget.
    """
    if len(content) <= max_chars:
        return content
    head_chars = int(max_chars * CONTEXT_TRUNCATE_HEAD_RATIO)
    tail_chars = int(max_chars * CONTEXT_TRUNCATE_TAIL_RATIO)
    head = content[:head_chars]
    tail = content[-tail_chars:]
    # BUG FIX: the marker previously hard-coded "(unknown)" where the file
    # name belongs (the *filename* parameter was otherwise unused here);
    # interpolate it so readers know which file was cut.
    marker = (
        f"\n\n[...truncated {filename}: kept {head_chars}+{tail_chars} of "
        f"{len(content)} chars. Use file tools to read the full file.]\n\n"
    )
    return head + marker + tail
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
def load_soul_md() -> Optional[str]:
    """Load SOUL.md from HERMES_HOME and return its content, or None.

    Used as the agent identity (slot #1 in the system prompt). When this
    returns content, ``build_context_files_prompt`` should be called with
    ``skip_soul=True`` so SOUL.md isn't injected twice. Content is
    injection-scanned and size-capped before being returned.
    """
    # Best-effort: make sure the home directory exists before reading.
    try:
        from hermes_cli.config import ensure_hermes_home
        ensure_hermes_home()
    except Exception as e:
        logger.debug("Could not ensure HERMES_HOME before loading SOUL.md: %s", e)

    soul_path = Path(os.getenv("HERMES_HOME", Path.home() / ".hermes")) / "SOUL.md"
    if not soul_path.exists():
        return None
    try:
        text = soul_path.read_text(encoding="utf-8").strip()
        if not text:
            return None
        text = _scan_context_content(text, "SOUL.md")
        return _truncate_content(text, "SOUL.md")
    except Exception as e:
        logger.debug("Could not read SOUL.md from %s: %s", soul_path, e)
        return None
|
| 458 |
+
|
| 459 |
+
|
| 460 |
+
def _load_hermes_md(cwd_path: Path) -> str:
    """Load the nearest .hermes.md / HERMES.md (walking up to the git root).

    Returns a "## <label>" section ready for the system prompt, or ""
    when no file is found, it is empty, or reading fails. YAML
    frontmatter is stripped and the body is injection-scanned.
    """
    found = _find_hermes_md(cwd_path)
    if not found:
        return ""
    try:
        body = found.read_text(encoding="utf-8").strip()
        if not body:
            return ""
        body = _strip_yaml_frontmatter(body)
        # Prefer a cwd-relative label; fall back to the bare filename when
        # the file lives outside cwd (e.g. at the git root above it).
        try:
            label = str(found.relative_to(cwd_path))
        except ValueError:
            label = found.name
        body = _scan_context_content(body, label)
        return _truncate_content(f"## {label}\n\n{body}", ".hermes.md")
    except Exception as e:
        logger.debug("Could not read %s: %s", found, e)
        return ""
|
| 481 |
+
|
| 482 |
+
|
| 483 |
+
def _load_agents_md(cwd_path: Path) -> str:
    """Load AGENTS.md hierarchically: cwd first, then nested directories.

    Activates only when an AGENTS.md/agents.md exists at the top level of
    *cwd_path*; nested copies are then appended shallowest-first. Hidden
    directories and common dependency/venv directories are pruned from
    the walk. Returns "" when the top-level file is absent or nothing
    readable is found.
    """
    # Gate on a top-level file — without one, nested copies are ignored.
    if not any((cwd_path / name).exists() for name in ("AGENTS.md", "agents.md")):
        return ""

    skip_dirs = ('node_modules', '__pycache__', 'venv', '.venv')
    found: "list[Path]" = []
    for root, dirs, files in os.walk(cwd_path):
        # Prune in place so os.walk never descends into these.
        dirs[:] = [d for d in dirs if not d.startswith('.') and d not in skip_dirs]
        found.extend(Path(root) / f for f in files if f.lower() == "agents.md")
    # Shallowest paths first so parent guidance precedes child overrides.
    found.sort(key=lambda p: len(p.parts))

    sections = []
    for agents_path in found:
        try:
            text = agents_path.read_text(encoding="utf-8").strip()
        except Exception as e:
            logger.debug("Could not read %s: %s", agents_path, e)
            continue
        if not text:
            continue
        rel_path = agents_path.relative_to(cwd_path)
        text = _scan_context_content(text, str(rel_path))
        sections.append(f"## {rel_path}\n\n{text}\n\n")

    if not sections:
        return ""
    return _truncate_content("".join(sections), "AGENTS.md")
|
| 517 |
+
|
| 518 |
+
|
| 519 |
+
def _load_claude_md(cwd_path: Path) -> str:
    """Load CLAUDE.md (or claude.md) from cwd only.

    Tries each name in order; an empty or unreadable candidate falls
    through to the next name. Returns "" when nothing usable is found.
    """
    for name in ("CLAUDE.md", "claude.md"):
        candidate = cwd_path / name
        if not candidate.exists():
            continue
        try:
            text = candidate.read_text(encoding="utf-8").strip()
        except Exception as e:
            logger.debug("Could not read %s: %s", candidate, e)
            continue
        if not text:
            continue
        text = _scan_context_content(text, name)
        return _truncate_content(f"## {name}\n\n{text}", "CLAUDE.md")
    return ""
|
| 533 |
+
|
| 534 |
+
|
| 535 |
+
def _load_cursorrules(cwd_path: Path) -> str:
    """Load .cursorrules plus .cursor/rules/*.mdc from cwd only.

    Each readable, non-empty file becomes its own "## <name>" section;
    .mdc files are appended in sorted order after .cursorrules. Returns
    "" when no usable rules exist.
    """
    pieces: "list[str]" = []

    rules_file = cwd_path / ".cursorrules"
    if rules_file.exists():
        try:
            text = rules_file.read_text(encoding="utf-8").strip()
            if text:
                text = _scan_context_content(text, ".cursorrules")
                pieces.append(f"## .cursorrules\n\n{text}\n\n")
        except Exception as e:
            logger.debug("Could not read .cursorrules: %s", e)

    rules_dir = cwd_path / ".cursor" / "rules"
    if rules_dir.is_dir():
        for mdc_file in sorted(rules_dir.glob("*.mdc")):
            try:
                text = mdc_file.read_text(encoding="utf-8").strip()
            except Exception as e:
                logger.debug("Could not read %s: %s", mdc_file, e)
                continue
            if not text:
                continue
            label = f".cursor/rules/{mdc_file.name}"
            text = _scan_context_content(text, label)
            pieces.append(f"## {label}\n\n{text}\n\n")

    if not pieces:
        return ""
    return _truncate_content("".join(pieces), ".cursorrules")
|
| 563 |
+
|
| 564 |
+
|
| 565 |
+
def build_context_files_prompt(cwd: Optional[str] = None, skip_soul: bool = False) -> str:
    """Discover and load context files for the system prompt.

    Exactly one project context source is used — first match wins:
      1. .hermes.md / HERMES.md (walk to git root)
      2. AGENTS.md / agents.md (recursive directory walk)
      3. CLAUDE.md / claude.md (cwd only)
      4. .cursorrules / .cursor/rules/*.mdc (cwd only)

    SOUL.md from HERMES_HOME is independent and always included when present.
    Each context source is capped at 20,000 chars.

    When *skip_soul* is True, SOUL.md is not included here (it was already
    loaded via ``load_soul_md()`` for the identity slot).
    """
    base = Path(cwd if cwd is not None else os.getcwd()).resolve()
    sections: list[str] = []

    # Priority-ordered project loaders; stop at the first one that yields content.
    for loader in (_load_hermes_md, _load_agents_md, _load_claude_md, _load_cursorrules):
        found = loader(base)
        if found:
            sections.append(found)
            break

    # SOUL.md from HERMES_HOME only — skipped when already used as identity.
    if not skip_soul:
        soul = load_soul_md()
        if soul:
            sections.append(soul)

    if not sections:
        return ""
    header = "# Project Context\n\nThe following project context files have been loaded and should be followed:\n\n"
    return header + "\n".join(sections)
|
agent/prompt_caching.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Anthropic prompt caching (system_and_3 strategy).
|
| 2 |
+
|
| 3 |
+
Reduces input token costs by ~75% on multi-turn conversations by caching
|
| 4 |
+
the conversation prefix. Uses 4 cache_control breakpoints (Anthropic max):
|
| 5 |
+
1. System prompt (stable across all turns)
|
| 6 |
+
2-4. Last 3 non-system messages (rolling window)
|
| 7 |
+
|
| 8 |
+
Pure functions -- no class state, no AIAgent dependency.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import copy
|
| 12 |
+
from typing import Any, Dict, List
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def _apply_cache_marker(msg: dict, cache_marker: dict, native_anthropic: bool = False) -> None:
|
| 16 |
+
"""Add cache_control to a single message, handling all format variations."""
|
| 17 |
+
role = msg.get("role", "")
|
| 18 |
+
content = msg.get("content")
|
| 19 |
+
|
| 20 |
+
if role == "tool":
|
| 21 |
+
if native_anthropic:
|
| 22 |
+
msg["cache_control"] = cache_marker
|
| 23 |
+
return
|
| 24 |
+
|
| 25 |
+
if content is None or content == "":
|
| 26 |
+
msg["cache_control"] = cache_marker
|
| 27 |
+
return
|
| 28 |
+
|
| 29 |
+
if isinstance(content, str):
|
| 30 |
+
msg["content"] = [
|
| 31 |
+
{"type": "text", "text": content, "cache_control": cache_marker}
|
| 32 |
+
]
|
| 33 |
+
return
|
| 34 |
+
|
| 35 |
+
if isinstance(content, list) and content:
|
| 36 |
+
last = content[-1]
|
| 37 |
+
if isinstance(last, dict):
|
| 38 |
+
last["cache_control"] = cache_marker
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def apply_anthropic_cache_control(
    api_messages: List[Dict[str, Any]],
    cache_ttl: str = "5m",
    native_anthropic: bool = False,
) -> List[Dict[str, Any]]:
    """Apply the system_and_3 caching strategy for Anthropic models.

    The breakpoint budget is 4 (the Anthropic maximum): one is spent on the
    leading system prompt when present, and the remainder go on the last
    non-system messages (a rolling window of up to 3 alongside a system
    prompt, up to 4 without one).

    Args:
        api_messages: Chat messages in dict form.
        cache_ttl: "5m" (default) or "1h"; "1h" adds an explicit ttl field.
        native_anthropic: Forwarded to the per-message marker helper.

    Returns:
        A deep copy of *api_messages* with cache_control markers injected;
        the input list is never mutated.
    """
    result = copy.deepcopy(api_messages)
    if not result:
        return result

    marker: Dict[str, str] = {"type": "ephemeral"}
    if cache_ttl == "1h":
        marker["ttl"] = "1h"

    budget = 4
    if result[0].get("role") == "system":
        _apply_cache_marker(result[0], marker, native_anthropic=native_anthropic)
        budget -= 1

    non_system = [i for i, m in enumerate(result) if m.get("role") != "system"]
    for i in non_system[-budget:]:
        _apply_cache_marker(result[i], marker, native_anthropic=native_anthropic)

    return result
|
agent/redact.py
ADDED
|
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Regex-based secret redaction for logs and tool output.
|
| 2 |
+
|
| 3 |
+
Applies pattern matching to mask API keys, tokens, and credentials
|
| 4 |
+
before they reach log files, verbose output, or gateway logs.
|
| 5 |
+
|
| 6 |
+
Short tokens (< 18 chars) are fully masked. Longer tokens preserve
|
| 7 |
+
the first 6 and last 4 characters for debuggability.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import logging
|
| 11 |
+
import os
|
| 12 |
+
import re
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
# Known API key prefixes -- match the prefix + contiguous token chars.
# Each entry requires a minimum tail length so short ordinary words that
# happen to share a prefix are not redacted.
_PREFIX_PATTERNS = [
    r"sk-[A-Za-z0-9_-]{10,}",  # OpenAI / OpenRouter / Anthropic (sk-ant-*)
    r"ghp_[A-Za-z0-9]{10,}",  # GitHub PAT (classic)
    r"github_pat_[A-Za-z0-9_]{10,}",  # GitHub PAT (fine-grained)
    r"xox[baprs]-[A-Za-z0-9-]{10,}",  # Slack tokens
    r"AIza[A-Za-z0-9_-]{30,}",  # Google API keys
    r"pplx-[A-Za-z0-9]{10,}",  # Perplexity
    r"fal_[A-Za-z0-9_-]{10,}",  # Fal.ai
    r"fc-[A-Za-z0-9]{10,}",  # Firecrawl
    r"bb_live_[A-Za-z0-9_-]{10,}",  # BrowserBase
    r"gAAAA[A-Za-z0-9_=-]{20,}",  # Codex encrypted tokens
    r"AKIA[A-Z0-9]{16}",  # AWS Access Key ID
    r"sk_live_[A-Za-z0-9]{10,}",  # Stripe secret key (live)
    r"sk_test_[A-Za-z0-9]{10,}",  # Stripe secret key (test)
    r"rk_live_[A-Za-z0-9]{10,}",  # Stripe restricted key
    r"SG\.[A-Za-z0-9_-]{10,}",  # SendGrid API key
    r"hf_[A-Za-z0-9]{10,}",  # HuggingFace token
    r"r8_[A-Za-z0-9]{10,}",  # Replicate API token
    r"npm_[A-Za-z0-9]{10,}",  # npm access token
    r"pypi-[A-Za-z0-9_-]{10,}",  # PyPI API token
    r"dop_v1_[A-Za-z0-9]{10,}",  # DigitalOcean PAT
    r"doo_v1_[A-Za-z0-9]{10,}",  # DigitalOcean OAuth
    r"am_[A-Za-z0-9_-]{10,}",  # AgentMail API key
]

# ENV assignment patterns: KEY=value where KEY contains a secret-like name.
# The \2 backreference forces the closing quote to match the opening one
# (or both to be absent).
_SECRET_ENV_NAMES = r"(?:API_?KEY|TOKEN|SECRET|PASSWORD|PASSWD|CREDENTIAL|AUTH)"
_ENV_ASSIGN_RE = re.compile(
    rf"([A-Z_]*{_SECRET_ENV_NAMES}[A-Z_]*)\s*=\s*(['\"]?)(\S+)\2",
    re.IGNORECASE,
)

# JSON field patterns: "apiKey": "value", "token": "value", etc.
_JSON_KEY_NAMES = r"(?:api_?[Kk]ey|token|secret|password|access_token|refresh_token|auth_token|bearer|secret_value|raw_secret|secret_input|key_material)"
_JSON_FIELD_RE = re.compile(
    rf'("{_JSON_KEY_NAMES}")\s*:\s*"([^"]+)"',
    re.IGNORECASE,
)

# Authorization headers: only the bearer credential (group 2) is masked.
_AUTH_HEADER_RE = re.compile(
    r"(Authorization:\s*Bearer\s+)(\S+)",
    re.IGNORECASE,
)

# Telegram bot tokens: bot<digits>:<token> or <digits>:<token>,
# where token part is restricted to [-A-Za-z0-9_] and length >= 30
_TELEGRAM_RE = re.compile(
    r"(bot)?(\d{8,}):([-A-Za-z0-9_]{30,})",
)

# Private key blocks: -----BEGIN RSA PRIVATE KEY----- ... -----END RSA PRIVATE KEY-----
# Non-greedy body match so multiple adjacent blocks are redacted separately.
_PRIVATE_KEY_RE = re.compile(
    r"-----BEGIN[A-Z ]*PRIVATE KEY-----[\s\S]*?-----END[A-Z ]*PRIVATE KEY-----"
)

# Database connection strings: protocol://user:PASSWORD@host
# Catches postgres, mysql, mongodb, redis, amqp URLs and redacts the password
_DB_CONNSTR_RE = re.compile(
    r"((?:postgres(?:ql)?|mysql|mongodb(?:\+srv)?|redis|amqp)://[^:]+:)([^@]+)(@)",
    re.IGNORECASE,
)

# E.164 phone numbers: +<country><number>, 7-15 digits
# Negative lookahead prevents matching hex strings or identifiers
_SIGNAL_PHONE_RE = re.compile(r"(\+[1-9]\d{6,14})(?![A-Za-z0-9])")

# Compile known prefix patterns into one alternation; the lookbehind /
# lookahead guards keep the match from starting or ending inside a longer
# identifier.
_PREFIX_RE = re.compile(
    r"(?<![A-Za-z0-9_-])(" + "|".join(_PREFIX_PATTERNS) + r")(?![A-Za-z0-9_-])"
)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def _mask_token(token: str) -> str:
|
| 91 |
+
"""Mask a token, preserving prefix for long tokens."""
|
| 92 |
+
if len(token) < 18:
|
| 93 |
+
return "***"
|
| 94 |
+
return f"{token[:6]}...{token[-4:]}"
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def redact_sensitive_text(text: str) -> str:
    """Apply all redaction patterns to a block of text.

    Safe to call on any value -- non-string input is stringified first and
    non-matching text passes through unchanged; None is returned as-is.
    Redaction is disabled when the HERMES_REDACT_SECRETS environment
    variable is set to a falsy value ("0"/"false"/"no"/"off") -- presumably
    driven by security.redact_secrets in config.yaml; confirm against the
    config loader.

    The passes run in a fixed order: known key prefixes first, then
    ENV assignments, JSON fields, Authorization headers, Telegram tokens,
    private-key blocks, DB connection-string passwords, and finally
    E.164 phone numbers.
    """
    if text is None:
        return None
    if not isinstance(text, str):
        text = str(text)
    if not text:
        return text
    # Opt-out switch: any falsy spelling disables redaction entirely.
    if os.getenv("HERMES_REDACT_SECRETS", "").lower() in ("0", "false", "no", "off"):
        return text

    # Known prefixes (sk-, ghp_, etc.)
    text = _PREFIX_RE.sub(lambda m: _mask_token(m.group(1)), text)

    # ENV assignments: OPENAI_API_KEY=sk-abc...
    def _redact_env(m):
        # group(2) is the (possibly empty) quote character, reproduced on
        # both sides of the masked value.
        name, quote, value = m.group(1), m.group(2), m.group(3)
        return f"{name}={quote}{_mask_token(value)}{quote}"
    text = _ENV_ASSIGN_RE.sub(_redact_env, text)

    # JSON fields: "apiKey": "value"
    def _redact_json(m):
        key, value = m.group(1), m.group(2)
        return f'{key}: "{_mask_token(value)}"'
    text = _JSON_FIELD_RE.sub(_redact_json, text)

    # Authorization headers
    text = _AUTH_HEADER_RE.sub(
        lambda m: m.group(1) + _mask_token(m.group(2)),
        text,
    )

    # Telegram bot tokens: the bot-id digits stay visible, the secret part
    # after the colon is fully masked.
    def _redact_telegram(m):
        prefix = m.group(1) or ""
        digits = m.group(2)
        return f"{prefix}{digits}:***"
    text = _TELEGRAM_RE.sub(_redact_telegram, text)

    # Private key blocks
    text = _PRIVATE_KEY_RE.sub("[REDACTED PRIVATE KEY]", text)

    # Database connection string passwords
    text = _DB_CONNSTR_RE.sub(lambda m: f"{m.group(1)}***{m.group(3)}", text)

    # E.164 phone numbers (Signal, WhatsApp): keep a 2+2 or 4+4 window
    # depending on length for debuggability.
    def _redact_phone(m):
        phone = m.group(1)
        if len(phone) <= 8:
            return phone[:2] + "****" + phone[-2:]
        return phone[:4] + "****" + phone[-4:]
    text = _SIGNAL_PHONE_RE.sub(_redact_phone, text)

    return text
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
class RedactingFormatter(logging.Formatter):
    """Log formatter that redacts secrets from every formatted record.

    Drop-in replacement for ``logging.Formatter``: rendering is delegated
    to the base class, then the finished line is passed through
    ``redact_sensitive_text()`` so secrets never reach handlers verbatim.
    The previous explicit ``__init__`` only forwarded its arguments to the
    base class unchanged, so it has been removed; construction is exactly
    the inherited ``logging.Formatter`` signature.
    """

    def format(self, record: logging.LogRecord) -> str:
        """Format *record* normally, then mask any secrets in the result."""
        rendered = super().format(record)
        return redact_sensitive_text(rendered)
|
agent/skill_commands.py
ADDED
|
@@ -0,0 +1,282 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Shared slash command helpers for skills and built-in prompt-style modes.
|
| 2 |
+
|
| 3 |
+
Shared between CLI (cli.py) and gateway (gateway/run.py) so both surfaces
|
| 4 |
+
can invoke skills via /skill-name commands and prompt-only built-ins like
|
| 5 |
+
/plan.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import json
|
| 9 |
+
import logging
|
| 10 |
+
import re
|
| 11 |
+
from datetime import datetime
|
| 12 |
+
from pathlib import Path
|
| 13 |
+
from typing import Any, Dict, Optional
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
_skill_commands: Dict[str, Dict[str, Any]] = {}
|
| 18 |
+
_PLAN_SLUG_RE = re.compile(r"[^a-z0-9]+")
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def build_plan_path(
|
| 22 |
+
user_instruction: str = "",
|
| 23 |
+
*,
|
| 24 |
+
now: datetime | None = None,
|
| 25 |
+
) -> Path:
|
| 26 |
+
"""Return the default workspace-relative markdown path for a /plan invocation.
|
| 27 |
+
|
| 28 |
+
Relative paths are intentional: file tools are task/backend-aware and resolve
|
| 29 |
+
them against the active working directory for local, docker, ssh, modal,
|
| 30 |
+
daytona, and similar terminal backends. That keeps the plan with the active
|
| 31 |
+
workspace instead of the Hermes host's global home directory.
|
| 32 |
+
"""
|
| 33 |
+
slug_source = (user_instruction or "").strip().splitlines()[0] if user_instruction else ""
|
| 34 |
+
slug = _PLAN_SLUG_RE.sub("-", slug_source.lower()).strip("-")
|
| 35 |
+
if slug:
|
| 36 |
+
slug = "-".join(part for part in slug.split("-")[:8] if part)[:48].strip("-")
|
| 37 |
+
slug = slug or "conversation-plan"
|
| 38 |
+
timestamp = (now or datetime.now()).strftime("%Y-%m-%d_%H%M%S")
|
| 39 |
+
return Path(".hermes") / "plans" / f"{timestamp}-{slug}.md"
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def _load_skill_payload(skill_identifier: str, task_id: str | None = None) -> tuple[dict[str, Any], Path | None, str] | None:
    """Resolve a skill by name/path and return (loaded_payload, skill_dir, display_name).

    Absolute identifiers are rewritten to a SKILLS_DIR-relative key when
    possible; any import, lookup, or parse failure yields None instead of
    raising.
    """
    identifier = (skill_identifier or "").strip()
    if not identifier:
        return None

    try:
        from tools.skills_tool import SKILLS_DIR, skill_view

        as_path = Path(identifier).expanduser()
        if as_path.is_absolute():
            # Prefer a SKILLS_DIR-relative key; fall back to the raw value.
            try:
                lookup_key = str(as_path.resolve().relative_to(SKILLS_DIR.resolve()))
            except Exception:
                lookup_key = identifier
        else:
            lookup_key = identifier.lstrip("/")

        payload = json.loads(skill_view(lookup_key, task_id=task_id))
    except Exception:
        return None

    if not payload.get("success"):
        return None

    display_name = str(payload.get("name") or lookup_key)
    rel_path = str(payload.get("path") or "")
    skill_dir: Path | None = None
    if rel_path:
        try:
            skill_dir = SKILLS_DIR / Path(rel_path).parent
        except Exception:
            skill_dir = None

    return payload, skill_dir, display_name
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def _build_skill_message(
    loaded_skill: dict[str, Any],
    skill_dir: Path | None,
    activation_note: str,
    user_instruction: str = "",
    runtime_note: str = "",
) -> str:
    """Format a loaded skill into a user/system message payload.

    Args:
        loaded_skill: Parsed skill_view() payload (content, setup flags,
            linked_files, ...).
        skill_dir: Absolute skill directory; used to discover supporting
            files and to compute the skill_view() hint target. May be None.
        activation_note: Leading "[SYSTEM: ...]" line explaining why the
            skill content follows.
        user_instruction: Optional text the user typed alongside the command.
        runtime_note: Optional trailing "[Runtime note: ...]" context.

    Returns:
        The fully assembled message, sections joined with newlines.
    """
    from tools.skills_tool import SKILLS_DIR

    content = str(loaded_skill.get("content") or "")

    parts = [activation_note, "", content.strip()]

    # Setup notes are mutually exclusive, highest priority first:
    # skipped setup > gateway hint > pending setup note.
    if loaded_skill.get("setup_skipped"):
        parts.extend(
            [
                "",
                "[Skill setup note: Required environment setup was skipped. Continue loading the skill and explain any reduced functionality if it matters.]",
            ]
        )
    elif loaded_skill.get("gateway_setup_hint"):
        parts.extend(
            [
                "",
                f"[Skill setup note: {loaded_skill['gateway_setup_hint']}]",
            ]
        )
    elif loaded_skill.get("setup_needed") and loaded_skill.get("setup_note"):
        parts.extend(
            [
                "",
                f"[Skill setup note: {loaded_skill['setup_note']}]",
            ]
        )

    # Supporting files: prefer the payload's linked_files index; otherwise
    # fall back to scanning the conventional subdirectories on disk.
    supporting = []
    linked_files = loaded_skill.get("linked_files") or {}
    for entries in linked_files.values():
        if isinstance(entries, list):
            supporting.extend(entries)

    if not supporting and skill_dir:
        for subdir in ("references", "templates", "scripts", "assets"):
            subdir_path = skill_dir / subdir
            if subdir_path.exists():
                for f in sorted(subdir_path.rglob("*")):
                    if f.is_file():
                        rel = str(f.relative_to(skill_dir))
                        supporting.append(rel)

    if supporting and skill_dir:
        # skill_view() addresses skills relative to SKILLS_DIR.
        skill_view_target = str(skill_dir.relative_to(SKILLS_DIR))
        parts.append("")
        parts.append("[This skill has supporting files you can load with the skill_view tool:]")
        for sf in supporting:
            parts.append(f"- {sf}")
        parts.append(
            f'\nTo view any of these, use: skill_view(name="{skill_view_target}", file_path="<path>")'
        )

    if user_instruction:
        parts.append("")
        parts.append(f"The user has provided the following instruction alongside the skill invocation: {user_instruction}")

    if runtime_note:
        parts.append("")
        parts.append(f"[Runtime note: {runtime_note}]")

    return "\n".join(parts)
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def scan_skill_commands() -> Dict[str, Dict[str, Any]]:
    """Scan ~/.hermes/skills/ and return a mapping of /command -> skill info.

    Rebuilds the module-level cache from scratch on every call. Individual
    unreadable/invalid skills are skipped silently; a missing skills tree or
    import failure yields an empty mapping.

    Returns:
        Dict mapping "/skill-name" to {name, description, skill_md_path, skill_dir}.
    """
    global _skill_commands
    _skill_commands = {}
    try:
        from tools.skills_tool import SKILLS_DIR, _parse_frontmatter, skill_matches_platform, _get_disabled_skill_names
        if not SKILLS_DIR.exists():
            return _skill_commands
        disabled = _get_disabled_skill_names()
        for skill_md in SKILLS_DIR.rglob("SKILL.md"):
            # Ignore repository metadata directories anywhere in the path.
            if any(part in ('.git', '.github', '.hub') for part in skill_md.parts):
                continue
            try:
                content = skill_md.read_text(encoding='utf-8')
                frontmatter, body = _parse_frontmatter(content)
                # Skip skills incompatible with the current OS platform
                if not skill_matches_platform(frontmatter):
                    continue
                name = frontmatter.get('name', skill_md.parent.name)
                # Respect user's disabled skills config
                if name in disabled:
                    continue
                description = frontmatter.get('description', '')
                if not description:
                    # Fall back to the first non-heading body line (max 80 chars).
                    for line in body.strip().split('\n'):
                        line = line.strip()
                        if line and not line.startswith('#'):
                            description = line[:80]
                            break
                # Slash commands are lowercase and hyphenated.
                cmd_name = name.lower().replace(' ', '-').replace('_', '-')
                _skill_commands[f"/{cmd_name}"] = {
                    "name": name,
                    "description": description or f"Invoke the {name} skill",
                    "skill_md_path": str(skill_md),
                    "skill_dir": str(skill_md.parent),
                }
            except Exception:
                # One bad skill must not abort the whole scan.
                continue
    except Exception:
        # Best-effort: no skills tooling available -> empty mapping.
        pass
    return _skill_commands
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
def get_skill_commands() -> Dict[str, Dict[str, Any]]:
    """Return the cached /command -> skill mapping, scanning on first use."""
    # scan_skill_commands() repopulates and returns the same module-level dict.
    return _skill_commands if _skill_commands else scan_skill_commands()
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
def build_skill_invocation_message(
    cmd_key: str,
    user_instruction: str = "",
    task_id: str | None = None,
    runtime_note: str = "",
) -> Optional[str]:
    """Build the user message content for a skill slash command invocation.

    Args:
        cmd_key: The command key including leading slash (e.g., "/gif-search").
        user_instruction: Optional text the user typed after the command.
        task_id: Active task id forwarded to the skill loader.
        runtime_note: Optional runtime context appended to the message.

    Returns:
        The formatted message string, None when *cmd_key* is unknown, or a
        short failure notice when the skill exists but could not be loaded.
    """
    skill_info = get_skill_commands().get(cmd_key)
    if not skill_info:
        return None

    payload = _load_skill_payload(skill_info["skill_dir"], task_id=task_id)
    if not payload:
        return f"[Failed to load skill: {skill_info['name']}]"

    loaded_skill, skill_dir, skill_name = payload
    note = (
        f'[SYSTEM: The user has invoked the "{skill_name}" skill, indicating they want '
        "you to follow its instructions. The full skill content is loaded below.]"
    )
    return _build_skill_message(
        loaded_skill,
        skill_dir,
        note,
        user_instruction=user_instruction,
        runtime_note=runtime_note,
    )
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
def build_preloaded_skills_prompt(
    skill_identifiers: list[str],
    task_id: str | None = None,
) -> tuple[str, list[str], list[str]]:
    """Load one or more skills for session-wide CLI preloading.

    Blank and duplicate identifiers are skipped; order is otherwise preserved.

    Returns:
        (prompt_text, loaded_skill_names, missing_identifiers)
    """
    sections: list[str] = []
    loaded_names: list[str] = []
    missing: list[str] = []
    seen: set[str] = set()

    for raw in skill_identifiers:
        identifier = (raw or "").strip()
        if not identifier or identifier in seen:
            continue
        seen.add(identifier)

        payload = _load_skill_payload(identifier, task_id=task_id)
        if not payload:
            missing.append(identifier)
            continue

        loaded_skill, skill_dir, skill_name = payload
        note = (
            f'[SYSTEM: The user launched this CLI session with the "{skill_name}" skill '
            "preloaded. Treat its instructions as active guidance for the duration of this "
            "session unless the user overrides them.]"
        )
        sections.append(_build_skill_message(loaded_skill, skill_dir, note))
        loaded_names.append(skill_name)

    return "\n\n".join(sections), loaded_names, missing
|
agent/smart_model_routing.py
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Helpers for optional cheap-vs-strong model routing."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import os
|
| 6 |
+
import re
|
| 7 |
+
from typing import Any, Dict, Optional
|
| 8 |
+
|
| 9 |
+
# Keyword signals that a turn likely needs the strong model: anything that
# reads like coding, debugging, infra, planning, or tool orchestration work.
# Matching is on whole whitespace-separated tokens (punctuation stripped).
_COMPLEX_KEYWORDS = {
    "debug",
    "debugging",
    "implement",
    "implementation",
    "refactor",
    "patch",
    "traceback",
    "stacktrace",
    "exception",
    "error",
    "analyze",
    "analysis",
    "investigate",
    "architecture",
    "design",
    "compare",
    "benchmark",
    "optimize",
    "optimise",
    "review",
    "terminal",
    "shell",
    "tool",
    "tools",
    "pytest",
    "test",
    "tests",
    "plan",
    "planning",
    "delegate",
    "subagent",
    "cron",
    "docker",
    "kubernetes",
}

# Messages containing URLs are never routed to the cheap model.
_URL_RE = re.compile(r"https?://|www\.", re.IGNORECASE)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def _coerce_bool(value: Any, default: bool = False) -> bool:
|
| 50 |
+
if value is None:
|
| 51 |
+
return default
|
| 52 |
+
if isinstance(value, bool):
|
| 53 |
+
return value
|
| 54 |
+
if isinstance(value, str):
|
| 55 |
+
return value.strip().lower() in {"1", "true", "yes", "on"}
|
| 56 |
+
return bool(value)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def _coerce_int(value: Any, default: int) -> int:
|
| 60 |
+
try:
|
| 61 |
+
return int(value)
|
| 62 |
+
except (TypeError, ValueError):
|
| 63 |
+
return default
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def choose_cheap_model_route(user_message: str, routing_config: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
    """Return the configured cheap-model route when a message looks simple.

    Conservative by design: any hint of code, tools, links, multi-line or
    long-form content, or complex-work keywords keeps the primary model
    (returns None).
    """
    cfg = routing_config or {}
    if not _coerce_bool(cfg.get("enabled"), False):
        return None

    cheap = cfg.get("cheap_model") or {}
    if not isinstance(cheap, dict):
        return None
    provider = str(cheap.get("provider") or "").strip().lower()
    model = str(cheap.get("model") or "").strip()
    if not (provider and model):
        return None

    text = (user_message or "").strip()
    if not text:
        return None

    # Size gates: short single-paragraph messages only.
    if len(text) > _coerce_int(cfg.get("max_simple_chars"), 160):
        return None
    if len(text.split()) > _coerce_int(cfg.get("max_simple_words"), 28):
        return None
    if text.count("\n") > 1:
        return None

    # Content gates: inline/fenced code, URLs, complex-work keywords.
    if "`" in text:  # a single backtick also covers ``` fences
        return None
    if _URL_RE.search(text):
        return None
    tokens = {tok.strip(".,:;!?()[]{}\"'`") for tok in text.lower().split()}
    if tokens & _COMPLEX_KEYWORDS:
        return None

    route = dict(cheap)
    route["provider"] = provider
    route["model"] = model
    route["routing_reason"] = "simple_turn"
    return route
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def resolve_turn_route(user_message: str, routing_config: Optional[Dict[str, Any]], primary: Dict[str, Any]) -> Dict[str, Any]:
    """Resolve the effective model/runtime for one turn.

    Tries the cheap-model "smart route" first; if no cheap route applies,
    or the runtime provider for that route cannot be resolved, falls back
    to the caller-supplied primary configuration.

    Args:
        user_message: The user's message text for this turn.
        routing_config: Optional smart-routing configuration mapping
            (passed through to ``choose_cheap_model_route``).
        primary: The default model/runtime configuration dict to fall
            back to; read via ``.get`` for model/api_key/base_url/
            provider/api_mode/command/args.

    Returns:
        A dict with ``model``, ``runtime``, ``label`` and ``signature``
        fields. ``label`` is ``None`` when the primary route is used;
        ``signature`` is a hashable tuple identifying the route so
        callers can detect route changes between turns.
    """

    def _route_entry(model: Any, cfg: Dict[str, Any], label: Optional[str]) -> Dict[str, Any]:
        # Single builder for all three result paths (primary fallback x2,
        # smart route). The runtime dict and the signature tuple are derived
        # from the same fields, so keeping them in one place prevents the
        # previously-duplicated copies from drifting apart.
        return {
            "model": model,
            "runtime": {
                "api_key": cfg.get("api_key"),
                "base_url": cfg.get("base_url"),
                "provider": cfg.get("provider"),
                "api_mode": cfg.get("api_mode"),
                "command": cfg.get("command"),
                # Copy/normalize so callers cannot mutate the source config.
                "args": list(cfg.get("args") or []),
            },
            "label": label,
            "signature": (
                model,
                cfg.get("provider"),
                cfg.get("base_url"),
                cfg.get("api_mode"),
                cfg.get("command"),
                tuple(cfg.get("args") or ()),
            ),
        }

    route = choose_cheap_model_route(user_message, routing_config)
    if not route:
        # No cheap route applies to this turn: use the primary as-is.
        return _route_entry(primary.get("model"), primary, None)

    # Imported lazily so the module has no hard dependency on the runtime
    # provider unless smart routing actually triggers.
    from hermes_cli.runtime_provider import resolve_runtime_provider

    explicit_api_key = None
    api_key_env = str(route.get("api_key_env") or "").strip()
    if api_key_env:
        # An unset/empty env var resolves to None so the provider can fall
        # back to its own key-discovery logic.
        explicit_api_key = os.getenv(api_key_env) or None

    try:
        runtime = resolve_runtime_provider(
            requested=route.get("provider"),
            explicit_api_key=explicit_api_key,
            explicit_base_url=route.get("base_url"),
        )
    except Exception:
        # Best-effort: if the cheap provider cannot be resolved for any
        # reason, silently fall back to the primary route rather than
        # failing the whole turn.
        return _route_entry(primary.get("model"), primary, None)

    return _route_entry(
        route.get("model"),
        runtime,
        f"smart route → {route.get('model')} ({runtime.get('provider')})",
    )
|