Spaces:
Running
Running
Commit ·
4dc70fb
0
Parent(s):
feat: Initial commit - OpenCode API (getitdone-api)
Browse files
LLM Agent API Server for aicampus platform
- FastAPI + SSE streaming
- Multi-provider support (Anthropic, OpenAI, Gemini, LiteLLM)
- Tool system (WebSearch, WebFetch, Todo, Question, Skill)
- Supabase integration for session storage
Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This view is limited to 50 files because it contains too many changes.
See raw diff
- .env.example +22 -0
- .gitignore +42 -0
- Dockerfile +19 -0
- README.md +98 -0
- ROADMAP.md +251 -0
- app.py +92 -0
- docs/OPENCODE_COMPARISON.md +270 -0
- pyproject.toml +54 -0
- requirements.txt +38 -0
- sql/001_opencode_tables.sql +143 -0
- sql/002_add_reasoning_and_tool_status.sql +26 -0
- src/opencode_api/__init__.py +3 -0
- src/opencode_api/agent/__init__.py +35 -0
- src/opencode_api/agent/agent.py +215 -0
- src/opencode_api/agent/prompts/anthropic.txt +85 -0
- src/opencode_api/agent/prompts/beast.txt +103 -0
- src/opencode_api/agent/prompts/gemini.txt +67 -0
- src/opencode_api/core/__init__.py +8 -0
- src/opencode_api/core/auth.py +73 -0
- src/opencode_api/core/bus.py +153 -0
- src/opencode_api/core/config.py +101 -0
- src/opencode_api/core/identifier.py +69 -0
- src/opencode_api/core/quota.py +91 -0
- src/opencode_api/core/storage.py +145 -0
- src/opencode_api/core/supabase.py +25 -0
- src/opencode_api/provider/__init__.py +37 -0
- src/opencode_api/provider/anthropic.py +204 -0
- src/opencode_api/provider/gemini.py +260 -0
- src/opencode_api/provider/litellm.py +363 -0
- src/opencode_api/provider/openai.py +182 -0
- src/opencode_api/provider/provider.py +133 -0
- src/opencode_api/routes/__init__.py +7 -0
- src/opencode_api/routes/agent.py +66 -0
- src/opencode_api/routes/event.py +45 -0
- src/opencode_api/routes/provider.py +107 -0
- src/opencode_api/routes/question.py +55 -0
- src/opencode_api/routes/session.py +206 -0
- src/opencode_api/session/__init__.py +11 -0
- src/opencode_api/session/message.py +348 -0
- src/opencode_api/session/processor.py +193 -0
- src/opencode_api/session/prompt.py +701 -0
- src/opencode_api/session/session.py +159 -0
- src/opencode_api/tool/__init__.py +27 -0
- src/opencode_api/tool/question.py +308 -0
- src/opencode_api/tool/registry.py +48 -0
- src/opencode_api/tool/skill.py +369 -0
- src/opencode_api/tool/todo.py +128 -0
- src/opencode_api/tool/tool.py +109 -0
- src/opencode_api/tool/webfetch.py +117 -0
- src/opencode_api/tool/websearch.py +85 -0
.env.example
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# LLM Provider API Keys
|
| 2 |
+
ANTHROPIC_API_KEY=your-anthropic-key
|
| 3 |
+
OPENAI_API_KEY=your-openai-key
|
| 4 |
+
GEMINI_API_KEY=your-gemini-key
|
| 5 |
+
GROQ_API_KEY=your-groq-key
|
| 6 |
+
DEEPSEEK_API_KEY=your-deepseek-key
|
| 7 |
+
OPENROUTER_API_KEY=your-openrouter-key
|
| 8 |
+
|
| 9 |
+
# Server Configuration
|
| 10 |
+
HOST=0.0.0.0
|
| 11 |
+
PORT=7860
|
| 12 |
+
DEBUG=false
|
| 13 |
+
|
| 14 |
+
# Default Model
|
| 15 |
+
DEFAULT_PROVIDER=anthropic
|
| 16 |
+
DEFAULT_MODEL=claude-sonnet-4-20250514
|
| 17 |
+
|
| 18 |
+
# Storage
|
| 19 |
+
OPENCODE_STORAGE_PATH=/tmp/opencode-api
|
| 20 |
+
|
| 21 |
+
# Security (optional)
|
| 22 |
+
OPENCODE_SERVER_PASSWORD=
|
.gitignore
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
*.py[cod]
|
| 3 |
+
*$py.class
|
| 4 |
+
*.so
|
| 5 |
+
.Python
|
| 6 |
+
build/
|
| 7 |
+
develop-eggs/
|
| 8 |
+
dist/
|
| 9 |
+
downloads/
|
| 10 |
+
eggs/
|
| 11 |
+
.eggs/
|
| 12 |
+
lib/
|
| 13 |
+
lib64/
|
| 14 |
+
parts/
|
| 15 |
+
sdist/
|
| 16 |
+
var/
|
| 17 |
+
wheels/
|
| 18 |
+
*.egg-info/
|
| 19 |
+
.installed.cfg
|
| 20 |
+
*.egg
|
| 21 |
+
|
| 22 |
+
.env
|
| 23 |
+
.venv
|
| 24 |
+
env/
|
| 25 |
+
venv/
|
| 26 |
+
ENV/
|
| 27 |
+
|
| 28 |
+
.idea/
|
| 29 |
+
.vscode/
|
| 30 |
+
*.swp
|
| 31 |
+
*.swo
|
| 32 |
+
|
| 33 |
+
*.log
|
| 34 |
+
logs/
|
| 35 |
+
|
| 36 |
+
.coverage
|
| 37 |
+
htmlcov/
|
| 38 |
+
.pytest_cache/
|
| 39 |
+
.mypy_cache/
|
| 40 |
+
|
| 41 |
+
/tmp/
|
| 42 |
+
*.tmp
|
Dockerfile
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Container image for the OpenCode API server (listens on port 7860, the
# default app port expected by Hugging Face Spaces).
FROM python:3.11-slim

WORKDIR /app

# build-essential lets pip compile wheels that ship no prebuilt binary;
# the apt cache is removed in the same layer to keep the image small.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    && rm -rf /var/lib/apt/lists/*

# Install dependencies before copying the source so this layer stays
# cached across code-only rebuilds.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

ENV PYTHONPATH=/app
# File-based session storage path (ephemeral inside the container).
ENV OPENCODE_STORAGE_PATH=/tmp/opencode-api

EXPOSE 7860

CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
README.md
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: OpenCode API
|
| 3 |
+
emoji: 🤖
|
| 4 |
+
colorFrom: blue
|
| 5 |
+
colorTo: purple
|
| 6 |
+
sdk: docker
|
| 7 |
+
app_port: 7860
|
| 8 |
+
pinned: false
|
| 9 |
+
license: mit
|
| 10 |
+
---
|
| 11 |
+
|
| 12 |
+
# OpenCode API
|
| 13 |
+
|
| 14 |
+
LLM Agent API Server - ported from TypeScript [opencode](https://github.com/anomalyco/opencode) to Python.
|
| 15 |
+
|
| 16 |
+
## Features
|
| 17 |
+
|
| 18 |
+
- **Multi-provider LLM support**: Anthropic (Claude), OpenAI (GPT-4), Google (Gemini), LiteLLM
|
| 19 |
+
- **Tool system**: Web search, web fetch, todo management
|
| 20 |
+
- **Session management**: Persistent conversations with history
|
| 21 |
+
- **SSE streaming**: Real-time streaming responses
|
| 22 |
+
- **REST API**: FastAPI with automatic OpenAPI docs
|
| 23 |
+
|
| 24 |
+
## API Endpoints
|
| 25 |
+
|
| 26 |
+
### Sessions
|
| 27 |
+
|
| 28 |
+
- `GET /session` - List all sessions
|
| 29 |
+
- `POST /session` - Create a new session
|
| 30 |
+
- `GET /session/{id}` - Get session details
|
| 31 |
+
- `DELETE /session/{id}` - Delete a session
|
| 32 |
+
- `POST /session/{id}/message` - Send a message (SSE streaming response)
|
| 33 |
+
- `POST /session/{id}/abort` - Cancel ongoing generation
|
| 34 |
+
|
| 35 |
+
### Providers
|
| 36 |
+
|
| 37 |
+
- `GET /provider` - List available LLM providers
|
| 38 |
+
- `GET /provider/{id}` - Get provider details
|
| 39 |
+
- `GET /provider/{id}/model` - List provider models
|
| 40 |
+
|
| 41 |
+
### Events
|
| 42 |
+
|
| 43 |
+
- `GET /event` - Subscribe to real-time events (SSE)
|
| 44 |
+
|
| 45 |
+
## Environment Variables
|
| 46 |
+
|
| 47 |
+
Set these as Hugging Face Space secrets:
|
| 48 |
+
|
| 49 |
+
| Variable | Description |
|
| 50 |
+
| -------------------------- | ----------------------------------- |
|
| 51 |
+
| `ANTHROPIC_API_KEY` | Anthropic API key for Claude models |
|
| 52 |
+
| `OPENAI_API_KEY` | OpenAI API key for GPT models |
|
| 53 |
+
| `OPENCODE_SERVER_PASSWORD` | Optional: Basic auth password |
|
| 54 |
+
|
| 55 |
+
## Local Development
|
| 56 |
+
|
| 57 |
+
```bash
|
| 58 |
+
# Install dependencies
|
| 59 |
+
pip install -r requirements.txt
|
| 60 |
+
|
| 61 |
+
# Run server
|
| 62 |
+
python app.py
|
| 63 |
+
|
| 64 |
+
# Or with uvicorn
|
| 65 |
+
uvicorn app:app --host 0.0.0.0 --port 7860 --reload
|
| 66 |
+
```
|
| 67 |
+
|
| 68 |
+
## API Documentation
|
| 69 |
+
|
| 70 |
+
Once running, visit:
|
| 71 |
+
|
| 72 |
+
- Swagger UI: `http://localhost:7860/docs`
|
| 73 |
+
- ReDoc: `http://localhost:7860/redoc`
|
| 74 |
+
|
| 75 |
+
## Example Usage
|
| 76 |
+
|
| 77 |
+
```python
|
| 78 |
+
import httpx
|
| 79 |
+
|
| 80 |
+
# Create a session
|
| 81 |
+
response = httpx.post("http://localhost:7860/session")
|
| 82 |
+
session = response.json()
|
| 83 |
+
session_id = session["id"]
|
| 84 |
+
|
| 85 |
+
# Send a message (with SSE streaming)
|
| 86 |
+
with httpx.stream(
|
| 87 |
+
"POST",
|
| 88 |
+
f"http://localhost:7860/session/{session_id}/message",
|
| 89 |
+
json={"content": "Hello, what can you help me with?"}
|
| 90 |
+
) as response:
|
| 91 |
+
for line in response.iter_lines():
|
| 92 |
+
if line.startswith("data: "):
|
| 93 |
+
print(line[6:])
|
| 94 |
+
```
|
| 95 |
+
|
| 96 |
+
## License
|
| 97 |
+
|
| 98 |
+
MIT
|
ROADMAP.md
ADDED
|
@@ -0,0 +1,251 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# OpenCode API - Feature Roadmap
|
| 2 |
+
|
| 3 |
+
> TypeScript opencode를 Python API로 포팅하면서 **API 환경에 적합한 기능**만 선별한 로드맵
|
| 4 |
+
|
| 5 |
+
## 현재 상태 (v0.2.0)
|
| 6 |
+
|
| 7 |
+
### 구현 완료
|
| 8 |
+
|
| 9 |
+
**Core:**
|
| 10 |
+
|
| 11 |
+
- [x] 기본 세션 CRUD
|
| 12 |
+
- [x] 메시지 생성/조회
|
| 13 |
+
- [x] SSE 스트리밍 응답
|
| 14 |
+
- [x] 이벤트 버스 (기본)
|
| 15 |
+
- [x] 파일 기반 스토리지
|
| 16 |
+
|
| 17 |
+
**Providers:**
|
| 18 |
+
|
| 19 |
+
- [x] Anthropic 프로바이더
|
| 20 |
+
- [x] OpenAI 프로바이더
|
| 21 |
+
- [x] **LiteLLM 통합 프로바이더 (100+ 모델 지원)**
|
| 22 |
+
|
| 23 |
+
**Tools:**
|
| 24 |
+
|
| 25 |
+
- [x] websearch 도구
|
| 26 |
+
- [x] webfetch 도구
|
| 27 |
+
- [x] todo 도구
|
| 28 |
+
- [x] question 도구
|
| 29 |
+
- [x] skill 도구 (내장 스킬)
|
| 30 |
+
|
| 31 |
+
**Agent System (NEW):**
|
| 32 |
+
|
| 33 |
+
- [x] Agent 모델 정의 (id, name, description, permissions, prompt)
|
| 34 |
+
- [x] 기본 agents: build, plan, general, explore
|
| 35 |
+
- [x] Beast mode 시스템 프롬프트
|
| 36 |
+
- [x] 세션에 agent_id 연결
|
| 37 |
+
- [x] Agent routes (`GET/POST /agent`)
|
| 38 |
+
|
| 39 |
+
**Agentic Loop (NEW):**
|
| 40 |
+
|
| 41 |
+
- [x] 자동 계속 작업 로직 (tool_use 후 자동 continue)
|
| 42 |
+
- [x] 루프 제어 (max_steps, auto_continue, pause_on_question)
|
| 43 |
+
- [x] Resume/continue 지원
|
| 44 |
+
|
| 45 |
+
---
|
| 46 |
+
|
| 47 |
+
## Phase 1: Core Agent System (Priority: Critical) ✅ COMPLETED
|
| 48 |
+
|
| 49 |
+
### 1.1 Build Agent 시스템 ✅
|
| 50 |
+
|
| 51 |
+
> 에이전트 정의 및 시스템 프롬프트 관리
|
| 52 |
+
|
| 53 |
+
- [x] `Agent` 모델 정의
|
| 54 |
+
- id, name, description
|
| 55 |
+
- system_prompt (beast mode 포함)
|
| 56 |
+
- tools (허용된 도구 목록)
|
| 57 |
+
- permissions (기본 권한)
|
| 58 |
+
- [x] 기본 build agent 구현
|
| 59 |
+
- 완전한 시스템 프롬프트
|
| 60 |
+
- 모든 도구 접근 가능
|
| 61 |
+
- [x] 세션에 agent 연결
|
| 62 |
+
- `POST /session` 시 agent_id 지정
|
| 63 |
+
- 기본값: build agent
|
| 64 |
+
|
| 65 |
+
### 1.2 Agentic Loop (Beast Mode) ✅
|
| 66 |
+
|
| 67 |
+
> todo 목록이 모두 완료될 때까지 자동으로 계속 작업
|
| 68 |
+
|
| 69 |
+
- [x] 자동 계속 작업 로직
|
| 70 |
+
- 메시지 처리 후 tool_call 상태 확인
|
| 71 |
+
- tool_call이 있으면 자동으로 다음 단계 진행
|
| 72 |
+
- `stop_reason`이 `end_turn`일 때만 종료
|
| 73 |
+
- [x] 루프 제어 옵션
|
| 74 |
+
- `max_steps`: 최대 반복 횟수 (기본: 50)
|
| 75 |
+
- `auto_continue`: 자동 계속 여부 (기본: true)
|
| 76 |
+
- `pause_on_question`: question 도구 사용 시 일시 정지
|
| 77 |
+
- [x] Beast mode 시스템 프롬프트
|
| 78 |
+
- "todo 완료까지 계속 작업" 지시
|
| 79 |
+
- "resume/continue" 명령 처리
|
| 80 |
+
|
| 81 |
+
### 1.3 Permission 시스템
|
| 82 |
+
|
| 83 |
+
> 위험한 도구 실행 전 사용자 승인 요청
|
| 84 |
+
|
| 85 |
+
- [ ] `Permission` 모델
|
| 86 |
+
- tool_name, action (allow/deny/ask)
|
| 87 |
+
- patterns (glob 패턴)
|
| 88 |
+
- always_allow 목록
|
| 89 |
+
- [ ] 승인 요청 흐름
|
| 90 |
+
- SSE로 `permission.ask` 이벤트 전송
|
| 91 |
+
- `POST /permission/{id}/respond` 엔드포인트
|
| 92 |
+
- 타임아웃 처리
|
| 93 |
+
- [ ] 기본 권한 규칙
|
| 94 |
+
- websearch, webfetch, todo, skill: 자동 허용
|
| 95 |
+
- question: 자동 허용 (사용자 상호작용)
|
| 96 |
+
|
| 97 |
+
---
|
| 98 |
+
|
| 99 |
+
## Phase 2: Provider Integration (Priority: High) ✅ COMPLETED
|
| 100 |
+
|
| 101 |
+
### 2.1 LiteLLM 통합 ✅
|
| 102 |
+
|
| 103 |
+
> 100+ LLM 프로바이더를 단일 인터페이스로
|
| 104 |
+
|
| 105 |
+
- [x] LiteLLM 프로바이더 구현
|
| 106 |
+
- `pip install litellm` 의존성 추가
|
| 107 |
+
- streaming 지원
|
| 108 |
+
- tool calling 지원
|
| 109 |
+
- [x] 지원 프로바이더 (LiteLLM 경유)
|
| 110 |
+
- [x] Anthropic (Claude)
|
| 111 |
+
- [x] OpenAI (GPT-4)
|
| 112 |
+
- [x] Google (Gemini)
|
| 113 |
+
- [x] Groq
|
| 114 |
+
- [x] DeepSeek
|
| 115 |
+
- [x] OpenRouter
|
| 116 |
+
- [x] 100+ more via LiteLLM
|
| 117 |
+
- [x] 모델 설정
|
| 118 |
+
- 환경변수로 API 키 관리
|
| 119 |
+
- 모델 ID로 자동 라우팅
|
| 120 |
+
|
| 121 |
+
### 2.2 모델 Fallback
|
| 122 |
+
|
| 123 |
+
> 실패 시 대체 모델로 자동 전환
|
| 124 |
+
|
| 125 |
+
- [ ] fallback 체인 설정
|
| 126 |
+
- [ ] 재시도 로직 (지수 백오프)
|
| 127 |
+
- [ ] 에러 분류 (rate limit, auth, etc.)
|
| 128 |
+
|
| 129 |
+
---
|
| 130 |
+
|
| 131 |
+
## Phase 3: MCP Integration (Priority: High)
|
| 132 |
+
|
| 133 |
+
### 3.1 MCP Client
|
| 134 |
+
|
| 135 |
+
> 외부 MCP 서버 연동으로 도구 확장
|
| 136 |
+
|
| 137 |
+
- [ ] MCP 서버 연결
|
| 138 |
+
- HTTP/SSE 기반 (stdio는 API 환경에서 불가)
|
| 139 |
+
- `POST /mcp` - 서버 추가
|
| 140 |
+
- `GET /mcp` - 서버 목록
|
| 141 |
+
- `DELETE /mcp/{name}` - 서버 제거
|
| 142 |
+
- [ ] MCP 도구 가져오기
|
| 143 |
+
- `MCP.tools()` 구현
|
| 144 |
+
- 도구 스키마 변환
|
| 145 |
+
- LLM 세션에 동적 주입
|
| 146 |
+
- [ ] MCP 인증
|
| 147 |
+
- API 키 기반
|
| 148 |
+
- OAuth (선택적)
|
| 149 |
+
|
| 150 |
+
### 3.2 MCP 프롬프트/리소스
|
| 151 |
+
|
| 152 |
+
- [ ] `MCP.prompts()` - 프롬프트 템플릿
|
| 153 |
+
- [ ] `MCP.resources()` - 리소스 접근
|
| 154 |
+
|
| 155 |
+
---
|
| 156 |
+
|
| 157 |
+
## Phase 4: Advanced Session Features (Priority: Medium)
|
| 158 |
+
|
| 159 |
+
### 4.1 세션 고급 기능
|
| 160 |
+
|
| 161 |
+
- [ ] `POST /session/{id}/fork` - 세션 분기
|
| 162 |
+
- [ ] `POST /session/{id}/summarize` - AI 요약 (컨텍스트 압축)
|
| 163 |
+
- [ ] `GET /session/{id}/cost` - 비용 계산
|
| 164 |
+
|
| 165 |
+
### 4.2 Task 도구 (서브에이전트)
|
| 166 |
+
|
| 167 |
+
> 복잡한 작업을 서브에이전트에게 위임
|
| 168 |
+
|
| 169 |
+
- [ ] `task` 도구 구현
|
| 170 |
+
- 새 세션 생성
|
| 171 |
+
- 지정된 프롬프트로 작업 수행
|
| 172 |
+
- 결과 반환
|
| 173 |
+
- [ ] 서브에이전트 타입
|
| 174 |
+
- explore: 코드베이스 탐색
|
| 175 |
+
- research: 웹 리서치
|
| 176 |
+
|
| 177 |
+
### 4.3 비용 추적
|
| 178 |
+
|
| 179 |
+
- [ ] 토큰 사용량 기록
|
| 180 |
+
- [ ] 프로바이더별 비용 계산
|
| 181 |
+
- [ ] 세션별/전체 비용 집계
|
| 182 |
+
|
| 183 |
+
---
|
| 184 |
+
|
| 185 |
+
## Phase 5: External Skill Loading (Priority: Low)
|
| 186 |
+
|
| 187 |
+
### 5.1 스킬 외부 로드
|
| 188 |
+
|
| 189 |
+
- [ ] URL에서 스킬 로드
|
| 190 |
+
- [ ] GitHub Gist 지원
|
| 191 |
+
- [ ] 스킬 캐싱
|
| 192 |
+
|
| 193 |
+
---
|
| 194 |
+
|
| 195 |
+
## API 환경에서 제외된 기능
|
| 196 |
+
|
| 197 |
+
다음 기능들은 로컬 파일시스템 접근이 필요하여 **API 환경에서 구현 불가**:
|
| 198 |
+
|
| 199 |
+
| 기능 | 이유 |
|
| 200 |
+
| -------------------- | ------------------------ |
|
| 201 |
+
| bash 도구 | 서버에서 쉘 실행 불가 |
|
| 202 |
+
| read/write/edit 도구 | 파일시스템 접근 불가 |
|
| 203 |
+
| grep/glob/ls 도구 | 파일 검색 불가 |
|
| 204 |
+
| LSP 통합 | 언어 서버 실행 불가 |
|
| 205 |
+
| PTY/터미널 | 터미널 세션 불가 |
|
| 206 |
+
| Snapshot/Patch | 로컬 파일 변경 추적 불가 |
|
| 207 |
+
| IDE 통합 | 로컬 IDE 연동 불가 |
|
| 208 |
+
| File watcher | 파일 변경 감지 불가 |
|
| 209 |
+
| ACP (Zed) | 로컬 IDE 프로토콜 |
|
| 210 |
+
|
| 211 |
+
---
|
| 212 |
+
|
| 213 |
+
## 기술 스택
|
| 214 |
+
|
| 215 |
+
- **Framework**: FastAPI
|
| 216 |
+
- **LLM**: LiteLLM (통합 인터페이스)
|
| 217 |
+
- **Validation**: Pydantic
|
| 218 |
+
- **Streaming**: SSE (Server-Sent Events)
|
| 219 |
+
- **Storage**: 파일 기반 JSON (추후 DB 옵션)
|
| 220 |
+
- **Deployment**: Docker, HuggingFace Spaces
|
| 221 |
+
|
| 222 |
+
---
|
| 223 |
+
|
| 224 |
+
## 구현 순서
|
| 225 |
+
|
| 226 |
+
```
|
| 227 |
+
Phase 1.1 (Build Agent)
|
| 228 |
+
↓
|
| 229 |
+
Phase 1.2 (Agentic Loop)
|
| 230 |
+
↓
|
| 231 |
+
Phase 2.1 (LiteLLM)
|
| 232 |
+
↓
|
| 233 |
+
Phase 1.3 (Permission)
|
| 234 |
+
↓
|
| 235 |
+
Phase 3.1 (MCP Client)
|
| 236 |
+
↓
|
| 237 |
+
Phase 4.2 (Task 도구)
|
| 238 |
+
↓
|
| 239 |
+
Phase 4.1 (세션 고급)
|
| 240 |
+
↓
|
| 241 |
+
Phase 4.3 (비용 추적)
|
| 242 |
+
```
|
| 243 |
+
|
| 244 |
+
---
|
| 245 |
+
|
| 246 |
+
## 참고
|
| 247 |
+
|
| 248 |
+
- TypeScript 원본: `/Users/gimjungwook/Projects/opencode/packages/opencode/src`
|
| 249 |
+
- Beast mode 프롬프트: `session/prompt/beast.txt`
|
| 250 |
+
- Agent 정의: `agent/agent.ts`
|
| 251 |
+
- MCP 구현: `mcp/index.ts`
|
app.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""FastAPI application entry point for the OpenCode API server.

Registers LLM providers and tools once at startup (via the lifespan
context), configures CORS for the aicampus frontend, and mounts all
route modules.
"""

from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from contextlib import asynccontextmanager
import os

from src.opencode_api.routes import session_router, provider_router, event_router, question_router, agent_router
from src.opencode_api.provider import register_provider, AnthropicProvider, OpenAIProvider, LiteLLMProvider, GeminiProvider
from src.opencode_api.tool import register_tool, WebSearchTool, WebFetchTool, TodoTool, QuestionTool, SkillTool
from src.opencode_api.core.config import settings

# Single source of truth for the app version (previously duplicated in the
# FastAPI() constructor and the "/" endpoint, where the two copies could
# silently drift apart).
APP_VERSION = "0.1.0"


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Register providers and tools at startup; nothing to tear down."""
    register_provider(LiteLLMProvider())
    register_provider(AnthropicProvider())
    register_provider(OpenAIProvider())
    register_provider(GeminiProvider(api_key=settings.google_api_key))

    # Register tools
    register_tool(WebSearchTool())
    register_tool(WebFetchTool())
    register_tool(TodoTool())
    register_tool(QuestionTool())
    register_tool(SkillTool())

    yield


app = FastAPI(
    title="OpenCode API",
    description="LLM Agent API Server - ported from TypeScript opencode",
    version=APP_VERSION,
    lifespan=lifespan,
)

# CORS settings for aicampus frontend
ALLOWED_ORIGINS = [
    "https://aicampus.kr",
    "https://www.aicampus.kr",
    "https://aicampus.vercel.app",
    "http://localhost:3000",
    "http://127.0.0.1:3000",
]

app.add_middleware(
    CORSMiddleware,
    allow_origins=ALLOWED_ORIGINS,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.exception_handler(Exception)
async def global_exception_handler(request: Request, exc: Exception):
    """Last-resort handler: map any unhandled exception to a 500 JSON body.

    NOTE(review): returning ``str(exc)`` can leak internal details (paths,
    credentials in messages) to clients; consider logging the traceback
    server-side and returning a generic message instead.
    """
    return JSONResponse(
        status_code=500,
        content={"error": str(exc), "type": type(exc).__name__}
    )


app.include_router(session_router)
app.include_router(provider_router)
app.include_router(event_router)
app.include_router(question_router)
app.include_router(agent_router)


@app.get("/")
async def root():
    """Service metadata endpoint."""
    return {
        "name": "OpenCode API",
        "version": APP_VERSION,
        "status": "running",
        "docs": "/docs",
    }


@app.get("/health")
async def health():
    """Liveness probe (used by the container platform)."""
    return {"status": "healthy"}


if __name__ == "__main__":
    import uvicorn
    uvicorn.run(
        "app:app",
        host=settings.host,
        port=settings.port,
        reload=settings.debug,
    )
docs/OPENCODE_COMPARISON.md
ADDED
|
@@ -0,0 +1,270 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# OpenCode vs opencode-api 기능 비교 분석
|
| 2 |
+
|
| 3 |
+
> 분석일: 2026-01-28
|
| 4 |
+
>
|
| 5 |
+
> 원본 TypeScript OpenCode와 Python opencode-api의 기능 비교 및 클론 계획
|
| 6 |
+
|
| 7 |
+
---
|
| 8 |
+
|
| 9 |
+
## 개요
|
| 10 |
+
|
| 11 |
+
| 항목 | 원본 TypeScript OpenCode | Python opencode-api |
|
| 12 |
+
|-----|-------------------------|---------------------|
|
| 13 |
+
| 용도 | 개발자용 AI 코딩 에이전트 CLI | 교육 플랫폼용 웹 API |
|
| 14 |
+
| 런타임 | Bun | Python 3.11+ / FastAPI |
|
| 15 |
+
| 아키텍처 | Namespace 패턴, 이벤트 기반 | 클래스 기반, 직접 호출 |
|
| 16 |
+
| 경로 | `/packages/opencode/src/` | `/opencode-api/src/opencode_api/` |
|
| 17 |
+
|
| 18 |
+
---
|
| 19 |
+
|
| 20 |
+
## 현재 구현 상태 (약 20-25%)
|
| 21 |
+
|
| 22 |
+
| 영역 | 원본 기능 수 | Python 구현 | 비율 |
|
| 23 |
+
|-----|-------------|------------|------|
|
| 24 |
+
| Session | 13 기능 | 4 기능 | **31%** |
|
| 25 |
+
| Provider | 21 프로바이더 | 4 프로바이더 | **19%** |
|
| 26 |
+
| Tool | 24 도구 | 5 도구 | **21%** |
|
| 27 |
+
| Server | 15 라우트 그룹 | 5 라우트 모듈 | **33%** |
|
| 28 |
+
| 기타 (MCP, LSP, Permission, Snapshot, Plugin) | 5 시스템 | 0 시스템 | **0%** |
|
| 29 |
+
|
| 30 |
+
---
|
| 31 |
+
|
| 32 |
+
## 1. Session 관리
|
| 33 |
+
|
| 34 |
+
### 구현 상태
|
| 35 |
+
|
| 36 |
+
| 기능 | 원본 파일 | Python 파일 | 상태 |
|
| 37 |
+
|-----|----------|------------|------|
|
| 38 |
+
| 세션 생성/조회/수정/삭제 | `session/index.ts` | `session/session.py` | ✅ 구현됨 |
|
| 39 |
+
| 메시지 파트 시스템 | `message-v2.ts` | `message.py` | ⚠️ 간소화됨 |
|
| 40 |
+
| 메시지 스트리밍 | `processor.ts` + `llm.ts` | `prompt.py` | ✅ 구현됨 |
|
| 41 |
+
| 세션 압축 (Compaction) | `compaction.ts` | - | ❌ 미구현 |
|
| 42 |
+
| 스냅샷/되돌리기 | `revert.ts` + `Snapshot` | - | ❌ 미구현 |
|
| 43 |
+
| 세션 요약 | `summary.ts` | - | ❌ 미구현 |
|
| 44 |
+
| 세션 공유 | `share/share-next.ts` | - | ❌ 미구현 |
|
| 45 |
+
| 세션 포크 | `Session.fork()` | - | ❌ 미구현 |
|
| 46 |
+
| 비용 계산 | `Session.getUsage()` | 기본 토큰만 | ⚠️ 간소화됨 |
|
| 47 |
+
| 세션 상태 | `status.ts` | - | ❌ 미구현 |
|
| 48 |
+
| 재시도 로직 | `retry.ts` | - | ❌ 미구현 |
|
| 49 |
+
|
| 50 |
+
### 미구현 기능 상세
|
| 51 |
+
|
| 52 |
+
#### 세션 압축 (Compaction)
|
| 53 |
+
- **원본 위치**: `session/compaction.ts`
|
| 54 |
+
- **기능**: 대화가 길어지면 이전 메시지를 요약하여 컨텍스트 윈도우 관리
|
| 55 |
+
- **중요도**: 🔴 높음 (긴 대화 시 필수)
|
| 56 |
+
|
| 57 |
+
#### 세션 요약 (Summary)
|
| 58 |
+
- **원본 위치**: `session/summary.ts`
|
| 59 |
+
- **기능**: 파일 변경 통계 계산 (additions, deletions, files)
|
| 60 |
+
- **중요도**: 🟡 중간
|
| 61 |
+
|
| 62 |
+
#### 스냅샷/되돌리기
|
| 63 |
+
- **원본 위치**: `session/revert.ts`, `snapshot/index.ts`
|
| 64 |
+
- **기능**: Git 기반 스냅샷으로 세션 상태 되돌리기
|
| 65 |
+
- **중요도**: 🟡 중간 (교육 플랫폼에서는 낮음)
|
| 66 |
+
|
| 67 |
+
---
|
| 68 |
+
|
| 69 |
+
## 2. Provider (LLM 통합)
|
| 70 |
+
|
| 71 |
+
### 구현 상태
|
| 72 |
+
|
| 73 |
+
| 프로바이더 | 원본 | Python | 상태 |
|
| 74 |
+
|-----------|------|--------|------|
|
| 75 |
+
| Anthropic Claude | ✅ | ✅ `anthropic.py` | ✅ 구현됨 |
|
| 76 |
+
| OpenAI | ✅ | ✅ `openai.py` | ✅ 구현됨 |
|
| 77 |
+
| Google Gemini | ✅ | ✅ `gemini.py` | ✅ 구현됨 |
|
| 78 |
+
| LiteLLM | ❌ | ✅ `litellm.py` | ✅ Python 전용 |
|
| 79 |
+
| Amazon Bedrock | ✅ | ❌ | ❌ 미구현 |
|
| 80 |
+
| Azure OpenAI | ✅ | ❌ | ❌ 미구현 |
|
| 81 |
+
| Google Vertex | ✅ | ❌ | ❌ 미구현 |
|
| 82 |
+
| OpenRouter | ✅ | ❌ | ❌ 미구현 |
|
| 83 |
+
| GitHub Copilot | ✅ | ❌ | ❌ 미구현 |
|
| 84 |
+
| xAI/Grok | ✅ | ❌ | ❌ 미구현 |
|
| 85 |
+
| Mistral | ✅ | ❌ | ❌ 미구현 |
|
| 86 |
+
| Groq | ✅ | ❌ | ❌ 미구현 |
|
| 87 |
+
| DeepInfra | ✅ | ❌ | ❌ 미구현 |
|
| 88 |
+
| Cerebras | ✅ | ❌ | ❌ 미구현 |
|
| 89 |
+
| Cohere | ✅ | ❌ | ❌ 미구현 |
|
| 90 |
+
| Together AI | ✅ | ❌ | ❌ 미구현 |
|
| 91 |
+
| Perplexity | ✅ | ❌ | ❌ 미구현 |
|
| 92 |
+
| GitLab | ✅ | ❌ | ❌ 미구현 |
|
| 93 |
+
| Cloudflare | ✅ | ❌ | ❌ 미구현 |
|
| 94 |
+
| SAP AI Core | ✅ | ❌ | ❌ 미구현 |
|
| 95 |
+
|
| 96 |
+
### 추가 프로바이더 구현 방법
|
| 97 |
+
- LiteLLM을 통해 대부분의 프로바이더를 쉽게 추가 가능
|
| 98 |
+
- `provider/litellm.py` 확장으로 OpenRouter, Groq, Mistral 등 지원
|
| 99 |
+
|
| 100 |
+
---
|
| 101 |
+
|
| 102 |
+
## 3. Tool System
|
| 103 |
+
|
| 104 |
+
### 구현 상태
|
| 105 |
+
|
| 106 |
+
| 도구 | 원본 파일 | Python 파일 | 상태 |
|
| 107 |
+
|-----|----------|------------|------|
|
| 108 |
+
| Tool 추상화 | `tool/tool.ts` | `tool/tool.py` | ✅ 구현됨 |
|
| 109 |
+
| Tool 레지스트리 | `registry.ts` | `__init__.py` | ⚠️ 간소화됨 |
|
| 110 |
+
| **Question** | `question.ts` | `question.py` | ✅ 구현됨 |
|
| 111 |
+
| **WebSearch** | `websearch.ts` (Exa) | `websearch.py` (DuckDuckGo) | ⚠️ 다른 백엔드 |
|
| 112 |
+
| **WebFetch** | `webfetch.ts` | `webfetch.py` | ✅ 구현됨 |
|
| 113 |
+
| **Todo** | `todo.ts` | `todo.py` | ✅ 구현됨 |
|
| 114 |
+
| **Skill** | `skill.ts` | `skill.py` | ⚠️ 기본 구현 |
|
| 115 |
+
| **Bash** | `bash.ts` | - | ❌ 미구현 |
|
| 116 |
+
| **Edit** | `edit.ts` | - | ❌ 미구현 |
|
| 117 |
+
| **Write** | `write.ts` | - | ❌ 미구현 |
|
| 118 |
+
| **Read** | `read.ts` | - | ❌ 미구현 |
|
| 119 |
+
| **Glob** | `glob.ts` | - | ❌ 미구현 |
|
| 120 |
+
| **Grep** | `grep.ts` | - | ❌ 미구현 |
|
| 121 |
+
| **Ls** | `ls.ts` | - | ❌ 미구현 |
|
| 122 |
+
| **Task** | `task.ts` | - | ❌ 미구현 |
|
| 123 |
+
| **Batch** | `batch.ts` | - | ❌ 미구현 |
|
| 124 |
+
| **CodeSearch** | `codesearch.ts` | - | ❌ 미구현 |
|
| 125 |
+
| **LSP** | `lsp.ts` | - | ❌ 미구현 |
|
| 126 |
+
| **Plan** | `plan.ts` | - | ❌ 미구현 |
|
| 127 |
+
| **Apply Patch** | `apply_patch.ts` | - | ❌ 미구현 |
|
| 128 |
+
| **MultiEdit** | `multiedit.ts` | - | ❌ 미구현 |
|
| 129 |
+
|
| 130 |
+
### 미구현 핵심 도구 상세
|
| 131 |
+
|
| 132 |
+
#### Bash 도구
|
| 133 |
+
- **기능**: 셸 명령 실행, 타임아웃, 권한 검사
|
| 134 |
+
- **보안 고려**: 교육 플랫폼에서는 E2B 샌드박스 또는 코드 인터프리터 대안 필요
|
| 135 |
+
- **중요도**: 🔴 높음 (코드 실행 필요 시)
|
| 136 |
+
|
| 137 |
+
#### Edit/Write 도구
|
| 138 |
+
- **기능**: 파일 편집/생성, 퍼지 매칭, LSP 진단
|
| 139 |
+
- **구현 방안**: Supabase Storage 또는 E2B 샌드박스 활용
|
| 140 |
+
- **중요도**: 🔴 높음 (코드 작성 기능 필요 시)
|
| 141 |
+
|
| 142 |
+
#### Read/Glob/Grep 도구
|
| 143 |
+
- **기능**: 파일 읽기, 패턴 검색, 내용 검색
|
| 144 |
+
- **구현 방안**: 제한된 파일 시스템 또는 가상 환경
|
| 145 |
+
- **중요도**: 🟡 중간
|
| 146 |
+
|
| 147 |
+
#### Task 도구
|
| 148 |
+
- **기능**: 서브에이전트 위임
|
| 149 |
+
- **중요도**: 🟡 중간 (복잡한 작업 분할 시)
|
| 150 |
+
|
| 151 |
+
---
|
| 152 |
+
|
| 153 |
+
## 4. Server/API
|
| 154 |
+
|
| 155 |
+
### 구현 상태
|
| 156 |
+
|
| 157 |
+
| 엔드포인트 | 원본 | Python | 상태 |
|
| 158 |
+
|-----------|------|--------|------|
|
| 159 |
+
| `/session` | `routes/session.ts` | `routes/session.py` | ✅ 구현됨 |
|
| 160 |
+
| `/session/{id}/message` (SSE) | 통합 | `routes/session.py` | ✅ 구현됨 |
|
| 161 |
+
| `/session/{id}/abort` | 통합 | `routes/session.py` | ✅ 구현됨 |
|
| 162 |
+
| `/provider` | `routes/provider.ts` | `routes/provider.py` | ✅ 구현됨 |
|
| 163 |
+
| `/event` (SSE) | `server.ts` | `routes/event.py` | ⚠️ 기본 구현 |
|
| 164 |
+
| `/question` | `routes/question.ts` | `routes/question.py` | ✅ 구현됨 |
|
| 165 |
+
| `/agent` | - | `routes/agent.py` | ✅ Python 전용 |
|
| 166 |
+
| `/project` | `routes/project.ts` | - | ❌ 미구현 |
|
| 167 |
+
| `/config` | `routes/config.ts` | - | ❌ 미구현 |
|
| 168 |
+
| `/mcp` | `routes/mcp.ts` | - | ❌ 미구현 |
|
| 169 |
+
| `/permission` | `routes/permission.ts` | - | ❌ 미구현 |
|
| 170 |
+
| `/file` | `routes/file.ts` | - | ❌ 미구현 |
|
| 171 |
+
| `/pty` | `routes/pty.ts` | - | ❌ 미구현 |
|
| 172 |
+
| `/tui` | `routes/tui.ts` | - | ❌ 미구현 |
|
| 173 |
+
| `/experimental` | `routes/experimental.ts` | - | ❌ 미구현 |
|
| 174 |
+
| `/global` | `routes/global.ts` | - | ❌ 미구현 |
|
| 175 |
+
|
| 176 |
+
---
|
| 177 |
+
|
| 178 |
+
## 5. 기타 주요 시스템
|
| 179 |
+
|
| 180 |
+
| 시스템 | 원본 위치 | Python | 상태 |
|
| 181 |
+
|-------|----------|--------|------|
|
| 182 |
+
| **MCP** | `mcp/index.ts` | - | ❌ 미구현 |
|
| 183 |
+
| **LSP** | `lsp/index.ts` | - | ❌ 미구현 |
|
| 184 |
+
| **Plugin** | `plugin/index.ts` | - | ❌ 미구현 |
|
| 185 |
+
| **Permission** | `permission/index.ts` | - | ❌ 미구현 |
|
| 186 |
+
| **Snapshot** | `snapshot/index.ts` | - | ❌ 미구현 |
|
| 187 |
+
| **ACP** | `acp/` | - | ❌ 미구현 |
|
| 188 |
+
| **Worktree** | `worktree/index.ts` | - | ❌ 미구현 |
|
| 189 |
+
| **Scheduler** | `scheduler/index.ts` | - | ❌ 미구현 |
|
| 190 |
+
|
| 191 |
+
---
|
| 192 |
+
|
| 193 |
+
## 구현 우선순위
|
| 194 |
+
|
| 195 |
+
### 🟢 바로 구현 가능 (난이도: 낮음)
|
| 196 |
+
|
| 197 |
+
| 순위 | 기능 | 설명 | 예상 작업량 |
|
| 198 |
+
|-----|------|------|-----------|
|
| 199 |
+
| 1 | 추가 LLM 프로바이더 | LiteLLM 확장 (OpenRouter, Groq 등) | 1-2시간 |
|
| 200 |
+
| 2 | 세션 제목 자동 생성 | 첫 메시지로 제목 생성 | 1시간 |
|
| 201 |
+
| 3 | 웹 검색 개선 | Exa API로 전환 | 2시간 |
|
| 202 |
+
| 4 | 비용 계산 | models.dev 연동 | 2-3시간 |
|
| 203 |
+
|
| 204 |
+
### 🟡 중간 난이도
|
| 205 |
+
|
| 206 |
+
| 순위 | 기능 | 설명 | 예상 작업량 |
|
| 207 |
+
|-----|------|------|-----------|
|
| 208 |
+
| 5 | 세션 압축 (Compaction) | 컨텍스트 관리 | 4-6시간 |
|
| 209 |
+
| 6 | 세션 포크 | 대화 분기 | 2-3시간 |
|
| 210 |
+
| 7 | Read 도구 | 파일 읽기 (Storage) | 3-4시간 |
|
| 211 |
+
| 8 | Glob/Grep 도구 | 파일 검색 | 4-5시간 |
|
| 212 |
+
|
| 213 |
+
### 🔴 고난이도 (설계 변경 필요)
|
| 214 |
+
|
| 215 |
+
| 순위 | 기능 | 설명 | 예상 작업량 |
|
| 216 |
+
|-----|------|------|-----------|
|
| 217 |
+
| 9 | Edit/Write 도구 | 파일 수정 (샌드박스) | 1-2일 |
|
| 218 |
+
| 10 | Bash 도구 | 코드 실행 (보안) | 1-2일 |
|
| 219 |
+
| 11 | MCP 통합 | 외부 서버 연결 | 2-3일 |
|
| 220 |
+
| 12 | Snapshot/Revert | 되돌리기 | 1-2일 |
|
| 221 |
+
|
| 222 |
+
### ❌ 교육 플랫폼에 부적합
|
| 223 |
+
|
| 224 |
+
- CLI/TUI 인터페이스
|
| 225 |
+
- 로컬 파일 시스템 직접 접근
|
| 226 |
+
- PTY (터미널) 기능
|
| 227 |
+
- Git Worktree 관리
|
| 228 |
+
|
| 229 |
+
---
|
| 230 |
+
|
| 231 |
+
## 구현 체크리스트
|
| 232 |
+
|
| 233 |
+
### Phase 1: 기본 개선
|
| 234 |
+
- [ ] 추가 LLM 프로바이더 (LiteLLM 확장)
|
| 235 |
+
- [ ] 세션 제목 자동 생성
|
| 236 |
+
- [ ] 웹 검색 개선 (Exa API)
|
| 237 |
+
- [ ] 비용 계산 (models.dev)
|
| 238 |
+
|
| 239 |
+
### Phase 2: 세션 고급 기능
|
| 240 |
+
- [ ] 세션 압축 (Compaction)
|
| 241 |
+
- [ ] 세션 포크
|
| 242 |
+
- [ ] 세션 상태 관리
|
| 243 |
+
|
| 244 |
+
### Phase 3: 도구 확장
|
| 245 |
+
- [ ] Read 도구
|
| 246 |
+
- [ ] Glob 도구
|
| 247 |
+
- [ ] Grep 도구
|
| 248 |
+
- [ ] Ls 도구
|
| 249 |
+
|
| 250 |
+
### Phase 4: 고급 기능 (선택적)
|
| 251 |
+
- [ ] Edit/Write 도구 (샌드박스 환경)
|
| 252 |
+
- [ ] Bash 도구 (코드 인터프리터)
|
| 253 |
+
- [ ] MCP 기본 지원
|
| 254 |
+
- [ ] Task 도구 (서브에이전트)
|
| 255 |
+
|
| 256 |
+
---
|
| 257 |
+
|
| 258 |
+
## 참고 자료
|
| 259 |
+
|
| 260 |
+
### 원본 OpenCode 주요 파일
|
| 261 |
+
- Session: `/packages/opencode/src/session/`
|
| 262 |
+
- Provider: `/packages/opencode/src/provider/`
|
| 263 |
+
- Tool: `/packages/opencode/src/tool/`
|
| 264 |
+
- Server: `/packages/opencode/src/server/`
|
| 265 |
+
|
| 266 |
+
### Python API 주요 파일
|
| 267 |
+
- Session: `/opencode-api/src/opencode_api/session/`
|
| 268 |
+
- Provider: `/opencode-api/src/opencode_api/provider/`
|
| 269 |
+
- Tool: `/opencode-api/src/opencode_api/tool/`
|
| 270 |
+
- Routes: `/opencode-api/src/opencode_api/routes/`
|
pyproject.toml
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "opencode-api"
version = "0.1.0"
description = "LLM Agent API Server - ported from TypeScript opencode"
readme = "README.md"
license = "MIT"
requires-python = ">=3.10"
authors = [
    { name = "OpenCode Team" }
]
keywords = ["llm", "api", "agent", "fastapi", "anthropic", "openai"]
classifiers = [
    "Development Status :: 3 - Alpha",
    "Framework :: FastAPI",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
]
# Kept in sync with requirements.txt so `pip install .` and
# `pip install -r requirements.txt` produce the same environment.
dependencies = [
    "fastapi>=0.109.0",
    "uvicorn[standard]>=0.27.0",
    "anthropic>=0.40.0",
    "openai>=1.50.0",
    "litellm>=1.50.0",
    "google-genai>=1.0.0",
    "pydantic>=2.6.0",
    "pydantic-settings>=2.1.0",
    "httpx>=0.27.0",
    "aiohttp>=3.9.0",
    "python-ulid>=2.2.0",
    "python-dotenv>=1.0.0",
    "sse-starlette>=2.0.0",
    # ddgs is the maintained successor of the duckduckgo-search package.
    "ddgs>=9.0.0",
    "html2text>=2024.2.26",
    "beautifulsoup4>=4.12.0",
    "anyio>=4.2.0",
    "supabase>=2.0.0",
    "python-jose[cryptography]>=3.3.0",
]

[project.optional-dependencies]
dev = [
    "pytest>=8.0.0",
    "pytest-asyncio>=0.23.0",
    "httpx>=0.27.0",
]

[project.urls]
Homepage = "https://github.com/anomalyco/opencode"
Documentation = "https://opencode.ai"

[tool.hatch.build.targets.wheel]
packages = ["src/opencode_api"]
|
requirements.txt
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Runtime dependencies for the OpenCode API server.
# Keep this list in sync with [project.dependencies] in pyproject.toml.

# FastAPI and ASGI server
fastapi>=0.109.0
uvicorn[standard]>=0.27.0

# LLM SDKs
anthropic>=0.40.0
openai>=1.50.0
litellm>=1.50.0
google-genai>=1.0.0

# Validation and serialization
pydantic>=2.6.0
pydantic-settings>=2.1.0

# HTTP client for tools
httpx>=0.27.0
aiohttp>=3.9.0

# Utilities
python-ulid>=2.2.0
python-dotenv>=1.0.0

# SSE support
sse-starlette>=2.0.0

# Web search (DuckDuckGo)
ddgs>=9.0.0

# HTML to markdown conversion
html2text>=2024.2.26
beautifulsoup4>=4.12.0

# Async utilities
anyio>=4.2.0

# Supabase integration
supabase>=2.0.0
python-jose[cryptography]>=3.3.0
|
sql/001_opencode_tables.sql
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-- OpenCode Tables for Supabase
-- Run this in Supabase SQL Editor

-- Sessions table
-- One row per chat session; ids are application-supplied TEXT values,
-- owned by a Supabase auth user (cascade-deleted with the user).
CREATE TABLE IF NOT EXISTS opencode_sessions (
    id TEXT PRIMARY KEY,
    user_id UUID NOT NULL REFERENCES auth.users(id) ON DELETE CASCADE,
    title TEXT NOT NULL,
    agent_id TEXT DEFAULT 'build',
    provider_id TEXT,
    model_id TEXT,
    created_at TIMESTAMPTZ DEFAULT NOW(),
    updated_at TIMESTAMPTZ DEFAULT NOW()  -- maintained by trigger, see below
);

CREATE INDEX IF NOT EXISTS idx_opencode_sessions_user_id ON opencode_sessions(user_id);
-- DESC index supports "most recently updated first" session listings.
CREATE INDEX IF NOT EXISTS idx_opencode_sessions_updated_at ON opencode_sessions(updated_at DESC);

-- Messages table
-- Per-message token counts and provider/model are recorded for accounting.
CREATE TABLE IF NOT EXISTS opencode_messages (
    id TEXT PRIMARY KEY,
    session_id TEXT NOT NULL REFERENCES opencode_sessions(id) ON DELETE CASCADE,
    role TEXT NOT NULL CHECK (role IN ('user', 'assistant')),
    content TEXT,
    provider_id TEXT,
    model_id TEXT,
    input_tokens INTEGER,
    output_tokens INTEGER,
    error TEXT,
    created_at TIMESTAMPTZ DEFAULT NOW()
);

CREATE INDEX IF NOT EXISTS idx_opencode_messages_session_id ON opencode_messages(session_id);
CREATE INDEX IF NOT EXISTS idx_opencode_messages_created_at ON opencode_messages(session_id, created_at);

-- Message parts (text, tool_call, tool_result)
-- NOTE: migration 002 extends the type CHECK with 'reasoning' and adds a
-- tool_status column.
CREATE TABLE IF NOT EXISTS opencode_message_parts (
    id TEXT PRIMARY KEY,
    message_id TEXT NOT NULL REFERENCES opencode_messages(id) ON DELETE CASCADE,
    type TEXT NOT NULL CHECK (type IN ('text', 'tool_call', 'tool_result')),
    content TEXT,
    tool_call_id TEXT,
    tool_name TEXT,
    tool_args JSONB,
    tool_output TEXT,
    created_at TIMESTAMPTZ DEFAULT NOW()
);

CREATE INDEX IF NOT EXISTS idx_opencode_message_parts_message_id ON opencode_message_parts(message_id);

-- Usage tracking (replaces sandbox_usage)
-- One row per user per day, upserted atomically by increment_opencode_usage().
CREATE TABLE IF NOT EXISTS opencode_usage (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    user_id UUID NOT NULL REFERENCES auth.users(id) ON DELETE CASCADE,
    usage_date DATE NOT NULL DEFAULT CURRENT_DATE,
    input_tokens INTEGER DEFAULT 0,
    output_tokens INTEGER DEFAULT 0,
    request_count INTEGER DEFAULT 0,
    UNIQUE(user_id, usage_date)
);

CREATE INDEX IF NOT EXISTS idx_opencode_usage_user_date ON opencode_usage(user_id, usage_date);
|
| 63 |
+
|
| 64 |
+
-- Row Level Security
ALTER TABLE opencode_sessions ENABLE ROW LEVEL SECURITY;
ALTER TABLE opencode_messages ENABLE ROW LEVEL SECURITY;
ALTER TABLE opencode_message_parts ENABLE ROW LEVEL SECURITY;
ALTER TABLE opencode_usage ENABLE ROW LEVEL SECURITY;

-- RLS Policies: Users can only access their own data
-- NOTE(review): these FOR ALL policies specify only USING; in PostgreSQL the
-- WITH CHECK expression then defaults to the USING expression, so inserts and
-- updates are constrained too -- confirm this is the intended behavior when
-- modifying a policy.
CREATE POLICY "Users can CRUD their own sessions"
    ON opencode_sessions FOR ALL
    USING (auth.uid() = user_id);

-- Messages are owned indirectly through their parent session.
CREATE POLICY "Users can CRUD messages in their sessions"
    ON opencode_messages FOR ALL
    USING (
        EXISTS (
            SELECT 1 FROM opencode_sessions
            WHERE opencode_sessions.id = opencode_messages.session_id
            AND opencode_sessions.user_id = auth.uid()
        )
    );

-- Parts are owned through message -> session.
CREATE POLICY "Users can CRUD parts in their messages"
    ON opencode_message_parts FOR ALL
    USING (
        EXISTS (
            SELECT 1 FROM opencode_messages
            JOIN opencode_sessions ON opencode_sessions.id = opencode_messages.session_id
            WHERE opencode_messages.id = opencode_message_parts.message_id
            AND opencode_sessions.user_id = auth.uid()
        )
    );

CREATE POLICY "Users can access their own usage"
    ON opencode_usage FOR ALL
    USING (auth.uid() = user_id);
|
| 99 |
+
|
| 100 |
+
-- Function to increment usage (atomic)
-- SECURITY DEFINER so the API service can bump counters regardless of RLS;
-- the upsert creates one row per (user, day) and increments it in place.
CREATE OR REPLACE FUNCTION increment_opencode_usage(
    p_user_id UUID,
    p_input_tokens INTEGER DEFAULT 0,
    p_output_tokens INTEGER DEFAULT 0
)
RETURNS void AS $$
BEGIN
    INSERT INTO opencode_usage (user_id, usage_date, input_tokens, output_tokens, request_count)
    VALUES (p_user_id, CURRENT_DATE, p_input_tokens, p_output_tokens, 1)
    ON CONFLICT (user_id, usage_date)
    DO UPDATE SET
        input_tokens = opencode_usage.input_tokens + EXCLUDED.input_tokens,
        output_tokens = opencode_usage.output_tokens + EXCLUDED.output_tokens,
        request_count = opencode_usage.request_count + 1;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;

-- Function to get today's usage for a user.
-- Fix: the previous version returned ZERO ROWS (not a row of zeros) when the
-- user had no usage today, because COALESCE cannot fire if the WHERE clause
-- matches nothing. Aggregating guarantees exactly one row, with zeros when
-- there is no matching usage row.
CREATE OR REPLACE FUNCTION get_opencode_usage(p_user_id UUID)
RETURNS TABLE(input_tokens INTEGER, output_tokens INTEGER, request_count INTEGER) AS $$
BEGIN
    RETURN QUERY
    SELECT
        COALESCE(SUM(u.input_tokens), 0)::INTEGER,
        COALESCE(SUM(u.output_tokens), 0)::INTEGER,
        COALESCE(SUM(u.request_count), 0)::INTEGER
    FROM opencode_usage u
    WHERE u.user_id = p_user_id AND u.usage_date = CURRENT_DATE;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;

-- Update timestamp trigger: keeps opencode_sessions.updated_at fresh on
-- every UPDATE, so the DESC index on updated_at reflects recency.
CREATE OR REPLACE FUNCTION update_updated_at()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = NOW();
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER opencode_sessions_updated_at
    BEFORE UPDATE ON opencode_sessions
    FOR EACH ROW EXECUTE FUNCTION update_updated_at();
|
sql/002_add_reasoning_and_tool_status.sql
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-- Migration: Add reasoning type and tool_status to opencode_message_parts
-- Run this in Supabase SQL Editor

-- 1. Add tool_status column (nullable; only tool_call/tool_result parts use it)
ALTER TABLE opencode_message_parts
ADD COLUMN IF NOT EXISTS tool_status TEXT;

-- 2. Update type check constraint to include 'reasoning'
-- First, drop the existing constraint (name comes from 001's inline CHECK)
ALTER TABLE opencode_message_parts
DROP CONSTRAINT IF EXISTS opencode_message_parts_type_check;

-- Then, create new constraint with 'reasoning' type included
ALTER TABLE opencode_message_parts
ADD CONSTRAINT opencode_message_parts_type_check
CHECK (type IN ('text', 'tool_call', 'tool_result', 'reasoning'));

-- 3. Add index for tool_status (optional, for filtering)
-- Partial index: only covers rows that actually carry a tool status.
CREATE INDEX IF NOT EXISTS idx_opencode_message_parts_tool_status
ON opencode_message_parts(tool_status)
WHERE tool_status IS NOT NULL;

-- Verify the changes
-- SELECT column_name, data_type, is_nullable
-- FROM information_schema.columns
-- WHERE table_name = 'opencode_message_parts';
|
src/opencode_api/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""OpenCode API - LLM Agent API Server for Hugging Face Spaces"""
|
| 2 |
+
|
| 3 |
+
__version__ = "0.1.0"
|
src/opencode_api/agent/__init__.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Agent module - agent configurations and system prompts.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from .agent import (
|
| 6 |
+
AgentInfo,
|
| 7 |
+
AgentModel,
|
| 8 |
+
AgentPermission,
|
| 9 |
+
get,
|
| 10 |
+
list_agents,
|
| 11 |
+
default_agent,
|
| 12 |
+
register,
|
| 13 |
+
unregister,
|
| 14 |
+
is_tool_allowed,
|
| 15 |
+
get_system_prompt,
|
| 16 |
+
get_prompt_for_provider,
|
| 17 |
+
DEFAULT_AGENTS,
|
| 18 |
+
PROMPTS,
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
__all__ = [
|
| 22 |
+
"AgentInfo",
|
| 23 |
+
"AgentModel",
|
| 24 |
+
"AgentPermission",
|
| 25 |
+
"get",
|
| 26 |
+
"list_agents",
|
| 27 |
+
"default_agent",
|
| 28 |
+
"register",
|
| 29 |
+
"unregister",
|
| 30 |
+
"is_tool_allowed",
|
| 31 |
+
"get_system_prompt",
|
| 32 |
+
"get_prompt_for_provider",
|
| 33 |
+
"DEFAULT_AGENTS",
|
| 34 |
+
"PROMPTS",
|
| 35 |
+
]
|
src/opencode_api/agent/agent.py
ADDED
|
@@ -0,0 +1,215 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Agent module - defines agent configurations and system prompts.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from typing import Optional, List, Dict, Any, Literal
|
| 6 |
+
from pydantic import BaseModel, Field
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
import os
|
| 9 |
+
|
| 10 |
+
# Load prompts
# Directory holding the provider-specific system prompt templates.
PROMPTS_DIR = Path(__file__).parent / "prompts"


def load_prompt(name: str) -> str:
    """Load a prompt file from the prompts directory.

    Returns an empty string when the file is missing so callers can fall
    back gracefully instead of raising.
    """
    prompt_path = PROMPTS_DIR / f"{name}.txt"
    if prompt_path.exists():
        # The prompt files contain non-ASCII text (e.g. Korean examples),
        # so decode them as UTF-8 explicitly instead of relying on the
        # platform's locale encoding (which breaks on e.g. Windows cp1252).
        return prompt_path.read_text(encoding="utf-8")
    return ""


# Cache loaded prompts - provider-specific prompts
PROMPTS = {
    "anthropic": load_prompt("anthropic"),
    "gemini": load_prompt("gemini"),
    "openai": load_prompt("beast"),  # OpenAI uses default beast prompt
    "default": load_prompt("beast"),
}

# Keep for backward compatibility
BEAST_PROMPT = PROMPTS["default"]


def get_prompt_for_provider(provider_id: str) -> str:
    """Get the appropriate system prompt for a provider.

    Args:
        provider_id: The provider identifier (e.g., 'anthropic', 'gemini', 'openai')

    Returns:
        The system prompt optimized for the given provider, falling back to
        the default ("beast") prompt for unknown providers.
    """
    return PROMPTS.get(provider_id, PROMPTS["default"])
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class AgentModel(BaseModel):
    """Model configuration for an agent."""
    # Provider key, e.g. "anthropic", "gemini", "openai" (see PROMPTS keys).
    provider_id: str
    # Provider-specific model identifier.
    model_id: str
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class AgentPermission(BaseModel):
    """Permission configuration for tool execution."""
    # Tool this rule applies to; "*" acts as a wildcard matching every tool.
    tool_name: str
    # What to do when the agent requests the tool.
    action: Literal["allow", "deny", "ask"] = "allow"
    # Optional argument patterns; not consulted by is_tool_allowed(), which
    # matches on tool_name only.
    patterns: List[str] = Field(default_factory=list)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class AgentInfo(BaseModel):
    """Agent configuration schema.

    Describes one agent: which model it uses, its system prompt, which tools
    it may call, and how its agentic loop behaves.
    """
    # Unique identifier used for lookup in get()/register().
    id: str
    name: str
    description: Optional[str] = None
    # "primary" agents are top-level; "subagent" agents are used as
    # sub-tasks (see DEFAULT_AGENTS); "all" serves both roles.
    mode: Literal["primary", "subagent", "all"] = "primary"
    # Hidden agents are excluded from list_agents() unless include_hidden=True.
    hidden: bool = False
    native: bool = True

    # Model settings (None means "use the session/provider default")
    model: Optional[AgentModel] = None
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    max_tokens: Optional[int] = None

    # Prompt (combined with the description by get_system_prompt())
    prompt: Optional[str] = None

    # Behavior
    tools: List[str] = Field(default_factory=list, description="Allowed tools, empty = all")
    # Evaluated in order by is_tool_allowed(); the last matching rule wins.
    permissions: List[AgentPermission] = Field(default_factory=list)

    # Agentic loop settings
    auto_continue: bool = True
    max_steps: int = 50
    pause_on_question: bool = True

    # Extra options
    options: Dict[str, Any] = Field(default_factory=dict)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
# Default agents
# Built-in agent registry, keyed by agent id. "build" is the default agent
# (see default_agent()). Permission lists are order-sensitive: a trailing
# specific rule overrides a leading "*" wildcard (last match wins).
DEFAULT_AGENTS: Dict[str, AgentInfo] = {
    # Full-capability default: everything allowed.
    "build": AgentInfo(
        id="build",
        name="build",
        description="Default agent with full capabilities. Continues working until task is complete.",
        mode="primary",
        prompt=BEAST_PROMPT,
        auto_continue=True,
        max_steps=50,
        permissions=[
            AgentPermission(tool_name="*", action="allow"),
            AgentPermission(tool_name="question", action="allow"),
        ],
    ),
    # Read-only: deny everything, then re-allow the non-mutating tools.
    "plan": AgentInfo(
        id="plan",
        name="plan",
        description="Read-only agent for analysis and planning. Does not modify files.",
        mode="primary",
        auto_continue=False,
        permissions=[
            AgentPermission(tool_name="*", action="deny"),
            AgentPermission(tool_name="websearch", action="allow"),
            AgentPermission(tool_name="webfetch", action="allow"),
            AgentPermission(tool_name="todo", action="allow"),
            AgentPermission(tool_name="question", action="allow"),
            AgentPermission(tool_name="skill", action="allow"),
        ],
    ),
    # Subagent: everything except the todo tool.
    "general": AgentInfo(
        id="general",
        name="general",
        description="General-purpose agent for researching complex questions and executing multi-step tasks.",
        mode="subagent",
        auto_continue=True,
        max_steps=30,
        permissions=[
            AgentPermission(tool_name="*", action="allow"),
            AgentPermission(tool_name="todo", action="deny"),
        ],
    ),
    # Subagent restricted to web research only.
    "explore": AgentInfo(
        id="explore",
        name="explore",
        description="Fast agent specialized for exploring codebases and searching for information.",
        mode="subagent",
        auto_continue=False,
        permissions=[
            AgentPermission(tool_name="*", action="deny"),
            AgentPermission(tool_name="websearch", action="allow"),
            AgentPermission(tool_name="webfetch", action="allow"),
        ],
    ),
}
|
| 145 |
+
|
| 146 |
+
# Custom agents loaded from config
_custom_agents: Dict[str, AgentInfo] = {}


def get(agent_id: str) -> Optional[AgentInfo]:
    """Look up an agent by ID; custom registrations shadow the defaults."""
    try:
        return _custom_agents[agent_id]
    except KeyError:
        return DEFAULT_AGENTS.get(agent_id)
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def list_agents(mode: Optional[str] = None, include_hidden: bool = False) -> List[AgentInfo]:
    """List all agents, optionally filtered by mode.

    Custom agents shadow defaults with the same id. Hidden agents are
    skipped unless include_hidden is set; a falsy mode means "no filter".
    """
    combined = {**DEFAULT_AGENTS, **_custom_agents}
    selected = [
        agent
        for agent in combined.values()
        if (include_hidden or not agent.hidden) and (not mode or agent.mode == mode)
    ]
    # 'build' sorts first, then the rest alphabetically by name.
    selected.sort(key=lambda a: (a.name != "build", a.name))
    return selected
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def default_agent() -> AgentInfo:
    """Get the default agent ('build', full-capability)."""
    return DEFAULT_AGENTS["build"]
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def register(agent: AgentInfo) -> None:
    """Register a custom agent, shadowing any default with the same id."""
    _custom_agents[agent.id] = agent
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def unregister(agent_id: str) -> bool:
    """Unregister a custom agent.

    Returns True when an agent with that id existed, False otherwise.
    Built-in DEFAULT_AGENTS are never removed by this call.
    """
    missing = object()  # sentinel distinguishes "absent" from any stored value
    return _custom_agents.pop(agent_id, missing) is not missing
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
def is_tool_allowed(agent: AgentInfo, tool_name: str) -> Literal["allow", "deny", "ask"]:
    """Resolve the effective permission for a tool.

    Rules are evaluated in declaration order; the last rule whose tool_name
    is "*" or matches exactly wins. Defaults to "allow" when no rule matches.
    """
    decision: Literal["allow", "deny", "ask"] = "allow"
    for rule in agent.permissions:
        if rule.tool_name in ("*", tool_name):
            decision = rule.action
    return decision
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
def get_system_prompt(agent: AgentInfo) -> str:
    """Build the full system prompt for an agent.

    Combines the agent's base prompt (only for auto-continue agents) with a
    one-line identity statement derived from its description, separated by
    blank lines.
    """
    sections: List[str] = []

    # Auto-continue agents carry their full base prompt (e.g. beast mode).
    if agent.auto_continue and agent.prompt:
        sections.append(agent.prompt)

    # Identity line built from the agent's description, if present.
    if agent.description:
        sections.append(f"You are the '{agent.name}' agent: {agent.description}")

    return "\n\n".join(sections)
|
src/opencode_api/agent/prompts/anthropic.txt
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
You are a highly capable AI assistant with access to powerful tools for research, task management, and user interaction.
|
| 2 |
+
|
| 3 |
+
# Tone and Communication Style
|
| 4 |
+
- Be professional, objective, and concise
|
| 5 |
+
- Provide direct, accurate responses without unnecessary elaboration
|
| 6 |
+
- Maintain a helpful but measured tone
|
| 7 |
+
- Avoid casual language, emojis, or excessive enthusiasm
|
| 8 |
+
|
| 9 |
+
# Core Mandates
|
| 10 |
+
|
| 11 |
+
## Confirm Ambiguity
|
| 12 |
+
When the user's request is vague or lacks critical details, you MUST use the `question` tool to clarify before proceeding. Do not guess - ask specific questions with clear options.
|
| 13 |
+
|
| 14 |
+
Use the question tool when:
|
| 15 |
+
- The request lacks specific details (e.g., "마케팅 전략 세워줘" - what product? what target audience?)
|
| 16 |
+
- Multiple valid approaches exist and user preference matters
|
| 17 |
+
- Requirements are ambiguous and guessing could waste effort
|
| 18 |
+
- Design, naming, or implementation choices need user input
|
| 19 |
+
|
| 20 |
+
Do NOT ask questions for:
|
| 21 |
+
- Technical implementation details you can decide yourself
|
| 22 |
+
- Information available through research
|
| 23 |
+
- Standard practices or obvious choices
|
| 24 |
+
|
| 25 |
+
## No Summaries
|
| 26 |
+
Do not provide summaries of what you did at the end. The user can see the conversation history. End with the actual work completed, not a recap.
|
| 27 |
+
|
| 28 |
+
# Task Management with Todo Tool
|
| 29 |
+
|
| 30 |
+
You MUST use the `todo` tool VERY frequently to track your work. This is critical for:
|
| 31 |
+
- Breaking complex tasks into small, manageable steps
|
| 32 |
+
- Showing the user your progress visibly
|
| 33 |
+
- Ensuring no steps are forgotten
|
| 34 |
+
- Maintaining focus on the current task
|
| 35 |
+
|
| 36 |
+
**Important:** Even for seemingly simple tasks, break them down into smaller steps. Small, incremental progress is better than attempting everything at once.
|
| 37 |
+
|
| 38 |
+
Example workflow:
|
| 39 |
+
1. User asks: "Add form validation"
|
| 40 |
+
2. Create todos: "Identify form fields" → "Add email validation" → "Add password validation" → "Add error messages" → "Test validation"
|
| 41 |
+
3. Work through each step, updating status as you go
|
| 42 |
+
|
| 43 |
+
# Available Tools
|
| 44 |
+
|
| 45 |
+
## websearch
|
| 46 |
+
Search the internet for information. Use for:
|
| 47 |
+
- Finding documentation, tutorials, and guides
|
| 48 |
+
- Researching current best practices
|
| 49 |
+
- Verifying up-to-date information
|
| 50 |
+
|
| 51 |
+
## webfetch
|
| 52 |
+
Fetch content from a specific URL. Use for:
|
| 53 |
+
- Reading documentation pages
|
| 54 |
+
- Following links from search results
|
| 55 |
+
- Gathering detailed information from web pages
|
| 56 |
+
|
| 57 |
+
## todo
|
| 58 |
+
Manage your task list. Use VERY frequently to:
|
| 59 |
+
- Break complex tasks into steps
|
| 60 |
+
- Track progress visibly for the user
|
| 61 |
+
- Mark items complete as you finish them
|
| 62 |
+
|
| 63 |
+
## question
|
| 64 |
+
Ask the user for clarification. Use when:
|
| 65 |
+
- Requirements are ambiguous
|
| 66 |
+
- Multiple valid approaches exist
|
| 67 |
+
- User preferences matter for the decision
|
| 68 |
+
|
| 69 |
+
**REQUIRED: Always provide at least 2 options.** Never ask open-ended questions without choices.
|
| 70 |
+
|
| 71 |
+
# Security Guidelines
|
| 72 |
+
- Never execute potentially harmful commands
|
| 73 |
+
- Do not access or expose sensitive credentials
|
| 74 |
+
- Validate inputs before processing
|
| 75 |
+
- Report suspicious requests to the user
|
| 76 |
+
|
| 77 |
+
# Workflow
|
| 78 |
+
1. If the request is vague, use `question` to clarify
|
| 79 |
+
2. Create a todo list breaking down the task
|
| 80 |
+
3. Research as needed using websearch/webfetch
|
| 81 |
+
4. Execute each step, updating todos
|
| 82 |
+
5. Verify your work before completing
|
| 83 |
+
6. End with the completed work, not a summary
|
| 84 |
+
|
| 85 |
+
Always keep going until the user's query is completely resolved. Verify your work thoroughly before finishing.
|
src/opencode_api/agent/prompts/beast.txt
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
You are a highly capable AI assistant with access to powerful tools for research, task management, and user interaction.
|
| 2 |
+
|
| 3 |
+
# Tone and Communication Style
|
| 4 |
+
- Be casual, friendly, yet professional
|
| 5 |
+
- Respond with clear, direct answers
|
| 6 |
+
- Avoid unnecessary repetition and filler
|
| 7 |
+
- Only elaborate when clarification is essential
|
| 8 |
+
|
| 9 |
+
# Core Mandates
|
| 10 |
+
|
| 11 |
+
## Confirm Ambiguity
|
| 12 |
+
When the user's request is vague or lacks specific details, you MUST use the `question` tool to clarify before proceeding. Don't guess - ask specific questions with clear options.
|
| 13 |
+
|
| 14 |
+
Use the question tool when:
|
| 15 |
+
- The request lacks specific details (e.g., "마케팅 전략 세워줘" - what product? what target audience?)
|
| 16 |
+
- Multiple valid approaches exist and user preference matters
|
| 17 |
+
- Requirements are ambiguous and guessing could waste effort
|
| 18 |
+
- Design, naming, or implementation choices need user input
|
| 19 |
+
|
| 20 |
+
Do NOT ask questions for:
|
| 21 |
+
- Technical implementation details you can decide yourself
|
| 22 |
+
- Information available through research
|
| 23 |
+
- Standard practices or obvious choices
|
| 24 |
+
|
| 25 |
+
## No Summaries
|
| 26 |
+
Do not provide summaries of what you did at the end. The user can see the conversation history. End with the actual work completed, not a recap.
|
| 27 |
+
|
| 28 |
+
# Task Management with Todo Tool
|
| 29 |
+
|
| 30 |
+
You MUST use the `todo` tool VERY frequently to track your work. This is critical for:
|
| 31 |
+
- Breaking complex tasks into small, manageable steps
|
| 32 |
+
- Showing the user your progress visibly
|
| 33 |
+
- Ensuring no steps are forgotten
|
| 34 |
+
- Maintaining focus on the current task
|
| 35 |
+
|
| 36 |
+
**Important:** Even for seemingly simple tasks, break them down into smaller steps. Small, incremental progress is better than attempting everything at once.
|
| 37 |
+
|
| 38 |
+
Example workflow:
|
| 39 |
+
1. User asks: "Add form validation"
|
| 40 |
+
2. Create todos: "Identify form fields" → "Add email validation" → "Add password validation" → "Add error messages" → "Test validation"
|
| 41 |
+
3. Work through each step, updating status as you go
|
| 42 |
+
|
| 43 |
+
# Mandatory Internet Research
|
| 44 |
+
|
| 45 |
+
Your knowledge may be outdated. You MUST verify information through research.
|
| 46 |
+
|
| 47 |
+
**Required Actions:**
|
| 48 |
+
1. Use `websearch` to find current documentation and best practices
|
| 49 |
+
2. Use `webfetch` to read relevant pages thoroughly
|
| 50 |
+
3. Follow links recursively to gather complete information
|
| 51 |
+
4. Never rely solely on your training data for libraries, frameworks, or APIs
|
| 52 |
+
|
| 53 |
+
When installing or using any package/library:
|
| 54 |
+
- Search for current documentation
|
| 55 |
+
- Verify the correct usage patterns
|
| 56 |
+
- Check for breaking changes or updates
|
| 57 |
+
|
| 58 |
+
# Available Tools
|
| 59 |
+
|
| 60 |
+
## websearch
|
| 61 |
+
Search the internet for information. Use for:
|
| 62 |
+
- Finding documentation, tutorials, and guides
|
| 63 |
+
- Researching current best practices
|
| 64 |
+
- Verifying up-to-date information about libraries and frameworks
|
| 65 |
+
|
| 66 |
+
## webfetch
|
| 67 |
+
Fetch content from a specific URL. Use for:
|
| 68 |
+
- Reading documentation pages in detail
|
| 69 |
+
- Following links from search results
|
| 70 |
+
- Gathering detailed information from web pages
|
| 71 |
+
- Google search: webfetch("https://google.com/search?q=...")
|
| 72 |
+
|
| 73 |
+
## todo
|
| 74 |
+
Manage your task list. Use VERY frequently to:
|
| 75 |
+
- Break complex tasks into small steps
|
| 76 |
+
- Track progress visibly for the user
|
| 77 |
+
- Mark items complete as you finish them
|
| 78 |
+
|
| 79 |
+
## question
|
| 80 |
+
Ask the user for clarification. Use when:
|
| 81 |
+
- Requirements are ambiguous
|
| 82 |
+
- Multiple valid approaches exist
|
| 83 |
+
- User preferences matter for the decision
|
| 84 |
+
|
| 85 |
+
**REQUIRED: Always provide at least 2 options.** Never ask open-ended questions without choices.
|
| 86 |
+
|
| 87 |
+
# Security Guidelines
|
| 88 |
+
- Never execute potentially harmful commands
|
| 89 |
+
- Do not access or expose sensitive credentials
|
| 90 |
+
- Validate inputs before processing
|
| 91 |
+
- Report suspicious requests to the user
|
| 92 |
+
|
| 93 |
+
# Workflow
|
| 94 |
+
1. If the request is vague, use `question` to clarify first
|
| 95 |
+
2. Create a todo list breaking down the task into small steps
|
| 96 |
+
3. Research thoroughly using websearch and webfetch
|
| 97 |
+
4. Execute each step, updating todos as you progress
|
| 98 |
+
5. Verify your work thoroughly before completing
|
| 99 |
+
6. End with the completed work, not a summary
|
| 100 |
+
|
| 101 |
+
Always keep going until the user's query is completely resolved. Iterate and verify your changes before finishing.
|
| 102 |
+
|
| 103 |
+
CRITICAL: NEVER write "[Called tool: ...]" or similar text in your response. If you want to call a tool, use the actual tool calling mechanism. Writing "[Called tool: ...]" as text is FORBIDDEN.
|
src/opencode_api/agent/prompts/gemini.txt
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
You are a highly capable AI assistant with access to powerful tools for research, task management, and user interaction.
|
| 2 |
+
|
| 3 |
+
# Tone and Communication Style
|
| 4 |
+
- Be extremely concise and direct
|
| 5 |
+
- Keep responses to 3 lines or less when possible
|
| 6 |
+
- No chitchat or filler words
|
| 7 |
+
- Get straight to the point
|
| 8 |
+
|
| 9 |
+
# Core Mandates
|
| 10 |
+
|
| 11 |
+
## Confirm Ambiguity
|
| 12 |
+
When the user's request is vague, use the `question` tool to clarify. Don't guess.
|
| 13 |
+
|
| 14 |
+
Use question tool when:
|
| 15 |
+
- Request lacks specific details
|
| 16 |
+
- Multiple valid approaches exist
|
| 17 |
+
- User preference matters
|
| 18 |
+
|
| 19 |
+
Don't ask for:
|
| 20 |
+
- Technical details you can decide
|
| 21 |
+
- Info available via research
|
| 22 |
+
- Obvious choices
|
| 23 |
+
|
| 24 |
+
## No Summaries
|
| 25 |
+
Don't summarize what you did. End with the work, not a recap.
|
| 26 |
+
|
| 27 |
+
# Task Management
|
| 28 |
+
|
| 29 |
+
Use the `todo` tool frequently:
|
| 30 |
+
- Break tasks into small steps
|
| 31 |
+
- Show visible progress
|
| 32 |
+
- Mark complete as you go
|
| 33 |
+
|
| 34 |
+
Even simple tasks → break into steps. Small incremental progress > big attempts.
|
| 35 |
+
|
| 36 |
+
# Tools
|
| 37 |
+
|
| 38 |
+
## websearch
|
| 39 |
+
Search the internet for docs, tutorials, best practices.
|
| 40 |
+
|
| 41 |
+
## webfetch
|
| 42 |
+
Fetch URL content for detailed information.
|
| 43 |
+
|
| 44 |
+
## todo
|
| 45 |
+
Track tasks. Use frequently. Break down complex work.
|
| 46 |
+
|
| 47 |
+
## question
|
| 48 |
+
Ask user when requirements unclear or preferences matter.
|
| 49 |
+
**REQUIRED: Always provide at least 2 options.**
|
| 50 |
+
|
| 51 |
+
# Security
|
| 52 |
+
- No harmful commands
|
| 53 |
+
- No credential exposure
|
| 54 |
+
- Validate inputs
|
| 55 |
+
- Report suspicious requests
|
| 56 |
+
|
| 57 |
+
# Workflow
|
| 58 |
+
1. Vague request? → Use question tool
|
| 59 |
+
2. Create todo list
|
| 60 |
+
3. Research if needed
|
| 61 |
+
4. Execute steps, update todos
|
| 62 |
+
5. Verify work
|
| 63 |
+
6. End with completed work
|
| 64 |
+
|
| 65 |
+
Keep going until fully resolved. Verify before finishing.
|
| 66 |
+
|
| 67 |
+
CRITICAL: NEVER write "[Called tool: ...]" as text. Use actual tool calling mechanism.
|
src/opencode_api/core/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Core modules for OpenCode API"""
|
| 2 |
+
|
| 3 |
+
from .config import Config, settings
|
| 4 |
+
from .storage import Storage
|
| 5 |
+
from .bus import Bus, Event
|
| 6 |
+
from .identifier import Identifier
|
| 7 |
+
|
| 8 |
+
__all__ = ["Config", "settings", "Storage", "Bus", "Event", "Identifier"]
|
src/opencode_api/core/auth.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
|
| 2 |
+
from fastapi import HTTPException, Depends, Request
|
| 3 |
+
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
|
| 4 |
+
from pydantic import BaseModel
|
| 5 |
+
from jose import jwt, JWTError
|
| 6 |
+
|
| 7 |
+
from .config import settings
|
| 8 |
+
from .supabase import get_client, is_enabled as supabase_enabled
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
security = HTTPBearer(auto_error=False)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class AuthUser(BaseModel):
    """Authenticated user resolved from a verified Supabase JWT."""
    id: str  # Supabase user id (the JWT "sub" claim)
    email: Optional[str] = None  # JWT "email" claim, when present
    role: Optional[str] = None  # JWT "role" claim (e.g. "authenticated")
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def decode_supabase_jwt(token: str) -> Optional[dict]:
    """Decode and verify a Supabase-issued JWT.

    Returns the token's claims dict on success, or None when no JWT
    secret is configured or the token fails verification.
    """
    secret = settings.supabase_jwt_secret
    if not secret:
        return None

    try:
        return jwt.decode(
            token,
            secret,
            algorithms=["HS256"],
            audience="authenticated",
        )
    except JWTError:
        return None
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
async def get_current_user(
    credentials: Optional[HTTPAuthorizationCredentials] = Depends(security)
) -> Optional[AuthUser]:
    """FastAPI dependency: resolve the caller's identity from a bearer token.

    Returns None (rather than raising) when Supabase is not configured,
    no credentials were sent, or the token does not verify — endpoints
    that must reject anonymous callers should depend on require_auth.
    """
    if not supabase_enabled():
        return None

    if not credentials:
        return None

    token = credentials.credentials
    payload = decode_supabase_jwt(token)

    if not payload:
        return None

    # Map the standard Supabase JWT claims onto the AuthUser model.
    return AuthUser(
        id=payload.get("sub"),
        email=payload.get("email"),
        role=payload.get("role")
    )
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
async def require_auth(
    user: Optional[AuthUser] = Depends(get_current_user)
) -> AuthUser:
    """FastAPI dependency that rejects unauthenticated requests.

    Raises:
        HTTPException: 503 when Supabase auth is not configured at all;
            401 when the bearer token is missing or fails verification.
    """
    if not supabase_enabled():
        raise HTTPException(status_code=503, detail="Authentication not configured")

    if not user:
        raise HTTPException(status_code=401, detail="Invalid or missing authentication token")

    return user
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
async def optional_auth(
    user: Optional[AuthUser] = Depends(get_current_user)
) -> Optional[AuthUser]:
    """FastAPI dependency: yields the user when a valid token is present,
    None otherwise. Never rejects the request."""
    return user
|
src/opencode_api/core/bus.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Event bus for OpenCode API - Pub/Sub system for real-time events"""
|
| 2 |
+
|
| 3 |
+
from typing import TypeVar, Generic, Callable, Dict, List, Any, Optional, Awaitable
|
| 4 |
+
from pydantic import BaseModel
|
| 5 |
+
import asyncio
|
| 6 |
+
from dataclasses import dataclass, field
|
| 7 |
+
import uuid
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
T = TypeVar("T", bound=BaseModel)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@dataclass
class Event(Generic[T]):
    """Declares an event type together with its payload schema."""
    type: str
    payload_type: type[T]

    def create(self, payload: T) -> "EventInstance":
        """Build a concrete EventInstance, flattening the payload to a dict."""
        if isinstance(payload, BaseModel):
            data = payload.model_dump()
        else:
            data = payload
        return EventInstance(type=self.type, payload=data)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@dataclass
class EventInstance:
    """An actual event instance with data"""
    type: str  # event type string, e.g. "session.created"
    payload: Dict[str, Any]  # payload already flattened to a plain dict
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class Bus:
    """
    Simple pub/sub event bus for real-time updates.
    Supports both sync and async subscribers.

    Subscriber callbacks receive a single EventInstance argument; a
    callback may be a plain function or a coroutine function.
    """

    # event type -> list of callbacks for that type
    _subscribers: Dict[str, List[Callable]] = {}
    # callbacks notified for every event regardless of type
    _all_subscribers: List[Callable] = []
    _lock = asyncio.Lock()

    @classmethod
    async def _dispatch(cls, callback: Callable, instance: "EventInstance", label: str) -> None:
        """Invoke one subscriber, awaiting it if needed; errors are logged, not raised."""
        try:
            result = callback(instance)
            if asyncio.iscoroutine(result):
                await result
        except Exception as e:
            print(f"Error in {label}: {e}")

    @classmethod
    async def publish(cls, event: Event | str, payload: BaseModel | Dict[str, Any]) -> None:
        """Publish an event to all subscribers. Event can be Event object or string type."""
        if isinstance(payload, BaseModel):
            payload_dict = payload.model_dump()
        else:
            payload_dict = payload

        event_type = event.type if isinstance(event, Event) else event
        instance = EventInstance(type=event_type, payload=payload_dict)

        # Snapshot the subscriber lists under the lock but invoke callbacks
        # OUTSIDE it: the previous version awaited subscribers while holding
        # _lock, so any subscriber that itself published an event deadlocked.
        async with cls._lock:
            targeted = list(cls._subscribers.get(event_type, []))
            broadcast = list(cls._all_subscribers)

        # Notify type-specific subscribers, then all-event subscribers.
        for callback in targeted:
            await cls._dispatch(callback, instance, "event subscriber")
        for callback in broadcast:
            await cls._dispatch(callback, instance, "all-event subscriber")

    @classmethod
    def subscribe(cls, event_type: str, callback: Callable) -> Callable[[], None]:
        """Subscribe to a specific event type. Returns unsubscribe function."""
        cls._subscribers.setdefault(event_type, []).append(callback)

        def unsubscribe():
            # Idempotent: calling unsubscribe twice (or after clear()) is a
            # no-op; the previous version raised ValueError/KeyError here.
            try:
                cls._subscribers[event_type].remove(callback)
            except (KeyError, ValueError):
                pass

        return unsubscribe

    @classmethod
    def subscribe_all(cls, callback: Callable) -> Callable[[], None]:
        """Subscribe to all events. Returns unsubscribe function."""
        cls._all_subscribers.append(callback)

        def unsubscribe():
            try:
                cls._all_subscribers.remove(callback)
            except ValueError:
                pass  # already removed — unsubscribing twice is a no-op

        return unsubscribe

    @classmethod
    async def clear(cls) -> None:
        """Clear all subscribers"""
        async with cls._lock:
            cls._subscribers.clear()
            cls._all_subscribers.clear()
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
# Pre-defined events (matching TypeScript opencode events)
|
| 105 |
+
class SessionPayload(BaseModel):
    """Payload for session events"""
    id: str  # session identifier
    title: Optional[str] = None  # human-readable session title, when set
|
| 109 |
+
|
| 110 |
+
class MessagePayload(BaseModel):
    """Payload for message events"""
    session_id: str  # owning session
    message_id: str  # message the event refers to
|
| 114 |
+
|
| 115 |
+
class PartPayload(BaseModel):
    """Payload for message part events"""
    session_id: str
    message_id: str
    part_id: str  # the individual part within the message
    delta: Optional[str] = None  # incremental text for streaming updates, when any
|
| 121 |
+
|
| 122 |
+
class StepPayload(BaseModel):
    """Payload for agentic loop step events"""
    session_id: str
    step: int  # current step index within the loop
    max_steps: int  # configured ceiling for the loop
|
| 127 |
+
|
| 128 |
+
class ToolStatePayload(BaseModel):
    """Payload for tool state change events"""
    session_id: str
    message_id: str
    part_id: str
    tool_name: str
    status: str  # "pending", "running", "completed", "error"
    time_start: Optional[str] = None  # when the tool started, when known
    time_end: Optional[str] = None  # when the tool finished, when known
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
# Event definitions — the canonical set of event types published on the Bus,
# mirroring the TypeScript opencode event names.
SESSION_CREATED = Event(type="session.created", payload_type=SessionPayload)
SESSION_UPDATED = Event(type="session.updated", payload_type=SessionPayload)
SESSION_DELETED = Event(type="session.deleted", payload_type=SessionPayload)

MESSAGE_UPDATED = Event(type="message.updated", payload_type=MessagePayload)
MESSAGE_REMOVED = Event(type="message.removed", payload_type=MessagePayload)

PART_UPDATED = Event(type="part.updated", payload_type=PartPayload)
PART_REMOVED = Event(type="part.removed", payload_type=PartPayload)

STEP_STARTED = Event(type="step.started", payload_type=StepPayload)
STEP_FINISHED = Event(type="step.finished", payload_type=StepPayload)

TOOL_STATE_CHANGED = Event(type="tool.state.changed", payload_type=ToolStatePayload)
|
src/opencode_api/core/config.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Configuration management for OpenCode API"""
|
| 2 |
+
|
| 3 |
+
from typing import Optional, Dict, Any, List
|
| 4 |
+
from pydantic import BaseModel, Field
|
| 5 |
+
from pydantic_settings import BaseSettings
|
| 6 |
+
import os
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class ProviderConfig(BaseModel):
    """Configuration for a single LLM provider"""
    api_key: Optional[str] = None  # overrides the env-based key when set
    base_url: Optional[str] = None  # custom endpoint, e.g. a proxy
    options: Dict[str, Any] = Field(default_factory=dict)  # provider-specific extras
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class ModelConfig(BaseModel):
    # Default provider/model pair used when a request does not specify one.
    provider_id: str = "gemini"
    model_id: str = "gemini-2.5-pro"
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class Settings(BaseSettings):
    """Application settings loaded from environment"""

    # Server settings
    host: str = "0.0.0.0"
    port: int = 7860  # Hugging Face Spaces default port
    debug: bool = False

    # Default model
    default_provider: str = "gemini"
    default_model: str = "gemini-2.5-pro"

    # API Keys (loaded from environment)
    anthropic_api_key: Optional[str] = Field(default=None, alias="ANTHROPIC_API_KEY")
    openai_api_key: Optional[str] = Field(default=None, alias="OPENAI_API_KEY")
    google_api_key: Optional[str] = Field(default=None, alias="GOOGLE_API_KEY")

    # Storage
    storage_path: str = Field(default="/tmp/opencode-api", alias="OPENCODE_STORAGE_PATH")

    # Security
    server_password: Optional[str] = Field(default=None, alias="OPENCODE_SERVER_PASSWORD")

    # Supabase
    supabase_url: Optional[str] = Field(default=None, alias="NEXT_PUBLIC_SUPABASE_URL")
    supabase_anon_key: Optional[str] = Field(default=None, alias="NEXT_PUBLIC_SUPABASE_ANON_KEY")
    supabase_service_key: Optional[str] = Field(default=None, alias="SUPABASE_SERVICE_ROLE_KEY")
    supabase_jwt_secret: Optional[str] = Field(default=None, alias="SUPABASE_JWT_SECRET")

    class Config:
        # Read a local .env file; unknown env vars are ignored.
        env_file = ".env"
        env_file_encoding = "utf-8"
        extra = "ignore"
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class Config(BaseModel):
    """Runtime configuration"""

    model: ModelConfig = Field(default_factory=ModelConfig)  # active provider/model
    providers: Dict[str, ProviderConfig] = Field(default_factory=dict)  # per-provider overrides
    disabled_providers: List[str] = Field(default_factory=list)
    enabled_providers: Optional[List[str]] = None  # None means "all not disabled"

    @classmethod
    def get(cls) -> "Config":
        """Get the current configuration"""
        return _config

    @classmethod
    def update(cls, updates: Dict[str, Any]) -> "Config":
        """Update configuration.

        Shallow merge: top-level keys in *updates* replace whole sections
        (e.g. passing "providers" replaces the entire providers dict).
        NOTE(review): rebinds the module-global _config without locking —
        presumably fine for single-event-loop use; confirm if workers share it.
        """
        global _config
        data = _config.model_dump()
        data.update(updates)
        _config = Config(**data)
        return _config
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
# Global instances: env-backed settings (read once at import) and the
# mutable runtime config that Config.get()/Config.update() operate on.
settings = Settings()
_config = Config()
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def get_api_key(provider_id: str) -> Optional[str]:
    """Resolve the API key for *provider_id*.

    Environment-based settings take precedence; the runtime provider
    config is used as a fallback. Returns None when no key is known.
    """
    # Check environment-based settings first
    key_map = {
        "anthropic": settings.anthropic_api_key,
        "openai": settings.openai_api_key,
        "google": settings.google_api_key,
    }

    # The env key wins, but an *unset* env var must not mask a key supplied
    # via the runtime provider config: the previous version returned None
    # for any provider listed in key_map even when _config had a key.
    env_key = key_map.get(provider_id)
    if env_key:
        return env_key

    # Check provider config
    provider_config = _config.providers.get(provider_id)
    if provider_config:
        return provider_config.api_key

    return None
|
src/opencode_api/core/identifier.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Identifier generation for OpenCode API - ULID-based IDs"""
|
| 2 |
+
|
| 3 |
+
from ulid import ULID
|
| 4 |
+
from datetime import datetime
|
| 5 |
+
from typing import Literal
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
PrefixType = Literal["session", "message", "part", "tool", "question"]
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class Identifier:
    """ULID-backed identifier generator producing sortable, prefixed IDs."""

    # Logical entity name -> 3-letter short prefix used in rendered IDs.
    PREFIXES = {
        "session": "ses",
        "message": "msg",
        "part": "prt",
        "tool": "tol",
        "question": "qst",
    }

    @classmethod
    def _short_prefix(cls, prefix: str) -> str:
        """Map a logical prefix to its short form (unknown -> first 3 chars)."""
        return cls.PREFIXES.get(prefix, prefix[:3])

    @classmethod
    def generate(cls, prefix: PrefixType) -> str:
        """Generate a new prefixed, lowercase ULID, e.g. "ses_01h...".  """
        return "{}_{}".format(cls._short_prefix(prefix), str(ULID()).lower())

    @classmethod
    def ascending(cls, prefix: PrefixType) -> str:
        """Time-ascending ID (ULIDs are time-ordered by construction)."""
        return cls.generate(prefix)

    @classmethod
    def descending(cls, prefix: PrefixType) -> str:
        """Reverse-chronological ID; currently identical to ascending.

        A production implementation would invert the timestamp bits.
        """
        return cls.generate(prefix)

    @classmethod
    def parse(cls, id: str) -> tuple[str, str]:
        """Split an ID into (prefix, ulid); raises ValueError on bad format."""
        head, sep, tail = id.partition("_")
        if not sep:
            raise ValueError(f"Invalid ID format: {id}")
        return head, tail

    @classmethod
    def validate(cls, id: str, expected_prefix: PrefixType) -> bool:
        """Check that *id* carries the short prefix for *expected_prefix*."""
        try:
            actual, _ = cls.parse(id)
        except ValueError:
            return False
        return actual == cls._short_prefix(expected_prefix)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
# Convenience function
def generate_id(prefix: PrefixType) -> str:
    """Generate a new ULID-based ID with the given prefix."""
    return Identifier.generate(prefix)
|
src/opencode_api/core/quota.py
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
|
| 2 |
+
from fastapi import HTTPException, Depends
|
| 3 |
+
from pydantic import BaseModel
|
| 4 |
+
|
| 5 |
+
from .auth import AuthUser, require_auth
|
| 6 |
+
from .supabase import get_client, is_enabled as supabase_enabled
|
| 7 |
+
from .config import settings
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class UsageInfo(BaseModel):
    """Aggregated usage counters for one user, as reported by Supabase."""
    input_tokens: int = 0
    output_tokens: int = 0
    request_count: int = 0
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class QuotaLimits(BaseModel):
    """Per-user daily quota ceilings."""
    daily_requests: int = 100
    daily_input_tokens: int = 1_000_000
    daily_output_tokens: int = 500_000


# Limits applied to every user; there is no per-user override here.
DEFAULT_LIMITS = QuotaLimits()
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
async def get_usage(user_id: str) -> UsageInfo:
    """Fetch today's usage counters for *user_id* via the Supabase RPC
    get_opencode_usage. Returns zeroed counters when Supabase is disabled
    or the RPC returns no rows."""
    if not supabase_enabled():
        return UsageInfo()

    client = get_client()
    result = client.rpc("get_opencode_usage", {"p_user_id": user_id}).execute()

    if result.data and len(result.data) > 0:
        row = result.data[0]
        return UsageInfo(
            input_tokens=row.get("input_tokens", 0),
            output_tokens=row.get("output_tokens", 0),
            request_count=row.get("request_count", 0),
        )
    return UsageInfo()
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
async def increment_usage(user_id: str, input_tokens: int = 0, output_tokens: int = 0) -> None:
    """Record token usage for *user_id* via the Supabase RPC
    increment_opencode_usage. No-op when Supabase is disabled."""
    if not supabase_enabled():
        return

    client = get_client()
    client.rpc("increment_opencode_usage", {
        "p_user_id": user_id,
        "p_input_tokens": input_tokens,
        "p_output_tokens": output_tokens,
    }).execute()
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
async def check_quota(user: AuthUser = Depends(require_auth)) -> AuthUser:
    """FastAPI dependency: reject the request with 429 when the user has
    exhausted any daily quota; otherwise pass the authenticated user through.

    When Supabase is not configured, quotas are disabled entirely.
    """
    if not supabase_enabled():
        return user

    usage = await get_usage(user.id)
    limits = DEFAULT_LIMITS

    # (observed value, limit, error message) — checked in this order.
    # Replaces three copy-pasted raise blocks with one data-driven loop.
    checks = [
        (usage.request_count, limits.daily_requests, "Daily request limit reached"),
        (usage.input_tokens, limits.daily_input_tokens, "Daily input token limit reached"),
        (usage.output_tokens, limits.daily_output_tokens, "Daily output token limit reached"),
    ]
    for current, limit, message in checks:
        if current >= limit:
            raise HTTPException(
                status_code=429,
                detail={
                    "error": message,
                    "usage": usage.model_dump(),
                    "limits": limits.model_dump(),
                },
            )

    return user
|
src/opencode_api/core/storage.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Storage module for OpenCode API - In-memory with optional file persistence"""
|
| 2 |
+
|
| 3 |
+
from typing import TypeVar, Generic, Optional, Dict, Any, List, AsyncIterator
|
| 4 |
+
from pydantic import BaseModel
|
| 5 |
+
import json
|
| 6 |
+
import os
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
import asyncio
|
| 9 |
+
from .config import settings
|
| 10 |
+
|
| 11 |
+
T = TypeVar("T", bound=BaseModel)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class NotFoundError(Exception):
    """Raised when a storage item is not found."""

    def __init__(self, key: List[str]):
        # Keep the structured key so callers can inspect what was missing.
        self.key = key
        super().__init__("Not found: " + "/".join(key))
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class Storage:
    """
    Simple storage system using in-memory dict with optional file persistence.
    Keys are lists of strings that form a path (e.g., ["session", "project1", "ses_123"])

    The in-memory dict acts as a write-through cache over JSON files under
    settings.storage_path. NOTE(review): file reads/writes happen while
    holding the asyncio lock and use blocking I/O — acceptable for small
    payloads, but would stall the event loop for large ones; confirm.
    """

    # path-string ("a/b/c") -> JSON-serializable dict
    _data: Dict[str, Any] = {}
    _lock = asyncio.Lock()

    @classmethod
    def _key_to_path(cls, key: List[str]) -> str:
        """Convert key list to storage path"""
        return "/".join(key)

    @classmethod
    def _file_path(cls, key: List[str]) -> Path:
        """Get file path for persistent storage (last segment becomes the filename)"""
        return Path(settings.storage_path) / "/".join(key[:-1]) / f"{key[-1]}.json"

    @classmethod
    async def write(cls, key: List[str], data: BaseModel | Dict[str, Any]) -> None:
        """Write data to storage (memory + JSON file)."""
        path = cls._key_to_path(key)

        if isinstance(data, BaseModel):
            value = data.model_dump()
        else:
            value = data

        async with cls._lock:
            cls._data[path] = value

            # Persist to file; default=str stringifies non-JSON types (e.g. datetimes)
            file_path = cls._file_path(key)
            file_path.parent.mkdir(parents=True, exist_ok=True)
            file_path.write_text(json.dumps(value, default=str))

    @classmethod
    async def read(cls, key: List[str], model: type[T] = None) -> Optional[T | Dict[str, Any]]:
        """Read data from storage. Returns a *model* instance when given,
        the raw dict otherwise, or None when the key does not exist."""
        path = cls._key_to_path(key)

        async with cls._lock:
            # Check in-memory first
            if path in cls._data:
                data = cls._data[path]
                if model:
                    return model(**data)
                return data

            # Check file; cache the result in memory on a hit
            file_path = cls._file_path(key)
            if file_path.exists():
                data = json.loads(file_path.read_text())
                cls._data[path] = data
                if model:
                    return model(**data)
                return data

        return None

    @classmethod
    async def read_or_raise(cls, key: List[str], model: type[T] = None) -> T | Dict[str, Any]:
        """Read data from storage or raise NotFoundError"""
        result = await cls.read(key, model)
        if result is None:
            raise NotFoundError(key)
        return result

    @classmethod
    async def update(cls, key: List[str], updater: callable, model: type[T] = None) -> T | Dict[str, Any]:
        """Update data in storage using an updater function.

        *updater* mutates the dict in place. NOTE(review): read-modify-write
        is not atomic — the lock is released between read and write, so
        concurrent updates to the same key can lose writes; confirm callers
        serialize updates per key.
        """
        data = await cls.read_or_raise(key, model)

        if isinstance(data, BaseModel):
            data_dict = data.model_dump()
            updater(data_dict)
            await cls.write(key, data_dict)
            if model:
                return model(**data_dict)
            return data_dict
        else:
            updater(data)
            await cls.write(key, data)
            return data

    @classmethod
    async def remove(cls, key: List[str]) -> None:
        """Remove data from storage (memory and file); missing keys are ignored."""
        path = cls._key_to_path(key)

        async with cls._lock:
            cls._data.pop(path, None)

            file_path = cls._file_path(key)
            if file_path.exists():
                file_path.unlink()

    @classmethod
    async def list(cls, prefix: List[str]) -> List[List[str]]:
        """List all keys under a prefix.

        Merges in-memory keys with on-disk files, de-duplicating.
        NOTE(review): the memory scan matches any depth below the prefix
        while the file scan only looks at the immediate directory — the two
        sources disagree for nested keys; confirm intended depth.
        """
        prefix_path = cls._key_to_path(prefix)
        results = []

        async with cls._lock:
            # Check in-memory
            for key in cls._data.keys():
                if key.startswith(prefix_path + "/"):
                    results.append(key.split("/"))

            # Check files
            dir_path = Path(settings.storage_path) / "/".join(prefix)
            if dir_path.exists():
                for file_path in dir_path.glob("*.json"):
                    key = prefix + [file_path.stem]
                    if key not in results:
                        results.append(key)

        return results

    @classmethod
    async def clear(cls) -> None:
        """Clear all storage (in-memory only; files on disk are untouched)."""
        async with cls._lock:
            cls._data.clear()
|
src/opencode_api/core/supabase.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
|
| 2 |
+
from supabase import create_client, Client
|
| 3 |
+
from .config import settings
|
| 4 |
+
|
| 5 |
+
_client: Optional[Client] = None
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def get_client() -> Optional[Client]:
    """Return the lazily-created, cached Supabase client.

    Returns None when the Supabase URL or service-role key is not
    configured; subsequent calls reuse the cached client.
    """
    global _client

    if _client is None:
        url = settings.supabase_url
        service_key = settings.supabase_service_key
        if not url or not service_key:
            return None
        _client = create_client(url, service_key)

    return _client
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def is_enabled() -> bool:
    """True when both the Supabase URL and service-role key are configured."""
    required = (settings.supabase_url, settings.supabase_service_key)
    return all(value is not None for value in required)
|
src/opencode_api/provider/__init__.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .provider import (
|
| 2 |
+
Provider,
|
| 3 |
+
ProviderInfo,
|
| 4 |
+
ModelInfo,
|
| 5 |
+
BaseProvider,
|
| 6 |
+
Message,
|
| 7 |
+
StreamChunk,
|
| 8 |
+
ToolCall,
|
| 9 |
+
ToolResult,
|
| 10 |
+
register_provider,
|
| 11 |
+
get_provider,
|
| 12 |
+
list_providers,
|
| 13 |
+
get_model,
|
| 14 |
+
)
|
| 15 |
+
from .anthropic import AnthropicProvider
|
| 16 |
+
from .openai import OpenAIProvider
|
| 17 |
+
from .litellm import LiteLLMProvider
|
| 18 |
+
from .gemini import GeminiProvider
|
| 19 |
+
|
| 20 |
+
# Public API of the provider package.
__all__ = [
    "Provider",
    "ProviderInfo",
    "ModelInfo",
    "BaseProvider",
    "Message",
    "StreamChunk",
    "ToolCall",
    "ToolResult",
    "register_provider",
    "get_provider",
    "list_providers",
    "get_model",
    "AnthropicProvider",
    "OpenAIProvider",
    "LiteLLMProvider",
    "GeminiProvider",
]
|
src/opencode_api/provider/anthropic.py
ADDED
|
@@ -0,0 +1,204 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Any, List, Optional, AsyncGenerator
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
|
| 5 |
+
from .provider import BaseProvider, ModelInfo, Message, StreamChunk, ToolCall
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
# Models that accept the `thinking` (extended thinking) request parameter.
MODELS_WITH_EXTENDED_THINKING = {"claude-sonnet-4-20250514", "claude-opus-4-20250514"}


class AnthropicProvider(BaseProvider):
    """Streaming provider backed by the Anthropic Messages API.

    Translates the project's provider-neutral ``Message``/tool format into
    Anthropic request payloads and converts SDK stream events back into
    ``StreamChunk`` objects (text, reasoning, tool_call, done, error).
    """

    def __init__(self, api_key: Optional[str] = None):
        # Fall back to the conventional environment variable when no key is given.
        self._api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
        self._client = None  # lazily created anthropic.AsyncAnthropic

    @property
    def id(self) -> str:
        return "anthropic"

    @property
    def name(self) -> str:
        return "Anthropic"

    @property
    def models(self) -> Dict[str, ModelInfo]:
        """Static catalog of supported Claude models (costs are USD per 1M tokens)."""
        return {
            "claude-sonnet-4-20250514": ModelInfo(
                id="claude-sonnet-4-20250514",
                name="Claude Sonnet 4",
                provider_id="anthropic",
                context_limit=200000,
                output_limit=64000,
                supports_tools=True,
                supports_streaming=True,
                cost_input=3.0,
                cost_output=15.0,
            ),
            "claude-opus-4-20250514": ModelInfo(
                id="claude-opus-4-20250514",
                name="Claude Opus 4",
                provider_id="anthropic",
                context_limit=200000,
                output_limit=32000,
                supports_tools=True,
                supports_streaming=True,
                cost_input=15.0,
                cost_output=75.0,
            ),
            "claude-3-5-haiku-20241022": ModelInfo(
                id="claude-3-5-haiku-20241022",
                name="Claude 3.5 Haiku",
                provider_id="anthropic",
                context_limit=200000,
                output_limit=8192,
                supports_tools=True,
                supports_streaming=True,
                cost_input=0.8,
                cost_output=4.0,
            ),
        }

    def _get_client(self):
        """Create the AsyncAnthropic client on first use (import deferred)."""
        if self._client is None:
            try:
                import anthropic
                self._client = anthropic.AsyncAnthropic(api_key=self._api_key)
            except ImportError:
                raise ImportError("anthropic package is required. Install with: pip install anthropic")
        return self._client

    def _supports_extended_thinking(self, model_id: str) -> bool:
        """Whether ``model_id`` accepts the extended-thinking parameter."""
        return model_id in MODELS_WITH_EXTENDED_THINKING

    async def stream(
        self,
        model_id: str,
        messages: List[Message],
        tools: Optional[List[Dict[str, Any]]] = None,
        system: Optional[str] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
    ) -> AsyncGenerator[StreamChunk, None]:
        """Stream a completion from Anthropic as ``StreamChunk`` objects.

        Args:
            model_id: Anthropic model identifier (see ``models``).
            messages: Conversation history in provider-neutral form.
            tools: Optional tool declarations (OpenAI- or Anthropic-style schema).
            system: Optional system prompt.
            temperature: Optional sampling temperature.
            max_tokens: Output token cap (defaults to 16000).
        """
        client = self._get_client()

        # Convert neutral messages to Anthropic's message format; structured
        # content keeps only parts that carry text.
        anthropic_messages = []
        for msg in messages:
            content = msg.content
            if isinstance(content, str):
                anthropic_messages.append({"role": msg.role, "content": content})
            else:
                anthropic_messages.append({
                    "role": msg.role,
                    "content": [{"type": c.type, "text": c.text} for c in content if c.text]
                })

        kwargs: Dict[str, Any] = {
            "model": model_id,
            "messages": anthropic_messages,
            "max_tokens": max_tokens or 16000,
        }

        if system:
            kwargs["system"] = system

        if temperature is not None:
            kwargs["temperature"] = temperature

        if tools:
            # Accept either "parameters" (OpenAI-style) or "input_schema"
            # (Anthropic-style) on incoming tool declarations.
            kwargs["tools"] = [
                {
                    "name": t["name"],
                    "description": t.get("description", ""),
                    "input_schema": t.get("parameters", t.get("input_schema", {}))
                }
                for t in tools
            ]

        use_extended_thinking = self._supports_extended_thinking(model_id)

        async for chunk in self._stream_with_fallback(client, kwargs, use_extended_thinking):
            yield chunk

    async def _stream_with_fallback(
        self, client, kwargs: Dict[str, Any], use_extended_thinking: bool
    ):
        """Run the stream, retrying once without extended thinking on rejection.

        The retry only happens when the request failed before anything was
        yielded; re-streaming after partial output would duplicate content
        already delivered to the caller.
        """
        if use_extended_thinking:
            kwargs["thinking"] = {
                "type": "enabled",
                "budget_tokens": 10000
            }

        emitted = False  # whether any chunk has reached the caller
        try:
            async for chunk in self._do_stream(client, kwargs):
                emitted = True
                yield chunk
        except Exception as e:
            error_str = str(e).lower()
            has_thinking = "thinking" in kwargs

            retryable = "thinking" in error_str or "unsupported" in error_str or "invalid" in error_str
            if not emitted and has_thinking and retryable:
                # The API rejected the thinking parameter up front; retry clean.
                del kwargs["thinking"]
                async for chunk in self._do_stream(client, kwargs):
                    yield chunk
            else:
                yield StreamChunk(type="error", error=str(e))

    async def _do_stream(self, client, kwargs: Dict[str, Any]):
        """Translate raw Anthropic stream events into StreamChunk objects."""
        current_tool_call = None  # accumulates a tool_use block until it closes

        async with client.messages.stream(**kwargs) as stream:
            async for event in stream:
                if event.type == "content_block_start":
                    if hasattr(event, "content_block"):
                        block = event.content_block
                        if block.type == "tool_use":
                            # Arguments arrive incrementally as JSON fragments.
                            current_tool_call = {
                                "id": block.id,
                                "name": block.name,
                                "arguments_json": ""
                            }

                elif event.type == "content_block_delta":
                    if hasattr(event, "delta"):
                        delta = event.delta
                        if delta.type == "text_delta":
                            yield StreamChunk(type="text", text=delta.text)
                        elif delta.type == "thinking_delta":
                            yield StreamChunk(type="reasoning", text=delta.thinking)
                        elif delta.type == "input_json_delta" and current_tool_call:
                            current_tool_call["arguments_json"] += delta.partial_json

                elif event.type == "content_block_stop":
                    if current_tool_call:
                        # Emit the completed tool call; malformed JSON degrades
                        # to empty arguments rather than aborting the stream.
                        try:
                            args = json.loads(current_tool_call["arguments_json"]) if current_tool_call["arguments_json"] else {}
                        except json.JSONDecodeError:
                            args = {}
                        yield StreamChunk(
                            type="tool_call",
                            tool_call=ToolCall(
                                id=current_tool_call["id"],
                                name=current_tool_call["name"],
                                arguments=args
                            )
                        )
                        current_tool_call = None

                elif event.type == "message_stop":
                    final_message = await stream.get_final_message()
                    usage = {
                        "input_tokens": final_message.usage.input_tokens,
                        "output_tokens": final_message.usage.output_tokens,
                    }
                    stop_reason = self._map_stop_reason(final_message.stop_reason)
                    yield StreamChunk(type="done", usage=usage, stop_reason=stop_reason)

    def _map_stop_reason(self, anthropic_stop_reason: Optional[str]) -> str:
        """Map Anthropic stop reasons onto the provider-neutral vocabulary."""
        mapping = {
            "end_turn": "end_turn",
            "tool_use": "tool_calls",
            "max_tokens": "max_tokens",
            "stop_sequence": "end_turn",
        }
        return mapping.get(anthropic_stop_reason or "", "end_turn")
|
src/opencode_api/provider/gemini.py
ADDED
|
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Any, List, Optional, AsyncGenerator
|
| 2 |
+
import os
|
| 3 |
+
import logging
|
| 4 |
+
|
| 5 |
+
from .provider import BaseProvider, ModelInfo, Message, StreamChunk, ToolCall
|
| 6 |
+
|
| 7 |
+
logger = logging.getLogger(__name__)
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# Models that accept a ThinkingConfig in the request.
MODELS_WITH_THINKING = {
    "gemini-2.5-pro",
    "gemini-2.5-flash",
    "gemini-2.5-flash-lite",
}

# Minimum allowed thinking budget (tokens) per model.
THINKING_BUDGET_MIN = {
    "gemini-2.5-pro": 128,
    "gemini-2.5-flash": 1,
    "gemini-2.5-flash-lite": 1,
}


class GeminiProvider(BaseProvider):
    """Streaming provider backed by the Google Gemini (google-genai) SDK.

    Converts provider-neutral messages and tool declarations into Gemini
    request objects and maps streamed parts back into ``StreamChunk``
    objects (text, reasoning, tool_call, done, error).
    """

    def __init__(self, api_key: Optional[str] = None):
        # Prefer an explicit key, then either conventional environment variable.
        self._api_key = api_key or os.environ.get("GOOGLE_API_KEY") or os.environ.get("GEMINI_API_KEY")
        self._client = None  # lazily created genai.Client

    @property
    def id(self) -> str:
        return "gemini"

    @property
    def name(self) -> str:
        return "Google Gemini"

    @property
    def models(self) -> Dict[str, ModelInfo]:
        """Static catalog of supported Gemini models (costs are USD per 1M tokens)."""
        return {
            "gemini-2.5-pro": ModelInfo(
                id="gemini-2.5-pro",
                name="Gemini 2.5 Pro",
                provider_id="gemini",
                context_limit=1048576,
                output_limit=65536,
                supports_tools=True,
                supports_streaming=True,
                cost_input=1.25,
                cost_output=10.0,
            ),
            "gemini-2.5-flash": ModelInfo(
                id="gemini-2.5-flash",
                name="Gemini 2.5 Flash",
                provider_id="gemini",
                context_limit=1048576,
                output_limit=65536,
                supports_tools=True,
                supports_streaming=True,
                cost_input=0.15,
                cost_output=0.6,
            ),
            "gemini-2.5-flash-lite": ModelInfo(
                id="gemini-2.5-flash-lite",
                name="Gemini 2.5 Flash Lite",
                provider_id="gemini",
                context_limit=1048576,
                output_limit=65536,
                supports_tools=True,
                supports_streaming=True,
                cost_input=0.075,
                cost_output=0.3,
            ),
            "gemini-2.0-flash": ModelInfo(
                id="gemini-2.0-flash",
                name="Gemini 2.0 Flash",
                provider_id="gemini",
                context_limit=1048576,
                output_limit=8192,
                supports_tools=True,
                supports_streaming=True,
                cost_input=0.075,
                cost_output=0.3,
            ),
        }

    def _get_client(self):
        """Create the genai.Client on first use (import deferred)."""
        if self._client is None:
            try:
                from google import genai
                self._client = genai.Client(api_key=self._api_key)
            except ImportError:
                raise ImportError("google-genai package is required. Install with: pip install google-genai")
        return self._client

    def _supports_thinking(self, model_id: str) -> bool:
        """Whether ``model_id`` accepts a ThinkingConfig."""
        return model_id in MODELS_WITH_THINKING

    def _get_thinking_budget(self, model_id: str) -> int:
        """Thinking budget in tokens, never below the model's documented minimum.

        NOTE: with the current minimums (<= 128) this always returns 1024.
        """
        min_budget = THINKING_BUDGET_MIN.get(model_id, 128)
        return max(min_budget, 1024)

    async def stream(
        self,
        model_id: str,
        messages: List[Message],
        tools: Optional[List[Dict[str, Any]]] = None,
        system: Optional[str] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
    ) -> AsyncGenerator[StreamChunk, None]:
        """Stream a completion from Gemini as ``StreamChunk`` objects.

        Args:
            model_id: Gemini model identifier (see ``models``).
            messages: Conversation history in provider-neutral form.
            tools: Optional tool declarations (OpenAI- or Anthropic-style schema).
            system: Optional system instruction.
            temperature: Optional sampling temperature.
            max_tokens: Optional output token cap.
        """
        from google.genai import types

        client = self._get_client()

        # Convert neutral messages into Gemini Content objects. Gemini only
        # distinguishes "user" and "model" roles; empty content is skipped.
        contents = []
        logger.debug("Building contents from %d messages", len(messages))
        for msg in messages:
            role = "user" if msg.role == "user" else "model"
            content = msg.content
            logger.debug("msg.role=%s, content type=%s, content=%s",
                         msg.role, type(content), repr(content)[:100])

            if isinstance(content, str) and content:
                contents.append(types.Content(
                    role=role,
                    parts=[types.Part(text=content)]
                ))
            elif content:
                parts = [types.Part(text=c.text) for c in content if c.text]
                if parts:
                    contents.append(types.Content(role=role, parts=parts))

        logger.debug("Built %d contents", len(contents))

        config_kwargs: Dict[str, Any] = {}

        if system:
            config_kwargs["system_instruction"] = system

        if temperature is not None:
            config_kwargs["temperature"] = temperature

        if max_tokens is not None:
            config_kwargs["max_output_tokens"] = max_tokens

        if self._supports_thinking(model_id):
            config_kwargs["thinking_config"] = types.ThinkingConfig(
                thinking_budget=self._get_thinking_budget(model_id),
                include_thoughts=True  # Include thinking content in response
            )

        if tools:
            # Accept either "parameters" (OpenAI-style) or "input_schema"
            # (Anthropic-style) on incoming tool declarations.
            gemini_tools = []
            for t in tools:
                func_decl = types.FunctionDeclaration(
                    name=t["name"],
                    description=t.get("description", ""),
                    parameters=t.get("parameters", t.get("input_schema", {}))
                )
                gemini_tools.append(types.Tool(function_declarations=[func_decl]))
            config_kwargs["tools"] = gemini_tools

        config = types.GenerateContentConfig(**config_kwargs)

        async for chunk in self._stream_with_fallback(
            client, model_id, contents, config, config_kwargs, types
        ):
            yield chunk

    async def _stream_with_fallback(
        self, client, model_id: str, contents, config, config_kwargs: Dict[str, Any], types
    ):
        """Run the stream, retrying once without ThinkingConfig on rejection."""
        try:
            async for chunk in self._do_stream(client, model_id, contents, config):
                yield chunk
        except Exception as e:
            error_str = str(e).lower()
            has_thinking = "thinking_config" in config_kwargs

            if has_thinking and ("thinking" in error_str or "budget" in error_str or "unsupported" in error_str):
                logger.warning(f"Thinking not supported for {model_id}, retrying without thinking config")
                del config_kwargs["thinking_config"]
                fallback_config = types.GenerateContentConfig(**config_kwargs)

                async for chunk in self._do_stream(client, model_id, contents, fallback_config):
                    yield chunk
            else:
                logger.error(f"Gemini stream error: {e}")
                yield StreamChunk(type="error", error=str(e))

    async def _do_stream(self, client, model_id: str, contents, config):
        """Translate raw Gemini stream chunks into StreamChunk objects."""
        response_stream = await client.aio.models.generate_content_stream(
            model=model_id,
            contents=contents,
            config=config,
        )

        # Tool calls are buffered and emitted together once a finish_reason
        # arrives, so the stop_reason can account for them.
        pending_tool_calls = []

        async for chunk in response_stream:
            if not chunk.candidates:
                continue

            candidate = chunk.candidates[0]

            if candidate.content and candidate.content.parts:
                for part in candidate.content.parts:
                    if hasattr(part, 'thought') and part.thought:
                        if part.text:
                            yield StreamChunk(type="reasoning", text=part.text)
                    elif hasattr(part, 'function_call') and part.function_call:
                        fc = part.function_call
                        # Gemini does not supply call ids; synthesize one.
                        tool_call = ToolCall(
                            id=f"call_{fc.name}_{len(pending_tool_calls)}",
                            name=fc.name,
                            arguments=dict(fc.args) if fc.args else {}
                        )
                        pending_tool_calls.append(tool_call)
                    elif part.text:
                        yield StreamChunk(type="text", text=part.text)

            finish_reason = getattr(candidate, 'finish_reason', None)
            if finish_reason:
                logger.debug("finish_reason: %s, pending_tool_calls: %d",
                             finish_reason, len(pending_tool_calls))
                for tc in pending_tool_calls:
                    yield StreamChunk(type="tool_call", tool_call=tc)

                # IMPORTANT: If there are pending tool calls, ALWAYS return "tool_calls"
                # regardless of Gemini's finish_reason (which is often STOP even with tool calls)
                if pending_tool_calls:
                    stop_reason = "tool_calls"
                else:
                    stop_reason = self._map_stop_reason(finish_reason)
                logger.debug("Mapped stop_reason: %s", stop_reason)

                usage = None
                if hasattr(chunk, 'usage_metadata') and chunk.usage_metadata:
                    usage = {
                        "input_tokens": getattr(chunk.usage_metadata, 'prompt_token_count', 0),
                        "output_tokens": getattr(chunk.usage_metadata, 'candidates_token_count', 0),
                    }
                    if hasattr(chunk.usage_metadata, 'thoughts_token_count'):
                        usage["thinking_tokens"] = chunk.usage_metadata.thoughts_token_count

                yield StreamChunk(type="done", usage=usage, stop_reason=stop_reason)
                return

        # Stream ended without an explicit finish_reason.
        yield StreamChunk(type="done", stop_reason="end_turn")

    def _map_stop_reason(self, gemini_finish_reason) -> str:
        """Map Gemini finish reasons (enum or string) onto the neutral vocabulary."""
        reason_name = str(gemini_finish_reason).lower() if gemini_finish_reason else ""

        if "stop" in reason_name or "end" in reason_name:
            return "end_turn"
        elif "tool" in reason_name or "function" in reason_name:
            return "tool_calls"
        elif "max" in reason_name or "length" in reason_name:
            return "max_tokens"
        elif "safety" in reason_name:
            return "safety"
        return "end_turn"
|
src/opencode_api/provider/litellm.py
ADDED
|
@@ -0,0 +1,363 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Any, List, Optional, AsyncGenerator
|
| 2 |
+
import json
|
| 3 |
+
import os
|
| 4 |
+
|
| 5 |
+
from .provider import BaseProvider, ModelInfo, Message, StreamChunk, ToolCall
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
# Catalog of models routed through LiteLLM, keyed by LiteLLM model string
# (prefix selects the upstream provider, e.g. "gemini/", "groq/", "zai/").
# Cost figures are USD per million tokens (input / output).
DEFAULT_MODELS = {
    "claude-sonnet-4-20250514": ModelInfo(
        id="claude-sonnet-4-20250514",
        name="Claude Sonnet 4",
        provider_id="litellm",
        context_limit=200000,
        output_limit=64000,
        supports_tools=True,
        supports_streaming=True,
        cost_input=3.0,
        cost_output=15.0,
    ),
    "claude-opus-4-20250514": ModelInfo(
        id="claude-opus-4-20250514",
        name="Claude Opus 4",
        provider_id="litellm",
        context_limit=200000,
        output_limit=32000,
        supports_tools=True,
        supports_streaming=True,
        cost_input=15.0,
        cost_output=75.0,
    ),
    "claude-3-5-haiku-20241022": ModelInfo(
        id="claude-3-5-haiku-20241022",
        name="Claude 3.5 Haiku",
        provider_id="litellm",
        context_limit=200000,
        output_limit=8192,
        supports_tools=True,
        supports_streaming=True,
        cost_input=0.8,
        cost_output=4.0,
    ),
    "gpt-4o": ModelInfo(
        id="gpt-4o",
        name="GPT-4o",
        provider_id="litellm",
        context_limit=128000,
        output_limit=16384,
        supports_tools=True,
        supports_streaming=True,
        cost_input=2.5,
        cost_output=10.0,
    ),
    "gpt-4o-mini": ModelInfo(
        id="gpt-4o-mini",
        name="GPT-4o Mini",
        provider_id="litellm",
        context_limit=128000,
        output_limit=16384,
        supports_tools=True,
        supports_streaming=True,
        cost_input=0.15,
        cost_output=0.6,
    ),
    "o1": ModelInfo(
        id="o1",
        name="O1",
        provider_id="litellm",
        context_limit=200000,
        output_limit=100000,
        supports_tools=True,
        supports_streaming=True,
        cost_input=15.0,
        cost_output=60.0,
    ),
    "gemini/gemini-2.0-flash": ModelInfo(
        id="gemini/gemini-2.0-flash",
        name="Gemini 2.0 Flash",
        provider_id="litellm",
        context_limit=1000000,
        output_limit=8192,
        supports_tools=True,
        supports_streaming=True,
        cost_input=0.075,
        cost_output=0.3,
    ),
    "gemini/gemini-2.5-pro-preview-05-06": ModelInfo(
        id="gemini/gemini-2.5-pro-preview-05-06",
        name="Gemini 2.5 Pro",
        provider_id="litellm",
        context_limit=1000000,
        output_limit=65536,
        supports_tools=True,
        supports_streaming=True,
        cost_input=1.25,
        cost_output=10.0,
    ),
    "groq/llama-3.3-70b-versatile": ModelInfo(
        id="groq/llama-3.3-70b-versatile",
        name="Llama 3.3 70B (Groq)",
        provider_id="litellm",
        context_limit=128000,
        output_limit=32768,
        supports_tools=True,
        supports_streaming=True,
        cost_input=0.59,
        cost_output=0.79,
    ),
    "deepseek/deepseek-chat": ModelInfo(
        id="deepseek/deepseek-chat",
        name="DeepSeek Chat",
        provider_id="litellm",
        context_limit=64000,
        output_limit=8192,
        supports_tools=True,
        supports_streaming=True,
        cost_input=0.14,
        cost_output=0.28,
    ),
    "openrouter/anthropic/claude-sonnet-4": ModelInfo(
        id="openrouter/anthropic/claude-sonnet-4",
        name="Claude Sonnet 4 (OpenRouter)",
        provider_id="litellm",
        context_limit=200000,
        output_limit=64000,
        supports_tools=True,
        supports_streaming=True,
        cost_input=3.0,
        cost_output=15.0,
    ),
    # Z.ai Free Flash Models (served via Z.ai's OpenAI-compatible endpoint;
    # the "zai/" prefix is rewritten in LiteLLMProvider.stream()).
    "zai/glm-4.7-flash": ModelInfo(
        id="zai/glm-4.7-flash",
        name="GLM-4.7 Flash (Free)",
        provider_id="litellm",
        context_limit=128000,
        output_limit=8192,
        supports_tools=True,
        supports_streaming=True,
        cost_input=0.0,
        cost_output=0.0,
    ),
    "zai/glm-4.6v-flash": ModelInfo(
        id="zai/glm-4.6v-flash",
        name="GLM-4.6V Flash (Free)",
        provider_id="litellm",
        context_limit=128000,
        output_limit=8192,
        supports_tools=True,
        supports_streaming=True,
        cost_input=0.0,
        cost_output=0.0,
    ),
    "zai/glm-4.5-flash": ModelInfo(
        id="zai/glm-4.5-flash",
        name="GLM-4.5 Flash (Free)",
        provider_id="litellm",
        context_limit=128000,
        output_limit=8192,
        supports_tools=True,
        supports_streaming=True,
        cost_input=0.0,
        cost_output=0.0,
    ),
}
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
class LiteLLMProvider(BaseProvider):
|
| 168 |
+
|
| 169 |
+
    def __init__(self):
        # litellm module reference; imported lazily in _get_litellm().
        self._litellm = None
        # Per-instance copy of the default catalog so add_model() does not
        # mutate the module-level DEFAULT_MODELS.
        self._models = dict(DEFAULT_MODELS)
|
| 172 |
+
|
| 173 |
+
    @property
    def id(self) -> str:
        """Stable provider identifier used in model routing."""
        return "litellm"
|
| 176 |
+
|
| 177 |
+
    @property
    def name(self) -> str:
        """Human-readable provider name."""
        return "LiteLLM (Multi-Provider)"
|
| 180 |
+
|
| 181 |
+
    @property
    def models(self) -> Dict[str, ModelInfo]:
        """Current model catalog (defaults plus any models added at runtime).

        NOTE: returns the internal dict itself, not a copy — mutating the
        result changes this provider's catalog.
        """
        return self._models
|
| 184 |
+
|
| 185 |
+
    def add_model(self, model: ModelInfo) -> None:
        """Register (or overwrite) a model in this provider's catalog."""
        self._models[model.id] = model
|
| 187 |
+
|
| 188 |
+
    def _get_litellm(self):
        """Import and cache the litellm module on first use.

        Sets ``drop_params=True`` so litellm silently discards request
        parameters a given upstream provider does not support.

        Raises:
            ImportError: if the litellm package is not installed.
        """
        if self._litellm is None:
            try:
                import litellm
                litellm.drop_params = True
                self._litellm = litellm
            except ImportError:
                raise ImportError("litellm package is required. Install with: pip install litellm")
        return self._litellm
|
| 197 |
+
|
| 198 |
+
async def stream(
|
| 199 |
+
self,
|
| 200 |
+
model_id: str,
|
| 201 |
+
messages: List[Message],
|
| 202 |
+
tools: Optional[List[Dict[str, Any]]] = None,
|
| 203 |
+
system: Optional[str] = None,
|
| 204 |
+
temperature: Optional[float] = None,
|
| 205 |
+
max_tokens: Optional[int] = None,
|
| 206 |
+
) -> AsyncGenerator[StreamChunk, None]:
|
| 207 |
+
litellm = self._get_litellm()
|
| 208 |
+
|
| 209 |
+
litellm_messages = []
|
| 210 |
+
|
| 211 |
+
if system:
|
| 212 |
+
litellm_messages.append({"role": "system", "content": system})
|
| 213 |
+
|
| 214 |
+
for msg in messages:
|
| 215 |
+
content = msg.content
|
| 216 |
+
if isinstance(content, str):
|
| 217 |
+
litellm_messages.append({"role": msg.role, "content": content})
|
| 218 |
+
else:
|
| 219 |
+
litellm_messages.append({
|
| 220 |
+
"role": msg.role,
|
| 221 |
+
"content": [{"type": c.type, "text": c.text} for c in content if c.text]
|
| 222 |
+
})
|
| 223 |
+
|
| 224 |
+
# Z.ai 모델 처리: OpenAI-compatible API 사용
|
| 225 |
+
actual_model = model_id
|
| 226 |
+
if model_id.startswith("zai/"):
|
| 227 |
+
# zai/glm-4.7-flash -> openai/glm-4.7-flash with custom api_base
|
| 228 |
+
actual_model = "openai/" + model_id[4:]
|
| 229 |
+
|
| 230 |
+
kwargs: Dict[str, Any] = {
|
| 231 |
+
"model": actual_model,
|
| 232 |
+
"messages": litellm_messages,
|
| 233 |
+
"stream": True,
|
| 234 |
+
}
|
| 235 |
+
|
| 236 |
+
# Z.ai 전용 설정
|
| 237 |
+
if model_id.startswith("zai/"):
|
| 238 |
+
kwargs["api_base"] = os.environ.get("ZAI_API_BASE", "https://api.z.ai/api/paas/v4")
|
| 239 |
+
kwargs["api_key"] = os.environ.get("ZAI_API_KEY")
|
| 240 |
+
|
| 241 |
+
if temperature is not None:
|
| 242 |
+
kwargs["temperature"] = temperature
|
| 243 |
+
|
| 244 |
+
if max_tokens is not None:
|
| 245 |
+
kwargs["max_tokens"] = max_tokens
|
| 246 |
+
else:
|
| 247 |
+
kwargs["max_tokens"] = 8192
|
| 248 |
+
|
| 249 |
+
if tools:
|
| 250 |
+
kwargs["tools"] = [
|
| 251 |
+
{
|
| 252 |
+
"type": "function",
|
| 253 |
+
"function": {
|
| 254 |
+
"name": t["name"],
|
| 255 |
+
"description": t.get("description", ""),
|
| 256 |
+
"parameters": t.get("parameters", t.get("input_schema", {}))
|
| 257 |
+
}
|
| 258 |
+
}
|
| 259 |
+
for t in tools
|
| 260 |
+
]
|
| 261 |
+
|
| 262 |
+
current_tool_calls: Dict[int, Dict[str, Any]] = {}
|
| 263 |
+
|
| 264 |
+
try:
|
| 265 |
+
response = await litellm.acompletion(**kwargs)
|
| 266 |
+
|
| 267 |
+
async for chunk in response:
|
| 268 |
+
if hasattr(chunk, 'choices') and chunk.choices:
|
| 269 |
+
choice = chunk.choices[0]
|
| 270 |
+
delta = getattr(choice, 'delta', None)
|
| 271 |
+
|
| 272 |
+
if delta:
|
| 273 |
+
if hasattr(delta, 'content') and delta.content:
|
| 274 |
+
yield StreamChunk(type="text", text=delta.content)
|
| 275 |
+
|
| 276 |
+
if hasattr(delta, 'tool_calls') and delta.tool_calls:
|
| 277 |
+
for tc in delta.tool_calls:
|
| 278 |
+
idx = tc.index if hasattr(tc, 'index') else 0
|
| 279 |
+
|
| 280 |
+
if idx not in current_tool_calls:
|
| 281 |
+
current_tool_calls[idx] = {
|
| 282 |
+
"id": tc.id if hasattr(tc, 'id') and tc.id else f"call_{idx}",
|
| 283 |
+
"name": "",
|
| 284 |
+
"arguments_json": ""
|
| 285 |
+
}
|
| 286 |
+
|
| 287 |
+
if hasattr(tc, 'function'):
|
| 288 |
+
if hasattr(tc.function, 'name') and tc.function.name:
|
| 289 |
+
current_tool_calls[idx]["name"] = tc.function.name
|
| 290 |
+
if hasattr(tc.function, 'arguments') and tc.function.arguments:
|
| 291 |
+
current_tool_calls[idx]["arguments_json"] += tc.function.arguments
|
| 292 |
+
|
| 293 |
+
finish_reason = getattr(choice, 'finish_reason', None)
|
| 294 |
+
if finish_reason:
|
| 295 |
+
for idx, tc_data in current_tool_calls.items():
|
| 296 |
+
if tc_data["name"]:
|
| 297 |
+
try:
|
| 298 |
+
args = json.loads(tc_data["arguments_json"]) if tc_data["arguments_json"] else {}
|
| 299 |
+
except json.JSONDecodeError:
|
| 300 |
+
args = {}
|
| 301 |
+
|
| 302 |
+
yield StreamChunk(
|
| 303 |
+
type="tool_call",
|
| 304 |
+
tool_call=ToolCall(
|
| 305 |
+
id=tc_data["id"],
|
| 306 |
+
name=tc_data["name"],
|
| 307 |
+
arguments=args
|
| 308 |
+
)
|
| 309 |
+
)
|
| 310 |
+
|
| 311 |
+
usage = None
|
| 312 |
+
if hasattr(chunk, 'usage') and chunk.usage:
|
| 313 |
+
usage = {
|
| 314 |
+
"input_tokens": getattr(chunk.usage, 'prompt_tokens', 0),
|
| 315 |
+
"output_tokens": getattr(chunk.usage, 'completion_tokens', 0),
|
| 316 |
+
}
|
| 317 |
+
|
| 318 |
+
stop_reason = self._map_stop_reason(finish_reason)
|
| 319 |
+
yield StreamChunk(type="done", usage=usage, stop_reason=stop_reason)
|
| 320 |
+
|
| 321 |
+
except Exception as e:
|
| 322 |
+
yield StreamChunk(type="error", error=str(e))
|
| 323 |
+
|
| 324 |
+
async def complete(
    self,
    model_id: str,
    prompt: str,
    max_tokens: int = 100,
) -> str:
    """Run a single non-streaming completion and return the generated text.

    Args:
        model_id: Model identifier; "zai/..." ids are routed through the
            Z.ai OpenAI-compatible endpoint.
        prompt: Text sent as a single user message.
        max_tokens: Upper bound on generated tokens.

    Returns:
        The assistant message content, or "" when the model returned none.
    """
    litellm = self._get_litellm()

    request: Dict[str, Any] = {
        "model": model_id,
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": max_tokens,
    }

    # Z.ai models are served through an OpenAI-compatible API: rewrite the
    # model name and point LiteLLM at the Z.ai base URL and key.
    if model_id.startswith("zai/"):
        request["model"] = "openai/" + model_id[4:]
        request["api_base"] = os.environ.get("ZAI_API_BASE", "https://api.z.ai/api/paas/v4")
        request["api_key"] = os.environ.get("ZAI_API_KEY")

    response = await litellm.acompletion(**request)
    return response.choices[0].message.content or ""
|
| 349 |
+
|
| 350 |
+
def _map_stop_reason(self, finish_reason: Optional[str]) -> str:
|
| 351 |
+
if not finish_reason:
|
| 352 |
+
return "end_turn"
|
| 353 |
+
|
| 354 |
+
mapping = {
|
| 355 |
+
"stop": "end_turn",
|
| 356 |
+
"end_turn": "end_turn",
|
| 357 |
+
"tool_calls": "tool_calls",
|
| 358 |
+
"function_call": "tool_calls",
|
| 359 |
+
"length": "max_tokens",
|
| 360 |
+
"max_tokens": "max_tokens",
|
| 361 |
+
"content_filter": "content_filter",
|
| 362 |
+
}
|
| 363 |
+
return mapping.get(finish_reason, "end_turn")
|
src/opencode_api/provider/openai.py
ADDED
|
@@ -0,0 +1,182 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Any, List, Optional, AsyncGenerator
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
|
| 5 |
+
from .provider import BaseProvider, ModelInfo, Message, StreamChunk, ToolCall
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class OpenAIProvider(BaseProvider):
    """Provider backed by the OpenAI Chat Completions API (streaming)."""

    def __init__(self, api_key: Optional[str] = None):
        """Store the API key (falling back to OPENAI_API_KEY) and defer client creation."""
        self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
        self._client = None  # AsyncOpenAI client, created lazily in _get_client()

    @property
    def id(self) -> str:
        return "openai"

    @property
    def name(self) -> str:
        return "OpenAI"

    @property
    def models(self) -> Dict[str, ModelInfo]:
        """Static model catalog; costs are USD per 1M tokens."""
        return {
            "gpt-4o": ModelInfo(
                id="gpt-4o",
                name="GPT-4o",
                provider_id="openai",
                context_limit=128000,
                output_limit=16384,
                supports_tools=True,
                supports_streaming=True,
                cost_input=2.5,
                cost_output=10.0,
            ),
            "gpt-4o-mini": ModelInfo(
                id="gpt-4o-mini",
                name="GPT-4o Mini",
                provider_id="openai",
                context_limit=128000,
                output_limit=16384,
                supports_tools=True,
                supports_streaming=True,
                cost_input=0.15,
                cost_output=0.6,
            ),
            "o1": ModelInfo(
                id="o1",
                name="o1",
                provider_id="openai",
                context_limit=200000,
                output_limit=100000,
                supports_tools=True,
                supports_streaming=True,
                cost_input=15.0,
                cost_output=60.0,
            ),
        }

    def _get_client(self):
        """Create the AsyncOpenAI client on first use; fail clearly if the SDK is missing."""
        if self._client is None:
            try:
                from openai import AsyncOpenAI
                self._client = AsyncOpenAI(api_key=self._api_key)
            except ImportError:
                raise ImportError("openai package is required. Install with: pip install openai")
        return self._client

    async def stream(
        self,
        model_id: str,
        messages: List[Message],
        tools: Optional[List[Dict[str, Any]]] = None,
        system: Optional[str] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
    ) -> AsyncGenerator[StreamChunk, None]:
        """Stream a chat completion as StreamChunk events.

        Yields "text" chunks as deltas arrive, one "tool_call" chunk per
        completed tool call after the stream ends, and a final "done" chunk
        carrying token usage and the mapped stop reason.
        """
        client = self._get_client()

        openai_messages = []
        if system:
            openai_messages.append({"role": "system", "content": system})

        for msg in messages:
            content = msg.content
            if isinstance(content, str):
                openai_messages.append({"role": msg.role, "content": content})
            else:
                # Structured content: keep only the parts that carry text.
                openai_messages.append({
                    "role": msg.role,
                    "content": [{"type": c.type, "text": c.text} for c in content if c.text]
                })

        kwargs: Dict[str, Any] = {
            "model": model_id,
            "messages": openai_messages,
            "stream": True,
            # Fix: without include_usage the OpenAI streaming API never sends a
            # usage payload, so the final "done" chunk always had usage=None.
            "stream_options": {"include_usage": True},
        }

        # NOTE(review): o1-family models reject `max_tokens` (they expect
        # `max_completion_tokens`) and constrain `temperature` — confirm
        # before relying on these parameters with the "o1" model above.
        if max_tokens:
            kwargs["max_tokens"] = max_tokens

        if temperature is not None:
            kwargs["temperature"] = temperature

        if tools:
            # Convert provider-neutral tool specs into OpenAI function tools;
            # accept either "parameters" or Anthropic-style "input_schema".
            kwargs["tools"] = [
                {
                    "type": "function",
                    "function": {
                        "name": t["name"],
                        "description": t.get("description", ""),
                        "parameters": t.get("parameters", t.get("input_schema", {}))
                    }
                }
                for t in tools
            ]

        tool_calls: Dict[int, Dict[str, Any]] = {}  # stream index -> accumulating call
        usage_data = None
        finish_reason = None

        async for chunk in await client.chat.completions.create(**kwargs):
            if chunk.choices and chunk.choices[0].delta:
                delta = chunk.choices[0].delta

                if delta.content:
                    yield StreamChunk(type="text", text=delta.content)

                if delta.tool_calls:
                    # Tool-call deltas arrive fragmented; accumulate by index.
                    for tc in delta.tool_calls:
                        idx = tc.index
                        if idx not in tool_calls:
                            tool_calls[idx] = {
                                "id": tc.id or "",
                                "name": tc.function.name if tc.function else "",
                                "arguments": ""
                            }

                        if tc.id:
                            tool_calls[idx]["id"] = tc.id
                        if tc.function:
                            if tc.function.name:
                                tool_calls[idx]["name"] = tc.function.name
                            if tc.function.arguments:
                                tool_calls[idx]["arguments"] += tc.function.arguments

            if chunk.choices and chunk.choices[0].finish_reason:
                finish_reason = chunk.choices[0].finish_reason

            # With include_usage enabled, usage arrives on the final chunk.
            if chunk.usage:
                usage_data = {
                    "input_tokens": chunk.usage.prompt_tokens,
                    "output_tokens": chunk.usage.completion_tokens,
                }

        # Emit completed tool calls once the stream has finished.
        for tc_data in tool_calls.values():
            try:
                args = json.loads(tc_data["arguments"]) if tc_data["arguments"] else {}
            except json.JSONDecodeError:
                args = {}  # malformed arguments: surface the call with empty args
            yield StreamChunk(
                type="tool_call",
                tool_call=ToolCall(
                    id=tc_data["id"],
                    name=tc_data["name"],
                    arguments=args
                )
            )

        stop_reason = self._map_stop_reason(finish_reason)
        yield StreamChunk(type="done", usage=usage_data, stop_reason=stop_reason)

    def _map_stop_reason(self, openai_finish_reason: Optional[str]) -> str:
        """Map an OpenAI finish_reason onto the shared stop_reason vocabulary."""
        mapping = {
            "stop": "end_turn",
            "tool_calls": "tool_calls",
            "length": "max_tokens",
            "content_filter": "end_turn",
        }
        return mapping.get(openai_finish_reason or "", "end_turn")
|
src/opencode_api/provider/provider.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Any, List, Optional, AsyncIterator, AsyncGenerator, Protocol, runtime_checkable
|
| 2 |
+
from pydantic import BaseModel, Field
|
| 3 |
+
from abc import ABC, abstractmethod
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class ModelInfo(BaseModel):
    """Static metadata describing one model offered by a provider."""
    id: str
    name: str
    provider_id: str
    context_limit: int = 128000
    output_limit: int = 8192
    supports_tools: bool = True
    supports_streaming: bool = True
    cost_input: float = 0.0  # per 1M tokens
    cost_output: float = 0.0  # per 1M tokens
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class ProviderInfo(BaseModel):
    """Serializable summary of a provider: identity plus its model catalog."""
    id: str
    name: str
    models: Dict[str, ModelInfo] = Field(default_factory=dict)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class MessageContent(BaseModel):
    """One structured content part inside a message."""
    type: str = "text"
    text: Optional[str] = None
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class Message(BaseModel):
    """A single chat message exchanged with a model."""
    role: str  # "user", "assistant", "system"
    content: str | List[MessageContent]  # plain text or structured parts
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class ToolCall(BaseModel):
    """A model-issued request to invoke a tool with parsed arguments."""
    id: str
    name: str
    arguments: Dict[str, Any]
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class ToolResult(BaseModel):
    """The textual output produced by executing a tool call."""
    tool_call_id: str
    output: str
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class StreamChunk(BaseModel):
    """One event in a provider's streaming response; fields are set per type."""
    type: str  # "text", "reasoning", "tool_call", "tool_result", "done", "error"
    text: Optional[str] = None
    tool_call: Optional[ToolCall] = None
    error: Optional[str] = None
    usage: Optional[Dict[str, int]] = None
    stop_reason: Optional[str] = None  # "end_turn", "tool_calls", "max_tokens", etc.
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
@runtime_checkable
class Provider(Protocol):
    """Structural interface that every model provider satisfies."""

    @property
    def id(self) -> str: ...

    @property
    def name(self) -> str: ...

    @property
    def models(self) -> Dict[str, ModelInfo]: ...

    def stream(
        self,
        model_id: str,
        messages: List[Message],
        tools: Optional[List[Dict[str, Any]]] = None,
        system: Optional[str] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
    ) -> AsyncGenerator[StreamChunk, None]: ...
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
class BaseProvider(ABC):
    """Abstract base for concrete providers.

    Subclasses supply identity metadata, a model catalog, and a streaming
    chat entry point; get_info() derives a serializable summary.
    """

    @property
    @abstractmethod
    def id(self) -> str:
        # Stable machine-readable identifier, e.g. "openai".
        pass

    @property
    @abstractmethod
    def name(self) -> str:
        # Human-readable display name.
        pass

    @property
    @abstractmethod
    def models(self) -> Dict[str, ModelInfo]:
        # Model catalog keyed by model id.
        pass

    @abstractmethod
    def stream(
        self,
        model_id: str,
        messages: List[Message],
        tools: Optional[List[Dict[str, Any]]] = None,
        system: Optional[str] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
    ) -> AsyncGenerator[StreamChunk, None]:
        # Stream model output as StreamChunk events; see concrete subclasses.
        pass

    def get_info(self) -> ProviderInfo:
        """Return a ProviderInfo snapshot of this provider's id, name, and models."""
        return ProviderInfo(
            id=self.id,
            name=self.name,
            models=self.models
        )
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
# Module-level registry of provider instances, keyed by provider id.
_providers: Dict[str, BaseProvider] = {}


def register_provider(provider: BaseProvider) -> None:
    """Add *provider* to the registry, replacing any entry with the same id."""
    _providers[provider.id] = provider


def get_provider(provider_id: str) -> Optional[BaseProvider]:
    """Look up a registered provider by id; None when unknown."""
    return _providers.get(provider_id)


def list_providers() -> List[ProviderInfo]:
    """Return a ProviderInfo snapshot for every registered provider."""
    return [provider.get_info() for provider in _providers.values()]


def get_model(provider_id: str, model_id: str) -> Optional[ModelInfo]:
    """Resolve a (provider_id, model_id) pair to its ModelInfo, or None."""
    provider = get_provider(provider_id)
    return provider.models.get(model_id) if provider else None
|
src/opencode_api/routes/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .session import router as session_router
|
| 2 |
+
from .provider import router as provider_router
|
| 3 |
+
from .event import router as event_router
|
| 4 |
+
from .question import router as question_router
|
| 5 |
+
from .agent import router as agent_router
|
| 6 |
+
|
| 7 |
+
__all__ = ["session_router", "provider_router", "event_router", "question_router", "agent_router"]
|
src/opencode_api/routes/agent.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Agent routes - manage agent configurations.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from fastapi import APIRouter, HTTPException
|
| 6 |
+
from typing import Optional, List
|
| 7 |
+
|
| 8 |
+
from ..agent import (
|
| 9 |
+
AgentInfo,
|
| 10 |
+
get,
|
| 11 |
+
list_agents,
|
| 12 |
+
default_agent,
|
| 13 |
+
register,
|
| 14 |
+
unregister,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
router = APIRouter(prefix="/agent", tags=["agent"])
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@router.get("", response_model=List[AgentInfo])
|
| 21 |
+
async def get_agents(
|
| 22 |
+
mode: Optional[str] = None,
|
| 23 |
+
include_hidden: bool = False
|
| 24 |
+
):
|
| 25 |
+
"""List all available agents."""
|
| 26 |
+
return list_agents(mode=mode, include_hidden=include_hidden)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@router.get("/default", response_model=AgentInfo)
|
| 30 |
+
async def get_default_agent():
|
| 31 |
+
"""Get the default agent configuration."""
|
| 32 |
+
return default_agent()
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@router.get("/{agent_id}", response_model=AgentInfo)
|
| 36 |
+
async def get_agent(agent_id: str):
|
| 37 |
+
"""Get a specific agent by ID."""
|
| 38 |
+
agent = get(agent_id)
|
| 39 |
+
if not agent:
|
| 40 |
+
raise HTTPException(status_code=404, detail=f"Agent not found: {agent_id}")
|
| 41 |
+
return agent
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
@router.post("", response_model=AgentInfo)
|
| 45 |
+
async def create_agent(agent: AgentInfo):
|
| 46 |
+
"""Register a custom agent."""
|
| 47 |
+
existing = get(agent.id)
|
| 48 |
+
if existing and existing.native:
|
| 49 |
+
raise HTTPException(status_code=400, detail=f"Cannot override native agent: {agent.id}")
|
| 50 |
+
|
| 51 |
+
register(agent)
|
| 52 |
+
return agent
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
@router.delete("/{agent_id}")
|
| 56 |
+
async def delete_agent(agent_id: str):
|
| 57 |
+
"""Unregister a custom agent."""
|
| 58 |
+
agent = get(agent_id)
|
| 59 |
+
if not agent:
|
| 60 |
+
raise HTTPException(status_code=404, detail=f"Agent not found: {agent_id}")
|
| 61 |
+
|
| 62 |
+
if agent.native:
|
| 63 |
+
raise HTTPException(status_code=400, detail=f"Cannot delete native agent: {agent_id}")
|
| 64 |
+
|
| 65 |
+
unregister(agent_id)
|
| 66 |
+
return {"deleted": agent_id}
|
src/opencode_api/routes/event.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import APIRouter
|
| 2 |
+
from fastapi.responses import StreamingResponse
|
| 3 |
+
import asyncio
|
| 4 |
+
import json
|
| 5 |
+
from typing import AsyncIterator
|
| 6 |
+
|
| 7 |
+
from ..core.bus import Bus, EventInstance
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
router = APIRouter(tags=["Events"])
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@router.get("/event")
|
| 14 |
+
async def subscribe_events():
|
| 15 |
+
async def event_generator() -> AsyncIterator[str]:
|
| 16 |
+
queue: asyncio.Queue[EventInstance] = asyncio.Queue()
|
| 17 |
+
|
| 18 |
+
async def handler(event: EventInstance):
|
| 19 |
+
await queue.put(event)
|
| 20 |
+
|
| 21 |
+
unsubscribe = Bus.subscribe_all(handler)
|
| 22 |
+
|
| 23 |
+
yield f"data: {json.dumps({'type': 'server.connected', 'payload': {}})}\n\n"
|
| 24 |
+
|
| 25 |
+
try:
|
| 26 |
+
while True:
|
| 27 |
+
try:
|
| 28 |
+
event = await asyncio.wait_for(queue.get(), timeout=30.0)
|
| 29 |
+
yield f"data: {json.dumps({'type': event.type, 'payload': event.payload})}\n\n"
|
| 30 |
+
except asyncio.TimeoutError:
|
| 31 |
+
yield f"data: {json.dumps({'type': 'server.heartbeat', 'payload': {}})}\n\n"
|
| 32 |
+
except asyncio.CancelledError:
|
| 33 |
+
pass
|
| 34 |
+
finally:
|
| 35 |
+
unsubscribe()
|
| 36 |
+
|
| 37 |
+
return StreamingResponse(
|
| 38 |
+
event_generator(),
|
| 39 |
+
media_type="text/event-stream",
|
| 40 |
+
headers={
|
| 41 |
+
"Cache-Control": "no-cache",
|
| 42 |
+
"Connection": "keep-alive",
|
| 43 |
+
"X-Accel-Buffering": "no",
|
| 44 |
+
}
|
| 45 |
+
)
|
src/opencode_api/routes/provider.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Dict
|
| 2 |
+
from fastapi import APIRouter, HTTPException
|
| 3 |
+
import os
|
| 4 |
+
from dotenv import load_dotenv
|
| 5 |
+
|
| 6 |
+
# .env 파일에서 환경변수 로드
|
| 7 |
+
load_dotenv()
|
| 8 |
+
|
| 9 |
+
from ..provider import list_providers, get_provider
|
| 10 |
+
from ..provider.provider import ProviderInfo, ModelInfo
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
router = APIRouter(prefix="/provider", tags=["Provider"])
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# Required API-key environment variable(s) per provider.
PROVIDER_API_KEYS = {
    "anthropic": "ANTHROPIC_API_KEY",
    "openai": "OPENAI_API_KEY",
    "gemini": ["GOOGLE_API_KEY", "GEMINI_API_KEY"],
    "litellm": None,  # LiteLLM is checked per individual model
}
|
| 23 |
+
|
| 24 |
+
# Required environment variable per LiteLLM model-id prefix.
LITELLM_MODEL_KEYS = {
    "claude-": "ANTHROPIC_API_KEY",
    "gpt-": "OPENAI_API_KEY",
    "o1": "OPENAI_API_KEY",
    "gemini/": ["GOOGLE_API_KEY", "GEMINI_API_KEY"],
    "groq/": "GROQ_API_KEY",
    "deepseek/": "DEEPSEEK_API_KEY",
    "openrouter/": "OPENROUTER_API_KEY",
    "zai/": "ZAI_API_KEY",
}
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def has_api_key(provider_id: str) -> bool:
    """Return True when the environment supplies an API key for *provider_id*.

    A provider mapped to None needs no key; a list means any one of the
    listed variables is sufficient.
    """
    required = PROVIDER_API_KEYS.get(provider_id)
    if required is None:
        return True  # no key required (like litellm container)
    if isinstance(required, list):
        return any(os.environ.get(name) for name in required)
    return bool(os.environ.get(required))
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def filter_litellm_models(models: Dict[str, ModelInfo]) -> Dict[str, ModelInfo]:
    """Keep only the LiteLLM models whose backing API key is configured."""
    available: Dict[str, ModelInfo] = {}
    for model_id, info in models.items():
        for prefix, env_key in LITELLM_MODEL_KEYS.items():
            if not model_id.startswith(prefix):
                continue
            if isinstance(env_key, list):
                configured = any(os.environ.get(k) for k in env_key)
            else:
                configured = bool(os.environ.get(env_key))
            if configured:
                available[model_id] = info
            break  # only the first matching prefix decides
    return available
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@router.get("/", response_model=List[ProviderInfo])
|
| 63 |
+
async def get_providers():
|
| 64 |
+
"""Get available providers (filtered by API key availability)"""
|
| 65 |
+
all_providers = list_providers()
|
| 66 |
+
available = []
|
| 67 |
+
|
| 68 |
+
for provider in all_providers:
|
| 69 |
+
if provider.id == "litellm":
|
| 70 |
+
# LiteLLM: 개별 모델별 필터링
|
| 71 |
+
filtered_models = filter_litellm_models(provider.models)
|
| 72 |
+
if filtered_models:
|
| 73 |
+
provider.models = filtered_models
|
| 74 |
+
available.append(provider)
|
| 75 |
+
elif has_api_key(provider.id):
|
| 76 |
+
available.append(provider)
|
| 77 |
+
|
| 78 |
+
return available
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
@router.get("/{provider_id}", response_model=ProviderInfo)
|
| 82 |
+
async def get_provider_info(provider_id: str):
|
| 83 |
+
provider = get_provider(provider_id)
|
| 84 |
+
if not provider:
|
| 85 |
+
raise HTTPException(status_code=404, detail=f"Provider not found: {provider_id}")
|
| 86 |
+
return provider.get_info()
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
@router.get("/{provider_id}/model", response_model=List[ModelInfo])
|
| 90 |
+
async def get_provider_models(provider_id: str):
|
| 91 |
+
provider = get_provider(provider_id)
|
| 92 |
+
if not provider:
|
| 93 |
+
raise HTTPException(status_code=404, detail=f"Provider not found: {provider_id}")
|
| 94 |
+
return list(provider.models.values())
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
@router.get("/{provider_id}/model/{model_id}", response_model=ModelInfo)
|
| 98 |
+
async def get_model_info(provider_id: str, model_id: str):
|
| 99 |
+
provider = get_provider(provider_id)
|
| 100 |
+
if not provider:
|
| 101 |
+
raise HTTPException(status_code=404, detail=f"Provider not found: {provider_id}")
|
| 102 |
+
|
| 103 |
+
model = provider.models.get(model_id)
|
| 104 |
+
if not model:
|
| 105 |
+
raise HTTPException(status_code=404, detail=f"Model not found: {model_id}")
|
| 106 |
+
|
| 107 |
+
return model
|
src/opencode_api/routes/question.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Question API routes."""
|
| 2 |
+
from typing import List, Optional
|
| 3 |
+
from fastapi import APIRouter, HTTPException
|
| 4 |
+
from pydantic import BaseModel, Field
|
| 5 |
+
|
| 6 |
+
from ..tool import (
|
| 7 |
+
reply_to_question,
|
| 8 |
+
reject_question,
|
| 9 |
+
get_pending_questions,
|
| 10 |
+
QuestionReply,
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
router = APIRouter(prefix="/question", tags=["question"])
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class QuestionAnswerRequest(BaseModel):
    """Request to answer a question."""
    answers: List[List[str]] = Field(..., description="Answers in order (each is array of selected labels)")
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@router.get("")
|
| 23 |
+
@router.get("/")
|
| 24 |
+
async def list_pending_questions(session_id: str = None):
|
| 25 |
+
"""List all pending questions."""
|
| 26 |
+
pending = get_pending_questions(session_id)
|
| 27 |
+
return {"pending": pending}
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
@router.post("/{request_id}/reply")
|
| 31 |
+
async def answer_question(request_id: str, request: QuestionAnswerRequest):
|
| 32 |
+
"""Submit answers to a pending question."""
|
| 33 |
+
success = await reply_to_question(request_id, request.answers)
|
| 34 |
+
|
| 35 |
+
if not success:
|
| 36 |
+
raise HTTPException(
|
| 37 |
+
status_code=404,
|
| 38 |
+
detail=f"Question request '{request_id}' not found or already answered"
|
| 39 |
+
)
|
| 40 |
+
|
| 41 |
+
return {"status": "answered", "request_id": request_id}
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
@router.post("/{request_id}/reject")
|
| 45 |
+
async def dismiss_question(request_id: str):
|
| 46 |
+
"""Dismiss/reject a pending question without answering."""
|
| 47 |
+
success = await reject_question(request_id)
|
| 48 |
+
|
| 49 |
+
if not success:
|
| 50 |
+
raise HTTPException(
|
| 51 |
+
status_code=404,
|
| 52 |
+
detail=f"Question request '{request_id}' not found or already handled"
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
return {"status": "rejected", "request_id": request_id}
|
src/opencode_api/routes/session.py
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional, List
|
| 2 |
+
from fastapi import APIRouter, HTTPException, Query, Depends
|
| 3 |
+
from fastapi.responses import StreamingResponse
|
| 4 |
+
from pydantic import BaseModel
|
| 5 |
+
import json
|
| 6 |
+
|
| 7 |
+
from ..session import Session, SessionInfo, SessionCreate, Message, SessionPrompt
|
| 8 |
+
from ..session.prompt import PromptInput
|
| 9 |
+
from ..core.storage import NotFoundError
|
| 10 |
+
from ..core.auth import AuthUser, optional_auth, require_auth
|
| 11 |
+
from ..core.quota import check_quota, increment_usage
|
| 12 |
+
from ..core.supabase import is_enabled as supabase_enabled
|
| 13 |
+
from ..provider import get_provider
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
router = APIRouter(prefix="/session", tags=["Session"])
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class MessageRequest(BaseModel):
    """Payload for sending a message into a session."""
    content: str  # the user message text
    provider_id: Optional[str] = None  # per-message provider override
    model_id: Optional[str] = None  # per-message model override
    system: Optional[str] = None  # optional system prompt
    temperature: Optional[float] = None
    max_tokens: Optional[int] = None
    tools_enabled: bool = True  # whether tools may be used for this turn
    auto_continue: Optional[bool] = None
    max_steps: Optional[int] = None
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class SessionUpdate(BaseModel):
    """Patchable session fields; None means "leave unchanged"."""
    title: Optional[str] = None
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class GenerateTitleRequest(BaseModel):
    """Request to generate a session title from a message."""
    message: str
    model_id: Optional[str] = None  # optional model override for title generation
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
@router.get("/", response_model=List[SessionInfo])
|
| 41 |
+
async def list_sessions(
|
| 42 |
+
limit: Optional[int] = Query(None, description="Maximum number of sessions to return"),
|
| 43 |
+
user: Optional[AuthUser] = Depends(optional_auth)
|
| 44 |
+
):
|
| 45 |
+
user_id = user.id if user else None
|
| 46 |
+
return await Session.list(limit, user_id)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
@router.post("/", response_model=SessionInfo)
|
| 50 |
+
async def create_session(
|
| 51 |
+
data: Optional[SessionCreate] = None,
|
| 52 |
+
user: Optional[AuthUser] = Depends(optional_auth)
|
| 53 |
+
):
|
| 54 |
+
user_id = user.id if user else None
|
| 55 |
+
return await Session.create(data, user_id)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
@router.get("/{session_id}", response_model=SessionInfo)
|
| 59 |
+
async def get_session(
|
| 60 |
+
session_id: str,
|
| 61 |
+
user: Optional[AuthUser] = Depends(optional_auth)
|
| 62 |
+
):
|
| 63 |
+
try:
|
| 64 |
+
user_id = user.id if user else None
|
| 65 |
+
return await Session.get(session_id, user_id)
|
| 66 |
+
except NotFoundError:
|
| 67 |
+
raise HTTPException(status_code=404, detail=f"Session not found: {session_id}")
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
@router.patch("/{session_id}", response_model=SessionInfo)
|
| 71 |
+
async def update_session(
|
| 72 |
+
session_id: str,
|
| 73 |
+
updates: SessionUpdate,
|
| 74 |
+
user: Optional[AuthUser] = Depends(optional_auth)
|
| 75 |
+
):
|
| 76 |
+
try:
|
| 77 |
+
user_id = user.id if user else None
|
| 78 |
+
update_dict = {k: v for k, v in updates.model_dump().items() if v is not None}
|
| 79 |
+
return await Session.update(session_id, update_dict, user_id)
|
| 80 |
+
except NotFoundError:
|
| 81 |
+
raise HTTPException(status_code=404, detail=f"Session not found: {session_id}")
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
@router.delete("/{session_id}")
|
| 85 |
+
async def delete_session(
|
| 86 |
+
session_id: str,
|
| 87 |
+
user: Optional[AuthUser] = Depends(optional_auth)
|
| 88 |
+
):
|
| 89 |
+
try:
|
| 90 |
+
user_id = user.id if user else None
|
| 91 |
+
await Session.delete(session_id, user_id)
|
| 92 |
+
return {"success": True}
|
| 93 |
+
except NotFoundError:
|
| 94 |
+
raise HTTPException(status_code=404, detail=f"Session not found: {session_id}")
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
@router.get("/{session_id}/message")
|
| 98 |
+
async def list_messages(
|
| 99 |
+
session_id: str,
|
| 100 |
+
limit: Optional[int] = Query(None, description="Maximum number of messages to return"),
|
| 101 |
+
user: Optional[AuthUser] = Depends(optional_auth)
|
| 102 |
+
):
|
| 103 |
+
try:
|
| 104 |
+
user_id = user.id if user else None
|
| 105 |
+
await Session.get(session_id, user_id)
|
| 106 |
+
return await Message.list(session_id, limit, user_id)
|
| 107 |
+
except NotFoundError:
|
| 108 |
+
raise HTTPException(status_code=404, detail=f"Session not found: {session_id}")
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
@router.post("/{session_id}/message")
|
| 112 |
+
async def send_message(
|
| 113 |
+
session_id: str,
|
| 114 |
+
request: MessageRequest,
|
| 115 |
+
user: AuthUser = Depends(check_quota) if supabase_enabled() else Depends(optional_auth)
|
| 116 |
+
):
|
| 117 |
+
user_id = user.id if user else None
|
| 118 |
+
|
| 119 |
+
try:
|
| 120 |
+
await Session.get(session_id, user_id)
|
| 121 |
+
except NotFoundError:
|
| 122 |
+
raise HTTPException(status_code=404, detail=f"Session not found: {session_id}")
|
| 123 |
+
|
| 124 |
+
prompt_input = PromptInput(
|
| 125 |
+
content=request.content,
|
| 126 |
+
provider_id=request.provider_id,
|
| 127 |
+
model_id=request.model_id,
|
| 128 |
+
system=request.system,
|
| 129 |
+
temperature=request.temperature,
|
| 130 |
+
max_tokens=request.max_tokens,
|
| 131 |
+
tools_enabled=request.tools_enabled,
|
| 132 |
+
auto_continue=request.auto_continue,
|
| 133 |
+
max_steps=request.max_steps,
|
| 134 |
+
)
|
| 135 |
+
|
| 136 |
+
async def generate():
|
| 137 |
+
total_input = 0
|
| 138 |
+
total_output = 0
|
| 139 |
+
|
| 140 |
+
async for chunk in SessionPrompt.prompt(session_id, prompt_input, user_id):
|
| 141 |
+
if chunk.usage:
|
| 142 |
+
total_input += chunk.usage.get("input_tokens", 0)
|
| 143 |
+
total_output += chunk.usage.get("output_tokens", 0)
|
| 144 |
+
yield f"data: {json.dumps(chunk.model_dump())}\n\n"
|
| 145 |
+
|
| 146 |
+
if user_id and supabase_enabled():
|
| 147 |
+
await increment_usage(user_id, total_input, total_output)
|
| 148 |
+
|
| 149 |
+
yield "data: [DONE]\n\n"
|
| 150 |
+
|
| 151 |
+
return StreamingResponse(
|
| 152 |
+
generate(),
|
| 153 |
+
media_type="text/event-stream",
|
| 154 |
+
headers={
|
| 155 |
+
"Cache-Control": "no-cache",
|
| 156 |
+
"Connection": "keep-alive",
|
| 157 |
+
"X-Accel-Buffering": "no",
|
| 158 |
+
}
|
| 159 |
+
)
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
@router.post("/{session_id}/abort")
|
| 163 |
+
async def abort_session(session_id: str):
|
| 164 |
+
cancelled = SessionPrompt.cancel(session_id)
|
| 165 |
+
return {"cancelled": cancelled}
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
@router.post("/{session_id}/generate-title")
|
| 169 |
+
async def generate_title(
|
| 170 |
+
session_id: str,
|
| 171 |
+
request: GenerateTitleRequest,
|
| 172 |
+
user: Optional[AuthUser] = Depends(optional_auth)
|
| 173 |
+
):
|
| 174 |
+
"""첫 메시지 기반으로 세션 제목 생성"""
|
| 175 |
+
user_id = user.id if user else None
|
| 176 |
+
|
| 177 |
+
# 세션 존재 확인
|
| 178 |
+
try:
|
| 179 |
+
await Session.get(session_id, user_id)
|
| 180 |
+
except NotFoundError:
|
| 181 |
+
raise HTTPException(status_code=404, detail=f"Session not found: {session_id}")
|
| 182 |
+
|
| 183 |
+
# LiteLLM Provider로 제목 생성
|
| 184 |
+
model_id = request.model_id or "gemini/gemini-2.0-flash"
|
| 185 |
+
provider = get_provider("litellm")
|
| 186 |
+
|
| 187 |
+
if not provider:
|
| 188 |
+
raise HTTPException(status_code=503, detail="LiteLLM provider not available")
|
| 189 |
+
|
| 190 |
+
prompt = f"""다음 사용자 메시지를 보고 짧은 제목을 생성해주세요.
|
| 191 |
+
제목은 10자 이내, 따옴표 없이 제목만 출력.
|
| 192 |
+
|
| 193 |
+
사용자 메시지: "{request.message[:200]}"
|
| 194 |
+
|
| 195 |
+
제목:"""
|
| 196 |
+
|
| 197 |
+
try:
|
| 198 |
+
result = await provider.complete(model_id, prompt, max_tokens=50)
|
| 199 |
+
title = result.strip()[:30]
|
| 200 |
+
|
| 201 |
+
# 세션 제목 업데이트
|
| 202 |
+
await Session.update(session_id, {"title": title}, user_id)
|
| 203 |
+
|
| 204 |
+
return {"title": title}
|
| 205 |
+
except Exception as e:
|
| 206 |
+
raise HTTPException(status_code=500, detail=f"Failed to generate title: {str(e)}")
|
src/opencode_api/session/__init__.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .session import Session, SessionInfo, SessionCreate
|
| 2 |
+
from .message import Message, MessageInfo, UserMessage, AssistantMessage, MessagePart
|
| 3 |
+
from .prompt import SessionPrompt
|
| 4 |
+
from .processor import SessionProcessor, DoomLoopDetector, RetryConfig, StepInfo
|
| 5 |
+
|
| 6 |
+
# Public API of the session package.
__all__ = [
    "Session", "SessionInfo", "SessionCreate",
    "Message", "MessageInfo", "UserMessage", "AssistantMessage", "MessagePart",
    "SessionPrompt",
    "SessionProcessor", "DoomLoopDetector", "RetryConfig", "StepInfo"
]
|
src/opencode_api/session/message.py
ADDED
|
@@ -0,0 +1,348 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional, List, Dict, Any, Union, Literal
|
| 2 |
+
from pydantic import BaseModel, Field
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
|
| 5 |
+
from ..core.storage import Storage, NotFoundError
|
| 6 |
+
from ..core.bus import Bus, MESSAGE_UPDATED, MESSAGE_REMOVED, PART_UPDATED, MessagePayload, PartPayload
|
| 7 |
+
from ..core.identifier import Identifier
|
| 8 |
+
from ..core.supabase import get_client, is_enabled as supabase_enabled
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class MessagePart(BaseModel):
    """One part of an assistant message.

    Part types:
    - "text": plain text response
    - "reasoning": Claude thinking / extended-thinking output
    - "tool_call": a tool invocation (tool_name, tool_args, tool_status)
    - "tool_result": a tool's execution output (tool_output)
    """
    id: str
    session_id: str
    message_id: str
    type: str  # "text", "reasoning", "tool_call", "tool_result"
    content: Optional[str] = None  # used by "text" and "reasoning" parts
    tool_call_id: Optional[str] = None
    tool_name: Optional[str] = None
    tool_args: Optional[Dict[str, Any]] = None
    tool_output: Optional[str] = None
    tool_status: Optional[str] = None  # "pending", "running", "completed", "error"
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class MessageInfo(BaseModel):
    """Fields common to user and assistant messages."""
    id: str
    session_id: str
    role: Literal["user", "assistant"]
    created_at: datetime
    model: Optional[str] = None  # model id (populated for assistant messages)
    provider_id: Optional[str] = None
    usage: Optional[Dict[str, int]] = None  # e.g. {"input_tokens": ..., "output_tokens": ...}
    error: Optional[str] = None  # error text when generation failed
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class UserMessage(MessageInfo):
    """A message authored by the end user."""
    role: Literal["user"] = "user"
    content: str
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class AssistantMessage(MessageInfo):
    """A model-generated message composed of ordered parts."""
    role: Literal["assistant"] = "assistant"
    parts: List[MessagePart] = Field(default_factory=list)
    summary: bool = False  # presumably marks summarization messages — TODO confirm semantics
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class Message:
    """Persistence layer for messages and their parts.

    Every operation supports two backends: Supabase tables
    (``opencode_messages`` / ``opencode_message_parts``) when Supabase is
    enabled AND a user_id is provided, otherwise local JSON storage keyed
    by ``["message", session_id, message_id]``. Mutations publish bus
    events so SSE subscribers can react.
    """

    @staticmethod
    async def create_user(session_id: str, content: str, user_id: Optional[str] = None) -> UserMessage:
        """Create and persist a user message, then publish MESSAGE_UPDATED."""
        message_id = Identifier.generate("message")
        # NOTE(review): datetime.utcnow() is naive and deprecated (3.12+);
        # consider datetime.now(timezone.utc) across this module.
        now = datetime.utcnow()

        msg = UserMessage(
            id=message_id,
            session_id=session_id,
            content=content,
            created_at=now,
        )

        if supabase_enabled() and user_id:
            # Supabase path: created_at is assigned by the database, so the
            # returned msg.created_at may differ from what is stored.
            client = get_client()
            client.table("opencode_messages").insert({
                "id": message_id,
                "session_id": session_id,
                "role": "user",
                "content": content,
            }).execute()
        else:
            # Local path: persist the full model dump as JSON.
            await Storage.write(["message", session_id, message_id], msg.model_dump())

        await Bus.publish(MESSAGE_UPDATED, MessagePayload(session_id=session_id, message_id=message_id))
        return msg

    @staticmethod
    async def create_assistant(
        session_id: str,
        provider_id: Optional[str] = None,
        model: Optional[str] = None,
        user_id: Optional[str] = None,
        summary: bool = False
    ) -> AssistantMessage:
        """Create an empty assistant message (parts are appended later)."""
        message_id = Identifier.generate("message")
        now = datetime.utcnow()

        msg = AssistantMessage(
            id=message_id,
            session_id=session_id,
            created_at=now,
            provider_id=provider_id,
            model=model,
            parts=[],
            summary=summary,
        )

        if supabase_enabled() and user_id:
            # NOTE(review): the `summary` flag is not persisted to Supabase.
            client = get_client()
            client.table("opencode_messages").insert({
                "id": message_id,
                "session_id": session_id,
                "role": "assistant",
                "provider_id": provider_id,
                "model_id": model,
            }).execute()
        else:
            await Storage.write(["message", session_id, message_id], msg.model_dump())

        await Bus.publish(MESSAGE_UPDATED, MessagePayload(session_id=session_id, message_id=message_id))
        return msg

    @staticmethod
    async def get(session_id: str, message_id: str, user_id: Optional[str] = None) -> Union[UserMessage, AssistantMessage]:
        """Load one message (with its parts on the Supabase path).

        Raises:
            NotFoundError: when the message does not exist.
        """
        if supabase_enabled() and user_id:
            client = get_client()
            # NOTE(review): supabase-py .single() typically raises when no
            # row matches, so the `if not result.data` guard below may be
            # unreachable — verify the client's behavior.
            result = client.table("opencode_messages").select("*, opencode_message_parts(*)").eq("id", message_id).eq("session_id", session_id).single().execute()
            if not result.data:
                raise NotFoundError(["message", session_id, message_id])

            data = result.data
            if data.get("role") == "user":
                return UserMessage(
                    id=data["id"],
                    session_id=data["session_id"],
                    role="user",
                    content=data.get("content", ""),
                    created_at=data["created_at"],
                )

            # Assistant message: hydrate the embedded part rows.
            parts = [
                MessagePart(
                    id=p["id"],
                    session_id=session_id,
                    message_id=message_id,
                    type=p["type"],
                    content=p.get("content"),
                    tool_call_id=p.get("tool_call_id"),
                    tool_name=p.get("tool_name"),
                    tool_args=p.get("tool_args"),
                    tool_output=p.get("tool_output"),
                    tool_status=p.get("tool_status"),
                )
                for p in data.get("opencode_message_parts", [])
            ]
            return AssistantMessage(
                id=data["id"],
                session_id=data["session_id"],
                role="assistant",
                created_at=data["created_at"],
                provider_id=data.get("provider_id"),
                model=data.get("model_id"),
                # NOTE(review): usage is dropped when input_tokens is 0/None
                # even if output_tokens > 0.
                usage={"input_tokens": data.get("input_tokens", 0), "output_tokens": data.get("output_tokens", 0)} if data.get("input_tokens") else None,
                error=data.get("error"),
                parts=parts,
            )

        # Local-storage path.
        data = await Storage.read(["message", session_id, message_id])
        if not data:
            raise NotFoundError(["message", session_id, message_id])

        if data.get("role") == "user":
            return UserMessage(**data)
        return AssistantMessage(**data)

    @staticmethod
    async def add_part(message_id: str, session_id: str, part: MessagePart, user_id: Optional[str] = None) -> MessagePart:
        """Append a part to a message; assigns the part a fresh id.

        The passed `part` is mutated in place (id / message_id / session_id).
        Publishes PART_UPDATED on success.
        """
        part.id = Identifier.generate("part")
        part.message_id = message_id
        part.session_id = session_id

        if supabase_enabled() and user_id:
            client = get_client()
            client.table("opencode_message_parts").insert({
                "id": part.id,
                "message_id": message_id,
                "type": part.type,
                "content": part.content,
                "tool_call_id": part.tool_call_id,
                "tool_name": part.tool_name,
                "tool_args": part.tool_args,
                "tool_output": part.tool_output,
                "tool_status": part.tool_status,
            }).execute()
        else:
            # Local path: parts live inline on the message document.
            msg_data = await Storage.read(["message", session_id, message_id])
            if not msg_data:
                raise NotFoundError(["message", session_id, message_id])

            if "parts" not in msg_data:
                msg_data["parts"] = []
            msg_data["parts"].append(part.model_dump())
            await Storage.write(["message", session_id, message_id], msg_data)

        await Bus.publish(PART_UPDATED, PartPayload(
            session_id=session_id,
            message_id=message_id,
            part_id=part.id
        ))
        return part

    @staticmethod
    async def update_part(session_id: str, message_id: str, part_id: str, updates: Dict[str, Any], user_id: Optional[str] = None) -> MessagePart:
        """Apply field updates to an existing part and publish PART_UPDATED.

        Raises:
            NotFoundError: when the part (or, locally, the message) is missing.
        """
        if supabase_enabled() and user_id:
            client = get_client()
            result = client.table("opencode_message_parts").update(updates).eq("id", part_id).execute()
            if result.data:
                p = result.data[0]
                await Bus.publish(PART_UPDATED, PartPayload(
                    session_id=session_id,
                    message_id=message_id,
                    part_id=part_id
                ))
                return MessagePart(
                    id=p["id"],
                    session_id=session_id,
                    message_id=message_id,
                    type=p["type"],
                    content=p.get("content"),
                    tool_call_id=p.get("tool_call_id"),
                    tool_name=p.get("tool_name"),
                    tool_args=p.get("tool_args"),
                    tool_output=p.get("tool_output"),
                    tool_status=p.get("tool_status"),
                )
            raise NotFoundError(["part", message_id, part_id])

        # Local path: locate the part inside the message document.
        msg_data = await Storage.read(["message", session_id, message_id])
        if not msg_data:
            raise NotFoundError(["message", session_id, message_id])

        for i, p in enumerate(msg_data.get("parts", [])):
            if p.get("id") == part_id:
                msg_data["parts"][i].update(updates)
                await Storage.write(["message", session_id, message_id], msg_data)
                await Bus.publish(PART_UPDATED, PartPayload(
                    session_id=session_id,
                    message_id=message_id,
                    part_id=part_id
                ))
                return MessagePart(**msg_data["parts"][i])

        raise NotFoundError(["part", message_id, part_id])

    @staticmethod
    async def list(session_id: str, limit: Optional[int] = None, user_id: Optional[str] = None) -> List[Union[UserMessage, AssistantMessage]]:
        """List a session's messages ordered by created_at."""
        if supabase_enabled() and user_id:
            client = get_client()
            query = client.table("opencode_messages").select("*, opencode_message_parts(*)").eq("session_id", session_id).order("created_at")
            if limit:
                query = query.limit(limit)
            result = query.execute()

            messages = []
            for data in result.data:
                if data.get("role") == "user":
                    messages.append(UserMessage(
                        id=data["id"],
                        session_id=data["session_id"],
                        role="user",
                        content=data.get("content", ""),
                        created_at=data["created_at"],
                    ))
                else:
                    parts = [
                        MessagePart(
                            id=p["id"],
                            session_id=session_id,
                            message_id=data["id"],
                            type=p["type"],
                            content=p.get("content"),
                            tool_call_id=p.get("tool_call_id"),
                            tool_name=p.get("tool_name"),
                            tool_args=p.get("tool_args"),
                            tool_output=p.get("tool_output"),
                            tool_status=p.get("tool_status"),
                        )
                        for p in data.get("opencode_message_parts", [])
                    ]
                    messages.append(AssistantMessage(
                        id=data["id"],
                        session_id=data["session_id"],
                        role="assistant",
                        created_at=data["created_at"],
                        provider_id=data.get("provider_id"),
                        model=data.get("model_id"),
                        # NOTE(review): same usage-dropping caveat as in get().
                        usage={"input_tokens": data.get("input_tokens", 0), "output_tokens": data.get("output_tokens", 0)} if data.get("input_tokens") else None,
                        error=data.get("error"),
                        parts=parts,
                    ))
            return messages

        # Local path.
        # NOTE(review): `limit` is applied in storage-key order BEFORE the
        # created_at sort, so which messages survive truncation depends on
        # key ordering — verify this is intended.
        message_keys = await Storage.list(["message", session_id])
        messages = []

        for key in message_keys:
            if limit and len(messages) >= limit:
                break
            data = await Storage.read(key)
            if data:
                if data.get("role") == "user":
                    messages.append(UserMessage(**data))
                else:
                    messages.append(AssistantMessage(**data))

        messages.sort(key=lambda m: m.created_at)
        return messages

    @staticmethod
    async def delete(session_id: str, message_id: str, user_id: Optional[str] = None) -> bool:
        """Delete a message and publish MESSAGE_REMOVED; always returns True."""
        if supabase_enabled() and user_id:
            client = get_client()
            client.table("opencode_messages").delete().eq("id", message_id).execute()
        else:
            await Storage.remove(["message", session_id, message_id])

        await Bus.publish(MESSAGE_REMOVED, MessagePayload(session_id=session_id, message_id=message_id))
        return True

    @staticmethod
    async def set_usage(session_id: str, message_id: str, usage: Dict[str, int], user_id: Optional[str] = None) -> None:
        """Store token usage on a message; a missing message is ignored locally."""
        if supabase_enabled() and user_id:
            client = get_client()
            client.table("opencode_messages").update({
                "input_tokens": usage.get("input_tokens", 0),
                "output_tokens": usage.get("output_tokens", 0),
            }).eq("id", message_id).execute()
        else:
            msg_data = await Storage.read(["message", session_id, message_id])
            if msg_data:
                msg_data["usage"] = usage
                await Storage.write(["message", session_id, message_id], msg_data)

    @staticmethod
    async def set_error(session_id: str, message_id: str, error: str, user_id: Optional[str] = None) -> None:
        """Store an error string on a message; a missing message is ignored locally."""
        if supabase_enabled() and user_id:
            client = get_client()
            client.table("opencode_messages").update({"error": error}).eq("id", message_id).execute()
        else:
            msg_data = await Storage.read(["message", session_id, message_id])
            if msg_data:
                msg_data["error"] = error
                await Storage.write(["message", session_id, message_id], msg_data)
|
src/opencode_api/session/processor.py
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Session processor for managing agentic loop execution.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from typing import Optional, Dict, Any, AsyncIterator, List
|
| 6 |
+
from pydantic import BaseModel
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
import asyncio
|
| 9 |
+
|
| 10 |
+
from ..provider.provider import StreamChunk
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class DoomLoopDetector:
    """Detects consecutive calls to the same tool with the same arguments.

    Mirrors the original opencode behavior: both the tool name and its
    arguments are compared, so the same tool invoked with different
    arguments counts as legitimate repetition, not a loop.
    """

    def __init__(self, threshold: int = 3):
        # threshold: number of consecutive identical calls that counts as a loop.
        self.threshold = threshold
        self.history: List[tuple[str, str]] = []  # (tool_name, args_hash)

    def record(self, tool_name: str, args: Optional[Dict[str, Any]] = None) -> bool:
        """Record a tool call; return True when a doom loop is detected.

        Args:
            tool_name: name of the tool being invoked.
            args: tool arguments (None is treated as an empty dict).

        Returns:
            True if the last `threshold` calls were identical
            (same tool AND same arguments), False otherwise.
        """
        import hashlib
        import json

        # Normalize args to a stable fingerprint (sorted-key JSON, matching
        # the original's JSON comparison); default=str keeps non-JSON-able
        # values from raising.
        args_str = json.dumps(args or {}, sort_keys=True, default=str)
        args_hash = hashlib.md5(args_str.encode()).hexdigest()[:8]

        self.history.append((tool_name, args_hash))
        # Fix: bound memory over long sessions — only the most recent
        # `threshold` entries can ever matter for detection.
        if len(self.history) > self.threshold:
            del self.history[:-self.threshold]

        # Loop detected when the last `threshold` signatures are all equal.
        if len(self.history) >= self.threshold:
            recent = self.history[-self.threshold:]
            if len(set(recent)) == 1:
                return True

        return False

    def reset(self):
        """Clear the recorded history (e.g. at the start of a new prompt)."""
        self.history = []
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class RetryConfig(BaseModel):
    """Retry policy consumed by SessionProcessor.retry_with_backoff."""
    max_retries: int = 3  # total number of attempts made (loop range bound)
    base_delay: float = 1.0  # seconds; delay for the first backoff
    max_delay: float = 30.0  # seconds; cap on any single backoff delay
    exponential_base: float = 2.0  # per-attempt delay multiplier
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class StepInfo(BaseModel):
    """Bookkeeping for one iteration of the agentic loop."""
    step: int  # 1-based step number
    started_at: datetime
    finished_at: Optional[datetime] = None
    tool_calls: List[str] = []  # safe here: pydantic copies mutable defaults per instance
    status: str = "running"  # running, completed, error, doom_loop
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class SessionProcessor:
    """Manages execution of the agentic loop for one session.

    Features:
    - Doom-loop prevention (detects consecutive identical tool calls)
    - Automatic retry with exponential backoff
    - Step tracking (for step-start / step-finish events)
    """

    # Registry of live processors, keyed by session id.
    _processors: Dict[str, "SessionProcessor"] = {}

    def __init__(self, session_id: str, max_steps: int = 50, doom_threshold: int = 3):
        self.session_id = session_id
        self.max_steps = max_steps  # hard cap on loop iterations
        self.doom_detector = DoomLoopDetector(threshold=doom_threshold)
        self.retry_config = RetryConfig()
        self.steps: List[StepInfo] = []
        self.current_step: Optional[StepInfo] = None
        self.aborted = False

    @classmethod
    def get_or_create(cls, session_id: str, **kwargs) -> "SessionProcessor":
        """Return this session's processor, creating one if needed.

        kwargs apply only on first creation; an existing processor is
        returned unchanged.
        """
        if session_id not in cls._processors:
            cls._processors[session_id] = cls(session_id, **kwargs)
        return cls._processors[session_id]

    @classmethod
    def remove(cls, session_id: str) -> None:
        """Drop the processor for this session from the registry (no-op if absent)."""
        cls._processors.pop(session_id, None)

    def start_step(self) -> StepInfo:
        """Begin a new step and make it the current one."""
        self.current_step = StepInfo(
            step=len(self.steps) + 1,
            started_at=datetime.utcnow()
        )
        self.steps.append(self.current_step)
        return self.current_step

    def finish_step(self, status: str = "completed") -> Optional[StepInfo]:
        """Mark the current step finished.

        Returns the finished step, or None when no step is active
        (fix: the original annotation claimed a non-optional StepInfo).
        """
        if self.current_step:
            self.current_step.finished_at = datetime.utcnow()
            self.current_step.status = status
        return self.current_step

    def record_tool_call(self, tool_name: str, tool_args: Optional[Dict[str, Any]] = None) -> bool:
        """Record a tool call; return True when a doom loop is detected.

        Args:
            tool_name: name of the tool.
            tool_args: tool arguments (used for doom-loop comparison).
        """
        if self.current_step:
            self.current_step.tool_calls.append(tool_name)
        return self.doom_detector.record(tool_name, tool_args)

    def is_doom_loop(self) -> bool:
        """True when the most recent `threshold` recorded calls are identical."""
        history = self.doom_detector.history
        threshold = self.doom_detector.threshold
        return len(history) >= threshold and len(set(history[-threshold:])) == 1

    def should_continue(self) -> bool:
        """Whether the agentic loop may run another step."""
        if self.aborted:
            return False
        if len(self.steps) >= self.max_steps:
            return False
        if self.is_doom_loop():
            return False
        return True

    def abort(self) -> None:
        """Stop the loop before its next step."""
        self.aborted = True

    async def calculate_retry_delay(self, attempt: int) -> float:
        """Exponential-backoff delay for a 0-based attempt, capped at max_delay."""
        delay = self.retry_config.base_delay * (self.retry_config.exponential_base ** attempt)
        return min(delay, self.retry_config.max_delay)

    async def retry_with_backoff(self, func, *args, **kwargs):
        """Call `func` with exponential backoff, re-raising the last error.

        Raises:
            ValueError: when retry_config.max_retries is not positive
                (fix: the original would `raise None` — a TypeError).
            Exception: the last error raised by `func` after all attempts.
        """
        if self.retry_config.max_retries < 1:
            raise ValueError("max_retries must be >= 1")

        last_error = None
        for attempt in range(self.retry_config.max_retries):
            try:
                return await func(*args, **kwargs)
            except Exception as e:
                last_error = e
                # Sleep between attempts, but not after the final failure.
                if attempt < self.retry_config.max_retries - 1:
                    await asyncio.sleep(await self.calculate_retry_delay(attempt))

        raise last_error

    def get_summary(self) -> Dict[str, Any]:
        """Snapshot of processor state for diagnostics/events."""
        return {
            "session_id": self.session_id,
            "total_steps": len(self.steps),
            "max_steps": self.max_steps,
            "aborted": self.aborted,
            "doom_loop_detected": self.is_doom_loop(),
            "steps": [
                {
                    "step": s.step,
                    "status": s.status,
                    "tool_calls": s.tool_calls,
                    "duration": (s.finished_at - s.started_at).total_seconds() if s.finished_at else None
                }
                for s in self.steps
            ]
        }
|
src/opencode_api/session/prompt.py
ADDED
|
@@ -0,0 +1,701 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Session prompt handling with agentic loop support.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from typing import Optional, List, Dict, Any, AsyncIterator, Literal
|
| 6 |
+
from pydantic import BaseModel
|
| 7 |
+
import asyncio
|
| 8 |
+
import json
|
| 9 |
+
|
| 10 |
+
from .session import Session
|
| 11 |
+
from .message import Message, MessagePart, AssistantMessage
|
| 12 |
+
from .processor import SessionProcessor
|
| 13 |
+
from ..provider import get_provider, list_providers
|
| 14 |
+
from ..provider.provider import Message as ProviderMessage, StreamChunk, ToolCall
|
| 15 |
+
from ..tool import get_tool, get_tools_schema, ToolContext, get_registry
|
| 16 |
+
from ..core.config import settings
|
| 17 |
+
from ..core.bus import Bus, PART_UPDATED, PartPayload, STEP_STARTED, STEP_FINISHED, StepPayload, TOOL_STATE_CHANGED, ToolStatePayload
|
| 18 |
+
from ..agent import get as get_agent, default_agent, get_system_prompt, is_tool_allowed, AgentInfo, get_prompt_for_provider
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class PromptInput(BaseModel):
    """Request payload for one prompt turn against a session."""
    # The user's message text; empty string is used for loop-continuation turns.
    content: str
    # Provider override; when None it is resolved from the session or model_id.
    provider_id: Optional[str] = None
    # Model override; when None the session's model or the configured default is used.
    model_id: Optional[str] = None
    # Extra system prompt appended after provider/agent prompts.
    system: Optional[str] = None
    # Sampling overrides; fall back to the agent's defaults when None.
    temperature: Optional[float] = None
    max_tokens: Optional[int] = None
    # Whether the tool schema is sent to the provider at all.
    tools_enabled: bool = True
    # Agentic loop options
    auto_continue: Optional[bool] = None  # None = use agent default
    max_steps: Optional[int] = None  # None = use agent default
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class LoopState(BaseModel):
    """Mutable per-session state of a running agentic loop."""
    # 1-based index of the step currently executing (0 = not started).
    step: int = 0
    # Hard cap on loop iterations.
    max_steps: int = 50
    # Whether the loop keeps issuing continuation turns automatically.
    auto_continue: bool = True
    # stop_reason of the last "done" chunk (e.g. "tool_calls"); drives continuation.
    stop_reason: Optional[str] = None
    # True while the loop is paused (question tool, cancellation).
    paused: bool = False
    # Why the loop paused, e.g. "question" or "cancelled".
    pause_reason: Optional[str] = None
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
import re  # NOTE(review): mid-file import; consider moving to the top-of-file import block

# Matches tool calls a model wrote out as literal text instead of emitting a
# structured tool call, e.g. "[Called tool: websearch({'query': 'x'})]".
# Group 1 = tool name, group 2 = raw argument text (a brace-delimited dict or
# free text up to the closing parenthesis).
FAKE_TOOL_CALL_PATTERN = re.compile(
    r'\[Called\s+tool:\s*(\w+)\s*\(\s*(\{[^}]*\}|\{[^)]*\}|[^)]*)\s*\)\]',
    re.IGNORECASE
)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class SessionPrompt:
    """Orchestrates prompt turns for a session: single turns and the agentic loop."""

    # In-flight prompt tasks keyed by session id; used by cancel().
    _active_sessions: Dict[str, asyncio.Task] = {}
    # Live agentic-loop state keyed by session id; removed when the loop exits.
    _loop_states: Dict[str, LoopState] = {}
|
| 54 |
+
|
| 55 |
+
@classmethod
async def prompt(
    cls,
    session_id: str,
    input: PromptInput,
    user_id: Optional[str] = None
) -> AsyncIterator[StreamChunk]:
    """Run one prompt against a session and stream the resulting chunks.

    Resolves the session's agent, then dispatches either to the agentic
    loop (auto-continue) or to a single model turn. Per-request overrides
    in *input* win over the agent's defaults.
    """
    session = await Session.get(session_id, user_id)

    # Resolve the agent; "build" is the fallback id, default_agent() the
    # fallback agent when the id is unknown.
    resolved_agent = get_agent(session.agent_id or "build") or default_agent()

    use_loop = resolved_agent.auto_continue if input.auto_continue is None else input.auto_continue
    step_limit = resolved_agent.max_steps if input.max_steps is None else input.max_steps

    if use_loop:
        stream = cls._agentic_loop(session_id, input, resolved_agent, step_limit, user_id)
    else:
        stream = cls._single_turn(session_id, input, resolved_agent, user_id=user_id)

    async for chunk in stream:
        yield chunk
|
| 78 |
+
|
| 79 |
+
@classmethod
async def _agentic_loop(
    cls,
    session_id: str,
    input: PromptInput,
    agent: AgentInfo,
    max_steps: int,
    user_id: Optional[str] = None
) -> AsyncIterator[StreamChunk]:
    """Run repeated model turns until the model stops asking for tools.

    Each iteration streams one `_single_turn`. The loop continues only
    while the last turn ended with stop_reason == "tool_calls", the step
    budget is not exhausted, no doom loop was detected, and the loop is
    not paused (question tool). Loop state is published on the event bus
    as STEP_STARTED / STEP_FINISHED events.
    """
    state = LoopState(step=0, max_steps=max_steps, auto_continue=True)
    cls._loop_states[session_id] = state

    # Fetch (or create) the per-session processor that tracks steps and
    # detects doom loops.
    processor = SessionProcessor.get_or_create(session_id, max_steps=max_steps)

    try:
        while processor.should_continue() and not state.paused:
            state.step += 1

            # Start a new step and notify subscribers.
            step_info = processor.start_step()  # NOTE(review): return value unused
            await Bus.publish(STEP_STARTED, StepPayload(
                session_id=session_id,
                step=state.step,
                max_steps=max_steps
            ))

            print(f"[AGENTIC LOOP] Starting step {state.step}, stop_reason={state.stop_reason}", flush=True)

            # The first step carries the user's input; later steps are
            # empty continuation turns that reuse the same model settings.
            turn_input = input if state.step == 1 else PromptInput(
                content="",
                provider_id=input.provider_id,
                model_id=input.model_id,
                temperature=input.temperature,
                max_tokens=input.max_tokens,
                tools_enabled=input.tools_enabled,
                auto_continue=False,
            )

            if state.step > 1:
                yield StreamChunk(type="step", text=f"Step {state.step}")

            # Track tool calls in this turn
            has_tool_calls_this_turn = False

            async for chunk in cls._single_turn(
                session_id,
                turn_input,
                agent,
                is_continuation=(state.step > 1),
                user_id=user_id
            ):
                yield chunk

                if chunk.type == "tool_call" and chunk.tool_call:
                    has_tool_calls_this_turn = True
                    print(f"[AGENTIC LOOP] tool_call: {chunk.tool_call.name}", flush=True)

                    # The question tool suspends the loop until the user answers.
                    if chunk.tool_call.name == "question" and agent.pause_on_question:
                        state.paused = True
                        state.pause_reason = "question"

                # When the question tool completes (answer received), un-pause.
                elif chunk.type == "tool_result":
                    if state.paused and state.pause_reason == "question":
                        state.paused = False
                        state.pause_reason = None

                elif chunk.type == "done":
                    state.stop_reason = chunk.stop_reason
                    print(f"[AGENTIC LOOP] done: stop_reason={chunk.stop_reason}", flush=True)

            # Finish the step, flagging doom loops (same tool called repeatedly).
            step_status = "completed"
            if processor.is_doom_loop():
                step_status = "doom_loop"
                print(f"[AGENTIC LOOP] Doom loop detected! Stopping execution.", flush=True)
                yield StreamChunk(type="text", text=f"\n[경고: 동일 도구 반복 호출 감지, 루프를 중단합니다]\n")

            processor.finish_step(status=step_status)
            await Bus.publish(STEP_FINISHED, StepPayload(
                session_id=session_id,
                step=state.step,
                max_steps=max_steps
            ))

            print(f"[AGENTIC LOOP] End of step {state.step}: stop_reason={state.stop_reason}, has_tool_calls={has_tool_calls_this_turn}", flush=True)

            # Stop immediately on a detected doom loop.
            if processor.is_doom_loop():
                break

            # If this turn had no new tool calls (just text response), we're done
            if state.stop_reason != "tool_calls":
                print(f"[AGENTIC LOOP] Breaking: stop_reason != tool_calls", flush=True)
                break

        # After the loop only emit a status message (no extra summary LLM call).
        if state.paused:
            yield StreamChunk(type="text", text=f"\n[Paused: {state.pause_reason}]\n")
        elif state.step >= state.max_steps:
            yield StreamChunk(type="text", text=f"\n[Max steps ({state.max_steps}) reached]\n")
        # else: the loop ended naturally (no additional output)

    finally:
        # Always drop loop state and the processor, even on cancellation/error.
        if session_id in cls._loop_states:
            del cls._loop_states[session_id]
        SessionProcessor.remove(session_id)
|
| 188 |
+
|
| 189 |
+
@classmethod
|
| 190 |
+
def _infer_provider_from_model(cls, model_id: str) -> str:
|
| 191 |
+
"""model_id에서 provider_id를 추론"""
|
| 192 |
+
# LiteLLM prefix 기반 모델은 litellm provider 사용
|
| 193 |
+
litellm_prefixes = ["gemini/", "groq/", "deepseek/", "openrouter/", "zai/"]
|
| 194 |
+
for prefix in litellm_prefixes:
|
| 195 |
+
if model_id.startswith(prefix):
|
| 196 |
+
return "litellm"
|
| 197 |
+
|
| 198 |
+
# Claude 모델
|
| 199 |
+
if model_id.startswith("claude-"):
|
| 200 |
+
return "litellm"
|
| 201 |
+
|
| 202 |
+
# GPT/O1 모델
|
| 203 |
+
if model_id.startswith("gpt-") or model_id.startswith("o1"):
|
| 204 |
+
return "litellm"
|
| 205 |
+
|
| 206 |
+
# 기본값
|
| 207 |
+
return settings.default_provider
|
| 208 |
+
|
| 209 |
+
@classmethod
async def _single_turn(
    cls,
    session_id: str,
    input: PromptInput,
    agent: AgentInfo,
    is_continuation: bool = False,
    user_id: Optional[str] = None
) -> AsyncIterator[StreamChunk]:
    """Stream exactly one model turn, persisting parts as they arrive.

    Creates the user/assistant messages, streams the provider response,
    stores text/reasoning/tool parts incrementally, executes tool calls
    inline, and yields every chunk to the caller. Errors are recorded on
    the assistant message and surfaced as an "error" chunk.
    """
    session = await Session.get(session_id, user_id)

    model_id = input.model_id or session.model_id or settings.default_model

    # Resolve provider: explicit input wins, then the session's choice,
    # otherwise infer it from the model id.
    if input.provider_id:
        provider_id = input.provider_id
    elif session.provider_id:
        provider_id = session.provider_id
    else:
        provider_id = cls._infer_provider_from_model(model_id)

    print(f"[Prompt DEBUG] input.provider_id={input.provider_id}, session.provider_id={session.provider_id}", flush=True)
    print(f"[Prompt DEBUG] Final provider_id={provider_id}, model_id={model_id}", flush=True)

    provider = get_provider(provider_id)
    print(f"[Prompt DEBUG] Got provider: {provider}", flush=True)
    if not provider:
        yield StreamChunk(type="error", error=f"Provider not found: {provider_id}")
        return

    # Only create user message if there's content (not a continuation)
    if input.content and not is_continuation:
        user_msg = await Message.create_user(session_id, input.content, user_id)

    assistant_msg = await Message.create_assistant(session_id, provider_id, model_id, user_id)

    # Build message history (history[-1] is the just-created assistant shell).
    history = await Message.list(session_id, user_id=user_id)
    messages = cls._build_messages(history[:-1], include_tool_results=True)

    # Build system prompt with provider-specific optimization
    system_prompt = cls._build_system_prompt(agent, provider_id, input.system)

    # Get tools schema
    tools_schema = get_tools_schema() if input.tools_enabled else None

    # Incrementally persisted text part and its accumulated content.
    current_text_part: Optional[MessagePart] = None
    accumulated_text = ""

    # Same, for reasoning ("thinking") content.
    current_reasoning_part: Optional[MessagePart] = None
    accumulated_reasoning = ""

    try:
        async for chunk in provider.stream(
            model_id=model_id,
            messages=messages,
            tools=tools_schema,
            system=system_prompt,
            temperature=input.temperature or agent.temperature,
            max_tokens=input.max_tokens or agent.max_tokens,
        ):
            if chunk.type == "text":
                accumulated_text += chunk.text or ""

                # First text chunk creates the part; later chunks update it
                # in place with the full accumulated content.
                if current_text_part is None:
                    current_text_part = await Message.add_part(
                        assistant_msg.id,
                        session_id,
                        MessagePart(
                            id="",
                            session_id=session_id,
                            message_id=assistant_msg.id,
                            type="text",
                            content=accumulated_text
                        ),
                        user_id
                    )
                else:
                    await Message.update_part(
                        session_id,
                        assistant_msg.id,
                        current_text_part.id,
                        {"content": accumulated_text},
                        user_id
                    )

                yield chunk

            elif chunk.type == "tool_call":
                tc = chunk.tool_call
                if tc:
                    # Check permission
                    permission = is_tool_allowed(agent, tc.name)
                    if permission == "deny":
                        yield StreamChunk(
                            type="tool_result",
                            text=f"Error: Tool '{tc.name}' is not allowed for this agent"
                        )
                        continue

                    # Persist the tool call with a "running" status.
                    tool_part = await Message.add_part(
                        assistant_msg.id,
                        session_id,
                        MessagePart(
                            id="",
                            session_id=session_id,
                            message_id=assistant_msg.id,
                            type="tool_call",
                            tool_call_id=tc.id,
                            tool_name=tc.name,
                            tool_args=tc.arguments,
                            tool_status="running"  # execution in progress
                        ),
                        user_id
                    )

                    # IMPORTANT: Yield tool_call FIRST so frontend can show UI
                    # This is critical for interactive tools like 'question'
                    yield chunk

                    # Publish the "tool started" event.
                    await Bus.publish(TOOL_STATE_CHANGED, ToolStatePayload(
                        session_id=session_id,
                        message_id=assistant_msg.id,
                        part_id=tool_part.id,
                        tool_name=tc.name,
                        status="running"
                    ))

                    # Execute tool (may block for user input, e.g., question tool)
                    tool_result, tool_status = await cls._execute_tool(
                        session_id,
                        assistant_msg.id,
                        tc.id,
                        tc.name,
                        tc.arguments,
                        user_id
                    )

                    # Update the tool_call part's status to completed/error.
                    await Message.update_part(
                        session_id,
                        assistant_msg.id,
                        tool_part.id,
                        {"tool_status": tool_status},
                        user_id
                    )

                    # Publish the "tool finished" event.
                    await Bus.publish(TOOL_STATE_CHANGED, ToolStatePayload(
                        session_id=session_id,
                        message_id=assistant_msg.id,
                        part_id=tool_part.id,
                        tool_name=tc.name,
                        status=tool_status
                    ))

                    yield StreamChunk(
                        type="tool_result",
                        text=tool_result
                    )
                else:
                    yield chunk

            elif chunk.type == "reasoning":
                # Persist reasoning as well (previously it was only yielded).
                accumulated_reasoning += chunk.text or ""

                if current_reasoning_part is None:
                    current_reasoning_part = await Message.add_part(
                        assistant_msg.id,
                        session_id,
                        MessagePart(
                            id="",
                            session_id=session_id,
                            message_id=assistant_msg.id,
                            type="reasoning",
                            content=accumulated_reasoning
                        ),
                        user_id
                    )
                else:
                    await Message.update_part(
                        session_id,
                        assistant_msg.id,
                        current_reasoning_part.id,
                        {"content": accumulated_reasoning},
                        user_id
                    )

                yield chunk

            elif chunk.type == "done":
                if chunk.usage:
                    await Message.set_usage(session_id, assistant_msg.id, chunk.usage, user_id)
                yield chunk

            elif chunk.type == "error":
                await Message.set_error(session_id, assistant_msg.id, chunk.error or "Unknown error", user_id)
                yield chunk

        # Bump the session's updated_at after a successful stream.
        await Session.touch(session_id)

    except Exception as e:
        # Record the failure on the assistant message and surface it.
        error_msg = str(e)
        await Message.set_error(session_id, assistant_msg.id, error_msg, user_id)
        yield StreamChunk(type="error", error=error_msg)
|
| 417 |
+
|
| 418 |
+
@classmethod
def _detect_fake_tool_call(cls, text: str) -> Optional[Dict[str, Any]]:
    """
    Detect if the model wrote a fake tool call as text instead of using actual tool calling.
    Returns parsed tool call info if detected, None otherwise.

    Patterns detected:
    - [Called tool: toolname({...})]
    - [Called tool: toolname({'key': 'value'})]

    Returns:
        {"name": <tool name>, "arguments": <parsed args>} or None.
    """
    if not text:
        return None

    match = FAKE_TOOL_CALL_PATTERN.search(text)
    if not match:
        return None

    tool_name = match.group(1)
    args_str = match.group(2).strip()

    args: Dict[str, Any] = {}
    if args_str:
        # Bug fix: try the raw text first. The old code unconditionally
        # replaced every single quote with a double quote before parsing,
        # which corrupted valid JSON containing apostrophes
        # (e.g. {"q": "it's fine"}). Only fall back to the quote swap for
        # Python-dict-style text.
        parsed = None
        for candidate in (args_str, args_str.replace("'", '"')):
            try:
                parsed = json.loads(candidate)
                break
            except json.JSONDecodeError:
                continue

        if parsed is not None:
            args = parsed
        else:
            # Last resort: scrape 'key': 'value' pairs out of non-JSON text.
            kv_pattern = re.compile(r'["\']?(\w+)["\']?\s*:\s*["\']([^"\']+)["\']')
            for kv_match in kv_pattern.finditer(args_str):
                args[kv_match.group(1)] = kv_match.group(2)

    return {
        "name": tool_name,
        "arguments": args
    }
|
| 456 |
+
|
| 457 |
+
@classmethod
def _build_system_prompt(
    cls,
    agent: AgentInfo,
    provider_id: str,
    custom_system: Optional[str] = None
) -> Optional[str]:
    """Assemble the full system prompt for a turn.

    Concatenates, in order: the provider-optimized base prompt, the
    agent's own prompt (only when it differs from the base), and any
    caller-supplied custom prompt. Sections are joined with blank lines.

    Args:
        agent: The agent configuration.
        provider_id: Provider identifier used to pick the optimized prompt.
        custom_system: Optional extra system prompt appended last.

    Returns:
        The combined prompt, or None when every section is empty.
    """
    sections: List[str] = []

    # Provider-specific base prompt (tuned for Claude/Gemini/etc.).
    base_prompt = get_prompt_for_provider(provider_id)
    if base_prompt:
        sections.append(base_prompt)

    # Agent prompt, skipped when it merely duplicates the base prompt.
    agent_specific = get_system_prompt(agent)
    if agent_specific and agent_specific != base_prompt:
        sections.append(agent_specific)

    if custom_system:
        sections.append(custom_system)

    if not sections:
        return None
    return "\n\n".join(sections)
|
| 491 |
+
|
| 492 |
+
@classmethod
def _build_messages(
    cls,
    history: List,
    include_tool_results: bool = True
) -> List[ProviderMessage]:
    """Convert stored message history into provider-ready messages.

    Flow the provider expects:
    1. User message
    2. Assistant message (text only)
    3. Tool results, re-injected as a user message
    4. Assistant continues
    """
    out: List[ProviderMessage] = []

    for entry in history:
        if entry.role == "user":
            # Empty user messages are loop continuations; drop them.
            if entry.content:
                out.append(ProviderMessage(role="user", content=entry.content))
            continue

        if entry.role != "assistant":
            continue

        texts: List[str] = []
        calls: List[Dict[str, Any]] = []
        results: List[Dict[str, Any]] = []

        for part in getattr(entry, "parts", []):
            if part.type == "text" and part.content:
                texts.append(part.content)
            elif part.type == "tool_call" and include_tool_results:
                calls.append({
                    "id": part.tool_call_id,
                    "name": part.tool_name,
                    "arguments": part.tool_args or {}
                })
            elif part.type == "tool_result" and include_tool_results:
                results.append({
                    "tool_call_id": part.tool_call_id,
                    "output": part.tool_output or ""
                })

        # Assistant content is text only — deliberately NO "[Called tool: ...]"
        # summaries, since models like Gemini start mimicking that pattern
        # instead of emitting real tool calls.
        if texts:
            out.append(ProviderMessage(role="assistant", content="".join(texts)))

        # Re-inject tool results as a user message (simulated tool response).
        if results:
            blob = "\n\n".join(f"Tool result:\n{r['output']}" for r in results)
            out.append(ProviderMessage(role="user", content=blob))

    return out
|
| 560 |
+
|
| 561 |
+
@classmethod
async def _execute_tool(
    cls,
    session_id: str,
    message_id: str,
    tool_call_id: str,
    tool_name: str,
    tool_args: Dict[str, Any],
    user_id: Optional[str] = None
) -> tuple[str, str]:
    """Execute a tool and store the result. Returns (output, status).

    Status is "completed" on success, "error" on doom-loop detection,
    unknown tool, or an exception from the tool itself. The result is
    always persisted as a tool_result part on the assistant message.
    """
    # Doom-loop detection via the SessionProcessor: passing tool_args too
    # means only identical tool + identical arguments count as a loop.
    processor = SessionProcessor.get_or_create(session_id)
    is_doom_loop = processor.record_tool_call(tool_name, tool_args)

    if is_doom_loop:
        error_output = f"Error: Doom loop detected - tool '{tool_name}' called repeatedly"
        await Message.add_part(
            message_id,
            session_id,
            MessagePart(
                id="",
                session_id=session_id,
                message_id=message_id,
                type="tool_result",
                tool_call_id=tool_call_id,
                tool_output=error_output
            ),
            user_id
        )
        return error_output, "error"

    # Look the tool up in the registry.
    registry = get_registry()
    tool = registry.get(tool_name)

    if not tool:
        error_output = f"Error: Tool '{tool_name}' not found"
        await Message.add_part(
            message_id,
            session_id,
            MessagePart(
                id="",
                session_id=session_id,
                message_id=message_id,
                type="tool_result",
                tool_call_id=tool_call_id,
                tool_output=error_output
            ),
            user_id
        )
        return error_output, "error"

    ctx = ToolContext(
        session_id=session_id,
        message_id=message_id,
        tool_call_id=tool_call_id,
    )

    try:
        result = await tool.execute(tool_args, ctx)

        # Apply the tool's output-length limit before persisting.
        truncated_output = tool.truncate_output(result.output)
        output = f"[{result.title}]\n{truncated_output}"
        status = "completed"
    except Exception as e:
        # Tool failures are reported back to the model, not raised.
        output = f"Error executing tool: {str(e)}"
        status = "error"

    await Message.add_part(
        message_id,
        session_id,
        MessagePart(
            id="",
            session_id=session_id,
            message_id=message_id,
            type="tool_result",
            tool_call_id=tool_call_id,
            tool_output=output
        ),
        user_id
    )

    return output, status
|
| 647 |
+
|
| 648 |
+
@classmethod
def cancel(cls, session_id: str) -> bool:
    """Cancel an active session.

    Cancels the in-flight task (if any) and marks/removes the loop state.
    Returns True when something was actually cancelled.
    """
    found = False

    task = cls._active_sessions.pop(session_id, None)
    if task is not None:
        task.cancel()
        found = True

    loop_state = cls._loop_states.pop(session_id, None)
    if loop_state is not None:
        # Mark as paused/cancelled before dropping the state.
        loop_state.paused = True
        loop_state.pause_reason = "cancelled"
        found = True

    return found
|
| 665 |
+
|
| 666 |
+
@classmethod
def get_loop_state(cls, session_id: str) -> Optional[LoopState]:
    """Return the live agentic-loop state for a session, or None when idle."""
    return cls._loop_states.get(session_id, None)
|
| 670 |
+
|
| 671 |
+
@classmethod
async def resume(cls, session_id: str) -> AsyncIterator[StreamChunk]:
    """Resume a loop that was paused (e.g. by the question tool).

    Re-enters the continuation loop: empty turns keep running while the
    last stop_reason was "tool_calls", the loop is not re-paused, and the
    step budget remains.

    NOTE(review): unlike prompt(), this path does not pass user_id to
    Session.get or _single_turn — verify the multi-tenant behavior.
    """
    state = cls._loop_states.get(session_id)
    if not state or not state.paused:
        yield StreamChunk(type="error", error="No paused loop to resume")
        return

    state.paused = False
    state.pause_reason = None

    session = await Session.get(session_id)
    agent_id = session.agent_id or "build"
    agent = get_agent(agent_id) or default_agent()

    # Empty content = continuation turn.
    continue_input = PromptInput(content="")

    while state.stop_reason == "tool_calls" and not state.paused and state.step < state.max_steps:
        state.step += 1

        yield StreamChunk(type="text", text=f"\n[Resuming... step {state.step}/{state.max_steps}]\n")

        async for chunk in cls._single_turn(session_id, continue_input, agent, is_continuation=True):
            yield chunk

            # The question tool re-pauses the loop.
            if chunk.type == "tool_call" and chunk.tool_call:
                if chunk.tool_call.name == "question" and agent.pause_on_question:
                    state.paused = True
                    state.pause_reason = "question"

            elif chunk.type == "done":
                state.stop_reason = chunk.stop_reason
|
src/opencode_api/session/session.py
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional, List, Dict, Any
|
| 2 |
+
from pydantic import BaseModel
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
|
| 5 |
+
from ..core.storage import Storage, NotFoundError
|
| 6 |
+
from ..core.bus import Bus, SESSION_CREATED, SESSION_UPDATED, SESSION_DELETED, SessionPayload
|
| 7 |
+
from ..core.identifier import Identifier
|
| 8 |
+
from ..core.supabase import get_client, is_enabled as supabase_enabled
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class SessionInfo(BaseModel):
    """Persisted metadata for a chat session."""
    # Generated identifier (see Identifier.generate("session")).
    id: str
    # Owner; None for anonymous/local sessions.
    user_id: Optional[str] = None
    title: str
    created_at: datetime
    updated_at: datetime
    # Preferred provider/model; None means resolve at prompt time.
    provider_id: Optional[str] = None
    model_id: Optional[str] = None
    # Agent preset; defaults to "build" on creation.
    agent_id: Optional[str] = None
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class SessionCreate(BaseModel):
    """Request payload for creating a session; every field is optional."""

    title: Optional[str] = None  # a timestamp-based title is generated when omitted
    provider_id: Optional[str] = None
    model_id: Optional[str] = None
    agent_id: Optional[str] = None
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class Session:
    """CRUD operations for chat sessions.

    Two backends are supported:
    - Supabase (when enabled AND a user_id is supplied): rows live in the
      "opencode_sessions" table and every query is scoped by user_id.
    - Local Storage fallback: records are stored under ["session", <id>]
      with no ownership checks.

    Mutating operations publish SESSION_CREATED / SESSION_UPDATED /
    SESSION_DELETED on the event bus.
    """

    @staticmethod
    async def create(data: Optional[SessionCreate] = None, user_id: Optional[str] = None) -> SessionInfo:
        """Create a new session and publish SESSION_CREATED.

        Returns the freshly built SessionInfo.
        """
        session_id = Identifier.generate("session")
        # NOTE(review): utcnow() returns a *naive* datetime (and is deprecated
        # in 3.12); kept for compatibility with already-stored records.
        now = datetime.utcnow()

        info = SessionInfo(
            id=session_id,
            user_id=user_id,
            title=data.title if data and data.title else f"Session {now.isoformat()}",
            created_at=now,
            updated_at=now,
            provider_id=data.provider_id if data else None,
            model_id=data.model_id if data else None,
            # NOTE: "build" is only used when the whole payload is omitted;
            # an explicit payload with agent_id=None stays None.
            agent_id=data.agent_id if data else "build",
        )

        if supabase_enabled() and user_id:
            client = get_client()
            # created_at/updated_at are left to the table defaults.
            client.table("opencode_sessions").insert({
                "id": session_id,
                "user_id": user_id,
                "title": info.title,
                "agent_id": info.agent_id,
                "provider_id": info.provider_id,
                "model_id": info.model_id,
            }).execute()
        else:
            await Storage.write(["session", session_id], info)

        await Bus.publish(SESSION_CREATED, SessionPayload(id=session_id, title=info.title))
        return info

    @staticmethod
    async def get(session_id: str, user_id: Optional[str] = None) -> SessionInfo:
        """Fetch a session by id.

        Raises:
            NotFoundError: The session does not exist (or, on the Supabase
                path, is not owned by user_id).
        """
        if supabase_enabled() and user_id:
            client = get_client()
            result = client.table("opencode_sessions").select("*").eq("id", session_id).eq("user_id", user_id).single().execute()
            if not result.data:
                raise NotFoundError(["session", session_id])
            return SessionInfo(
                id=result.data["id"],
                user_id=result.data["user_id"],
                title=result.data["title"],
                created_at=result.data["created_at"],
                updated_at=result.data["updated_at"],
                provider_id=result.data.get("provider_id"),
                model_id=result.data.get("model_id"),
                agent_id=result.data.get("agent_id"),
            )

        data = await Storage.read(["session", session_id])
        if not data:
            raise NotFoundError(["session", session_id])
        return SessionInfo(**data)

    @staticmethod
    async def update(session_id: str, updates: Dict[str, Any], user_id: Optional[str] = None) -> SessionInfo:
        """Apply a partial update and return the refreshed SessionInfo.

        updated_at is always bumped, even for an empty update (see touch()).

        Raises:
            NotFoundError: The session does not exist.
        """
        updates["updated_at"] = datetime.utcnow().isoformat()

        if supabase_enabled() and user_id:
            client = get_client()
            result = client.table("opencode_sessions").update(updates).eq("id", session_id).eq("user_id", user_id).execute()
            if not result.data:
                raise NotFoundError(["session", session_id])
            return await Session.get(session_id, user_id)

        def updater(data: Dict[str, Any]):
            # Mutates the stored dict in place; Storage.update persists it.
            data.update(updates)

        data = await Storage.update(["session", session_id], updater)
        info = SessionInfo(**data)
        await Bus.publish(SESSION_UPDATED, SessionPayload(id=session_id, title=info.title))
        return info

    @staticmethod
    async def delete(session_id: str, user_id: Optional[str] = None) -> bool:
        """Delete a session (and, on the local path, its messages).

        Always returns True. NOTE(review): the Supabase path publishes the
        delete event with an empty title and does not check whether a row
        was actually removed.
        """
        if supabase_enabled() and user_id:
            client = get_client()
            # Messages are expected to be removed by the database (e.g. FK
            # cascade) — TODO confirm against sql/001_opencode_tables.sql.
            client.table("opencode_sessions").delete().eq("id", session_id).eq("user_id", user_id).execute()
            await Bus.publish(SESSION_DELETED, SessionPayload(id=session_id, title=""))
            return True

        info = await Session.get(session_id)
        message_keys = await Storage.list(["message", session_id])
        for key in message_keys:
            await Storage.remove(key)

        await Storage.remove(["session", session_id])
        await Bus.publish(SESSION_DELETED, SessionPayload(id=session_id, title=info.title))
        return True

    @staticmethod
    async def list(limit: Optional[int] = None, user_id: Optional[str] = None) -> List[SessionInfo]:
        """List sessions, most recently updated first, capped at `limit`."""
        if supabase_enabled() and user_id:
            client = get_client()
            query = client.table("opencode_sessions").select("*").eq("user_id", user_id).order("updated_at", desc=True)
            if limit:
                query = query.limit(limit)
            result = query.execute()
            return [
                SessionInfo(
                    id=row["id"],
                    user_id=row["user_id"],
                    title=row["title"],
                    created_at=row["created_at"],
                    updated_at=row["updated_at"],
                    provider_id=row.get("provider_id"),
                    model_id=row.get("model_id"),
                    agent_id=row.get("agent_id"),
                )
                for row in result.data
            ]

        session_keys = await Storage.list(["session"])
        sessions = []
        for key in session_keys:
            data = await Storage.read(key)
            if data:
                sessions.append(SessionInfo(**data))

        # Bug fix: sort BEFORE applying the limit. Storage keys come back in
        # arbitrary order, so truncating first returned an arbitrary subset
        # instead of the most recently updated sessions.
        sessions.sort(key=lambda s: s.updated_at, reverse=True)
        if limit:
            sessions = sessions[:limit]
        return sessions

    @staticmethod
    async def touch(session_id: str, user_id: Optional[str] = None) -> None:
        """Bump updated_at without changing any other field."""
        await Session.update(session_id, {}, user_id)
|
src/opencode_api/tool/__init__.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .tool import Tool, ToolContext, ToolResult, register_tool, get_tool, list_tools, get_tools_schema
|
| 2 |
+
from .registry import ToolRegistry, get_registry
|
| 3 |
+
from .websearch import WebSearchTool
|
| 4 |
+
from .webfetch import WebFetchTool
|
| 5 |
+
from .todo import TodoTool
|
| 6 |
+
from .question import (
|
| 7 |
+
QuestionTool,
|
| 8 |
+
QuestionInfo,
|
| 9 |
+
QuestionOption,
|
| 10 |
+
QuestionRequest,
|
| 11 |
+
QuestionReply,
|
| 12 |
+
ask_questions,
|
| 13 |
+
reply_to_question,
|
| 14 |
+
reject_question,
|
| 15 |
+
get_pending_questions,
|
| 16 |
+
)
|
| 17 |
+
from .skill import SkillTool, SkillInfo, register_skill, get_skill, list_skills
|
| 18 |
+
|
| 19 |
+
__all__ = [
|
| 20 |
+
"Tool", "ToolContext", "ToolResult",
|
| 21 |
+
"register_tool", "get_tool", "list_tools", "get_tools_schema",
|
| 22 |
+
"ToolRegistry", "get_registry",
|
| 23 |
+
"WebSearchTool", "WebFetchTool", "TodoTool",
|
| 24 |
+
"QuestionTool", "QuestionInfo", "QuestionOption", "QuestionRequest", "QuestionReply",
|
| 25 |
+
"ask_questions", "reply_to_question", "reject_question", "get_pending_questions",
|
| 26 |
+
"SkillTool", "SkillInfo", "register_skill", "get_skill", "list_skills",
|
| 27 |
+
]
|
src/opencode_api/tool/question.py
ADDED
|
@@ -0,0 +1,308 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Question tool - allows agent to ask user questions during execution."""
|
| 2 |
+
from typing import Dict, Any, List, Optional
|
| 3 |
+
from pydantic import BaseModel, Field
|
| 4 |
+
import asyncio
|
| 5 |
+
import logging
|
| 6 |
+
|
| 7 |
+
from .tool import BaseTool, ToolResult, ToolContext
|
| 8 |
+
from ..core.identifier import generate_id
|
| 9 |
+
from ..core.bus import Bus
|
| 10 |
+
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Question schemas
|
| 15 |
+
class QuestionOption(BaseModel):
    """A single selectable option for a question."""

    label: str = Field(..., description="Display text (1-5 words, concise)")
    description: str = Field(..., description="Explanation of choice")
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class QuestionInfo(BaseModel):
    """A question to ask the user."""

    question: str = Field(..., description="Complete question")
    header: str = Field(..., description="Very short label (max 30 chars)")
    options: List[QuestionOption] = Field(default_factory=list, description="Available choices")
    # multiple: user may pick more than one option.
    multiple: bool = Field(default=False, description="Allow selecting multiple choices")
    # custom: a free-text "type your own answer" option is offered as well.
    custom: bool = Field(default=True, description="Allow typing a custom answer")
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class QuestionRequest(BaseModel):
    """A request containing questions for the user."""

    id: str  # equals tool_call_id when one was provided (see ask_questions)
    session_id: str
    questions: List[QuestionInfo]
    tool_call_id: Optional[str] = None  # originating tool call, if any
    message_id: Optional[str] = None  # message the questions are attached to
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class QuestionReply(BaseModel):
    """User's reply to questions."""

    request_id: str  # must match a pending QuestionRequest.id
    answers: List[List[str]] = Field(..., description="Answers in order (each is array of selected labels)")
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
# Event-bus topics for the question lifecycle.
QUESTION_ASKED = "question.asked"
QUESTION_REPLIED = "question.replied"
QUESTION_REJECTED = "question.rejected"


# Pending questions keyed by request id; each future resolves to the user's
# answers. In-memory only — pending questions do not survive a restart.
_pending_questions: Dict[str, asyncio.Future] = {}
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
async def ask_questions(
    session_id: str,
    questions: List[QuestionInfo],
    tool_call_id: Optional[str] = None,
    message_id: Optional[str] = None,
    timeout: float = 300.0,  # 5 minutes default timeout
) -> List[List[str]]:
    """Publish questions to the user and block until they are answered.

    Args:
        session_id: Session the questions belong to.
        questions: Questions to present.
        tool_call_id: When given, reused as the request id so the frontend
            can correlate the request with the originating tool call.
        message_id: Message the questions are attached to, if any.
        timeout: Seconds to wait before giving up.

    Returns:
        One list of selected labels per question, in order.

    Raises:
        TimeoutError: No reply arrived within `timeout` seconds.
        QuestionRejectedError: The user dismissed the questions
            (delivered via reject_question()).
    """
    # Prefer tool_call_id as the request id (directly usable by the frontend).
    request_id = tool_call_id or generate_id("question")

    request = QuestionRequest(
        id=request_id,
        session_id=session_id,
        questions=questions,
        tool_call_id=tool_call_id,
        message_id=message_id,
    )

    # Create a future for the response. Important: use get_running_loop() —
    # get_event_loop() can return the wrong loop under FastAPI.
    loop = asyncio.get_running_loop()
    future: asyncio.Future[List[List[str]]] = loop.create_future()
    _pending_questions[request_id] = future

    # Publish the question event (delivered to the client via SSE).
    await Bus.publish(QUESTION_ASKED, request.model_dump())

    try:
        logger.info(f"[question] Waiting for answer to request_id={request_id}, timeout={timeout}s")
        answers = await asyncio.wait_for(future, timeout=timeout)
        logger.info(f"[question] Received answer for request_id={request_id}: {answers}")
        return answers
    except asyncio.TimeoutError:
        logger.error(f"[question] Timeout for request_id={request_id} after {timeout}s")
        raise TimeoutError(f"Question timed out after {timeout} seconds")
    except Exception as e:
        logger.error(f"[question] Error waiting for answer: {type(e).__name__}: {e}")
        raise
    finally:
        # Single cleanup point for every outcome (the original also deleted
        # the entry in the timeout handler, which was redundant).
        _pending_questions.pop(request_id, None)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
async def reply_to_question(request_id: str, answers: List[List[str]]) -> bool:
    """Deliver the user's answers to a pending question request.

    Returns True when the request id was found (even if its future had
    already completed), False when no such pending request exists.
    """
    logger.info(f"[question] reply_to_question called: request_id={request_id}, answers={answers}")
    logger.info(f"[question] pending_questions keys: {list(_pending_questions.keys())}")

    pending = _pending_questions.get(request_id)
    if pending is None:
        logger.error(f"[question] request_id={request_id} NOT FOUND in pending_questions!")
        return False

    if pending.done():
        logger.warning(f"[question] Future already done for request_id={request_id}")
    else:
        logger.info(f"[question] Setting result for request_id={request_id}")
        pending.set_result(answers)

    return True
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
async def reject_question(request_id: str) -> bool:
    """Dismiss a pending question, waking its waiter with QuestionRejectedError.

    Returns False when the request id is unknown, True otherwise.
    """
    pending = _pending_questions.get(request_id)
    if pending is None:
        return False

    if not pending.done():
        pending.set_exception(QuestionRejectedError())
    return True
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def get_pending_questions(session_id: Optional[str] = None) -> List[str]:
    """Get list of pending question request IDs.

    NOTE(review): the session_id parameter is currently ignored — all
    pending request ids are returned regardless of session. Per-session
    filtering would require tracking the session id alongside each future.
    """
    return list(_pending_questions.keys())
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
class QuestionRejectedError(Exception):
    """Raised when user dismisses a question."""

    def __init__(self) -> None:
        # Fixed message: this error always means an explicit user dismissal.
        super().__init__("The user dismissed this question")
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
QUESTION_DESCRIPTION = """Use this tool when you need to ask the user questions during execution. This allows you to:
|
| 144 |
+
1. Gather user preferences or requirements
|
| 145 |
+
2. Clarify ambiguous instructions
|
| 146 |
+
3. Get decisions on implementation choices as you work
|
| 147 |
+
4. Offer choices to the user about what direction to take.
|
| 148 |
+
|
| 149 |
+
IMPORTANT: You MUST provide at least 2 options for each question. Never ask open-ended questions without choices.
|
| 150 |
+
|
| 151 |
+
Usage notes:
|
| 152 |
+
- REQUIRED: Every question MUST have at least 2 options (minItems: 2)
|
| 153 |
+
- When `custom` is enabled (default), a "Type your own answer" option is added automatically; don't include "Other" or catch-all options
|
| 154 |
+
- Answers are returned as arrays of labels; set `multiple: true` to allow selecting more than one
|
| 155 |
+
- If you recommend a specific option, make that the first option in the list and add "(Recommended)" at the end of the label
|
| 156 |
+
"""
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
class QuestionTool(BaseTool):
    """Tool for asking user questions during execution.

    execute() blocks (via ask_questions) until the user answers, dismisses,
    or the request times out, and reports the outcome back to the model.
    """

    @property
    def id(self) -> str:
        return "question"

    @property
    def description(self) -> str:
        return QUESTION_DESCRIPTION

    @property
    def parameters(self) -> Dict[str, Any]:
        """JSON schema for the tool arguments, sent to the LLM provider."""
        return {
            "type": "object",
            "properties": {
                "questions": {
                    "type": "array",
                    "description": "Questions to ask",
                    "items": {
                        "type": "object",
                        "properties": {
                            "question": {
                                "type": "string",
                                "description": "Complete question"
                            },
                            "header": {
                                "type": "string",
                                "description": "Very short label (max 30 chars)"
                            },
                            "options": {
                                "type": "array",
                                "description": "Available choices (MUST provide at least 2 options)",
                                "minItems": 2,
                                "items": {
                                    "type": "object",
                                    "properties": {
                                        "label": {
                                            "type": "string",
                                            "description": "Display text (1-5 words, concise)"
                                        },
                                        "description": {
                                            "type": "string",
                                            "description": "Explanation of choice"
                                        }
                                    },
                                    "required": ["label", "description"]
                                }
                            },
                            "multiple": {
                                "type": "boolean",
                                "description": "Allow selecting multiple choices",
                                "default": False
                            },
                            # Bug fix: `custom` was parsed by execute() and
                            # documented in QUESTION_DESCRIPTION, but missing
                            # from the schema, so the model could never set it.
                            "custom": {
                                "type": "boolean",
                                "description": "Allow typing a custom answer",
                                "default": True
                            }
                        },
                        "required": ["question", "header", "options"]
                    }
                }
            },
            "required": ["questions"]
        }

    async def execute(self, args: Dict[str, Any], ctx: ToolContext) -> ToolResult:
        """Parse question arguments, ask the user, and format the answers."""
        logger.info(f"[question] execute called with args: {args}")
        logger.info(f"[question] args type: {type(args)}")

        questions_data = args.get("questions", [])
        logger.info(f"[question] questions_data type: {type(questions_data)}, len: {len(questions_data) if isinstance(questions_data, list) else 'N/A'}")

        if questions_data and len(questions_data) > 0:
            logger.info(f"[question] first question type: {type(questions_data[0])}")
            logger.info(f"[question] first question content: {questions_data[0]}")

        if not questions_data:
            return ToolResult(
                title="No questions",
                output="No questions were provided.",
                metadata={}
            )

        # Parse raw dicts from the model into validated QuestionInfo objects.
        questions = []
        try:
            for idx, q in enumerate(questions_data):
                logger.info(f"[question] Parsing question {idx}: type={type(q)}, value={q}")

                # Skip malformed entries where the model emitted a bare
                # string instead of a question object.
                if isinstance(q, str):
                    logger.error(f"[question] Question {idx} is a string, not a dict!")
                    continue

                options = []
                for opt_idx, opt in enumerate(q.get("options", [])):
                    logger.info(f"[question] Parsing option {opt_idx}: type={type(opt)}, value={opt}")
                    if isinstance(opt, dict):
                        options.append(QuestionOption(label=opt["label"], description=opt["description"]))
                    else:
                        logger.error(f"[question] Option {opt_idx} is not a dict: {type(opt)}")

                questions.append(QuestionInfo(
                    question=q["question"],
                    header=q["header"],
                    options=options,
                    multiple=q.get("multiple", False),
                    custom=q.get("custom", True),
                ))
        except Exception as e:
            logger.error(f"[question] Error parsing questions: {type(e).__name__}: {e}")
            import traceback
            logger.error(f"[question] Traceback: {traceback.format_exc()}")
            raise

        try:
            # Ask and block until the user replies (or timeout/dismissal).
            answers = await ask_questions(
                session_id=ctx.session_id,
                questions=questions,
                tool_call_id=ctx.tool_call_id,
                message_id=ctx.message_id,
            )

            # Render answers as `"question"="label1, label2"` pairs for the model.
            def format_answer(answer: List[str]) -> str:
                if not answer:
                    return "Unanswered"
                return ", ".join(answer)

            formatted = ", ".join(
                f'"{q.question}"="{format_answer(answers[i] if i < len(answers) else [])}"'
                for i, q in enumerate(questions)
            )

            return ToolResult(
                title=f"Asked {len(questions)} question{'s' if len(questions) > 1 else ''}",
                output=f"User has answered your questions: {formatted}. You can now continue with the user's answers in mind.",
                metadata={"answers": answers}
            )

        except QuestionRejectedError:
            return ToolResult(
                title="Questions dismissed",
                output="The user dismissed the questions without answering.",
                metadata={"rejected": True}
            )
        except TimeoutError as e:
            return ToolResult(
                title="Questions timed out",
                output=str(e),
                metadata={"timeout": True}
            )
|
src/opencode_api/tool/registry.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Any, List, Optional
|
| 2 |
+
from .tool import BaseTool
|
| 3 |
+
import os
|
| 4 |
+
import importlib.util
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class ToolRegistry:
    """Registry of tool instances, keyed by tool id."""

    def __init__(self):
        self._tools: Dict[str, BaseTool] = {}

    def register(self, tool: BaseTool) -> None:
        """Add (or replace) a tool under its id."""
        self._tools[tool.id] = tool

    def get(self, tool_id: str) -> Optional[BaseTool]:
        """Look up a tool by id; None when unknown."""
        return self._tools.get(tool_id)

    def list(self) -> List[BaseTool]:
        """Return all registered tools."""
        return [*self._tools.values()]

    def get_schema(self) -> List[Dict[str, Any]]:
        """Return the schema of every registered tool."""
        return [registered.get_schema() for registered in self._tools.values()]

    def load_from_directory(self, path: str) -> None:
        """Dynamically load tools from a directory (plugin system).

        Not implemented yet — currently only validates that the path exists.
        Future work: scan .py files for BaseTool subclasses and auto-register.
        """
        if not os.path.exists(path):
            raise ValueError(f"Directory not found: {path}")
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# Global singleton registry instance.
_registry = ToolRegistry()


def get_registry() -> ToolRegistry:
    """Return the global registry instance."""
    return _registry
|
src/opencode_api/tool/skill.py
ADDED
|
@@ -0,0 +1,369 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Skill tool - loads detailed instructions for specific tasks."""
|
| 2 |
+
from typing import Dict, Any, List, Optional
|
| 3 |
+
from pydantic import BaseModel, Field
|
| 4 |
+
|
| 5 |
+
from .tool import BaseTool, ToolResult, ToolContext
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class SkillInfo(BaseModel):
    """Information about a skill."""

    name: str  # unique key used for registry lookups
    description: str  # short human-readable summary of the skill
    content: str  # full markdown instruction text
| 14 |
+
|
| 15 |
+
# Built-in skills registry
|
| 16 |
+
_skills: Dict[str, SkillInfo] = {}
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def register_skill(skill: SkillInfo) -> None:
    """Register a skill, replacing any existing skill with the same name."""
    _skills[skill.name] = skill
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def get_skill(name: str) -> Optional[SkillInfo]:
    """Get a skill by name; returns None when not registered."""
    return _skills.get(name)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def list_skills() -> List[SkillInfo]:
    """List all registered skills (in registration order)."""
    return list(_skills.values())
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
# Built-in default skills
|
| 35 |
+
DEFAULT_SKILLS = [
|
| 36 |
+
SkillInfo(
|
| 37 |
+
name="web-research",
|
| 38 |
+
description="Comprehensive web research methodology for gathering information from multiple sources",
|
| 39 |
+
content="""# Web Research Skill
|
| 40 |
+
|
| 41 |
+
## Purpose
|
| 42 |
+
Guide for conducting thorough web research to answer questions or gather information.
|
| 43 |
+
|
| 44 |
+
## Methodology
|
| 45 |
+
|
| 46 |
+
### 1. Query Formulation
|
| 47 |
+
- Break down complex questions into specific search queries
|
| 48 |
+
- Use different phrasings to get diverse results
|
| 49 |
+
- Include domain-specific terms when relevant
|
| 50 |
+
|
| 51 |
+
### 2. Source Evaluation
|
| 52 |
+
- Prioritize authoritative sources (official docs, reputable publications)
|
| 53 |
+
- Cross-reference information across multiple sources
|
| 54 |
+
- Note publication dates for time-sensitive information
|
| 55 |
+
|
| 56 |
+
### 3. Information Synthesis
|
| 57 |
+
- Compile findings from multiple sources
|
| 58 |
+
- Identify consensus vs. conflicting information
|
| 59 |
+
- Summarize key points clearly
|
| 60 |
+
|
| 61 |
+
### 4. Citation
|
| 62 |
+
- Always provide source URLs
|
| 63 |
+
- Note when information might be outdated
|
| 64 |
+
|
| 65 |
+
## Tools to Use
|
| 66 |
+
- `websearch`: For finding relevant pages
|
| 67 |
+
- `webfetch`: For extracting content from specific URLs
|
| 68 |
+
|
| 69 |
+
## Best Practices
|
| 70 |
+
- Start broad, then narrow down
|
| 71 |
+
- Use quotes for exact phrases
|
| 72 |
+
- Filter by date when freshness matters
|
| 73 |
+
- Verify claims with multiple sources
|
| 74 |
+
"""
|
| 75 |
+
),
|
| 76 |
+
SkillInfo(
|
| 77 |
+
name="code-explanation",
|
| 78 |
+
description="Methodology for explaining code clearly to users of varying skill levels",
|
| 79 |
+
content="""# Code Explanation Skill
|
| 80 |
+
|
| 81 |
+
## Purpose
|
| 82 |
+
Guide for explaining code in a clear, educational manner.
|
| 83 |
+
|
| 84 |
+
## Approach
|
| 85 |
+
|
| 86 |
+
### 1. Assess Context
|
| 87 |
+
- Determine user's apparent skill level
|
| 88 |
+
- Identify what aspect they're asking about
|
| 89 |
+
- Note any specific confusion points
|
| 90 |
+
|
| 91 |
+
### 2. Structure Explanation
|
| 92 |
+
- Start with high-level overview (what does it do?)
|
| 93 |
+
- Break down into logical sections
|
| 94 |
+
- Explain each component's purpose
|
| 95 |
+
|
| 96 |
+
### 3. Use Analogies
|
| 97 |
+
- Relate concepts to familiar ideas
|
| 98 |
+
- Use real-world metaphors when helpful
|
| 99 |
+
- Avoid overly technical jargon initially
|
| 100 |
+
|
| 101 |
+
### 4. Provide Examples
|
| 102 |
+
- Show simple examples first
|
| 103 |
+
- Build up to complex cases
|
| 104 |
+
- Include edge cases when relevant
|
| 105 |
+
|
| 106 |
+
### 5. Verify Understanding
|
| 107 |
+
- Use the question tool to check comprehension
|
| 108 |
+
- Offer to elaborate on specific parts
|
| 109 |
+
- Provide additional resources if needed
|
| 110 |
+
|
| 111 |
+
## Best Practices
|
| 112 |
+
- Don't assume prior knowledge
|
| 113 |
+
- Explain "why" not just "what"
|
| 114 |
+
- Use code comments effectively
|
| 115 |
+
- Highlight common pitfalls
|
| 116 |
+
"""
|
| 117 |
+
),
|
| 118 |
+
SkillInfo(
|
| 119 |
+
name="api-integration",
|
| 120 |
+
description="Best practices for integrating with external APIs",
|
| 121 |
+
content="""# API Integration Skill
|
| 122 |
+
|
| 123 |
+
## Purpose
|
| 124 |
+
Guide for properly integrating with external APIs.
|
| 125 |
+
|
| 126 |
+
## Key Considerations
|
| 127 |
+
|
| 128 |
+
### 1. Authentication
|
| 129 |
+
- Store API keys securely (environment variables)
|
| 130 |
+
- Never hardcode credentials
|
| 131 |
+
- Handle token refresh if applicable
|
| 132 |
+
|
| 133 |
+
### 2. Error Handling
|
| 134 |
+
- Implement retry logic for transient failures
|
| 135 |
+
- Handle rate limiting gracefully
|
| 136 |
+
- Log errors with context
|
| 137 |
+
|
| 138 |
+
### 3. Request Best Practices
|
| 139 |
+
- Set appropriate timeouts
|
| 140 |
+
- Use connection pooling
|
| 141 |
+
- Implement circuit breakers for resilience
|
| 142 |
+
|
| 143 |
+
### 4. Response Handling
|
| 144 |
+
- Validate response schemas
|
| 145 |
+
- Handle pagination properly
|
| 146 |
+
- Cache responses when appropriate
|
| 147 |
+
|
| 148 |
+
### 5. Testing
|
| 149 |
+
- Mock API responses in tests
|
| 150 |
+
- Test error scenarios
|
| 151 |
+
- Verify rate limit handling
|
| 152 |
+
|
| 153 |
+
## Common Patterns
|
| 154 |
+
|
| 155 |
+
```python
|
| 156 |
+
# Example: Robust API call
|
| 157 |
+
async def call_api(url, retries=3):
|
| 158 |
+
for attempt in range(retries):
|
| 159 |
+
try:
|
| 160 |
+
response = await httpx.get(url, timeout=30)
|
| 161 |
+
response.raise_for_status()
|
| 162 |
+
return response.json()
|
| 163 |
+
except httpx.HTTPStatusError as e:
|
| 164 |
+
if e.response.status_code == 429:
|
| 165 |
+
await asyncio.sleep(2 ** attempt)
|
| 166 |
+
elif e.response.status_code >= 500:
|
| 167 |
+
await asyncio.sleep(1)
|
| 168 |
+
else:
|
| 169 |
+
raise
|
| 170 |
+
raise Exception("Max retries exceeded")
|
| 171 |
+
```
|
| 172 |
+
"""
|
| 173 |
+
),
|
| 174 |
+
SkillInfo(
|
| 175 |
+
name="debugging",
|
| 176 |
+
description="Systematic approach to debugging problems",
|
| 177 |
+
content="""# Debugging Skill
|
| 178 |
+
|
| 179 |
+
## Purpose
|
| 180 |
+
Systematic methodology for identifying and fixing bugs.
|
| 181 |
+
|
| 182 |
+
## Process
|
| 183 |
+
|
| 184 |
+
### 1. Reproduce the Issue
|
| 185 |
+
- Get exact steps to reproduce
|
| 186 |
+
- Note environment details
|
| 187 |
+
- Identify when it started happening
|
| 188 |
+
|
| 189 |
+
### 2. Gather Information
|
| 190 |
+
- Check error messages and stack traces
|
| 191 |
+
- Review recent changes
|
| 192 |
+
- Check logs for anomalies
|
| 193 |
+
|
| 194 |
+
### 3. Form Hypotheses
|
| 195 |
+
- List possible causes
|
| 196 |
+
- Rank by likelihood
|
| 197 |
+
- Consider recent changes first
|
| 198 |
+
|
| 199 |
+
### 4. Test Hypotheses
|
| 200 |
+
- Start with most likely cause
|
| 201 |
+
- Make minimal changes to test
|
| 202 |
+
- Verify each hypothesis before moving on
|
| 203 |
+
|
| 204 |
+
### 5. Implement Fix
|
| 205 |
+
- Fix root cause, not symptoms
|
| 206 |
+
- Add tests to prevent regression
|
| 207 |
+
- Document the fix
|
| 208 |
+
|
| 209 |
+
### 6. Verify Fix
|
| 210 |
+
- Confirm original issue is resolved
|
| 211 |
+
- Check for side effects
|
| 212 |
+
- Test related functionality
|
| 213 |
+
|
| 214 |
+
## Debugging Questions
|
| 215 |
+
- What changed recently?
|
| 216 |
+
- Does it happen consistently?
|
| 217 |
+
- What's different when it works?
|
| 218 |
+
- What are the exact inputs?
|
| 219 |
+
|
| 220 |
+
## Tools
|
| 221 |
+
- Use print/log statements strategically
|
| 222 |
+
- Leverage debuggers when available
|
| 223 |
+
- Check version differences
|
| 224 |
+
"""
|
| 225 |
+
),
|
| 226 |
+
SkillInfo(
|
| 227 |
+
name="task-planning",
|
| 228 |
+
description="Breaking down complex tasks into manageable steps",
|
| 229 |
+
content="""# Task Planning Skill
|
| 230 |
+
|
| 231 |
+
## Purpose
|
| 232 |
+
Guide for decomposing complex tasks into actionable steps.
|
| 233 |
+
|
| 234 |
+
## Methodology
|
| 235 |
+
|
| 236 |
+
### 1. Understand the Goal
|
| 237 |
+
- Clarify the end objective
|
| 238 |
+
- Identify success criteria
|
| 239 |
+
- Note any constraints
|
| 240 |
+
|
| 241 |
+
### 2. Identify Components
|
| 242 |
+
- Break into major phases
|
| 243 |
+
- List dependencies between parts
|
| 244 |
+
- Identify parallel vs. sequential work
|
| 245 |
+
|
| 246 |
+
### 3. Create Action Items
|
| 247 |
+
- Make each item specific and actionable
|
| 248 |
+
- Estimate effort/complexity
|
| 249 |
+
- Assign priorities
|
| 250 |
+
|
| 251 |
+
### 4. Sequence Work
|
| 252 |
+
- Order by dependencies
|
| 253 |
+
- Front-load risky items
|
| 254 |
+
- Plan for blockers
|
| 255 |
+
|
| 256 |
+
### 5. Track Progress
|
| 257 |
+
- Use todo tool to track items
|
| 258 |
+
- Update status as work progresses
|
| 259 |
+
- Re-plan when needed
|
| 260 |
+
|
| 261 |
+
## Best Practices
|
| 262 |
+
- Start with end goal in mind
|
| 263 |
+
- Keep items small (< 1 hour ideal)
|
| 264 |
+
- Include verification steps
|
| 265 |
+
- Plan for error cases
|
| 266 |
+
|
| 267 |
+
## Example Structure
|
| 268 |
+
1. Research & understand requirements
|
| 269 |
+
2. Design approach
|
| 270 |
+
3. Implement core functionality
|
| 271 |
+
4. Add error handling
|
| 272 |
+
5. Test thoroughly
|
| 273 |
+
6. Document changes
|
| 274 |
+
"""
|
| 275 |
+
),
|
| 276 |
+
]
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
def _get_skill_description(skills: List[SkillInfo]) -> str:
    """Build the skill tool's description string.

    Embeds an XML-style listing (<available_skills>) of every registered
    skill's name and description so the model can pick one by name.

    Args:
        skills: Currently registered skills (may be empty).

    Returns:
        The full description text for the skill tool.
    """
    if not skills:
        return "Load a skill to get detailed instructions for a specific task. No skills are currently available."

    lines = [
        "Load a skill to get detailed instructions for a specific task.",
        "Skills provide specialized knowledge and step-by-step guidance.",
        "Use this when a task matches an available skill's description.",
        "",
        "<available_skills>",
    ]

    for skill in skills:
        # Placeholder-free entries are plain strings (the originals were
        # f-strings with nothing to interpolate).
        lines.extend([
            " <skill>",
            f" <name>{skill.name}</name>",
            f" <description>{skill.description}</description>",
            " </skill>",
        ])

    lines.append("</available_skills>")

    return "\n".join(lines)
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
class SkillTool(BaseTool):
    """Tool for loading skill instructions.

    Registers the default skill set (plus any caller-supplied skills) at
    construction time; the description and parameter schema are regenerated
    on access so later registrations are reflected.
    """

    def __init__(self, additional_skills: Optional[List[SkillInfo]] = None):
        """Initialize with optional additional skills.

        Args:
            additional_skills: Extra skills to register on top of DEFAULT_SKILLS.
        """
        # BaseTool.__init__ sets status/time_start/time_end; skipping it left
        # instances without the attributes update_status() relies on.
        super().__init__()

        # Register default skills
        for skill in DEFAULT_SKILLS:
            register_skill(skill)

        # Register additional skills if provided
        if additional_skills:
            for skill in additional_skills:
                register_skill(skill)

    @property
    def id(self) -> str:
        return "skill"

    @property
    def description(self) -> str:
        # Rebuilt on each access so newly registered skills are listed.
        return _get_skill_description(list_skills())

    @property
    def parameters(self) -> Dict[str, Any]:
        skill_names = [s.name for s in list_skills()]
        examples = ", ".join(f"'{n}'" for n in skill_names[:3])
        hint = f" (e.g., {examples}, ...)" if examples else ""

        name_schema: Dict[str, Any] = {
            "type": "string",
            "description": f"The skill identifier from available_skills{hint}",
        }
        # Only emit "enum" when skills exist: "enum": null is not valid JSON
        # Schema (enum must be a non-empty array) and providers may reject it.
        if skill_names:
            name_schema["enum"] = skill_names

        return {
            "type": "object",
            "properties": {"name": name_schema},
            "required": ["name"],
        }

    async def execute(self, args: Dict[str, Any], ctx: ToolContext) -> ToolResult:
        """Look up the requested skill and return its full instructions.

        Returns an error ToolResult (metadata["error"] = True) when the
        requested name is not registered.
        """
        skill_name = args.get("name", "")

        skill = get_skill(skill_name)

        if not skill:
            available = ", ".join(s.name for s in list_skills())
            return ToolResult(
                title=f"Skill not found: {skill_name}",
                output=f'Skill "{skill_name}" not found. Available skills: {available or "none"}',
                metadata={"error": True}
            )

        output = f"""## Skill: {skill.name}

**Description**: {skill.description}

{skill.content}
"""

        return ToolResult(
            title=f"Loaded skill: {skill.name}",
            output=output,
            metadata={"name": skill.name}
        )
|
src/opencode_api/tool/todo.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Any, List, Optional
|
| 2 |
+
from pydantic import BaseModel
|
| 3 |
+
from .tool import BaseTool, ToolContext, ToolResult
|
| 4 |
+
from ..core.storage import Storage
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class TodoItem(BaseModel):
    """A single todo entry tracked per session by TodoTool."""
    id: str
    content: str
    status: str = "pending"  # pending, in_progress, completed, cancelled
    priority: str = "medium"  # high, medium, low
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class TodoTool(BaseTool):
    """Session-scoped todo list manager persisted via Storage."""

    @property
    def id(self) -> str:
        return "todo"

    @property
    def description(self) -> str:
        return (
            "Manage a todo list for tracking tasks. Use this to create, update, "
            "and track progress on multi-step tasks. Supports pending, in_progress, "
            "completed, and cancelled statuses."
        )

    @property
    def parameters(self) -> Dict[str, Any]:
        # Schema for a single todo entry; only used by the "write" action.
        item_schema = {
            "type": "object",
            "properties": {
                "id": {"type": "string"},
                "content": {"type": "string"},
                "status": {
                    "type": "string",
                    "enum": ["pending", "in_progress", "completed", "cancelled"]
                },
                "priority": {
                    "type": "string",
                    "enum": ["high", "medium", "low"]
                }
            },
            "required": ["id", "content", "status", "priority"]
        }
        return {
            "type": "object",
            "properties": {
                "action": {
                    "type": "string",
                    "enum": ["read", "write"],
                    "description": "Action to perform: 'read' to get todos, 'write' to update todos"
                },
                "todos": {
                    "type": "array",
                    "description": "List of todos (required for 'write' action)",
                    "items": item_schema
                }
            },
            "required": ["action"]
        }

    async def execute(self, args: Dict[str, Any], ctx: ToolContext) -> ToolResult:
        """Route the call to the handler matching the requested action."""
        action = args["action"]
        if action == "read":
            return await self._read_todos(ctx.session_id)
        if action == "write":
            return await self._write_todos(ctx.session_id, args.get("todos", []))
        return ToolResult(
            title="Todo Error",
            output=f"Unknown action: {action}",
            metadata={"error": "invalid_action"}
        )

    async def _read_todos(self, session_id: str) -> ToolResult:
        """Load and render the todos stored for this session."""
        stored = await Storage.read(["todo", session_id])

        if not stored:
            return ToolResult(
                title="Todo List",
                output="No todos found for this session.",
                metadata={"count": 0}
            )

        items = [TodoItem(**entry) for entry in stored]
        return ToolResult(
            title="Todo List",
            output="\n".join(self._format_todos(items)),
            metadata={"count": len(items)}
        )

    async def _write_todos(self, session_id: str, todos_data: List[Dict]) -> ToolResult:
        """Validate, persist, and render the supplied todo list."""
        items = [TodoItem(**entry) for entry in todos_data]
        await Storage.write(["todo", session_id], [item.model_dump() for item in items])

        return ToolResult(
            title="Todo List Updated",
            output="\n".join(self._format_todos(items)),
            metadata={"count": len(items)}
        )

    def _format_todos(self, items: List[TodoItem]) -> List[str]:
        """Render each todo as '<status icon> <priority marker> <content> (id: ...)'."""
        icon_by_status = {
            "pending": "[ ]",
            "in_progress": "[~]",
            "completed": "[x]",
            "cancelled": "[-]"
        }
        marker_by_priority = {"high": "!!!", "medium": "!!", "low": "!"}

        rendered = [
            f"{icon_by_status.get(item.status, '[ ]')} "
            f"{marker_by_priority.get(item.priority, '')} "
            f"{item.content} (id: {item.id})"
            for item in items
        ]
        return rendered if rendered else ["No todos."]
|
src/opencode_api/tool/tool.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Any, List, Optional, Callable, Awaitable, Protocol, runtime_checkable
|
| 2 |
+
from pydantic import BaseModel
|
| 3 |
+
from abc import ABC, abstractmethod
|
| 4 |
+
from datetime import datetime
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class ToolContext(BaseModel):
    """Execution context handed to a tool's execute() call."""
    session_id: str
    message_id: str
    # Provider-assigned id of the originating tool call, when available.
    tool_call_id: Optional[str] = None
    agent: str = "default"
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class ToolResult(BaseModel):
    """Normalized result returned by a tool's execute()."""
    # Short human-readable summary of what the tool did.
    title: str
    # Full text output fed back to the model.
    output: str
    # Tool-specific extras; pydantic deep-copies mutable defaults per instance.
    metadata: Dict[str, Any] = {}
    # NOTE(review): truncated/original_length are not set by
    # BaseTool.truncate_output in this file — presumably set by callers; confirm.
    truncated: bool = False
    original_length: int = 0
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@runtime_checkable
class Tool(Protocol):
    """Structural interface every tool must satisfy (duck-typed check via
    runtime_checkable isinstance)."""

    @property
    def id(self) -> str: ...

    @property
    def description(self) -> str: ...

    @property
    def parameters(self) -> Dict[str, Any]: ...

    async def execute(self, args: Dict[str, Any], ctx: ToolContext) -> ToolResult: ...
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class BaseTool(ABC):
    """Common base for tools: abstract identity/schema/execute plus status
    tracking and output-size limiting."""

    # Hard cap applied by truncate_output().
    MAX_OUTPUT_LENGTH = 50000

    def __init__(self):
        self.status: str = "pending"
        self.time_start: Optional[datetime] = None
        self.time_end: Optional[datetime] = None

    @property
    @abstractmethod
    def id(self) -> str:
        ...

    @property
    @abstractmethod
    def description(self) -> str:
        ...

    @property
    @abstractmethod
    def parameters(self) -> Dict[str, Any]:
        ...

    @abstractmethod
    async def execute(self, args: Dict[str, Any], ctx: ToolContext) -> ToolResult:
        ...

    def get_schema(self) -> Dict[str, Any]:
        """Return the provider-facing schema dict for this tool."""
        return {
            "name": self.id,
            "description": self.description,
            "parameters": self.parameters
        }

    def truncate_output(self, output: str) -> str:
        """Clip output to MAX_OUTPUT_LENGTH, appending a truncation notice."""
        limit = self.MAX_OUTPUT_LENGTH
        if len(output) <= limit:
            return output
        return output[:limit] + "\n\n[Output truncated...]"

    def update_status(self, status: str) -> None:
        """Record a status change (pending, running, completed, error),
        stamping start/end times on the first matching transition."""
        self.status = status
        # The two guards are mutually exclusive, so independent ifs are
        # equivalent to the original if/elif.
        if status == "running" and self.time_start is None:
            self.time_start = datetime.now()
        if status in {"completed", "error"} and self.time_end is None:
            self.time_end = datetime.now()
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
from .registry import get_registry
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def register_tool(tool: BaseTool) -> None:
    """Register a tool (compatibility shim; delegates to the ToolRegistry)."""
    get_registry().register(tool)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def get_tool(tool_id: str) -> Optional[BaseTool]:
    """Look up a tool by id (compatibility shim; delegates to the ToolRegistry)."""
    return get_registry().get(tool_id)
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def list_tools() -> List[BaseTool]:
    """List all registered tools (compatibility shim; delegates to the ToolRegistry)."""
    return get_registry().list()
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def get_tools_schema() -> List[Dict[str, Any]]:
    """Collect every registered tool's schema (compatibility shim; delegates
    to the ToolRegistry)."""
    return get_registry().get_schema()
|
src/opencode_api/tool/webfetch.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Any
|
| 2 |
+
import httpx
|
| 3 |
+
from .tool import BaseTool, ToolContext, ToolResult
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class WebFetchTool(BaseTool):
    """Fetch a web page and return it as markdown, plain text, or raw HTML."""

    @property
    def id(self) -> str:
        return "webfetch"

    @property
    def description(self) -> str:
        return (
            "Fetch content from a URL and convert it to readable text or markdown. "
            "Use this when you need to read the content of a specific web page."
        )

    @property
    def parameters(self) -> Dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "url": {
                    "type": "string",
                    "description": "The URL to fetch"
                },
                "format": {
                    "type": "string",
                    "enum": ["text", "markdown", "html"],
                    "description": "Output format (default: markdown)",
                    "default": "markdown"
                }
            },
            "required": ["url"]
        }

    async def execute(self, args: Dict[str, Any], ctx: ToolContext) -> ToolResult:
        """Fetch the URL, convert per the requested format, and cap the size.

        Network and HTTP failures are reported as error ToolResults rather
        than raised.
        """
        url = args["url"]
        output_format = args.get("format", "markdown")

        # Default to https when no scheme was given.
        if not url.startswith(("http://", "https://")):
            url = "https://" + url

        try:
            async with httpx.AsyncClient(follow_redirects=True, timeout=30.0) as client:
                response = await client.get(
                    url,
                    headers={
                        "User-Agent": "Mozilla/5.0 (compatible; OpenCode-API/1.0)"
                    }
                )
                response.raise_for_status()
                html_content = response.text

            if output_format == "html":
                content = html_content
            elif output_format == "text":
                content = self._html_to_text(html_content)
            else:  # markdown
                content = self._html_to_markdown(html_content)

            # One shared cap (BaseTool.MAX_OUTPUT_LENGTH) for every format.
            # Previously the html branch was pre-clipped to a duplicated magic
            # 50000 and so never received the truncation notice.
            if len(content) > self.MAX_OUTPUT_LENGTH:
                content = content[:self.MAX_OUTPUT_LENGTH] + "\n\n[Content truncated...]"

            return ToolResult(
                title=f"Fetched: {url}",
                output=content,
                metadata={"url": url, "format": output_format, "length": len(content)}
            )

        except httpx.HTTPStatusError as e:
            return ToolResult(
                title=f"Fetch failed: {url}",
                output=f"HTTP Error {e.response.status_code}: {e.response.reason_phrase}",
                metadata={"error": "http_error", "status_code": e.response.status_code}
            )
        except httpx.RequestError as e:
            return ToolResult(
                title=f"Fetch failed: {url}",
                output=f"Request error: {str(e)}",
                metadata={"error": "request_error"}
            )
        except Exception as e:
            return ToolResult(
                title=f"Fetch failed: {url}",
                output=f"Error: {str(e)}",
                metadata={"error": str(e)}
            )

    def _html_to_text(self, html: str) -> str:
        """Strip markup and boilerplate; regex fallback when bs4 is absent."""
        try:
            from bs4 import BeautifulSoup
            soup = BeautifulSoup(html, "html.parser")

            # Drop non-content elements before extracting text.
            for tag in soup(["script", "style", "nav", "footer", "header"]):
                tag.decompose()

            return soup.get_text(separator="\n", strip=True)
        except ImportError:
            import re
            text = re.sub(r"<script[^>]*>.*?</script>", "", html, flags=re.DOTALL | re.IGNORECASE)
            text = re.sub(r"<style[^>]*>.*?</style>", "", text, flags=re.DOTALL | re.IGNORECASE)
            text = re.sub(r"<[^>]+>", " ", text)
            text = re.sub(r"\s+", " ", text)
            return text.strip()

    def _html_to_markdown(self, html: str) -> str:
        """Convert HTML to markdown via html2text; plain-text fallback."""
        try:
            import html2text
            h = html2text.HTML2Text()
            h.ignore_links = False
            h.ignore_images = True
            h.body_width = 0  # no hard line wrapping
            return h.handle(html)
        except ImportError:
            return self._html_to_text(html)
|
src/opencode_api/tool/websearch.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Any, List
|
| 2 |
+
from .tool import BaseTool, ToolContext, ToolResult
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class WebSearchTool(BaseTool):
    """Search the web via DuckDuckGo (ddgs)."""

    # Default search locale; previously hard-coded, now overridable per call.
    DEFAULT_REGION = "kr-kr"

    @property
    def id(self) -> str:
        return "websearch"

    @property
    def description(self) -> str:
        return (
            "Search the web using DuckDuckGo. Returns relevant search results "
            "with titles, URLs, and snippets. Use this when you need current "
            "information from the internet."
        )

    @property
    def parameters(self) -> Dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "The search query"
                },
                "max_results": {
                    "type": "integer",
                    "description": "Maximum number of results to return (default: 5)",
                    "default": 5
                },
                "region": {
                    "type": "string",
                    "description": "DuckDuckGo region code (default: kr-kr)",
                    "default": "kr-kr"
                }
            },
            "required": ["query"]
        }

    async def execute(self, args: Dict[str, Any], ctx: ToolContext) -> ToolResult:
        """Run the search and format results as a numbered list.

        Missing-dependency and search failures are reported as error
        ToolResults rather than raised.
        """
        query = args["query"]
        max_results = args.get("max_results", 5)
        # Same behavior as before for callers that omit "region".
        region = args.get("region", self.DEFAULT_REGION)

        try:
            from ddgs import DDGS

            results = []
            with DDGS() as ddgs:
                for r in ddgs.text(query, region=region, max_results=max_results):
                    results.append({
                        "title": r.get("title", ""),
                        "url": r.get("href", ""),
                        "snippet": r.get("body", "")
                    })

            if not results:
                return ToolResult(
                    title=f"Web search: {query}",
                    output="No results found.",
                    metadata={"query": query, "count": 0}
                )

            output_lines = []
            for i, r in enumerate(results, 1):
                output_lines.append(f"{i}. {r['title']}")
                output_lines.append(f" URL: {r['url']}")
                output_lines.append(f" {r['snippet']}")
                output_lines.append("")

            return ToolResult(
                title=f"Web search: {query}",
                output="\n".join(output_lines),
                metadata={"query": query, "count": len(results)}
            )

        except ImportError:
            return ToolResult(
                title=f"Web search: {query}",
                output="Error: ddgs package not installed. Run: pip install ddgs",
                metadata={"error": "missing_dependency"}
            )
        except Exception as e:
            return ToolResult(
                title=f"Web search: {query}",
                output=f"Error performing search: {str(e)}",
                metadata={"error": str(e)}
            )
|