Somrat Sorkar committed on
Commit
0bfb89f
·
1 Parent(s): facefdf

Fix all provider names to match OpenClaw system (zai, moonshot, mistral, xai, opencode, etc.)

Browse files
Files changed (3) hide show
  1. .env.example +89 -160
  2. README.md +55 -67
  3. start.sh +37 -30
.env.example CHANGED
@@ -9,134 +9,119 @@
9
  # - Anthropic: sk-ant-v0-...
10
  # - OpenAI: sk-...
11
  # - Google: AIzaSy...
 
12
  LLM_API_KEY=your_api_key_here
13
 
14
  # [REQUIRED] LLM model to use (format: provider/model-name)
15
  # Auto-detects provider from prefix — any provider is supported!
16
- # Model IDs sourced from https://openrouter.ai/api/v1/models
17
  #
18
- # ── Top-Tier Providers ──
19
  #
20
- # Anthropic Claude:
21
  # - anthropic/claude-opus-4-6
22
  # - anthropic/claude-sonnet-4-6
23
  # - anthropic/claude-sonnet-4-5
24
  # - anthropic/claude-haiku-4-5
25
  #
26
- # OpenAI:
27
- # - openai/gpt-5.4-pro (1M context)
28
- # - openai/gpt-5.4 (1M context)
29
  # - openai/gpt-5.4-mini
30
  # - openai/gpt-5.4-nano
31
  # - openai/gpt-4.1
32
  # - openai/gpt-4.1-mini
33
- # - openai/gpt-4.1-nano
34
  #
35
- # Google Gemini:
36
  # - google/gemini-3.1-pro-preview
37
- # - google/gemini-3.1-flash-lite-preview
38
  # - google/gemini-2.5-pro
39
  # - google/gemini-2.5-flash
40
  #
41
- # DeepSeek:
42
  # - deepseek/deepseek-v3.2
43
  # - deepseek/deepseek-r1-0528
44
  # - deepseek/deepseek-r1
45
- # - deepseek/deepseek-chat-v3.1
46
  #
47
- # ── Chinese/Asian Providers ──
48
  #
49
- # Qwen (Alibaba):
50
- # - qwen/qwen3.6-plus-preview:free (1M context, free!)
51
- # - qwen/qwen3-max
52
- # - qwen/qwen3-coder
53
- # - qwen/qwen3.5-397b-a17b
54
- # - qwen/qwen3.5-35b-a3b
55
- #
56
- # Z.ai (GLM):
57
- # - z-ai/glm-5
58
- # - z-ai/glm-5-turbo
59
- # - z-ai/glm-4.7
60
- # - z-ai/glm-4.7-flash
61
- # - z-ai/glm-4.5-air:free (free!)
62
- #
63
- # Moonshot (Kimi):
64
- # - moonshotai/kimi-k2.5
65
- # - moonshotai/kimi-k2
66
- # - moonshotai/kimi-k2-thinking
67
- #
68
- # Xiaomi (MiMo):
69
- # - xiaomi/mimo-v2-pro (1M context)
70
- # - xiaomi/mimo-v2-omni (multimodal)
71
- # - xiaomi/mimo-v2-flash
72
- #
73
- # ByteDance Seed:
74
- # - bytedance-seed/seed-2.0-lite
75
- # - bytedance-seed/seed-2.0-mini
76
- #
77
- # Baidu (ERNIE):
78
- # - baidu/ernie-4.5-300b-a47b
79
- # - baidu/ernie-4.5-21b-a3b
80
- #
81
- # Tencent:
82
- # - tencent/hunyuan-a13b-instruct
83
- #
84
- # StepFun:
85
- # - stepfun/step-3.5-flash
86
- # - stepfun/step-3.5-flash:free (free!)
87
  #
88
- # ── Western Providers ──
 
89
  #
90
- # Mistral:
91
- # - mistralai/mistral-large-2512
92
- # - mistralai/mistral-medium-3.1
93
- # - mistralai/mistral-small-2603
94
- # - mistralai/devstral-medium (coding)
95
- # - mistralai/codestral-2508
96
- #
97
- # xAI (Grok):
98
- # - x-ai/grok-4.20-beta
99
- # - x-ai/grok-4.20-multi-agent-beta
100
- # - x-ai/grok-4.1-fast
101
- # - x-ai/grok-4
102
- #
103
- # Meta (Llama):
104
- # - meta-llama/llama-4-maverick
105
- # - meta-llama/llama-4-scout
106
- # - meta-llama/llama-3.3-70b-instruct
107
- #
108
- # NVIDIA:
109
- # - nvidia/nemotron-3-super-120b-a12b
110
- # - nvidia/nemotron-3-super-120b-a12b:free (free!)
111
  #
112
- # Cohere:
113
- # - cohere/command-a
114
- # - cohere/command-r-plus-08-2024
 
 
 
115
  #
116
- # Perplexity:
117
- # - perplexity/sonar-deep-research
118
- # - perplexity/sonar-pro
119
  #
120
- # MiniMax:
 
 
 
 
 
 
 
 
 
 
 
 
121
  # - minimax/minimax-m2.7
122
  # - minimax/minimax-m2.5
123
- # - minimax/minimax-m2.5:free (free!)
124
  #
125
- # ── Speed & Specialty ──
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
126
  #
127
- # Inception (ultra-fast reasoning):
128
- # - inception/mercury-2 (1000+ tok/sec)
129
- # - inception/mercury-coder
130
  #
131
- # Amazon:
132
- # - amazon/nova-premier-v1
133
- # - amazon/nova-pro-v1
134
  #
135
- # ── OpenRouter (100+ models via single API key) ──
136
- # Use any model above via OpenRouter with a single API key!
137
- # See https://openrouter.ai/models for complete list
138
  #
139
- # Or any other provider β€” format: provider/model-name (auto-detected)
140
  LLM_MODEL=anthropic/claude-sonnet-4-5
141
 
142
  # [REQUIRED] Gateway authentication token
@@ -144,100 +129,44 @@ LLM_MODEL=anthropic/claude-sonnet-4-5
144
  GATEWAY_TOKEN=your_gateway_token_here
145
 
146
  # ── OPTIONAL: Telegram Integration ──
147
- # Enable Telegram bot integration
148
  # Get bot token from: https://t.me/BotFather
149
  TELEGRAM_BOT_TOKEN=your_bot_token_here
150
 
151
- # Single user ID for DM access
152
- # Get your ID from: https://t.me/userinfobot
153
  TELEGRAM_USER_ID=123456789
154
 
155
  # Multiple user IDs (comma-separated for team access)
156
  # TELEGRAM_USER_IDS=123456789,987654321,555555555
157
 
158
  # ── OPTIONAL: Workspace Backup to HF Dataset ──
159
- # Enable automatic workspace backup & restore
160
- # Your HuggingFace username
161
  HF_USERNAME=your_hf_username
162
-
163
- # HuggingFace API token (with write access)
164
- # Get from: https://huggingface.co/settings/tokens
165
  HF_TOKEN=hf_your_token_here
166
 
167
- # Name of the backup dataset (auto-created if missing)
168
  # Default: huggingclaw-backup
169
  BACKUP_DATASET_NAME=huggingclaw-backup
170
 
171
- # Git commit email for workspace syncs
172
- # Default: openclaw@example.com
173
  WORKSPACE_GIT_USER=openclaw@example.com
174
-
175
- # Git commit name for workspace syncs
176
- # Default: OpenClaw Bot
177
  WORKSPACE_GIT_NAME=OpenClaw Bot
178
 
179
- # ── OPTIONAL: Background Services Configuration ──
180
- # Keep-alive ping interval in seconds (prevents HF Spaces sleep)
181
- # Default: 300 (5 minutes)
182
- # Set to 0 to disable (not recommended on HF Spaces)
183
  KEEP_ALIVE_INTERVAL=300
184
 
185
- # Workspace auto-sync interval in seconds
186
- # Default: 600 (10 minutes)
187
  SYNC_INTERVAL=600
188
 
189
- # ── OPTIONAL: Advanced Configuration ──
190
- # Pin OpenClaw version (default: latest)
191
- # Example: OPENCLAW_VERSION=2026.3.24
192
  OPENCLAW_VERSION=latest
193
 
194
- # Health endpoint port (default: 7861)
195
  HEALTH_PORT=7861
196
 
197
  # ════════════════════════════════════════════════════════════════
198
- # QUICK START CHECKLIST
199
- # ════════════════════════════════════════════════════════════════
200
- #
201
- # βœ… Minimum setup (3 secrets):
202
- # 1. LLM_API_KEY - Get from your LLM provider
203
- # 2. LLM_MODEL - Choose a model above
204
- # 3. GATEWAY_TOKEN - Run: openssl rand -hex 32
205
- #
206
- # βœ… Add Telegram (2 more secrets):
207
- # 4. TELEGRAM_BOT_TOKEN - From @BotFather
208
- # 5. TELEGRAM_USER_ID - From @userinfobot
209
- #
210
- # βœ… Enable Backup (2 more secrets):
211
- # 6. HF_USERNAME - Your HF account name
212
- # 7. HF_TOKEN - From HF Settings β†’ Tokens
213
- #
214
- # ════════════════════════════════════════════════════════════════
215
- # DEPLOYMENT OPTIONS
216
- # ════════════════════════════════════════════════════════════════
217
- #
218
- # πŸ“¦ HuggingFace Spaces (Recommended):
219
- # - Click "Duplicate this Space"
220
- # - Go to Settings β†’ Secrets
221
- # - Add LLM_API_KEY, LLM_MODEL, GATEWAY_TOKEN
222
- # - Deploy (automatic!)
223
- #
224
- # 🐳 Docker Local:
225
- # docker build -t huggingclaw .
226
- # docker run -p 7860:7860 --env-file .env huggingclaw
227
- #
228
- # πŸ’» Direct (without Docker):
229
- # npm install -g openclaw@latest
230
- # export $(cat .env | xargs)
231
- # bash start.sh
232
- #
233
- # ════════════════════════════════════════════════════════════════
234
- # VERIFY YOUR SETUP
235
- # ════════════════════════════════════════════════════════════════
236
- #
237
- # After deployment, check:
238
- # 1. Logs for "🦞 HuggingClaw Gateway" banner
239
- # 2. Health endpoint: curl https://YOUR-SPACE-URL.hf.space/health
240
- # 3. Control UI: https://YOUR-SPACE-URL.hf.space
241
- # 4. (If Telegram) DM your bot to test
242
- #
243
  # ════════════════════════════════════════════════════════════════
 
9
  # - Anthropic: sk-ant-v0-...
10
  # - OpenAI: sk-...
11
  # - Google: AIzaSy...
12
+ # - OpenRouter: sk-or-v1-... (300+ models via single key)
13
  LLM_API_KEY=your_api_key_here
14
 
15
  # [REQUIRED] LLM model to use (format: provider/model-name)
16
  # Auto-detects provider from prefix — any provider is supported!
17
+ # Provider IDs from OpenClaw docs: docs.openclaw.ai/concepts/model-providers
18
  #
19
+ # ── Core Providers ──
20
  #
21
+ # Anthropic (ANTHROPIC_API_KEY):
22
  # - anthropic/claude-opus-4-6
23
  # - anthropic/claude-sonnet-4-6
24
  # - anthropic/claude-sonnet-4-5
25
  # - anthropic/claude-haiku-4-5
26
  #
27
+ # OpenAI (OPENAI_API_KEY):
28
+ # - openai/gpt-5.4-pro
29
+ # - openai/gpt-5.4
30
  # - openai/gpt-5.4-mini
31
  # - openai/gpt-5.4-nano
32
  # - openai/gpt-4.1
33
  # - openai/gpt-4.1-mini
 
34
  #
35
+ # Google Gemini (GEMINI_API_KEY):
36
  # - google/gemini-3.1-pro-preview
37
+ # - google/gemini-3-flash-preview
38
  # - google/gemini-2.5-pro
39
  # - google/gemini-2.5-flash
40
  #
41
+ # DeepSeek (DEEPSEEK_API_KEY):
42
  # - deepseek/deepseek-v3.2
43
  # - deepseek/deepseek-r1-0528
44
  # - deepseek/deepseek-r1
 
45
  #
46
+ # ── OpenCode Providers ──
47
  #
48
+ # OpenCode Zen — tested & verified models (OPENCODE_API_KEY):
49
+ # - opencode/claude-opus-4-6
50
+ # - opencode/gpt-5.4
51
+ # Get key from: https://opencode.ai/auth
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  #
53
+ # OpenCode Go — low-cost open models (OPENCODE_API_KEY):
54
+ # - opencode-go/kimi-k2.5
55
  #
56
+ # ── Gateway/Router Providers ──
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
  #
58
+ # OpenRouter — 300+ models via single API key (OPENROUTER_API_KEY):
59
+ # - openrouter/anthropic/claude-sonnet-4-6
60
+ # - openrouter/openai/gpt-5.4
61
+ # - openrouter/deepseek/deepseek-v3.2
62
+ # - openrouter/meta-llama/llama-3.3-70b-instruct:free
63
+ # Get key from: https://openrouter.ai
64
  #
65
+ # Kilo Gateway (KILOCODE_API_KEY):
66
+ # - kilocode/anthropic/claude-opus-4.6
 
67
  #
68
+ # ── Chinese/Asian Providers ──
69
+ #
70
+ # Z.ai / GLM (ZAI_API_KEY) — OpenClaw normalizes z-ai/z.ai → zai:
71
+ # - zai/glm-5
72
+ # - zai/glm-5-turbo
73
+ # - zai/glm-4.7
74
+ # - zai/glm-4.7-flash
75
+ #
76
+ # Moonshot / Kimi (MOONSHOT_API_KEY):
77
+ # - moonshot/kimi-k2.5
78
+ # - moonshot/kimi-k2-thinking
79
+ #
80
+ # MiniMax (MINIMAX_API_KEY):
81
  # - minimax/minimax-m2.7
82
  # - minimax/minimax-m2.5
 
83
  #
84
+ # Xiaomi / MiMo (XIAOMI_API_KEY):
85
+ # - xiaomi/mimo-v2-pro
86
+ # - xiaomi/mimo-v2-omni
87
+ #
88
+ # Volcengine / Doubao (VOLCANO_ENGINE_API_KEY):
89
+ # - volcengine/doubao-seed-1-8-251228
90
+ # - volcengine/kimi-k2-5-260127
91
+ #
92
+ # BytePlus — international (BYTEPLUS_API_KEY):
93
+ # - byteplus/seed-1-8-251228
94
+ #
95
+ # ── Western Providers ──
96
+ #
97
+ # Mistral (MISTRAL_API_KEY):
98
+ # - mistral/mistral-large-latest
99
+ # - mistral/mistral-small-2603
100
+ # - mistral/devstral-medium
101
+ #
102
+ # xAI / Grok (XAI_API_KEY):
103
+ # - xai/grok-4.20-beta
104
+ # - xai/grok-4
105
+ #
106
+ # NVIDIA (NVIDIA_API_KEY):
107
+ # - nvidia/nemotron-3-super-120b-a12b
108
+ #
109
+ # Groq (GROQ_API_KEY):
110
+ # - groq/mixtral-8x7b-32768
111
+ #
112
+ # Cohere (COHERE_API_KEY):
113
+ # - cohere/command-a
114
  #
115
+ # Together (TOGETHER_API_KEY):
116
+ # - together/meta-llama/llama-3.3-70b-instruct
 
117
  #
118
+ # Cerebras (CEREBRAS_API_KEY):
119
+ # - cerebras/zai-glm-4.7
 
120
  #
121
+ # HuggingFace Inference (HUGGINGFACE_HUB_TOKEN):
122
+ # - huggingface/deepseek-ai/DeepSeek-R1
 
123
  #
124
+ # Or any other OpenClaw-supported provider (format: provider/model-name)
125
  LLM_MODEL=anthropic/claude-sonnet-4-5
126
 
127
  # [REQUIRED] Gateway authentication token
 
129
  GATEWAY_TOKEN=your_gateway_token_here
130
 
131
  # ── OPTIONAL: Telegram Integration ──
 
132
  # Get bot token from: https://t.me/BotFather
133
  TELEGRAM_BOT_TOKEN=your_bot_token_here
134
 
135
+ # Single user ID (from https://t.me/userinfobot)
 
136
  TELEGRAM_USER_ID=123456789
137
 
138
  # Multiple user IDs (comma-separated for team access)
139
  # TELEGRAM_USER_IDS=123456789,987654321,555555555
140
 
141
  # ── OPTIONAL: Workspace Backup to HF Dataset ──
 
 
142
  HF_USERNAME=your_hf_username
 
 
 
143
  HF_TOKEN=hf_your_token_here
144
 
145
+ # Backup dataset name (auto-created if missing)
146
  # Default: huggingclaw-backup
147
  BACKUP_DATASET_NAME=huggingclaw-backup
148
 
149
+ # Git commit identity for workspace syncs
 
150
  WORKSPACE_GIT_USER=openclaw@example.com
 
 
 
151
  WORKSPACE_GIT_NAME=OpenClaw Bot
152
 
153
+ # ── OPTIONAL: Background Services ──
154
+ # Keep-alive ping interval (seconds). Default: 300. Set 0 to disable.
 
 
155
  KEEP_ALIVE_INTERVAL=300
156
 
157
+ # Workspace auto-sync interval (seconds). Default: 600.
 
158
  SYNC_INTERVAL=600
159
 
160
+ # ── OPTIONAL: Advanced ──
161
+ # Pin OpenClaw version. Default: latest
 
162
  OPENCLAW_VERSION=latest
163
 
164
+ # Health endpoint port. Default: 7861
165
  HEALTH_PORT=7861
166
 
167
  # ════════════════════════════════════════════════════════════════
168
+ # QUICK START: Only 3 secrets required!
169
+ # 1. LLM_API_KEY → From your LLM provider
170
+ # 2. LLM_MODEL → Pick a model above
171
+ # 3. GATEWAY_TOKEN → Run: openssl rand -hex 32
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
172
  # ════════════════════════════════════════════════════════════════
README.md CHANGED
@@ -118,7 +118,7 @@ See **`.env.example`** for the complete reference with examples.
118
 
119
  ## πŸ€– LLM Provider Setup
120
 
121
- Just set `LLM_MODEL` with the correct provider prefix β€” **any provider is supported**! The provider is auto-detected from the model name. All model IDs sourced from [OpenRouter](https://openrouter.ai/models).
122
 
123
  ### Anthropic (Claude)
124
  ```
@@ -139,131 +139,113 @@ Models: `openai/gpt-5.4-pro` Β· `openai/gpt-5.4` Β· `openai/gpt-5.4-mini` Β· `op
139
  LLM_API_KEY=AIzaSy...
140
  LLM_MODEL=google/gemini-2.5-flash
141
  ```
142
- Models: `google/gemini-3.1-pro-preview` Β· `google/gemini-3.1-flash-lite-preview` Β· `google/gemini-2.5-pro` Β· `google/gemini-2.5-flash`
143
 
144
  ### DeepSeek
145
  ```
146
  LLM_API_KEY=sk-...
147
  LLM_MODEL=deepseek/deepseek-v3.2
148
  ```
149
- Models: `deepseek/deepseek-v3.2` Β· `deepseek/deepseek-r1-0528` Β· `deepseek/deepseek-r1` Β· `deepseek/deepseek-chat-v3.1`
150
  Get key from: [DeepSeek Platform](https://platform.deepseek.com)
151
 
152
- ### Qwen (Alibaba)
153
  ```
154
- LLM_API_KEY=your_qwen_api_key
155
- LLM_MODEL=qwen/qwen3-max
156
  ```
157
- Models: `qwen/qwen3.6-plus-preview:free` (free!) Β· `qwen/qwen3-max` Β· `qwen/qwen3-coder` Β· `qwen/qwen3.5-397b-a17b` Β· `qwen/qwen3.5-35b-a3b`
158
- Get key from: [DashScope](https://dashscope.aliyun.com)
 
 
 
 
 
 
 
159
 
160
  ### Z.ai (GLM)
161
  ```
162
  LLM_API_KEY=your_zai_api_key
163
- LLM_MODEL=z-ai/glm-5-turbo
164
  ```
165
- Models: `z-ai/glm-5` Β· `z-ai/glm-5-turbo` Β· `z-ai/glm-4.7` Β· `z-ai/glm-4.7-flash` Β· `z-ai/glm-4.5-air:free` (free!)
166
- Get key from: [Z.ai](https://z.ai)
167
 
168
  ### Moonshot (Kimi)
169
  ```
170
  LLM_API_KEY=sk-...
171
- LLM_MODEL=moonshotai/kimi-k2.5
172
  ```
173
- Models: `moonshotai/kimi-k2.5` Β· `moonshotai/kimi-k2` Β· `moonshotai/kimi-k2-thinking`
174
  Get key from: [Moonshot API](https://platform.moonshot.cn)
175
 
176
  ### Mistral
177
  ```
178
  LLM_API_KEY=your_mistral_api_key
179
- LLM_MODEL=mistralai/mistral-large-2512
180
  ```
181
- Models: `mistralai/mistral-large-2512` Β· `mistralai/mistral-medium-3.1` Β· `mistralai/mistral-small-2603` Β· `mistralai/devstral-medium` Β· `mistralai/codestral-2508`
182
  Get key from: [Mistral Console](https://console.mistral.ai)
183
 
184
  ### xAI (Grok)
185
  ```
186
  LLM_API_KEY=your_xai_api_key
187
- LLM_MODEL=x-ai/grok-4.20-beta
188
  ```
189
- Models: `x-ai/grok-4.20-beta` Β· `x-ai/grok-4.20-multi-agent-beta` Β· `x-ai/grok-4.1-fast` Β· `x-ai/grok-4`
190
  Get key from: [xAI Console](https://console.x.ai)
191
 
192
- ### Meta (Llama)
193
- ```
194
- LLM_API_KEY=your_meta_api_key
195
- LLM_MODEL=meta-llama/llama-4-maverick
196
- ```
197
- Models: `meta-llama/llama-4-maverick` Β· `meta-llama/llama-4-scout` Β· `meta-llama/llama-3.3-70b-instruct:free` (free!)
198
-
199
  ### MiniMax
200
  ```
201
  LLM_API_KEY=your_minimax_api_key
202
  LLM_MODEL=minimax/minimax-m2.7
203
  ```
204
- Models: `minimax/minimax-m2.7` Β· `minimax/minimax-m2.5` Β· `minimax/minimax-m2.5:free` (free!)
205
- Get key from: [MiniMax Platform](https://api.minimaxi.com)
206
 
207
- ### NVIDIA (Nemotron)
208
  ```
209
  LLM_API_KEY=your_nvidia_api_key
210
  LLM_MODEL=nvidia/nemotron-3-super-120b-a12b
211
  ```
212
- Models: `nvidia/nemotron-3-super-120b-a12b` Β· `nvidia/nemotron-3-super-120b-a12b:free` (free!)
213
  Get key from: [NVIDIA API](https://api.nvidia.com)
214
 
215
- ### Cohere
216
- ```
217
- LLM_API_KEY=your_cohere_api_key
218
- LLM_MODEL=cohere/command-a
219
- ```
220
- Models: `cohere/command-a` Β· `cohere/command-r-plus-08-2024`
221
- Get key from: [Cohere Dashboard](https://dashboard.cohere.com)
222
-
223
- ### Perplexity
224
- ```
225
- LLM_API_KEY=your_perplexity_api_key
226
- LLM_MODEL=perplexity/sonar-pro
227
- ```
228
- Models: `perplexity/sonar-deep-research` Β· `perplexity/sonar-pro` Β· `perplexity/sonar-reasoning-pro`
229
- Get key from: [Perplexity API](https://www.perplexity.ai/settings/api)
230
-
231
  ### Xiaomi (MiMo)
232
  ```
233
  LLM_API_KEY=your_xiaomi_api_key
234
  LLM_MODEL=xiaomi/mimo-v2-pro
235
  ```
236
- Models: `xiaomi/mimo-v2-pro` (1M context) Β· `xiaomi/mimo-v2-omni` (multimodal) Β· `xiaomi/mimo-v2-flash`
237
 
238
- ### ByteDance (Seed)
239
  ```
240
- LLM_API_KEY=your_bytedance_api_key
241
- LLM_MODEL=bytedance-seed/seed-2.0-lite
242
  ```
243
- Models: `bytedance-seed/seed-2.0-lite` Β· `bytedance-seed/seed-2.0-mini`
244
  Get key from: [Volcengine](https://www.volcengine.com)
245
 
246
- ### Baidu (ERNIE)
247
  ```
248
- LLM_API_KEY=your_baidu_api_key
249
- LLM_MODEL=baidu/ernie-4.5-300b-a47b
250
  ```
251
- Models: `baidu/ernie-4.5-300b-a47b` Β· `baidu/ernie-4.5-21b-a3b`
252
 
253
- ### Inception (Mercury β€” ultra-fast)
254
  ```
255
- LLM_API_KEY=your_inception_api_key
256
- LLM_MODEL=inception/mercury-2
257
  ```
258
- Models: `inception/mercury-2` (1000+ tok/sec!) Β· `inception/mercury-coder`
259
- Get key from: [Inception Labs](https://www.inceptionlabs.ai)
260
 
261
- ### Amazon (Nova)
262
  ```
263
- LLM_API_KEY=your_amazon_api_key
264
- LLM_MODEL=amazon/nova-premier-v1
265
  ```
266
- Models: `amazon/nova-premier-v1` Β· `amazon/nova-pro-v1` Β· `amazon/nova-lite-v1`
267
 
268
  ### OpenRouter (300+ models via single API key)
269
  ```
@@ -272,18 +254,22 @@ LLM_MODEL=openrouter/anthropic/claude-sonnet-4-6
272
  ```
273
  With OpenRouter, you can access **every model above** with a single API key! Just prefix with `openrouter/`:
274
  - `openrouter/anthropic/claude-sonnet-4-6` β€” Anthropic Claude
275
- - `openrouter/openai/gpt-5.4` β€” OpenAI (1M context)
276
- - `openrouter/deepseek/deepseek-v3.2` β€” DeepSeek V3.2
277
  - `openrouter/google/gemini-2.5-flash` β€” Google Gemini
278
- - `openrouter/qwen/qwen3.6-plus-preview:free` β€” Qwen (free!)
279
- - `openrouter/x-ai/grok-4.20-beta` β€” xAI Grok
280
  - `openrouter/meta-llama/llama-3.3-70b-instruct:free` β€” Llama (free!)
281
- - `openrouter/mistralai/mistral-large-2512` β€” Mistral Large
282
  - `openrouter/moonshotai/kimi-k2.5` β€” Moonshot Kimi
283
  - `openrouter/z-ai/glm-5-turbo` β€” Z.ai GLM
284
 
285
  Get key from: [OpenRouter.ai](https://openrouter.ai) Β· [Full model list](https://openrouter.ai/models)
286
 
 
 
 
 
 
 
 
287
  ### Any Other Provider
288
  HuggingClaw supports **any LLM provider** that OpenClaw supports. Just use:
289
  ```
@@ -292,6 +278,8 @@ LLM_MODEL=provider/model-name
292
  ```
293
  The provider prefix is auto-detected and mapped to the appropriate environment variable.
294
 
 
 
295
  ---
296
 
297
  ## πŸ“± Telegram Setup
 
118
 
119
  ## πŸ€– LLM Provider Setup
120
 
121
+ Just set `LLM_MODEL` with the correct provider prefix — **any provider is supported**! The provider is auto-detected from the model name. All provider IDs from [OpenClaw docs](https://docs.openclaw.ai/concepts/model-providers).
122
 
123
  ### Anthropic (Claude)
124
  ```
 
139
  LLM_API_KEY=AIzaSy...
140
  LLM_MODEL=google/gemini-2.5-flash
141
  ```
142
+ Models: `google/gemini-3.1-pro-preview` · `google/gemini-3-flash-preview` · `google/gemini-2.5-pro` · `google/gemini-2.5-flash`
143
 
144
  ### DeepSeek
145
  ```
146
  LLM_API_KEY=sk-...
147
  LLM_MODEL=deepseek/deepseek-v3.2
148
  ```
149
+ Models: `deepseek/deepseek-v3.2` · `deepseek/deepseek-r1-0528` · `deepseek/deepseek-r1`
150
  Get key from: [DeepSeek Platform](https://platform.deepseek.com)
151
 
152
+ ### OpenCode Zen (tested & verified models)
153
  ```
154
+ LLM_API_KEY=your_opencode_api_key
155
+ LLM_MODEL=opencode/claude-opus-4-6
156
  ```
157
+ Models: `opencode/claude-opus-4-6` · `opencode/gpt-5.4`
158
+ Get key from: [OpenCode.ai](https://opencode.ai/auth)
159
+
160
+ ### OpenCode Go (low-cost open models)
161
+ ```
162
+ LLM_API_KEY=your_opencode_api_key
163
+ LLM_MODEL=opencode-go/kimi-k2.5
164
+ ```
165
+ Get key from: [OpenCode.ai](https://opencode.ai/auth)
166
 
167
  ### Z.ai (GLM)
168
  ```
169
  LLM_API_KEY=your_zai_api_key
170
+ LLM_MODEL=zai/glm-5
171
  ```
172
+ Models: `zai/glm-5` · `zai/glm-5-turbo` · `zai/glm-4.7` · `zai/glm-4.7-flash`
173
+ Get key from: [Z.ai](https://z.ai) · Note: `z-ai/` and `z.ai/` prefixes auto-normalize to `zai/`
174
 
175
  ### Moonshot (Kimi)
176
  ```
177
  LLM_API_KEY=sk-...
178
+ LLM_MODEL=moonshot/kimi-k2.5
179
  ```
180
+ Models: `moonshot/kimi-k2.5` · `moonshot/kimi-k2-thinking`
181
  Get key from: [Moonshot API](https://platform.moonshot.cn)
182
 
183
  ### Mistral
184
  ```
185
  LLM_API_KEY=your_mistral_api_key
186
+ LLM_MODEL=mistral/mistral-large-latest
187
  ```
188
+ Models: `mistral/mistral-large-latest` · `mistral/mistral-small-2603` · `mistral/devstral-medium` · `mistral/codestral-2508`
189
  Get key from: [Mistral Console](https://console.mistral.ai)
190
 
191
  ### xAI (Grok)
192
  ```
193
  LLM_API_KEY=your_xai_api_key
194
+ LLM_MODEL=xai/grok-4.20-beta
195
  ```
196
+ Models: `xai/grok-4.20-beta` · `xai/grok-4` · `xai/grok-4.1-fast`
197
  Get key from: [xAI Console](https://console.x.ai)
198
 
 
 
 
 
 
 
 
199
  ### MiniMax
200
  ```
201
  LLM_API_KEY=your_minimax_api_key
202
  LLM_MODEL=minimax/minimax-m2.7
203
  ```
204
+ Models: `minimax/minimax-m2.7` · `minimax/minimax-m2.5`
205
+ Get key from: [MiniMax Platform](https://platform.minimax.io)
206
 
207
+ ### NVIDIA
208
  ```
209
  LLM_API_KEY=your_nvidia_api_key
210
  LLM_MODEL=nvidia/nemotron-3-super-120b-a12b
211
  ```
 
212
  Get key from: [NVIDIA API](https://api.nvidia.com)
213
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
214
  ### Xiaomi (MiMo)
215
  ```
216
  LLM_API_KEY=your_xiaomi_api_key
217
  LLM_MODEL=xiaomi/mimo-v2-pro
218
  ```
219
+ Models: `xiaomi/mimo-v2-pro` · `xiaomi/mimo-v2-omni`
220
 
221
+ ### Volcengine (Doubao / ByteDance)
222
  ```
223
+ LLM_API_KEY=your_volcengine_api_key
224
+ LLM_MODEL=volcengine/doubao-seed-1-8-251228
225
  ```
226
+ Models: `volcengine/doubao-seed-1-8-251228` · `volcengine/kimi-k2-5-260127` · `volcengine/glm-4-7-251222`
227
  Get key from: [Volcengine](https://www.volcengine.com)
228
 
229
+ ### Groq
230
  ```
231
+ LLM_API_KEY=your_groq_api_key
232
+ LLM_MODEL=groq/mixtral-8x7b-32768
233
  ```
234
+ Get key from: [Groq Console](https://console.groq.com)
235
 
236
+ ### Cohere
237
  ```
238
+ LLM_API_KEY=your_cohere_api_key
239
+ LLM_MODEL=cohere/command-a
240
  ```
241
+ Get key from: [Cohere Dashboard](https://dashboard.cohere.com)
 
242
 
243
+ ### HuggingFace Inference
244
  ```
245
+ LLM_API_KEY=hf_your_token
246
+ LLM_MODEL=huggingface/deepseek-ai/DeepSeek-R1
247
  ```
248
+ Get key from: [HuggingFace Tokens](https://huggingface.co/settings/tokens)
249
 
250
  ### OpenRouter (300+ models via single API key)
251
  ```
 
254
  ```
255
  With OpenRouter, you can access **every model above** with a single API key! Just prefix with `openrouter/`:
256
  - `openrouter/anthropic/claude-sonnet-4-6` β€” Anthropic Claude
257
+ - `openrouter/openai/gpt-5.4` — OpenAI
258
+ - `openrouter/deepseek/deepseek-v3.2` — DeepSeek
259
  - `openrouter/google/gemini-2.5-flash` β€” Google Gemini
 
 
260
  - `openrouter/meta-llama/llama-3.3-70b-instruct:free` β€” Llama (free!)
 
261
  - `openrouter/moonshotai/kimi-k2.5` β€” Moonshot Kimi
262
  - `openrouter/z-ai/glm-5-turbo` β€” Z.ai GLM
263
 
264
  Get key from: [OpenRouter.ai](https://openrouter.ai) Β· [Full model list](https://openrouter.ai/models)
265
 
266
+ ### Kilo Gateway
267
+ ```
268
+ LLM_API_KEY=your_kilocode_api_key
269
+ LLM_MODEL=kilocode/anthropic/claude-opus-4.6
270
+ ```
271
+ Get key from: [Kilo.ai](https://kilo.ai)
272
+
273
  ### Any Other Provider
274
  HuggingClaw supports **any LLM provider** that OpenClaw supports. Just use:
275
  ```
 
278
  ```
279
  The provider prefix is auto-detected and mapped to the appropriate environment variable.
280
 
281
+ Full provider list: [OpenClaw Model Providers](https://docs.openclaw.ai/concepts/model-providers) · [OpenCode Providers](https://opencode.ai/docs/providers)
282
+
283
  ---
284
 
285
  ## πŸ“± Telegram Setup
start.sh CHANGED
@@ -43,38 +43,45 @@ fi
43
  LLM_PROVIDER=$(echo "$LLM_MODEL" | cut -d'/' -f1)
44
 
45
  # Map provider prefix to the correct API key environment variable
46
- # Based on actual OpenRouter model IDs: https://openrouter.ai/api/v1/models
 
47
  case "$LLM_PROVIDER" in
48
- openrouter) export OPENROUTER_API_KEY="$LLM_API_KEY" ;;
49
- anthropic) export ANTHROPIC_API_KEY="$LLM_API_KEY" ;;
50
- openai) export OPENAI_API_KEY="$LLM_API_KEY" ;;
51
- google) export GOOGLE_API_KEY="$LLM_API_KEY" ;;
52
- deepseek) export DEEPSEEK_API_KEY="$LLM_API_KEY" ;;
53
- mistralai) export MISTRAL_API_KEY="$LLM_API_KEY" ;;
54
- qwen) export QWEN_API_KEY="$LLM_API_KEY" ;;
55
- x-ai) export XAI_API_KEY="$LLM_API_KEY" ;;
56
- meta-llama) export META_API_KEY="$LLM_API_KEY" ;;
57
- minimax) export MINIMAX_API_KEY="$LLM_API_KEY" ;;
58
- z-ai) export ZAI_API_KEY="$LLM_API_KEY" ;;
59
- moonshotai) export MOONSHOT_API_KEY="$LLM_API_KEY" ;;
60
- nvidia) export NVIDIA_API_KEY="$LLM_API_KEY" ;;
61
- cohere) export COHERE_API_KEY="$LLM_API_KEY" ;;
62
- perplexity) export PERPLEXITY_API_KEY="$LLM_API_KEY" ;;
63
- bytedance-seed) export BYTEDANCE_API_KEY="$LLM_API_KEY" ;;
64
- xiaomi) export XIAOMI_API_KEY="$LLM_API_KEY" ;;
65
- amazon) export AMAZON_API_KEY="$LLM_API_KEY" ;;
66
- reka|rekaai) export REKA_API_KEY="$LLM_API_KEY" ;;
67
- inception) export INCEPTION_API_KEY="$LLM_API_KEY" ;;
68
- kwaipilot) export KWAIPILOT_API_KEY="$LLM_API_KEY" ;;
69
- ai21) export AI21_API_KEY="$LLM_API_KEY" ;;
70
- baidu) export BAIDU_API_KEY="$LLM_API_KEY" ;;
71
- tencent) export TENCENT_API_KEY="$LLM_API_KEY" ;;
72
- stepfun) export STEPFUN_API_KEY="$LLM_API_KEY" ;;
73
- inflection) export INFLECTION_API_KEY="$LLM_API_KEY" ;;
74
- writer) export WRITER_API_KEY="$LLM_API_KEY" ;;
75
- upstage) export UPSTAGE_API_KEY="$LLM_API_KEY" ;;
 
 
 
 
 
 
 
76
  *)
77
- # Fallback: export as ANTHROPIC (default) and also as generic
78
  export ANTHROPIC_API_KEY="$LLM_API_KEY"
79
  ;;
80
  esac
 
43
  LLM_PROVIDER=$(echo "$LLM_MODEL" | cut -d'/' -f1)
44
 
45
  # Map provider prefix to the correct API key environment variable
46
+ # Based on OpenClaw provider system: /usr/local/lib/node_modules/openclaw/docs/concepts/model-providers.md
47
+ # Note: OpenClaw normalizes some prefixes (z-ai → zai, z.ai → zai, etc.)
48
  case "$LLM_PROVIDER" in
49
+ # ── Core Providers ──
50
+ anthropic) export ANTHROPIC_API_KEY="$LLM_API_KEY" ;;
51
+ openai|openai-codex) export OPENAI_API_KEY="$LLM_API_KEY" ;;
52
+ google|google-vertex) export GEMINI_API_KEY="$LLM_API_KEY" ;;
53
+ deepseek) export DEEPSEEK_API_KEY="$LLM_API_KEY" ;;
54
+ # ── OpenCode Providers ──
55
+ opencode) export OPENCODE_API_KEY="$LLM_API_KEY" ;;
56
+ opencode-go) export OPENCODE_API_KEY="$LLM_API_KEY" ;;
57
+ # ── Gateway/Router Providers ──
58
+ openrouter) export OPENROUTER_API_KEY="$LLM_API_KEY" ;;
59
+ kilocode) export KILOCODE_API_KEY="$LLM_API_KEY" ;;
60
+ vercel-ai-gateway) export AI_GATEWAY_API_KEY="$LLM_API_KEY" ;;
61
+ # ── Chinese/Asian Providers ──
62
+ zai|z-ai|z.ai|zhipu) export ZAI_API_KEY="$LLM_API_KEY" ;;
63
+ moonshot) export MOONSHOT_API_KEY="$LLM_API_KEY" ;;
64
+ kimi-coding) export KIMI_API_KEY="$LLM_API_KEY" ;;
65
+ minimax) export MINIMAX_API_KEY="$LLM_API_KEY" ;;
66
+ qwen|modelstudio) export MODELSTUDIO_API_KEY="$LLM_API_KEY" ;;
67
+ xiaomi) export XIAOMI_API_KEY="$LLM_API_KEY" ;;
68
+ volcengine|volcengine-plan) export VOLCANO_ENGINE_API_KEY="$LLM_API_KEY" ;;
69
+ byteplus|byteplus-plan) export BYTEPLUS_API_KEY="$LLM_API_KEY" ;;
70
+ qianfan) export QIANFAN_API_KEY="$LLM_API_KEY" ;;
71
+ # ── Western Providers ──
72
+ mistral|mistralai) export MISTRAL_API_KEY="$LLM_API_KEY" ;;
73
+ xai|x-ai) export XAI_API_KEY="$LLM_API_KEY" ;;
74
+ nvidia) export NVIDIA_API_KEY="$LLM_API_KEY" ;;
75
+ cohere) export COHERE_API_KEY="$LLM_API_KEY" ;;
76
+ groq) export GROQ_API_KEY="$LLM_API_KEY" ;;
77
+ together) export TOGETHER_API_KEY="$LLM_API_KEY" ;;
78
+ huggingface) export HUGGINGFACE_HUB_TOKEN="$LLM_API_KEY" ;;
79
+ cerebras) export CEREBRAS_API_KEY="$LLM_API_KEY" ;;
80
+ venice) export VENICE_API_KEY="$LLM_API_KEY" ;;
81
+ synthetic) export SYNTHETIC_API_KEY="$LLM_API_KEY" ;;
82
+ github-copilot) export COPILOT_GITHUB_TOKEN="$LLM_API_KEY" ;;
83
+ # ── Fallback: Anthropic (default) ──
84
  *)
 
85
  export ANTHROPIC_API_KEY="$LLM_API_KEY"
86
  ;;
87
  esac