Somrat Sorkar committed on
Commit
abf5aa0
·
1 Parent(s): b19cd4a

Fix provider names to match OpenRouter API (z-ai, mistralai, moonshotai, deepseek, meta-llama, etc.)

Browse files
Files changed (3) hide show
  1. .env.example +98 -53
  2. README.md +95 -79
  3. start.sh +39 -41
.env.example CHANGED
@@ -13,86 +13,131 @@ LLM_API_KEY=your_api_key_here
13
 
14
  # [REQUIRED] LLM model to use (format: provider/model-name)
15
  # Auto-detects provider from prefix — any provider is supported!
16
- #
 
 
 
17
  # Anthropic Claude:
18
- # - anthropic/claude-opus-4-6
19
- # - anthropic/claude-sonnet-4-5
20
- # - anthropic/claude-haiku-4-5
 
21
  #
22
  # OpenAI:
23
- # - openai/gpt-4-turbo
24
- # - openai/gpt-4
25
- # - openai/gpt-3.5-turbo
 
 
 
 
26
  #
27
  # Google Gemini:
 
 
 
28
  # - google/gemini-2.5-flash
29
- # - google/gemini-2.0-flash
30
- # - google/gemini-1.5-pro
31
  #
32
- # Zhipu (ChatGLM) / ZAI:
33
- # - zhipu/glm-4-plus
34
- # - zhipu/glm-4
35
- # - zai/glm-4 (alias)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
  #
37
  # Moonshot (Kimi):
38
- # - moonshot/moonshot-v1-128k
39
- # - moonshot/moonshot-v1-32k
 
40
  #
41
- # MiniMax:
42
- # - minimax/minimax-01
43
- # - minimax/minimax-text-01
 
44
  #
45
- # Mistral:
46
- # - mistral/mistral-large
47
- # - mistral/mistral-medium
48
- # - mistral/mistral-small
49
  #
50
- # Cohere:
51
- # - cohere/command-r
52
- # - cohere/command-r-plus
53
  #
54
- # Groq:
55
- # - groq/mixtral-8x7b-32768
56
- # - groq/llama2-70b-4096
57
  #
58
- # Qwen:
59
- # - qwen/qwen3.6-plus-preview (free, 1M context)
60
- # - qwen/qwen3.5-35b-a3b
61
- # - qwen/qwen3.5-9b
 
 
 
 
 
 
 
 
62
  #
63
- # xAI Grok:
64
  # - x-ai/grok-4.20-beta
65
  # - x-ai/grok-4.20-multi-agent-beta
 
 
 
 
 
 
 
66
  #
67
  # NVIDIA:
68
  # - nvidia/nemotron-3-super-120b-a12b
69
- # - nvidia/nemotron-3-super-120b-a12b (free)
70
  #
71
- # Reka:
72
- # - reka/reka-edge
 
73
  #
74
- # Xiaomi:
75
- # - xiaomi/mimo-v2-pro (1M context)
76
- # - xiaomi/mimo-v2-omni (256K context, multimodal)
77
  #
78
- # ByteDance Seed:
79
- # - bytedance-seed/seed-2.0-lite
80
- # - bytedance-seed/seed-2.0-mini
 
81
  #
82
- # Z.ai GLM:
83
- # - z-ai/glm-5-turbo
 
 
 
84
  #
85
- # KwaiPilot:
86
- # - kwaipilot/kat-coder-pro-v2
 
87
  #
88
- # OpenRouter (any model via OpenRouter proxy):
89
- # - openrouter/google/lyria-3-pro-preview (music generation)
90
- # - openrouter/inception/mercury-2 (fast reasoning)
91
- # Note: With OpenRouter, you can access 100+ models with a single API key!
92
- # See https://openrouter.ai/models for complete list
93
  #
94
- # Or any other provider supported by OpenClaw (format: provider/model-name)
95
- LLM_MODEL=anthropic/claude-sonnet-4-5
96
 
97
  # [REQUIRED] Gateway authentication token
98
  # Generate: openssl rand -hex 32
 
13
 
14
  # [REQUIRED] LLM model to use (format: provider/model-name)
15
  # Auto-detects provider from prefix — any provider is supported!
16
+ # Model IDs sourced from https://openrouter.ai/api/v1/models
17
+ #
18
+ # ── Top-Tier Providers ──
19
+ #
20
  # Anthropic Claude:
21
+ # - anthropic/claude-opus-4.6
22
+ # - anthropic/claude-sonnet-4.6
23
+ # - anthropic/claude-sonnet-4.5
24
+ # - anthropic/claude-haiku-4.5
25
  #
26
  # OpenAI:
27
+ # - openai/gpt-5.4-pro (1M context)
28
+ # - openai/gpt-5.4 (1M context)
29
+ # - openai/gpt-5.4-mini
30
+ # - openai/gpt-5.4-nano
31
+ # - openai/gpt-4.1
32
+ # - openai/gpt-4.1-mini
33
+ # - openai/gpt-4.1-nano
34
  #
35
  # Google Gemini:
36
+ # - google/gemini-3.1-pro-preview
37
+ # - google/gemini-3.1-flash-lite-preview
38
+ # - google/gemini-2.5-pro
39
  # - google/gemini-2.5-flash
 
 
40
  #
41
+ # DeepSeek:
42
+ # - deepseek/deepseek-v3.2
43
+ # - deepseek/deepseek-r1-0528
44
+ # - deepseek/deepseek-r1
45
+ # - deepseek/deepseek-chat-v3.1
46
+ #
47
+ # ── Chinese/Asian Providers ──
48
+ #
49
+ # Qwen (Alibaba):
50
+ # - qwen/qwen3.6-plus-preview:free (1M context, free!)
51
+ # - qwen/qwen3-max
52
+ # - qwen/qwen3-coder
53
+ # - qwen/qwen3.5-397b-a17b
54
+ # - qwen/qwen3.5-35b-a3b
55
+ #
56
+ # Z.ai (GLM):
57
+ # - z-ai/glm-5
58
+ # - z-ai/glm-5-turbo
59
+ # - z-ai/glm-4.7
60
+ # - z-ai/glm-4.7-flash
61
+ # - z-ai/glm-4.5-air:free (free!)
62
  #
63
  # Moonshot (Kimi):
64
+ # - moonshotai/kimi-k2.5
65
+ # - moonshotai/kimi-k2
66
+ # - moonshotai/kimi-k2-thinking
67
  #
68
+ # Xiaomi (MiMo):
69
+ # - xiaomi/mimo-v2-pro (1M context)
70
+ # - xiaomi/mimo-v2-omni (multimodal)
71
+ # - xiaomi/mimo-v2-flash
72
  #
73
+ # ByteDance Seed:
74
+ # - bytedance-seed/seed-2.0-lite
75
+ # - bytedance-seed/seed-2.0-mini
 
76
  #
77
+ # Baidu (ERNIE):
78
+ # - baidu/ernie-4.5-300b-a47b
79
+ # - baidu/ernie-4.5-21b-a3b
80
  #
81
+ # Tencent:
82
+ # - tencent/hunyuan-a13b-instruct
 
83
  #
84
+ # StepFun:
85
+ # - stepfun/step-3.5-flash
86
+ # - stepfun/step-3.5-flash:free (free!)
87
+ #
88
+ # ── Western Providers ──
89
+ #
90
+ # Mistral:
91
+ # - mistralai/mistral-large-2512
92
+ # - mistralai/mistral-medium-3.1
93
+ # - mistralai/mistral-small-2603
94
+ # - mistralai/devstral-medium (coding)
95
+ # - mistralai/codestral-2508
96
  #
97
+ # xAI (Grok):
98
  # - x-ai/grok-4.20-beta
99
  # - x-ai/grok-4.20-multi-agent-beta
100
+ # - x-ai/grok-4.1-fast
101
+ # - x-ai/grok-4
102
+ #
103
+ # Meta (Llama):
104
+ # - meta-llama/llama-4-maverick
105
+ # - meta-llama/llama-4-scout
106
+ # - meta-llama/llama-3.3-70b-instruct
107
  #
108
  # NVIDIA:
109
  # - nvidia/nemotron-3-super-120b-a12b
110
+ # - nvidia/nemotron-3-super-120b-a12b:free (free!)
111
  #
112
+ # Cohere:
113
+ # - cohere/command-a
114
+ # - cohere/command-r-plus-08-2024
115
  #
116
+ # Perplexity:
117
+ # - perplexity/sonar-deep-research
118
+ # - perplexity/sonar-pro
119
  #
120
+ # MiniMax:
121
+ # - minimax/minimax-m2.7
122
+ # - minimax/minimax-m2.5
123
+ # - minimax/minimax-m2.5:free (free!)
124
  #
125
+ # ── Speed & Specialty ──
126
+ #
127
+ # Inception (ultra-fast reasoning):
128
+ # - inception/mercury-2 (1000+ tok/sec)
129
+ # - inception/mercury-coder
130
  #
131
+ # Amazon:
132
+ # - amazon/nova-premier-v1
133
+ # - amazon/nova-pro-v1
134
  #
135
+ # ── OpenRouter (100+ models via single API key) ──
136
+ # Use any model above via OpenRouter with a single API key!
137
+ # See https://openrouter.ai/models for complete list
 
 
138
  #
139
+ # Or any other provider format: provider/model-name (auto-detected)
140
+ LLM_MODEL=anthropic/claude-sonnet-4.5
141
 
142
  # [REQUIRED] Gateway authentication token
143
  # Generate: openssl rand -hex 32
README.md CHANGED
@@ -118,108 +118,122 @@ See **`.env.example`** for the complete reference with examples.
118
 
119
  ## 🤖 LLM Provider Setup
120
 
121
- Just set `LLM_MODEL` with the correct provider prefix — **any provider is supported**! The provider is auto-detected from the model name.
122
 
123
  ### Anthropic (Claude)
124
  ```
125
  LLM_API_KEY=sk-ant-v0-...
126
- LLM_MODEL=anthropic/claude-haiku-4-5
127
  ```
128
- Models: `anthropic/claude-opus-4-6` · `anthropic/claude-sonnet-4-5` · `anthropic/claude-haiku-4-5`
129
 
130
  ### OpenAI
131
  ```
132
  LLM_API_KEY=sk-...
133
- LLM_MODEL=openai/gpt-4
134
  ```
135
- Models: `openai/gpt-4-turbo` · `openai/gpt-4` · `openai/gpt-3.5-turbo`
136
 
137
  ### Google (Gemini)
138
  ```
139
  LLM_API_KEY=AIzaSy...
140
  LLM_MODEL=google/gemini-2.5-flash
141
  ```
142
- Models: `google/gemini-2.5-flash` · `google/gemini-2.0-flash` · `google/gemini-1.5-pro`
143
 
144
- ### Zhipu (ChatGLM) / ZAI
145
  ```
146
- LLM_API_KEY=your_zhipu_api_key
147
- LLM_MODEL=zhipu/glm-4-plus
148
  ```
149
- Models: `zhipu/glm-4-plus` · `zhipu/glm-4` · `zai/glm-4` (alias)
150
- Get key from: [Zhipu Platform](https://open.bigmodel.cn)
151
 
152
- ### Moonshot (Kimi)
153
  ```
154
- LLM_API_KEY=sk-...
155
- LLM_MODEL=moonshot/moonshot-v1-128k
156
  ```
157
- Models: `moonshot/moonshot-v1-128k` · `moonshot/moonshot-v1-32k`
158
- Get key from: [Moonshot API](https://platform.moonshot.cn)
159
 
160
- ### MiniMax
161
  ```
162
- LLM_API_KEY=your_minimax_api_key
163
- LLM_MODEL=minimax/minimax-01
164
  ```
165
- Models: `minimax/minimax-01` · `minimax/minimax-text-01`
166
- Get key from: [MiniMax Platform](https://api.minimaxi.com)
167
 
168
- ### Mistral
169
  ```
170
- LLM_API_KEY=your_mistral_api_key
171
- LLM_MODEL=mistral/mistral-large
172
  ```
173
- Models: `mistral/mistral-large` · `mistral/mistral-medium` · `mistral/mistral-small`
174
- Get key from: [Mistral Console](https://console.mistral.ai)
175
 
176
- ### Cohere
177
  ```
178
- LLM_API_KEY=your_cohere_api_key
179
- LLM_MODEL=cohere/command-r
180
  ```
181
- Models: `cohere/command-r` · `cohere/command-r-plus`
182
- Get key from: [Cohere Dashboard](https://dashboard.cohere.com)
183
 
184
- ### Groq
185
  ```
186
- LLM_API_KEY=your_groq_api_key
187
- LLM_MODEL=groq/mixtral-8x7b-32768
188
  ```
189
- Models: `groq/mixtral-8x7b-32768` · `groq/llama2-70b-4096`
190
- Get key from: [Groq Console](https://console.groq.com)
191
 
192
- ### Qwen
193
  ```
194
- LLM_API_KEY=your_qwen_api_key
195
- LLM_MODEL=qwen/qwen3.6-plus-preview
196
  ```
197
- Models: `qwen/qwen3.6-plus-preview` (free!) · `qwen/qwen3.5-35b-a3b` · `qwen/qwen3.5-9b`
198
- Get key from: [Qwen API](https://dashscope.aliyun.com)
199
 
200
- ### xAI (Grok)
201
  ```
202
- LLM_API_KEY=your_xai_api_key
203
- LLM_MODEL=x-ai/grok-4.20-beta
204
  ```
205
- Models: `x-ai/grok-4.20-beta` · `x-ai/grok-4.20-multi-agent-beta`
206
- Get key from: [xAI Console](https://console.x.ai)
207
 
208
  ### NVIDIA (Nemotron)
209
  ```
210
  LLM_API_KEY=your_nvidia_api_key
211
  LLM_MODEL=nvidia/nemotron-3-super-120b-a12b
212
  ```
213
- Models: `nvidia/nemotron-3-super-120b-a12b` · `nvidia/nemotron-3-super-120b-a12b` (free)
214
  Get key from: [NVIDIA API](https://api.nvidia.com)
215
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
216
  ### Xiaomi (MiMo)
217
  ```
218
  LLM_API_KEY=your_xiaomi_api_key
219
  LLM_MODEL=xiaomi/mimo-v2-pro
220
  ```
221
- Models: `xiaomi/mimo-v2-pro` (1M context) · `xiaomi/mimo-v2-omni` (multimodal)
222
- Get key from: [Xiaomi](https://xiaoai.xiaomi.com)
223
 
224
  ### ByteDance (Seed)
225
  ```
@@ -227,40 +241,42 @@ LLM_API_KEY=your_bytedance_api_key
227
  LLM_MODEL=bytedance-seed/seed-2.0-lite
228
  ```
229
  Models: `bytedance-seed/seed-2.0-lite` · `bytedance-seed/seed-2.0-mini`
230
- Get key from: [ByteDance](https://www.volcengine.com)
231
 
232
- ### Z.ai (GLM)
233
  ```
234
- LLM_API_KEY=your_zai_api_key
235
- LLM_MODEL=z-ai/glm-5-turbo
236
  ```
237
- Models: `z-ai/glm-5-turbo`
238
- Get key from: [Z.ai](https://z.ai)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
239
 
240
- ### OpenRouter (100+ models via single API)
241
- ```
242
- LLM_API_KEY=your_openrouter_api_key
243
- LLM_MODEL=openrouter/google/lyria-3-pro-preview
244
- ```
245
- **Popular models via OpenRouter:**
246
- - `openrouter/openai/gpt-5.4-pro` — Latest OpenAI (1M context)
247
- - `openrouter/openai/gpt-5.4-mini` — Fast, efficient OpenAI
248
- - `openrouter/google/gemini-3.1-flash-lite-preview` — Google's latest
249
- - `openrouter/anthropic/claude-opus-4-6` — Latest Claude via OpenRouter
250
- - `openrouter/mistral/mistral-small-2603` — Mistral's latest
251
- - `openrouter/inception/mercury-2` — Ultra-fast reasoning (1000 tok/sec)
252
- - `openrouter/qwen/qwen3.6-plus-preview` — Free tier available!
253
- - `openrouter/x-ai/grok-4.20-beta` — xAI's latest Grok
254
- - `openrouter/nvidia/nemotron-3-super-120b-a12b` — NVIDIA's powerhouse
255
- - `openrouter/xiaomi/mimo-v2-pro` — Xiaomi's 1M context model
256
-
257
- **Why OpenRouter?**
258
- - Single API key for 100+ models
259
- - Unified pricing and routing
260
- - Auto-fallback to other models
261
- - No vendor lock-in
262
-
263
- Get key from: [OpenRouter.ai](https://openrouter.ai) (free tier available!)
264
 
265
  ### Any Other Provider
266
  HuggingClaw supports **any LLM provider** that OpenClaw supports. Just use:
 
118
 
119
  ## 🤖 LLM Provider Setup
120
 
121
+ Just set `LLM_MODEL` with the correct provider prefix — **any provider is supported**! The provider is auto-detected from the model name. All model IDs sourced from [OpenRouter](https://openrouter.ai/models).
122
 
123
  ### Anthropic (Claude)
124
  ```
125
  LLM_API_KEY=sk-ant-v0-...
126
+ LLM_MODEL=anthropic/claude-sonnet-4.5
127
  ```
128
+ Models: `anthropic/claude-opus-4.6` · `anthropic/claude-sonnet-4.6` · `anthropic/claude-sonnet-4.5` · `anthropic/claude-haiku-4.5`
129
 
130
  ### OpenAI
131
  ```
132
  LLM_API_KEY=sk-...
133
+ LLM_MODEL=openai/gpt-5.4
134
  ```
135
+ Models: `openai/gpt-5.4-pro` · `openai/gpt-5.4` · `openai/gpt-5.4-mini` · `openai/gpt-5.4-nano` · `openai/gpt-4.1` · `openai/gpt-4.1-mini`
136
 
137
  ### Google (Gemini)
138
  ```
139
  LLM_API_KEY=AIzaSy...
140
  LLM_MODEL=google/gemini-2.5-flash
141
  ```
142
+ Models: `google/gemini-3.1-pro-preview` · `google/gemini-3.1-flash-lite-preview` · `google/gemini-2.5-pro` · `google/gemini-2.5-flash`
143
 
144
+ ### DeepSeek
145
  ```
146
+ LLM_API_KEY=sk-...
147
+ LLM_MODEL=deepseek/deepseek-v3.2
148
  ```
149
+ Models: `deepseek/deepseek-v3.2` · `deepseek/deepseek-r1-0528` · `deepseek/deepseek-r1` · `deepseek/deepseek-chat-v3.1`
150
+ Get key from: [DeepSeek Platform](https://platform.deepseek.com)
151
 
152
+ ### Qwen (Alibaba)
153
  ```
154
+ LLM_API_KEY=your_qwen_api_key
155
+ LLM_MODEL=qwen/qwen3-max
156
  ```
157
+ Models: `qwen/qwen3.6-plus-preview:free` (free!) · `qwen/qwen3-max` · `qwen/qwen3-coder` · `qwen/qwen3.5-397b-a17b` · `qwen/qwen3.5-35b-a3b`
158
+ Get key from: [DashScope](https://dashscope.aliyun.com)
159
 
160
+ ### Z.ai (GLM)
161
  ```
162
+ LLM_API_KEY=your_zai_api_key
163
+ LLM_MODEL=z-ai/glm-5-turbo
164
  ```
165
+ Models: `z-ai/glm-5` · `z-ai/glm-5-turbo` · `z-ai/glm-4.7` · `z-ai/glm-4.7-flash` · `z-ai/glm-4.5-air:free` (free!)
166
+ Get key from: [Z.ai](https://z.ai)
167
 
168
+ ### Moonshot (Kimi)
169
  ```
170
+ LLM_API_KEY=sk-...
171
+ LLM_MODEL=moonshotai/kimi-k2.5
172
  ```
173
+ Models: `moonshotai/kimi-k2.5` · `moonshotai/kimi-k2` · `moonshotai/kimi-k2-thinking`
174
+ Get key from: [Moonshot API](https://platform.moonshot.cn)
175
 
176
+ ### Mistral
177
  ```
178
+ LLM_API_KEY=your_mistral_api_key
179
+ LLM_MODEL=mistralai/mistral-large-2512
180
  ```
181
+ Models: `mistralai/mistral-large-2512` · `mistralai/mistral-medium-3.1` · `mistralai/mistral-small-2603` · `mistralai/devstral-medium` · `mistralai/codestral-2508`
182
+ Get key from: [Mistral Console](https://console.mistral.ai)
183
 
184
+ ### xAI (Grok)
185
  ```
186
+ LLM_API_KEY=your_xai_api_key
187
+ LLM_MODEL=x-ai/grok-4.20-beta
188
  ```
189
+ Models: `x-ai/grok-4.20-beta` · `x-ai/grok-4.20-multi-agent-beta` · `x-ai/grok-4.1-fast` · `x-ai/grok-4`
190
+ Get key from: [xAI Console](https://console.x.ai)
191
 
192
+ ### Meta (Llama)
193
  ```
194
+ LLM_API_KEY=your_meta_api_key
195
+ LLM_MODEL=meta-llama/llama-4-maverick
196
  ```
197
+ Models: `meta-llama/llama-4-maverick` · `meta-llama/llama-4-scout` · `meta-llama/llama-3.3-70b-instruct:free` (free!)
 
198
 
199
+ ### MiniMax
200
  ```
201
+ LLM_API_KEY=your_minimax_api_key
202
+ LLM_MODEL=minimax/minimax-m2.7
203
  ```
204
+ Models: `minimax/minimax-m2.7` · `minimax/minimax-m2.5` · `minimax/minimax-m2.5:free` (free!)
205
+ Get key from: [MiniMax Platform](https://api.minimaxi.com)
206
 
207
  ### NVIDIA (Nemotron)
208
  ```
209
  LLM_API_KEY=your_nvidia_api_key
210
  LLM_MODEL=nvidia/nemotron-3-super-120b-a12b
211
  ```
212
+ Models: `nvidia/nemotron-3-super-120b-a12b` · `nvidia/nemotron-3-super-120b-a12b:free` (free!)
213
  Get key from: [NVIDIA API](https://api.nvidia.com)
214
 
215
+ ### Cohere
216
+ ```
217
+ LLM_API_KEY=your_cohere_api_key
218
+ LLM_MODEL=cohere/command-a
219
+ ```
220
+ Models: `cohere/command-a` · `cohere/command-r-plus-08-2024`
221
+ Get key from: [Cohere Dashboard](https://dashboard.cohere.com)
222
+
223
+ ### Perplexity
224
+ ```
225
+ LLM_API_KEY=your_perplexity_api_key
226
+ LLM_MODEL=perplexity/sonar-pro
227
+ ```
228
+ Models: `perplexity/sonar-deep-research` · `perplexity/sonar-pro` · `perplexity/sonar-reasoning-pro`
229
+ Get key from: [Perplexity API](https://www.perplexity.ai/settings/api)
230
+
231
  ### Xiaomi (MiMo)
232
  ```
233
  LLM_API_KEY=your_xiaomi_api_key
234
  LLM_MODEL=xiaomi/mimo-v2-pro
235
  ```
236
+ Models: `xiaomi/mimo-v2-pro` (1M context) · `xiaomi/mimo-v2-omni` (multimodal) · `xiaomi/mimo-v2-flash`
 
237
 
238
  ### ByteDance (Seed)
239
  ```
 
241
  LLM_MODEL=bytedance-seed/seed-2.0-lite
242
  ```
243
  Models: `bytedance-seed/seed-2.0-lite` · `bytedance-seed/seed-2.0-mini`
244
+ Get key from: [Volcengine](https://www.volcengine.com)
245
 
246
+ ### Baidu (ERNIE)
247
  ```
248
+ LLM_API_KEY=your_baidu_api_key
249
+ LLM_MODEL=baidu/ernie-4.5-300b-a47b
250
  ```
251
+ Models: `baidu/ernie-4.5-300b-a47b` · `baidu/ernie-4.5-21b-a3b`
252
+
253
+ ### Inception (Mercury — ultra-fast)
254
+ ```
255
+ LLM_API_KEY=your_inception_api_key
256
+ LLM_MODEL=inception/mercury-2
257
+ ```
258
+ Models: `inception/mercury-2` (1000+ tok/sec!) · `inception/mercury-coder`
259
+ Get key from: [Inception Labs](https://www.inceptionlabs.ai)
260
+
261
+ ### Amazon (Nova)
262
+ ```
263
+ LLM_API_KEY=your_amazon_api_key
264
+ LLM_MODEL=amazon/nova-premier-v1
265
+ ```
266
+ Models: `amazon/nova-premier-v1` · `amazon/nova-pro-v1` · `amazon/nova-lite-v1`
267
+
268
+ ### OpenRouter (300+ models via single API key)
269
+ ```
270
+ LLM_API_KEY=sk-or-v1-...
271
+ LLM_MODEL=openrouter/auto
272
+ ```
273
+ With OpenRouter, you can access **every model above** with a single API key! Just prefix with `openrouter/`:
274
+ - `openrouter/openai/gpt-5.4-pro`
275
+ - `openrouter/anthropic/claude-opus-4.6`
276
+ - `openrouter/deepseek/deepseek-v3.2`
277
+ - `openrouter/qwen/qwen3.6-plus-preview:free`
278
 
279
+ Get key from: [OpenRouter.ai](https://openrouter.ai) · [Full model list](https://openrouter.ai/models)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
280
 
281
  ### Any Other Provider
282
  HuggingClaw supports **any LLM provider** that OpenClaw supports. Just use:
start.sh CHANGED
@@ -39,47 +39,45 @@ if [[ "$LLM_MODEL" == "anthropic/gemini"* ]]; then
39
  echo "⚠️ Corrected model from anthropic/gemini* to google/gemini*"
40
  fi
41
 
42
- # Auto-detect and set provider-specific API key from model name
43
- if [[ "$LLM_MODEL" == "openrouter/"* ]]; then
44
- export OPENROUTER_API_KEY="$LLM_API_KEY"
45
- elif [[ "$LLM_MODEL" == "google/"* ]]; then
46
- export GOOGLE_API_KEY="$LLM_API_KEY"
47
- elif [[ "$LLM_MODEL" == "openai/"* ]]; then
48
- export OPENAI_API_KEY="$LLM_API_KEY"
49
- elif [[ "$LLM_MODEL" == "zhipu/"* ]] || [[ "$LLM_MODEL" == "zai/"* ]]; then
50
- export ZHIPU_API_KEY="$LLM_API_KEY"
51
- elif [[ "$LLM_MODEL" == "moonshot/"* ]]; then
52
- export MOONSHOT_API_KEY="$LLM_API_KEY"
53
- elif [[ "$LLM_MODEL" == "minimax/"* ]]; then
54
- export MINIMAX_API_KEY="$LLM_API_KEY"
55
- elif [[ "$LLM_MODEL" == "mistral/"* ]] || [[ "$LLM_MODEL" == "mistralai/"* ]]; then
56
- export MISTRAL_API_KEY="$LLM_API_KEY"
57
- elif [[ "$LLM_MODEL" == "cohere/"* ]]; then
58
- export COHERE_API_KEY="$LLM_API_KEY"
59
- elif [[ "$LLM_MODEL" == "groq/"* ]]; then
60
- export GROQ_API_KEY="$LLM_API_KEY"
61
- elif [[ "$LLM_MODEL" == "qwen/"* ]]; then
62
- export QWEN_API_KEY="$LLM_API_KEY"
63
- elif [[ "$LLM_MODEL" == "x-ai/"* ]] || [[ "$LLM_MODEL" == "xai/"* ]]; then
64
- export XAI_API_KEY="$LLM_API_KEY"
65
- elif [[ "$LLM_MODEL" == "nvidia/"* ]]; then
66
- export NVIDIA_API_KEY="$LLM_API_KEY"
67
- elif [[ "$LLM_MODEL" == "reka/"* ]]; then
68
- export REKA_API_KEY="$LLM_API_KEY"
69
- elif [[ "$LLM_MODEL" == "bytedance/"* ]] || [[ "$LLM_MODEL" == "seed/"* ]]; then
70
- export BYTEDANCE_API_KEY="$LLM_API_KEY"
71
- elif [[ "$LLM_MODEL" == "kwaipilot/"* ]]; then
72
- export KWAIPILOT_API_KEY="$LLM_API_KEY"
73
- elif [[ "$LLM_MODEL" == "z-ai/"* ]]; then
74
- export ZAI_API_KEY="$LLM_API_KEY"
75
- elif [[ "$LLM_MODEL" == "inception/"* ]]; then
76
- export INCEPTION_API_KEY="$LLM_API_KEY"
77
- elif [[ "$LLM_MODEL" == "xiaomi/"* ]]; then
78
- export XIAOMI_API_KEY="$LLM_API_KEY"
79
- else
80
- # Default to Anthropic for claude/* or anthropic/* models
81
- export ANTHROPIC_API_KEY="$LLM_API_KEY"
82
- fi
83
 
84
  # ── Setup directories ──
85
  mkdir -p /home/node/.openclaw/agents/main/sessions
 
39
  echo "⚠️ Corrected model from anthropic/gemini* to google/gemini*"
40
  fi
41
 
42
+ # Extract provider prefix from model name (e.g. "google/gemini-2.5-flash" → "google")
43
+ LLM_PROVIDER=$(echo "$LLM_MODEL" | cut -d'/' -f1)
44
+
45
+ # Map provider prefix to the correct API key environment variable
46
+ # Based on actual OpenRouter model IDs: https://openrouter.ai/api/v1/models
47
+ case "$LLM_PROVIDER" in
48
+ openrouter) export OPENROUTER_API_KEY="$LLM_API_KEY" ;;
49
+ anthropic) export ANTHROPIC_API_KEY="$LLM_API_KEY" ;;
50
+ openai) export OPENAI_API_KEY="$LLM_API_KEY" ;;
51
+ google) export GOOGLE_API_KEY="$LLM_API_KEY" ;;
52
+ deepseek) export DEEPSEEK_API_KEY="$LLM_API_KEY" ;;
53
+ mistralai) export MISTRAL_API_KEY="$LLM_API_KEY" ;;
54
+ qwen) export QWEN_API_KEY="$LLM_API_KEY" ;;
55
+ x-ai) export XAI_API_KEY="$LLM_API_KEY" ;;
56
+ meta-llama) export META_API_KEY="$LLM_API_KEY" ;;
57
+ minimax) export MINIMAX_API_KEY="$LLM_API_KEY" ;;
58
+ z-ai) export ZAI_API_KEY="$LLM_API_KEY" ;;
59
+ moonshotai) export MOONSHOT_API_KEY="$LLM_API_KEY" ;;
60
+ nvidia) export NVIDIA_API_KEY="$LLM_API_KEY" ;;
61
+ cohere) export COHERE_API_KEY="$LLM_API_KEY" ;;
62
+ perplexity) export PERPLEXITY_API_KEY="$LLM_API_KEY" ;;
63
+ bytedance-seed) export BYTEDANCE_API_KEY="$LLM_API_KEY" ;;
64
+ xiaomi) export XIAOMI_API_KEY="$LLM_API_KEY" ;;
65
+ amazon) export AMAZON_API_KEY="$LLM_API_KEY" ;;
66
+ reka|rekaai) export REKA_API_KEY="$LLM_API_KEY" ;;
67
+ inception) export INCEPTION_API_KEY="$LLM_API_KEY" ;;
68
+ kwaipilot) export KWAIPILOT_API_KEY="$LLM_API_KEY" ;;
69
+ ai21) export AI21_API_KEY="$LLM_API_KEY" ;;
70
+ baidu) export BAIDU_API_KEY="$LLM_API_KEY" ;;
71
+ tencent) export TENCENT_API_KEY="$LLM_API_KEY" ;;
72
+ stepfun) export STEPFUN_API_KEY="$LLM_API_KEY" ;;
73
+ inflection) export INFLECTION_API_KEY="$LLM_API_KEY" ;;
74
+ writer) export WRITER_API_KEY="$LLM_API_KEY" ;;
75
+ upstage) export UPSTAGE_API_KEY="$LLM_API_KEY" ;;
76
+ *)
77
+ # Fallback: default to Anthropic for claude/* and any unrecognized prefix
78
+ export ANTHROPIC_API_KEY="$LLM_API_KEY"
79
+ ;;
80
+ esac
 
 
81
 
82
  # ── Setup directories ──
83
  mkdir -p /home/node/.openclaw/agents/main/sessions