Spaces:
Running
Running
Update app/.pyfun
Browse files- app/.pyfun +117 -14
app/.pyfun
CHANGED
|
@@ -83,19 +83,19 @@ SEARCH_TIMEOUT_SEC = "30"
|
|
| 83 |
active = "true"
|
| 84 |
base_url = "https://openrouter.ai/api/v1"
|
| 85 |
env_key = "OPENROUTER_API_KEY" # → .env: OPENROUTER_API_KEY=sk-or-...
|
| 86 |
-
default_model = "
|
| 87 |
-
models = "openai/gpt-4o, meta-llama/llama-3-8b-instruct, mistralai/mistral-7b-instruct"
|
| 88 |
fallback_to = "" # last in chain, no further fallback
|
| 89 |
[LLM_PROVIDER.openrouter_END]
|
| 90 |
|
| 91 |
-
[LLM_PROVIDER.huggingface]
|
| 92 |
active = "true"
|
| 93 |
base_url = "https://api-inference.huggingface.co/models"
|
| 94 |
env_key = "HF_TOKEN" # → .env: HF_TOKEN=hf_...
|
| 95 |
-
default_model = "
|
| 96 |
-
models = "
|
| 97 |
fallback_to = ""
|
| 98 |
-
[LLM_PROVIDER.huggingface_END]
|
| 99 |
|
| 100 |
# ── Add more LLM providers below ──────────────────────────────────────────
|
| 101 |
# [LLM_PROVIDER.mistral]
|
|
@@ -287,19 +287,122 @@ SEARCH_TIMEOUT_SEC = "30"
|
|
| 287 |
capabilities = "text, code, fast, cheap"
|
| 288 |
[MODEL.mistral-7b-instruct_END]
|
| 289 |
|
| 290 |
-
|
| 291 |
-
|
|
|
|
|
|
|
|
|
|
| 292 |
|
| 293 |
[MODEL.dolphin-mistral-24b-venice-edition]
|
| 294 |
-
provider = "
|
| 295 |
-
context_tokens = ""
|
| 296 |
-
max_output_tokens = ""
|
| 297 |
requests_per_min = ""
|
| 298 |
requests_per_day = ""
|
| 299 |
-
cost_input_per_1k = ""
|
| 300 |
-
cost_output_per_1k = ""
|
| 301 |
capabilities = "uncensored, text, code, fast, very cheap"
|
| 302 |
-
[
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 303 |
|
| 304 |
[MODELS_END]
|
| 305 |
|
|
|
|
| 83 |
active = "true"
|
| 84 |
base_url = "https://openrouter.ai/api/v1"
|
| 85 |
env_key = "OPENROUTER_API_KEY" # → .env: OPENROUTER_API_KEY=sk-or-...
|
| 86 |
+
default_model = "cognitivecomputations/dolphin-mistral-24b-venice-edition:free"
|
| 87 |
+
models = "openai/gpt-4o, meta-llama/llama-3-8b-instruct, mistralai/mistral-7b-instruct, cognitivecomputations/dolphin-mistral-24b-venice-edition:free, deepseek/deepseek-chat-v3.1:free, nvidia/nemotron-nano-9b-v2:free, google/gemma-3-27b-it:free, openai/gpt-oss-20b:free, qwen/qwen3-coder:free, qwen/qwen2.5-vl-72b-instruct:free, nousresearch/deephermes-3-llama-3-8b-preview:free, mistralai/Mistral-7B-Instruct-v0.3, meta-llama/Llama-3.3-70B-Instruct"
|
| 88 |
fallback_to = "" # last in chain, no further fallback
|
| 89 |
[LLM_PROVIDER.openrouter_END]
|
| 90 |
|
| 91 |
+
[LLM_PROVIDER.huggingface]
|
| 92 |
active = "true"
|
| 93 |
base_url = "https://api-inference.huggingface.co/models"
|
| 94 |
env_key = "HF_TOKEN" # → .env: HF_TOKEN=hf_...
|
| 95 |
+
default_model = "meta-llama/Llama-3.3-70B-Instruct"
|
| 96 |
+
models = "meta-llama/Llama-3.3-70B-Instruct, mistralai/Mistral-7B-Instruct-v0.3, google/gemma-3-27b-it, microsoft/phi-4, HuggingFaceH4/zephyr-7b-beta, tiiuae/falcon-7b-instruct"
|
| 97 |
fallback_to = ""
|
| 98 |
+
[LLM_PROVIDER.huggingface_END]
|
| 99 |
|
| 100 |
# ── Add more LLM providers below ──────────────────────────────────────────
|
| 101 |
# [LLM_PROVIDER.mistral]
|
|
|
|
| 287 |
capabilities = "text, code, fast, cheap"
|
| 288 |
[MODEL.mistral-7b-instruct_END]
|
| 289 |
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
# Free / OpenRouter / HuggingFace models
|
| 294 |
+
# Source: openrouter.ai, huggingface.co (Stand März 2026 – Werte können sich ändern!)
|
| 295 |
|
| 296 |
[MODEL.dolphin-mistral-24b-venice-edition]
|
| 297 |
+
provider = "openrouter"
|
| 298 |
+
context_tokens = "32768"
|
| 299 |
+
max_output_tokens = "4096"
|
| 300 |
requests_per_min = ""
|
| 301 |
requests_per_day = ""
|
| 302 |
+
cost_input_per_1k = "0.00000"
|
| 303 |
+
cost_output_per_1k = "0.00000"
|
| 304 |
capabilities = "uncensored, text, code, fast, very cheap"
|
| 305 |
+
[MODEL.dolphin-mistral-24b-venice-edition_END]
|
| 306 |
+
|
| 307 |
+
[MODEL.deepseek-chat-v3.1]
|
| 308 |
+
provider = "openrouter"
|
| 309 |
+
context_tokens = "163840"
|
| 310 |
+
max_output_tokens = "8192"
|
| 311 |
+
requests_per_min = ""
|
| 312 |
+
requests_per_day = ""
|
| 313 |
+
cost_input_per_1k = "0.00000"
|
| 314 |
+
cost_output_per_1k = "0.00000"
|
| 315 |
+
capabilities = "text, code, reasoning, fast, free"
|
| 316 |
+
[MODEL.deepseek-chat-v3.1_END]
|
| 317 |
+
|
| 318 |
+
[MODEL.nvidia-nemotron-nano-9b-v2]
|
| 319 |
+
provider = "openrouter"
|
| 320 |
+
context_tokens = "131072"
|
| 321 |
+
max_output_tokens = "4096"
|
| 322 |
+
requests_per_min = ""
|
| 323 |
+
requests_per_day = ""
|
| 324 |
+
cost_input_per_1k = "0.00000"
|
| 325 |
+
cost_output_per_1k = "0.00000"
|
| 326 |
+
capabilities = "text, code, reasoning, fast, free"
|
| 327 |
+
[MODEL.nvidia-nemotron-nano-9b-v2_END]
|
| 328 |
+
|
| 329 |
+
[MODEL.gemma-3-27b-it]
|
| 330 |
+
provider = "openrouter"
|
| 331 |
+
context_tokens = "131072"
|
| 332 |
+
max_output_tokens = "8192"
|
| 333 |
+
requests_per_min = ""
|
| 334 |
+
requests_per_day = ""
|
| 335 |
+
cost_input_per_1k = "0.00000"
|
| 336 |
+
cost_output_per_1k = "0.00000"
|
| 337 |
+
capabilities = "text, code, vision, multilingual, free"
|
| 338 |
+
[MODEL.gemma-3-27b-it_END]
|
| 339 |
+
|
| 340 |
+
[MODEL.gpt-oss-20b]
|
| 341 |
+
provider = "openrouter"
|
| 342 |
+
context_tokens = "128000"
|
| 343 |
+
max_output_tokens = "4096"
|
| 344 |
+
requests_per_min = ""
|
| 345 |
+
requests_per_day = ""
|
| 346 |
+
cost_input_per_1k = "0.00000"
|
| 347 |
+
cost_output_per_1k = "0.00000"
|
| 348 |
+
capabilities = "text, code, free"
|
| 349 |
+
[MODEL.gpt-oss-20b_END]
|
| 350 |
+
|
| 351 |
+
[MODEL.qwen3-coder]
|
| 352 |
+
provider = "openrouter"
|
| 353 |
+
context_tokens = "262144"
|
| 354 |
+
max_output_tokens = "8192"
|
| 355 |
+
requests_per_min = ""
|
| 356 |
+
requests_per_day = ""
|
| 357 |
+
cost_input_per_1k = "0.00000"
|
| 358 |
+
cost_output_per_1k = "0.00000"
|
| 359 |
+
capabilities = "text, code, agentic, long-context, free"
|
| 360 |
+
[MODEL.qwen3-coder_END]
|
| 361 |
+
|
| 362 |
+
[MODEL.qwen2.5-vl-72b-instruct]
|
| 363 |
+
provider = "openrouter"
|
| 364 |
+
context_tokens = "131072"
|
| 365 |
+
max_output_tokens = "8192"
|
| 366 |
+
requests_per_min = ""
|
| 367 |
+
requests_per_day = ""
|
| 368 |
+
cost_input_per_1k = "0.00000"
|
| 369 |
+
cost_output_per_1k = "0.00000"
|
| 370 |
+
capabilities = "text, code, vision, multilingual, free"
|
| 371 |
+
[MODEL.qwen2.5-vl-72b-instruct_END]
|
| 372 |
+
|
| 373 |
+
[MODEL.deephermes-3-llama-3-8b-preview]
|
| 374 |
+
provider = "openrouter"
|
| 375 |
+
context_tokens = "131072"
|
| 376 |
+
max_output_tokens = "4096"
|
| 377 |
+
requests_per_min = ""
|
| 378 |
+
requests_per_day = ""
|
| 379 |
+
cost_input_per_1k = "0.00000"
|
| 380 |
+
cost_output_per_1k = "0.00000"
|
| 381 |
+
capabilities = "text, code, reasoning, uncensored, free"
|
| 382 |
+
[MODEL.deephermes-3-llama-3-8b-preview_END]
|
| 383 |
+
|
| 384 |
+
[MODEL.Mistral-7B-Instruct-v0.3]
|
| 385 |
+
provider = "openrouter"
|
| 386 |
+
context_tokens = "32768"
|
| 387 |
+
max_output_tokens = "4096"
|
| 388 |
+
requests_per_min = "60"
|
| 389 |
+
requests_per_day = "10000"
|
| 390 |
+
cost_input_per_1k = "0.00006"
|
| 391 |
+
cost_output_per_1k = "0.00006"
|
| 392 |
+
capabilities = "text, code, fast, cheap"
|
| 393 |
+
[MODEL.Mistral-7B-Instruct-v0.3_END]
|
| 394 |
+
|
| 395 |
+
[MODEL.Llama-3.3-70B-Instruct]
|
| 396 |
+
provider = "openrouter"
|
| 397 |
+
context_tokens = "131072"
|
| 398 |
+
max_output_tokens = "8192"
|
| 399 |
+
requests_per_min = "60"
|
| 400 |
+
requests_per_day = "10000"
|
| 401 |
+
cost_input_per_1k = "0.00012"
|
| 402 |
+
cost_output_per_1k = "0.00030"
|
| 403 |
+
capabilities = "text, code, reasoning, multilingual"
|
| 404 |
+
[MODEL.Llama-3.3-70B-Instruct_END]
|
| 405 |
+
|
| 406 |
|
| 407 |
[MODELS_END]
|
| 408 |
|