Spaces:
Running
Running
Update app/.pyfun
Browse files- app/.pyfun +3 -3
app/.pyfun
CHANGED
|
@@ -85,7 +85,7 @@ SEARCH_TIMEOUT_SEC = "30"
|
|
| 85 |
env_key = "OPENROUTER_API_KEY" # → .env: OPENROUTER_API_KEY=sk-or-...
|
| 86 |
default_model = "cognitivecomputations/dolphin-mistral-24b-venice-edition:free"
|
| 87 |
models = "openai/gpt-4o, meta-llama/llama-3-8b-instruct, mistralai/mistral-7b-instruct, cognitivecomputations/dolphin-mistral-24b-venice-edition:free, deepseek/deepseek-chat-v3.1:free, nvidia/nemotron-nano-9b-v2:free, google/gemma-3-27b-it:free, openai/gpt-oss-20b:free, qwen/qwen3-coder:free, qwen/qwen2.5-vl-72b-instruct:free, nousresearch/deephermes-3-llama-3-8b-preview:free, mistralai/Mistral-7B-Instruct-v0.3, meta-llama/Llama-3.3-70B-Instruct"
|
| 88 |
-
fallback_to = ""
|
| 89 |
[LLM_PROVIDER.openrouter_END]
|
| 90 |
|
| 91 |
[LLM_PROVIDER.huggingface]
|
|
@@ -93,8 +93,8 @@ SEARCH_TIMEOUT_SEC = "30"
|
|
| 93 |
base_url = "https://api-inference.huggingface.co/models"
|
| 94 |
env_key = "HF_TOKEN" # → .env: HF_TOKEN=hf_...
|
| 95 |
default_model = "meta-llama/Llama-3.3-70B-Instruct"
|
| 96 |
-
models
|
| 97 |
-
fallback_to = ""
|
| 98 |
[LLM_PROVIDER.huggingface_END]
|
| 99 |
|
| 100 |
# ── Add more LLM providers below ──────────────────────────────────────────
|
|
|
|
| 85 |
env_key = "OPENROUTER_API_KEY" # → .env: OPENROUTER_API_KEY=sk-or-...
|
| 86 |
default_model = "cognitivecomputations/dolphin-mistral-24b-venice-edition:free"
|
| 87 |
models = "openai/gpt-4o, meta-llama/llama-3-8b-instruct, mistralai/mistral-7b-instruct, cognitivecomputations/dolphin-mistral-24b-venice-edition:free, deepseek/deepseek-chat-v3.1:free, nvidia/nemotron-nano-9b-v2:free, google/gemma-3-27b-it:free, openai/gpt-oss-20b:free, qwen/qwen3-coder:free, qwen/qwen2.5-vl-72b-instruct:free, nousresearch/deephermes-3-llama-3-8b-preview:free, mistralai/Mistral-7B-Instruct-v0.3, meta-llama/Llama-3.3-70B-Instruct"
|
| 88 |
+
fallback_to = "huggingface"
|
| 89 |
[LLM_PROVIDER.openrouter_END]
|
| 90 |
|
| 91 |
[LLM_PROVIDER.huggingface]
|
|
|
|
| 93 |
base_url = "https://api-inference.huggingface.co/models"
|
| 94 |
env_key = "HF_TOKEN" # → .env: HF_TOKEN=hf_...
|
| 95 |
default_model = "meta-llama/Llama-3.3-70B-Instruct"
|
| 96 |
+
models = "meta-llama/Llama-3.3-70B-Instruct, mistralai/Mistral-Nemo-Instruct-2407, ..."
|
| 97 |
+
fallback_to = "" # last in chain, no further fallback
|
| 98 |
[LLM_PROVIDER.huggingface_END]
|
| 99 |
|
| 100 |
# ── Add more LLM providers below ──────────────────────────────────────────
|