Spaces:
Running
Running
Update app/.pyfun
Browse files- app/.pyfun +8 -8
app/.pyfun
CHANGED
|
@@ -88,14 +88,14 @@ SEARCH_TIMEOUT_SEC = "30"
|
|
| 88 |
fallback_to = "" # last in chain, no further fallback
|
| 89 |
[LLM_PROVIDER.openrouter_END]
|
| 90 |
|
| 91 |
-
|
| 92 |
-
active
|
| 93 |
-
base_url
|
| 94 |
-
env_key
|
| 95 |
-
default_model
|
| 96 |
-
models
|
| 97 |
-
fallback_to
|
| 98 |
-
|
| 99 |
|
| 100 |
# ── Add more LLM providers below ──────────────────────────────────────────
|
| 101 |
# [LLM_PROVIDER.mistral]
|
|
|
|
| 88 |
fallback_to = "" # last in chain, no further fallback
|
| 89 |
[LLM_PROVIDER.openrouter_END]
|
| 90 |
|
| 91 |
+
[LLM_PROVIDER.huggingface]
|
| 92 |
+
active = "true"
|
| 93 |
+
base_url = "https://api-inference.huggingface.co/models"
|
| 94 |
+
env_key = "HF_TOKEN" # → .env: HF_TOKEN=hf_...
|
| 95 |
+
default_model = "cognitivecomputations/dolphin-mistral-24b-venice-edition:free"
|
| 96 |
+
models = "cognitivecomputations/dolphin-mistral-24b-venice-edition:free, deepseek/deepseek-chat-v3.1:free, nvidia/nemotron-nano-9b-v2:free, google/gemma-3-27b-it:free, openai/gpt-oss-20b:free, qwen/qwen3-coder:free, qwen/qwen2.5-vl-72b-instruct:free, nousresearch/deephermes-3-llama-3-8b-preview:free, mistralai/Mistral-7B-Instruct-v0.3, meta-llama/Llama-3.3-70B-Instruct"
|
| 97 |
+
fallback_to = ""
|
| 98 |
+
[LLM_PROVIDER.huggingface_END]
|
| 99 |
|
| 100 |
# ── Add more LLM providers below ──────────────────────────────────────────
|
| 101 |
# [LLM_PROVIDER.mistral]
|