Merge branch 'main' of Mungert:spaces/Mungert/GradLLM
Browse files
- .huggingface.yml +1 -1
- appsettings.json +1 -1
.huggingface.yml
CHANGED
|
@@ -10,7 +10,7 @@ env:
|
|
| 10 |
# Enable verbose oneDNN kernel logging (see logs at startup/inference)
|
| 11 |
ONEDNN_VERBOSE: "1"
|
| 12 |
# Optional: limit MKL threading to avoid oversubscription
|
| 13 |
- OMP_NUM_THREADS: "
|
| 14 |
KMP_AFFINITY: "granularity=fine,compact,1,0"
|
| 15 |
|
| 16 |
# Optional: if you want deterministic installs, you can specify a custom pip index
|
|
|
|
| 10 |
# Enable verbose oneDNN kernel logging (see logs at startup/inference)
|
| 11 |
ONEDNN_VERBOSE: "1"
|
| 12 |
# Optional: limit MKL threading to avoid oversubscription
|
| 13 |
+ OMP_NUM_THREADS: "2"
|
| 14 |
KMP_AFFINITY: "granularity=fine,compact,1,0"
|
| 15 |
|
| 16 |
# Optional: if you want deterministic installs, you can specify a custom pip index
|
appsettings.json
CHANGED
|
@@ -40,7 +40,7 @@
|
|
| 40 |
"LlmUserPromptTimeout": 1200,
|
| 41 |
"LlmSessionIdleTimeout": 1440,
|
| 42 |
"LlmGptModel": "gpt-4.1-mini",
|
| 43 |
- "LlmHFModelID": "
|
| 44 |
"LlmHFKey": ".env",
|
| 45 |
"LlmHFUrl": "https://api.novita.ai/v3/openai/chat/completions",
|
| 46 |
"LlmHFModelVersion": "qwen_3",
|
|
|
|
| 40 |
"LlmUserPromptTimeout": 1200,
|
| 41 |
"LlmSessionIdleTimeout": 1440,
|
| 42 |
"LlmGptModel": "gpt-4.1-mini",
|
| 43 |
+ "LlmHFModelID": "Qwen/Qwen3-1.7B"
|
| 44 |
"LlmHFKey": ".env",
|
| 45 |
"LlmHFUrl": "https://api.novita.ai/v3/openai/chat/completions",
|
| 46 |
"LlmHFModelVersion": "qwen_3",
|