Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -145,10 +145,10 @@ from huggingface_hub import snapshot_download
|
|
| 145 |
|
| 146 |
#### Deploy MiniMax 2.5 in place of gpt-oss-120b — it's larger, better, and a more recent release
|
| 147 |
snapshot_download(
|
| 148 |
-
repo_id="unsloth/
|
| 149 |
repo_type="model",
|
| 150 |
local_dir="./models/",
|
| 151 |
-
allow_patterns=["
|
| 152 |
token=huggingface_token # only if gated/private
|
| 153 |
)
|
| 154 |
|
|
@@ -358,11 +358,11 @@ demo = gr.ChatInterface(
|
|
| 358 |
# "Qwen3-Coder-Next-Q4_K_M.gguf",
|
| 359 |
# "gpt-oss-20b-Q4_K_M.gguf",
|
| 360 |
# "Qwen3-Next-80B-A3B-Instruct-Q4_K_M.gguf",
|
| 361 |
-
"
|
| 362 |
# "Qwen3-VL-32B-Thinking-Q8_0.gguf",
|
| 363 |
# "Q8_0/gpt-oss-120b-Q8_0-00001-of-00002.gguf"
|
| 364 |
],
|
| 365 |
-
value="
|
| 366 |
label="Model",
|
| 367 |
),
|
| 368 |
gr.Textbox(
|
|
|
|
| 145 |
|
| 146 |
#### Deploy MiniMax 2.5 in place of gpt-oss-120b — it's larger, better, and a more recent release
|
| 147 |
snapshot_download(
|
| 148 |
+
repo_id="unsloth/Qwen3-Coder-Next-GGUF",
|
| 149 |
repo_type="model",
|
| 150 |
local_dir="./models/",
|
| 151 |
+
allow_patterns=["Q6_K/*"], # 👈 folder inside repo
|
| 152 |
token=huggingface_token # only if gated/private
|
| 153 |
)
|
| 154 |
|
|
|
|
| 358 |
# "Qwen3-Coder-Next-Q4_K_M.gguf",
|
| 359 |
# "gpt-oss-20b-Q4_K_M.gguf",
|
| 360 |
# "Qwen3-Next-80B-A3B-Instruct-Q4_K_M.gguf",
|
| 361 |
+
"Q6_K/Qwen3-Coder-Next-Q6_K-00001-of-00003.gguf",
|
| 362 |
# "Qwen3-VL-32B-Thinking-Q8_0.gguf",
|
| 363 |
# "Q8_0/gpt-oss-120b-Q8_0-00001-of-00002.gguf"
|
| 364 |
],
|
| 365 |
+
value="Q6_K/Qwen3-Coder-Next-Q6_K-00001-of-00003.gguf",
|
| 366 |
label="Model",
|
| 367 |
),
|
| 368 |
gr.Textbox(
|