rphrp1985 committed on
Commit
2a70f3e
·
verified ·
1 Parent(s): 4983bca

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -10
app.py CHANGED
@@ -48,15 +48,25 @@ huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
48
  # local_dir="./models",
49
  # token=huggingface_token
50
  # )
51
- from huggingface_hub import snapshot_download
52
-
53
- snapshot_download(
54
- repo_id="unsloth/MiniMax-M2.5-GGUF",
55
- repo_type="model",
56
- local_dir="./models/minmax",
57
- allow_patterns=["UD-IQ1_S/*"], # 👈 folder inside repo
58
- token=huggingface_token # only if gated/private
59
  )
 
 
 
 
 
 
 
 
 
 
 
 
60
 
61
 
62
  # llm = Llama.from_pretrained(
@@ -204,10 +214,10 @@ demo = gr.ChatInterface(
204
  "gemma-2-27b-it-Q5_K_M.gguf",
205
  # "2b_it_v2.gguf",
206
  "GLM-4.7-Flash-UD-Q8_K_XL.gguf",
207
- "minmax/UD-IQ1_S/MiniMax-M2.5-UD-IQ1_S-00001-of-00003.gguf",
208
  "gpt-oss-20b-Q4_K_M.gguf"
209
  ],
210
- value="GLM-4.7-Flash-UD-Q8_K_XL.gguf",
211
  label="Model",
212
  ),
213
  gr.Textbox(
 
48
  # local_dir="./models",
49
  # token=huggingface_token
50
  # )
51
+
52
+
53
+ hf_hub_download(
54
+ repo_id="unsloth/Qwen3-Coder-Next-GGUF",
55
+ filename="Qwen3-Coder-Next-Q4_K_S.gguf",
56
+ local_dir="./models"
 
 
57
  )
58
+ # from huggingface_hub import snapshot_download
59
+
60
+ # snapshot_download(
61
+ # repo_id="unsloth/MiniMax-M2.5-GGUF",
62
+ # repo_type="model",
63
+ # local_dir="./models/minmax",
64
+ # allow_patterns=["UD-IQ1_S/*"], # 👈 folder inside repo
65
+ # token=huggingface_token # only if gated/private
66
+ # )
67
+
68
+
69
+
70
 
71
 
72
  # llm = Llama.from_pretrained(
 
214
  "gemma-2-27b-it-Q5_K_M.gguf",
215
  # "2b_it_v2.gguf",
216
  "GLM-4.7-Flash-UD-Q8_K_XL.gguf",
217
+ "Qwen3-Coder-Next-Q4_K_S.gguf",
218
  "gpt-oss-20b-Q4_K_M.gguf"
219
  ],
220
+ value="Qwen3-Coder-Next-Q4_K_S.gguf",
221
  label="Model",
222
  ),
223
  gr.Textbox(