import spaces
import os
os.environ["LLAMA_CUBLAS"] = "1"
os.environ["GGML_CUDA_FORCE_DMMV"] = "1"
import json
import subprocess
### Monkey patch llama_cpp internals
import llama_cpp._internals as internals
from llama_cpp.llama_chat_format import Qwen3VLChatHandler
# Patch BEFORE creating Llama(): guard close()/__del__ so a partially
# initialized model (one whose sampler was never created) can't raise
# during cleanup.
_original_close = internals.LlamaModel.close

def safe_close(self):
    # Only run the original close() when the sampler was actually created;
    # otherwise a half-constructed model would raise on cleanup.
    try:
        if hasattr(self, "sampler") and self.sampler is not None:
            return _original_close(self)
    except Exception:
        pass

internals.LlamaModel.close = safe_close

def safe_del(self):
    # Never let the destructor raise during interpreter shutdown.
    try:
        self.close()
    except Exception:
        pass

internals.LlamaModel.__del__ = safe_del
##### Final verdict: candidate models considered
# GLM 4.7 Flash (fast inference)
# Qwen 3 VL
# MiniMax 2.5
# Qwen 3 Coder Next
# gpt-oss-120B
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider
from llama_cpp_agent.chat_history import BasicChatHistory
from llama_cpp_agent.chat_history.messages import Roles
import gradio as gr
from huggingface_hub import hf_hub_download
import llama_cpp
print(llama_cpp.__file__)
print(llama_cpp.__version__)
huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
# hf_hub_download(
# repo_id="bartowski/gemma-2-9b-it-GGUF",
# filename="gemma-2-9b-it-Q5_K_M.gguf",
# local_dir="./models"
# )
# hf_hub_download(
# repo_id="bartowski/gemma-2-27b-it-GGUF",
# filename="gemma-2-27b-it-Q5_K_M.gguf",
# local_dir="./models"
# )
# hf_hub_download(
# repo_id="google/gemma-2-2b-it-GGUF",
# filename="2b_it_v2.gguf",
# local_dir="./models",
# token=huggingface_token
# )
# hf_hub_download(
# repo_id="unsloth/GLM-4.7-Flash-GGUF",
# filename="GLM-4.7-Flash-Q8_0.gguf",
# local_dir="./models",
# token=huggingface_token
# )
# hf_hub_download(
# repo_id="unsloth/gpt-oss-20b-GGUF",
# filename="gpt-oss-20b-Q4_K_M.gguf",
# local_dir="./models",
# token=huggingface_token
# )
# hf_hub_download(
# repo_id="unsloth/Qwen3-Next-80B-A3B-Instruct-GGUF",
# filename="Qwen3-Next-80B-A3B-Instruct-Q4_K_M.gguf",
# local_dir="./models",
# token=huggingface_token
# )
# hf_hub_download(
# repo_id="unsloth/Qwen3-VL-32B-Thinking-GGUF",
# filename="Qwen3-VL-32B-Thinking-Q8_0.gguf",
# local_dir="./models"
# )
# hf_hub_download(
# repo_id="unsloth/Qwen3-Coder-Next-GGUF",
# filename="Qwen3-Coder-Next-Q4_K_M.gguf",
# local_dir="./models"
# )
from huggingface_hub import snapshot_download
# snapshot_download(
# repo_id="unsloth/Qwen3-Coder-Next-GGUF",
# repo_type="model",
# local_dir="./models/",
# allow_patterns=["Q5_K_M/*"], # 👈 folder inside repo
# token=huggingface_token # only if gated/private
# )
#### Deploy MiniMax 2.5 in place of gpt-oss-120b: it is larger, better, and a more recent release
snapshot_download(
    repo_id="unsloth/Qwen3-Coder-Next-GGUF",
    repo_type="model",
    local_dir="./models/",
    allow_patterns=["Q6_K/*"],  # 👈 folder inside repo
    token=huggingface_token,  # only needed if the repo is gated/private
)
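# Quick sanity check (a sketch; assumes the Q6_K split-GGUF shards land under
# models/Q6_K/ as configured above, and that the shard count matches the
# "-of-00003" suffix used in the model dropdown below).
import glob

expected_shards = 3
found_shards = sorted(glob.glob("models/Q6_K/*.gguf"))
print(f"Found {len(found_shards)}/{expected_shards} Q6_K shards:")
for shard in found_shards:
    print("  ", shard)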
# llm = Llama.from_pretrained(
# repo_id="stepfun-ai/Step-3.5-Flash-GGUF-Q4_K_S",
# # ALWAYS first shard only here
# filename="step3p5_flash_Q4_K_S-00001-of-00012.gguf",
# # Download all shards
# additional_files=[
# f"step3p5_flash_Q4_K_S-{i:05d}-of-00012.gguf"
# for i in range(2, 13)
# ],
# local_dir="./models",
# # Performance settings
# flash_attn=True,
# n_gpu_layers=-1, # use full GPU (if you have enough VRAM)
# n_batch=2048,
# n_ctx=4096, # 8000 is heavy unless needed
# )
# llm = Llama.from_pretrained(
# repo_id="stepfun-ai/Step-3.5-Flash-GGUF-Q4_K_S",
# filename="step3p5_flash_Q4_K_S-00001-of-00012.gguf",
# allow_patterns=["UD-TQ1_0/*.gguf"],
# verbose=False
# )
def print_tree(start_path="models"):
    # Walk the download directory and print an indented tree of its contents.
    for root, dirs, files in os.walk(start_path):
        level = root.replace(start_path, "").count(os.sep)
        indent = "│   " * level
        print(f"{indent}├── {os.path.basename(root)}/")
        subindent = "│   " * (level + 1)
        for f in files:
            print(f"{subindent}├── {f}")

print_tree("models")
import gc
import torch

def delete_llama_model(llm):
    # Explicitly release llama.cpp's native memory, then collect garbage and
    # clear the CUDA caches. Rebinding `llm` below only clears the local name;
    # the caller must also drop its own reference for memory to be reclaimed.
    if llm is not None:
        try:
            llm.close()  # 🔥 VERY IMPORTANT
        except Exception as e:
            print("Close error:", e)
        llm = None
    # Force Python garbage collection
    gc.collect()
    # Clear GPU cache (if using CUDA)
    try:
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
        torch.cuda.synchronize()
    except Exception:
        pass
    print("Model fully unloaded.")
llm = None
llm_model_glm = None
llm_model_qwen = None
def str_to_json(str_obj):
    # Parse a JSON string into a Python object (raises on invalid JSON).
    return json.loads(str_obj)
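# Shape of the JSON payload respond() accepts (inferred from the parsing in
# respond() below; the key names "messages" and "max_token" are this script's
# own convention):
#   {"messages": [{"role": "user", "content": "Hello"}], "max_token": 4096}
# Plain non-JSON text also works and falls back to a single user turn.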
@spaces.GPU(duration=100)
def respond(
    message,
    history: list[tuple[str, str]],
    model,
    system_message,
    max_tokens,
    temperature,
    top_p,
    top_k,
    repeat_penalty,
):
    # chat_template = MessagesFormatterType.GEMMA_2
    chat_template = MessagesFormatterType.CHATML  # kept for the commented-out agent path below
    # global llm
    # global llm_model
    # global llm_model_glm
    # global llm_model_qwen
    # Accept either a JSON payload (full message list plus context-size
    # override) or a plain prompt, which becomes a single user turn.
    try:
        j_r = str_to_json(message)
        messages = j_r["messages"]
        max_token = j_r["max_token"]
    except Exception as e:
        print(e)
        print("not valid json")
        max_token = 8196  # fallback context size
        messages = [
            {
                "role": "user",
                "content": str(message),
            }
        ]
    # if llm_model_glm == None:
    llm_model_glm = Llama(
        model_path=f"models/{model}",
        flash_attn=False,
        n_gpu_layers=-1,
        n_batch=2048,  # increase for faster prompt processing
        n_ctx=max_token,  # reduce if you don't need a large context
        n_threads=os.cpu_count(),  # set to your CPU core count
        # use_mlock=True,
        verbose=True,
        chat_format="chatml",
    )
    x = llm_model_glm.create_chat_completion(
        messages=messages
    )
    # delete_llama_model(llm_model_glm)
    print(x)
    x = x["choices"][0]["message"]["content"]
    yield str(x)
# provider = LlamaCppPythonProvider(llm)
# agent = LlamaCppAgent(
# provider,
# system_prompt=f"{system_message}",
# predefined_messages_formatter_type=chat_template,
# debug_output=False
# )
# settings = provider.get_provider_default_settings()
# settings.temperature = temperature
# settings.top_k = top_k
# settings.top_p = top_p
# settings.max_tokens = max_tokens
# settings.repeat_penalty = repeat_penalty
# # settings.stream = True
# # settings.reasoning_effort ="low"
# messages = BasicChatHistory()
# for msn in history:
# user = {
# 'role': Roles.user,
# 'content': msn[0]
# }
# assistant = {
# 'role': Roles.assistant,
# 'content': msn[1]
# }
# messages.add_message(user)
# messages.add_message(assistant)
# stream = agent.get_chat_response(
# message,
# # llm_sampling_settings=settings,
# chat_history=messages,
# # returns_streaming_generator=True,
# print_output=False
# )
# outputs = ""
# for output in stream:
# outputs += output
# yield outputs
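# A streaming variant (a minimal sketch, not wired into the UI; assumes the
# same models/ layout and ChatML format as above). llama-cpp-python's
# create_chat_completion(stream=True) yields OpenAI-style chunks whose
# choices[0]["delta"] may carry a "content" piece.
def respond_streaming(message, model, max_token=8196):
    llm_stream = Llama(
        model_path=f"models/{model}",
        n_gpu_layers=-1,
        n_ctx=max_token,
        chat_format="chatml",
        verbose=False,
    )
    outputs = ""
    for chunk in llm_stream.create_chat_completion(
        messages=[{"role": "user", "content": str(message)}],
        stream=True,
    ):
        delta = chunk["choices"][0]["delta"]
        if "content" in delta:
            outputs += delta["content"]
            yield outputs  # Gradio expects the accumulated text so far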
description = """<p align="center">Defaults to 2B (you can switch to 9B or 27B from additional inputs)</p>
<p><center>
<a href="https://huggingface.co/google/gemma-2-27b-it" target="_blank">[27B it Model]</a>
<a href="https://huggingface.co/google/gemma-2-9b-it" target="_blank">[9B it Model]</a>
<a href="https://huggingface.co/google/gemma-2-2b-it" target="_blank">[2B it Model]</a>
<a href="https://huggingface.co/bartowski/gemma-2-27b-it-GGUF" target="_blank">[27B it Model GGUF]</a>
<a href="https://huggingface.co/bartowski/gemma-2-9b-it-GGUF" target="_blank">[9B it Model GGUF]</a>
<a href="https://huggingface.co/google/gemma-2-2b-it-GGUF" target="_blank">[2B it Model GGUF]</a>
</center></p>
"""
demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        gr.Dropdown(
            [
                # "gemma-2-9b-it-Q5_K_M.gguf",
                # "gemma-2-27b-it-Q5_K_M.gguf",
                # "2b_it_v2.gguf",
                # "Qwen3-Coder-Next-Q4_K_M.gguf",
                # "gpt-oss-20b-Q4_K_M.gguf",
                # "Qwen3-Next-80B-A3B-Instruct-Q4_K_M.gguf",
                "Q6_K/Qwen3-Coder-Next-Q6_K-00001-of-00003.gguf",
                # "Qwen3-VL-32B-Thinking-Q8_0.gguf",
                # "Q8_0/gpt-oss-120b-Q8_0-00001-of-00002.gguf"
            ],
            value="Q6_K/Qwen3-Coder-Next-Q6_K-00001-of-00003.gguf",
            label="Model",
        ),
        gr.Textbox(
            value="You are a helpful assistant.",
            label="System message",
        ),
        gr.Slider(1, 4096, value=2048, step=1, label="Max tokens"),
        gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p"),
        gr.Slider(0, 100, value=40, step=1, label="Top-k"),
        gr.Slider(0.0, 2.0, value=1.1, step=0.1, label="Repetition penalty"),
    ],
    title="Chat with Qwen3-Coder-Next using llama.cpp",
    description=description,
)
# demo.launch()
if __name__ == "__main__":
    demo.launch()