# chatbots5 / app.py
# rphrp1985's picture
# Update app.py
# 2454be4 verified
import spaces
import os

# Configure llama.cpp CUDA behavior; must be set before llama_cpp is imported.
os.environ["LLAMA_CUBLAS"] = "1"
os.environ["GGML_CUDA_FORCE_DMMV"] = "1"
import json
import subprocess
from cachetools import LRUCache
import threading

# Process-wide cache of loaded Llama instances, keyed later by
# (model_path, enable_thinking); LRU-evicts beyond 10 entries.
MODEL_CACHE = LRUCache(maxsize=10)  # max 10 models
# Guards MODEL_CACHE against concurrent load/lookup from Gradio workers.
CACHE_LOCK = threading.Lock()
### monkey patch
import llama_cpp._internals as internals
from llama_cpp.llama_chat_format import Qwen35ChatHandler

# Patch LlamaModel teardown BEFORE any Llama() is constructed, so shutdown-time
# cleanup errors inside llama_cpp never propagate.
_original_close = internals.LlamaModel.close


def safe_close(self):
    """Close the native model only while a sampler is still attached; swallow errors."""
    try:
        if getattr(self, "sampler", None) is not None:
            return _original_close(self)
    except Exception:
        pass


def safe_del(self):
    """Best-effort finalizer: delegate to close() and suppress any exception."""
    try:
        self.close()
    except Exception:
        pass


internals.LlamaModel.close = safe_close
internals.LlamaModel.__del__ = safe_del
##### final verdict
# GLM 4.7 flash fast infrence
#qwen 3 VL
#mini max 2.5
# qwen 3 coder next
#gpt oss 120B
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider
from llama_cpp_agent.chat_history import BasicChatHistory
from llama_cpp_agent.chat_history.messages import Roles
import gradio as gr
from huggingface_hub import hf_hub_download
import llama_cpp

# Log the installed llama_cpp build location and version for Space debugging.
print(llama_cpp.__file__)
print(llama_cpp.__version__)
# Optional HF token for gated/private repos; None when the env var is unset.
huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
# hf_hub_download(
# repo_id="bartowski/gemma-2-9b-it-GGUF",
# filename="gemma-2-9b-it-Q5_K_M.gguf",
# local_dir="./models"
# )
# hf_hub_download(
# repo_id="bartowski/gemma-2-27b-it-GGUF",
# filename="gemma-2-27b-it-Q5_K_M.gguf",
# local_dir="./models"
# )
# hf_hub_download(
# repo_id="google/gemma-2-2b-it-GGUF",
# filename="2b_it_v2.gguf",
# local_dir="./models",
# token=huggingface_token
# )
# hf_hub_download(
# repo_id="unsloth/GLM-4.7-Flash-GGUF",
# filename="GLM-4.7-Flash-Q8_0.gguf",
# local_dir="./models",
# token=huggingface_token
# )
# hf_hub_download(
# repo_id="unsloth/gpt-oss-20b-GGUF",
# filename="gpt-oss-20b-Q4_K_M.gguf",
# local_dir="./models",
# token=huggingface_token
# )
# hf_hub_download(
# repo_id="unsloth/gpt-oss-20b-GGUF",
# filename="gpt-oss-20b-Q4_K_M.gguf",
# local_dir="./models",
# token=huggingface_token
# )
import base64
# hf_hub_download(
# repo_id="unsloth/Qwen3-Next-80B-A3B-Instruct-GGUF",
# filename="Qwen3-Next-80B-A3B-Instruct-Q4_K_M.gguf",
# local_dir="./models",
# token=huggingface_token
# )
# Download the Qwen3.5-9B GGUF weights plus the multimodal projector used by
# the vision chat handler; both are fetched into ./models at startup.
hf_hub_download(
    repo_id="unsloth/Qwen3.5-9B-GGUF",
    filename="Qwen3.5-9B-UD-Q8_K_XL.gguf",
    local_dir="./models"
)
hf_hub_download(
    repo_id="unsloth/Qwen3.5-9B-GGUF",
    filename="mmproj-BF16.gguf",
    local_dir="./models"
)
from huggingface_hub import snapshot_download
# snapshot_download(
# repo_id="unsloth/Qwen3-Coder-Next-GGUF",
# repo_type="model",
# local_dir="./models/",
# allow_patterns=["Q5_K_M/*"], # 👈 folder inside repo
# token=huggingface_token # only if gated/private
# )
#### Deploy Minimax 2.5 insplace of gpt oss 120b its larger . and better and more recet leeases
# snapshot_download(
# repo_id="unsloth/gpt-oss-120b-GGUF",
# repo_type="model",
# local_dir="./models/",
# allow_patterns=["Q8_0/*"], # 👈 folder inside repo
# token=huggingface_token # only if gated/private
# )
# llm = Llama.from_pretrained(
# repo_id="stepfun-ai/Step-3.5-Flash-GGUF-Q4_K_S",
# # ALWAYS first shard only here
# filename="step3p5_flash_Q4_K_S-00001-of-00012.gguf",
# # Download all shards
# additional_files=[
# f"step3p5_flash_Q4_K_S-{i:05d}-of-00012.gguf"
# for i in range(2, 13)
# ],
# local_dir="./models",
# # Performance settings
# flash_attn=True,
# n_gpu_layers=-1, # use full GPU (if you have enough VRAM)
# n_batch=2048,
# n_ctx=4096, # 8000 is heavy unless needed
# )
# llm = Llama.from_pretrained(
# repo_id="stepfun-ai/Step-3.5-Flash-GGUF-Q4_K_S",
# filename="step3p5_flash_Q4_K_S-00001-of-00012.gguf",
# allow_patterns=["UD-TQ1_0/*.gguf"],
# verbose=False
# )
import os
def print_tree(start_path="models"):
    """Print a tree view of the directory rooted at *start_path*.

    Args:
        start_path: Directory to walk. If it does not exist, nothing is
            printed (os.walk yields no entries for a missing path).
    """
    for root, dirs, files in os.walk(start_path):
        # BUG FIX: the old `root.replace(start_path, "")` removed EVERY
        # occurrence of the substring (e.g. "models/my_models_dir" lost both),
        # corrupting the depth count. relpath measures depth correctly.
        rel = os.path.relpath(root, start_path)
        level = 0 if rel == "." else rel.count(os.sep) + 1
        indent = "│ " * level
        print(f"{indent}├── {os.path.basename(root)}/")
        subindent = "│ " * (level + 1)
        for f in files:
            print(f"{subindent}├── {f}")


print_tree("models")
import gc
import torch
def delete_llama_model(llm):
    """Fully unload a llama_cpp model and reclaim CPU/GPU memory.

    Args:
        llm: The Llama instance to dispose of, or None (close is skipped).

    NOTE(review): rebinding the local ``llm`` below cannot clear the caller's
    reference; the caller must also drop its own reference for the object to
    actually be collected.
    """
    if llm is not None:
        try:
            llm.close()  # release the native llama.cpp resources first
        except Exception as e:
            print("Close error:", e)
    llm = None
    # Force Python garbage collection of any freshly dropped model objects.
    gc.collect()
    # Clear CUDA caches; ignore failures on CPU-only machines.  Narrowed from
    # a bare `except:` which also swallowed KeyboardInterrupt/SystemExit.
    try:
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
        torch.cuda.synchronize()
    except Exception:
        pass
    print("Model fully unloaded.")
# Lazily populated model handles; each stays None until its model is loaded.
llm = None
llm_model_glm = None
llm_model_qwen= None
# Extension -> MIME type table used when building data URIs.
_IMAGE_MIME_TYPES = {
    # Most common formats
    '.png': 'image/png',
    '.jpg': 'image/jpeg',
    '.jpeg': 'image/jpeg',
    '.gif': 'image/gif',
    '.webp': 'image/webp',
    # Next-generation formats
    '.avif': 'image/avif',
    '.jp2': 'image/jp2',
    '.j2k': 'image/jp2',
    '.jpx': 'image/jp2',
    # Legacy / Windows formats
    '.bmp': 'image/bmp',
    '.ico': 'image/x-icon',
    '.pcx': 'image/x-pcx',
    '.tga': 'image/x-tga',
    '.icns': 'image/icns',
    # Professional / Scientific imaging
    '.tif': 'image/tiff',
    '.tiff': 'image/tiff',
    '.eps': 'application/postscript',
    '.dds': 'image/vnd-ms.dds',
    '.dib': 'image/dib',
    '.sgi': 'image/sgi',
    # Portable Map formats (PPM/PGM/PBM)
    '.pbm': 'image/x-portable-bitmap',
    '.pgm': 'image/x-portable-graymap',
    '.ppm': 'image/x-portable-pixmap',
    # Miscellaneous / Older formats
    '.xbm': 'image/x-xbitmap',
    '.mpo': 'image/mpo',
    '.msp': 'image/msp',
    '.im': 'image/x-pillow-im',
    '.qoi': 'image/qoi',
}
import os
import base64


def image_to_base64_data_uri(
    file_path: str,
    *,
    fallback_mime: str = "application/octet-stream"
) -> str:
    """Encode a local image file as a base64 ``data:`` URI.

    The MIME type is chosen from the file extension via ``_IMAGE_MIME_TYPES``
    (20+ formats: PNG, JPEG, WebP, AVIF, BMP, ICO, TIFF, ...).

    Args:
        file_path: Path to the image file on disk.
        fallback_mime: MIME type used when the extension is unknown.

    Returns:
        A data URI string such as ``data:image/webp;base64,...``.

    Raises:
        FileNotFoundError: If the file does not exist.
        OSError: If reading the file fails.
    """
    if not os.path.isfile(file_path):
        raise FileNotFoundError(f"Image file not found: {file_path}")

    ext = os.path.splitext(file_path)[1].lower()
    mime = _IMAGE_MIME_TYPES.get(ext, fallback_mime)
    if mime == fallback_mime:
        print(
            f"Warning: Unknown extension '{ext}' for '{file_path}'. "
            f"Using fallback MIME type: {fallback_mime}"
        )

    try:
        with open(file_path, "rb") as fh:
            raw = fh.read()
    except OSError as e:
        raise OSError(f"Failed to read image file '{file_path}': {e}") from e

    payload = base64.b64encode(raw).decode("utf-8")
    return f"data:{mime};base64,{payload}"
###################### sample code ################################################
# --- Main Logic for Image Processing ---
# # 1. Create a list containing all image paths
# image_paths = [
# r'./scene.jpeg',
# r'./cat.png',
# r'./network.webp',
# # Add more image paths here if needed
# ]
# # 2. Create an empty list to store the message objects (images and text)
# images_messages = []
# # 3. Loop through the image path list, convert each image to a Data URI,
# # and add it to the message list as an image_url object.
# for path in image_paths:
# data_uri = image_to_base64_data_uri(path)
# images_messages.append({"type": "image_url", "image_url": {"url": data_uri}})
# # 4. Add the final text prompt at the end of the list
# images_messages.append({"type": "text", "text": "Describes the images."})
# # 5. Use this list to build the chat_completion request
# res = llm.create_chat_completion(
# messages=[
# {"role": "system", "content": "You are a highly accurate vision-language assistant. Provide detailed, precise, and well-structured image descriptions."},
# # The user's content is the list containing both images and text
# {"role": "user", "content": images_messages}
# ]
# )
# # Print the assistant's response
# print(res["choices"][0]["message"]["content"])
import os
import tempfile
import requests
from urllib.parse import urlparse
def handle_image_input(image_input):
    """Normalize an image reference to a base64 ``data:`` URI.

    Args:
        image_input: Either a data URI (``data:image/...;base64,...``) or an
            http(s) URL pointing at an image.

    Returns:
        A ``data:`` URI string.

    Raises:
        ValueError: If the input is neither a data URI nor an http(s) URL.
        requests.HTTPError: If downloading the URL fails.
    """
    # Case 1: already a data URI -> return unchanged.
    # BUG FIX: this branch previously called an undefined ``process_image``,
    # which raised NameError for every data-URI input.
    if image_input.startswith("data:"):
        print("Data URI detected. No download needed.")
        return image_input

    # Case 2: http(s) URL -> download to a temp file, encode, then delete.
    parsed = urlparse(image_input)
    if parsed.scheme in ("http", "https"):
        print("URL detected. Downloading temporarily...")
        # timeout added so a stalled server cannot hang the worker forever.
        response = requests.get(image_input, timeout=30)
        response.raise_for_status()
        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp_file:
            tmp_file.write(response.content)
            temp_path = tmp_file.name
        try:
            # NOTE(review): the suffix is always ".png", so the encoded MIME
            # type is image/png regardless of the real format — confirm.
            return image_to_base64_data_uri(temp_path)
        finally:
            # Ensure the temp file is deleted even if encoding fails.
            os.remove(temp_path)

    raise ValueError("Unsupported image format.")
# Example usage
# image_url = "https://raw.githubusercontent.com/rphrp1985/mycodes/main/matagimage.png"
import json
def str_to_json(str_obj):
    """Deserialize a JSON document given as a string.

    Raises json.JSONDecodeError if *str_obj* is not valid JSON.
    """
    return json.loads(str_obj)
# Sampler presets keyed by mode name.  A request selects one via its "mode"
# field; "enable_thinking" is forwarded to the chat handler, the rest to
# create_chat_completion.  NOTE(review): "presence_penalty" is defined here
# but not currently passed to the sampler — confirm whether intended.
MODES = {
    # ===============================
    # THINKING MODE - GENERAL
    # ===============================
    "thinking_general": {
        "temperature": 1.0,
        "top_p": 0.95,
        "top_k": 20,
        "min_p": 0.0,
        "presence_penalty": 1.5,
        "repeat_penalty": 1.0,
        "enable_thinking": True,
    },
    # ===============================
    # THINKING MODE - CODING
    # ===============================
    "thinking_coding": {
        "temperature": 0.6,
        "top_p": 0.95,
        "top_k": 20,
        "min_p": 0.0,
        "presence_penalty": 0.0,
        "repeat_penalty": 1.0,
        "enable_thinking": True,
    },
    # ===============================
    # INSTRUCT MODE - GENERAL
    # ===============================
    "instruct_general": {
        "temperature": 0.7,
        "top_p": 0.8,
        "top_k": 20,
        "min_p": 0.0,
        "presence_penalty": 1.5,
        "repeat_penalty": 1.0,
        "enable_thinking": False,
    },
    # ===============================
    # INSTRUCT MODE - REASONING
    # ===============================
    "instruct_reasoning": {
        "temperature": 1.0,
        "top_p": 0.95,
        "top_k": 20,
        "min_p": 0.0,
        "presence_penalty": 1.5,
        "repeat_penalty": 1.0,
        "enable_thinking": False,
    },
}
import os
def load_llama_model(model_path, enable_thinking):
    """Return a cached Llama instance for (model_path, enable_thinking).

    The whole load happens while holding CACHE_LOCK, so concurrent requests
    cannot load the same model twice; LRU eviction caps memory at 10 models.

    NOTE(review): ``model_path`` is part of the cache key but IGNORED below —
    the Qwen3.5-9B file is hard-coded, so every "model" choice loads the same
    weights.  Confirm whether that is intended.
    """
    key = (model_path, enable_thinking)
    with CACHE_LOCK:
        if key in MODEL_CACHE:
            return MODEL_CACHE[key]
        print("Loading model:", key)
        llm = Llama(
            model_path=f"models/Qwen3.5-9B-UD-Q8_K_XL.gguf",
            flash_attn=True,
            n_gpu_layers=-1,  # offload all layers to the GPU
            n_batch=2048,  # increase
            n_ctx= 16384,  # reduce if you don’t need 8k
            # n_threads= os.cpu_count(), # set to your CPU cores
            n_threads=os.cpu_count(),  # use all cores
            n_threads_batch=os.cpu_count(),
            # use_mlock=True,
            # verbose=True,
            mul_mat_q=True,
            use_mmap=True,
            chat_handler=Qwen35ChatHandler(
                clip_model_path=f"models/mmproj-BF16.gguf",
                # force_reasoning=True,
                enable_thinking=enable_thinking ,
                image_min_tokens=1024,  # Note: Qwen-VL models require at minimum 1024 image tokens to function correctly on bbox grounding tasks
            ),
        )
        MODEL_CACHE[key] = llm
        return llm
# if isinstance(var, str):
# print("its astring ")
# return True
@spaces.GPU(duration=70)
def respond(
    message,
    history: list[tuple[str, str]],
    model,
    system_message,
    max_tokens,
    temperature,
    top_p,
    top_k,
    repeat_penalty,
):
    """Gradio ChatInterface handler (runs on a ZeroGPU worker for <= 70 s).

    ``message`` is either plain text or a JSON string of the form
    ``{"messages": [...], "max_token": int, "mode": str}`` with OpenAI-style
    messages whose content parts may include ``{"type": "image", "url"/"uri": ...}``.
    Sampler settings come from MODES[mode]; the UI sliders (system_message,
    max_tokens, temperature, top_p, top_k, repeat_penalty) are currently
    ignored, as in the original implementation.

    Yields:
        The assistant's reply text (single yield, non-streaming).
    """
    global MODES
    try:
        j_r = str_to_json(message)
        messages = j_r['messages']
        max_token = j_r['max_token']
        try:
            mode = j_r['mode']
        except KeyError:  # narrowed from a bare except: only a missing key
            mode = "thinking_general"
    except Exception as e:
        # Not a JSON payload: treat the raw message as a single user turn.
        print(e)
        print("not valid json")
        max_token = 8196  # NOTE(review): possibly meant 8192 — confirm
        mode = "instruct_general"
        messages = [
            {
                "role": "user",
                "content": str(message)
            }
        ]

    # Normalize multimodal content: text parts pass through unchanged; image
    # parts become {"type": "image_url", ...} carrying a base64 data URI.
    final_messages = []
    for message in messages:
        content = message['content']
        if isinstance(content, str):
            final_messages.append(message)
            continue
        # content is a list of parts (text and/or images)
        tmp_list = []
        for item in content:
            if item['type'] == "text":
                tmp_list.append(item)
                continue
            if item['type'] == "image":
                # The image reference may arrive under 'url' or 'uri';
                # failures on either key are logged and skipped.
                try:
                    if item['url']:
                        data_uri = handle_image_input(item['url'])
                        tmp_list.append({"type": "image_url", "image_url": {"url": data_uri}})
                except Exception as e:
                    print(e)
                try:
                    if item['uri']:
                        # BUG FIX: this branch previously read item['url'],
                        # so a 'uri'-only image was never converted.
                        data_uri = handle_image_input(item['uri'])
                        tmp_list.append({"type": "image_url", "image_url": {"url": data_uri}})
                except Exception as e:
                    print(e)
        final_messages.append({"role": message['role'], "content": tmp_list})

    config = MODES[mode]
    llm_model_qwen = load_llama_model(
        model_path=f"models/{model}",
        enable_thinking=config["enable_thinking"],
    )
    x = llm_model_qwen.create_chat_completion(
        messages=final_messages,
        max_tokens=max_token,
        temperature=config["temperature"],
        top_p=config["top_p"],
        top_k=config["top_k"],
        min_p=config["min_p"],
        # presence_penalty=config["presence_penalty"],  # intentionally unset, as before
        repeat_penalty=config["repeat_penalty"],
    )
    print(x)
    x = x['choices'][0]['message']['content']
    yield str(x)
# Landing-page HTML shown under the chat box.
# NOTE(review): this copy (and the title below) still references Gemma 2,
# but the served model is Qwen3.5 — confirm whether the text should change.
description = """<p align="center">Defaults to 2B (you can switch to 9B or 27B from additional inputs)</p>
<p><center>
<a href="https://huggingface.co/google/gemma-2-27b-it" target="_blank">[27B it Model]</a>
<a href="https://huggingface.co/google/gemma-2-9b-it" target="_blank">[9B it Model]</a>
<a href="https://huggingface.co/google/gemma-2-2b-it" target="_blank">[2B it Model]</a>
<a href="https://huggingface.co/bartowski/gemma-2-27b-it-GGUF" target="_blank">[27B it Model GGUF]</a>
<a href="https://huggingface.co/bartowski/gemma-2-9b-it-GGUF" target="_blank">[9B it Model GGUF]</a>
<a href="https://huggingface.co/google/gemma-2-2b-it-GGUF" target="_blank">[2B it Model GGUF]</a>
</center></p>
"""
import gradio as gr

# Chat UI wiring: `respond` receives the extra inputs below after (message,
# history).  NOTE(review): respond currently ignores the slider values and
# system message, using MODES presets instead — confirm intended.
demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        gr.Dropdown(
            [
                # "gemma-2-9b-it-Q5_K_M.gguf",
                # "gemma-2-27b-it-Q5_K_M.gguf",
                # "2b_it_v2.gguf",
                # "GLM-4.7-Flash-Q8_0.gguf",
                # "Qwen3-Coder-Next-Q4_K_M.gguf",
                # "gpt-oss-20b-Q4_K_M.gguf",
                # "Qwen3-Next-80B-A3B-Instruct-Q4_K_M.gguf",
                "Qwen3-VL-32B-Thinking-Q8_0.gguf",
                # "Qwen3-VL-32B-Thinking-Q8_0.gguf",
                # "Q8_0/gpt-oss-120b-Q8_0-00001-of-00002.gguf"
            ],
            value="Qwen3-VL-32B-Thinking-Q8_0.gguf",
            label="Model",
        ),
        gr.Textbox(
            value="You are a helpful assistant.",
            label="System message",
        ),
        gr.Slider(1, 4096, value=2048, step=1, label="Max tokens"),
        gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p"),
        gr.Slider(0, 100, value=40, step=1, label="Top-k"),
        gr.Slider(0.0, 2.0, value=1.1, step=0.1, label="Repetition penalty"),
    ],
    title="Chat with Gemma 2 using llama.cpp",
    description=description,
)
# demo.launch()
# Launch only when executed as a script (Spaces runs this module directly).
if __name__ == "__main__":
    demo.launch()