# chatbots1 / app.py
# rishi2025 — "Update app.py", commit 9ef5d59 (verified)
import spaces
import os
import json
import subprocess
from functools import lru_cache
from cachetools import LRUCache
import threading

# Process-wide cache of loaded Llama instances, keyed by (model_path, enable_thinking).
MODEL_CACHE = LRUCache(maxsize=3)  # keeps at most 3 models resident; LRU evicts the oldest
CACHE_LOCK = threading.Lock()  # serializes cache lookups and model loads across request threads
### monkey patch
from llama_cpp.llama_chat_format import Qwen35ChatHandler
import llama_cpp._internals as internals

# Patch LlamaModel teardown BEFORE any Llama() is constructed, so models
# evicted from the LRU cache can never crash the process while closing.
_original_close = internals.LlamaModel.close


def safe_close(self):
    """Delegate to the real close only when a sampler exists; never raise."""
    try:
        if getattr(self, "sampler", None) is not None:
            return _original_close(self)
    except Exception:
        # Teardown failures are deliberately swallowed — best effort only.
        pass


def safe_del(self):
    """Best-effort close during garbage collection; never raise."""
    try:
        self.close()
    except Exception:
        pass


internals.LlamaModel.close = safe_close
internals.LlamaModel.__del__ = safe_del
##### final verdict — model candidates tried for this Space
# - Qwen 3 Next 80B inference
# - Qwen 3 VL
# - MiniMax 2.5
# - GPT-OSS 120B  # seems unstable
import llama_cpp._internals as internals  # NOTE(review): duplicate of the earlier import; harmless
from llama_cpp.llama_chat_format import Qwen3VLChatHandler
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider
from llama_cpp_agent.chat_history import BasicChatHistory
from llama_cpp_agent.chat_history.messages import Roles
import gradio as gr
from huggingface_hub import hf_hub_download
import llama_cpp
# Log which llama_cpp build is actually loaded (useful when the Space rebuilds).
print(llama_cpp.__file__)
print(llama_cpp.__version__)
# Token for gated/private Hugging Face repos; None when the secret is unset.
huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
# hf_hub_download(
# repo_id="bartowski/gemma-2-9b-it-GGUF",
# filename="gemma-2-9b-it-Q5_K_M.gguf",
# local_dir="./models"
# )
# hf_hub_download(
# repo_id="bartowski/gemma-2-27b-it-GGUF",
# filename="gemma-2-27b-it-Q5_K_M.gguf",
# local_dir="./models"
# )
# hf_hub_download(
# repo_id="google/gemma-2-2b-it-GGUF",
# filename="2b_it_v2.gguf",
# local_dir="./models",
# token=huggingface_token
# )
# hf_hub_donload(
# repo_id="unsloth/GLM-4.7-Flash-GGUF",
# filename="GLM-4.7-Flash-UD-Q8_K_XL.gguf",
# local_dir="./models",
# token=huggingface_token
# )
# hf_hub_download(
# repo_id="unsloth/gpt-oss-20b-GGUF",
# filename="gpt-oss-20b-Q4_K_M.gguf",
# local_dir="./models",
# token=huggingface_token
# )
# hf_hub_download(
# repo_id="unsloth/gpt-oss-20b-GGUF",
# filename="gpt-oss-20b-Q4_K_M.gguf",
# local_dir="./models",
# token=huggingface_token
# )
# hf_hub_download(
# repo_id="unsloth/Qwen3-Next-80B-A3B-Instruct-GGUF",
# filename="Qwen3-Next-80B-A3B-Instruct-Q4_K_M.gguf",
# local_dir="./models",
# token=huggingface_token
# )
# hf_hub_download(
# repo_id="unsloth/Qwen3-VL-32B-Thinking-GGUF",
# filename="Qwen3-VL-32B-Thinking-Q8_0.gguf",
# local_dir="./models"
# )
# hf_hub_download(
# repo_id="unsloth/Qwen3-Coder-Next-GGUF",
# filename="Qwen3-Coder-Next-Q4_K_M.gguf",
# local_dir="./models"
# )
from huggingface_hub import snapshot_download
# Fetch only the UD-Q3_K_XL shard folder of the Qwen3.5 GGUF repo into ./models/.
snapshot_download(
    repo_id="unsloth/Qwen3.5-122B-A10B-GGUF",
    repo_type="model",
    local_dir="./models/",
    allow_patterns=["UD-Q3_K_XL/*"],  # download just this folder inside the repo
    token=huggingface_token  # only needed if the repo is gated/private
)
# Multimodal projector weights consumed by the chat handler (clip_model_path).
hf_hub_download(
    repo_id="unsloth/Qwen3.5-122B-A10B-GGUF",
    filename="mmproj-BF16.gguf",
    local_dir="./models"
)
_IMAGE_MIME_TYPES = {
# Most common formats
'.png': 'image/png',
'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.gif': 'image/gif',
'.webp': 'image/webp',
# Next-generation formats
'.avif': 'image/avif',
'.jp2': 'image/jp2',
'.j2k': 'image/jp2',
'.jpx': 'image/jp2',
# Legacy / Windows formats
'.bmp': 'image/bmp',
'.ico': 'image/x-icon',
'.pcx': 'image/x-pcx',
'.tga': 'image/x-tga',
'.icns': 'image/icns',
# Professional / Scientific imaging
'.tif': 'image/tiff',
'.tiff': 'image/tiff',
'.eps': 'application/postscript',
'.dds': 'image/vnd-ms.dds',
'.dib': 'image/dib',
'.sgi': 'image/sgi',
# Portable Map formats (PPM/PGM/PBM)
'.pbm': 'image/x-portable-bitmap',
'.pgm': 'image/x-portable-graymap',
'.ppm': 'image/x-portable-pixmap',
# Miscellaneous / Older formats
'.xbm': 'image/x-xbitmap',
'.mpo': 'image/mpo',
'.msp': 'image/msp',
'.im': 'image/x-pillow-im',
'.qoi': 'image/qoi',
}
import os
import base64
def image_to_base64_data_uri(
file_path: str,
*,
fallback_mime: str = "application/octet-stream"
) -> str:
"""
Convert a local image file to a base64-encoded data URI with the correct MIME type.
Supports 20+ image formats (PNG, JPEG, WebP, AVIF, BMP, ICO, TIFF, etc.).
Args:
file_path: Path to the image file on disk.
fallback_mime: MIME type used when the file extension is unknown.
Returns:
A valid data URI string (e.g., data:image/webp;base64,...).
Raises:
FileNotFoundError: If the file does not exist.
OSError: If reading the file fails.
"""
if not os.path.isfile(file_path):
raise FileNotFoundError(f"Image file not found: {file_path}")
extension = os.path.splitext(file_path)[1].lower()
mime_type = _IMAGE_MIME_TYPES.get(extension, fallback_mime)
if mime_type == fallback_mime:
print(f"Warning: Unknown extension '{extension}' for '{file_path}'. "
f"Using fallback MIME type: {fallback_mime}")
try:
with open(file_path, "rb") as img_file:
encoded_data = base64.b64encode(img_file.read()).decode("utf-8")
except OSError as e:
raise OSError(f"Failed to read image file '{file_path}': {e}") from e
return f"data:{mime_type};base64,{encoded_data}"
import os
import tempfile
import requests
from urllib.parse import urlparse
def handle_image_input(image_input):
    """
    Normalize an image reference into a base64 data URI.

    Args:
        image_input: Either a data URI (``data:image/...;base64,...``), which
            is returned unchanged, or an http/https URL, which is downloaded
            to a temporary file, converted, and cleaned up.

    Returns:
        A data URI string.

    Raises:
        ValueError: If the input is neither a data URI nor an http(s) URL.
        requests.HTTPError: If the download fails.
    """
    # Case 1: already a data URI — nothing to do.
    # (Bug fix: previously called an undefined `process_image` helper,
    # which raised NameError on every data-URI input.)
    if image_input.startswith("data:"):
        print("Data URI detected. No download needed.")
        return image_input

    # Case 2: remote URL — download to a temp file, convert, always delete.
    parsed = urlparse(image_input)
    if parsed.scheme in ("http", "https"):
        print("URL detected. Downloading temporarily...")
        response = requests.get(image_input, timeout=30)  # avoid hanging forever
        response.raise_for_status()
        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp_file:
            tmp_file.write(response.content)
            temp_path = tmp_file.name
        try:
            data_uri = image_to_base64_data_uri(temp_path)
        finally:
            # Ensure deletion even if conversion fails.
            os.remove(temp_path)
        return data_uri

    raise ValueError("Unsupported image format.")
import os


def print_tree(start_path="models"):
    """
    Print a tree view of *start_path* to stdout (directories suffixed with '/').

    Bug fix: the old ``root.replace(start_path, "")`` removed EVERY occurrence
    of the prefix, corrupting depth levels whenever *start_path* recurred
    inside a nested path; ``os.path.relpath`` computes the depth correctly.
    """
    for root, dirs, files in os.walk(start_path):
        rel = os.path.relpath(root, start_path)
        level = 0 if rel == "." else rel.count(os.sep) + 1
        indent = "│ " * level
        print(f"{indent}├── {os.path.basename(root)}/")
        subindent = "│ " * (level + 1)
        for f in files:
            print(f"{subindent}├── {f}")


print_tree("models")
# Legacy globals from the pre-cache implementation; load_llama_model's
# LRU cache supersedes them, but they are kept for compatibility.
llm = None
llm_model = None
import json


def str_to_json(str_obj):
    """Parse a JSON document string into the corresponding Python object."""
    return json.loads(str_obj)
def _mode(temperature, top_p, top_k, min_p, presence_penalty,
          repeat_penalty, enable_thinking):
    """Bundle one named sampling configuration into a settings dict."""
    return {
        "temperature": temperature,
        "top_p": top_p,
        "top_k": top_k,
        "min_p": min_p,
        "presence_penalty": presence_penalty,
        "repeat_penalty": repeat_penalty,
        "enable_thinking": enable_thinking,
    }


# Sampling presets selected via the JSON payload's "mode" field.
MODES = {
    # Thinking mode — general chat
    "thinking_general": _mode(1.0, 0.95, 20, 0.0, 1.5, 1.0, True),
    # Thinking mode — coding (lower temperature, no presence penalty)
    "thinking_coding": _mode(0.6, 0.95, 20, 0.0, 0.0, 1.0, True),
    # Instruct mode — general chat
    "instruct_general": _mode(0.7, 0.8, 20, 0.0, 1.5, 1.0, False),
    # Instruct mode — reasoning without the thinking channel
    "instruct_reasoning": _mode(1.0, 0.95, 20, 0.0, 1.5, 1.0, False),
}
import os


def load_llama_model(model_path, enable_thinking):
    """
    Return a (possibly cached) Llama instance for *model_path*.

    Instances are keyed by ``(model_path, enable_thinking)`` in the bounded
    MODEL_CACHE; CACHE_LOCK is held for the whole lookup-or-load so that
    concurrent requests never load the same model twice.
    """
    key = (model_path, enable_thinking)
    with CACHE_LOCK:
        cached = MODEL_CACHE.get(key)
        if cached is not None:
            return cached
        print("Loading model:", key)
        model = Llama(
            model_path=model_path,
            flash_attn=True,
            n_gpu_layers=-1,  # offload every layer to the GPU
            n_batch=2048,
            n_ctx=8196,  # NOTE(review): 8192 was probably intended — confirm
            n_threads=os.cpu_count(),
            n_threads_batch=os.cpu_count(),
            use_mmap=True,
            use_mlock=False,
            mul_mat_q=True,
            chat_handler=Qwen35ChatHandler(
                clip_model_path="models/mmproj-BF16.gguf",
                enable_thinking=enable_thinking,
                # Qwen-VL models need at least 1024 image tokens to work
                # correctly on bbox-grounding tasks.
                image_min_tokens=1024,
            ),
        )
        MODEL_CACHE[key] = model
        return model
# @lru_cache(maxsize=1) # adjust if you want to keep multiple models
# def load_llama_model(
# model_path: str,
# enable_thinking: bool,
# ):
# print("🔹 Loading model ONCE:", model_path, "thinking=", enable_thinking)
# return Llama(
# model_path=model_path,
# flash_attn=True,
# n_gpu_layers=-1,
# n_batch=2048,
# n_ctx=8196,
# n_threads=os.cpu_count(),
# n_threads_batch=os.cpu_count(),
# use_mlock=False,
# use_mmap=True,
# mul_mat_q=True,
# chat_handler=Qwen35ChatHandler(
# clip_model_path="models/mmproj-BF16.gguf",
# enable_thinking=enable_thinking,
# image_min_tokens=1024,
# ),
# )
@spaces.GPU(duration=70)
def respond(
    message,
    history: list[tuple[str, str]],
    model,
    system_message,
    max_tokens,
    temperature,
    top_p,
    top_k,
    repeat_penalty,
):
    """
    Gradio ChatInterface callback: run one chat completion and yield the text.

    *message* is expected to be a JSON payload with keys ``messages``,
    ``max_token`` and optional ``mode`` (a MODES preset name). Anything that
    fails to parse as such a payload is treated as a plain user message.

    NOTE(review): the UI inputs (system_message, max_tokens, temperature,
    top_p, top_k, repeat_penalty, history) are currently ignored — sampling
    settings come entirely from the MODES preset. They remain in the
    signature because gr.ChatInterface passes them positionally.

    Yields:
        The assistant's reply text (single yield; not token-streamed).
    """
    try:
        payload = str_to_json(message)
        messages = payload["messages"]
        max_token = payload["max_token"]
        # Optional preset; fall back without a bare except.
        mode = payload.get("mode", "thinking_general")
    except Exception as e:
        # Not a valid payload: wrap the raw text as a single user message.
        print(e)
        print("not valid json")
        max_token = 8196
        mode = "instruct_general"
        messages = [{"role": "user", "content": str(message)}]

    # Normalize message contents: text parts pass through, image parts are
    # converted to OpenAI-style image_url entries with base64 data URIs.
    final_messages = []
    for msg in messages:
        content = msg["content"]
        if isinstance(content, str):
            final_messages.append(msg)
            continue
        # content is a list, so it may contain image parts
        tmp_list = []
        for item in content:
            if item["type"] == "text":
                tmp_list.append(item)
                continue
            if item["type"] == "image":
                # Accept either 'url' or 'uri' as the image reference key.
                # (Bug fix: the old 'uri' fallback still read item['url'].)
                ref = item.get("url") or item.get("uri")
                if not ref:
                    continue
                try:
                    data_uri = handle_image_input(ref)
                    tmp_list.append(
                        {"type": "image_url", "image_url": {"url": data_uri}}
                    )
                except Exception as e:
                    # Best effort: skip images that fail to resolve.
                    print(e)
        final_messages.append({"role": msg["role"], "content": tmp_list})

    config = MODES[mode]
    llm = load_llama_model(
        model_path=f"models/{model}",
        enable_thinking=config["enable_thinking"],
    )
    completion = llm.create_chat_completion(
        messages=final_messages,
        max_tokens=max_token,
        temperature=config["temperature"],
        top_p=config["top_p"],
        top_k=config["top_k"],
        min_p=config["min_p"],
        repeat_penalty=config["repeat_penalty"],
    )
    print(completion)
    yield str(completion["choices"][0]["message"]["content"])
# UI blurb shown under the chat title. Updated to describe the model this
# Space actually downloads and serves (the old text advertised Gemma 2
# 2B/9B/27B, none of which are offered in the dropdown anymore).
description = """<p align="center">Serving Qwen3.5-122B-A10B (UD-Q3_K_XL GGUF) via llama.cpp</p>
<p><center>
<a href="https://huggingface.co/unsloth/Qwen3.5-122B-A10B-GGUF" target="_blank">[Qwen3.5-122B-A10B GGUF]</a>
</center></p>
"""
import gradio as gr

# Chat UI. NOTE(review): respond() currently ignores the system-message and
# sampling inputs below (it uses MODES presets); they are kept so the
# positional signature of respond() still matches.
demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        gr.Dropdown(
            [
                "UD-Q3_K_XL/Qwen3.5-122B-A10B-UD-Q3_K_XL-00001-of-00003.gguf",
            ],
            value="UD-Q3_K_XL/Qwen3.5-122B-A10B-UD-Q3_K_XL-00001-of-00003.gguf",
            label="Model",
        ),
        gr.Textbox(
            value="You are a helpful assistant.",
            label="System message",
        ),
        gr.Slider(1, 4096, value=2048, step=1, label="Max tokens"),
        gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p"),
        gr.Slider(0, 100, value=40, step=1, label="Top-k"),
        gr.Slider(0.0, 2.0, value=1.1, step=0.1, label="Repetition penalty"),
    ],
    # Bug fix: title previously said "Gemma 2" while serving Qwen3.5.
    title="Chat with Qwen3.5 using llama.cpp",
    description=description,
)

if __name__ == "__main__":
    demo.launch()