import gradio as gr
from llama_cpp import Llama
from huggingface_hub import hf_hub_download
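# hf_hub_download caches the GGUF file locally and returns its path,
# so repeated launches reuse the download instead of fetching it again.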
# Model setup (Qwen is pinned directly to avoid errors)
llm = Llama(
    model_path=hf_hub_download(
        repo_id="Qwen/Qwen2.5-1.5B-Instruct-GGUF",
        filename="qwen2.5-1.5b-instruct-q4_k_m.gguf",
    ),
    n_ctx=2048,
    n_gpu_layers=0,  # set to 0 so it runs stably on CPU
    verbose=False,
)
def generate_text(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
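    """Stream a ChatML-formatted completion from the local GGUF model.

    Yields the accumulated response text so the chat UI can render it
    incrementally as tokens arrive.
    """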
    response = ""
    # Build the prompt in Qwen's expected ChatML format
    input_prompt = f"<|im_start|>system\n{system_message}<|im_end|>\n"
    for user_msg, assistant_msg in history:
        input_prompt += (
            f"<|im_start|>user\n{user_msg}<|im_end|>\n"
            f"<|im_start|>assistant\n{assistant_msg}<|im_end|>\n"
        )
    input_prompt += f"<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
    output = llm(
        input_prompt,
        temperature=temperature,
        top_p=top_p,
        top_k=40,
        repeat_penalty=1.1,
        max_tokens=max_tokens,
        stop=[
            "<|im_end|>",
            "<|endoftext|>",
        ],
        stream=True,
    )
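    # Each streamed chunk carries the next decoded text fragment;
    # accumulate and re-yield so the UI shows the growing response.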
    for out in output:
        response += out["choices"][0]["text"]
        yield response
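
# Gradio chat UI. Because generate_text is a generator, ChatInterface
# streams each partial response to the browser as it is produced.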
demo = gr.ChatInterface(
    generate_text,
    title="Qwen 2.5 (1.5B) - Fast Server",
    description="Running Qwen 2.5 on CPU via llama.cpp",
    examples=[
        ["Hello, introduce yourself."],
        ["Explain quantum physics simply."],
        ["Write a python code to sum two numbers."],
    ],
    cache_examples=False,
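    # Note: the button parameters below exist in Gradio 4.x only; they were
    # removed in Gradio 5, so pin gradio<5 in requirements if you keep them.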
    retry_btn=None,
    undo_btn="Delete Previous",
    clear_btn="Clear",
    additional_inputs=[
        gr.Textbox(value="You are a helpful AI assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)
if __name__ == "__main__":
    demo.launch()