Update app.py
app.py CHANGED
@@ -4,8 +4,9 @@ import gradio as gr
 from huggingface_hub import hf_hub_download
 from llama_cpp import Llama
 
-
-
+# Very fast model (135M).
+REPO_ID = os.getenv("GGUF_REPO_ID", "bartowski/SmolLM2-135M-Instruct-GGUF")
+FILENAME = os.getenv("GGUF_FILENAME", "SmolLM2-135M-Instruct-Q4_K_M.gguf")
 
 @lru_cache()
 def load_llm():
@@ -15,18 +16,18 @@ def load_llm():
         local_dir=".",
         local_dir_use_symlinks=False,
     )
+
     llm = Llama(
         model_path=model_path,
-        n_ctx=
+        n_ctx=256,
         n_threads=max(2, os.cpu_count() or 2),
         n_gpu_layers=0,
-        n_batch=
-        verbose=
+        n_batch=16,
+        verbose=True,
     )
     return llm
 
-
-SYSTEM_PROMPT = "Answer in Persian, clearly and very briefly (at most 2 sentences)."
+SYSTEM_PROMPT = "Answer in Persian, very briefly and clearly (at most 2 sentences)."
 
 def build_prompt(message, history):
     prompt = f"<s>[SYSTEM]\n{SYSTEM_PROMPT}\n[/SYSTEM]\n"
@@ -38,11 +39,10 @@ def build_prompt(message, history):
 def respond(message, history):
     llm = load_llm()
     prompt = build_prompt(message, history)
-    # Speed changes: max_tokens and temperature
     stream = llm.create_completion(
         prompt=prompt,
-        max_tokens=
-        temperature=0.5,
+        max_tokens=60,
+        temperature=0.5,
         top_p=0.9,
         stop=["[/ASSISTANT]", "[USER]", "\n[USER]"],
         stream=True,
@@ -56,8 +56,9 @@ def respond(message, history):
 demo = gr.ChatInterface(
     fn=respond,
     title="Very simple chatbot (free CPU)",
-    description="
+    description="SmolLM2-135M (GGUF) with llama.cpp on CPU. A minimal version for learning.",
 )
 
 if __name__ == "__main__":
-
+
+    demo.launch(ssr_mode=False)
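The hunk at @@ -38,11 +39,10 @@ ends at stream=True,; the loop in respond that actually drains the stream falls outside the diff. For orientation, here is a minimal sketch of how a Gradio generator typically consumes a llama-cpp-python streamed completion; the accumulation loop is an assumption, not the file's real code:

# Sketch in the context of app.py: load_llm and build_prompt as defined above.
def respond(message, history):
    llm = load_llm()
    prompt = build_prompt(message, history)
    stream = llm.create_completion(
        prompt=prompt,
        max_tokens=60,
        temperature=0.5,
        top_p=0.9,
        stop=["[/ASSISTANT]", "[USER]", "\n[USER]"],
        stream=True,
    )
    # Assumed consumption loop: each streamed chunk carries one incremental
    # text fragment; yielding the growing string lets the UI stream it.
    text = ""
    for chunk in stream:
        text += chunk["choices"][0]["text"]
        yield text

Because respond is a generator, gr.ChatInterface re-renders the bot message on every yield, which is what makes the reply appear word by word.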
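The small limits in this commit (n_ctx=256, n_batch=16, max_tokens=60) trade context window and reply length for latency on a free CPU Space. And since REPO_ID and FILENAME fall back to os.getenv, another GGUF can be swapped in without editing app.py. A sketch, with illustrative repo and file names that are not part of this commit:

# Illustrative only: override the model before app.py reads the environment.
# Any llama.cpp-compatible GGUF on the Hugging Face Hub works the same way.
import os

os.environ["GGUF_REPO_ID"] = "bartowski/Llama-3.2-1B-Instruct-GGUF"
os.environ["GGUF_FILENAME"] = "Llama-3.2-1B-Instruct-Q4_K_M.gguf"

import app  # app.py evaluates os.getenv at import time, so set these first

On a Space, the same override is usually done through the Space's variables settings rather than in code.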
|