Update app.py
app.py CHANGED
@@ -10,35 +10,34 @@ from threading import Thread
MODEL_ID = "unsloth/gemma-3-4b-it-unsloth-bnb-4bit"

# ======================================================
-# Default prompt (
+# Default prompt (matches the training style)
# ======================================================
SYSTEM_PROMPT = (
-    "أنت
-    "
-    "
-    "سؤال المستخدم → جواب مباشر من المساعد بدون مقدمات أو تحيات."
+    "أنت مساعد ذكي تفهم اللهجة العراقية والعربية الفصحى. "
+    "جاوب على الأسئلة بإيجاز ووضوح، بنفس لغة المستخدم. "
+    "لا تستخدم مقدمات مثل (مرحباً أو بالتأكيد)، فقط الجواب المباشر."
)

+# ======================================================
+# Load the model (only once)
+# ======================================================
+print("🔄 Loading Gemma 3 4B model...")
+tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
+model = AutoModelForCausalLM.from_pretrained(
+    MODEL_ID,
+    torch_dtype=torch.bfloat16,
+    device_map="auto",
+)
+model.eval()
+print("✅ Model ready!")
+
+
# ======================================================
# Chat function
# ======================================================
@spaces.GPU(duration=60)
def chat(message, history):
-
-
-    # Load the model on the first call inside the GPU worker
-    if "model" not in globals():
-        print("🔄 Loading SILMA Kashif 2B Instruct model inside GPU worker...")
-        tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
-        model = AutoModelForCausalLM.from_pretrained(
-            MODEL_ID,
-            dtype=torch.bfloat16,
-            device_map="auto",
-        )
-        model.eval()
-        print("✅ Model ready!")
-
-    # ✅ Modern Gradio format (type="messages")
+    # Convert the chat history to a suitable format
    messages = []
    for msg in history:
        if msg["role"] == "user":
@@ -46,37 +45,37 @@ def chat(message, history):
        elif msg["role"] == "assistant":
            messages.append({"role": "assistant", "content": msg["content"]})

-    #
-
-    messages.append({"role": "user", "content": merged_prompt})
+    # Append the current question together with the system prompt
+    messages.append({"role": "user", "content": f"{SYSTEM_PROMPT}\n\nالسؤال: {message}"})

-    #
+    # Convert the messages to input ids using Gemma's chat template
    input_ids = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True
    ).to(model.device)

-    # Set up
+    # Set up the streamer for live streaming
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    generation_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
-        max_new_tokens=
-        temperature=0.
+        max_new_tokens=256,
+        temperature=0.3,
        top_p=0.9,
        do_sample=True,
-        repetition_penalty=1.
+        repetition_penalty=1.15,
    )

+    # Generate the text in a separate thread for live streaming
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    partial_text = ""
    for new_text in streamer:
        partial_text += new_text
-        yield partial_text
+        yield partial_text.strip()

    thread.join()

@@ -87,20 +86,20 @@ def chat(message, history):
demo = gr.ChatInterface(
    fn=chat,
    type="messages",
-    title="🇮🇶
+    title="🇮🇶 Gemma 3 4B – Iraqi/Arabic Chat Demo",
    description="""
-    **نموذج
-    مدرّب على
+    **نموذج Gemma 3 4B Instruct (Unsloth Optimized)**
+    مدرّب على العربية والفصحى ومهيّأ للحوار باللهجة العراقية.

    🧠 جرّب:
-    - "
-    - "اشرحلي شنو يعني
-    - "
+    - "شنو معنى إنكار معقول؟"
+    - "اشرحلي شنو يعني تصعيد إداري"
+    - "وضحلي الفرق بين الدليل والعبرة"
    """,
    examples=[
-        ["
-        ["اشرحلي شنو يعني
-        ["
+        ["شنو معنى إنكار معقول؟"],
+        ["اشرحلي شنو يعني تصعيد إداري"],
+        ["وضحلي الفرق بين الدليل والعبرة"],
    ],
    theme=gr.themes.Soft(),
)
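The diff does not show the end of app.py. For reference, a Gradio Space with this structure normally finishes by launching the interface; a minimal sketch of that assumed, unchanged tail:

# Assumed tail of app.py (not part of this diff): launch the Gradio app.
if __name__ == "__main__":
    demo.launch()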