anaspro committed
Commit f183692 · 1 Parent(s): b5d6ec2

update
app.py CHANGED

@@ -17,25 +17,23 @@ def load_system_prompt():
 
 DEFAULT_SYSTEM_PROMPT = load_system_prompt()
 
-# 🔄 Switch the path to the new model
 model_path = "unsloth/gemma-3-4b-it-unsloth-bnb-4bit"
 
 # If HF_TOKEN is set in the environment
 hf_token = os.getenv("HF_TOKEN")
 
-#
+# Initialize pipeline for chat
 pipeline_model = pipeline(
-    "
+    "text-generation",
     model=model_path,
-
+    device_map="auto",
     token=hf_token,
-    trust_remote_code=True
-
+    trust_remote_code=True
 )
 
 def generate_with_pipeline(messages, max_new_tokens=256, temperature=0.7, top_p=0.9, top_k=50, repetition_penalty=1.0):
     """Generate response using the pipeline with messages format"""
-    # Apply chat template
+    # Apply chat template
     prompt = pipeline_model.tokenizer.apply_chat_template(
         messages,
         tokenize=False,
@@ -50,8 +48,7 @@ def generate_with_pipeline(messages, max_new_tokens=256, temperature=0.7, top_p=
         top_k=top_k,
         repetition_penalty=repetition_penalty,
         do_sample=True,
-        return_full_text=False
-        eos_token_id=pipeline_model.tokenizer.eos_token_id,
+        return_full_text=False
     )
     return outputs[0]["generated_text"]
 
@@ -67,31 +64,15 @@ def generate_response(message, history, max_new_tokens, temperature, top_p, top_
         max_new_tokens, temperature, top_p, top_k, repetition_penalty: Generation parameters
     """
     try:
-        #
-        messages = []
-
-        # ✅ System prompt as first user message + model acknowledgment
-        messages.append({
-            "role": "user",
-            "content": DEFAULT_SYSTEM_PROMPT
-        })
-        messages.append({
-            "role": "model",  # ✅ In Gemma 3, use "model" not "assistant"
-            "content": "Understood. I will follow these instructions."
-        })
+        # Build messages list starting with system prompt
+        messages = [{"role": "system", "content": DEFAULT_SYSTEM_PROMPT}]
 
         # Add conversation history
+        # When type="messages", history is a list of message dicts with 'role' and 'content'
         if history:
             for msg in history:
                 if isinstance(msg, dict) and 'role' in msg and 'content' in msg:
-
-                    role = msg['role']
-                    if role == 'assistant':
-                        role = 'model'
-                    messages.append({
-                        "role": role,
-                        "content": msg['content']
-                    })
+                    messages.append({"role": msg['role'], "content": msg['content']})
 
         # Add current user message
         if isinstance(message, dict):
@@ -99,10 +80,7 @@ def generate_response(message, history, max_new_tokens, temperature, top_p, top_
         else:
             current_message = str(message)
 
-        messages.append({
-            "role": "user",
-            "content": current_message
-        })
+        messages.append({"role": "user", "content": current_message})
 
         # Debug: print messages structure
         print(f"Messages sent to model: {len(messages)} messages")
@@ -134,9 +112,9 @@ demo = gr.ChatInterface(
     fn=generate_response,
     additional_inputs=[
         gr.Slider(label="الحد الأقصى للكلمات الجديدة", minimum=64, maximum=4096, step=1, value=2048),
-        gr.Slider(label="درجة الحرارة", minimum=0.1, maximum=2.0, step=0.1, value=
-        gr.Slider(label="Top-p", minimum=0.05, maximum=1.0, step=0.05, value=0.
-        gr.Slider(label="Top-k", minimum=1, maximum=100, step=1, value=
+        gr.Slider(label="درجة الحرارة", minimum=0.1, maximum=2.0, step=0.1, value=0.7),
+        gr.Slider(label="Top-p", minimum=0.05, maximum=1.0, step=0.05, value=0.9),
+        gr.Slider(label="Top-k", minimum=1, maximum=100, step=1, value=50),
         gr.Slider(label="عقوبة التكرار", minimum=1.0, maximum=2.0, step=0.05, value=1.0)
     ],
     examples=[
@@ -148,7 +126,7 @@ demo = gr.ChatInterface(
     ],
     cache_examples=False,
     type="messages",
-    title="دعم عملاء TechSolutions - مساعد أليكس (
+    title="دعم عملاء TechSolutions - مساعد أليكس (العراقي)",
     description="""🤖 مساعد خدمة عملاء ذكي لـ TechSolutions
 
 ✨ المميزات:
@@ -157,8 +135,7 @@ demo = gr.ChatInterface(
 - 🔧 دعم فني واستكشاف الأخطاء
 - 📋 معلومات الخدمات والإرشاد
 - 🧠 **يتذكر المحادثة السابقة** - يمكنك الرجوع للمواضيع السابقة
-- 🎯 مدعوم بـ
-- ⚡ تحسينات Unsloth للأداء والضغط
+- 🎯 مدعوم بـ موديل Unsloth Meta-Llama-3.1-8B-Instruct-bnb-4bit
 
 احجي مع أليكس لحل مشاكلك التقنية، استفسر عن الخدمات، أو احصل على معلومات المنتجات.""",
     fill_height=True,
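For context, the rewritten generation path boils down to: build a messages list, render it with the tokenizer's chat template, and hand the resulting string to a text-generation pipeline. The sketch below is a minimal standalone version of that flow, not the Space's exact code; the sample system prompt and user turn are invented for illustration, and whether a plain "system" turn is accepted depends on the checkpoint's chat template (the removed user/"model" acknowledgment pair was a workaround for templates that reject it).

# Minimal sketch of the new generation path (illustrative values, not the Space's code).
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="unsloth/gemma-3-4b-it-unsloth-bnb-4bit",  # checkpoint named in the diff
    device_map="auto",
)

messages = [
    {"role": "system", "content": "You are Alex, a TechSolutions support agent."},
    {"role": "user", "content": "My router keeps dropping the connection."},
]

# Render the conversation with the model's chat template, then generate.
prompt = pipe.tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
outputs = pipe(
    prompt,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
    top_k=50,
    repetition_penalty=1.0,
    return_full_text=False,  # return only the newly generated text
)
print(outputs[0]["generated_text"])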
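The simplified history loop relies on Gradio's type="messages" contract: with that setting, gr.ChatInterface passes history as a list of {"role": ..., "content": ...} dicts, which is why the commit can append entries directly instead of rebuilding them field by field. A minimal sketch of that contract follows, with a stand-in echo function rather than the Space's generate_response and a single illustrative slider:

# Minimal sketch of the type="messages" history format (stand-in fn, illustrative slider).
import gradio as gr

def respond(message, history, max_new_tokens):
    # history arrives as [{"role": "user" | "assistant", "content": ...}, ...]
    print(f"history has {len(history)} messages")
    return f"You said: {message}"

demo = gr.ChatInterface(
    fn=respond,
    type="messages",
    additional_inputs=[
        gr.Slider(label="max_new_tokens", minimum=64, maximum=4096, step=1, value=256),
    ],
)

if __name__ == "__main__":
    demo.launch()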