anaspro committed
Commit dc0dce5
1 Parent(s): 9e71c55
updatE
app.py CHANGED
@@ -2,8 +2,7 @@
 
 import os
 import torch
-import
-from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoTokenizer, AutoModelForCausalLM
 import gradio as gr
 import spaces
 
@@ -17,159 +16,127 @@ def load_system_prompt():
 
 DEFAULT_SYSTEM_PROMPT = load_system_prompt()
 
-model_path = "
+model_path = "anaspro/iraqi-7b"
+
+# Jais chat prompts from documentation
+prompt_eng = """### Instruction:Your name is 'Jais', and you are named after Jebel Jais, the highest mountain in UAE. You were made by 'Inception' in the UAE. You are a helpful, respectful, and honest assistant. Always answer as helpfully as possible, while being safe. Complete the conversation between [|Human|] and [|AI|]:
+### Input: [|Human|] {Question}
+[|AI|]
+### Response :"""
+
+prompt_ar = """### Instruction:اسمك "جيس" وسميت على اسم جبل جيس اعلى جبل في الامارات. تم بنائك بواسطة Inception في الإمارات. أنت مساعد مفيد ومحترم وصادق. أجب دائمًا بأكبر قدر ممكن من المساعدة، مع الحفاظ على البقاء أمناً. أكمل المحادثة بين [|Human|] و[|AI|] :
+### Input:[|Human|] {Question}
+[|AI|]
+### Response :"""
 
 # If there is an HF_TOKEN in the environment
 hf_token = os.getenv("HF_TOKEN")
 
-)
-
-# For ZeroGPU, load model without specifying device_map
-# The @spaces.GPU() decorator will handle GPU allocation
-model = AutoModelForCausalLM.from_pretrained(
-    model_path,
-    token=hf_token,
-    trust_remote_code=True,
-    torch_dtype=torch.float16,  # Use float16 for ZeroGPU
-    low_cpu_mem_usage=True
-)
-
-print("Model loaded successfully!")
-
-def generate_with_pipeline(messages, max_new_tokens=256, temperature=0.7, top_p=0.9, top_k=50, repetition_penalty=1.0):
-    """Generate response using the model with messages format"""
-    # Gemma expects messages in format: [{"role": "user", "content": "..."}, {"role": "model", "content": "..."}]
-    # Convert 'assistant' to 'model' for Gemma
-    gemma_messages = []
-
-    for msg in messages:
-        role = msg['role']
-        # Gemma uses 'model' instead of 'assistant'
-        if role == 'assistant':
-            role = 'model'
-        # Gemma doesn't use system role in the same way - prepend to first user message
-        if role == 'system':
-            continue  # We'll handle system prompt differently
-        gemma_messages.append({"role": role, "content": msg['content']})
-
-    # If there's a system prompt, prepend it to the first user message
-    if messages and messages[0]['role'] == 'system' and gemma_messages:
-        system_content = messages[0]['content']
-        if gemma_messages[0]['role'] == 'user':
-            gemma_messages[0]['content'] = f"{system_content}\n\n{gemma_messages[0]['content']}"
-
-    # Apply chat template
-    try:
-        prompt = tokenizer.apply_chat_template(
-            gemma_messages,
-            tokenize=False,
-            add_generation_prompt=True
-        )
-    except Exception as template_error:
-        print(f"Template application error: {template_error}")
-        # Fallback: manually format messages for Gemma
-        prompt = ""
-        for msg in gemma_messages:
-            if msg['role'] == 'user':
-                prompt += f"<start_of_turn>user\n{msg['content']}<end_of_turn>\n"
-            elif msg['role'] == 'model':
-                prompt += f"<start_of_turn>model\n{msg['content']}<end_of_turn>\n"
-        prompt += "<start_of_turn>model\n"
-
-    # Debug: print final prompt
-    print(f"Final prompt preview: {prompt[:200]}...")
-
-    # Tokenize
-    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-
-    # Generate
-    with torch.no_grad():
-        outputs = model.generate(
-            **inputs,
-            max_new_tokens=max_new_tokens,
-            temperature=temperature,
-            top_p=top_p,
-            top_k=top_k,
-            repetition_penalty=repetition_penalty,
-            do_sample=True,
-            pad_token_id=tokenizer.pad_token_id,
-            eos_token_id=tokenizer.eos_token_id
-        )
-
-    # Decode only the new tokens
-    response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
-    return response
-
-def generate_response(message, history, max_new_tokens, temperature, top_p, top_k, repetition_penalty):
-    """
-    Generate response with full conversation history
-
-    Args:
-        message: Current user message (dict with 'text' key when type="messages")
-        history: List of previous messages (already in correct format for type="messages")
-        max_new_tokens, temperature, top_p, top_k, repetition_penalty: Generation parameters
-    """
-    try:
-        # Build messages list - Gemma template expects alternating user/model
-        messages = []
-
-        # Add system message first (will be prepended to first user message)
-        messages.append({"role": "system", "content": DEFAULT_SYSTEM_PROMPT})
-
-        # Add conversation history
-        if history:
-            for msg in history:
-                if isinstance(msg, dict) and 'role' in msg and 'content' in msg:
-                    messages.append({"role": msg['role'], "content": msg['content']})
-
-        # Add current user message
-        if isinstance(message, dict):
-            current_message = message.get("text", "") or message.get("content", "")
-        else:
-            current_message = str(message)
-
-        messages.append({"role": "user", "content": current_message})
-
-        # Debug: print messages structure
-        print(f"Messages sent to model: {len(messages)} messages")
-        for i, msg in enumerate(messages):
-            content_preview = msg['content'][:50] if len(msg['content']) > 50 else msg['content']
-            print(f"  Message {i}: role={msg['role']}, content_preview={content_preview}...")
-
-        # Generate response
-        response = generate_with_pipeline(
-            messages,
-            max_new_tokens=max_new_tokens,
-            temperature=temperature,
-            top_p=top_p,
-            top_k=top_k,
-            repetition_penalty=repetition_penalty
-        )
-
-        if not response or response.strip() == "":
-            response = "أهلاً! أنا أليكس مساعد خدمة العملاء. كيف أقدر أساعدك اليوم؟"
-
-    except Exception as e:
-        print(f"Error in generate_response: {e}")
-        import traceback
-        print(traceback.format_exc())
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+tokenizer = AutoTokenizer.from_pretrained(model_path, token=hf_token)
+model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto", trust_remote_code=True, token=hf_token)
+
+if tokenizer.pad_token is None:
+    tokenizer.pad_token = tokenizer.eos_token
+
+def get_response(text, tokenizer=tokenizer, model=model):
+    """Same function as in the documentation, adapted for the chat model"""
+    tokenized = tokenizer(text, return_tensors="pt")
+    input_ids, attention_mask = tokenized['input_ids'].to(device), tokenized['attention_mask'].to(device)
+    input_len = input_ids.shape[-1]
+    generate_ids = model.generate(
+        input_ids,
+        attention_mask=attention_mask,
+        top_p=0.9,
+        temperature=0.3,
+        max_length=2048,
+        min_length=input_len + 4,
+        repetition_penalty=1.2,
+        do_sample=True,
+        pad_token_id=tokenizer.pad_token_id
+    )
+    response = tokenizer.batch_decode(
+        generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
+    )[0]
+    response = response.split("### Response :")[-1].lstrip()
+    return response
+
+def format_conversation_history(chat_history):
+    messages = []
+    for item in chat_history:
+        role = item["role"]
+        content = item["content"]
+        if isinstance(content, list):
+            content = content[0]["text"] if content and "text" in content[0] else str(content)
+        messages.append({"role": role, "content": content})
+    return messages
+
+def detect_language(text):
+    """Simple language detection - Arabic vs English"""
+    arabic_chars = sum(1 for char in text if '\u0600' <= char <= '\u06FF')
+    total_chars = len(text.replace(' ', ''))
+
+    if total_chars == 0:
+        return 'ar'  # default to Arabic
+
+    arabic_ratio = arabic_chars / total_chars
+    return 'ar' if arabic_ratio > 0.3 else 'en'
+
+@spaces.GPU()
+def generate_response(input_data, chat_history, max_new_tokens, temperature, top_p, top_k, repetition_penalty):
+    # Detect language of the current question
+    lang = detect_language(input_data)
+    prompt_template = prompt_ar if lang == 'ar' else prompt_eng
+
+    # Build conversation for Jais format
+    conversation_parts = []
+
+    # Add system prompt as part of the instruction (keep it short for Jais)
+    system_instruction = "اسمك \"أليكس\" وأنت مساعد خدمة العملاء في شركة TechSolutions. مهمتك مساعدة العملاء في حل مشاكلهم مع المنتجات والإجابة عن أسئلتهم حول الخدمات. كن ودوداً وصبوراً ومحترماً. أجب بالعربية أو الإنجليزية حسب تفضيل العميل. ابدأ بالتحية وكن مباشراً في الحلول."
+
+    # Add chat history
+    if chat_history:
+        for item in chat_history:
+            role = item["role"]
+            content = item["content"]
+            if isinstance(content, list):
+                content = content[0]["text"] if content and "text" in content[0] else str(content)
+
+            if role == "user":
+                conversation_parts.append(f"[|Human|] {content}")
+            elif role == "assistant":
+                conversation_parts.append(f"[|AI|] {content}")
+
+    # Add current user message
+    conversation_parts.append(f"[|Human|] {input_data}")
+    conversation_parts.append("[|AI|]")
+
+    # Join conversation
+    conversation = "\n".join(conversation_parts)
+
+    # Create full prompt using Jais format with our system prompt
+    full_prompt = f"### Instruction:{system_instruction}\n### Input:{conversation}\n### Response :"
+
+    try:
+        # Use the get_response function from the documentation
+        response = get_response(full_prompt)
+
+        # Extract only the new reply (after "### Response :")
+        if "### Response :" in response:
+            response = response.split("### Response :")[-1].strip()
+
+        if not response:
+            response = "أهلاً! أنا أليكس مساعد خدمة العملاء. كيف أقدر أساعدك اليوم؟"
+
+        yield response
+
+    except Exception as e:
+        print(f"Error in generate_response: {e}")
+        import traceback
+        print(traceback.format_exc())
+        yield "أهلاً! أنا أليكس مساعد خدمة العملاء. كيف أقدر أساعدك اليوم؟"
 
-# Create Gradio interface
 demo = gr.ChatInterface(
     fn=generate_response,
     additional_inputs=[
@@ -180,11 +147,11 @@ demo = gr.ChatInterface(
         gr.Slider(label="عقوبة التكرار", minimum=1.0, maximum=2.0, step=0.05, value=1.0)
     ],
     examples=[
-        ["النت عندي معطل من الصبح، تقدر تساعدني؟"],
-        ["عندي مشكلة بالاتصال بالواي فاي"],
-        ["شنو الباقات المتوفرة عندكم؟"],
-        ["كيف أعيد ضبط الجهاز؟"],
-        ["My device is not working properly"],
+        [{"text": "النت عندي معطل من الصبح، تقدر تساعدني؟"}],
+        [{"text": "عندي مشكلة بالاتصال بالواي فاي"}],
+        [{"text": "شنو الباقات المتوفرة عندكم؟"}],
+        [{"text": "كيف أعيد ضبط الجهاز؟"}],
+        [{"text": "My device is not working properly"}],
     ],
     cache_examples=False,
     type="messages",
@@ -196,8 +163,7 @@ demo = gr.ChatInterface(
 - 💬 لهجة محادثة طبيعية
 - 🔧 دعم فني واستكشاف الأخطاء
 - 📋 معلومات الخدمات والإرشاد
--
-- 🎯 مدعوم بـ موديل Gemma-3-4B-IT
+- 🎯 مدعوم بـ موديل Unsloth Meta-Llama-3.1-8B-Instruct (مع تحسينات الأداء)
 
 احجي مع أليكس لحل مشاكلك التقنية، استفسر عن الخدمات، أو احصل على معلومات المنتجات.""",
     fill_height=True,
@@ -211,4 +177,4 @@ demo = gr.ChatInterface(
 )
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
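
For reference, here is a standalone sketch of the prompt string the new generate_response assembles. The two-turn history, the final user turn, and the system text below are hypothetical placeholders; the [|Human|]/[|AI|] turn markers and the "### Instruction:"/"### Input:"/"### Response :" frame are the Jais conventions used in the commit. Note that the committed code selects prompt_ar or prompt_eng from the detected language but then builds full_prompt directly from its own system_instruction, so the selected template is never actually interpolated.

# Standalone sketch of the Jais-style prompt assembly from the new code.
# History, current turn, and system text are hypothetical placeholders.
system_instruction = "<customer-service system prompt>"
history = [
    {"role": "user", "content": "عندي مشكلة بالاتصال بالواي فاي"},
    {"role": "assistant", "content": "جرب إعادة تشغيل الراوتر أولاً."},
]

parts = []
for item in history:
    # Map Gradio's user/assistant roles onto Jais turn markers
    tag = "[|Human|]" if item["role"] == "user" else "[|AI|]"
    parts.append(f"{tag} {item['content']}")
parts.append("[|Human|] المشكلة ما زالت موجودة")  # current user turn
parts.append("[|AI|]")  # open tag the model is expected to complete

conversation = "\n".join(parts)
full_prompt = f"### Instruction:{system_instruction}\n### Input:{conversation}\n### Response :"
print(full_prompt)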
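The language routing rests on detect_language's Unicode-range heuristic: characters in U+0600 through U+06FF count as Arabic, and a ratio above 0.3 selects Arabic. A quick check, with the function's logic copied from the diff and arbitrary sample inputs:

# Quick check of the detect_language heuristic (logic copied from the diff;
# sample inputs are arbitrary).
def detect_language(text):
    arabic_chars = sum(1 for char in text if '\u0600' <= char <= '\u06FF')
    total_chars = len(text.replace(' ', ''))
    if total_chars == 0:
        return 'ar'  # default to Arabic
    return 'ar' if arabic_chars / total_chars > 0.3 else 'en'

assert detect_language("النت عندي معطل من الصبح") == 'ar'
assert detect_language("My device is not working properly") == 'en'
assert detect_language("") == 'ar'  # empty input falls back to Arabic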
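One detail worth noting when reading get_response: it bounds generation with max_length=2048, which counts the prompt plus the reply, rather than passing the UI's max_new_tokens slider through, so the reply budget shrinks as the conversation grows. A minimal illustration of the arithmetic (the token counts are hypothetical):

# Hedged illustration: max_length=2048 covers prompt + reply together,
# so the reply budget is whatever the prompt leaves over (numbers hypothetical).
max_length = 2048
prompt_tokens = 1800  # e.g. a long conversation history
reply_budget = max_length - prompt_tokens
print(reply_budget)   # 248 tokens left for the answer
# Passing max_new_tokens to model.generate() instead would reserve a fixed
# reply budget regardless of prompt length.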