anaspro committed · Commit 20c2d55 · 1 Parent(s): 320dd53

upadte

app.py CHANGED
@@ -16,63 +16,18 @@ def load_system_prompt():
 
 DEFAULT_SYSTEM_PROMPT = load_system_prompt()
 
-model_path = "
-
-def apply_gemma_template(messages, add_generation_prompt=True):
-    try:
-        if hasattr(tokenizer, 'apply_chat_template'):
-            return tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=add_generation_prompt)
-
-        # Manual implementation based on the template
-        result = tokenizer.bos_token or ""
-
-        system_message = None
-        if messages and messages[0]['role'] == 'system':
-            system_message = messages[0]['content']
-            messages = messages[1:]
-
-        for i, message in enumerate(messages):
-            if message['role'] == 'assistant':
-                role = 'model'
-            else:
-                role = message['role']
-
-            result += f"<start_of_turn>{role}\n"
-
-            if i == 0 and system_message:
-                result += f"{system_message}\n\n"
-
-            if isinstance(message['content'], str):
-                result += message['content'].strip()
-            elif isinstance(message['content'], list):
-                for item in message['content']:
-                    if item.get('type') == 'text':
-                        result += item['text'].strip()
-
-            result += "<end_of_turn>\n"
-
-        if add_generation_prompt:
-            result += "<start_of_turn>model\n"
-
-        return result
-
-    except Exception as e:
-        print(f"Error in Gemma template: {e}")
-        # Fallback
-        prompt = ""
-        for msg in messages:
-            if msg["role"] == "system":
-                prompt += f"System: {msg['content']}\n"
-            elif msg["role"] == "user":
-                prompt += f"Human: {msg['content']}\n"
-            elif msg["role"] == "assistant":
-                prompt += f"Assistant: {msg['content']}\n"
-        prompt += "Assistant:"
-        return prompt
+model_path = "inceptionai/jais-adapted-7b-chat"
+
+# Jais chat prompts from documentation
+prompt_eng = """### Instruction:Your name is 'Jais', and you are named after Jebel Jais, the highest mountain in UAE. You were made by 'Inception' in the UAE. You are a helpful, respectful, and honest assistant. Always answer as helpfully as possible, while being safe. Complete the conversation between [|Human|] and [|AI|]:
+### Input: [|Human|] {Question}
+[|AI|]
+### Response :"""
+
+prompt_ar = """### Instruction:اسمك "جيس" وسميت على اسم جبل جيس اعلى جبل في الامارات. تم بنائك بواسطة Inception في الإمارات. أنت مساعد مفيد ومحترم وصادق. أجب دائمًا بأكبر قدر ممكن من المساعدة، مع الحفاظ على البقاء أمناً. أكمل المحادثة بين [|Human|] و[|AI|] :
+### Input:[|Human|] {Question}
+[|AI|]
+### Response :"""
 
 # If there is an HF_TOKEN in the environment
 hf_token = os.getenv("HF_TOKEN")
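The two templates above keep the literal {Question} placeholder from the Jais model card; it is filled with the user's text before tokenization. A minimal standalone sketch of that step (the template is abbreviated here, and the question string is a made-up example):

# Minimal sketch: fill the {Question} slot of a Jais-style template.
# Abbreviated copy of the prompt_eng added in this commit; the
# question is a made-up example.
prompt_eng = """### Instruction:You are a helpful assistant. Complete the conversation between [|Human|] and [|AI|]:
### Input: [|Human|] {Question}
[|AI|]
### Response :"""

question = "What is the highest mountain in the UAE?"
filled = prompt_eng.format(Question=question)
print(filled)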
@@ -86,7 +41,7 @@ if tokenizer.pad_token is None:
     tokenizer.pad_token = tokenizer.eos_token
 
 def get_response(text, tokenizer=tokenizer, model=model):
-    """
+    """Same function from the documentation, adapted for the chat model"""
     tokenized = tokenizer(text, return_tensors="pt")
     input_ids, attention_mask = tokenized['input_ids'].to(device), tokenized['attention_mask'].to(device)
     input_len = input_ids.shape[-1]
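The diff leaves the middle of get_response unchanged (the generation call sits between the tokenization shown above and the decoding at the top of the next hunk). For orientation, a minimal sketch of what a helper with this shape typically does, assuming a standard transformers model/tokenizer pair and a device already set up as module globals; the sampling values are placeholders, not the Space's actual settings:

import torch

def get_response(text, tokenizer=tokenizer, model=model):
    """Sketch: tokenize, generate, decode, then strip everything up to the response marker."""
    tokenized = tokenizer(text, return_tensors="pt")
    input_ids = tokenized["input_ids"].to(device)
    attention_mask = tokenized["attention_mask"].to(device)
    with torch.no_grad():
        generate_ids = model.generate(
            input_ids,
            attention_mask=attention_mask,
            max_new_tokens=512,   # placeholder value
            do_sample=True,
            temperature=0.3,      # placeholder value
            pad_token_id=tokenizer.pad_token_id,
        )
    response = tokenizer.batch_decode(
        generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
    )[0]
    return response.split("### Response :")[-1].lstrip()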
@@ -104,82 +59,76 @@ def get_response(text, tokenizer=tokenizer, model=model):
     response = tokenizer.batch_decode(
         generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
     )[0]
-
-
-
-
-
-
-
-
-
-            break
+    response = response.split("### Response :")[-1].lstrip()
+    return response
 
+def format_conversation_history(chat_history):
+    messages = []
+    for item in chat_history:
+        role = item["role"]
+        content = item["content"]
+        if isinstance(content, list):
+            content = content[0]["text"] if content and "text" in content[0] else str(content)
+        messages.append({"role": role, "content": content})
+    return messages
 
+def detect_language(text):
+    """Simple language detection - Arabic vs English"""
+    arabic_chars = sum(1 for char in text if '\u0600' <= char <= '\u06FF')
+    total_chars = len(text.replace(' ', ''))
+
+    if total_chars == 0:
+        return 'ar'  # default to Arabic
+
+    arabic_ratio = arabic_chars / total_chars
+    return 'ar' if arabic_ratio > 0.3 else 'en'
 
 @spaces.GPU()
 def generate_response(input_data, chat_history, max_new_tokens, temperature, top_p, top_k, repetition_penalty):
-
-
-
-
-
-
-            content = item["content"]
-            if isinstance(content, list):
-                content = content[0]["text"] if content and "text" in content[0] else str(content)
-
-
-
-        messages.append({"role": "user", "content": user_content})
-
-        # Use Gemma template for the model
-        prompt = apply_gemma_template(messages)
-
-        print(f"Generated Gemma prompt: {prompt[:200]}...")  # Debug
-
-        # Use the get_response function with the Gemma prompt
-        response = get_response(prompt)
-
-        # Extract only the new reply
-        if "
-            response = response.split("
-        else:
-            # Look for the first actual reply
-            lines = response.split('\n')
-            for i, line in enumerate(lines):
-                if line.strip() and not line.startswith('###') and not line.startswith('Input:') and not line.startswith('[|'):
-                    response = '\n'.join(lines[i:]).strip()
-                    break
+    # Detect language of the current question
+    lang = detect_language(input_data)
+    prompt_template = prompt_ar if lang == 'ar' else prompt_eng
+
+    # Build conversation for Jais format
+    conversation_parts = []
+
+    # Add system prompt as part of the instruction (keep it short for Jais)
+    system_instruction = "اسمك \"أليكس\" وأنت مساعد خدمة العملاء في شركة TechSolutions. مهمتك مساعدة العملاء في حل مشاكلهم مع المنتجات والإجابة عن أسئلتهم حول الخدمات. كن ودوداً وصبوراً ومحترماً. أجب بالعربية أو الإنجليزية حسب تفضيل العميل. ابدأ بالتحية وكن مباشراً في الحلول."
+
+    # Add chat history
+    if chat_history:
+        for item in chat_history:
+            role = item["role"]
+            content = item["content"]
+            if isinstance(content, list):
+                content = content[0]["text"] if content and "text" in content[0] else str(content)
+
+            if role == "user":
+                conversation_parts.append(f"[|Human|] {content}")
+            elif role == "assistant":
+                conversation_parts.append(f"[|AI|] {content}")
+
+    # Add current user message
+    conversation_parts.append(f"[|Human|] {input_data}")
+    conversation_parts.append("[|AI|]")
+
+    # Join conversation
+    conversation = "\n".join(conversation_parts)
+
+    # Create full prompt using Jais format with our system prompt
+    full_prompt = f"### Instruction:{system_instruction}\n### Input:{conversation}\n### Response :"
+
+    try:
+        # Use the get_response function from the documentation
+        response = get_response(full_prompt)
+
+        # Extract only the new reply (after "### Response :")
+        if "### Response :" in response:
+            response = response.split("### Response :")[-1].strip()
 
         if not response:
             response = "أهلاً! أنا أليكس مساعد خدمة العملاء. كيف أقدر أساعدك اليوم؟"
 
-        print(f"Final response: {response[:100]}...")  # Debug
         yield response
 
     except Exception as e:
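The detect_language helper added in this hunk classifies purely by the share of codepoints in the basic Arabic block (U+0600 to U+06FF), defaulting to Arabic on empty input and switching to English below a 0.3 ratio. A standalone copy with a few made-up inputs to show the threshold at work:

# Standalone copy of the detect_language heuristic from the diff,
# with made-up inputs demonstrating the 0.3 threshold.
def detect_language(text):
    arabic_chars = sum(1 for char in text if '\u0600' <= char <= '\u06FF')
    total_chars = len(text.replace(' ', ''))
    if total_chars == 0:
        return 'ar'  # default to Arabic
    return 'ar' if arabic_chars / total_chars > 0.3 else 'en'

print(detect_language("مرحبا كيف حالك؟"))      # -> 'ar'
print(detect_language("hello, how are you?"))  # -> 'en'
print(detect_language(""))                     # -> 'ar' (empty input defaults to Arabic)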
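Taken together, the new generate_response reduces to: pick a language (note that prompt_template is selected but the committed code then builds full_prompt directly from system_instruction), fold the history into [|Human|]/[|AI|] turns, and wrap everything in the Instruction/Input/Response frame. The system instruction itself is an Arabic prompt naming the assistant "Alex" (أليكس), a customer-service agent for TechSolutions. A minimal sketch of the assembly, using a hypothetical two-turn history and an English stand-in for that instruction:

# Minimal sketch of the prompt assembly in generate_response.
# system_instruction is an English stand-in for the Arabic instruction
# in the diff; the history and question are made-up examples.
system_instruction = "You are Alex, a customer-service assistant for TechSolutions."
chat_history = [
    {"role": "user", "content": "My order hasn't arrived."},
    {"role": "assistant", "content": "Sorry to hear that! Could you share the order number?"},
]
input_data = "It's 12345."

conversation_parts = []
for item in chat_history:
    tag = "[|Human|]" if item["role"] == "user" else "[|AI|]"
    conversation_parts.append(f"{tag} {item['content']}")
conversation_parts.append(f"[|Human|] {input_data}")
conversation_parts.append("[|AI|]")

conversation = "\n".join(conversation_parts)
full_prompt = f"### Instruction:{system_instruction}\n### Input:{conversation}\n### Response :"
print(full_prompt)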