anaspro committed
Commit 441e4e3 · Parent: 20c2d55 · "upadte"

Files changed:
- app.py (+45 -98)
- system_prompt.txt (+1 -1)
- test_model.py (+57 -0)
app.py CHANGED

@@ -2,7 +2,8 @@
 
 import os
 import torch
-
+import transformers
+from transformers import pipeline
 import gradio as gr
 import spaces
 

@@ -16,115 +17,61 @@ def load_system_prompt():
 
 DEFAULT_SYSTEM_PROMPT = load_system_prompt()
 
-model_path = "
+model_path = "unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit"
 
-# Jais chat prompts from documentation
-prompt_eng = """### Instruction:Your name is 'Jais', and you are named after Jebel Jais, the highest mountain in UAE. You were made by 'Inception' in the UAE. You are a helpful, respectful, and honest assistant. Always answer as helpfully as possible, while being safe. Complete the conversation between [|Human|] and [|AI|]:
-### Input: [|Human|] {Question}
-[|AI|]
-### Response :"""
-
-prompt_ar = """### Instruction:اسمك "جيس" وسميت على اسم جبل جيس اعلى جبل في الامارات. تم بنائك بواسطة Inception في الإمارات. أنت مساعد مفيد ومحترم وصادق. أجب دائمًا بأكبر قدر ممكن من المساعدة، مع الحفاظ على البقاء أمناً. أكمل المحادثة بين [|Human|] و[|AI|] :
-### Input:[|Human|] {Question}
-[|AI|]
-### Response :"""
 
 # If HF_TOKEN is set in the environment
 hf_token = os.getenv("HF_TOKEN")
 
-model
+# Initialize pipeline for chat
+pipeline_model = pipeline(
+    "text-generation",
+    model=model_path,
+    device_map="auto",
+    token=hf_token,
+    trust_remote_code=True
+)
+
+def generate_with_pipeline(messages, max_new_tokens=256, temperature=0.7, top_p=0.9):
+    """Generate a response using the pipeline with the messages format."""
+    # Apply the chat template for unsloth models
+    prompt = pipeline_model.tokenizer.apply_chat_template(
+        messages,
+        tokenize=False,
+        add_generation_prompt=True
+    )
+
+    outputs = pipeline_model(
+        prompt,
+        max_new_tokens=max_new_tokens,
+        temperature=temperature,
+        top_p=top_p,
         do_sample=True,
-
+        return_full_text=False
     )
-)[0]
-    response = response.split("### Response :")[-1].lstrip()
-    return response
-
-def format_conversation_history(chat_history):
-    messages = []
-    for item in chat_history:
-        role = item["role"]
-        content = item["content"]
-        if isinstance(content, list):
-            content = content[0]["text"] if content and "text" in content[0] else str(content)
-        messages.append({"role": role, "content": content})
-    return messages
-
-def detect_language(text):
-    """Simple language detection - Arabic vs English"""
-    arabic_chars = sum(1 for char in text if '\u0600' <= char <= '\u06FF')
-    total_chars = len(text.replace(' ', ''))
-
-    if total_chars == 0:
-        return 'ar'  # default to Arabic
-
-    arabic_ratio = arabic_chars / total_chars
-    return 'ar' if arabic_ratio > 0.3 else 'en'
+    return outputs[0]["generated_text"]
+
 
 @spaces.GPU()
 def generate_response(input_data, chat_history, max_new_tokens, temperature, top_p, top_k, repetition_penalty):
-    # Build conversation for Jais format
-    conversation_parts = []
-
-    # Add system prompt as part of the instruction (keep it short for Jais)
-    system_instruction = "اسمك \"أليكس\" وأنت مساعد خدمة العملاء في شركة TechSolutions. مهمتك مساعدة العملاء في حل مشاكلهم مع المنتجات والإجابة عن أسئلتهم حول الخدمات. كن ودوداً وصبوراً ومحترماً. أجب بالعربية أو الإنجليزية حسب تفضيل العميل. ابدأ بالتحية وكن مباشراً في الحلول."
-
-    # Add chat history
-    if chat_history:
-        for item in chat_history:
-            role = item["role"]
-            content = item["content"]
-            if isinstance(content, list):
-                content = content[0]["text"] if content and "text" in content[0] else str(content)
-
-            if role == "user":
-                conversation_parts.append(f"[|Human|] {content}")
-            elif role == "assistant":
-                conversation_parts.append(f"[|AI|] {content}")
-
-    # Add current user message
-    conversation_parts.append(f"[|Human|] {input_data}")
-    conversation_parts.append("[|AI|]")
-
-    # Join conversation
-    conversation = "\n".join(conversation_parts)
-
-    response = get_response(full_prompt)
+    try:
+        # Build messages for the pipeline (without chat history)
+        messages = []
+
+        # Add the system message
+        system_content = DEFAULT_SYSTEM_PROMPT
+        messages.append({"role": "system", "content": system_content})
+
+        # Add only the current user message
+        messages.append({"role": "user", "content": input_data})
+
+        # Generate the response via the pipeline
+        response = generate_with_pipeline(
+            messages,
+            max_new_tokens=max_new_tokens,
+            temperature=temperature,
+            top_p=top_p
+        )
 
         if not response:
            response = "أهلاً! أنا أليكس مساعد خدمة العملاء. كيف أقدر أساعدك اليوم؟"
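Review note: the rewritten generate_response() sends only the system prompt and the current user message, so multi-turn context is dropped along with the removed format_conversation_history() helper. If history should be restored later, a minimal sketch that rebuilds the messages list from the same chat_history format the old helper handled (build_messages is a hypothetical name; this is an illustration, not part of the commit):

def build_messages(system_prompt, chat_history, user_input):
    # Hypothetical helper mirroring the removed format_conversation_history():
    # history items are {"role", "content"} dicts whose content may arrive as
    # a list of parts carrying a "text" field.
    messages = [{"role": "system", "content": system_prompt}]
    for item in chat_history or []:
        content = item["content"]
        if isinstance(content, list):
            content = content[0]["text"] if content and "text" in content[0] else str(content)
        messages.append({"role": item["role"], "content": content})
    messages.append({"role": "user", "content": user_input})
    return messages

The resulting list can go straight into generate_with_pipeline(), since apply_chat_template() accepts full multi-turn message lists.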
@@ -163,7 +110,7 @@ demo = gr.ChatInterface(
 - 💬 لهجة محادثة طبيعية
 - 🔧 دعم فني واستكشاف الأخطاء
 - 📋 معلومات الخدمات والإرشاد
-- 🎯 مدعوم بـ موديل Unsloth Meta-Llama-3.1-8B-Instruct (مع تحسينات الأداء)
+- 🎯 مدعوم بـ موديل Unsloth Meta-Llama-3.1-8B-Instruct-bnb-4bit (مع تحسينات الأداء والضغط)
 
 احجي مع أليكس لحل مشاكلك التقنية، استفسر عن الخدمات، أو احصل على معلومات المنتجات.""",
 fill_height=True,
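Review note: generate_response() still receives top_k and repetition_penalty from the UI, but the new generate_with_pipeline() never forwards them, so those sliders now have no effect. The transformers text-generation pipeline passes extra call kwargs through to generate(), so forwarding them would be a small follow-up; a sketch of the adjusted call, assuming the two parameters are also added to generate_with_pipeline()'s signature (illustration only, not part of the commit):

outputs = pipeline_model(
    prompt,
    max_new_tokens=max_new_tokens,
    temperature=temperature,
    top_p=top_p,
    top_k=top_k,                            # forwarded from the UI slider
    repetition_penalty=repetition_penalty,  # forwarded from the UI slider
    do_sample=True,
    return_full_text=False
)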
system_prompt.txt CHANGED

@@ -1 +1 @@
-تحدث
+اسمك "أليكس" وأنت مساعد خدمة العملاء في شركة TechSolutions. مهمتك مساعدة العملاء في حل مشاكلهم مع المنتجات والإجابة عن أسئلتهم حول الخدمات. كن ودوداً وصبوراً ومحترماً. أجب بالعربية أو الإنجليزية حسب تفضيل العميل. ابدأ بالتحية وكن مباشراً في الحلول. تحدث باللهجة العراقية البغدادية الطبيعية.
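Note: the new prompt translates roughly as: "Your name is 'Alex' and you are a customer-service assistant at TechSolutions. Your job is to help customers solve their product problems and answer their questions about the services. Be friendly, patient, and respectful. Reply in Arabic or English according to the customer's preference. Start with a greeting and be direct with solutions. Speak in a natural Baghdadi Iraqi dialect." The old file contained only the single word تحدث ("speak"). The file is consumed through load_system_prompt(), whose body is outside this diff; assuming it simply reads system_prompt.txt next to app.py, a minimal sketch of such a loader (an assumption, not code from this commit):

def load_system_prompt(path="system_prompt.txt"):
    # Read the prompt file shipped alongside app.py; fall back to an empty
    # string so the app can still start if the file is missing.
    try:
        with open(path, encoding="utf-8") as f:
            return f.read().strip()
    except OSError:
        return ""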
test_model.py ADDED

@@ -0,0 +1,57 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import os
+import torch
+import transformers
+from transformers import pipeline
+
+model_path = "unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit"
+
+# If HF_TOKEN is set in the environment
+hf_token = os.getenv("HF_TOKEN")
+
+print("Loading model...")
+try:
+    # Initialize pipeline for chat
+    pipeline_model = pipeline(
+        "text-generation",
+        model=model_path,
+        device_map="auto",
+        token=hf_token,
+        trust_remote_code=True
+    )
+
+    print("Model loaded successfully!")
+
+    # Test with a simple message
+    messages = [
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": "Hello!"},
+    ]
+
+    print("Testing generation...")
+    # Apply the chat template for unsloth models
+    prompt = pipeline_model.tokenizer.apply_chat_template(
+        messages,
+        tokenize=False,
+        add_generation_prompt=True
+    )
+
+    outputs = pipeline_model(
+        prompt,
+        max_new_tokens=50,
+        temperature=0.7,
+        top_p=0.9,
+        do_sample=True,
+        return_full_text=False
+    )
+
+    response = outputs[0]["generated_text"]
+    print(f"Test response: {response}")
+    print("✅ Model test successful!")
+
+except Exception as e:
+    print(f"❌ Error: {e}")
+    import traceback
+    traceback.print_exc()