import os
from threading import Thread

import torch
import gradio as gr
import spaces  # Hugging Face Spaces GPU decorator
from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer


def load_system_prompt():
    """Load the system prompt from system_prompt.txt, with an Arabic fallback."""
    try:
        with open('system_prompt.txt', 'r', encoding='utf-8') as f:
            return f.read().strip()
    except FileNotFoundError:
        # Fallback prompt; Arabic for "You are a helpful, intelligent assistant."
        return "أنت مساعد ذكي مفيد."


DEFAULT_SYSTEM_PROMPT = load_system_prompt()
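
# Usage note (sketch): to customize the assistant's persona, place a plain-text
# system_prompt.txt next to this script; it is read once at startup, e.g.:
#   echo "You are Alex, a bilingual support agent for TechSolutions." > system_prompt.txt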

model_path = "unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit"
hf_token = os.getenv("HF_TOKEN")

# The checkpoint is pre-quantized to 4-bit (bitsandbytes), so it loads in a
# fraction of the full-precision memory footprint.
tokenizer = AutoTokenizer.from_pretrained(model_path, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(
    model_path, torch_dtype="auto", device_map="auto", token=hf_token
)
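
# Alternative (sketch, not used here): quantize a full-precision checkpoint on
# the fly with an explicit BitsAndBytesConfig instead of loading the
# pre-quantized unsloth checkpoint above. Requires the bitsandbytes package;
# the base model id below is an assumption for illustration.
# from transformers import BitsAndBytesConfig
# quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
# model = AutoModelForCausalLM.from_pretrained(
#     "meta-llama/Meta-Llama-3.1-8B-Instruct", quantization_config=quant_config, token=hf_token
# )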


def create_chat_pipeline(tokenizer, model):
    """Create a custom chat pipeline with chat-template and streaming support."""
    def chat_generate(messages, streamer=None, **kwargs):
        # Prefer the tokenizer's built-in chat template when one is defined.
        if hasattr(tokenizer, 'apply_chat_template') and tokenizer.chat_template is not None:
            prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        else:
            # Fallback: flatten the message list into a plain-text transcript.
            prompt = ""
            for msg in messages:
                if msg["role"] == "system":
                    prompt += f"System: {msg['content']}\n"
                elif msg["role"] == "user":
                    prompt += f"Human: {msg['content']}\n"
                elif msg["role"] == "assistant":
                    prompt += f"Assistant: {msg['content']}\n"
            prompt += "Assistant:"

        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

        if streamer:
            # Streaming mode: return the generation kwargs so the caller can
            # run model.generate(**kwargs) in a background thread.
            generation_kwargs = {
                **inputs,
                "max_new_tokens": kwargs.get('max_new_tokens', 512),
                "temperature": kwargs.get('temperature', 0.7),
                "top_p": kwargs.get('top_p', 0.9),
                "top_k": kwargs.get('top_k', 50),
                "repetition_penalty": kwargs.get('repetition_penalty', 1.1),
                "do_sample": True,
                "pad_token_id": tokenizer.eos_token_id,
                "streamer": streamer,
            }
            return generation_kwargs
        else:
            # Blocking mode: generate synchronously and decode the completion.
            with torch.no_grad():
                outputs = model.generate(
                    **inputs,
                    max_new_tokens=kwargs.get('max_new_tokens', 512),
                    temperature=kwargs.get('temperature', 0.7),
                    top_p=kwargs.get('top_p', 0.9),
                    top_k=kwargs.get('top_k', 50),
                    repetition_penalty=kwargs.get('repetition_penalty', 1.1),
                    do_sample=True,
                    pad_token_id=tokenizer.eos_token_id,
                    return_dict_in_generate=True,
                    output_scores=False,
                )
            # Drop the prompt tokens so only the new completion is decoded.
            response = tokenizer.decode(outputs.sequences[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
            return [{"generated_text": response}]

    return chat_generate


pipe = create_chat_pipeline(tokenizer, model)
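
# Example (sketch): calling the pipeline synchronously, without a streamer.
# This path returns output shaped like transformers' text-generation pipeline,
# i.e. [{"generated_text": ...}]; the user message below is illustrative.
# result = pipe(
#     [{"role": "system", "content": DEFAULT_SYSTEM_PROMPT},
#      {"role": "user", "content": "عندي مشكلة بالواي فاي"}],
#     max_new_tokens=64,
# )
# print(result[0]["generated_text"])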


def format_conversation_history(chat_history):
    """Normalize Gradio "messages"-style history into plain role/content dicts."""
    messages = []
    for item in chat_history:
        role = item["role"]
        content = item["content"]
        if isinstance(content, list):
            # Multimodal entries arrive as a list of parts; keep the text part.
            content = content[0]["text"] if content and "text" in content[0] else str(content)
        messages.append({"role": role, "content": content})
    return messages


@spaces.GPU()
def generate_response(input_data, chat_history, max_new_tokens, temperature, top_p, top_k, repetition_penalty):
    """Stream a reply to the latest user message, given the prior chat history."""
    new_message = {"role": "user", "content": input_data}
    # Build the full conversation: system prompt, prior turns, then the new
    # user message, so the model sees the whole exchange.
    messages = [{"role": "system", "content": DEFAULT_SYSTEM_PROMPT}]
    messages.extend(format_conversation_history(chat_history))
    messages.append(new_message)

    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = pipe(
        messages,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty
    )

    # Run generation in a background thread; the streamer yields tokens as
    # they are produced, so the UI updates incrementally.
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    response = ""
    for chunk in streamer:
        response += chunk
        yield response
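
# Example (sketch): generate_response is a generator of cumulative text, so a
# quick smoke test outside Gradio could look like this (arguments assumed):
# last = ""
# for last in generate_response("مرحبا", [], 128, 0.7, 0.9, 50, 1.1):
#     pass
# print(last)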


# Gradio chat UI. Labels and examples are intentionally in Arabic (Iraqi
# dialect) to match the target audience; the sliders map directly onto the
# generation parameters above.
demo = gr.ChatInterface(
    fn=generate_response,
    additional_inputs=[
        gr.Slider(label="الحد الأقصى للرموز الجديدة", minimum=64, maximum=4096, step=1, value=2048),  # max new tokens
        gr.Slider(label="درجة الحرارة", minimum=0.1, maximum=2.0, step=0.1, value=0.7),  # temperature
        gr.Slider(label="Top-p", minimum=0.05, maximum=1.0, step=0.05, value=0.9),
        gr.Slider(label="Top-k", minimum=1, maximum=100, step=1, value=50),
        gr.Slider(label="عقوبة التكرار", minimum=1.0, maximum=2.0, step=0.05, value=1.0)  # repetition penalty
    ],
    examples=[
        [{"text": "النت عندي معطل من الصبح، تقدر تساعدني؟"}],  # "My internet has been down since morning, can you help?"
        [{"text": "عندي مشكلة بالاتصال بالواي فاي"}],  # "I have a problem connecting to Wi-Fi"
        [{"text": "شنو الباقات المتوفرة عندكم؟"}],  # "What plans do you offer?"
        [{"text": "كيف أعيد ضبط الجهاز؟"}],  # "How do I reset the device?"
        [{"text": "My device is not working properly"}],
    ],
    cache_examples=False,
    type="messages",
    title="دعم عملاء TechSolutions - مساعد أليكس (العراقي)",  # "TechSolutions customer support - Alex, the Iraqi assistant"
    description="""🤖 مساعد خدمة عملاء ذكي لـ TechSolutions

✨ المميزات:
- 🌐 دعم ثنائي اللغة (عربي وإنجليزي)
- 💬 لهجة محادثة طبيعية
- 🔧 دعم فني واستكشاف الأخطاء
- 📋 معلومات الخدمات والإرشاد
- 🎯 مدعوم بموديل anaspro العراقي (Llama 3.1 محسّن للعربية العراقية)

احجي مع أليكس لحل مشاكلك التقنية، استفسر عن الخدمات، أو احصل على معلومات المنتجات.""",
    fill_height=True,
    textbox=gr.Textbox(
        label="اكتب رسالتك هنا",  # "Type your message here"
        placeholder="مثال: عندي مشكلة بالجهاز..."  # "Example: I have a problem with my device..."
    ),
    stop_btn="إيقاف التوليد",  # "Stop generation"
    multimodal=False,
    theme=gr.themes.Soft()
)


if __name__ == "__main__":
    demo.launch()
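    # Note (assumption): on Hugging Face Spaces the bare launch() above is
    # sufficient; for local testing you might instead use, e.g.:
    # demo.launch(server_name="0.0.0.0", server_port=7860)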