kawkabelaloom committed on
Commit
7e0c4ed
·
verified ·
1 Parent(s): d104410

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -59
app.py CHANGED
@@ -1,83 +1,83 @@
1
  import gradio as gr
2
- from transformers import pipeline
3
  import torch
 
 
4
 
5
- # ===============================
6
- # 🤖 Load Model LOCALLY
7
- # ===============================
8
- MODEL_NAME = "kawkabelaloom/astramind"
9
 
10
- generator = pipeline(
11
- "text-generation",
12
- model=MODEL_NAME,
13
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
14
- device=0 if torch.cuda.is_available() else -1
 
 
 
 
 
 
 
 
 
 
15
  )
16
 
17
- # ===============================
18
- # 🧠 Chat Logic
19
- # ===============================
20
- history = []
21
 
22
- SYSTEM_PROMPT = (
23
- "أنت مساعد عربي ذكي اسمك أسترا. "
24
- "أجب بلغة عربية واضحة ومفيدة.\n\n"
 
25
  )
26
 
27
- def build_prompt(user_message):
28
- prompt = SYSTEM_PROMPT
29
- for h in history[-6:]:
30
- if h["role"] == "user":
31
- prompt += f"المستخدم: {h['content']}\n"
32
- else:
33
- prompt += f"المساعد: {h['content']}\n"
34
  prompt += f"المستخدم: {user_message}\nالمساعد:"
35
  return prompt
36
 
37
- def chat(user_message, chat_history, max_tokens, temperature):
38
- if not user_message.strip():
39
- return chat_history, ""
40
 
41
- prompt = build_prompt(user_message)
42
-
43
- output = generator(
44
- prompt,
45
- max_new_tokens=max_tokens,
46
- temperature=temperature,
47
- do_sample=True,
48
- return_full_text=False
49
- )[0]["generated_text"]
50
 
51
- history.append({"role": "user", "content": user_message})
52
- history.append({"role": "assistant", "content": output})
53
 
54
- chat_history.append((user_message, output))
55
- return chat_history, ""
 
 
 
 
 
56
 
57
- def clear_chat():
58
- history.clear()
59
- return []
60
 
61
- # ===============================
62
- # 🖥️ Gradio UI
63
- # ===============================
64
- with gr.Blocks(title="Astramind") as demo:
65
- gr.Markdown("# 🤖 Astramind Chatbot")
66
- gr.Markdown("تشغيل محلي داخل HuggingFace Space")
67
 
68
- chatbot = gr.Chatbot(height=400)
 
 
 
69
 
70
- with gr.Row():
71
- msg = gr.Textbox(placeholder="اكتب رسالتك...", lines=2)
72
- send = gr.Button("إرسال")
73
 
74
- with gr.Accordion("⚙️ الإعدادات"):
75
- tokens = gr.Slider(50, 500, 200, step=10, label="طول الرد")
76
- temp = gr.Slider(0.1, 1.0, 0.7, step=0.1, label="الإبداع")
 
77
 
78
- send.click(chat, [msg, chatbot, tokens, temp], [chatbot, msg])
79
- msg.submit(chat, [msg, chatbot, tokens, temp], [chatbot, msg])
 
80
 
81
- gr.Button("مسح المحادثة").click(clear_chat, outputs=chatbot)
 
82
 
83
  demo.launch()
 
1
  import gradio as gr
 
2
  import torch
3
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
4
+ import traceback
5
 
6
+ MODEL_NAME = "Qwen/Qwen2.5-7B-Instruct"
 
 
 
7
 
8
+ SYSTEM_PROMPT = "أنت مساعد عربي ذكي، تجيب بوضوح وبأسلوب بسيط ومفيد."
9
+
10
+ # ---------- تحميل الموديل ----------
11
+ print("🔄 Loading tokenizer...")
12
+ tokenizer = AutoTokenizer.from_pretrained(
13
+ MODEL_NAME,
14
+ trust_remote_code=True
15
+ )
16
+
17
+ print("🔄 Loading model (CPU)...")
18
+ model = AutoModelForCausalLM.from_pretrained(
19
+ MODEL_NAME,
20
+ torch_dtype=torch.float32,
21
+ device_map="cpu",
22
+ trust_remote_code=True
23
  )
24
 
25
+ print("✅ Model loaded")
 
 
 
26
 
27
+ generator = pipeline(
28
+ "text-generation",
29
+ model=model,
30
+ tokenizer=tokenizer
31
  )
32
 
33
+ # ---------- تنسيق البرومبت ----------
34
+ def build_prompt(history, user_message):
35
+ prompt = SYSTEM_PROMPT + "\n\n"
36
+ for user, bot in history:
37
+ prompt += f"المستخدم: {user}\n"
38
+ prompt += f"المساعد: {bot}\n"
 
39
  prompt += f"المستخدم: {user_message}\nالمساعد:"
40
  return prompt
41
 
 
 
 
42
 
43
+ # ---------- دالة الرد ----------
44
+ def chat(user_message, history):
45
+ if not user_message.strip():
46
+ return history, "⚠️ اكتب رسالة"
 
 
 
 
 
47
 
48
+ try:
49
+ prompt = build_prompt(history, user_message)
50
 
51
+ output = generator(
52
+ prompt,
53
+ max_new_tokens=256,
54
+ temperature=0.7,
55
+ top_p=0.9,
56
+ do_sample=True
57
+ )
58
 
59
+ full_text = output[0]["generated_text"]
60
+ response = full_text.split("المساعد:")[-1].strip()
 
61
 
62
+ history.append((user_message, response))
63
+ return history, ""
 
 
 
 
64
 
65
+ except Exception as e:
66
+ error_msg = f"❌ Error:\n{str(e)}\n\n{traceback.format_exc()}"
67
+ history.append((user_message, error_msg))
68
+ return history, ""
69
 
 
 
 
70
 
71
+ # ---------- واجهة Gradio ----------
72
+ with gr.Blocks(title="Qwen2.5 Arabic Chatbot") as demo:
73
+ gr.Markdown("# 🤖 Qwen2.5 Arabic Chatbot")
74
+ gr.Markdown("نموذج Qwen2.5 يعمل بدون messages وبدون API")
75
 
76
+ chatbot = gr.Chatbot(height=450)
77
+ msg = gr.Textbox(label="رسالتك", placeholder="اكتب سؤالك هنا...")
78
+ clear = gr.Button("🗑️ مسح المحادثة")
79
 
80
+ msg.submit(chat, [msg, chatbot], [chatbot, msg])
81
+ clear.click(lambda: [], None, chatbot)
82
 
83
  demo.launch()