1yahoo committed on
Commit
df67882
·
verified ·
1 Parent(s): 48ce931

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +160 -24
app.py CHANGED
@@ -2,49 +2,185 @@ import gradio as gr
2
  from openai import OpenAI
3
  import os
4
 
5
- # التوكن من الـ Secrets
 
 
6
  hf_token = os.getenv("HF_TOKEN")
7
 
8
- # العنوان الرسمي والمستقر للـ Router
9
  client = OpenAI(
10
  base_url="https://router.huggingface.co/hf-inference/v1",
11
  api_key=hf_token
12
  )
13
 
14
MODEL_ID = "zetasepic/Qwen2.5-72B-Instruct-abliterated-v2"


def chat_function(message, history):
    """Stream a chat reply for *message* given Gradio-style history.

    *history* is a list of [user_text, assistant_text] pairs. Yields the
    accumulated assistant reply after each streamed chunk so Gradio can
    render it live; on any failure yields a single error string instead
    of raising.
    """
    # Flatten the [user, assistant] pairs into OpenAI-style messages.
    convo = [
        {"role": role, "content": text}
        for user_text, bot_text in history
        for role, text in (("user", user_text), ("assistant", bot_text))
    ]
    convo.append({"role": "user", "content": message})

    try:
        stream = client.chat.completions.create(
            model=MODEL_ID,
            messages=convo,
            max_tokens=2048,
            temperature=0.85,
            stream=True
        )

        accumulated = ""
        for part in stream:
            delta = part.choices[0].delta.content
            if delta:
                accumulated += delta
                yield accumulated

    except Exception as e:
        yield f"⚠️ انكسار في الواقع التقني: {str(e)}"
41
 
42
# NOTE: the 'type' keyword argument was deliberately omitted to avoid an
# error on this Gradio version.
demo = gr.ChatInterface(
    title="دهليز يوسف: Qwen-72B",
    description="بوابة الحوار مع العقل المُحرر من القيود.",
    fn=chat_function,
)
48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
# Launch the app only when executed as a script, not when imported.
if __name__ == "__main__":
    demo.launch()
 
2
from openai import OpenAI
import os

# ---------------------------------------------------------
# 1. Connection and model configuration
# ---------------------------------------------------------
# The access token is injected through the Space's secrets.
hf_token = os.getenv("HF_TOKEN")

# OpenAI-compatible client pointed at the stable HF Inference Router endpoint.
client = OpenAI(
    base_url="https://router.huggingface.co/hf-inference/v1",
    api_key=hf_token,
)

MODEL_ID = "huihui-ai/Qwen2.5-72B-Instruct-abliterated"
16
+
17
# ---------------------------------------------------------
# 2. Back-end logic
# ---------------------------------------------------------
def predict(message, history, system_prompt, temperature, max_tokens):
    """Stream a model reply for *message*.

    Parameters:
        message: current user message (str).
        history: Gradio chat history as [[user_msg, bot_msg], ...].
        system_prompt: optional system-level instruction layer (str).
        temperature: sampling temperature (coerced to float).
        max_tokens: response length cap (coerced to int).

    Yields the accumulated reply text after each streamed chunk so the UI
    can render it live; on failure yields a single error string instead of
    raising, so the Gradio front-end never crashes.
    """
    messages_payload = []

    # Optional customization layer (system prompt), skipped when blank.
    if system_prompt and system_prompt.strip():
        messages_payload.append({"role": "system", "content": system_prompt})

    # Convert Gradio's [[user, bot], ...] history into OpenAI chat messages.
    if history:
        for user_msg, bot_msg in history:
            if user_msg:
                messages_payload.append({"role": "user", "content": user_msg})
            if bot_msg:
                messages_payload.append({"role": "assistant", "content": bot_msg})

    # Current turn.
    messages_payload.append({"role": "user", "content": message})

    try:
        response = client.chat.completions.create(
            model=MODEL_ID,
            messages=messages_payload,
            max_tokens=int(max_tokens),
            temperature=float(temperature),
            stream=True
        )

        partial_message = ""
        for chunk in response:
            # FIX: some stream frames (e.g. a final usage frame) carry an
            # empty `choices` list; indexing it blindly raises IndexError
            # and would abort the stream through the except path below.
            if chunk.choices and chunk.choices[0].delta.content:
                partial_message += chunk.choices[0].delta.content
                yield partial_message

    except Exception as e:
        # Surface the failure inside the chat window rather than crashing.
        yield f"⚠️ System Error: {str(e)}"
59
 
60
# ---------------------------------------------------------
# 3. User interface (front-end / UI)
# ---------------------------------------------------------
# Soft, professional theme.
theme = gr.themes.Soft(
    primary_hue="blue",
    secondary_hue="slate",
    neutral_hue="slate",
)

css = """
footer {visibility: hidden}
.container {max-width: 1200px; margin: auto; padding-top: 20px}
"""

with gr.Blocks(theme=theme, css=css, title="Qwen Pro Interface") as demo:

    gr.Markdown(
        """
        # 🤖 Qwen 2.5-72B Professional Interface
        <div style='opacity: 0.7; font-size: 0.9em;'>
        Advanced interface for 'huihui-ai/Qwen2.5-72B-Instruct-abliterated' via HF Inference Router.
        </div>
        """
    )

    with gr.Tabs():
        # ================= TAB 1: Chat workspace =================
        with gr.TabItem("💬 Chat Workspace", id="chat_tab"):
            with gr.Row():
                # Chat column.
                with gr.Column(scale=4):
                    chatbot = gr.Chatbot(
                        height=600,
                        placeholder="Start a new conversation...",
                        show_copy_button=True,
                        # FIX: avatar_images expects image file paths or URLs;
                        # the previous value (None, "🤖") passed an emoji where
                        # a path belongs, which is not a loadable image. Leave
                        # avatars unset until real image paths are provided.
                        layout="bubble"
                    )

                    with gr.Row():
                        msg_input = gr.Textbox(
                            placeholder="Type your message here...",
                            show_label=False,
                            scale=8,
                            container=False,
                            autofocus=True
                        )
                        send_btn = gr.Button("Send ➤", variant="primary", scale=1)
                        clear_btn = gr.Button("🗑️ Clear", variant="stop", scale=1)

        # ================= TAB 2: Model settings =================
        with gr.TabItem("⚙️ Model Settings & Customization", id="settings_tab"):
            gr.Markdown("### 🧠 Model Behavior (Custom Layer)")

            with gr.Row():
                with gr.Column():
                    system_prompt_input = gr.TextArea(
                        label="System Prompt (Custom Instruction Layer)",
                        value="You are a helpful, professional, and intelligent AI assistant.",
                        placeholder="Define the AI's persona, rules, or role here. (e.g., 'You are a senior python developer...')",
                        lines=5,
                        info="This acts as a base layer for the model's behavior throughout the chat."
                    )

            gr.Markdown("### 🎛️ Inference Parameters")
            with gr.Row():
                with gr.Column():
                    temp_slider = gr.Slider(
                        minimum=0.0, maximum=1.5, value=0.7, step=0.05,
                        label="Temperature (Creativity)",
                        info="Lower values for precise facts, higher for creative writing."
                    )
                with gr.Column():
                    tokens_slider = gr.Slider(
                        minimum=256, maximum=8192, value=2048, step=256,
                        label="Max Tokens (Response Length)",
                        info="Maximum number of tokens the model can generate."
                    )

    # ---------------------------------------------------------
    # 4. Wiring
    # ---------------------------------------------------------

    # Clear the textbox and append the user's message to the chat history.
    def user_turn(user_message, history):
        return "", history + [[user_message, None]]

    # Generate the assistant reply via `predict` and stream it into the chat.
    def bot_turn(history, system_p, temp, tokens):
        user_message = history[-1][0]
        # `predict` is a generator; each yielded value is the reply so far.
        bot_response_generator = predict(user_message, history[:-1], system_p, temp, tokens)

        history[-1][1] = ""
        for chunk in bot_response_generator:
            history[-1][1] = chunk
            yield history

    # Submitting with Enter mirrors the Send button.
    send_event = msg_input.submit(
        user_turn,
        [msg_input, chatbot],
        [msg_input, chatbot],
        queue=False
    ).then(
        bot_turn,
        [chatbot, system_prompt_input, temp_slider, tokens_slider],
        chatbot
    )

    click_event = send_btn.click(
        user_turn,
        [msg_input, chatbot],
        [msg_input, chatbot],
        queue=False
    ).then(
        bot_turn,
        [chatbot, system_prompt_input, temp_slider, tokens_slider],
        chatbot
    )

    # Clear button resets the chat history.
    clear_btn.click(lambda: None, None, chatbot, queue=False)

if __name__ == "__main__":
    demo.queue().launch()