1yahoo committed on
Commit
1dc8c03
·
verified ·
1 Parent(s): df67882

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +120 -157
app.py CHANGED
@@ -1,186 +1,149 @@
1
  import gradio as gr
2
  from openai import OpenAI
3
  import os
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
- # ---------------------------------------------------------
6
- # 1. إعدادات الاتصال والنموذج
7
- # ---------------------------------------------------------
8
- hf_token = os.getenv("HF_TOKEN")
9
-
10
- client = OpenAI(
11
- base_url="https://router.huggingface.co/hf-inference/v1",
12
- api_key=hf_token
13
- )
14
-
15
- MODEL_ID = "huihui-ai/Qwen2.5-72B-Instruct-abliterated"
16
-
17
- # ---------------------------------------------------------
18
- # 2. منطق المعالجة (Back-end Logic)
19
- # ---------------------------------------------------------
20
- def predict(message, history, system_prompt, temperature, max_tokens):
21
- """
22
- دالة التفاعل مع النموذج.
23
- تستقبل الرسالة، التاريخ، وإعدادات التخصيص.
24
- """
25
- messages_payload = []
26
-
27
- # إضافة طبقة التخصيص (System Prompt) إذا وجدت
28
- if system_prompt and system_prompt.strip():
29
- messages_payload.append({"role": "system", "content": system_prompt})
30
-
31
- # تحويل سجل المحادثة من صيغة Gradio إلى صيغة OpenAI
32
- # history يأتي كقائمة: [[user_msg, bot_msg], ...]
33
- for user_msg, bot_msg in history:
34
- if user_msg:
35
- messages_payload.append({"role": "user", "content": user_msg})
36
- if bot_msg:
37
- messages_payload.append({"role": "assistant", "content": bot_msg})
38
 
39
- # إضافة الرسالة الحالية
40
- messages_payload.append({"role": "user", "content": message})
 
 
 
 
 
41
 
42
  try:
43
  response = client.chat.completions.create(
44
- model=MODEL_ID,
45
- messages=messages_payload,
46
- max_tokens=int(max_tokens),
47
- temperature=float(temperature),
48
  stream=True
49
  )
50
-
51
- partial_message = ""
52
  for chunk in response:
53
  if chunk.choices[0].delta.content:
54
- partial_message += chunk.choices[0].delta.content
55
- yield partial_message
56
-
57
  except Exception as e:
58
- yield f"⚠️ System Error: {str(e)}"
59
-
60
- # ---------------------------------------------------------
61
- # 3. واجهة المستخدم الاحترافية (Front-end / UI)
62
- # ---------------------------------------------------------
63
- # استخدام ثيم ناعم واحترافي
64
- theme = gr.themes.Soft(
65
- primary_hue="blue",
66
- secondary_hue="slate",
67
- neutral_hue="slate",
68
- )
69
 
70
- css = """
71
- footer {visibility: hidden}
72
- .container {max-width: 1200px; margin: auto; padding-top: 20px}
73
- """
74
-
75
- with gr.Blocks(theme=theme, css=css, title="Qwen Pro Interface") as demo:
76
 
77
- gr.Markdown(
78
- """
79
- # 🤖 Qwen 2.5-72B Professional Interface
80
- <div style='opacity: 0.7; font-size: 0.9em;'>
81
- Advanced interface for 'huihui-ai/Qwen2.5-72B-Instruct-abliterated' via HF Inference Router.
82
- </div>
83
- """
84
- )
85
-
86
  with gr.Tabs():
87
- # ================= TAB 1: ساحة المحادثة =================
88
- with gr.TabItem("💬 Chat Workspace", id="chat_tab"):
89
  with gr.Row():
90
- # عمود المحادثة
91
- with gr.Column(scale=4):
92
- chatbot = gr.Chatbot(
93
- height=600,
94
- placeholder="Start a new conversation...",
95
- show_copy_button=True,
96
- avatar_images=(None, "🤖"), # يمكن وضع مسار صورة للمستخدم وللبوت
97
- layout="bubble"
98
- )
99
-
100
- with gr.Row():
101
- msg_input = gr.Textbox(
102
- placeholder="Type your message here...",
103
- show_label=False,
104
- scale=8,
105
- container=False,
106
- autofocus=True
107
- )
108
- send_btn = gr.Button("Send ➤", variant="primary", scale=1)
109
- clear_btn = gr.Button("🗑️ Clear", variant="stop", scale=1)
110
 
111
- # ================= TAB 2: إعدادات النموذج =================
112
- with gr.TabItem("⚙️ Model Settings & Customization", id="settings_tab"):
113
- gr.Markdown("### 🧠 Model Behavior (Custom Layer)")
114
-
115
  with gr.Row():
116
  with gr.Column():
117
- system_prompt_input = gr.TextArea(
118
- label="System Prompt (Custom Instruction Layer)",
119
- value="You are a helpful, professional, and intelligent AI assistant.",
120
- placeholder="Define the AI's persona, rules, or role here. (e.g., 'You are a senior python developer...')",
121
- lines=5,
122
- info="This acts as a base layer for the model's behavior throughout the chat."
123
- )
124
 
125
- gr.Markdown("### 🎛Inference Parameters")
126
  with gr.Row():
127
  with gr.Column():
128
- temp_slider = gr.Slider(
129
- minimum=0.0, maximum=1.5, value=0.7, step=0.05,
130
- label="Temperature (Creativity)",
131
- info="Lower values for precise facts, higher for creative writing."
132
- )
133
  with gr.Column():
134
- tokens_slider = gr.Slider(
135
- minimum=256, maximum=8192, value=2048, step=256,
136
- label="Max Tokens (Response Length)",
137
- info="Maximum number of tokens the model can generate."
138
- )
139
 
140
- # ---------------------------------------------------------
141
- # 4. ربط المكونات (Wiring)
142
- # ---------------------------------------------------------
143
 
144
- # دالة مساعدة لتحديث واجهة المستخدم (مسح صندوق النص وإضافة رسالة المستخدم للشات)
145
- def user_turn(user_message, history):
146
- return "", history + [[user_message, None]]
147
-
148
- # دالة مساعدة لتوليد الرد وتحديث الشات
149
- def bot_turn(history, system_p, temp, tokens):
150
- user_message = history[-1][0]
151
- # نقوم باستدعاء المولد (generator)
152
- bot_response_generator = predict(user_message, history[:-1], system_p, temp, tokens)
153
-
154
- history[-1][1] = ""
155
- for chunk in bot_response_generator:
156
- history[-1][1] = chunk
157
- yield history
158
-
159
- # عند الضغط على زر الإرسال أو Enter
160
- send_event = msg_input.submit(
161
- user_turn,
162
- [msg_input, chatbot],
163
- [msg_input, chatbot],
164
- queue=False
165
- ).then(
166
- bot_turn,
167
- [chatbot, system_prompt_input, temp_slider, tokens_slider],
168
- chatbot
169
- )
170
-
171
- click_event = send_btn.click(
172
- user_turn,
173
- [msg_input, chatbot],
174
- [msg_input, chatbot],
175
- queue=False
176
- ).then(
177
- bot_turn,
178
- [chatbot, system_prompt_input, temp_slider, tokens_slider],
179
- chatbot
180
- )
181
-
182
- # زر المسح
183
- clear_btn.click(lambda: None, None, chatbot, queue=False)
184
 
185
  if __name__ == "__main__":
186
  demo.queue().launch()
 
1
  import gradio as gr
2
  from openai import OpenAI
3
  import os
4
+ import chromadb
5
+ from chromadb.utils import embedding_functions
6
+ import pypdf
7
+ import uuid
8
+
9
# --- Technical setup ---
# Persist the vector store under /data when it exists (e.g. HF Spaces
# persistent storage); otherwise fall back to a local working directory.
STORAGE_PATH = "/data/neural_memory" if os.path.exists("/data") else "./neural_memory"
chroma_client = chromadb.PersistentClient(path=STORAGE_PATH)
# Chroma's built-in default embedding function (presumably a local
# sentence-transformers model — TODO confirm for this chromadb version).
default_ef = embedding_functions.DefaultEmbeddingFunction()
# Single shared collection used by both ingestion and retrieval below.
collection = chroma_client.get_or_create_collection(name="advanced_brain", embedding_function=default_ef)
14
+
15
# --- 1. Semantic ingestion ---
def advanced_ingest(file_path):
    """Ingest a PDF or text file into the vector store.

    Splits the extracted text into fixed-size chunks with a 20% overlap
    (so meaning is not cut at chunk boundaries) and stores every chunk in
    the module-level Chroma ``collection`` with metadata: source filename,
    page number, and chunk length.

    Args:
        file_path: Path to a ``.pdf`` file or a UTF-8 text file.

    Returns:
        A human-readable (Arabic) status string — success with the chunk
        count, or a failure description. Never raises.
    """
    try:
        filename = os.path.basename(file_path)

        if file_path.endswith('.pdf'):
            reader = pypdf.PdfReader(file_path)
            # extract_text() may return None (e.g. image-only pages) —
            # coerce to "" so the normalization below cannot crash.
            pages_data = [(p.extract_text() or "", i + 1) for i, p in enumerate(reader.pages)]
        else:
            # Non-PDF files are treated as a single "page" of plain text.
            with open(file_path, 'r', encoding='utf-8') as f:
                pages_data = [(f.read(), 1)]

        documents, metadatas, ids = [], [], []

        # Chunking settings: 1000 chars per chunk, 200-char (20%) overlap.
        chunk_size = 1000
        overlap = 200

        for content, page_num in pages_data:
            content = content.replace('\t', ' ').strip()  # simple normalization

            for i in range(0, len(content), chunk_size - overlap):
                chunk = content[i : i + chunk_size]
                documents.append(chunk)
                metadatas.append({
                    "source": filename,
                    "page": page_num,
                    "length": len(chunk)
                })
                ids.append(str(uuid.uuid4()))

        # Guard: chromadb raises on an empty add(); report it as a normal
        # failure message instead of an opaque exception.
        if not documents:
            return f"❌ فشل الحقن: '{filename}'"

        collection.add(documents=documents, metadatas=metadatas, ids=ids)
        # Bug fix: interpolate the actual filename (the previous message
        # contained a hard-coded placeholder and left `filename` unused).
        return f"✅ تم حقن {len(documents)} قطعة معرفية من '{filename}' مع حفظ الميتا-داتا."
    except Exception as e:
        return f"❌ فشل الحقن: {str(e)}"
52
+
53
# --- 2. Filtered retrieval (smart query) ---
def smart_query(user_query, threshold=0.6):
    """Retrieve stored chunks relevant to *user_query*, filtered by score.

    Over-fetches 10 candidates from the collection, converts each Chroma
    distance to a nominal similarity score, and keeps only hits at or
    above *threshold*, each prefixed with its source/page header.
    """
    results = collection.query(
        query_texts=[user_query],
        n_results=10,
        include=['documents', 'metadatas', 'distances']
    )

    docs = results['documents'][0]
    metas = results['metadatas'][0]
    dists = results['distances'][0]

    kept = []
    for doc, meta, dist in zip(docs, metas, dists):
        # In ChromaDB a SMALLER distance means MORE similar (0 = identical),
        # so treat (1 - distance) as a nominal similarity score.
        similarity = 1 - dist
        if similarity < threshold:
            continue
        header = f"[المصدر: {meta['source']} | صفحة: {meta['page']}]"
        kept.append(f"{header}\n{doc}")

    if not kept:
        return "لم يتم العثور على معرفة وثيقة الصلة."
    return "\n\n---\n\n".join(kept)
73
 
74
# --- 3. The neural engine ---
def neural_engine(message, history, system_prompt, base_url, api_key, temp, score_threshold):
    """Stream a RAG-augmented chat completion for *message*.

    Builds an OpenAI-compatible client (falling back to the HF inference
    router and the HF_TOKEN env var when no endpoint/key is given), injects
    the retrieved knowledge into the system prompt, replays the chat
    history, and yields the cumulative response text chunk by chunk.
    Errors are yielded as a user-visible string instead of raising.
    """
    client = OpenAI(
        base_url=base_url or "https://router.huggingface.co/hf-inference/v1",
        api_key=api_key or os.getenv("HF_TOKEN")
    )

    # Retrieve supporting context from the vector store.
    knowledge = smart_query(message, threshold=score_threshold)

    enhanced_system = f"{system_prompt}\n\n[سياق المعرفة الموثق]:\n{knowledge}"

    messages = [{"role": "system", "content": enhanced_system}]
    for u, a in history:
        # Guard against None/empty turns so we never send a null content
        # (the UI may hold an in-progress [user, ""] pair while streaming).
        if u:
            messages.append({"role": "user", "content": u})
        if a:
            messages.append({"role": "assistant", "content": a})
    messages.append({"role": "user", "content": message})

    try:
        response = client.chat.completions.create(
            model="huihui-ai/Qwen2.5-72B-Instruct-abliterated",
            messages=messages,
            temperature=temp,
            stream=True
        )
        full_resp = ""
        for chunk in response:
            if chunk.choices[0].delta.content:
                full_resp += chunk.choices[0].delta.content
                yield full_resp
    except Exception as e:
        yield f"⚠️ Neural Glitch: {str(e)}"
 
 
 
 
 
 
 
 
 
 
105
 
106
# --- 4. Advanced user interface ---
with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal"), css=".gradio-container {background: #f9f9f9}") as demo:
    gr.Markdown("# 🧬 Neural OS v4.0 (Semantic Edition)")

    with gr.Tabs():
        # Tab 1: the chat console.
        with gr.Tab("💬 Interaction Console"):
            chatbot = gr.Chatbot(height=600, show_label=False)
            with gr.Row():
                msg_input = gr.Textbox(placeholder="اسأل العقل الاصطناعي...", scale=8)
                submit_btn = gr.Button("نفاذ", variant="primary")

        # Tab 2: document upload / ingestion into the vector store.
        with gr.Tab("📚 Knowledge Vault"):
            with gr.Row():
                with gr.Column():
                    file_input = gr.File(label="وثائق التدريب (PDF/TXT)")
                    upload_btn = gr.Button("بدء المعالجة الدلالية", variant="secondary")
                with gr.Column():
                    status_log = gr.TextArea(label="سجل العمليات", interactive=False)

        # Tab 3: model/endpoint configuration.
        with gr.Tab("Control Panel"):
            with gr.Row():
                with gr.Column():
                    sys_p = gr.TextArea(label="System Persona", value="أنت محرك معرفي يستند إلى وثائق رسمية.")
                    score_th = gr.Slider(0.0, 1.0, 0.4, label="Relevance Threshold", info="كلما زاد، كان الاسترجاع أدق وأقل كمية.")
                with gr.Column():
                    endpoint = gr.Textbox(label="API Endpoint")
                    token = gr.Textbox(label="Access Token", type="password")
                    temp = gr.Slider(0, 1.5, 0.7, label="Temperature")

    # --- Wiring ---

    def _ingest_files(files):
        # Robustness: gr.File without file_count="multiple" passes a single
        # file object, not a list — normalize so both configurations work.
        if files is None:
            return ""
        if not isinstance(files, list):
            files = [files]
        return "\n".join(advanced_ingest(f.name) for f in files)

    upload_btn.click(_ingest_files, [file_input], [status_log])

    def chat_logic(m, h, sp, url, t, tmp, th):
        # Bug fix: snapshot the history BEFORE appending the in-progress
        # pair. neural_engine is a generator and reads its arguments lazily,
        # so passing `h` directly would already contain [m, ""] by the time
        # it runs — duplicating the user message in the model payload.
        gen = neural_engine(m, list(h), sp, url, t, tmp, th)
        h.append([m, ""])
        for res in gen:
            h[-1][1] = res
            yield "", h

    # Both the button and Enter in the textbox trigger the same streaming flow.
    submit_btn.click(chat_logic, [msg_input, chatbot, sys_p, endpoint, token, temp, score_th], [msg_input, chatbot])
    msg_input.submit(chat_logic, [msg_input, chatbot, sys_p, endpoint, token, temp, score_th], [msg_input, chatbot])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
147
 
148
if __name__ == "__main__":
    # Enable request queuing (needed for the streaming generator handlers)
    # before launching the Gradio server.
    demo.queue().launch()