# app.py — Hugging Face Space "nouraoffload" (commit 6e0ad11, verified),
# originally uploaded by osamabyc.
# Supports both streaming and non-streaming replies from the UI.
import os
from typing import Any, Dict, List, Optional, Tuple, Union
import gradio as gr
from huggingface_hub import InferenceClient
import logging
# Configuration
MODEL_ID = "HuggingFaceH4/zephyr-7b-beta"  # model served via huggingface_hub InferenceClient
MAX_CHAT_OUTPUT = 10000  # character cap on an accumulated reply (enforced via _truncate)
# Environment variable names probed, in order, for a Hugging Face API token.
HF_TOKEN_ENV_VARS = (
    "HF_TOKEN", "HUGGINGFACEHUB_API_TOKEN", "HUGGING_FACE_HUB_TOKEN", "HUGGINGFACE_TOKEN"
)
# Disable Gradio analytics and symlink creation before the app starts.
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
os.environ["GRADIO_USE_SYMLINKS"] = "False"
# Logging setup
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Token lookup helpers
def _get_hf_token_from_env() -> Optional[str]:
    """Return the first non-blank Hugging Face token found in the environment.

    Probes each variable name in HF_TOKEN_ENV_VARS in declaration order and
    returns the stripped value of the first one that is set and non-empty.
    Returns None when no variable yields a usable token.
    """
    for var_name in HF_TOKEN_ENV_VARS:
        raw = os.getenv(var_name) or ""
        token = raw.strip()
        if token:
            return token
    return None
def _select_hf_token(user_supplied: Optional[str]) -> Optional[str]:
return user_supplied.strip() if user_supplied else _get_hf_token_from_env()
# One InferenceClient per distinct token; token-less callers share the "anon" slot.
_CLIENT_CACHE: Dict[str, InferenceClient] = {}


def _get_client(token: Optional[str]) -> InferenceClient:
    """Return a cached InferenceClient for MODEL_ID keyed by the given token.

    The client is constructed lazily on first use for a given token and
    reused on subsequent calls.
    """
    cache_key = token or "anon"
    client = _CLIENT_CACHE.get(cache_key)
    if client is None:
        client = InferenceClient(model=MODEL_ID, token=token)
        _CLIENT_CACHE[cache_key] = client
    return client
# Chat-history normalization
# Accepted shapes for incoming chat history:
#   - a list of OpenAI-style message dicts ({"role": ..., "content": ...}), or
#   - a list of Gradio-style (user_message, assistant_message) pairs
#     (as tuples or as 2-element lists).
ChatHistoryType = Union[
    List[Dict[str, Any]],
    List[Tuple[Optional[str], Optional[str]]],
    List[List[Optional[str]]],
]
def _normalize_history(history: ChatHistoryType) -> List[Dict[str, str]]:
out: List[Dict[str, str]] = []
if not history:
return out
if isinstance(history[0], dict):
for msg in history:
role = msg.get("role", "user")
content = msg.get("content", "")
if isinstance(content, dict):
path = content.get("path") or "file"
content = f"[{path}]"
out.append({"role": role, "content": str(content)})
else:
for pair in history:
if isinstance(pair, (list, tuple)):
if pair[0]:
out.append({"role": "user", "content": str(pair[0])})
if len(pair) > 1 and pair[1]:
out.append({"role": "assistant", "content": str(pair[1])})
return out
def _truncate(txt: str, n: int = MAX_CHAT_OUTPUT) -> str:
return txt if len(txt) <= n else txt[:n//2] + "\n...\n[output truncated]\n...\n" + txt[-n//2:]
# Response generator (supports both streaming and non-streaming delivery)
def respond(
    message: Union[str, Dict[str, Any]],
    history: ChatHistoryType,
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
    hf_token: str,
    enable_stream: bool,
):
    """Yield the model's reply for one chat turn.

    Builds an OpenAI-style message list (system + normalized history + new
    user message) and requests a streamed chat completion. When
    ``enable_stream`` is True, each incremental delta string is yielded as it
    arrives; otherwise the deltas are accumulated and a single (possibly
    truncated) reply is yielded at the end. Any exception is logged and
    yielded to the caller as an error string instead of being raised.
    """
    try:
        # Multimodal Gradio inputs arrive as dicts; only the text part is used.
        if isinstance(message, dict):
            message_text = message.get("text", "")
        else:
            message_text = message
        msgs = [{"role": "system", "content": system_message}]
        msgs.extend(_normalize_history(history))
        msgs.append({"role": "user", "content": message_text})
        token = _select_hf_token(hf_token)
        client = _get_client(token)
        accumulated = ""
        # The API is always called in streaming mode; non-stream delivery is
        # implemented locally by accumulating the chunks.
        stream = client.chat_completion(
            messages=msgs,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=True,
        )
        for chunk in stream:
            # assumes OpenAI-style chunk shape: choices[0].delta.content — per
            # huggingface_hub chat_completion streaming output
            delta = chunk.choices[0].delta.content
            if delta:
                accumulated += delta
                if len(accumulated) > MAX_CHAT_OUTPUT:
                    # Hard cap: stop consuming the stream once over the limit.
                    # NOTE(review): in stream mode neither this final delta nor
                    # the truncation marker is ever yielded — the UI just stops.
                    accumulated = _truncate(accumulated)
                    break
                if enable_stream:
                    yield delta  # send only the new fragment
        if not enable_stream:
            yield _truncate(accumulated)
    except Exception as e:
        logger.error(f"Error in respond: {str(e)}")
        yield f"[حدث خطأ: {str(e)}]"
# Gradio UI (Arabic labels; two-column layout: chat on the left, settings on the right)
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("## 💬 واجهة دردشة متطورة")
    with gr.Row():
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(height=500)
            msg = gr.Textbox(label="رسالتك")  # "Your message"
        with gr.Column(scale=1):
            system_msg = gr.Textbox(value="You are a helpful assistant.", label="رسالة النظام")
            max_tokens = gr.Slider(1, 2048, value=512, step=1, label="الحد الأقصى للرموز")
            temperature = gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="الدرجة الحرارية")
            top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")
            hf_token = gr.Textbox(label="رمز Hugging Face (اختياري)", type="password")
            enable_stream = gr.Checkbox(value=False, label="الرد المباشر")  # "live reply" toggle
            submit_btn = gr.Button("إرسال")  # "Send"
            clear_btn = gr.Button("مسح")    # "Clear"

    def user(user_message, history):
        """Clear the textbox and append the new turn as [message, None] to the history."""
        return "", history + [[user_message, None]]

    def bot(history, system_message, max_tokens, temperature, top_p, hf_token, enable_stream):
        """Stream the assistant's reply into the last history slot, yielding UI updates.

        Works for both modes: in stream mode ``respond`` yields deltas which
        are concatenated; in non-stream mode it yields the full reply once.
        """
        response = respond(
            history[-1][0],   # the just-submitted user message
            history[:-1],     # prior turns only
            system_message,
            max_tokens,
            temperature,
            top_p,
            hf_token,
            enable_stream,
        )
        history[-1][1] = ""
        for chunk in response:
            history[-1][1] += chunk
            yield history

    # Enter key and the send button share the same two-step chain:
    # user() updates the history immediately, then bot() fills in the reply.
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, [chatbot, system_msg, max_tokens, temperature, top_p, hf_token, enable_stream], chatbot
    )
    submit_btn.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, [chatbot, system_msg, max_tokens, temperature, top_p, hf_token, enable_stream], chatbot
    )
    clear_btn.click(lambda: None, None, chatbot, queue=False)
# Launch settings
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",  # listen on all interfaces (Space/container deployment)
        server_port=7860,
        debug=False,  # debug mode disabled to avoid problems
        share=False,
        show_error=True,
        favicon_path=None,
        ssl_verify=True,
        ssl_keyfile=None,
        ssl_certfile=None,
        ssl_keyfile_password=None,
        quiet=True,
        show_api=False,
        max_threads=40,
        auth=None,
        auth_message=None,
        prevent_thread_lock=False,
        allowed_paths=None,
        blocked_paths=None,
        root_path="",
        # NOTE(review): `http_headers` is not a documented Blocks.launch()
        # keyword in current Gradio releases — confirm the installed version
        # accepts it, otherwise launch() will raise a TypeError.
        http_headers={"Transfer-Encoding": "chunked"},  # workaround for a Content-Length issue
    )