Update app.py
app.py CHANGED
@@ -2,19 +2,23 @@ import os
 import gradio as gr
 from openai import OpenAI
 
+# 🔒 Secret key from the environment
 API_KEY = os.getenv("API_KEY")
 
+# Create the client
 client = OpenAI(
     base_url="https://integrate.api.nvidia.com/v1",
     api_key=API_KEY
 )
 
+# Conversation history
 history = []
 
 def chat_with_model(message, files=None):
     global history
     content = message
 
+    # Read the contents of the uploaded files
     if files:
         for file in files:
             try:
@@ -23,8 +27,10 @@ def chat_with_model(message, files=None):
                 file_content = "<تعذر قراءة الملف>"
             content += f"\n\n# محتوى الملف {file.name}:\n{file_content}"
 
+    # Add the user message to the history
     history.append({"role": "user", "content": content})
 
+    # Call the model, keeping the conversation context
     completion = client.chat.completions.create(
         model="qwen/qwen3-coder-480b-instruct",
         messages=history,
@@ -40,10 +46,12 @@
         response_text += chunk.choices[0].delta.content
         print(chunk.choices[0].delta.content, end="")
 
+    # Add the reply to the conversation history
     history.append({"role": "assistant", "content": response_text})
 
     return response_text, history
 
+# Gradio interface
 with gr.Blocks() as demo:
     gr.Markdown("# NVIDIA Qwen3 Chat (متعدد الملفات)")
     chatbot = gr.Chatbot(type="messages")
@@ -52,8 +60,7 @@ with gr.Blocks() as demo:
     files_input = gr.File(
         label="ارفع ملفات Python أو نصية",
         file_types=[".py", ".txt"],
-
-        file_multiple=True  # to accept multiple files
+        file_types_multiple=True  # supported after the update
     )
     send = gr.Button("إرسال")
 
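A note on the last hunk: neither file_multiple nor file_types_multiple is a documented argument of gr.File, so the renamed keyword is likely ignored or rejected rather than enabling multiple uploads. In current Gradio releases the option for accepting several files is the file_count parameter; a minimal sketch of that variant, keeping the label and file types from the diff:

files_input = gr.File(
    label="ارفع ملفات Python أو نصية",   # "Upload Python or text files"
    file_types=[".py", ".txt"],
    file_count="multiple",               # Gradio's documented way to accept several uploads
)

With file_count="multiple" the component passes the callback a list of file objects, which matches the for file in files loop in chat_with_model.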
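The hunks elide the body of the try block that reads each upload (old lines 21-22). A sketch of what that section presumably does, assuming each Gradio file object exposes a temporary path via .name, as the f-string in the diff already implies; the fallback string is the one visible in the diff:

if files:
    for file in files:
        try:
            # read the uploaded file from its temporary path (assumed)
            with open(file.name, "r", encoding="utf-8") as f:
                file_content = f.read()
        except Exception:
            file_content = "<تعذر قراءة الملف>"   # "<could not read the file>"
        content += f"\n\n# محتوى الملف {file.name}:\n{file_content}"   # "# Contents of file ...:"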
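The later hunk accumulates chunk.choices[0].delta.content, which implies the completions request is made with stream=True; the request itself sits in the elided lines between hunks. A sketch of the streaming portion under that assumption, with a guard for the empty delta that closes an OpenAI-style stream:

completion = client.chat.completions.create(
    model="qwen/qwen3-coder-480b-instruct",
    messages=history,
    stream=True,   # assumed: the loop below iterates over streamed chunks
)

response_text = ""
for chunk in completion:
    delta = chunk.choices[0].delta.content
    if delta:      # the final chunk's delta content can be None
        response_text += delta
        print(delta, end="")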
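The diff stops before the event wiring, so how send, files_input, and the chatbot are connected is not shown. A hypothetical hookup consistent with chat_with_model returning (response_text, history) and with gr.Chatbot(type="messages"); msg_input and answer_box are illustrative names, not taken from the commit:

msg_input = gr.Textbox()    # hypothetical text input for the user message
answer_box = gr.Textbox()   # hypothetical output box for the raw reply

send.click(
    fn=chat_with_model,
    inputs=[msg_input, files_input],
    outputs=[answer_box, chatbot],   # history is a list of role/content dicts, valid for type="messages"
)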