|
|
import os |
|
|
import gradio as gr |
|
|
from openai import OpenAI |
|
|
|
|
|
|
|
|
# API key for the NVIDIA endpoint, read from the environment so the
# secret is never hard-coded in the source.
API_KEY = os.getenv("API_KEY")

# OpenAI-compatible client pointed at NVIDIA's hosted inference API.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=API_KEY
)

# Full conversation history as {"role": ..., "content": ...} dicts,
# shared across calls to chat_with_model; grows for the process lifetime.
history = []

# Buffer of uploaded-file texts waiting to be attached to the next user
# message; cleared by chat_with_model after each model call.
files_contents = []
|
|
|
|
|
def _read_uploaded_file(file):
    """Best-effort read of a Gradio upload.

    Gradio passes either a file-like object (older versions) or a plain
    filesystem path / NamedString (newer versions); handle both instead of
    assuming ``.read()`` and ``.name`` exist.  Returns ``(name, text)``
    where ``text`` is a placeholder string on any read failure.
    """
    name = getattr(file, "name", None) or str(file)
    try:
        if hasattr(file, "read"):
            raw = file.read()
            text = raw.decode() if isinstance(raw, bytes) else raw
        else:
            with open(name, "r", encoding="utf-8") as fh:
                text = fh.read()
    except Exception:
        # Preserve the original best-effort behavior: report, don't crash.
        text = "<تعذر قراءة الملف>"
    return name, text


def chat_with_model(message, file=None):
    """Send *message* (plus any pending uploaded files) to the model.

    Appends the user turn and the streamed assistant reply to the global
    ``history`` and returns ``(response_text, history)``.  File contents
    accumulated in ``files_contents`` are attached to this turn's message
    and the buffer is cleared afterwards.
    """
    global history, files_contents

    content = message

    if file:
        name, file_content = _read_uploaded_file(file)
        files_contents.append(f"# محتوى الملف {name}:\n{file_content}")

    if files_contents:
        # Separate the typed message from the attached file dumps; the
        # original concatenated them with no delimiter at all.
        content += "\n\n" + "\n\n".join(files_contents)

    history.append({"role": "user", "content": content})

    completion = client.chat.completions.create(
        model="qwen/qwen3-coder-480b-instruct",
        messages=history,
        temperature=0,
        top_p=0.61,
        max_tokens=16384,
        stream=True,
    )

    # Accumulate the streamed delta chunks into one reply string.
    response_text = ""
    for chunk in completion:
        delta = chunk.choices[0].delta.content
        if delta is not None:
            response_text += delta
            print(delta, end="")  # live console echo of the stream

    history.append({"role": "assistant", "content": response_text})

    # Attach each uploaded file to exactly one turn, then reset the buffer.
    files_contents.clear()

    return response_text, history
|
|
|
|
|
|
|
|
# --- Gradio UI -------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# NVIDIA Qwen3 Chat (ملفات متعددة، رسالة واحدة للنموذج)")

    # type="messages" expects the same {"role", "content"} dicts stored in
    # the global ``history``, so the history list can be bound to it directly.
    chatbot = gr.Chatbot(type="messages")

    msg = gr.Textbox(label="اكتب رسالتك هنا")
    file_input = gr.File(
        label="ارفع ملف Python أو نصي (يمكن رفع أكثر من مرة)",
        file_types=[".py", ".txt"]
    )
    send = gr.Button("إرسال")

    def handle_submit(message, file):
        # Run one chat turn, then refresh the chat view and clear both inputs
        # (empty string resets the textbox, None resets the file picker).
        answer, history = chat_with_model(message, file)
        return gr.update(value=history), "", None

    # Outputs map 1:1 onto handle_submit's three-element return tuple.
    send.click(handle_submit, [msg, file_input], [chatbot, msg, file_input])

demo.launch()