Spaces:
Sleeping
Sleeping
Update ui.py
#2
by
Nexari-Research - opened
ui.py
CHANGED
|
@@ -1,37 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import asyncio
|
| 3 |
|
| 4 |
def create_ui(generate_fn):
|
| 5 |
"""
|
| 6 |
-
|
| 7 |
-
|
| 8 |
"""
|
| 9 |
|
| 10 |
async def chat_wrapper(message, history):
|
| 11 |
messages = []
|
| 12 |
-
# Convert history
|
| 13 |
for human, ai in history:
|
| 14 |
messages.append({"role": "user", "content": human})
|
| 15 |
messages.append({"role": "assistant", "content": ai})
|
|
|
|
| 16 |
messages.append({"role": "user", "content": message})
|
| 17 |
|
| 18 |
-
#
|
|
|
|
| 19 |
async for chunk in generate_fn(messages):
|
| 20 |
-
|
|
|
|
| 21 |
|
|
|
|
| 22 |
custom_css = """
|
| 23 |
-
body { background-color: #0b0f19; }
|
| 24 |
.gradio-container { font-family: 'Inter', sans-serif; max-width: 900px !important; margin: auto; }
|
| 25 |
-
footer { display: none
|
| 26 |
"""
|
| 27 |
|
| 28 |
demo = gr.ChatInterface(
|
| 29 |
fn=chat_wrapper,
|
| 30 |
-
title="Nexari
|
| 31 |
-
description="Official
|
| 32 |
theme="soft",
|
| 33 |
-
examples=[
|
| 34 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 35 |
)
|
| 36 |
|
| 37 |
return demo
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
ui.py - Nexari Chat Interface
|
| 3 |
+
Description: Handles the Gradio UI layout and styling.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
import gradio as gr
|
| 7 |
import asyncio
|
| 8 |
|
| 9 |
def create_ui(generate_fn):
    """Build and return the Nexari Gradio chat interface.

    generate_fn: an async function that accepts a list of
    OpenAI-style message dicts ({"role": ..., "content": ...})
    and yields response strings as a stream of partials.

    Returns the un-launched gr.ChatInterface instance.
    """

    async def chat_wrapper(message, history):
        # Flatten Gradio's [[user, bot], ...] history into OpenAI-style
        # role/content dicts, then append the incoming user turn.
        convo = [
            {"role": role, "content": text}
            for user_turn, bot_turn in history
            for role, text in (("user", user_turn), ("assistant", bot_turn))
        ]
        convo.append({"role": "user", "content": message})

        # Relay the backend stream straight through to the UI: each
        # yielded value replaces the assistant message shown so far.
        async for piece in generate_fn(convo):
            yield piece

    # Dark theme tweaks: page background, centered 900px column, no footer.
    custom_css = """
    body { background-color: #0b0f19; color: #e0e0e0; }
    .gradio-container { font-family: 'Inter', sans-serif; max-width: 900px !important; margin: auto; }
    footer { visibility: hidden; display: none; }
    """

    demo = gr.ChatInterface(
        fn=chat_wrapper,
        title="Nexari AI",
        description="Official Backend for Nexari Research. Powered by Nexari-Qwen-3B.",
        theme="soft",
        examples=[
            "Who created you?",
            "Explain Quantum Computing in simple terms",
            "Write a Python script to scrape a website"
        ],
        css=custom_css,
        concurrency_limit=5
    )

    return demo
|