# TestStream / app.py
import gradio as gr
from huggingface_hub import InferenceClient
import os
def respond(
    message,
    history: list[dict[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    hf_token_string,
):
    # Prefer the token supplied in the UI, fall back to the HF_TOKEN env var.
    token = hf_token_string if hf_token_string else os.getenv("HF_TOKEN")
    if not token:
        yield "Error: No token provided."
        return

    client = InferenceClient(token=token, model="meta-llama/Meta-Llama-3-8B-Instruct")

    # Build the conversation in OpenAI-style message format.
    messages = [{"role": "system", "content": system_message}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})
    try:
        # We don't need a 'response' accumulator string here for the API.
        for chunk in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            if len(chunk.choices) > 0:
                token_str = chunk.choices[0].delta.content
                if token_str:
                    # OPTIMIZATION: Yield ONLY the new token.
                    # This is what makes the API streaming "instant".
                    yield token_str
    except Exception as e:
        yield f"API Error: {str(e)}"
# The ChatInterface will now receive tokens one by one.
# Note: gr.ChatInterface treats each yielded value as the *complete* message, so
# yielding only the deltas can make tokens appear to "replace" each other in the UI.
# If you want the UI to still look normal while keeping the API fast, accumulate
# the deltas on the client side (see the sketch below).
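
# Minimal client-side sketch (never called by the app itself) of how a consumer
# could rebuild the full reply from the streamed deltas. It assumes the app is
# deployed as a Hugging Face Space, that the gradio_client package is installed,
# and that the ChatInterface exposes a "/chat" endpoint whose inputs are the
# message followed by the additional_inputs in order -- check your Space's API
# docs for the exact endpoint name and argument order. The space_id below is a
# placeholder.
def example_stream_client(space_id="your-username/TestStream"):
    from gradio_client import Client

    client = Client(space_id)
    # submit() returns a Job; iterating it yields each intermediate output of the
    # generator endpoint, i.e. the individual token deltas yielded by respond().
    job = client.submit(
        "Hello!",                       # message
        "You are a friendly Chatbot.",  # system_message
        512,                            # max_tokens
        0.7,                            # temperature
        0.95,                           # top_p
        "",                             # hf_token_string (empty -> HF_TOKEN env var)
        api_name="/chat",
    )
    full_text = ""
    for delta in job:
        full_text += delta              # rebuild the complete reply from deltas
        print(delta, end="", flush=True)
    return full_text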
chatbot = gr.ChatInterface(
    respond,
    type="messages",
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
        gr.Textbox(label="Hugging Face Token", type="password"),
    ],
)
with gr.Blocks() as demo:
    with gr.Sidebar():
        gr.LoginButton()
    chatbot.render()

if __name__ == "__main__":
    demo.launch()