# talkAboutIt / app.py
import os

import gradio as gr
from huggingface_hub import InferenceClient
"""
For more information on `huggingface_hub` Inference API support, please check the docs:
https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
# Initialize the Hugging Face Inference Client.
# Authenticated access is preferred: read the API token from an environment variable.
token = os.getenv("HUGGINGFACE_API_TOKEN")
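# The token can be supplied as a Space secret or exported locally before launch,
# e.g. `export HUGGINGFACE_API_TOKEN=hf_xxxx` (placeholder value).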
# Try different models - some may not require authentication
# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=token)
# client = InferenceClient("microsoft/DialoGPT-medium", token=token)
# client = InferenceClient("google/flan-t5-base", token=token)
# If you have a token, use it:
if token:
    client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=token)
else:
    # Fallback to a model that might not require authentication
    print("No token found. Using alternative model...")
    client = InferenceClient("microsoft/DialoGPT-medium")
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """
    Generate a streaming chat response from the selected model.
    """
    try:
        # Build the conversation history
        messages = [{"role": "system", "content": system_message}]
        for val in history:
            if val[0]:
                messages.append({"role": "user", "content": val[0]})
            if val[1]:
                messages.append({"role": "assistant", "content": val[1]})

        # Add the current user message
        messages.append({"role": "user", "content": message})
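        # At this point `messages` is an OpenAI-style chat list, e.g.
        #   [{"role": "system", "content": system_message},
        #    {"role": "user", "content": "..."}, {"role": "assistant", "content": "..."},
        #    {"role": "user", "content": message}]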
        response = ""

        # Stream the response
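        # chat_completion(stream=True) yields chunks whose choices[0].delta.content
        # carries the newest text fragment; yielding the accumulated string lets
        # gr.ChatInterface update the reply in the UI as it is generated.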
        for message_chunk in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            if message_chunk.choices[0].delta.content is not None:
                # Use a distinct name so the module-level `token` (API token) is not shadowed
                delta = message_chunk.choices[0].delta.content
                response += delta
                yield response
    except Exception as e:
        yield f"Error: {str(e)}"
"""
For information on how to customize the ChatInterface, peruse the gradio docs:
https://www.gradio.app/docs/chatinterface
"""
# Create the Gradio ChatInterface
demo = gr.ChatInterface(
    respond,
    title="🤖 Zephyr-7B Chatbot",
    description="Chat with the Zephyr-7B model. Adjust the parameters below to customize the response behavior.",
    theme="soft",
    additional_inputs=[
        gr.Textbox(
            value="You are a friendly and helpful AI assistant.",
            label="System message",
            placeholder="Enter a system prompt to guide the assistant's behavior...",
        ),
        gr.Slider(
            minimum=1,
            maximum=2048,
            value=512,
            step=1,
            label="Max new tokens",
            info="Maximum number of tokens to generate",
        ),
        gr.Slider(
            minimum=0.1,
            maximum=4.0,
            value=0.7,
            step=0.1,
            label="Temperature",
            info="Controls randomness (lower = more focused)",
        ),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
            info="Controls diversity of responses",
        ),
    ],
)
if __name__ == "__main__":
    demo.launch(
        share=True,              # Create a public link
        server_name="0.0.0.0",   # Allow external connections
        server_port=7860,        # Default Gradio port
        show_error=True,         # Show detailed error messages in the UI
    )