safe-playground / app.py
pratyushmaini's picture
Update app.py
33783bd
raw
history blame
7.56 kB
import gradio as gr
from huggingface_hub import InferenceClient
# Define available models (update with your actual model IDs)
model_list = {
"Safe LM": "HuggingFaceH4/zephyr-7b-beta", # Replace with your Safe LM model ID
"Baseline 1": "HuggingFaceH4/zephyr-7b-beta",
"Another Model": "HuggingFaceH4/zephyr-7b-beta"
}
def respond(message, history, system_message, max_tokens, temperature, top_p, selected_model):
try:
# Create an InferenceClient for the selected model
client = InferenceClient(model_list.get(selected_model, "HuggingFaceH4/zephyr-7b-beta"))
# Build conversation messages for the client
messages = [{"role": "system", "content": system_message}]
for user_msg, assistant_msg in history:
if user_msg: # Only add non-empty messages
messages.append({"role": "user", "content": user_msg})
if assistant_msg: # Only add non-empty messages
messages.append({"role": "assistant", "content": assistant_msg})
messages.append({"role": "user", "content": message})
response = ""
# Stream the response from the client
for token_message in client.chat_completion(
messages,
max_tokens=max_tokens,
stream=True,
temperature=temperature,
top_p=top_p,
):
# Safe extraction of token with error handling
try:
token = token_message.choices[0].delta.content
if token is not None: # Handle potential None values
response += token
yield response
except (AttributeError, IndexError) as e:
# Handle cases where token structure might be different
print(f"Error extracting token: {e}")
continue
except Exception as e:
# Return error message if the model call fails
print(f"Error calling model API: {e}")
yield f"Sorry, there was an error: {str(e)}"
# Custom CSS for branding with consistent styling and hiding footer
css = """
body {
background-color: #f0f5fb !important; /* Light pastel blue background */
}
/* Gold button styling */
.gold-button {
background: white !important;
color: #333 !important;
border: 2px solid #e6c200 !important;
}
.gold-button:hover {
background: #fff9e6 !important;
box-shadow: 0 2px 5px rgba(230, 194, 0, 0.2) !important;
}
/* Message styling - no backgrounds */
.message {
background-color: transparent !important;
}
.message p {
background-color: transparent !important;
}
.message-wrap {
background-color: transparent !important;
}
/* Emoji for messages */
.user .message::before {
content: "👤 ";
font-size: 16px;
margin-right: 5px;
}
.bot .message::before {
content: "🛡️ ";
font-size: 16px;
margin-right: 5px;
}
/* Hide footer */
footer {
display: none !important;
}
"""
with gr.Blocks(theme=gr.themes.Default(
primary_hue="blue",
secondary_hue="gold",
font=["Crimson Pro", "Palatino", "serif"]
), css=css) as demo:
# Custom header with branding - removed the hr element
gr.HTML("""
<div class="app-header">
<h1 class="app-title">
<span class="shield">🛡️</span>
<span class="safe">Safe</span>
<span class="lm">Playground</span>
</h1>
<p class="app-subtitle">Responsible AI for everyone</p>
</div>
""")
with gr.Row():
# Left sidebar: Model selector
with gr.Column(scale=1):
gr.Markdown("## Models")
model_dropdown = gr.Dropdown(
choices=list(model_list.keys()),
label="Select Model",
value="Safe LM",
elem_classes=["model-select"]
)
# Settings
gr.Markdown("### Settings")
system_message = gr.Textbox(
value="You are a friendly and safe assistant.",
label="System Message",
lines=2
)
max_tokens_slider = gr.Slider(
minimum=1, maximum=2048, value=512, step=1,
label="Max New Tokens"
)
temperature_slider = gr.Slider(
minimum=0.1, maximum=4.0, value=0.7, step=0.1,
label="Temperature"
)
top_p_slider = gr.Slider(
minimum=0.1, maximum=1.0, value=0.95, step=0.05,
label="Top-p (nucleus sampling)"
)
# Main area: Chat interface
with gr.Column(scale=3):
chatbot = gr.Chatbot(
label="Conversation",
show_label=True,
# Removed avatar_images which may not be supported in some Gradio versions
elem_id="chatbot",
height=500
)
with gr.Row():
user_input = gr.Textbox(
placeholder="Type your message here...",
label="Your Message",
show_label=False,
scale=9
)
send_button = gr.Button(
"Send",
scale=1,
variant="primary"
)
with gr.Row():
clear_button = gr.Button("Clear Chat", variant="secondary", elem_classes=["gold-button"])
# No footer - removed
gr.HTML('<div style="height: 20px;"></div>')
# Fix 1: Correct event handling for the chatbot interface
def user(user_message, history):
# Return the user's message and add it to history
return "", history + [[user_message, None]]
def bot(history, system_message, max_tokens, temperature, top_p, selected_model):
# Get the last user message from history (with error checking)
if not history or len(history) == 0:
return history
user_message = history[-1][0]
# Call respond function with the message
response_generator = respond(
user_message,
history[:-1], # Pass history without the current message
system_message,
max_tokens,
temperature,
top_p,
selected_model
)
# Update history as responses come in
for response in response_generator:
history[-1][1] = response
yield history
# Wire up the event chain - use queue=True for the bot responses
user_input.submit(
user,
[user_input, chatbot],
[user_input, chatbot],
queue=False
).then(
bot,
[chatbot, system_message, max_tokens_slider, temperature_slider, top_p_slider, model_dropdown],
[chatbot],
queue=True
)
send_button.click(
user,
[user_input, chatbot],
[user_input, chatbot],
queue=False
).then(
bot,
[chatbot, system_message, max_tokens_slider, temperature_slider, top_p_slider, model_dropdown],
[chatbot],
queue=True
)
# Clear the chat history - use a proper function instead of lambda
def clear_history():
return []
clear_button.click(clear_history, None, chatbot, queue=False)
if __name__ == "__main__":
# Simple launch without parameters that might not be supported
demo.launch()