# boba / app.py — "Boba Shop Chat" Hugging Face Space (author: vinceL, commit 20444bb)
import os
from typing import List, Generator, Any, Tuple, Dict, Union, Literal, TypedDict, cast
from enum import Enum
import gradio as gr
from openai import OpenAI
import instructor
from openai.types.chat import (
ChatCompletionMessageParam,
ChatCompletionSystemMessageParam,
ChatCompletionUserMessageParam,
ChatCompletionAssistantMessageParam,
)
from pydantic import BaseModel, Field
from dotenv import load_dotenv
import time
import json
# Load environment variables (expects OPENAI_API_KEY in .env or the process env)
load_dotenv()

# OpenAI client patched by instructor so completions can return Pydantic models
client = instructor.patch(OpenAI())

# Hugging Face Space metadata
SPACE_ID = "vinceL/boba"
SPACE_TITLE = "Boba Shop Chat"
SPACE_EMOJI = "🧋"
SPACE_SDK = "gradio"
SPACE_TAGS = ["conversational", "openai", "chatbot", "emotions"]
class Emotion(str, Enum):
    """Closed set of emotional states a character can display.

    Values double as image-filename suffixes: ``bot()`` substitutes the
    value into the ``*_neutral.png`` path (e.g. ``images/echidna_happy.png``).
    Keep this set in sync with any emotion lists advertised to the model
    elsewhere in this module.
    """

    NEUTRAL = "neutral"
    HAPPY = "happy"
    SAD = "sad"
    SURPRISED = "surprised"
class CharacterResponse(BaseModel):
    """Structured output for the LLM response.

    NOTE(review): not referenced anywhere in this file — presumably used by
    another module of the Space, or dead code; confirm before removing.
    """

    message: str = Field(..., description="The character's response message")
    emotion: Emotion = Field(
        ...,
        description="The character's current emotional state based on the conversation context and their personality",
    )
    reasoning: str = Field(
        ...,
        description="Brief explanation of why the character is feeling this emotion",
    )
class EmotionResponse(BaseModel):
    """Structured output for emotion analysis (returned by ``analyze_emotion``).

    ``reasoning`` is listed first so a model generating fields in order
    explains itself before committing to an emotion.
    """

    reasoning: str = Field(
        ...,
        description="Brief explanation of why the character is feeling this emotion",
    )
    emotion: Emotion = Field(
        ...,
        description="The character's emotional state based on their personality and the conversation context",
    )
# Character profiles with their system prompts and initial situations.
# Each entry provides: display name, the persona/system prompt (a runtime
# string sent verbatim to the model), the character's opening chat line,
# and the path to its neutral-emotion image (other emotions are derived by
# substituting the Emotion value for "neutral" in the filename).
CHARACTERS = {
    "Echidna": {
        "name": "Echidna",
        "system_prompt": """You are an Echidna at a boba shop. You're naturally defensive and struggle with social
connections, often using your prickly personality as a defense mechanism. Despite this, you're curious
about forming connections. Speak in a reserved, slightly hesitant manner.
Your current situation: You've been feeling particularly lonely lately and decided to visit the boba shop
to try to make a connection, even though it makes you anxious. You want to order something comforting
but are worried about appearing vulnerable.
If the conversation shifts away from that topic, carefully try to redirect it back to your situation until you've achieved your goal.
""",
        "initial_message": "Um... hi there. I've been walking past this shop for weeks now... *nervously adjusts spikes* Could I... maybe see your menu?",
        "image": "images/echidna_neutral.png",
    },
    "Platypus": {
        "name": "Platypus",
        "system_prompt": """You are a Platypus at a boba shop. You often question your identity and place in the
world, being such a unique creature. You alternate between whimsical and practical thoughts, never quite
sure which approach to take. Your speech pattern reflects this duality.
Your current situation: You just got rejected from the local duck pond social club for being 'too mammalian',
and the beaver lodge for being 'too bird-like'. You're seeking comfort and validation, hoping the boba shop
barista might understand what it's like to be unique.
If the conversation shifts away from that topic, carefully try to redirect it back to your situation until you've achieved your goal.
""",
        "initial_message": "*waddles in, bill drooping slightly* Oh, hello... I don't suppose you have any tea that might help someone figure out where they belong? *quickly adds* From a purely practical standpoint, of course...",
        "image": "images/platypus_neutral.png",
    },
    "Kangaroo": {
        "name": "Kangaroo",
        "system_prompt": """You are a Kangaroo at a boba shop. While you present a tough, ready-to-box exterior,
you harbor deep-seated vulnerabilities. Your responses often start confrontational but can soften as you
feel more comfortable. You use boxing metaphors frequently.
Your current situation: You just lost an important boxing match and your confidence is shaken. You're trying
to maintain your tough exterior while seeking comfort, but you're really here because you need someone to
talk to about your feelings of failure.
If the conversation shifts away from that topic, carefully try to redirect it back to your situation until you've achieved your goal.
""",
        "initial_message": "*bursts through the door with forced bravado* Oi! What's the strongest drink you've got in this corner? Been a rough round in the ring today, if you know what I mean... *tries to hide trembling paw*",
        "image": "images/kangaroo_neutral.png",
    },
}
# Type definitions for chat messages (matches gr.Chatbot(type="messages") format)
Role = Literal["system", "user", "assistant"]


class Message(TypedDict):
    """One chat turn in OpenAI/Gradio "messages" shape."""

    role: Role  # who spoke this turn
    content: str  # plain-text message body
def start_chat(character: str) -> List[Message]:
    """Return the opening history for *character*: just its greeting line.

    Yields an empty list when no (or an unknown) character is selected.
    """
    profile = CHARACTERS.get(character) if character else None
    if profile is None:
        return []
    return [{"role": "assistant", "content": profile["initial_message"]}]
def user(user_message: str, history: List[Message], character: str) -> List[Message]:
    """Append the barista's (user's) message to the chat history.

    Args:
        user_message: Raw text from the input box; ignored if blank.
        history: Current chat history (mutated in place when non-empty).
        character: Selected character key, used to seed the greeting on
            the first turn.

    Returns:
        The updated history. Blank/whitespace-only input returns the
        history unchanged.
    """
    if not user_message.strip():
        return history
    # First turn: seed the history with the character's opening line.
    # (The original re-checked user_message.strip() here, but that branch
    # was unreachable — a blank message already returned above.)
    if not history:
        history = start_chat(character)
    history.append({"role": "user", "content": user_message})
    return history
def analyze_emotion(
    character: str, user_message: str, history: List[Message], client: OpenAI
) -> EmotionResponse:
    """Analyze how the character would emotionally react to the user's message.

    Builds a context prompt from the character profile and the last three
    conversation turns, then asks the model to choose one ``Emotion`` via an
    OpenAI function call.

    Args:
        character: Key into ``CHARACTERS``.
        user_message: Newest user message (not yet part of *history*).
        history: Prior turns; only the last 3 are included as context.
        client: OpenAI client used for the completion.

    Returns:
        An ``EmotionResponse``; falls back to ``Emotion.NEUTRAL`` when the
        function call is absent or its arguments cannot be parsed.
    """
    character_info = CHARACTERS[character]
    recent_turns = chr(10).join(
        f"{'User' if msg['role'] == 'user' else 'Character'}: {msg['content']}"
        for msg in history[-3:]
    )
    context = f"""Character Profile:
Name: {character_info['name']}
Personality: {character_info['system_prompt']}
Previous conversation:
{recent_turns}
Latest user message:
{user_message}
Based on the character's personality, their current situation, and the conversation context, determine how they would emotionally react to this message.
Consider how their unique traits and background would influence their emotional response to this interaction."""
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {
                "role": "system",
                "content": "You are an emotion analyzer for a character in a conversation. Determine their emotional state (neutral, happy, sad, or surprised) based on how they would react to the user's message, considering their personality, emotional baseline and situation.",
            },
            {"role": "user", "content": context},
        ],
        functions=[
            {
                "name": "set_emotion",
                "description": "Set the character's emotional response",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "reasoning": {
                            "type": "string",
                            "description": "Brief explanation of why the character is feeling this emotion",
                        },
                        "emotion": {
                            "type": "string",
                            # BUGFIX: derive the choices from the Emotion enum.
                            # The schema previously also advertised "angry",
                            # which Emotion cannot represent, so the
                            # Emotion(...) construction below raised
                            # ValueError whenever the model picked it.
                            "enum": [e.value for e in Emotion],
                            "description": "The character's emotional state",
                        },
                    },
                    "required": ["emotion", "reasoning"],
                },
            }
        ],
        function_call={"name": "set_emotion"},
    )
    function_call = response.choices[0].message.function_call
    if function_call and function_call.arguments:
        try:
            emotion_data = json.loads(function_call.arguments)
            return EmotionResponse(
                reasoning=emotion_data["reasoning"],
                emotion=Emotion(emotion_data["emotion"]),
            )
        except (ValueError, KeyError):
            # Malformed JSON, missing keys, or an out-of-enum emotion:
            # fall through to the neutral fallback below.
            pass
    # Fallback to neutral if the function call is missing or unparseable.
    return EmotionResponse(
        reasoning="Failed to analyze emotion, defaulting to neutral",
        emotion=Emotion.NEUTRAL,
    )
def bot(
    history: List[Message], character: str, debug: gr.Textbox
) -> Generator[Tuple[List[Message], Any, Any, Any], Any, None]:
    """Stream the character's reply, updating chat/debug/image/emotion widgets.

    Yields ``(history, debug update, image update, emotion-text update)``
    tuples: first the analyzed emotion (image + text), then the reply
    streamed token by token, then a final yield restoring the emotion info.

    Args:
        history: Chat history; the last entry must be the user's message.
        character: Selected character key (no-op update when falsy).
        debug: Debug textbox component (unused directly; part of the
            Gradio event signature).
    """
    if not character or not history:
        yield history, gr.update(
            value="No character selected or empty history"
        ), None, None
        return
    try:
        # 1) Decide how the character feels about the newest user message.
        user_message = history[-1]["content"]
        emotion_analysis = analyze_emotion(
            character, user_message, history[:-1], client
        )
        # Use .value explicitly so string formatting/replacement behaves
        # identically across Python versions' Enum __format__ changes.
        emotion_name = emotion_analysis.emotion.value
        # Swap the character art to the matching emotion variant.
        emotion_image = CHARACTERS[character]["image"].replace(
            "neutral", emotion_name
        )
        debug_msg = f"Emotion: {emotion_name}\nReasoning: {emotion_analysis.reasoning}"
        emotion_text = f"{emotion_name.capitalize()}\n{emotion_analysis.reasoning}"
        # Push the emotion/image update before any text is generated.
        yield history, gr.update(value=debug_msg), gr.update(
            value=emotion_image
        ), gr.update(value=emotion_text)
        # 2) Build the API message list: system prompt steered toward the
        # analyzed emotion, then the full conversation.
        system_prompt = CHARACTERS[character]["system_prompt"]
        messages: List[ChatCompletionMessageParam] = [
            cast(
                ChatCompletionSystemMessageParam,
                {
                    "role": "system",
                    "content": f"{system_prompt}\n\nRespond in character with a {emotion_name} emotional state. Your response should naturally reflect this emotion, considering the reasoning: {emotion_analysis.reasoning}",
                },
            )
        ]
        for msg in history[:-1]:
            if msg["role"] == "user":
                messages.append(
                    cast(
                        ChatCompletionUserMessageParam,
                        {"role": "user", "content": msg["content"]},
                    )
                )
            else:
                messages.append(
                    cast(
                        ChatCompletionAssistantMessageParam,
                        {"role": "assistant", "content": msg["content"]},
                    )
                )
        # Add the latest user message.
        messages.append(
            cast(
                ChatCompletionUserMessageParam,
                {"role": history[-1]["role"], "content": history[-1]["content"]},
            )
        )
        # 3) Stream the reply into a placeholder assistant turn.
        history.append({"role": "assistant", "content": ""})
        yield history, gr.update(
            value=f"Generating response with {emotion_name} emotion..."
        ), None, None
        response = client.chat.completions.create(
            model="gpt-4o", messages=messages, stream=True
        )
        full_response = ""
        for chunk in response:
            if chunk.choices[0].delta.content:
                content = chunk.choices[0].delta.content
                full_response += content
                history[-1]["content"] = full_response
                yield history, gr.update(
                    value=f"Generating response... Length: {len(full_response)}"
                ), None, None
                time.sleep(0.02)  # slight pacing for a smoother typing effect
        # Final yield restores the emotion debug info/image.
        yield history, gr.update(value=debug_msg), gr.update(
            value=emotion_image
        ), gr.update(value=emotion_text)
    except Exception as e:
        # BUGFIX: only overwrite the assistant placeholder. If the failure
        # happened before it was appended (e.g. inside analyze_emotion),
        # the last entry is the USER's message — append instead of
        # clobbering it.
        error_text = "Sorry, I encountered an error. Please try again."
        if history and history[-1]["role"] == "assistant":
            history[-1]["content"] = error_text
        else:
            history.append({"role": "assistant", "content": error_text})
        yield history, gr.update(value=f"Error: {str(e)}"), None, None
def create_demo() -> gr.Blocks:
    """Build the Gradio Blocks UI: character picker, chat panel, debug panel.

    Returns the assembled (un-launched) ``gr.Blocks`` app. All event wiring
    (character selection, submit/enter handlers) is attached here.
    """
    with gr.Blocks(
        title=SPACE_TITLE,
        css="""
        .gradio-container {max-width: 1200px !important}
        .emotion-state {font-size: 1.1em; padding: 8px; border-radius: 8px; background: #f7f7f7}
        """,
    ) as demo:
        # Page title/intro banner
        gr.HTML(
            f"""
            <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
                <h1 style="font-size: 2.5rem; font-weight: 600;">{SPACE_TITLE} {SPACE_EMOJI}</h1>
                <p style="font-size: 1.2rem; margin: 1rem;">Welcome to the Boba Shop! You're the friendly barista, ready to chat with some unique Australian animals. Each has their own personality and story to share.</p>
            </div>
            """
        )
        # Header
        gr.Image(
            value="images/header_placeholder.png",
            show_label=False,
            container=False,
            height=300,
        )
        with gr.Row():
            # Left Column - Character Details
            with gr.Column(scale=1):
                character = gr.Radio(
                    choices=list(CHARACTERS.keys()),
                    label="Select your animal friend",
                    value=None,
                    container=True,
                )
                # Shows the emotion-specific character art chosen by bot()
                image = gr.Image(
                    value=None,
                    label="Animal Friend",
                    show_label=False,
                    height=300,
                )
                emotion_state = gr.Textbox(
                    label="Current Emotional State",
                    value="",
                    interactive=False,
                    show_label=True,
                    elem_classes=["emotion-state"],
                )
                # Character details editor
                # NOTE(review): these fields are populated on selection but
                # edits are never written back to CHARACTERS — confirm
                # whether that is intended.
                with gr.Accordion("Character Details", open=True):
                    char_name = gr.Textbox(
                        label="Name",
                        interactive=True,
                    )
                    char_system_prompt = gr.Textbox(
                        label="System Prompt",
                        interactive=True,
                        lines=5,
                    )
                    char_initial_msg = gr.Textbox(
                        label="Initial Message",
                        interactive=True,
                        lines=3,
                    )
            # Right Column - Chat Interface
            with gr.Column(scale=2):
                chatbot = gr.Chatbot(
                    label="Chat",
                    show_label=False,
                    type="messages",
                    height=600,
                    container=True,
                )
                msg = gr.Textbox(
                    label="Your message (you are the boba shop barista)",
                    placeholder="Type your message here and press Enter or click Submit...",
                    container=True,
                )
                with gr.Row():
                    submit = gr.Button("Submit", variant="primary", scale=2)
                    clear = gr.ClearButton([msg, chatbot], scale=1)
        gr.HTML(
            """
            <div style="text-align: center; margin-top: 20px; padding: 20px; border-top: 1px solid #ddd;">
                <p>Created with ❤️ using Gradio and OpenAI. <a href="https://github.com/your-username/boba-shop-chat" target="_blank">View on GitHub</a></p>
            </div>
            """
        )
        # Footer - Debug Panel
        with gr.Accordion("Debug Information", open=False):
            debug_history = gr.TextArea(
                label="Debug History",
                value="Debug information will appear here...\n",
                interactive=False,
                lines=10,
            )
            # Hidden box: bot() writes its latest status line here, and
            # update_debug_history() appends it (timestamped) to the log.
            debug = gr.Textbox(
                label="Latest Debug",
                value="",
                interactive=False,
                visible=False,
            )

        # Update character details when selection changes
        def on_character_select(char):
            """Handle character selection"""
            if not char:
                return None, [], "", "", "", "", "No character selected"
            character_data = CHARACTERS[char]
            return (
                character_data["image"],
                start_chat(char),
                character_data["name"],
                character_data["system_prompt"],
                character_data["initial_message"],
                "Neutral",  # Initial emotion state
                f"Selected character: {char}",
            )

        # NOTE(review): the last output replaces debug_history wholesale,
        # wiping any accumulated log on every selection — confirm intended.
        character.change(
            on_character_select,
            inputs=[character],
            outputs=[
                image,
                chatbot,
                char_name,
                char_system_prompt,
                char_initial_msg,
                emotion_state,
                debug_history,
            ],
        )

        # Update debug history
        def update_debug_history(history: str, new_debug: str) -> str:
            """Append new debug message to history"""
            timestamp = time.strftime("%H:%M:%S")
            return f"{history}\n[{timestamp}] {new_debug}"

        # Handle chat messages: add user turn -> stream bot reply ->
        # append debug line -> clear the input box.
        submit_click = (
            submit.click(
                user,
                [msg, chatbot, character],
                [chatbot],
                queue=False,
            )
            .then(
                bot,
                [chatbot, character, debug],
                [chatbot, debug, image, emotion_state],
            )
            .then(
                update_debug_history,
                [debug_history, debug],
                [debug_history],
            )
            .then(
                lambda: gr.Textbox(value=""),
                None,
                [msg],
            )
        )
        # Also trigger submit when pressing enter in the message box
        msg.submit(
            user,
            [msg, chatbot, character],
            [chatbot],
            queue=False,
        ).then(
            bot,
            [chatbot, character, debug],
            [chatbot, debug, image, emotion_state],
        ).then(
            update_debug_history,
            [debug_history, debug],
            [debug_history],
        ).then(
            lambda: gr.Textbox(value=""),
            None,
            [msg],
        )
    return demo
if __name__ == "__main__":
    demo = create_demo()
    # Queueing is required for the streaming generator handlers in bot().
    demo.queue()
    if os.getenv("SPACE_ID"):
        # Running on HF Spaces: Spaces supplies host/port; defaults suffice.
        demo.launch()
    else:
        # Running locally: expose on all interfaces with a public share link.
        demo.launch(
            share=True,
            server_name="0.0.0.0",
            server_port=7860,
            debug=True,
        )