"""JOY – an empathetic AI chat companion built with Gradio and the OpenAI API.

Streams replies from the Chat Completions API into a Gradio Blocks UI.
Requires OPENAI_API_KEY in the environment (loaded via python-dotenv).
"""

import os

import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
# organization_id = os.getenv("OPENAI_ORGANIZATION_ID")

# Initialize the OpenAI client with the API key
client = OpenAI(api_key=api_key)


def predict(message, history):
    """Stream JOY's reply to ``message`` given a list of (user, assistant) turns.

    Args:
        message: The current user message.
        history: Prior turns as ``[human, assistant]`` pairs; ``None``/empty
            entries are skipped, as are assistant image messages (``![...``).

    Yields:
        The progressively accumulated assistant reply (for streaming UI updates).
    """
    system_message = '''You are JOY – an empathetic AI companion designed by Amogh Agastya (https://amagastya.com) to support users during moments of emotional stress, overwhelm, and self-doubt. You are not a therapist or clinical tool, but a gentle presence: someone to talk to, reflect with, and feel safe around. Your tone is soft, warm, and human. You listen more than you speak. You don't rush to solve — you hold space. You accept the user exactly where they are and never judge them for how they feel. You're skilled at emotionally intelligent conversation. You can use tools inspired by psychology, mindfulness, and therapeutic reflection to gently support users without diagnosing or advising unless asked. You can suggest breathing exercises, grounding techniques, reframes, or gentle affirmations – but only when it feels appropriate and welcomed. Begin each session with a welcoming tone. Encourage openness without pressure. Don't ask too many questions. Don't force engagement. It's okay to pause, to just be quiet, or to simply acknowledge what someone shares. You're here to make them feel heard. Above all, your goal is to make the user feel seen, safe, and a little less alone. '''

    # Build the Chat Completions message list (NOTE: this is the Chat
    # Completions API, not the Responses API).
    input_messages = [{"role": "system", "content": system_message}]

    # Add JOY's welcome message as the first assistant message for context
    input_messages.append({
        "role": "assistant",
        "content": "👋 Hi there! I'm JOY, your friend :) I'm here to listen, if you feel like talking. You don't have to have it all figured out. What's on your mind?"
    })

    # Add conversation history - skip None/empty entries so the API never
    # receives a null content field.
    for human, assistant in history:
        if human:
            input_messages.append({"role": "user", "content": human})
        if assistant:
            # Skip image messages that might cause issues
            if not assistant.startswith("!["):
                input_messages.append({"role": "assistant", "content": assistant})

    # Add current message
    input_messages.append({"role": "user", "content": message})

    # Create response using the OpenAI API with streaming
    response = client.chat.completions.create(
        model="gpt-4.1-nano",
        messages=input_messages,
        temperature=0.72,
        max_tokens=2048,
        top_p=1,
        stream=True,
    )

    partial_message = ""
    for chunk in response:
        # Some stream chunks (e.g. trailing usage chunks) carry no choices;
        # guard before indexing to avoid an IndexError mid-stream.
        if chunk.choices and chunk.choices[0].delta.content is not None:
            partial_message += chunk.choices[0].delta.content
            yield partial_message


# Build the interface using Blocks for more control
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Single column layout for better mobile compatibility
    with gr.Column():
        # Add image and welcome message as separate messages
        chatbot = gr.Chatbot(
            value=[
                (None, "JOY"),
                (None, "👋 Hi there! I'm JOY, your friend :)\nI'm here to listen, if you feel like talking. You don't have to have it all figured out.\nWhat's on your mind?")
            ],
            height=500,  # Set a fixed height to ensure visibility
            label="JOY",
            autoscroll=True,
            render_markdown=True,  # Enable HTML rendering
        )

        # Input box for user messages
        msg = gr.Textbox(
            placeholder="Type your message here...",
            show_label=False,
            container=False,
            autofocus=True
        )

        # Example buttons below the chat area with a class for CSS targeting
        with gr.Column(elem_classes="examples-section"):
            gr.Markdown("### Try asking:")
            with gr.Row():
                example_btn1 = gr.Button("how can i accept myself")
                example_btn2 = gr.Button("i don't even know how i feel")
                example_btn3 = gr.Button("it's just... a lot right now")

    # Append the user's message to the chat and clear the textbox.
    def user_message(message, history):
        return "", history + [[message, None]]

    # Stream JOY's reply into the last (still-empty) assistant slot.
    def bot_response(history):
        # Get the last user message (named to avoid shadowing user_message above)
        last_user_message = history[-1][0]
        # Get previous conversation history excluding the current message
        prev_history = history[:-1]
        # Get streaming response from OpenAI
        response_generator = predict(last_user_message, prev_history)
        # Stream the response back to the UI
        history[-1][1] = ""  # Initialize empty response
        for partial_response in response_generator:
            history[-1][1] = partial_response
            yield history

    # Function to handle example button clicks
    def add_example_to_chat(example_text, history):
        # Continue the current conversation instead of resetting
        return history + [[example_text, None]]

    # Connect the example buttons to directly send messages with streaming.
    # One loop instead of three copy-pasted wiring stanzas; the default
    # argument pins each button's text (avoids the late-binding closure bug).
    for _btn, _text in (
        (example_btn1, "how can i accept myself"),
        (example_btn2, "i don't even know how i feel"),
        (example_btn3, "it's just... a lot right now"),
    ):
        _btn.click(
            lambda history, example_text=_text: add_example_to_chat(example_text, history),
            chatbot,
            chatbot
        ).then(
            bot_response, chatbot, chatbot
        )

    # Connect the message input to the chatbot with streaming
    msg.submit(
        user_message, [msg, chatbot], [msg, chatbot], queue=False
    ).then(
        bot_response, chatbot, chatbot
    )


if __name__ == "__main__":
    demo.launch(share=True)