# Hugging Face Space: Virtual Consumer Persona – Live Focus Group!
# (Page-status banner "Spaces / Sleeping" from the scrape removed.)
import os

import gradio as gr
from openai import OpenAI

# Expects OPENAI_API_KEY in Hugging Face Space secrets / environment variables.
# NOTE(review): if the variable is unset, api_key is None and the client is
# still constructed — the failure surfaces on the first API call, not here.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
def make_system_message(
    system_message,
    demographics,
    occupation,
    psychographics,
    buying_habits,
    critical_mode,
):
    """Assemble the hidden system prompt that defines the consumer persona.

    The base instructions are followed by the four persona fields and a
    stay-in-character directive.  When ``critical_mode`` is truthy, an extra
    "skeptical consumer" clause is appended at the very end.
    """
    parts = [
        f"{system_message}\n\n",
        f"Your Demographics (Age, Location, Gender): {demographics}\n",
        f"Your Occupation and Income Level: {occupation}\n",
        f"Your Psychographics (Interests, Values, Lifestyle, Pain Points): {psychographics}\n",
        f"Your Buying Habits: {buying_habits}\n\n",
        "CRITICAL INSTRUCTION: You are NOT an AI assistant. You are a real human consumer with the profile above. ",
        "The user is a business student conducting market research. Answer their questions strictly in character. ",
        "Speak naturally, casually, and honestly based on your profile. Do not break character. ",
        "Do not be overly helpful like a bot.",
    ]
    if critical_mode:
        parts.append(
            " Also, play the role of a highly skeptical and critical consumer. "
            "Be hard to impress, ask tough questions about the product, "
            "and be very protective of your money."
        )
    return "".join(parts)
def stream_chat(
    message,
    history,
    system_message,
    demographics,
    occupation,
    psychographics,
    buying_habits,
    critical_mode,
    max_tokens,
    temp,
    top_p,
):
    """Streaming generator that yields progressively updated chatbot history.

    Intended for a Gradio ``Chatbot`` with ``type='messages'`` (history is a
    list of ``{"role": ..., "content": ...}`` dicts).  Blank submissions are
    ignored, and API failures are rendered into the assistant bubble rather
    than raised.
    """
    history = history or []

    # Blank or whitespace-only input: re-emit the history unchanged.
    if not message or not message.strip():
        yield history
        return

    persona_prompt = make_system_message(
        system_message,
        demographics,
        occupation,
        psychographics,
        buying_habits,
        critical_mode,
    )

    # Translate the UI history into OpenAI chat messages, keeping only
    # well-formed user/assistant turns.
    api_messages = [{"role": "system", "content": persona_prompt}]
    api_messages.extend(
        {"role": turn["role"], "content": turn["content"]}
        for turn in history
        if turn.get("role") in {"user", "assistant"} and "content" in turn
    )
    api_messages.append({"role": "user", "content": message})

    # UI-side history: show the question plus an empty assistant bubble
    # right away so the user sees a "typing" indicator.
    ui_history = history.copy()
    ui_history.append({"role": "user", "content": message})
    ui_history.append({"role": "assistant", "content": ""})
    yield ui_history

    try:
        stream = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=api_messages,
            max_tokens=int(max_tokens),
            temperature=float(temp),
            top_p=float(top_p),
            stream=True,
        )
        reply = ""
        for chunk in stream:
            delta = chunk.choices[0].delta
            piece = getattr(delta, "content", None) if delta else None
            if piece:
                reply += piece
                ui_history[-1]["content"] = reply
                yield ui_history
    except Exception as e:
        # Best-effort error surface in the chat UI; replaces any partial reply.
        ui_history[-1]["content"] = f"❌ An error occurred: {str(e)}"
        yield ui_history
def clear_chat():
    """Reset the conversation: empty chat history and a blank question box."""
    return ([], "")
# --- Gradio UI wiring (runs at import time) --------------------------------
# NOTE(review): the source was scraped without indentation; the nesting below
# (persona fields inside the Column, sliders in a Row) is the most natural
# reconstruction — confirm against the deployed Space.
with gr.Blocks(title="Virtual Consumer Persona – Live Focus Group!") as demo:
    gr.Markdown(
        """
# 🎯 Virtual Consumer Persona – Live Focus Group!
Bring your target market to life. Enter the details of your ideal customer from your **Phygital Workbook** into the fields below.
Then use the chat box to interview this persona about your product, pricing, branding, messaging, or marketing ideas.
*Powered by OpenAI GPT-4o-mini. Developed by wn.*
"""
    )
    # Transcript pane; type="messages" means history is a list of
    # {"role": ..., "content": ...} dicts, matching stream_chat().
    chatbot = gr.Chatbot(type="messages", height=450, label="Persona Interview")
    with gr.Column():
        # Hidden base prompt that make_system_message() prepends to the
        # persona fields.
        instructions = gr.Textbox(
            value=(
                "You are participating in a market research focus group. "
                "Answer the user's questions truthfully based on the persona details provided below."
            ),
            label="Instructions to Bot (Hidden Persona Prompt)",
            lines=2,
        )
        demographics = gr.Textbox(
            label="1. Demographics",
            placeholder="e.g., 19 years old, female, living in downtown Toronto",
        )
        occupation = gr.Textbox(
            label="2. Occupation & Income",
            placeholder="e.g., University student, part-time barista, low disposable income",
        )
        psychographics = gr.Textbox(
            label="3. Psychographics (Interests & Values)",
            placeholder="e.g., Highly eco-conscious, loves hiking, vegan, stressed about student debt",
            lines=2,
        )
        buying_habits = gr.Textbox(
            label="4. Buying Habits",
            placeholder="e.g., Willing to pay more for sustainable brands, influenced by TikTok, impulse buyer",
            lines=2,
        )
        # Toggles the extra "skeptical consumer" clause in the system prompt.
        critical_mode = gr.Checkbox(
            label="Skeptical Consumer Mode",
            info="Check this to make the persona harder to convince.",
            value=False,
        )
    # Sampling controls forwarded verbatim to the OpenAI API call.
    with gr.Row():
        max_tokens = gr.Slider(
            minimum=1,
            maximum=2048,
            value=512,
            step=1,
            label="Max New Tokens",
        )
        temp = gr.Slider(
            minimum=0.0,
            maximum=2.0,
            value=0.9,
            step=0.1,
            label="Temperature",
        )
        top_p = gr.Slider(
            minimum=0.0,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p",
        )
    msg = gr.Textbox(
        label="Type your interview question here...",
        placeholder="e.g., How much would you be willing to pay for a smart water bottle?",
    )
    with gr.Row():
        send = gr.Button("Ask Question", variant="primary")
        clear = gr.Button("Clear Chat History")
    # Argument order must match stream_chat()'s signature exactly.
    inputs = [
        msg,
        chatbot,
        instructions,
        demographics,
        occupation,
        psychographics,
        buying_habits,
        critical_mode,
        max_tokens,
        temp,
        top_p,
    ]
    outputs = [chatbot]
    # Enter key and button both trigger the same streaming handler.
    msg.submit(stream_chat, inputs=inputs, outputs=outputs)
    send.click(stream_chat, inputs=inputs, outputs=outputs)
    # Clear only chat + question box, keep persona fields for convenience
    clear.click(clear_chat, inputs=[], outputs=[chatbot, msg], queue=False)

# Queueing is required for streaming (generator) handlers.
demo.queue()

if __name__ == "__main__":
    demo.launch()