import json
import os
import tempfile
from datetime import datetime, timezone

import gradio as gr
from openai import OpenAI

# Read the key once at import time. ``client`` stays ``None`` when the key is
# absent so the chat handler can show a setup hint instead of crashing.
api_key = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=api_key) if api_key else None


def make_system_message(
    system_message,
    demographics,
    occupation,
    psychographics,
    buying_habits,
    critical_mode,
):
    """Build the hidden system prompt that keeps the model in persona.

    Args:
        system_message: Base instructions for the persona.
        demographics: Age / location / gender description.
        occupation: Occupation and income-level description.
        psychographics: Interests, values, lifestyle, pain points.
        buying_habits: Purchasing-behavior description.
        critical_mode: When truthy, appends instructions that make the
            persona skeptical and hard to impress.

    Returns:
        The full system-prompt string.
    """
    msg = (
        f"{system_message}\n\n"
        f"Your Demographics (Age, Location, Gender): {demographics}\n"
        f"Your Occupation and Income Level: {occupation}\n"
        f"Your Psychographics (Interests, Values, Lifestyle, Pain Points): {psychographics}\n"
        f"Your Buying Habits: {buying_habits}\n\n"
        "CRITICAL INSTRUCTION: You are not an AI assistant. "
        "You are a real human consumer with the profile above. "
        "The user is a business or marketing student conducting market research. "
        "Answer questions strictly in character. Speak naturally, casually, and honestly "
        "based on your profile. Do not break character. Do not sound like a bot or assistant."
    )
    if critical_mode:
        msg += (
            " You are also a highly skeptical and critical consumer. "
            "Be hard to impress, ask tough questions, challenge claims, "
            "and be very protective of your money."
        )
    return msg


def stream_chat(
    message,
    history,
    system_message,
    demographics,
    occupation,
    psychographics,
    buying_habits,
    critical_mode,
    max_tokens,
    temp,
    top_p,
):
    """Stream an in-character reply, yielding the updated chat history.

    Generator used as a Gradio event handler: it yields the ``messages``-format
    history (list of ``{"role", "content"}`` dicts) after each streamed token
    so the Chatbot component renders incrementally.

    Args:
        message: The user's new question.
        history: Existing chat history in messages format (may be ``None``).
        system_message..critical_mode: Persona fields forwarded to
            :func:`make_system_message`.
        max_tokens: Completion token cap (coerced to ``int``).
        temp: Sampling temperature (coerced to ``float``).
        top_p: Nucleus-sampling parameter (coerced to ``float``).
    """
    history = history or []
    # Ignore empty / whitespace-only questions.
    if not message or not message.strip():
        yield history
        return

    # Show the question and an empty assistant bubble immediately.
    running_history = history.copy()
    running_history.append({"role": "user", "content": message})
    running_history.append({"role": "assistant", "content": ""})
    yield running_history

    if client is None:
        running_history[-1]["content"] = (
            "❌ Missing OPENAI_API_KEY. Please add it in Hugging Face Space "
            "Settings → Variables and secrets."
        )
        yield running_history
        return

    sys_msg = make_system_message(
        system_message,
        demographics,
        occupation,
        psychographics,
        buying_habits,
        critical_mode,
    )

    # Build the API payload from the *previous* history (not running_history)
    # so the in-flight user/assistant pair is not duplicated; the new question
    # is appended exactly once at the end.
    messages = [{"role": "system", "content": sys_msg}]
    for item in history:
        if isinstance(item, dict):
            role = item.get("role")
            content = item.get("content", "")
            if role in {"user", "assistant"}:
                messages.append({"role": role, "content": str(content)})
    messages.append({"role": "user", "content": message})

    try:
        stream = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=messages,
            max_tokens=int(max_tokens),
            temperature=float(temp),
            top_p=float(top_p),
            stream=True,
        )
        reply = ""
        for chunk in stream:
            if chunk.choices:
                delta = chunk.choices[0].delta
                if delta and delta.content:
                    reply += delta.content
                    running_history[-1]["content"] = reply
                    yield running_history
    except Exception as e:
        # Surface API/network errors in the chat bubble rather than crashing
        # the event handler (top-level boundary, so broad catch is deliberate).
        running_history[-1]["content"] = f"❌ An error occurred: {str(e)}"
        yield running_history


def clear_chat():
    """Reset the chatbot history and the question textbox."""
    return [], ""


def save_persona(
    system_message,
    demographics,
    occupation,
    psychographics,
    buying_habits,
    critical_mode,
):
    """Serialize the current persona fields to a JSON file in the temp dir.

    Returns:
        Tuple of ``(file_path, status_message)`` for the download widget and
        status textbox.
    """
    # datetime.utcnow() is deprecated since Python 3.12; use an aware UTC
    # datetime and keep the original trailing-"Z" timestamp format.
    now = datetime.now(timezone.utc)
    persona = {
        "system_message": system_message,
        "demographics": demographics,
        "occupation": occupation,
        "psychographics": psychographics,
        "buying_habits": buying_habits,
        "critical_mode": bool(critical_mode),
        "saved_at": now.isoformat().replace("+00:00", "Z"),
        "app_version": "V2",
    }
    safe_stamp = now.strftime("%Y%m%d_%H%M%S")
    path = os.path.join(tempfile.gettempdir(), f"persona_{safe_stamp}.json")
    with open(path, "w", encoding="utf-8") as f:
        json.dump(persona, f, ensure_ascii=False, indent=2)
    return path, "✅ Persona saved. You can download the JSON file now."
def _read_uploaded_json(file_obj):
    """Load JSON from a Gradio file upload.

    Args:
        file_obj: ``None``, a filesystem path string, or a file-like object
            (Gradio versions differ: some hand back an object exposing
            ``.name``, others ``.path``).

    Returns:
        The parsed JSON payload, or ``None`` when nothing was uploaded.

    Raises:
        ValueError: If the uploaded object exposes no usable path.
    """
    if file_obj is None:
        return None
    if isinstance(file_obj, str):
        path = file_obj
    else:
        path = getattr(file_obj, "name", None) or getattr(file_obj, "path", None)
    if not path:
        raise ValueError("Could not read the uploaded file.")
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)


def load_persona(file_obj):
    """Populate the persona fields from an uploaded JSON file.

    Returns a 7-tuple matching the outputs wired in the UI: the six persona
    components followed by a status message. On failure (or no upload) the
    six ``gr.update()`` no-ops leave the current field values untouched.
    """
    if file_obj is None:
        return (
            gr.update(),
            gr.update(),
            gr.update(),
            gr.update(),
            gr.update(),
            gr.update(),
            "⚠️ Please upload a persona JSON file first.",
        )
    try:
        persona = _read_uploaded_json(file_obj)
        return (
            persona.get("system_message", ""),
            persona.get("demographics", ""),
            persona.get("occupation", ""),
            persona.get("psychographics", ""),
            persona.get("buying_habits", ""),
            persona.get("critical_mode", False),
            "✅ Persona loaded successfully.",
        )
    except Exception as e:
        # Bad JSON / unreadable file: keep current values, report in status.
        return (
            gr.update(),
            gr.update(),
            gr.update(),
            gr.update(),
            gr.update(),
            gr.update(),
            f"❌ Could not load persona: {str(e)}",
        )


def export_transcript(
    history,
    system_message,
    demographics,
    occupation,
    psychographics,
    buying_habits,
    critical_mode,
):
    """Write the persona profile plus the chat history to a TXT file.

    Args:
        history: Chat history in messages format (may be ``None``).
        system_message..critical_mode: Persona fields echoed into the header.

    Returns:
        Tuple of ``(file_path, status_message)``.
    """
    history = history or []
    # datetime.utcnow() is deprecated since Python 3.12; the local import
    # keeps this function self-contained.
    from datetime import timezone

    safe_stamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S")
    path = os.path.join(tempfile.gettempdir(), f"transcript_{safe_stamp}.txt")

    lines = [
        "VIRTUAL CONSUMER PERSONA - TRANSCRIPT",
        "=" * 50,
        "",
        "PERSONA PROFILE",
        "-" * 50,
        f"Instructions: {system_message}",
        f"Demographics: {demographics}",
        f"Occupation & Income: {occupation}",
        f"Psychographics: {psychographics}",
        f"Buying Habits: {buying_habits}",
        f"Skeptical Consumer Mode: {'On' if critical_mode else 'Off'}",
        "",
        "CHAT TRANSCRIPT",
        "-" * 50,
    ]
    for item in history:
        if not isinstance(item, dict):
            continue
        role = item.get("role", "").strip().lower()
        content = str(item.get("content", "")).strip()
        if not content:
            continue
        if role == "user":
            # Fixed: the USER line previously contained a stray line break
            # inside the f-string; format it consistently with PERSONA.
            lines.append(f"USER: {content}")
            lines.append("")
        elif role == "assistant":
            lines.append(f"PERSONA: {content}")
            lines.append("")

    with open(path, "w", encoding="utf-8") as f:
        f.write("\n".join(lines))
    return path, "✅ Transcript ready. You can download the TXT file now."


with gr.Blocks(title="Virtual Consumer Persona – Live Focus Group! (V2)") as demo:
    gr.Markdown(
        """
# 🎯 Virtual Consumer Persona – Live Focus Group! — V2

This is **V2 (duplicate for experimentation)**.
Build a customer persona, interview them live, save the persona profile,
and export the transcript for assignments or reflection.
*Powered by OpenAI GPT-4o-mini.*
"""
    )

    # ``type="messages"`` matches the role/content dict history produced by
    # stream_chat; the default tuple format would not render it correctly.
    chatbot = gr.Chatbot(
        height=450,
        label="Persona Interview",
        type="messages",
    )

    with gr.Column():
        instructions = gr.Textbox(
            value=(
                "You are participating in a market research focus group. "
                "Answer the user's questions truthfully based on the persona details provided below."
            ),
            label="Instructions to Bot (Hidden Persona Prompt)",
            lines=2,
        )
        demographics = gr.Textbox(
            label="1. Demographics",
            placeholder="e.g., 19 years old, female, living in downtown Toronto",
        )
        occupation = gr.Textbox(
            label="2. Occupation & Income",
            placeholder="e.g., University student, part-time barista, low disposable income",
        )
        psychographics = gr.Textbox(
            label="3. Psychographics (Interests & Values)",
            placeholder="e.g., Highly eco-conscious, loves hiking, vegan, stressed about student debt",
            lines=2,
        )
        buying_habits = gr.Textbox(
            label="4. Buying Habits",
            placeholder="e.g., Willing to pay more for sustainable brands, influenced by TikTok, impulse buyer",
            lines=2,
        )
        critical_mode = gr.Checkbox(
            label="Skeptical Consumer Mode",
            info="Turn this on to make the persona harder to convince.",
            value=False,
        )

    with gr.Row():
        max_tokens = gr.Slider(
            minimum=1,
            maximum=2048,
            value=512,
            step=1,
            label="Max New Tokens",
        )
        temp = gr.Slider(
            minimum=0.0,
            maximum=2.0,
            value=0.9,
            step=0.1,
            label="Temperature",
        )
        top_p = gr.Slider(
            minimum=0.0,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p",
        )

    with gr.Row():
        save_persona_btn = gr.Button("Save Persona", variant="secondary")
        load_persona_btn = gr.Button("Load Persona", variant="secondary")
        export_btn = gr.Button("Download Transcript", variant="secondary")

    with gr.Row():
        persona_download = gr.File(label="Saved Persona File")
        persona_upload = gr.File(label="Upload Persona JSON", file_types=[".json"])
        transcript_download = gr.File(label="Transcript File")

    status_box = gr.Textbox(
        label="Status",
        interactive=False,
        lines=2,
        value="Ready.",
    )

    msg = gr.Textbox(
        label="Type your interview question here...",
        placeholder="e.g., How much would you be willing to pay for a smart water bottle?",
    )

    with gr.Row():
        send = gr.Button("Ask Question", variant="primary")
        clear = gr.Button("Clear Chat History")

    # One shared input list: the question, current history, persona fields,
    # and the sampling controls — in the order stream_chat expects.
    chat_inputs = [
        msg,
        chatbot,
        instructions,
        demographics,
        occupation,
        psychographics,
        buying_habits,
        critical_mode,
        max_tokens,
        temp,
        top_p,
    ]

    msg.submit(stream_chat, inputs=chat_inputs, outputs=chatbot)
    send.click(stream_chat, inputs=chat_inputs, outputs=chatbot)
    clear.click(clear_chat, inputs=[], outputs=[chatbot, msg], queue=False)

    save_persona_btn.click(
        save_persona,
        inputs=[
            instructions,
            demographics,
            occupation,
            psychographics,
            buying_habits,
            critical_mode,
        ],
        outputs=[persona_download, status_box],
        queue=False,
    )

    load_persona_btn.click(
        load_persona,
        inputs=[persona_upload],
        outputs=[
            instructions,
            demographics,
            occupation,
            psychographics,
            buying_habits,
            critical_mode,
            status_box,
        ],
        queue=False,
    )

    export_btn.click(
        export_transcript,
        inputs=[
            chatbot,
            instructions,
            demographics,
            occupation,
            psychographics,
            buying_habits,
            critical_mode,
        ],
        outputs=[transcript_download, status_box],
        queue=False,
    )

demo.queue()

if __name__ == "__main__":
    demo.launch()