# app.py — PreData.AI market research panel demo (Gradio + CrewAI)
# Origin: Hugging Face Space "WWW1M", commit dade0c1 by GauravGoel11.
import sys
import traceback
import json
import logging
import gradio as gr
import re
from crewai import Crew, Task, Process
from common.RespondentAgent import *
from langchain_openai import ChatOpenAI
from langchain_groq import ChatGroq
# Add this at the top of your main file (not recommended for production)
import gradio_client.utils

# Keep a reference to the library's implementation so we can delegate to it.
orig_get_type = gradio_client.utils.get_type


def safe_get_type(schema):
    """Monkey-patch replacement for gradio_client.utils.get_type.

    Some Gradio API schemas contain bare booleans where the client expects a
    dict; delegate to the original implementation for everything else.
    """
    return None if isinstance(schema, bool) else orig_get_type(schema)


gradio_client.utils.get_type = safe_get_type
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Global tracker for the last active agent.
# None until someone has answered; afterwards either a single respondent name
# (str) or a list of names (when the question went to "All" respondents).
last_active_agent = None  # Initially, no agent is selected
def parse_question_with_llm(question, respondent_names, openai_llm):
    """
    Uses OpenAI's LLM to extract the specific agents being addressed and their respective questions.
    Supports compound requests.

    Returns a dict mapping capitalised respondent name -> extracted question.
    The name "General" means nobody was explicitly addressed; "All" means the
    question was posed to every respondent.
    """
    logging.info(f"Parsing question with LLM: {question}")
    prompt = f"""
You are an expert in market research interview analysis.
Your task is to **identify respondents** mentioned in the question and **extract the exact question** posed to them.
### User Input:
{question}
### Instructions:
1. Identify **each respondent being addressed**.
The respondents available are {respondent_names}. If these names are mistyped, then ensure that you match the names to the ones available.
2. Extract the **exact question** posed to each respondent.
3. Ensure extracted questions **match the original structure**.
4. If no respondent is explicitly addressed, return "General" as the respondent name.
5. If the question is posed to all respondents, return "All" as the respondent name.
6. Ensure that you follow the formatting rules exactly. THIS IS EXTREMELY IMPORTANT.
### Formatting Rules:
Provide the output in the following structured format:
- Respondent: <Respondent Name>
Question: <Extracted Question>
Only return the formatted output without explanations.
"""
    # Ask the parsing LLM and normalise its reply.
    llm_reply = openai_llm.invoke(prompt)
    raw_output = llm_reply.content.strip()
    logging.info(f"LLM Parsed Output: {raw_output}")

    pairs = {}
    current_name = "General"   # default addressee when no "- Respondent:" line precedes
    current_question = None

    for raw_line in raw_output.split("\n"):
        if "- Respondent:" in raw_line:
            # Strip the label prefix and normalise the name's capitalisation.
            current_name = re.sub(r"^.*Respondent:\s*", "", raw_line).strip().capitalize()
        elif "Question:" in raw_line:
            current_question = re.sub(r"^.*Question:\s*", "", raw_line).strip()
        # Once we have both halves of a pair, record it and reset for the next one.
        if current_name and current_question:
            pairs[current_name] = current_question
            current_name, current_question = "General", None

    return pairs
def ask_interview_question(respondent_agents_dict, question, openai_llm):
    """
    Handles both individual and group interview questions while tracking conversation flow.
    Uses OpenAI's LLM to extract the intended respondent(s) and their specific question(s).
    Uses Groq's LLM (inside each respondent agent) for response generation.

    Args:
        respondent_agents_dict: Mapping of respondent name -> RespondentAgent wrapper.
        question: Raw user input, possibly addressed to a specific respondent.
        openai_llm: LLM used only for parsing/routing the question.

    Returns:
        A list of "**Name**: answer" strings on success, or a single moderator
        string when routing fails (callers wrap non-list returns, so both
        shapes are handled downstream).
    """
    global last_active_agent  # Track last responding agent across calls
    logging.info(f"Received question: {question}")
    agent_names = list(respondent_agents_dict.keys())
    logging.info(f"Available respondents: {agent_names}")
    print(f"Available respondents: {agent_names}")

    # Use OpenAI LLM to parse question into individual respondent-specific sub-questions
    parsed_questions = parse_question_with_llm(question, str(agent_names), openai_llm)
    if not parsed_questions:
        logging.warning("No parsed questions returned. Exiting function.")
        return "**PreData Moderator**:⚠️ No valid respondents were detected for this question."
    elif len(parsed_questions) > 1:
        logging.warning("More than one respondent specified. Exiting function.")
        return "**PreData Moderator**: Please ask each respondent one question at a time."
    else:
        print(f"Parsed questions are: {parsed_questions}")

    if "General" in parsed_questions:
        # Nobody explicitly addressed: continue with whoever answered last,
        # otherwise fan the question out to the whole panel.
        if last_active_agent:
            logging.info(f"General case detected. Continuing with last active agent: {last_active_agent}")
            if isinstance(last_active_agent, list):
                parsed_questions = {name: question for name in last_active_agent}
            else:
                parsed_questions = {last_active_agent: question}
        else:
            logging.info("General case detected without a previous active agent. Assigning question to all respondents.")
            parsed_questions = {name: question for name in agent_names}
    elif "All" in parsed_questions:
        logging.info("All case detected. Assigning question to all respondents.")
        parsed_questions = {name: question for name in agent_names}

    last_active_agent = list(parsed_questions.keys())
    logging.info(f"Final parsed questions: {parsed_questions}")

    # Construct one crew and task for each agent and question
    responses = []
    for agent_name, agent_question in parsed_questions.items():
        # Match names case-insensitively: the parser capitalises names, which
        # may not match the exact keys of respondent_agents_dict.
        agent_name_key = next((k for k in respondent_agents_dict if k.lower() == agent_name.lower()), None)
        if not agent_name_key:
            logging.warning(f"No valid respondent found for {agent_name}. Skipping.")
            responses.append(f"**PreData Moderator**: {agent_name} is not a valid respondent.")
            continue

        respondent_agent = respondent_agents_dict[agent_name_key].get_agent()
        user_profile = respondent_agents_dict[agent_name_key].get_user_profile()
        communication_style = user_profile.get_field("Communication", "Style")

        # BUG FIX: interpolate the per-respondent sub-question (agent_question)
        # extracted by the parser, not the raw user input (question); previously
        # agent_question was unpacked but never used, so agents were handed the
        # full unparsed input including addressing preambles.
        question_task_description = f"""
You are responding to a market research interview question. Your response must strictly follow the **style and tone** outlined below.
---
### **Style and Tone Reference:**
{communication_style}
---
### **How to Answer:**
- Your response should be **natural, authentic, and fully aligned** with the specified style and tone.
- Ensure the answer is **clear, engaging, and directly relevant** to the question.
- Adapt your **sentence structure, phrasing, and word choices** to match the intended communication style.
- If applicable, incorporate **culturally relevant expressions, regional nuances, or industry-specific terminology** that fit the given tone.
- **Adjust response length** based on the tone—**concise and direct** for casual styles, **structured and detailed** for professional styles.
---
### **Guidelines for Ensuring Authenticity & Alignment:**
- **Consistency**: Maintain the same tone throughout the response.
- **Authenticity**: The response should feel natural and match the speaker’s persona.
- **Avoid Overgeneralisation**: Ensure responses are specific and not overly generic or robotic.
- **Cultural & Linguistic Relevance**: Adapt language and references to match the speaker’s background, industry, or region where appropriate.
- **British Spelling & Grammar**: Always use British spelling conventions.
- **Correct:** organised, prioritise, realise, behaviour, centre
- **Incorrect:** organized, prioritize, realize, behavior, center
- **Formatting**:
- If the tone is informal, allow a conversational flow that mirrors natural speech.
- If the tone is formal, use a structured and professional format.
- **Do not include emojis or hashtags in the response.**
---
### **Example Responses (for Different Styles & Tones)**
#### **Casual & Conversational Tone**
**Question:** "How do you stay updated on the latest fashion and tech trends?"
**Correct Response:**
"I keep up with trends by following influencers on Instagram and watching product reviews on YouTube. Brands like Noise and Boat always drop stylish, affordable options, so I make sure to stay ahead of the curve."
#### **Formal & Professional Tone**
**Question:** "How do you stay updated on the latest fashion and tech trends?"
**Correct Response:**
"I actively follow industry trends by reading reports, attending webinars, and engaging with thought leaders on LinkedIn. I also keep up with global fashion and technology updates through leading publications such as *The Business of Fashion* and *TechCrunch*."
---
Your final answer should be **a well-structured response that directly answers the question while maintaining the specified style and tone**:
**"{agent_question}"**
"""
        question_task_expected_output = f"""
A culturally authentic and conversational response to the question: '{agent_question}'.
- The response must reflect the respondent's **local cultural background and geographic influences**, ensuring it aligns with their **speech patterns, preferences, and linguistic style**.
- The language must follow **British spelling conventions**, ensuring it is **natural, personal, and free-flowing**, avoiding American spelling and overly Westernised phrasing.
- The response **must not introduce the respondent**, nor include placeholders like "[Your Name]" or "[Brand Name]".
- The final output should be a **single, well-structured paragraph that directly answers the question** while staying fully aligned with the specified communication style.
"""
        question_task = Task(
            description=question_task_description,
            expected_output=question_task_expected_output,
            agent=respondent_agent
        )
        logging.info(f"Executing task for {agent_name}")

        # Create a new crew for each agent-question pair
        crew = Crew(
            agents=[respondent_agent],
            tasks=[question_task],
            process=Process.sequential
        )
        try:
            crew.kickoff()
            task_output = question_task.output
            # Collect and format the response; guard against a missing output
            # object as well as an empty raw answer.
            if task_output and task_output.raw:
                responses.append(f"**{agent_name}**: {task_output.raw}")
            else:
                responses.append(f"**{agent_name}**: I wasn't able to answer right now - can you try again?")
        except Exception as e:
            logging.error(f"Error with agent {agent_name}: {str(e)}")
            responses.append(f"**{agent_name}**: I'm having trouble processing your question. Technical details: {str(e)}")

    return responses
# MAIN
if __name__ == "__main__":
    # Load deployment-specific environment/config ("urban-male-panel" panel).
    Config.load_environment(".", "urban-male-panel")
    Config.print_environment()

    # Initialize OpenAI LLM for parsing (near-deterministic settings)
    openai_llm = ChatOpenAI(
        temperature=0,
        api_key=Config.openai_api_key,
        model=Config.model,
        max_tokens=3000,
        top_p=0.1,
        frequency_penalty=0,
        presence_penalty=-0.5
    )

    # Set up Groq LLM for response generation
    fact_based_llm = ChatGroq(
        groq_api_key=Config.groq_api_key,
        model_name=Config.agent_model,
        temperature=0.1,  # Low temperature for deterministic output
    )

    # Load all user profiles from the Excel file
    data_dictionary = DataDictionary.generate_dictionary(Config.data_dictionary_file)
    print(f"Generated data dictionary: {data_dictionary}")
    respondent_agent_user_profiles = UserProfile.read_user_profiles_from_excel(Config.respondent_details_file, data_dictionary)

    # Create respondent agents for all profiles (this demo caps the panel at five)
    respondent_agents_dict = {
        profile.get_field("Demographics", "Name"): RespondentAgent.create(
            profile, f"{Config.config_dir}/fastfacts/{profile.ID}_fast_facts.xlsx", fact_based_llm
        )
        for profile in respondent_agent_user_profiles[:5]
    }

    def chatbot_interface(message, history=None):
        """
        Handles chatbot interaction. Can be used both in Gradio and from MAIN.

        Returns (updated history in Gradio "messages" format, "") — the empty
        string clears the input textbox.
        """
        if history is None:
            history = []  # Ensure history is initialized
        responses = ask_interview_question(respondent_agents_dict, message, openai_llm)
        logging.info(f"Interview response is {responses}")
        # Ensure responses is always a list (moderator errors come back as a bare string)
        if not isinstance(responses, list):
            responses = [responses]  # Wrap single response in a list
        # Format each response properly
        formatted_responses = []
        for r in responses:
            formatted_responses.append({"role": "assistant", "content": str(r)})
        # Append user message and formatted responses to history
        history.append({"role": "user", "content": message})
        history.extend(formatted_responses)  # Add each response separately
        logging.info(f"Return history: {history}")
        return history, ""

    # NOTE(review): "A9A9A9" below is missing a leading "#" — likely an invalid
    # CSS colour value; confirm the intended background colour.
    custom_css = """
body {
background-color: A9A9A9;
font-family: 'Arial', sans-serif;
color: #ffffff;
}
.gradio-container {
background: A9A9A9;
border-radius: 10px;
padding: 20px;
color: #ffffff;
}
/* Center-justify welcome and bio sections */
.welcome-section, .bio-section {
color: #ffffff;
background-color: transparent;
font-size: 16px;
text-align: center; /* Center the text */
display: block !important; /* Use flexbox for centering within the row */
justify-content: center; /* Center horizontally */
align-items: center; /* Center vertically (if needed) */
flex-direction: column; /* Arrange items vertically */
}
.welcome-section > *, .bio-section > * { /* Center markdown elements within flex container */
text-align: center;
}
#logo {
background-color: #000000;
}
.logo-row {
display: flex;
justify-content: flex-start;
align-items: flex-start;
margin: 0;
padding: 0;
}
.logo-row > * { /* Remove margins from the image itself */
margin: 0;
}
"""

    with gr.Blocks(css=custom_css) as demo:
        # Top section with the logo and welcome message
        with gr.Row(elem_classes="logo-row"):  # Logo at the top
            gr.Image("predata-logo.png", height=200, show_label=False, elem_id="logo")
        with gr.Row(elem_classes="welcome-section"):  # Welcome section below the logo
            gr.Markdown("""
# Welcome to PreData.AI's Market Research Panel [Demo]
## Introducing our AI-powered panel:
- This panel consists of **five AI-powered agents** who will answer your questions.
- Ask them a question by **typing into the input box below**.
- Feel free to address a question to a **specific panellist**.
- If no panellist is specified, all of the panellists will respond.
""")
        # Main row with two columns: bio-section and chatbot
        with gr.Row():
            # Left column for the bio-section (panellist list + bio modal + its JS)
            with gr.Column(scale=1, elem_classes="bio-section"):
                gr.HTML("""
<div style="text-align: left;">
<h2>Our Panellists</h2>
<i>Click on our panellist's name so they can introduce themselves</i>
<p><a href="#" onclick="showModal(event, 'Vipul')"><b>Vipul</b></a> – College Student & Part-Time Freelancer from Mumbai (23)</p>
<p><a href="#" onclick="showModal(event, 'Rahul')"><b>Rahul</b></a> – IT Professional & Fitness Enthusiast from Bengaluru (32)</p>
<p><a href="#" onclick="showModal(event, 'Navin')"><b>Navin</b></a> – Senior Consultant & Tech Enthusiast from Delhi (35)</p>
<p><a href="#" onclick="showModal(event, 'Amitav')"><b>Amitav</b></a> – Bank Manager & Family Man from Kolkata (45)</p>
<p><a href="#" onclick="showModal(event, 'Karthik')"><b>Karthik</b></a> – App Developer & Startup Founder from Kochi (28)</p>
</div>
<!-- Modal HTML -->
<div id="bioModal" style="display: none; position: fixed; top: 50%; left: 50%; transform: translate(-50%, -50%);
background-color: white; border: 1px solid #ccc; box-shadow: 0px 4px 6px rgba(0, 0, 0, 0.2); width: 400px; max-height: 300px; padding: 20px; text-align: left; z-index: 1000; overflow-y: auto;">
<p id="bioContent"></p>
<button onclick="closeModal()" style="margin-top: 10px; padding: 5px 10px; background-color: #007bff; color: white; border: none; border-radius: 5px; cursor: pointer;">Close</button>
</div>
<div id="modalBackdrop" style="display: none; position: fixed; top: 0; left: 0; width: 100%; height: 100%; background-color: rgba(0, 0, 0, 0.5); z-index: 999;" onclick="closeModal()"></div>
<script>
function showModal(event, name) {
event.preventDefault(); // Prevent default link behavior
let bios = {
"Vipul": "I am a 23-year-old college student and part-time freelancer from Mumbai, driven by ambition and social influence. My passion lies in fashion, social media, and tech that enhances my image and confidence. I choose smartwatches that blend style with affordability, offering notifications, health tracking, and customization to match my evolving lifestyle.",
"Rahul": "I am a 32-year-old IT professional and fitness enthusiast from Bengaluru, committed to efficiency and self-discipline. I balance long desk hours with structured fitness routines, relying on tech that enhances both productivity and health. I choose smartwatches with accurate tracking, strong battery life, and seamless app integration for a hassle-free experience.",
"Navin": "I am a 35-year-old senior consultant and tech enthusiast from Delhi, drawn to premium ecosystems and seamless connectivity. I integrate high-end devices into my professional and personal life, valuing reliability and status. I choose smartwatches with luxury aesthetics, advanced health tracking, and flawless integration with my existing tech setup.",
"Amitav": "I am a 45-year-old bank manager and family man from Kolkata, prioritizing practicality and durability in technology. My routine is structured, and I seek value-for-money products that enhance time management without unnecessary distractions. I choose smartwatches that offer long battery life, simple usability, and essential health tracking features.",
"Karthik": "I am a 28-year-old app developer and startup founder from Kochi, fascinated by well-designed, functional technology. I balance long coding hours with a keen interest in emerging innovations, preferring devices that enhance productivity without complexity. I choose smartwatches with intuitive performance, strong battery life, and smooth work tool integration."
};
// Ensure the bio exists before displaying it
if (bios[name]) {
document.getElementById('bioContent').innerText = bios[name];
} else {
document.getElementById('bioContent').innerText = "Bio not found.";
}
// Show the modal and backdrop
document.getElementById('bioModal').style.display = 'block';
document.getElementById('modalBackdrop').style.display = 'block';
}
function closeModal() {
document.getElementById('bioModal').style.display = 'none';
document.getElementById('modalBackdrop').style.display = 'none';
}
</script>
""")
            # Right column for chatbot and textbox
            with gr.Column(scale=3):  # Wider column for the chatbot
                chatbot = gr.Chatbot(label="Panel Discussion", height=400, type="messages")
                msg = gr.Textbox(placeholder="Ask your question to the panel here...")
                # Submit routes through chatbot_interface; the second output ("")
                # clears the textbox after each question.
                msg.submit(chatbot_interface, [msg, chatbot], [chatbot, msg])
        with gr.Row(elem_classes="footer-row"):
            gr.Markdown("""
<div style="text-align: center; margin-top: 20px; font-size: 14px;">
© 2025 PreData.AI - All rights reserved. | Contact us at
<a href="mailto:hello@predata.ai" onclick="event.stopPropagation();"
style="text-decoration: none; color: #007bff;">hello@predata.ai</a>
</div>
""")
    demo.launch(debug=True)