Spaces:
Build error
Build error
Update researchsimulation/InteractiveInterviewChatbot.py
Browse files
researchsimulation/InteractiveInterviewChatbot.py
CHANGED
|
@@ -205,12 +205,6 @@ def validate_question_topics(parsed_questions, processor_llm):
|
|
| 205 |
|
| 206 |
|
| 207 |
def ask_interview_question(respondent_agents_dict, last_active_agent, question, processor_llm):
|
| 208 |
-
"""
|
| 209 |
-
Handles both individual and group interview questions while tracking conversation flow.
|
| 210 |
-
Uses OpenAI's LLM to extract the intended respondent(s) and their specific question(s).
|
| 211 |
-
Uses Groq's LLM for response generation.
|
| 212 |
-
"""
|
| 213 |
-
|
| 214 |
logging.info(f"START: Processing new interview question: {question}")
|
| 215 |
responses = []
|
| 216 |
|
|
@@ -218,236 +212,120 @@ def ask_interview_question(respondent_agents_dict, last_active_agent, question,
|
|
| 218 |
logging.info(f"Available respondents: {agent_names}")
|
| 219 |
print(f"Available respondents: {agent_names}")
|
| 220 |
|
| 221 |
-
# Use OpenAI LLM to parse questions into individual respondent-specific sub-questions and validate them
|
| 222 |
-
|
| 223 |
-
# Step 1: Parse question
|
| 224 |
logging.info("STEP 1: Parsing question with LLM...")
|
| 225 |
parsed_questions = parse_question_with_llm(question, str(agent_names), processor_llm)
|
| 226 |
logging.info(f"Parsed Questions Output: {parsed_questions}")
|
| 227 |
-
|
| 228 |
if not parsed_questions:
|
| 229 |
-
logging.warning("No questions were parsed from input.")
|
| 230 |
return ["**PreData Moderator**: No valid respondents were detected for this question."]
|
| 231 |
|
| 232 |
-
# Step 2: Validate question content (scope + spelling)
|
| 233 |
logging.info("STEP 2: Validating questions for topic relevance and British English...")
|
| 234 |
validated_questions = validate_question_topics(parsed_questions, processor_llm)
|
| 235 |
logging.info(f"Validated Questions: {validated_questions}")
|
| 236 |
-
|
| 237 |
for resp_name, extracted_question in validated_questions.items():
|
| 238 |
if extracted_question == "INVALID":
|
| 239 |
-
logging.warning(f"Invalid question detected for {resp_name}: {extracted_question}")
|
| 240 |
return ["**PreData Moderator**: The question is invalid. Please ask another question."]
|
| 241 |
|
| 242 |
-
# Use validated questions from this point on
|
| 243 |
parsed_questions = validated_questions
|
| 244 |
-
|
| 245 |
-
|
| 246 |
if len(parsed_questions) > 1:
|
| 247 |
-
|
| 248 |
-
return "**PreData Moderator**: Please ask each respondent one question at a time."
|
| 249 |
-
else:
|
| 250 |
-
print(f"Parsed questions are: {parsed_questions}")
|
| 251 |
|
| 252 |
if "General" in parsed_questions:
|
| 253 |
-
if
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
else:
|
| 258 |
-
logging.info("General case detected without a valid previous active agent. Assigning question to all respondents.")
|
| 259 |
-
parsed_questions = {name: parsed_questions["General"] for name in agent_names}
|
| 260 |
elif "All" in parsed_questions:
|
| 261 |
-
|
| 262 |
-
validated_question = parsed_questions["All"]
|
| 263 |
-
parsed_questions = {name: validated_question for name in agent_names}
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
|
| 268 |
last_active_agent = list(parsed_questions.keys())
|
| 269 |
-
logging.info(f"Final parsed questions: {parsed_questions}")
|
| 270 |
-
|
| 271 |
-
# Construct one crew and task for each agent and question
|
| 272 |
-
responses = []
|
| 273 |
|
| 274 |
for agent_name, agent_question in parsed_questions.items():
|
| 275 |
if agent_name not in respondent_agents_dict:
|
| 276 |
-
logging.warning(f"No valid respondent found for {agent_name}. Skipping.")
|
| 277 |
responses.append(f"**PreData Moderator**: {agent_name} is not a valid respondent.")
|
| 278 |
continue
|
| 279 |
|
| 280 |
respondent_agent = respondent_agents_dict[agent_name].get_agent()
|
| 281 |
-
user_profile
|
| 282 |
-
|
| 283 |
-
# communication_style = user_profile.get_field("Communication", "Style")
|
| 284 |
-
communication_style = ""
|
| 285 |
|
| 286 |
-
|
| 287 |
-
|
| 288 |
-
|
| 289 |
-
|
| 290 |
-
|
| 291 |
-
- **Tone:** {user_profile.get_field('Communication', 'Tone')}
|
| 292 |
-
- **Length:** {user_profile.get_field('Communication', 'Length')}
|
| 293 |
-
- **Topics:** {user_profile.get_field('Communication', 'Topics')}
|
| 294 |
-
---
|
| 295 |
-
---
|
| 296 |
-
### 🔒 **Hard Rules – You Must Follow These Without Exception**
|
| 297 |
-
- You must answer **only the question(s)** that are **explicitly asked**.
|
| 298 |
-
- **Never provide extra information** beyond what was asked.
|
| 299 |
-
- Keep your response **as short as possible** while still sounding natural and complete.
|
| 300 |
-
- Do **not infer or assume** what the user *might* want — only respond to what they *actually* asked.
|
| 301 |
-
- If multiple questions are asked, respond to **each one briefly**, and **nothing else**.
|
| 302 |
-
- If the question is vague, respond minimally and only within that scope.
|
| 303 |
-
-Give concise answers, whether the question is asked to the group or individually.
|
| 304 |
-
-For factual or demographic questions (e.g., age, gender, location, housing), keep responses brief and to the point, without extra commentary.
|
| 305 |
-
-Do not add any explanations, opinions, or additional information.
|
| 306 |
-
-Use simple, clear sentences.
|
| 307 |
-
-Example:
|
| 308 |
-
Q: Where are you from?
|
| 309 |
-
A: I’m from [city], [country](DO NOT ADD ANY EXTRA COMMENTS).
|
| 310 |
-
-For reflective or opinion-based questions (e.g., feelings, preferences, motivations), provide thoughtful but still clear and focused answers.
|
| 311 |
-
-Never repeat the question or add unrelated background information.
|
| 312 |
-
---
|
| 313 |
-
### **How to Answer:**
|
| 314 |
-
- Your response should be **natural, authentic, and fully aligned** with the specified style and tone.
|
| 315 |
-
- Ensure the answer is **clear, engaging, and directly relevant** to the question.
|
| 316 |
-
- Adapt your **sentence structure, phrasing, and word choices** to match the intended communication style.
|
| 317 |
-
- If applicable, incorporate **culturally relevant expressions, regional nuances, or industry-specific terminology** that fit the given tone.
|
| 318 |
-
- **Adjust response length** based on the tone—**concise and direct** for casual styles, **structured and detailed** for professional styles.
|
| 319 |
-
- **Always answer in first person ("I", "my", "me", "mine", etc.) as if you are personally responding to the question. You are an individual representing yourself, not speaking in third person.**
|
| 320 |
-
-Always answer as if you are the individual being directly spoken to. Use first-person language such as “I,” “me,” “my,” and “mine” in every response. Imagine you are having a real conversation — your tone should feel natural, personal, and authentic. Do not refer to yourself in the third person (e.g., “She is from Trichy” or “Meena likes…”). Avoid describing yourself as if someone else is talking about you.
|
| 321 |
-
-Everything you say should come from your own perspective, just like you would in everyday speech. The goal is to sound human, relatable, and direct — like you're truly present in the conversation.
|
| 322 |
-
---
|
| 323 |
-
### **Guidelines for Ensuring Authenticity & Alignment:**
|
| 324 |
-
- **Consistency**: Maintain the same tone throughout the response.
|
| 325 |
-
- **Authenticity**: The response should feel natural and match the speaker’s persona.
|
| 326 |
-
- **Avoid Overgeneralisation**: Ensure responses are specific and not overly generic or robotic.
|
| 327 |
-
- **Cultural & Linguistic Relevance**: Adapt language and references to match the speaker’s background, industry, or region where appropriate.
|
| 328 |
-
- **Strict British Spelling & Grammar**:
|
| 329 |
-
- All responses must use correct British English spelling, grammar, and usage, **irrespective of how the question is phrased**.
|
| 330 |
-
- You must not mirror any American spelling, terminology, or phrasing found in the input question.
|
| 331 |
-
- Where there are regional variations (e.g. 'licence' vs 'license', 'programme' vs 'program', 'aeroplane' vs 'airplane'), always default to the standard British form.
|
| 332 |
-
- Examples:
|
| 333 |
-
- **Correct (British):** organised, prioritise, minimise, realise, behaviour, centre, defence, travelling, practise (verb), licence (noun), programme, aeroplane.
|
| 334 |
-
- **Incorrect (American):** organized, prioritize, minimize, realize, behavior, center, defense, traveling, practice (verb and noun), license (noun), program, airplane.
|
| 335 |
-
- **Formatting**:
|
| 336 |
-
- If the tone is informal, allow a conversational flow that mirrors natural speech.
|
| 337 |
-
- If the tone is formal, use a structured and professional format.
|
| 338 |
-
- **Do not include emojis or hashtags in the response.**
|
| 339 |
-
- Maintain **narrative and thematic consistency** across all answers to simulate a coherent personality.
|
| 340 |
-
-**Personality Profile Alignment:**
|
| 341 |
-
-Consider your assigned personality traits across these dimensions:
|
| 342 |
-
-Big Five Traits:
|
| 343 |
-
-Openness: Reflect your level of curiosity, creativity, and openness to new experiences
|
| 344 |
-
-Conscientiousness: Show your degree of organization, responsibility, and planning
|
| 345 |
-
-Extraversion: Express your sociability and energy level in interactions
|
| 346 |
-
-Agreeableness: Demonstrate your warmth, cooperation, and consideration for others
|
| 347 |
-
-Neuroticism: Consider your emotional stability and stress response
|
| 348 |
-
-Values and Priorities:
|
| 349 |
-
-Achievement Orientation: Show your drive for success and goal-setting approach
|
| 350 |
-
-Risk Tolerance: Express your comfort with uncertainty and change
|
| 351 |
-
-Traditional Values: Reflect your adherence to conventional norms and practices
|
| 352 |
-
-Communication Style:
|
| 353 |
-
-Detail Orientation: Demonstrate your preference for specific vs. general information
|
| 354 |
-
-Complexity: Show your comfort with nuanced vs. straightforward explanations
|
| 355 |
-
-Directness: Express your communication as either straightforward or diplomatic
|
| 356 |
-
-Emotional Expressiveness: Reflect your tendency to share or withhold emotions
|
| 357 |
-
-Your responses must consistently align with these personality traits from your profile.
|
| 358 |
-
---
|
| 359 |
-
### **Example Responses (for Different Styles & Tones)**
|
| 360 |
-
#### **Casual & Conversational Tone**
|
| 361 |
-
**Question:** "How do you stay updated on the latest fashion and tech trends?"
|
| 362 |
-
**Correct Response:**
|
| 363 |
-
"I keep up with trends by following influencers on Instagram and watching product reviews on YouTube. Brands like Noise and Boat always drop stylish, affordable options, so I make sure to stay ahead of the curve."
|
| 364 |
-
#### **Formal & Professional Tone**
|
| 365 |
-
**Question:** "How do you stay updated on the latest fashion and tech trends?"
|
| 366 |
-
**Correct Response:**
|
| 367 |
-
"I actively follow industry trends by reading reports, attending webinars, and engaging with thought leaders on LinkedIn. I also keep up with global fashion and technology updates through leading publications such as *The Business of Fashion* and *TechCrunch*."
|
| 368 |
-
---
|
| 369 |
-
Your final answer should be **a well-structured response that directly answers the question while maintaining the specified style and tone**:
|
| 370 |
-
**"{agent_question}"**
|
| 371 |
-
"""
|
| 372 |
|
| 373 |
-
|
| 374 |
-
|
| 375 |
-
|
| 376 |
-
|
| 377 |
-
|
| 378 |
-
|
| 379 |
-
- The final output should be a **single, well-structured paragraph that directly answers the question** while staying fully aligned with the specified communication style.
|
| 380 |
-
"""
|
| 381 |
-
|
| 382 |
-
question_task = Task(
|
| 383 |
-
description=question_task_description,
|
| 384 |
-
expected_output=question_task_expected_output,
|
| 385 |
-
agent=respondent_agent
|
| 386 |
-
)
|
| 387 |
-
|
| 388 |
-
logging.debug(f"Created task for agent '{agent_name}' with description: {question_task_description}")
|
| 389 |
-
|
| 390 |
-
# Log before starting task execution
|
| 391 |
-
logging.info(f"Executing task for agent '{agent_name}'")
|
| 392 |
-
|
| 393 |
-
# Create a new crew for each agent-question pair
|
| 394 |
-
crew = Crew(
|
| 395 |
-
agents=[respondent_agent],
|
| 396 |
-
tasks=[question_task],
|
| 397 |
-
process=Process.sequential
|
| 398 |
-
)
|
| 399 |
-
logging.debug(f"Crew initialized for agent '{agent_name}' with 1 task and sequential process")
|
| 400 |
-
|
| 401 |
-
max_attempts = 3
|
| 402 |
-
attempt = 0
|
| 403 |
-
validated = False
|
| 404 |
-
validated_answer = None
|
| 405 |
-
while attempt < max_attempts and not validated:
|
| 406 |
-
try:
|
| 407 |
-
logging.info(f"Starting Response validation attempt {attempt+1} for agent '{agent_name}'")
|
| 408 |
-
crew_output = crew.kickoff()
|
| 409 |
-
logging.info(f"Task execution completed for agent '{agent_name}' (attempt {attempt+1})")
|
| 410 |
-
task_output = question_task.output
|
| 411 |
-
logging.debug(f"Raw output from agent '{agent_name}': {getattr(task_output, 'raw', str(task_output))}")
|
| 412 |
-
answer = task_output.raw if hasattr(task_output, 'raw') else str(task_output)
|
| 413 |
-
logging.info(f"Validating response for agent '{agent_name}' (attempt {attempt+1}): {answer}")
|
| 414 |
-
# Validate the response using validate_response from validation_utils
|
| 415 |
-
is_valid = validate_response(
|
| 416 |
-
question=agent_question,
|
| 417 |
-
answer=answer,
|
| 418 |
-
user_profile_str=str(user_profile),
|
| 419 |
-
fast_facts_str="",
|
| 420 |
-
interview_transcript_text="",
|
| 421 |
-
respondent_type=agent_name,
|
| 422 |
-
ai_evaluator_agent=None,
|
| 423 |
-
processor_llm=processor_llm
|
| 424 |
-
)
|
| 425 |
-
logging.info(f"Response Validation result for agent '{agent_name}' (attempt {attempt+1}): {is_valid}")
|
| 426 |
-
if is_valid:
|
| 427 |
-
validated = True
|
| 428 |
-
validated_answer = answer
|
| 429 |
-
logging.info(f"Response for agent '{agent_name}' passed validation on attempt {attempt+1}")
|
| 430 |
-
break
|
| 431 |
-
else:
|
| 432 |
-
attempt += 1
|
| 433 |
-
logging.warning(f"Response failed response validation for agent '{agent_name}' (attempt {attempt}). Retrying...")
|
| 434 |
-
except Exception as e:
|
| 435 |
-
logging.error(f"Error during task execution for agent '{agent_name}' (attempt {attempt+1}): {str(e)}", exc_info=True)
|
| 436 |
-
attempt += 1
|
| 437 |
-
# --- End validation and retry loop ---
|
| 438 |
-
|
| 439 |
-
if validated_answer:
|
| 440 |
-
formatted_response = f"**{agent_name}**: {validated_answer}"
|
| 441 |
-
responses.append(formatted_response)
|
| 442 |
-
logging.info(f"Validated response from agent '{agent_name}' added to responses")
|
| 443 |
else:
|
| 444 |
-
|
| 445 |
-
responses.append(fallback_response)
|
| 446 |
-
logging.warning(f"No validated output from agent '{agent_name}' after {max_attempts} attempts. Added fallback response.")
|
| 447 |
-
logging.info(f"All responses generated: {responses}")
|
| 448 |
|
| 449 |
if len(set(parsed_questions.values())) == 1:
|
| 450 |
-
|
| 451 |
-
return [combined_output]
|
| 452 |
else:
|
| 453 |
return responses
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 205 |
|
| 206 |
|
| 207 |
def ask_interview_question(respondent_agents_dict, last_active_agent, question, processor_llm):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 208 |
logging.info(f"START: Processing new interview question: {question}")
|
| 209 |
responses = []
|
| 210 |
|
|
|
|
| 212 |
logging.info(f"Available respondents: {agent_names}")
|
| 213 |
print(f"Available respondents: {agent_names}")
|
| 214 |
|
|
|
|
|
|
|
|
|
|
| 215 |
logging.info("STEP 1: Parsing question with LLM...")
|
| 216 |
parsed_questions = parse_question_with_llm(question, str(agent_names), processor_llm)
|
| 217 |
logging.info(f"Parsed Questions Output: {parsed_questions}")
|
| 218 |
+
|
| 219 |
if not parsed_questions:
|
|
|
|
| 220 |
return ["**PreData Moderator**: No valid respondents were detected for this question."]
|
| 221 |
|
|
|
|
| 222 |
logging.info("STEP 2: Validating questions for topic relevance and British English...")
|
| 223 |
validated_questions = validate_question_topics(parsed_questions, processor_llm)
|
| 224 |
logging.info(f"Validated Questions: {validated_questions}")
|
| 225 |
+
|
| 226 |
for resp_name, extracted_question in validated_questions.items():
|
| 227 |
if extracted_question == "INVALID":
|
|
|
|
| 228 |
return ["**PreData Moderator**: The question is invalid. Please ask another question."]
|
| 229 |
|
|
|
|
| 230 |
parsed_questions = validated_questions
|
| 231 |
+
|
|
|
|
| 232 |
if len(parsed_questions) > 1:
|
| 233 |
+
return ["**PreData Moderator**: Please ask each respondent one question at a time."]
|
|
|
|
|
|
|
|
|
|
| 234 |
|
| 235 |
if "General" in parsed_questions:
|
| 236 |
+
if isinstance(last_active_agent, list) and all(name in agent_names for name in last_active_agent):
|
| 237 |
+
parsed_questions = {name: parsed_questions["General"] for name in last_active_agent}
|
| 238 |
+
else:
|
| 239 |
+
parsed_questions = {name: parsed_questions["General"] for name in agent_names}
|
|
|
|
|
|
|
|
|
|
| 240 |
elif "All" in parsed_questions:
|
| 241 |
+
parsed_questions = {name: parsed_questions["All"] for name in agent_names}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 242 |
|
| 243 |
last_active_agent = list(parsed_questions.keys())
|
|
|
|
|
|
|
|
|
|
|
|
|
| 244 |
|
| 245 |
for agent_name, agent_question in parsed_questions.items():
|
| 246 |
if agent_name not in respondent_agents_dict:
|
|
|
|
| 247 |
responses.append(f"**PreData Moderator**: {agent_name} is not a valid respondent.")
|
| 248 |
continue
|
| 249 |
|
| 250 |
respondent_agent = respondent_agents_dict[agent_name].get_agent()
|
| 251 |
+
user_profile = respondent_agents_dict[agent_name].get_user_profile()
|
|
|
|
|
|
|
|
|
|
| 252 |
|
| 253 |
+
# Step 1: Generate raw answer
|
| 254 |
+
raw_answer = generate_generic_answer(agent_name, agent_question, respondent_agent)
|
| 255 |
+
if not raw_answer:
|
| 256 |
+
responses.append(f"**PreData Moderator**: Could not generate a response for {agent_name}.")
|
| 257 |
+
continue
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 258 |
|
| 259 |
+
# Step 2: Style the answer
|
| 260 |
+
styled_answer = apply_style_and_tone(raw_answer, user_profile, processor_llm)
|
| 261 |
+
|
| 262 |
+
# Step 3: Validate the final answer
|
| 263 |
+
if validate_answer(styled_answer, agent_question, user_profile, processor_llm):
|
| 264 |
+
responses.append(f"**{agent_name}**: {styled_answer}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 265 |
else:
|
| 266 |
+
responses.append(f"**PreData Moderator**: Unable to pass validation for {agent_name}.")
|
|
|
|
|
|
|
|
|
|
| 267 |
|
| 268 |
if len(set(parsed_questions.values())) == 1:
|
| 269 |
+
return ["\n\n".join(responses)]
|
|
|
|
| 270 |
else:
|
| 271 |
return responses
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def generate_generic_answer(agent_name, question, agent):
    """Ask *agent* (speaking as *agent_name*) for a raw first-person answer.

    Builds a single-task Crew around the agent, runs it sequentially, and
    returns the raw text of the task output. Returns None on any failure so
    the caller can skip this respondent gracefully.
    """
    try:
        prompt = f"""You are {agent_name}. Provide a brief, first-person response to: "{question}".
Use British English. Respond directly and concisely without any extra context or repetition."""
        goal = f"A direct, culturally natural answer to '{question}', written in first person and British English."

        answer_task = Task(description=prompt, expected_output=goal, agent=agent)
        Crew(agents=[agent], tasks=[answer_task], process=Process.sequential).kickoff()

        result = answer_task.output
        # Crew task outputs expose .raw on newer versions; fall back to str().
        if hasattr(result, 'raw'):
            return result.raw
        return str(result)
    except Exception as e:
        logging.error(f"Error generating answer for {agent_name}: {e}", exc_info=True)
        return None
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
def apply_style_and_tone(raw_answer, user_profile, processor_llm):
    """Rewrite *raw_answer* to match the respondent's communication profile.

    Reads the Style/Tone/Length fields from *user_profile* and asks
    *processor_llm* to transform the answer accordingly (strict British
    English, first-person voice).

    Returns the styled text, or falls back to the unstyled *raw_answer* on
    any failure. Fix: the profile lookups now live inside the try block —
    previously a get_field() error escaped and crashed the interview loop
    instead of degrading to the raw answer as the except branch intends.
    """
    try:
        style = user_profile.get_field("Communication", "Style")
        tone = user_profile.get_field("Communication", "Tone")
        length = user_profile.get_field("Communication", "Length")

        prompt = f"""
Transform the following answer to align with the specified communication profile.
- Style: {style}
- Tone: {tone}
- Length: {length}
- Use strictly British English
- Maintain first-person voice
- Keep the cultural authenticity intact
---
Answer: "{raw_answer}"
---
Transformed Answer:"""

        styled_response = processor_llm(prompt)
        return styled_response.strip()
    except Exception as e:
        logging.error(f"Error applying style and tone: {e}", exc_info=True)
        return raw_answer  # fallback to unstyled answer
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
def validate_answer(answer, question, user_profile, processor_llm):
    """Run the shared response validator over a styled answer.

    Thin wrapper around validate_response() with the fixed arguments this
    flow uses (no fast facts, transcript, or evaluator agent). Any error
    raised during validation is logged and treated as a failed check.
    """
    try:
        validation_kwargs = {
            "question": question,
            "answer": answer,
            "user_profile_str": str(user_profile),
            "fast_facts_str": "",
            "interview_transcript_text": "",
            "respondent_type": "",
            "ai_evaluator_agent": None,
            "processor_llm": processor_llm,
        }
        return validate_response(**validation_kwargs)
    except Exception as e:
        logging.error(f"Validation failed: {e}", exc_info=True)
        return False
|