| """ | |
| Conversation Moderator - AI-powered interview moderator | |
| """ | |
| import sys | |
| import os | |
| from typing import Dict, List, Optional, Tuple | |
| # Add parent directory to path for imports | |
| sys.path.insert(0, os.path.dirname(__file__)) | |
| from llm_backend import LLMBackend | |
| from conversation_flow import ConversationFlow, ConversationNode | |
| from conversation_session import ConversationSession | |


class ConversationModerator:
    """
    AI moderator that conducts conversations based on flows.
    Handles scripted questions, dynamic follow-ups, and probing.
    """

    def __init__(self, llm_backend: LLMBackend, flow: ConversationFlow):
        self.llm = llm_backend
        self.flow = flow
        self.follow_up_threshold = 3  # Ask a follow-up every N user responses

    def start_conversation(self, session: ConversationSession) -> str:
        """
        Start a conversation by asking the first question.

        Returns:
            The opening message from the AI
        """
        first_node = self.flow.get_start_node()
        if not first_node:
            return "I apologize, but there seems to be an issue with the conversation flow."

        session.current_node_id = first_node.id
        session.add_turn("ai", first_node.content, node_id=first_node.id)
        return first_node.content

    def process_user_response(self, session: ConversationSession, user_message: str) -> str:
        """
        Process a user response and generate the next AI message.

        Args:
            session: Current conversation session
            user_message: The user's message

        Returns:
            The AI's response
        """
        # Add the user message to the session
        session.add_turn("user", user_message)

        # Decide whether to ask a scripted question or a dynamic follow-up
        if self._should_probe(session, user_message):
            # Generate a dynamic follow-up question
            ai_response = self._generate_follow_up(session, user_message)
            session.add_turn("ai", ai_response)
        else:
            # Move to the next node in the flow
            ai_response = self._get_next_scripted_question(session)
            if ai_response:
                session.add_turn("ai", ai_response, node_id=session.current_node_id)
            else:
                # End of flow
                ai_response = self._generate_closing(session)
                session.add_turn("ai", ai_response)
                session.end_session()

        return ai_response

    def _should_probe(self, session: ConversationSession, user_message: str) -> bool:
        """
        Decide if we should probe deeper or continue with scripted questions.

        Returns:
            True if we should ask a follow-up, False if we should continue the flow
        """
        # Don't probe on very short responses
        if len(user_message.split()) < 5:
            return False

        # Probe every few responses (but not too often)
        user_turns = [t for t in session.conversation_history if t.role == "user"]
        turn_count = len(user_turns)

        # With the default threshold of 3, this probes on user turns 4, 7, 10, ...
        # (every follow_up_threshold-th turn after the first one)
        if turn_count > 1 and (turn_count - 1) % self.follow_up_threshold == 0:
            return True

        # Also probe if the response contains interesting keywords
        interesting_keywords = [
            "because", "however", "although", "surprisingly", "unfortunately",
            "frustrated", "confused", "excited", "worried", "concerned",
        ]
        if any(keyword in user_message.lower() for keyword in interesting_keywords):
            return True

        return False
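
    # Worked example of the cadence check above (illustrative, not executed):
    # with the default follow_up_threshold of 3 and replies long enough to pass
    # the length check, (turn_count - 1) % 3 == 0 holds for turn_count = 4, 7, 10, ...
    #
    #   user turn:      1    2    3    4    5    6    7
    #   cadence probe:  no   no   no   yes  no   no   yes
    #
    # Replies under five words never trigger a probe, while keyword matches such
    # as "because" or "frustrated" can trigger one on any turn.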

    def _generate_follow_up(self, session: ConversationSession, user_message: str) -> str:
        """
        Generate a dynamic follow-up question using the LLM.

        Args:
            session: Current conversation session
            user_message: The user's latest message

        Returns:
            A follow-up question
        """
        # Create the prompt for generating a follow-up - optimized for Mistral/Mixtral
        system_prompt = """You are a skilled qualitative research interviewer conducting a professional interview. Your role is to:
- Build trust and rapport with respondents
- Probe deeper into meaningful points they raise
- Encourage detailed, thoughtful responses
- Stay curious and engaged without bias
When generating follow-up questions:
- Focus on a single interesting or important point they mentioned
- Ask for more detail, clarity, or deeper thinking
- Use natural, conversational phrasing
- Show genuine interest in their perspective
- Keep questions clear and concise (one sentence)
- Be empathetic and non-judgmental
Output ONLY the follow-up question text, with no additional explanation or commentary."""

        user_prompt = f"""**Respondent's Statement:** "{user_message}"
**Task:** Generate one thoughtful follow-up question that probes deeper into what they said.
Focus on:
- Exploring an interesting or important point
- Asking for more detail or their reasoning
- Encouraging reflection and deeper thinking
Provide ONLY the follow-up question text."""

        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ]

        try:
            follow_up = self.llm.generate(messages, max_tokens=100, temperature=0.7)
            # Clean up the response: strip whitespace and surrounding quotes,
            # and make sure it ends with a question mark
            follow_up = follow_up.strip().strip('"').strip("'")
            if not follow_up.endswith("?"):
                follow_up += "?"
            return follow_up
        except Exception:
            # Fall back to a generic follow-up
            return "Can you tell me more about that?"

    def _get_next_scripted_question(self, session: ConversationSession) -> Optional[str]:
        """
        Get the next scripted question from the flow.

        Returns:
            The next question, or None if end of flow
        """
        if not session.current_node_id:
            return None

        current_node = self.flow.get_node(session.current_node_id)
        if not current_node or not current_node.next:
            return None

        next_node = self.flow.get_node(current_node.next)
        if not next_node:
            return None

        session.current_node_id = next_node.id
        return next_node.content
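
    # Sketch of the traversal this method assumes (illustrative; the real
    # ConversationNode fields are defined in conversation_flow.py):
    #
    #   node "q1" (content="What do you use the product for?", next="q2")
    #     -> node "q2" (content="How often do you use it?",    next="q3")
    #     -> node "q3" (content="What would you improve?",     next=None)
    #
    # Each call advances session.current_node_id along the `next` chain and
    # returns the new node's content; once `next` is None the flow is finished
    # and process_user_response falls through to the closing message.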

    def _generate_closing(self, session: ConversationSession) -> str:
        """
        Generate a closing message for the conversation.

        Returns:
            Closing message
        """
        return (
            "Thank you so much for sharing your thoughts with me today. "
            "Your insights are incredibly valuable and will help us better understand this topic. "
            "Is there anything else you'd like to add before we finish?"
        )

    def generate_summary(self, session: ConversationSession) -> str:
        """
        Generate a summary of the conversation using the LLM.

        Args:
            session: The conversation session to summarize

        Returns:
            A summary of the conversation
        """
        # Build the conversation transcript
        transcript_parts = []
        for turn in session.conversation_history:
            speaker = "Moderator" if turn.role == "ai" else "Respondent"
            transcript_parts.append(f"{speaker}: {turn.content}")
        transcript = "\n".join(transcript_parts)

        system_prompt = """You are a qualitative research analyst summarizing a conducted interview. Your summary should be:
- Professional and objective
- Grounded in what the respondent actually said
- Organized by themes and key points
- Include representative quotes
- Highlight insights and implications
- Suitable for a research report or case study"""

        user_prompt = f"""Task: Summarize this qualitative research interview
**Interview Transcript:**
{transcript}
**Summary Requirements:**
1. **Main Topics:** What topics or subjects did the respondent discuss?
2. **Key Insights:** What are the most important or revealing points they made?
3. **Themes:** What patterns or recurring themes emerge from their responses?
4. **Representative Quotes:** Include 2-3 direct quotes that capture important moments
5. **Sentiment & Tone:** What is the overall emotional tone and sentiment?
**Format:** Write a professional summary of 3-4 paragraphs suitable for a research report.
Start with a brief overview, then discuss key themes and insights."""

        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ]

        try:
            summary = self.llm.generate(messages, max_tokens=500, temperature=0.5)
            return summary.strip()
        except Exception as e:
            return f"Summary generation failed: {str(e)}"

    def reflect_understanding(self, session: ConversationSession) -> str:
        """
        Periodically reflect back understanding to the respondent.

        Returns:
            A reflection statement
        """
        recent_turns = [t for t in session.conversation_history if t.role == "user"][-3:]
        if not recent_turns:
            return "Let me make sure I understand you correctly..."

        recent_content = " ".join([t.content for t in recent_turns])

        system_prompt = """You are a research interviewer reflecting back what you've heard. Create a brief summary (1-2 sentences) of what the respondent has shared, then ask if you understood correctly.
Format: "So if I understand correctly, [summary]. Is that right?" """

        user_prompt = f"""The respondent recently said: "{recent_content}"
Reflect back your understanding and ask for confirmation."""

        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ]

        try:
            reflection = self.llm.generate(messages, max_tokens=150, temperature=0.5)
            return reflection.strip()
        except Exception:
            return (
                "Let me make sure I understand you correctly - can you confirm that "
                "I've captured your main points accurately?"
            )