"""

Conversation Moderator - AI-powered interview moderator

"""
import sys
import os
from typing import Optional

# Add parent directory to path for imports
sys.path.insert(0, os.path.dirname(__file__))

from llm_backend import LLMBackend
from conversation_flow import ConversationFlow, ConversationNode
from conversation_session import ConversationSession


class ConversationModerator:
    """

    AI moderator that conducts conversations based on flows.

    Handles scripted questions, dynamic follow-ups, and probing.

    """

    def __init__(self, llm_backend: LLMBackend, flow: ConversationFlow):
        self.llm = llm_backend
        self.flow = flow
        self.follow_up_threshold = 3  # Ask follow-up every N user responses
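        # Note: with a threshold of 3, _should_probe fires a dynamic probe on
        # user turns 4, 7, 10, ...; keyword matches can also trigger probes on
        # other turns.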

    def start_conversation(self, session: ConversationSession) -> str:
        """

        Start a conversation by asking the first question.



        Returns:

            The opening message from the AI

        """
        first_node = self.flow.get_start_node()
        if not first_node:
            return "I apologize, but there seems to be an issue with the conversation flow."

        session.current_node_id = first_node.id
        session.add_turn("ai", first_node.content, node_id=first_node.id)
        return first_node.content

    def process_user_response(self, session: ConversationSession, user_message: str) -> str:
        """

        Process a user response and generate the next AI message.



        Args:

            session: Current conversation session

            user_message: The user's message



        Returns:

            The AI's response

        """
        # Add user message to session
        session.add_turn("user", user_message)

        # Decide whether to ask scripted question or dynamic follow-up
        if self._should_probe(session, user_message):
            # Generate dynamic follow-up question
            ai_response = self._generate_follow_up(session, user_message)
            session.add_turn("ai", ai_response)
        else:
            # Move to next node in flow
            ai_response = self._get_next_scripted_question(session)
            if ai_response:
                session.add_turn("ai", ai_response, node_id=session.current_node_id)
            else:
                # End of flow
                ai_response = self._generate_closing(session)
                session.add_turn("ai", ai_response)
                session.end_session()

        return ai_response

    def _should_probe(self, session: ConversationSession, user_message: str) -> bool:
        """

        Decide if we should probe deeper or continue with scripted questions.



        Returns:

            True if should ask follow-up, False if should continue flow

        """
        # Don't probe on very short responses
        if len(user_message.split()) < 5:
            return False

        # Probe every few responses (but not too often)
        user_turns = [t for t in session.conversation_history if t.role == "user"]
        turn_count = len(user_turns)

        # Probe on turns 4, 7, 10, etc. (every follow_up_threshold user turns,
        # skipping the first response)
        if turn_count > 1 and (turn_count - 1) % self.follow_up_threshold == 0:
            return True

        # Also probe if response contains interesting keywords
        interesting_keywords = [
            "because", "however", "although", "surprisingly", "unfortunately",
            "frustrated", "confused", "excited", "worried", "concerned"
        ]
        if any(keyword in user_message.lower() for keyword in interesting_keywords):
            return True

        return False

    def _generate_follow_up(self, session: ConversationSession, user_message: str) -> str:
        """

        Generate a dynamic follow-up question using the LLM.



        Args:

            session: Current conversation session

            user_message: The user's latest message



        Returns:

            A follow-up question

        """
        # Create prompt for generating follow-up - optimized for Mistral/Mixtral
        system_prompt = """You are a skilled qualitative research interviewer conducting a professional interview. Your role is to:

- Build trust and rapport with respondents

- Probe deeper into meaningful points they raise

- Encourage detailed, thoughtful responses

- Stay curious and engaged without bias



When generating follow-up questions:

- Focus on a single interesting or important point they mentioned

- Ask for more detail, clarity, or deeper thinking

- Use natural, conversational phrasing

- Show genuine interest in their perspective

- Keep questions clear and concise (one sentence)

- Be empathetic and non-judgmental



Output ONLY the follow-up question text, with no additional explanation or commentary."""

        user_prompt = f"""**Respondent's Statement:** "{user_message}"



**Task:** Generate one thoughtful follow-up question that probes deeper into what they said.



Focus on:

- Exploring an interesting or important point

- Asking for more detail or their reasoning

- Encouraging reflection and deeper thinking



Provide ONLY the follow-up question text."""

        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt}
        ]

        try:
            follow_up = self.llm.generate(messages, max_tokens=100, temperature=0.7)
            # Clean up the response
            follow_up = follow_up.strip().strip('"').strip("'")
            if not follow_up.endswith("?"):
                follow_up += "?"
            return follow_up
        except Exception:
            # Fall back to a generic follow-up if generation fails
            return "Can you tell me more about that?"

    def _get_next_scripted_question(self, session: ConversationSession) -> Optional[str]:
        """

        Get the next scripted question from the flow.



        Returns:

            The next question, or None if end of flow

        """
        if not session.current_node_id:
            return None

        current_node = self.flow.get_node(session.current_node_id)
        if not current_node or not current_node.next:
            return None

        next_node = self.flow.get_node(current_node.next)
        if not next_node:
            return None

        session.current_node_id = next_node.id
        return next_node.content

    def _generate_closing(self, session: ConversationSession) -> str:
        """

        Generate a closing message for the conversation.



        Returns:

            Closing message

        """
        return "Thank you so much for sharing your thoughts with me today. Your insights are incredibly valuable and will help us better understand this topic. Is there anything else you'd like to add before we finish?"

    def generate_summary(self, session: ConversationSession) -> str:
        """

        Generate a summary of the conversation using the LLM.



        Args:

            session: The conversation session to summarize



        Returns:

            A summary of the conversation

        """
        # Get conversation transcript
        transcript_parts = []
        for turn in session.conversation_history:
            speaker = "Moderator" if turn.role == "ai" else "Respondent"
            transcript_parts.append(f"{speaker}: {turn.content}")

        transcript = "\n".join(transcript_parts)

        system_prompt = """You are a qualitative research analyst summarizing a conducted interview. Your summary should be:

- Professional and objective

- Grounded in what the respondent actually said

- Organized by themes and key points

- Include representative quotes

- Highlight insights and implications

- Suitable for a research report or case study"""

        user_prompt = f"""Task: Summarize this qualitative research interview



**Interview Transcript:**



{transcript}



**Summary Requirements:**

1. **Main Topics:** What topics or subjects did the respondent discuss?

2. **Key Insights:** What are the most important or revealing points they made?

3. **Themes:** What patterns or recurring themes emerge from their responses?

4. **Representative Quotes:** Include 2-3 direct quotes that capture important moments

5. **Sentiment & Tone:** What is the overall emotional tone and sentiment?



**Format:** Write a professional summary of 3-4 paragraphs suitable for a research report.

Start with a brief overview, then discuss key themes and insights."""

        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt}
        ]

        try:
            summary = self.llm.generate(messages, max_tokens=500, temperature=0.5)
            return summary.strip()
        except Exception as e:
            return f"Summary generation failed: {str(e)}"

    def reflect_understanding(self, session: ConversationSession) -> str:
        """

        Periodically reflect back understanding to the respondent.



        Returns:

            A reflection statement

        """
        recent_turns = [t for t in session.conversation_history if t.role == "user"][-3:]
        if not recent_turns:
            return "Let me make sure I understand you correctly..."

        recent_content = " ".join([t.content for t in recent_turns])

        system_prompt = """You are a research interviewer reflecting back what you've heard. Create a brief summary (1-2 sentences) of what the respondent has shared, then ask if you understood correctly.



Format: "So if I understand correctly, [summary]. Is that right?" """

        user_prompt = f"""The respondent recently said: "{recent_content}"



Reflect back your understanding and ask for confirmation."""

        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt}
        ]

        try:
            reflection = self.llm.generate(messages, max_tokens=150, temperature=0.5)
            return reflection.strip()
        except Exception:
            return "Let me make sure I understand you correctly - can you confirm that I've captured your main points accurately?"