mikaelmp committed on
Commit
bf551eb
·
verified ·
1 Parent(s): dbfe10d
researchsimulation/InteractiveInterviewChatbot.py CHANGED
@@ -16,7 +16,9 @@ def parse_question_with_llm(question, respondent_names, processor_llm):
16
  Uses OpenAI's LLM to extract the specific agents being addressed and their respective questions.
17
  Supports compound requests.
18
  """
19
- logging.info(f"Parsing question with LLM: {question}")
 
 
20
 
21
  prompt = f"""
22
  You are an expert in market research interview analysis.
@@ -61,12 +63,24 @@ def parse_question_with_llm(question, respondent_names, processor_llm):
61
 
62
  Only return the formatted output without explanations.
63
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
 
65
- # Invoke LangChain LLM
66
- logging.info("Invoking LLM for parsing...")
67
- response = processor_llm.invoke(prompt)
68
- chatgpt_output = response.content.strip()
69
- logging.info(f"LLM Parsed Output: {chatgpt_output}")
70
 
71
  parsed_questions = {}
72
  respondent_name = "General"
@@ -83,7 +97,11 @@ def parse_question_with_llm(question, respondent_names, processor_llm):
83
  respondent_name = "General"
84
  question_text = None
85
 
86
- logging.info("Parsing complete.")
 
 
 
 
87
  return parsed_questions
88
 
89
  def validate_question_topics(parsed_questions, processor_llm):
@@ -92,7 +110,9 @@ def validate_question_topics(parsed_questions, processor_llm):
92
  Converts question to British English spelling if valid.
93
  Returns 'INVALID' for any out-of-scope question.
94
  """
95
- logging.info("Validating question topics and converting to British English...")
 
 
96
  validated_questions = {}
97
 
98
  for respondent, question in parsed_questions.items():
@@ -163,12 +183,25 @@ def validate_question_topics(parsed_questions, processor_llm):
163
  # ### Output:
164
  # <Validated question in British English, or "INVALID">
165
 
166
- response = processor_llm.invoke(prompt)
167
- validated_output = response.content.strip()
168
- logging.info(f"Validated output for {respondent}: {validated_output}")
 
 
 
 
 
 
 
 
 
 
 
 
169
  validated_questions[respondent] = validated_output
170
 
171
- logging.info("Validation complete.")
 
172
  return validated_questions
173
 
174
 
@@ -180,7 +213,8 @@ def ask_interview_question(respondent_agents_dict, last_active_agent, question,
180
  Uses Groq's LLM for response generation.
181
  """
182
 
183
- logging.info(f"Received question: {question}")
 
184
 
185
  agent_names = list(respondent_agents_dict.keys())
186
  logging.info(f"Available respondents: {agent_names}")
@@ -189,13 +223,19 @@ def ask_interview_question(respondent_agents_dict, last_active_agent, question,
189
  # Use OpenAI LLM to parse questions into individual respondent-specific sub-questions and validate them
190
 
191
  # Step 1: Parse question
 
192
  parsed_questions = parse_question_with_llm(question, str(agent_names), processor_llm)
 
 
193
  if not parsed_questions:
194
  logging.warning("No questions were parsed from input.")
195
  return ["**PreData Moderator**: No valid respondents were detected for this question."]
196
 
197
  # Step 2: Validate question content (scope + spelling)
 
198
  validated_questions = validate_question_topics(parsed_questions, processor_llm)
 
 
199
  for resp_name, extracted_question in validated_questions.items():
200
  if extracted_question == "INVALID":
201
  logging.warning(f"Invalid question detected for {resp_name}: {extracted_question}")
 
16
  Uses OpenAI's LLM to extract the specific agents being addressed and their respective questions.
17
  Supports compound requests.
18
  """
19
+ logging.info("🔍 ENTERING: parse_question_with_llm()")
20
+ logging.info(f"Received user input: {question}")
21
+ logging.info(f"Available respondent names: {respondent_names}")
22
 
23
  prompt = f"""
24
  You are an expert in market research interview analysis.
 
63
 
64
  Only return the formatted output without explanations.
65
  """
66
+
67
+ logging.info("Prompt constructed. Invoking LLM now...")
68
+
69
+ try:
70
+ response = processor_llm.invoke(prompt)
71
+ if not hasattr(response, "content") or not response.content:
72
+ logging.error("LLM response is empty or malformed.")
73
+ return {}
74
+
75
+ chatgpt_output = response.content.strip()
76
+ logging.info(f"Raw LLM Output:\n{chatgpt_output}")
77
+
78
+ except Exception as e:
79
+ logging.exception(f"Exception occurred during LLM invocation in parse_question_with_llm: {e}")
80
+ return {}
81
 
82
+ # Begin parsing the structured response
83
+ logging.info("Parsing LLM output for respondent-question pairs...")
 
 
 
84
 
85
  parsed_questions = {}
86
  respondent_name = "General"
 
97
  respondent_name = "General"
98
  question_text = None
99
 
100
+ if not parsed_questions:
101
+ logging.warning("No respondent-question pairs were successfully parsed.")
102
+
103
+ logging.info(f"Final parsed questions: {parsed_questions}")
104
+ logging.info("Exiting parse_question_with_llm()")
105
  return parsed_questions
106
 
107
  def validate_question_topics(parsed_questions, processor_llm):
 
110
  Converts question to British English spelling if valid.
111
  Returns 'INVALID' for any out-of-scope question.
112
  """
113
+ logging.info("ENTERING: validate_question_topics()")
114
+ logging.info("Starting question validation against permitted scope...")
115
+
116
  validated_questions = {}
117
 
118
  for respondent, question in parsed_questions.items():
 
183
  # ### Output:
184
  # <Validated question in British English, or "INVALID">
185
 
186
+ try:
187
+ logging.debug("Sending validation prompt to LLM...")
188
+ response = processor_llm.invoke(prompt)
189
+
190
+ if not hasattr(response, "content") or not response.content:
191
+ logging.error(f"Empty or malformed response from LLM for respondent: '{respondent}'")
192
+ validated_output = "INVALID"
193
+ else:
194
+ validated_output = response.content.strip()
195
+ logging.info(f"Validation output for '{respondent}': {validated_output}")
196
+
197
+ except Exception as e:
198
+ logging.exception(f"Exception during validation for respondent '{respondent}': {e}")
199
+ validated_output = "INVALID"
200
+
201
  validated_questions[respondent] = validated_output
202
 
203
+ logging.info("Completed validation for all questions.")
204
+ logging.debug(f"Final validated questions dictionary:\n{validated_questions}")
205
  return validated_questions
206
 
207
 
 
213
  Uses Groq's LLM for response generation.
214
  """
215
 
216
+ logging.info(f"START: Processing new interview question: {question}")
217
+ responses = []
218
 
219
  agent_names = list(respondent_agents_dict.keys())
220
  logging.info(f"Available respondents: {agent_names}")
 
223
  # Use OpenAI LLM to parse questions into individual respondent-specific sub-questions and validate them
224
 
225
  # Step 1: Parse question
226
+ logging.info("STEP 1: Parsing question with LLM...")
227
  parsed_questions = parse_question_with_llm(question, str(agent_names), processor_llm)
228
+ logging.info(f"Parsed Questions Output: {parsed_questions}")
229
+
230
  if not parsed_questions:
231
  logging.warning("No questions were parsed from input.")
232
  return ["**PreData Moderator**: No valid respondents were detected for this question."]
233
 
234
  # Step 2: Validate question content (scope + spelling)
235
+ logging.info("STEP 2: Validating questions for topic relevance and British English...")
236
  validated_questions = validate_question_topics(parsed_questions, processor_llm)
237
+ logging.info(f"Validated Questions: {validated_questions}")
238
+
239
  for resp_name, extracted_question in validated_questions.items():
240
  if extracted_question == "INVALID":
241
  logging.warning(f"Invalid question detected for {resp_name}: {extracted_question}")