Basitha committed on
Commit
9b14f3e
·
verified ·
1 Parent(s): e56d353

Update common/validation_utils.py

Browse files
Files changed (1) hide show
  1. common/validation_utils.py +22 -2
common/validation_utils.py CHANGED
@@ -7,6 +7,18 @@ from RespondentAgent import *
7
  from langchain_groq import ChatGroq
8
 
9
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  def validate_response(question, answer, user_profile_str, fast_facts_str, interview_transcript_text, respondent_type, ai_evaluator_agent, processor_llm):
11
  """
12
  Validates a response (answer) to a question using the appropriate evaluation method (exploratory or fact-based).
@@ -129,7 +141,11 @@ def validate_response(question, answer, user_profile_str, fast_facts_str, interv
129
  relevance = None
130
  logging.info(f"Exploratory evaluation: plausibility={plausibility}, relevance={relevance}")
131
  if plausibility is not None and relevance is not None:
132
- return plausibility >= 8.0 and relevance >= 8.0
 
 
 
 
133
  return False
134
  else:
135
  # Fact-based: Ask LLM for accuracy rating
@@ -157,5 +173,9 @@ def validate_response(question, answer, user_profile_str, fast_facts_str, interv
157
  accuracy = None
158
  logging.info(f"Fact-based evaluation: accuracy={accuracy}")
159
  if accuracy is not None:
160
- return accuracy >= 8.0
 
 
 
 
161
  return False
 
7
  from langchain_groq import ChatGroq
8
 
9
 
10
def is_first_person(answer):
    """
    Check whether *answer* is written in the first person.

    Returns True only when the text contains at least one first-person
    pronoun (I/my/me/mine/we/our/us/ours) AND contains no third-person
    pronoun (he/she/they/his/her/their/him/them).  Note this is strict:
    a single third-person pronoun anywhere in the answer causes False,
    even if the answer is otherwise first-person.

    Matching is case-insensitive and word-bounded, so e.g. "them" inside
    "theme" does not match.  Caveat: case-insensitivity means tokens such
    as "US" (the country) match the first-person "us" — acceptable for
    this heuristic.
    """
    # First-person pronouns — any single match is sufficient.
    has_first = re.search(
        r'\b(I|my|me|mine|we|our|us|ours)\b', answer, re.IGNORECASE
    )
    # Third-person pronouns — any single match disqualifies the answer.
    has_third = re.search(
        r'\b(he|she|they|his|her|their|him|them)\b', answer, re.IGNORECASE
    )
    return bool(has_first) and not bool(has_third)
20
+
21
+
22
  def validate_response(question, answer, user_profile_str, fast_facts_str, interview_transcript_text, respondent_type, ai_evaluator_agent, processor_llm):
23
  """
24
  Validates a response (answer) to a question using the appropriate evaluation method (exploratory or fact-based).
 
141
  relevance = None
142
  logging.info(f"Exploratory evaluation: plausibility={plausibility}, relevance={relevance}")
143
  if plausibility is not None and relevance is not None:
144
+ if plausibility >= 8.0 and relevance >= 8.0:
145
+ if not is_first_person(answer):
146
+ logging.warning("Did not pass style due to 3rd person use")
147
+ return False
148
+ return True
149
  return False
150
  else:
151
  # Fact-based: Ask LLM for accuracy rating
 
173
  accuracy = None
174
  logging.info(f"Fact-based evaluation: accuracy={accuracy}")
175
  if accuracy is not None:
176
+ if accuracy >= 8.0:
177
+ if not is_first_person(answer):
178
+ logging.warning("Did not pass style due to 3rd person use")
179
+ return False
180
+ return True
181
  return False