Sa-m committed on
Commit
8b998be
·
verified ·
1 Parent(s): af55524

Update interview_logic.py

Browse files
Files changed (1) hide show
  1. interview_logic.py +189 -214
interview_logic.py CHANGED
@@ -1,3 +1,4 @@
 
1
  # PrepGenie/interview_logic.py
2
  """Core logic for the mock interview process."""
3
 
@@ -13,17 +14,12 @@ import json
13
  import matplotlib.pyplot as plt
14
  import io
15
  import re
 
16
 
17
  # --- Configuration ---
18
- # These could potentially be moved to a config file or environment variables
19
- # For now, they are initialized here or passed in.
20
- # genai.configure(api_key=os.getenv("GOOGLE_API_KEY") or "YOUR_DEFAULT_API_KEY_HERE")
21
- # text_model = genai.GenerativeModel("gemini-1.5-flash") # This should be initialized in app.py or a central config
22
 
23
  # --- BERT Model Loading ---
24
- # It's generally better to load large models once. This can be handled in app.py and passed if needed,
25
- # or loaded here if this module is imported once at startup.
26
- # For simplicity, we'll handle loading here, assuming it's imported once.
27
  try:
28
  model = TFBertModel.from_pretrained("bert-base-uncased")
29
  tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
@@ -35,22 +31,64 @@ except Exception as e:
35
  model = None
36
  tokenizer = None
37
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  # --- Core Logic Functions ---
39
 
40
  def getallinfo(data, text_model):
41
  """Processes raw resume text into a structured overview."""
42
  if not data or not data.strip():
43
  return "No data provided or data is empty."
 
44
  text = f"""{data} is given by the user. Make sure you are getting the details like name, experience,
45
- education, skills of the user like in a resume. If the details are not provided return: not a resume.
46
- If details are provided then please try again and format the whole in a single paragraph covering all the information. """
47
- try:
48
- response = text_model.generate_content(text)
49
- response.resolve()
50
- return response.text
51
- except Exception as e:
52
- print(f"Error in getallinfo: {e}")
53
- return "Error processing resume data."
54
 
55
  def file_processing(pdf_file_path):
56
  """Processes the uploaded PDF file given its path."""
@@ -63,7 +101,9 @@ def file_processing(pdf_file_path):
63
  reader = PyPDF2.PdfReader(f)
64
  text = ""
65
  for page in reader.pages:
66
- text += page.extract_text() or "" # Handle None from extract_text
 
 
67
  return text
68
  except FileNotFoundError:
69
  error_msg = f"File not found at path: {pdf_file_path}"
@@ -78,6 +118,7 @@ def file_processing(pdf_file_path):
78
  print(error_msg)
79
  return ""
80
 
 
81
  def get_embedding(text):
82
  """Generates BERT embedding for a given text."""
83
  if not text or not text.strip():
@@ -94,6 +135,7 @@ def get_embedding(text):
94
  print(f"Error getting embedding in interview_logic: {e}")
95
  return np.zeros((1, 768))
96
 
 
97
  def generate_feedback(question, answer):
98
  """Calculates similarity score between question and answer."""
99
  if not question or not question.strip() or not answer or not answer.strip():
@@ -114,16 +156,19 @@ def generate_feedback(question, answer):
114
  print(f"Error generating feedback in interview_logic: {e}")
115
  return "0.00"
116
 
 
117
  def generate_questions(roles, data, text_model):
118
  """Generates 5 interview questions based on resume and roles."""
 
 
 
 
 
 
 
 
119
  if not roles or (isinstance(roles, list) and not any(roles)) or not data or not data.strip():
120
- return [
121
- "Could you please introduce yourself based on your resume?",
122
- "What are your key technical skills relevant to this role?",
123
- "Describe a challenging project you've worked on and how you resolved it.",
124
- "How do you prioritize tasks when working under tight deadlines?",
125
- "Where do you see yourself professionally in the next 3 to 5 years?"
126
- ]
127
 
128
  if isinstance(roles, list):
129
  roles_str = ", ".join(roles)
@@ -154,148 +199,141 @@ Example format (do not copy these, generate your own):
154
  4. If given an ambiguous dataset with missing values, what steps would you take to analyze it?
155
  5. Where do you see your career heading in the next 3 to 5 years?"""
156
 
157
- try:
158
- response = text_model.generate_content(text)
159
- response.resolve()
160
- questions_text = response.text.strip()
161
-
162
- # Parse numbered questions (e.g. "1. Question here?")
163
- import re
164
- questions = re.findall(r'^\d+[\.\)]\s*(.+)', questions_text, re.MULTILINE)
165
- questions = [q.strip() for q in questions if q.strip()]
166
 
167
- # Fallback: split by newline if numbered parsing fails
168
- if len(questions) < 3:
169
- questions = [q.strip() for q in questions_text.split('\n') if q.strip() and '?' in q]
 
170
 
171
- print(f"Generated {len(questions)} questions: {questions}")
 
 
172
 
173
- except Exception as e:
174
- print(f"Error generating questions in interview_logic: {e}")
175
- questions = []
176
 
177
  # Pad with defaults if AI returned fewer than 5
178
- defaults = [
179
- "Could you please introduce yourself based on your resume?",
180
- "What are your key technical skills relevant to this role?",
181
- "Describe a challenging project you've worked on and how you resolved it.",
182
- "How do you prioritize tasks when working under tight deadlines?",
183
- "Where do you see yourself professionally in the next 3 to 5 years?"
184
- ]
185
  while len(questions) < 5:
186
- questions.append(defaults[len(questions)])
187
 
188
  return questions[:5]
189
 
 
190
  def generate_overall_feedback(data, percent, answer, question, text_model):
191
  """Generates overall feedback for an answer."""
192
  if not data or not data.strip() or not answer or not answer.strip() or not question or not question.strip():
193
  return "Unable to generate feedback due to missing information."
 
194
  if isinstance(percent, float):
195
  percent_str = f"{percent:.2f}"
196
  else:
197
  percent_str = str(percent)
 
198
  prompt = f"""As an interviewer, provide concise feedback (max 150 words) for candidate {data}.
199
- Questions asked: {question} # Pass single question
200
- Candidate's answers: {answer}
201
- Score: {percent_str}
202
- Feedback should include:
203
- 1. Overall performance assessment (2-3 sentences)
204
- 2. Key strengths (2-3 points)
205
- 3. Areas for improvement (2-3 points)
206
- Be honest and constructive. Do not mention the exact score, but rate the candidate out of 10 based on their answers."""
207
- try:
208
- response = text_model.generate_content(prompt)
209
- response.resolve()
210
- return response.text
211
- except Exception as e:
212
- print(f"Error generating overall feedback in interview_logic: {e}")
213
- return "Feedback could not be generated."
214
 
215
  def generate_metrics(data, answer, question, text_model):
216
  """Generates skill metrics for an answer."""
 
 
 
 
 
 
 
 
217
  if not data or not data.strip() or not answer or not answer.strip() or not question or not question.strip():
218
- return {
219
- "Communication skills": 0.0, "Teamwork and collaboration": 0.0,
220
- "Problem-solving and critical thinking": 0.0, "Time management and organization": 0.0,
221
- "Adaptability and resilience": 0.0
222
- }
223
- metrics = {}
224
  text = f"""Here is the overview of the candidate {data}. In the interview the question asked was {question}.
225
- The candidate has answered the question as follows: {answer}. Based on the answers provided, give me the metrics related to:
226
- Communication skills, Teamwork and collaboration, Problem-solving and critical thinking, Time management and organization,
227
- Adaptability and resilience.
228
- Rules for rating:
229
- - Rate each skill from 0 to 10
230
- - If the answer is empty, 'Sorry could not recognize your voice', meaningless, or irrelevant: rate all skills as 0
231
- - Only provide numeric ratings without any additional text or '/10'
232
- - Ratings must reflect actual content quality - do not give courtesy points
233
- - Consider answer relevance to the specific skill being rated
234
- Format:
235
- Communication skills: [rating]
236
- Teamwork and collaboration: [rating]
237
- Problem-solving and critical thinking: [rating]
238
- Time management and organization: [rating]
239
- Adaptability and resilience: [rating]"""
240
- try:
241
- response = text_model.generate_content(text)
242
- response.resolve()
243
- metrics_text = response.text.strip()
244
- for line in metrics_text.split('\n'):
245
- if ':' in line:
246
- key, value_str = line.split(':', 1)
247
- key = key.strip()
248
- try:
249
- value_clean = value_str.strip().split()[0]
250
- value = float(value_clean)
251
- metrics[key] = value
252
- except (ValueError, IndexError):
253
- metrics[key] = 0.0
254
- expected_metrics = [
255
- "Communication skills", "Teamwork and collaboration",
256
- "Problem-solving and critical thinking", "Time management and organization",
257
- "Adaptability and resilience"
258
- ]
259
- for m in expected_metrics:
260
- if m not in metrics:
261
- metrics[m] = 0.0
262
- except Exception as e:
263
- print(f"Error generating metrics in interview_logic: {e}")
264
- # BEFORE — returns empty dict which sometimes serializes as ""
265
- return {}
266
-
267
- # AFTER — return explicit zeroed metrics so JSON component always gets valid data
268
- return {
269
- "Communication skills": 0.0,
270
- "Teamwork and collaboration": 0.0,
271
- "Problem-solving and critical thinking": 0.0,
272
- "Time management and organization": 0.0,
273
- "Adaptability and resilience": 0.0
274
- }
275
 
276
  def getmetrics(interaction, resume, text_model):
277
  """Gets overall metrics from AI based on interaction."""
278
  interaction_text = "\n".join([f"{q}: {a}" for q, a in interaction.items()])
279
  text = f"""This is the user's resume: {resume}.
280
- And here is the interaction of the interview: {interaction_text}.
281
- Please evaluate the interview based on the interaction and the resume.
282
- Rate me the following metrics on a scale of 1 to 10. 1 being the lowest and 10 being the highest.
283
- Communication skills, Teamwork and collaboration, Problem-solving and critical thinking,
284
- Time management and organization, Adaptability and resilience. Just give the ratings for the metrics.
285
- I do not need the feedback. Just the ratings in the format:
286
- Communication skills: X
287
- Teamwork and collaboration: Y
288
- Problem-solving and critical thinking: Z
289
- Time management and organization: A
290
- Adaptability and resilience: B
291
- """
292
- try:
293
- response = text_model.generate_content(text)
294
- response.resolve()
295
- return response.text
296
- except Exception as e:
297
- print(f"Error fetching metrics from AI in interview_logic: {e}")
298
- return ""
299
 
300
  def parse_metrics(metric_text):
301
  """Parses raw metric text into a dictionary."""
@@ -327,6 +365,7 @@ def parse_metrics(metric_text):
327
  metrics[key] = 0
328
  return metrics
329
 
 
330
  def create_metrics_chart(metrics_dict):
331
  """Creates a pie chart image from metrics."""
332
  try:
@@ -359,6 +398,7 @@ def create_metrics_chart(metrics_dict):
359
  plt.close(fig)
360
  return buf
361
 
 
362
  def generate_evaluation_report(metrics_data, average_rating, feedback_list, interaction_dict):
363
  """Generates a formatted evaluation report."""
364
  try:
@@ -396,8 +436,8 @@ def generate_evaluation_report(metrics_data, average_rating, feedback_list, inte
396
  print(error_msg)
397
  return error_msg
398
 
 
399
  # --- Interview State Management Functions ---
400
- # These functions operate on the interview_state dictionary
401
 
402
  def process_resume_logic(file_obj):
403
  """Handles resume upload and processing logic."""
@@ -469,72 +509,6 @@ def process_resume_logic(file_obj):
469
  }
470
  }
471
 
472
- # def start_interview_logic(roles, processed_resume_data, text_model):
473
- # """Starts the interview process logic."""
474
- # if not roles or (isinstance(roles, list) and not any(roles)) or not processed_resume_data or not processed_resume_data.strip():
475
- # return {
476
- # "status": "Please select a role and ensure resume is processed.",
477
- # "initial_question": "",
478
- # "interview_state": {},
479
- # "ui_updates": {
480
- # "audio_input": "gr_show", # show recording for Q1
481
- # "submit_answer_btn": "gr_show", # show submit for Q1
482
- # "next_question_btn": "gr_hide", # hidden — must submit first
483
- # "submit_interview_btn": "gr_hide",
484
- # "feedback_display": "gr_hide",
485
- # "metrics_display": "gr_hide",
486
- # "question_display": "gr_show",
487
- # "answer_instructions": "gr_show"
488
- # }
489
- # }
490
- # try:
491
- # questions = generate_questions(roles, processed_resume_data, text_model)
492
- # default_questions = [
493
- # "Could you please introduce yourself based on your resume?",
494
- # "What are your key technical skills relevant to this role?",
495
- # "Describe a challenging project you've worked on and how you handled it.",
496
- # "Where do you see yourself in 5 years?",
497
- # "Do you have any questions for us?"
498
- # ]
499
- # while len(questions) < 5:
500
- # questions.append(default_questions[len(questions)])
501
- # questions = questions[:5] # cap at 5
502
-
503
- # initial_question = questions[0]
504
- # interview_state = {
505
- # "questions": questions,
506
- # "current_q_index": 0,
507
- # "answers": [],
508
- # "feedback": [],
509
- # "interactions": {},
510
- # "metrics_list": [],
511
- # "resume_data": processed_resume_data,
512
- # "selected_roles": roles # Store roles for history
513
- # }
514
- # return {
515
- # "status": "Interview started. Please answer the first question.",
516
- # "initial_question": initial_question,
517
- # "interview_state": interview_state,
518
- # "ui_updates": {
519
- # "audio_input": "gr_show", "submit_answer_btn": "gr_show", "next_question_btn": "gr_hide",
520
- # "submit_interview_btn": "gr_hide", "feedback_display": "gr_hide", "metrics_display": "gr_hide",
521
- # "question_display": "gr_show", "answer_instructions": "gr_show"
522
- # }
523
- # }
524
- # except Exception as e:
525
- # error_msg = f"Error starting interview in interview_logic: {str(e)}"
526
- # print(error_msg)
527
- # return {
528
- # "status": error_msg,
529
- # "initial_question": "",
530
- # "interview_state": {},
531
- # "ui_updates": {
532
- # "audio_input": "gr_hide", "submit_answer_btn": "gr_hide", "next_question_btn": "gr_hide",
533
- # "submit_interview_btn": "gr_hide", "feedback_display": "gr_hide", "metrics_display": "gr_hide",
534
- # "question_display": "gr_hide", "answer_instructions": "gr_hide"
535
- # }
536
- # }
537
-
538
 
539
  def start_interview_logic(roles, processed_resume_data, text_model):
540
  """Starts the interview process logic."""
@@ -542,7 +516,7 @@ def start_interview_logic(roles, processed_resume_data, text_model):
542
  return {
543
  "status": "Please select a role and ensure resume is processed.",
544
  "initial_question": "",
545
- "all_questions": "", # New field for all questions
546
  "interview_state": {},
547
  "ui_updates": {
548
  "audio_input": "gr_show",
@@ -617,6 +591,8 @@ def start_interview_logic(roles, processed_resume_data, text_model):
617
  "answer_instructions": "gr_hide"
618
  }
619
  }
 
 
620
  def submit_answer_logic(audio, interview_state, text_model):
621
  """Handles submitting an answer via audio logic."""
622
  if not audio or not interview_state:
@@ -670,8 +646,8 @@ def submit_answer_logic(audio, interview_state, text_model):
670
  "ui_updates": {
671
  "feedback_display": "gr_show_and_update",
672
  "metrics_display": "gr_show_and_update",
673
- "audio_input": "gr_hide", # hide until Next is clicked
674
- "submit_answer_btn": "gr_hide", # hide until Next is clicked
675
  "next_question_btn": "gr_hide" if is_last_question else "gr_show",
676
  "submit_interview_btn": "gr_show" if is_last_question else "gr_hide",
677
  "question_display": "gr_show",
@@ -693,6 +669,7 @@ def submit_answer_logic(audio, interview_state, text_model):
693
  }
694
  }
695
 
 
696
  def next_question_logic(interview_state):
697
  """Moves to the next question or ends the interview logic."""
698
  if not interview_state:
@@ -735,12 +712,13 @@ def next_question_logic(interview_state):
735
  "interview_state": interview_state,
736
  "ui_updates": {
737
  "audio_input": "gr_hide", "submit_answer_btn": "gr_hide", "next_question_btn": "gr_hide",
738
- "feedback_display": "gr_hide", "metrics_display": "gr_hide", "submit_interview_btn": "gr_show", # Show submit button
739
  "question_display": "gr_show", "answer_instructions": "gr_hide",
740
  "answer_display": "gr_clear", "metrics_display_clear": "gr_clear"
741
  }
742
  }
743
 
 
744
  def submit_interview_logic(interview_state, text_model):
745
  """Handles final submission, triggers evaluation, prepares results logic."""
746
  if not interview_state or not isinstance(interview_state, dict):
@@ -759,7 +737,6 @@ def submit_interview_logic(interview_state, text_model):
759
  resume_data = interview_state.get("resume_data", "")
760
  feedback_list = interview_state.get("feedback", [])
761
  metrics_history = interview_state.get("metrics_list", [])
762
- # selected_roles = interview_state.get("selected_roles", []) # Not used here directly
763
 
764
  if not interactions:
765
  error_msg = "No interview interactions found to evaluate."
@@ -788,7 +765,7 @@ def submit_interview_logic(interview_state, text_model):
788
 
789
  return {
790
  "status": "Evaluation Complete! See your results below.",
791
- "interview_state": interview_state, # Pass through
792
  "report_text": report_text,
793
  "chart_buffer": chart_buffer,
794
  "ui_updates": {
@@ -808,6 +785,4 @@ def submit_interview_logic(interview_state, text_model):
808
  "ui_updates": {
809
  "evaluation_report_display": "gr_show_and_update_error", "evaluation_chart_display": "gr_hide"
810
  }
811
- }
812
-
813
- # Add similar logic functions for chat if needed, or keep chat in its own module.
 
1
+
2
  # PrepGenie/interview_logic.py
3
  """Core logic for the mock interview process."""
4
 
 
14
  import matplotlib.pyplot as plt
15
  import io
16
  import re
17
+ import time
18
 
19
  # --- Configuration ---
20
+ # Note: text_model is passed in from app.py to avoid circular imports or global state issues.
 
 
 
21
 
22
  # --- BERT Model Loading ---
 
 
 
23
  try:
24
  model = TFBertModel.from_pretrained("bert-base-uncased")
25
  tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
 
31
  model = None
32
  tokenizer = None
33
 
34
+
35
def safe_generate_content(text_model, prompt, fallback_message="Service temporarily unavailable. Please try again later."):
    """
    Call the Gemini model and shield the caller from API failures.

    Quota/rate-limit errors (HTTP 429 or "quota"/"rate limit" in the message)
    are retried with exponential backoff (2s, 4s, ...); permission errors and
    other failures return immediately with a user-facing warning.

    Returns a tuple: (success: bool, result_or_error_message: str).
    """
    RETRIES = 3
    BASE_DELAY = 2

    for attempt in range(RETRIES):
        try:
            response = text_model.generate_content(prompt)
            response.resolve()
            return True, response.text
        except Exception as exc:
            message = str(exc).lower()

            # Retriable: quota exhausted or rate limited.
            if "429" in message or "quota" in message or "rate limit" in message:
                print(f"Quota/Rate limit error (Attempt {attempt + 1}/{RETRIES}): {exc}")
                if attempt + 1 < RETRIES:
                    wait = BASE_DELAY * (2 ** attempt)
                    print(f"Retrying in {wait}s...")
                    time.sleep(wait)
                    continue
                return False, "⚠️ API quota exceeded. Please wait a few minutes and try again, or check your API plan."

            # Non-retriable: bad credentials / access denied.
            if "403" in message or "permission" in message:
                print(f"Permission error: {exc}")
                return False, "⚠️ API access denied. Please check your API key configuration."

            # Any other error: fail fast with the caller-supplied fallback text.
            print(f"API error: {exc}")
            return False, f"⚠️ Service error: {fallback_message}"

    # All retries exhausted without a definitive outcome.
    return False, "⚠️ Service unavailable after multiple attempts. Please try again later."
74
+
75
+
76
  # --- Core Logic Functions ---
77
 
78
def getallinfo(data, text_model):
    """Processes raw resume text into a structured overview.

    Args:
        data: Raw resume text supplied by the user.
        text_model: Generative model used to summarize the resume.

    Returns:
        A single-paragraph overview string on success, the model's
        "not a resume" reply for non-resume input, or a user-facing
        warning string when the input is empty or the API call fails.
    """
    if not data or not data.strip():
        return "No data provided or data is empty."

    text = f"""{data} is given by the user. Make sure you are getting the details like name, experience,
education, skills of the user like in a resume. If the details are not provided return: not a resume.
If details are provided then please try again and format the whole in a single paragraph covering all the information. """

    # safe_generate_content already returns a user-facing warning string on
    # failure, so the result can be returned unconditionally — the previous
    # success check returned the same value on both branches (dead code).
    _, result = safe_generate_content(text_model, text, "Could not process resume data.")
    return result
91
+
 
92
 
93
  def file_processing(pdf_file_path):
94
  """Processes the uploaded PDF file given its path."""
 
101
  reader = PyPDF2.PdfReader(f)
102
  text = ""
103
  for page in reader.pages:
104
+ extracted = page.extract_text()
105
+ if extracted:
106
+ text += extracted
107
  return text
108
  except FileNotFoundError:
109
  error_msg = f"File not found at path: {pdf_file_path}"
 
118
  print(error_msg)
119
  return ""
120
 
121
+
122
  def get_embedding(text):
123
  """Generates BERT embedding for a given text."""
124
  if not text or not text.strip():
 
135
  print(f"Error getting embedding in interview_logic: {e}")
136
  return np.zeros((1, 768))
137
 
138
+
139
  def generate_feedback(question, answer):
140
  """Calculates similarity score between question and answer."""
141
  if not question or not question.strip() or not answer or not answer.strip():
 
156
  print(f"Error generating feedback in interview_logic: {e}")
157
  return "0.00"
158
 
159
+
160
  def generate_questions(roles, data, text_model):
161
  """Generates 5 interview questions based on resume and roles."""
162
+ default_questions = [
163
+ "Could you please introduce yourself based on your resume?",
164
+ "What are your key technical skills relevant to this role?",
165
+ "Describe a challenging project you've worked on and how you resolved it.",
166
+ "How do you prioritize tasks when working under tight deadlines?",
167
+ "Where do you see yourself professionally in the next 3 to 5 years?"
168
+ ]
169
+
170
  if not roles or (isinstance(roles, list) and not any(roles)) or not data or not data.strip():
171
+ return default_questions
 
 
 
 
 
 
172
 
173
  if isinstance(roles, list):
174
  roles_str = ", ".join(roles)
 
199
  4. If given an ambiguous dataset with missing values, what steps would you take to analyze it?
200
  5. Where do you see your career heading in the next 3 to 5 years?"""
201
 
202
+ success, result = safe_generate_content(text_model, text, "Could not generate questions.")
203
+
204
+ if not success:
205
+ print(f"Using fallback questions due to: {result}")
206
+ # Return default questions with the warning as the first item so user sees it
207
+ return [f"⚠️ {result}"] + default_questions[:4]
 
 
 
208
 
209
+ # Parse the successful result
210
+ questions_text = result.strip()
211
+ questions = re.findall(r'^\d+[\.\)]\s*(.+)', questions_text, re.MULTILINE)
212
+ questions = [q.strip() for q in questions if q.strip()]
213
 
214
+ # Fallback: split by newline if numbered parsing fails
215
+ if len(questions) < 3:
216
+ questions = [q.strip() for q in questions_text.split('\n') if q.strip() and '?' in q]
217
 
218
+ print(f"Generated {len(questions)} questions: {questions}")
 
 
219
 
220
  # Pad with defaults if AI returned fewer than 5
 
 
 
 
 
 
 
221
  while len(questions) < 5:
222
+ questions.append(default_questions[len(questions)])
223
 
224
  return questions[:5]
225
 
226
+
227
def generate_overall_feedback(data, percent, answer, question, text_model):
    """Generates overall feedback for an answer."""
    # All three text inputs are required to produce meaningful feedback.
    missing = (
        not data or not data.strip()
        or not answer or not answer.strip()
        or not question or not question.strip()
    )
    if missing:
        return "Unable to generate feedback due to missing information."

    # Normalize the score to a string; floats get two decimal places.
    percent_str = f"{percent:.2f}" if isinstance(percent, float) else str(percent)

    prompt = f"""As an interviewer, provide concise feedback (max 150 words) for candidate {data}.
Questions asked: {question}
Candidate's answers: {answer}
Score: {percent_str}
Feedback should include:
1. Overall performance assessment (2-3 sentences)
2. Key strengths (2-3 points)
3. Areas for improvement (2-3 points)
Be honest and constructive. Do not mention the exact score, but rate the candidate out of 10 based on their answers."""

    success, result = safe_generate_content(text_model, prompt, "Could not generate feedback.")
    # On failure, surface the warning message rather than hiding it.
    return result if success else f"Feedback unavailable: {result}"
252
+
253
 
254
def generate_metrics(data, answer, question, text_model):
    """Generates skill metrics for an answer."""
    # Zeroed baseline: returned for empty input or API failure, and used as
    # the canonical list of expected skill keys.
    zeroed = {
        "Communication skills": 0.0,
        "Teamwork and collaboration": 0.0,
        "Problem-solving and critical thinking": 0.0,
        "Time management and organization": 0.0,
        "Adaptability and resilience": 0.0
    }

    # Without the resume overview, the question and an answer there is
    # nothing to rate.
    if not data or not data.strip() or not answer or not answer.strip() or not question or not question.strip():
        return zeroed

    text = f"""Here is the overview of the candidate {data}. In the interview the question asked was {question}.
The candidate has answered the question as follows: {answer}. Based on the answers provided, give me the metrics related to:
Communication skills, Teamwork and collaboration, Problem-solving and critical thinking, Time management and organization,
Adaptability and resilience.
Rules for rating:
- Rate each skill from 0 to 10
- If the answer is empty, 'Sorry could not recognize your voice', meaningless, or irrelevant: rate all skills as 0
- Only provide numeric ratings without any additional text or '/10'
- Ratings must reflect actual content quality - do not give courtesy points
- Consider answer relevance to the specific skill being rated
Format:
Communication skills: [rating]
Teamwork and collaboration: [rating]
Problem-solving and critical thinking: [rating]
Time management and organization: [rating]
Adaptability and resilience: [rating]"""

    success, result = safe_generate_content(text_model, text, "Could not generate metrics.")
    if not success:
        print(f"Metrics generation failed: {result}")
        return zeroed

    # Parse "Skill name: number" lines; unparseable values score 0.0.
    scores = {}
    for line in result.strip().split('\n'):
        if ':' not in line:
            continue
        skill, _, raw_value = line.partition(':')
        skill = skill.strip()
        try:
            scores[skill] = float(raw_value.strip().split()[0])
        except (ValueError, IndexError):
            scores[skill] = 0.0

    # Guarantee every expected skill key exists even if the model omitted it.
    for skill in zeroed:
        scores.setdefault(skill, 0.0)

    return scores
313
+
 
 
 
 
314
 
315
def getmetrics(interaction, resume, text_model):
    """Gets overall metrics from AI based on interaction."""
    # Flatten the {question: answer} mapping into "Q: A" lines for the prompt.
    qa_lines = [f"{q}: {a}" for q, a in interaction.items()]
    interaction_text = "\n".join(qa_lines)

    text = f"""This is the user's resume: {resume}.
And here is the interaction of the interview: {interaction_text}.
Please evaluate the interview based on the interaction and the resume.
Rate me the following metrics on a scale of 1 to 10. 1 being the lowest and 10 being the highest.
Communication skills, Teamwork and collaboration, Problem-solving and critical thinking,
Time management and organization, Adaptability and resilience. Just give the ratings for the metrics.
I do not need the feedback. Just the ratings in the format:
Communication skills: X
Teamwork and collaboration: Y
Problem-solving and critical thinking: Z
Time management and organization: A
Adaptability and resilience: B
"""

    success, result = safe_generate_content(text_model, text, "Could not fetch final metrics.")
    if success:
        return result
    print(f"Final metrics fetch failed: {result}")
    # Empty string: the downstream parser treats it as "no ratings".
    return ""
336
+
 
337
 
338
  def parse_metrics(metric_text):
339
  """Parses raw metric text into a dictionary."""
 
365
  metrics[key] = 0
366
  return metrics
367
 
368
+
369
  def create_metrics_chart(metrics_dict):
370
  """Creates a pie chart image from metrics."""
371
  try:
 
398
  plt.close(fig)
399
  return buf
400
 
401
+
402
  def generate_evaluation_report(metrics_data, average_rating, feedback_list, interaction_dict):
403
  """Generates a formatted evaluation report."""
404
  try:
 
436
  print(error_msg)
437
  return error_msg
438
 
439
+
440
  # --- Interview State Management Functions ---
 
441
 
442
  def process_resume_logic(file_obj):
443
  """Handles resume upload and processing logic."""
 
509
  }
510
  }
511
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
512
 
513
  def start_interview_logic(roles, processed_resume_data, text_model):
514
  """Starts the interview process logic."""
 
516
  return {
517
  "status": "Please select a role and ensure resume is processed.",
518
  "initial_question": "",
519
+ "all_questions": "",
520
  "interview_state": {},
521
  "ui_updates": {
522
  "audio_input": "gr_show",
 
591
  "answer_instructions": "gr_hide"
592
  }
593
  }
594
+
595
+
596
  def submit_answer_logic(audio, interview_state, text_model):
597
  """Handles submitting an answer via audio logic."""
598
  if not audio or not interview_state:
 
646
  "ui_updates": {
647
  "feedback_display": "gr_show_and_update",
648
  "metrics_display": "gr_show_and_update",
649
+ "audio_input": "gr_hide",
650
+ "submit_answer_btn": "gr_hide",
651
  "next_question_btn": "gr_hide" if is_last_question else "gr_show",
652
  "submit_interview_btn": "gr_show" if is_last_question else "gr_hide",
653
  "question_display": "gr_show",
 
669
  }
670
  }
671
 
672
+
673
  def next_question_logic(interview_state):
674
  """Moves to the next question or ends the interview logic."""
675
  if not interview_state:
 
712
  "interview_state": interview_state,
713
  "ui_updates": {
714
  "audio_input": "gr_hide", "submit_answer_btn": "gr_hide", "next_question_btn": "gr_hide",
715
+ "feedback_display": "gr_hide", "metrics_display": "gr_hide", "submit_interview_btn": "gr_show",
716
  "question_display": "gr_show", "answer_instructions": "gr_hide",
717
  "answer_display": "gr_clear", "metrics_display_clear": "gr_clear"
718
  }
719
  }
720
 
721
+
722
  def submit_interview_logic(interview_state, text_model):
723
  """Handles final submission, triggers evaluation, prepares results logic."""
724
  if not interview_state or not isinstance(interview_state, dict):
 
737
  resume_data = interview_state.get("resume_data", "")
738
  feedback_list = interview_state.get("feedback", [])
739
  metrics_history = interview_state.get("metrics_list", [])
 
740
 
741
  if not interactions:
742
  error_msg = "No interview interactions found to evaluate."
 
765
 
766
  return {
767
  "status": "Evaluation Complete! See your results below.",
768
+ "interview_state": interview_state,
769
  "report_text": report_text,
770
  "chart_buffer": chart_buffer,
771
  "ui_updates": {
 
785
  "ui_updates": {
786
  "evaluation_report_display": "gr_show_and_update_error", "evaluation_chart_display": "gr_hide"
787
  }
788
+ }