norhan12 commited on
Commit
ec0833e
·
verified ·
1 Parent(s): 4d417cb

Update process_interview.py

Browse files
Files changed (1) hide show
  1. process_interview.py +193 -193
process_interview.py CHANGED
@@ -37,17 +37,17 @@ from concurrent.futures import ThreadPoolExecutor
37
  # Setup logging
38
  logging.basicConfig(level=logging.INFO)
39
  logger = logging.getLogger(__name__)
40
- logging.getLogger("nemo_logging").setLevel(logging.INFO)
41
- logging.getLogger("nemo").setLevel(logging.INFO)
42
 
43
  # Configuration
44
- AUDIO_DIR = "./Uploads"
45
  OUTPUT_DIR = "./processed_audio"
46
  os.makedirs(OUTPUT_DIR, exist_ok=True)
47
 
48
  # API Keys
49
- PINECONE_KEY = os.getenv("PINECONE_KEY")'
50
- ASSEMBLYAI_KEY = 'os.getenv("ASSEMBLYAI_KEY")'
51
  GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
52
 
53
  def download_audio_from_url(url: str) -> str:
@@ -211,31 +211,31 @@ def process_utterance(utterance, full_audio, wav_file):
211
  else:
212
  speaker_id = f"unknown_{uuid.uuid4().hex[:6]}"
213
  speaker_name = f"Speaker_{speaker_id[-4:]}"
214
- index.upsert([(speaker_id, embedding_list, {"speaker_name": speaker_id})])
215
  os.remove(temp_path)
216
  return {
217
- ...
218
- **speech, 'speaker': speaker_name,
219
  'speaker_id': speaker_id,
220
  'embedding': embedding_list
221
  }
222
  except Exception as e:
223
  logger.error(f"Utterance processing failed: {str(e)}", exc_info=True)
224
  return {
225
- ...
226
- speech, 'speech': 'Unknown',
227
- 'speaker_id': speaker_id,
228
- 'embedding_id': None
229
  }
230
 
231
- def identify_speakers(audio: Dict, text: str) -> List[Dict]:
232
  try:
233
- audio = AudioSegment.from_wav(text)
234
- speakers = audio['speech']
235
  with ThreadPoolExecutor(max_workers=5) as executor:
236
  futures = [
237
- executor.submit(process_speech, speech, speakers, text)
238
- for speech in speakers
239
  ]
240
  results = [f.result() for f in futures]
241
  return results
@@ -243,33 +243,31 @@ def identify_speakers(audio: Dict, text: str) -> List[Dict]:
243
  logger.error(f"Speaker identification failed: {str(e)}")
244
  raise
245
 
246
- def train_role_classifier(speakers: List[Dict]):
247
  try:
248
- speech = [u['speech'].split()]
249
- vectorizer = TfidfVectorizer(max_features=500, ngram_range=(1,2))
250
- X_text = vectorizer.fit_transform(speech)
251
  features = []
252
  labels = []
253
- for i, speaker in enumerate(speakers):
254
- utterance = speaker['speech_features']
255
  feat = [
256
- utterance['duration'], utterance['speech_rate'], utterance['duration'], utterance['mean_pitch'],
257
- utterance['min_pitch'], utterance['max_pitch'],
258
- utterance['speech_sd'], utterance['intensityLevel'],
259
- utterance['intensity_level'],
260
- utterance['speechMax']], utterance['speechSD'],
261
  ]
262
- feat.extend(X_text[i].toarray()[0])
263
- doc = nlp(speaker['speech'])
264
- speech.extend([
265
- int(speaker['speech'].endswith('?'))),
266
- len(re.findall(r'\b(why|how|what|when|where|who|which)\b', speaker['speech'].lower())),
267
- len(speaker['speech'].split())),
268
- sum(frequency for token in speech if token.pos_ == 'VERB'),
269
- sum(frequency for token in speech if token.pos == 'NOUN')
270
  ])
271
  features.append(feat)
272
- labels.append((0 if i % 2 == 0 else 1))
273
  scaler = StandardScaler()
274
  X = scaler.fit_transform(features)
275
  clf = RandomForestClassifier(
@@ -284,53 +282,53 @@ def train_role_classifier(speakers: List[Dict]):
284
  logger.error(f"Classifier training failed: {str(e)}")
285
  raise
286
 
287
- def classify_roles(speakers: List[Dict], clf, vectorizer, scaler):
288
  try:
289
- speech = [u['speech'] for u in speakers]
290
- X_text = vectorizer.transform(speech)
291
  results = []
292
- for i, speaker in enumerate(speakers):
293
- prosodic = speaker['speech_features']
294
  feat = [
295
  prosodic['duration'], prosodic['mean_pitch'], prosodic['min_pitch'],
296
  prosodic['max_pitch'], prosodic['pitch_sd'], prosodic['intensityMean'],
297
  prosodic['intensityMin'], prosodic['intensityMax'], prosodic['intensitySD'],
298
  ]
299
  feat.extend(X_text[i].toarray()[0].tolist())
300
- doc = nlp(speaker['speech'])
301
  feat.extend([
302
- int(speaker['speech'].endswith('?')),
303
- len(re.findall(r'\b(why|how|what|when|where|who|which)\b', speaker['speech'].lower())),
304
- len(speaker['speech'].split()),
305
  sum(1 for token in doc if token.pos_ == 'VERB'),
306
  sum(1 for token in doc if token.pos_ == 'NOUN')
307
  ])
308
  X = scaler.transform([feat])
309
  role = 'Interviewer' if clf.predict(X)[0] == 0 else 'Interviewee'
310
- results.append({**speaker, 'role': role})
311
  return results
312
  except Exception as e:
313
  logger.error(f"Role classification failed: {str(e)}")
314
  raise
315
 
316
- def analyze_interviewee_voice(audio_path: str, speakers: List[Dict]) -> Dict:
317
  try:
318
  y, sr = librosa.load(audio_path, sr=16000)
319
- interviewee_speakers = [u for u in speakers if u['role'] == 'Interviewee']
320
- if not interviewee_speakers:
321
- return {'error': 'No interviewee speeches found'}
322
  segments = []
323
- for u in interviewee_speakers:
324
  start = int(u['start'] * sr / 1000)
325
  end = int(u['end'] * sr / 1000)
326
  segments.append(y[start:end])
327
- total_duration = sum(u['speech_features']['duration'] for u in interviewee_speakers)
328
- total_words = sum(len(u['speech'].split()) for u in interviewee_speakers)
329
  speaking_rate = total_words / total_duration if total_duration > 0 else 0
330
  filler_words = ['um', 'uh', 'like', 'you know', 'so', 'i mean']
331
- filler_count = sum(sum(u['speech'].lower().count(fw) for fw in filler_words) for u in interviewee_speakers)
332
  filler_ratio = filler_count / total_words if total_words > 0 else 0
333
- all_words = ' '.join(u['speech'].lower() for u in interviewee_speakers).split()
334
  word_counts = {}
335
  for i in range(len(all_words) - 1):
336
  bigram = (all_words[i], all_words[i + 1])
@@ -374,19 +372,19 @@ def generate_voice_interpretation(analysis: Dict) -> str:
374
  return "Voice analysis unavailable due to processing limitations."
375
  interpretation_lines = [
376
  "Vocal Performance Profile:",
377
- f"- Speaking Rate: {analysis['speaking_rate']} words/sec - Benchmark: 2.0-3.0 wps for clear delivery",
378
- f"- Filler Word Frequency: {analysis['filler_ratio'] * 100:.1f}% - Measures non-content words",
379
- f"- Repetition Index: {analysis['repetition_score']:.3f} - Frequency of repeated phrases",
380
- f"- Anxiety Indicator: {analysis['interpretation']['anxiety_level']} (Score: {analysis['composite_scores']['anxiety']:.3f}) - Pitch and vocal stability",
381
- f"- Confidence Indicator: {analysis['interpretation']['confidence_level']} (Score: {analysis['composite_scores']['confidence']:.3f}) - Vocal strength",
382
- f"- Fluency Rating: {analysis['interpretation']['fluency_level']} - Speech flow and coherence",
383
  "",
384
- "HR Insights:",
385
- "- Rapid speech (>3.0 wps) may signal enthusiasm but risks clarity.",
386
- "- High filler word use reduces perceived professionalism.",
387
- "- Elevated anxiety suggests pressure; training can build resilience.",
388
- "- Strong confidence aligns with leadership presence.",
389
- "- Fluent speech enhances engagement, critical for team roles."
390
  ]
391
  return "\n".join(interpretation_lines)
392
 
@@ -394,18 +392,18 @@ def generate_anxiety_confidence_chart(composite_scores: Dict, chart_path_or_buff
394
  try:
395
  labels = ['Anxiety', 'Confidence']
396
  scores = [composite_scores.get('anxiety', 0), composite_scores.get('confidence', 0)]
397
- fig, ax = plt.subplots(figsize=(5, 3.5))
398
- bars = ax.bar(labels, scores, color=['#FF5252', '#26A69A'], edgecolor='black', width=0.45)
399
  ax.set_ylabel('Score (Normalized)', fontsize=12)
400
  ax.set_title('Vocal Dynamics: Anxiety vs. Confidence', fontsize=14, pad=15)
401
- ax.set_ylim(0, 1.3)
402
  for bar in bars:
403
  height = bar.get_height()
404
  ax.text(bar.get_x() + bar.get_width()/2, height + 0.05, f"{height:.2f}",
405
  ha='center', color='black', fontweight='bold', fontsize=11)
406
  ax.grid(True, axis='y', linestyle='--', alpha=0.7)
407
  plt.tight_layout()
408
- plt.savefig(chart_path_or_buffer, format='png', bbox_inches='tight', dpi=300)
409
  plt.close(fig)
410
  except Exception as e:
411
  logger.error(f"Error generating chart: {str(e)}")
@@ -449,29 +447,29 @@ def generate_report(analysis_data: Dict) -> str:
449
  elif acceptance_prob >= 40: acceptance_line += "HR Verdict: Moderate potential, requires additional assessment and skill-building."
450
  else: acceptance_line += "HR Verdict: Limited fit, significant improvement needed for role alignment."
451
  prompt = f"""
452
- You are EvalBot, a senior HR consultant with 20+ years of experience, delivering a polished, concise, and engaging interview analysis report. Use a professional tone, clear headings, and bullet points ('- ') for readability. Avoid redundancy and ensure distinct sections for strengths, growth areas, and recommendations.
453
  {acceptance_line}
454
  **1. Executive Summary**
455
- - Provide a concise overview of performance, key metrics, and hiring potential.
456
  - Interview length: {analysis_data['text_analysis']['total_duration']:.2f} seconds
457
  - Speaker turns: {analysis_data['text_analysis']['speaker_turns']}
458
  - Participants: {', '.join(analysis_data['speakers'])}
459
  **2. Communication and Vocal Dynamics**
460
- - Evaluate vocal delivery (rate, fluency, confidence) and professional impact.
461
- - Offer HR insights on workplace alignment.
462
  {voice_interpretation}
463
  **3. Competency and Content Evaluation**
464
- - Assess competencies: leadership, problem-solving, communication, adaptability.
465
- - List strengths and growth areas separately, with specific examples.
466
  - Sample responses:
467
  {chr(10).join(interviewee_responses)}
468
  **4. Role Fit and Growth Potential**
469
- - Analyze cultural fit, role readiness, and long-term potential.
470
- - Highlight enthusiasm and scalability.
471
  **5. Strategic HR Recommendations**
472
- - Provide distinct, prioritized strategies for candidate growth.
473
- - Target: Communication, Response Depth, Professional Presence.
474
- - List clear next steps for hiring managers (e.g., advance, train, assess).
475
  """
476
  response = gemini_model.generate_content(prompt)
477
  return response.text
@@ -482,40 +480,41 @@ def generate_report(analysis_data: Dict) -> str:
482
  def create_pdf_report(analysis_data: Dict, output_path: str, gemini_report_text: str):
483
  try:
484
  doc = SimpleDocTemplate(output_path, pagesize=letter,
485
- rightMargin=0.7*inch, leftMargin=0.7*inch,
486
- topMargin=0.9*inch, bottomMargin=0.9*inch)
487
  styles = getSampleStyleSheet()
488
- h1 = ParagraphStyle(name='Heading1', fontSize=22, leading=26, spaceAfter=20, alignment=1, textColor=colors.HexColor('#003087'), fontName='Helvetica-Bold')
489
- h2 = ParagraphStyle(name='Heading2', fontSize=15, leading=18, spaceBefore=14, spaceAfter=8, textColor=colors.HexColor('#0050BC'), fontName='Helvetica-Bold')
490
- h3 = ParagraphStyle(name='Heading3', fontSize=11, leading=14, spaceBefore=10, spaceAfter=6, textColor=colors.HexColor('#3F7CFF'), fontName='Helvetica')
491
- body_text = ParagraphStyle(name='BodyText', fontSize=10, leading=13, spaceAfter=8, fontName='Helvetica', textColor=colors.HexColor('#333333'))
492
- bullet_style = ParagraphStyle(name='Bullet', parent=body_text, leftIndent=20, bulletIndent=10, fontName='Helvetica', bulletFontName='Helvetica', bulletFontSize=10)
493
 
494
  story = []
495
 
496
  def header_footer(canvas, doc):
497
  canvas.saveState()
498
- canvas.setFont('Helvetica', 8)
 
 
 
 
 
 
 
499
  canvas.setFillColor(colors.HexColor('#666666'))
500
- canvas.drawString(doc.leftMargin, 0.4 * inch, f"Page {doc.page} | EvalBot HR Interview Report | Confidential")
501
- canvas.setStrokeColor(colors.HexColor('#0050BC'))
502
- canvas.setLineWidth(1)
503
- canvas.line(doc.leftMargin, doc.height + 0.85*inch, doc.width + doc.leftMargin, doc.height + 0.85*inch)
504
- canvas.setFont('Helvetica-Bold', 10)
505
- canvas.drawString(doc.leftMargin, doc.height + 0.9*inch, "Candidate Interview Analysis")
506
- canvas.drawRightString(doc.width + doc.leftMargin, doc.height + 0.9*inch, time.strftime('%B %d, %Y'))
507
  canvas.restoreState()
508
 
509
  # Title Page
510
  story.append(Paragraph("Candidate Interview Analysis", h1))
511
- story.append(Paragraph(f"Generated: {time.strftime('%B %d, %Y')}", ParagraphStyle(name='Date', alignment=1, fontSize=10, textColor=colors.HexColor('#666666'), fontName='Helvetica')))
512
- story.append(Spacer(1, 0.5 * inch))
513
  acceptance_prob = analysis_data.get('acceptance_probability')
514
  if acceptance_prob is not None:
515
- story.append(Paragraph("Hiring Suitability Snapshot", h2))
516
  prob_color = colors.HexColor('#2E7D32') if acceptance_prob >= 80 else (colors.HexColor('#F57C00') if acceptance_prob >= 60 else colors.HexColor('#D32F2F'))
517
- story.append(Paragraph(f"Suitability Score: <font size=16 color='{prob_color.hexval()}'><b>{acceptance_prob:.2f}%</b></font>",
518
- ParagraphStyle(name='Prob', fontSize=12, spaceAfter=12, alignment=1, fontName='Helvetica-Bold')))
519
  if acceptance_prob >= 80:
520
  story.append(Paragraph("<b>HR Verdict:</b> Outstanding candidate, highly recommended for immediate advancement.", body_text))
521
  elif acceptance_prob >= 60:
@@ -524,102 +523,89 @@ def create_pdf_report(analysis_data: Dict, output_path: str, gemini_report_text:
524
  story.append(Paragraph("<b>HR Verdict:</b> Moderate potential, requires additional assessment and skill-building.", body_text))
525
  else:
526
  story.append(Paragraph("<b>HR Verdict:</b> Limited fit, significant improvement needed for role alignment.", body_text))
527
- story.append(Spacer(1, 0.3 * inch))
528
  table_data = [
529
- ['Metric', 'Value'],
530
- ['Interview Duration', f"{analysis_data['text_analysis']['total_duration']:.2f} seconds"],
531
  ['Speaker Turns', f"{analysis_data['text_analysis']['speaker_turns']}"],
532
- ['Participants', ', '.join(sorted(analysis_data['speakers']))]
533
  ]
534
- table = Table(table_data, colWidths=[2.2*inch, 3.8*inch])
535
  table.setStyle(TableStyle([
536
- ('BACKGROUND', (0,0), (-1,0), colors.HexColor('#0050BC')),
537
- ('TEXTCOLOR', (0,0), (-1,0), colors.white),
538
  ('ALIGN', (0,0), (-1,-1), 'LEFT'),
539
  ('VALIGN', (0,0), (-1,-1), 'MIDDLE'),
540
- ('FONTNAME', (0,0), (-1,0), 'Helvetica-Bold'),
541
- ('FONTSIZE', (0,0), (-1,-1), 9),
542
- ('BOTTOMPADDING', (0,0), (-1,0), 10),
543
- ('TOPPADDING', (0,0), (-1,0), 10),
544
- ('BACKGROUND', (0,1), (-1,-1), colors.HexColor('#F5F6FA')),
545
- ('GRID', (0,0), (-1,-1), 0.5, colors.HexColor('#DDE4EB'))
546
  ]))
547
  story.append(table)
548
- story.append(Spacer(1, 0.4 * inch))
549
- story.append(Paragraph("Prepared by: EvalBot - AI-Powered HR Analysis", body_text))
550
  story.append(PageBreak())
551
 
552
  # Detailed Analysis
553
- story.append(Paragraph("Detailed Candidate Evaluation", h1))
554
 
555
- # Communication and Vocal Dynamics
556
  story.append(Paragraph("1. Communication & Vocal Dynamics", h2))
557
  voice_analysis = analysis_data.get('voice_analysis', {})
558
  if voice_analysis and 'error' not in voice_analysis:
559
  table_data = [
560
  ['Metric', 'Value', 'HR Insight'],
561
- ['Speaking Rate', f"{voice_analysis.get('speaking_rate', 0):.2f} words/sec", 'Benchmark: 2.0-3.0 wps; impacts clarity'],
562
- ['Filler Words', f"{voice_analysis.get('filler_ratio', 0) * 100:.1f}%", 'High usage reduces credibility'],
563
- ['Anxiety', voice_analysis.get('interpretation', {}).get('anxiety_level', 'N/A'), f"Score: {voice_analysis.get('composite_scores', {}).get('anxiety', 0):.3f}; stress response"],
564
- ['Confidence', voice_analysis.get('interpretation', {}).get('confidence_level', 'N/A'), f"Score: {voice_analysis.get('composite_scores', {}).get('confidence', 0):.3f}; vocal strength"],
565
- ['Fluency', voice_analysis.get('interpretation', {}).get('fluency_level', 'N/A'), 'Drives engagement']
566
  ]
567
- table = Table(table_data, colWidths=[1.7*inch, 1.2*inch, 3.1*inch])
568
  table.setStyle(TableStyle([
569
- ('BACKGROUND', (0,0), (-1,0), colors.HexColor('#0050BC')),
570
- ('TEXTCOLOR', (0,0), (-1,0), colors.white),
571
  ('ALIGN', (0,0), (-1,-1), 'LEFT'),
572
  ('VALIGN', (0,0), (-1,-1), 'MIDDLE'),
573
- ('FONTNAME', (0,0), (-1,0), 'Helvetica-Bold'),
574
- ('FONTSIZE', (0,0), (-1,-1), 9),
575
- ('BOTTOMPADDING', (0,0), (-1,0), 10),
576
- ('TOPPADDING', (0,0), (-1,0), 10),
577
- ('BACKGROUND', (0,1), (-1,-1), colors.HexColor('#F5F6FA')),
578
- ('GRID', (0,0), (-1,-1), 0.5, colors.HexColor('#DDE4EB'))
579
  ]))
580
  story.append(table)
581
- story.append(Spacer(1, 0.2 * inch))
582
  chart_buffer = io.BytesIO()
583
  generate_anxiety_confidence_chart(voice_analysis.get('composite_scores', {}), chart_buffer)
584
  chart_buffer.seek(0)
585
- img = Image(chart_buffer, width=4.8*inch, height=3.2*inch)
586
  img.hAlign = 'CENTER'
587
  story.append(img)
588
  else:
589
- story.append(Paragraph("Vocal analysis unavailable.", body_text))
590
- story.append(Spacer(1, 0.3 * inch))
591
 
592
  # Parse Gemini Report
593
- sections = {
594
- "Executive Summary": [],
595
- "Communication and Vocal Dynamics": [],
596
- "Competency and Content Evaluation": {"Strengths": [], "Growth Areas": []},
597
- "Role Fit and Growth Potential": [],
598
- "Strategic HR Recommendations": {"Development Priorities": [], "Next Steps": []}
599
- }
600
  report_parts = re.split(r'(\s*\*\*\s*\d\.\s*.*?\s*\*\*)', gemini_report_text)
601
  current_section = None
602
  for part in report_parts:
603
  if not part.strip(): continue
604
  is_heading = False
605
- for title in sections.keys():
606
  if title.lower() in part.lower():
607
  current_section = title
608
  is_heading = True
609
  break
610
  if not is_heading and current_section:
611
- if current_section == "Competency and Content Evaluation":
612
- if 'strength' in part.lower() or any(k in part.lower() for k in ['leadership', 'problem-solving', 'communication', 'adaptability']):
613
- sections[current_section]["Strengths"].append(part.strip())
614
- elif 'improve' in part.lower() or 'grow' in part.lower() or 'challenge' in part.lower():
615
- sections[current_section]["Growth Areas"].append(part.strip())
616
- elif current_section == "Strategic HR Recommendations":
617
- if any(k in part.lower() for k in ['communication', 'depth', 'presence', 'improve']):
618
- sections[current_section]["Development Priorities"].append(part.strip())
619
- elif any(k in part.lower() for k in ['advance', 'train', 'assess', 'next step']):
620
- sections[current_section]["Next Steps"].append(part.strip())
621
- else:
622
- sections[current_section].append(part.strip())
623
 
624
  # Executive Summary
625
  story.append(Paragraph("2. Executive Summary", h2))
@@ -630,28 +616,35 @@ def create_pdf_report(analysis_data: Dict, output_path: str, gemini_report_text:
630
  else:
631
  story.append(Paragraph(line, body_text))
632
  else:
633
- story.append(Paragraph("Summary unavailable.", body_text))
634
- story.append(Spacer(1, 0.3 * inch))
635
 
636
  # Competency and Content
637
- story.append(Paragraph("3. Competency & Content", h2))
638
- story.append(Paragraph("Strengths", h3))
639
- if sections['Competency and Content Evaluation']['Strengths']:
640
- for line in sections['Competency and Content Evaluation']['Strengths']:
641
- story.append(Paragraph(line.lstrip('-•* ').strip(), bullet_style))
642
- else:
643
- story.append(Paragraph("No strengths identified.", body_text))
644
- story.append(Spacer(1, 0.2 * inch))
645
- story.append(Paragraph("Growth Areas", h3))
646
- if sections['Competency and Content Evaluation']['Growth Areas']:
647
- for line in sections['Competency and Content Evaluation']['Growth Areas']:
648
- story.append(Paragraph(line.lstrip('-•* ').strip(), bullet_style))
 
 
 
 
 
 
 
649
  else:
650
- story.append(Paragraph("No growth areas identified.", body_text))
651
- story.append(Spacer(1, 0.3 * inch))
652
 
653
  # Role Fit
654
- story.append(Paragraph("4. Role Fit & Potential", h2))
655
  if sections['Role Fit and Growth Potential']:
656
  for line in sections['Role Fit and Growth Potential']:
657
  if line.startswith(('-', '•', '*')):
@@ -659,31 +652,38 @@ def create_pdf_report(analysis_data: Dict, output_path: str, gemini_report_text:
659
  else:
660
  story.append(Paragraph(line, body_text))
661
  else:
662
- story.append(Paragraph("Fit and potential analysis unavailable.", body_text))
663
- story.append(Spacer(1, 0.3 * inch))
664
 
665
- # Strategic Recommendations
666
- story.append(Paragraph("5. Strategic Recommendations", h2))
667
- story.append(Paragraph("Development Priorities", h3))
668
- if sections['Strategic HR Recommendations']['Development Priorities']:
669
- for line in sections['Strategic HR Recommendations']['Development Priorities']:
670
- story.append(Paragraph(line.lstrip('-•* ').strip(), bullet_style))
671
- else:
672
- story.append(Paragraph("No development priorities specified.", body_text))
673
- story.append(Spacer(1, 0.2 * inch))
674
- story.append(Paragraph("Next Steps for Managers", h3))
675
- if sections['Strategic HR Recommendations']['Next Steps']:
676
- for line in sections['Strategic HR Recommendations']['Next Steps']:
677
- story.append(Paragraph(line.lstrip('-•* ').strip(), bullet_style))
 
 
 
 
 
 
 
678
  else:
679
- story.append(Paragraph("No next steps provided.", body_text))
680
  story.append(Spacer(1, 0.3 * inch))
681
- story.append(Paragraph("This report provides a data-driven evaluation to guide hiring and development decisions.", body_text))
682
 
683
  doc.build(story, onFirstPage=header_footer, onLaterPages=header_footer)
684
  return True
685
  except Exception as e:
686
- logger.error(f"PDF creation failed: {str(e)}", exc_info=True)
687
  return False
688
 
689
  def convert_to_serializable(obj):
 
37
  # Setup logging
38
  logging.basicConfig(level=logging.INFO)
39
  logger = logging.getLogger(__name__)
40
+ logging.getLogger("nemo_logging").setLevel(logging.ERROR)
41
+ logging.getLogger("nemo").setLevel(logging.ERROR)
42
 
43
  # Configuration
44
+ AUDIO_DIR = "./uploads"
45
  OUTPUT_DIR = "./processed_audio"
46
  os.makedirs(OUTPUT_DIR, exist_ok=True)
47
 
48
  # API Keys
49
+ PINECONE_KEY = os.getenv("PINECONE_KEY")
50
+ ASSEMBLYAI_KEY = os.getenv("ASSEMBLYAI_KEY")
51
  GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
52
 
53
  def download_audio_from_url(url: str) -> str:
 
211
  else:
212
  speaker_id = f"unknown_{uuid.uuid4().hex[:6]}"
213
  speaker_name = f"Speaker_{speaker_id[-4:]}"
214
+ index.upsert([(speaker_id, embedding_list, {"speaker_name": speaker_name})])
215
  os.remove(temp_path)
216
  return {
217
+ **utterance,
218
+ 'speaker': speaker_name,
219
  'speaker_id': speaker_id,
220
  'embedding': embedding_list
221
  }
222
  except Exception as e:
223
  logger.error(f"Utterance processing failed: {str(e)}", exc_info=True)
224
  return {
225
+ **utterance,
226
+ 'speaker': 'Unknown',
227
+ 'speaker_id': 'unknown',
228
+ 'embedding': None
229
  }
230
 
231
+ def identify_speakers(transcript: Dict, wav_file: str) -> List[Dict]:
232
  try:
233
+ full_audio = AudioSegment.from_wav(wav_file)
234
+ utterances = transcript['utterances']
235
  with ThreadPoolExecutor(max_workers=5) as executor:
236
  futures = [
237
+ executor.submit(process_utterance, utterance, full_audio, wav_file)
238
+ for utterance in utterances
239
  ]
240
  results = [f.result() for f in futures]
241
  return results
 
243
  logger.error(f"Speaker identification failed: {str(e)}")
244
  raise
245
 
246
+ def train_role_classifier(utterances: List[Dict]):
247
  try:
248
+ texts = [u['text'] for u in utterances]
249
+ vectorizer = TfidfVectorizer(max_features=500, ngram_range=(1, 2))
250
+ X_text = vectorizer.fit_transform(texts)
251
  features = []
252
  labels = []
253
+ for i, utterance in enumerate(utterances):
254
+ prosodic = utterance['prosodic_features']
255
  feat = [
256
+ prosodic['duration'], prosodic['mean_pitch'], prosodic['min_pitch'],
257
+ prosodic['max_pitch'], prosodic['pitch_sd'], prosodic['intensityMean'],
258
+ prosodic['intensityMin'], prosodic['intensityMax'], prosodic['intensitySD'],
 
 
259
  ]
260
+ feat.extend(X_text[i].toarray()[0].tolist())
261
+ doc = nlp(utterance['text'])
262
+ feat.extend([
263
+ int(utterance['text'].endswith('?')),
264
+ len(re.findall(r'\b(why|how|what|when|where|who|which)\b', utterance['text'].lower())),
265
+ len(utterance['text'].split()),
266
+ sum(1 for token in doc if token.pos_ == 'VERB'),
267
+ sum(1 for token in doc if token.pos_ == 'NOUN')
268
  ])
269
  features.append(feat)
270
+ labels.append(0 if i % 2 == 0 else 1)
271
  scaler = StandardScaler()
272
  X = scaler.fit_transform(features)
273
  clf = RandomForestClassifier(
 
282
  logger.error(f"Classifier training failed: {str(e)}")
283
  raise
284
 
285
+ def classify_roles(utterances: List[Dict], clf, vectorizer, scaler):
286
  try:
287
+ texts = [u['text'] for u in utterances]
288
+ X_text = vectorizer.transform(texts)
289
  results = []
290
+ for i, utterance in enumerate(utterances):
291
+ prosodic = utterance['prosodic_features']
292
  feat = [
293
  prosodic['duration'], prosodic['mean_pitch'], prosodic['min_pitch'],
294
  prosodic['max_pitch'], prosodic['pitch_sd'], prosodic['intensityMean'],
295
  prosodic['intensityMin'], prosodic['intensityMax'], prosodic['intensitySD'],
296
  ]
297
  feat.extend(X_text[i].toarray()[0].tolist())
298
+ doc = nlp(utterance['text'])
299
  feat.extend([
300
+ int(utterance['text'].endswith('?')),
301
+ len(re.findall(r'\b(why|how|what|when|where|who|which)\b', utterance['text'].lower())),
302
+ len(utterance['text'].split()),
303
  sum(1 for token in doc if token.pos_ == 'VERB'),
304
  sum(1 for token in doc if token.pos_ == 'NOUN')
305
  ])
306
  X = scaler.transform([feat])
307
  role = 'Interviewer' if clf.predict(X)[0] == 0 else 'Interviewee'
308
+ results.append({**utterance, 'role': role})
309
  return results
310
  except Exception as e:
311
  logger.error(f"Role classification failed: {str(e)}")
312
  raise
313
 
314
+ def analyze_interviewee_voice(audio_path: str, utterances: List[Dict]) -> Dict:
315
  try:
316
  y, sr = librosa.load(audio_path, sr=16000)
317
+ interviewee_utterances = [u for u in utterances if u['role'] == 'Interviewee']
318
+ if not interviewee_utterances:
319
+ return {'error': 'No interviewee utterances found'}
320
  segments = []
321
+ for u in interviewee_utterances:
322
  start = int(u['start'] * sr / 1000)
323
  end = int(u['end'] * sr / 1000)
324
  segments.append(y[start:end])
325
+ total_duration = sum(u['prosodic_features']['duration'] for u in interviewee_utterances)
326
+ total_words = sum(len(u['text'].split()) for u in interviewee_utterances)
327
  speaking_rate = total_words / total_duration if total_duration > 0 else 0
328
  filler_words = ['um', 'uh', 'like', 'you know', 'so', 'i mean']
329
+ filler_count = sum(sum(u['text'].lower().count(fw) for fw in filler_words) for u in interviewee_utterances)
330
  filler_ratio = filler_count / total_words if total_words > 0 else 0
331
+ all_words = ' '.join(u['text'].lower() for u in interviewee_utterances).split()
332
  word_counts = {}
333
  for i in range(len(all_words) - 1):
334
  bigram = (all_words[i], all_words[i + 1])
 
372
  return "Voice analysis unavailable due to processing limitations."
373
  interpretation_lines = [
374
  "Vocal Performance Profile:",
375
+ f"- Speaking Rate: {analysis['speaking_rate']} words/sec - Benchmark: 2.0-3.0 wps for clear, professional delivery",
376
+ f"- Filler Word Frequency: {analysis['filler_ratio'] * 100:.1f}% - Measures non-content words (e.g., 'um', 'like')",
377
+ f"- Repetition Index: {analysis['repetition_score']:.3f} - Frequency of repeated phrases or ideas",
378
+ f"- Anxiety Indicator: {analysis['interpretation']['anxiety_level']} (Score: {analysis['composite_scores']['anxiety']:.3f}) - Derived from pitch variation and vocal stability",
379
+ f"- Confidence Indicator: {analysis['interpretation']['confidence_level']} (Score: {analysis['composite_scores']['confidence']:.3f}) - Reflects vocal strength and consistency",
380
+ f"- Fluency Rating: {analysis['interpretation']['fluency_level']} - Assesses speech flow and coherence",
381
  "",
382
+ "HR Performance Insights:",
383
+ "- Rapid speech (>3.0 wps) may signal enthusiasm but risks clarity; slower, deliberate pacing enhances professionalism.",
384
+ "- Elevated filler word use reduces perceived polish and can distract from key messages.",
385
+ "- High anxiety scores suggest interview pressure; training can build resilience.",
386
+ "- Strong confidence indicators align with leadership presence and effective communication.",
387
+ "- Fluent speech enhances engagement, critical for client-facing or team roles."
388
  ]
389
  return "\n".join(interpretation_lines)
390
 
 
392
  try:
393
  labels = ['Anxiety', 'Confidence']
394
  scores = [composite_scores.get('anxiety', 0), composite_scores.get('confidence', 0)]
395
+ fig, ax = plt.subplots(figsize=(5, 3))
396
+ bars = ax.bar(labels, scores, color=['#FF6B6B', '#4ECDC4'], edgecolor='black', width=0.6)
397
  ax.set_ylabel('Score (Normalized)', fontsize=12)
398
  ax.set_title('Vocal Dynamics: Anxiety vs. Confidence', fontsize=14, pad=15)
399
+ ax.set_ylim(0, 1.2)
400
  for bar in bars:
401
  height = bar.get_height()
402
  ax.text(bar.get_x() + bar.get_width()/2, height + 0.05, f"{height:.2f}",
403
  ha='center', color='black', fontweight='bold', fontsize=11)
404
  ax.grid(True, axis='y', linestyle='--', alpha=0.7)
405
  plt.tight_layout()
406
+ plt.savefig(chart_path_or_buffer, format='png', bbox_inches='tight', dpi=200)
407
  plt.close(fig)
408
  except Exception as e:
409
  logger.error(f"Error generating chart: {str(e)}")
 
447
  elif acceptance_prob >= 40: acceptance_line += "HR Verdict: Moderate potential, requires additional assessment and skill-building."
448
  else: acceptance_line += "HR Verdict: Limited fit, significant improvement needed for role alignment."
449
  prompt = f"""
450
+ You are EvalBot, a senior HR consultant with 20+ years of experience, delivering a polished, concise, and visually engaging interview analysis report. Use a professional tone, clear headings, and bullet points ('- ') for readability. Focus on candidate suitability, strengths, and actionable growth strategies.
451
  {acceptance_line}
452
  **1. Executive Summary**
453
+ - Deliver a crisp overview of the candidate's performance, emphasizing key metrics and hiring potential.
454
  - Interview length: {analysis_data['text_analysis']['total_duration']:.2f} seconds
455
  - Speaker turns: {analysis_data['text_analysis']['speaker_turns']}
456
  - Participants: {', '.join(analysis_data['speakers'])}
457
  **2. Communication and Vocal Dynamics**
458
+ - Assess the candidate's vocal delivery (rate, fluency, confidence) and its impact on professional presence.
459
+ - Provide HR insights on how these traits align with workplace expectations.
460
  {voice_interpretation}
461
  **3. Competency and Content Evaluation**
462
+ - Evaluate responses for core competencies: leadership, problem-solving, communication, adaptability.
463
+ - Highlight strengths and growth areas with specific, concise examples.
464
  - Sample responses:
465
  {chr(10).join(interviewee_responses)}
466
  **4. Role Fit and Growth Potential**
467
+ - Analyze alignment with professional roles, focusing on cultural fit, readiness, and scalability.
468
+ - Consider enthusiasm, teamwork, and long-term potential.
469
  **5. Strategic HR Recommendations**
470
+ - Offer prioritized, actionable strategies to enhance candidate performance.
471
+ - Target: Communication Effectiveness, Response Depth, Professional Impact.
472
+ - Suggest clear next steps for hiring managers (e.g., advance, train, assess).
473
  """
474
  response = gemini_model.generate_content(prompt)
475
  return response.text
 
480
def create_pdf_report(analysis_data: Dict, output_path: str, gemini_report_text: str) -> bool:
    """Render the interview analysis as a styled PDF report at ``output_path``.

    The report contains a title page (hiring-suitability score, verdict, key
    metrics table), a vocal-dynamics section with a table and chart, and the
    Gemini-generated narrative split into named sections.

    Args:
        analysis_data: Aggregated analysis results. Reads the keys
            ``acceptance_probability``, ``text_analysis`` (with
            ``total_duration`` and ``speaker_turns``), ``speakers``, and
            ``voice_analysis``.
        output_path: Filesystem path the PDF is written to.
        gemini_report_text: Raw markdown-ish report text from the Gemini
            model; section headings are expected as ``**N. Title**``.

    Returns:
        True on success, False if any step raised (the error is logged).
    """
    try:
        doc = SimpleDocTemplate(output_path, pagesize=letter,
                                rightMargin=0.6*inch, leftMargin=0.6*inch,
                                topMargin=0.8*inch, bottomMargin=0.8*inch)
        styles = getSampleStyleSheet()
        # Custom paragraph styles; alignment=1 is horizontal centering.
        h1 = ParagraphStyle(name='Heading1', fontSize=24, leading=28, spaceAfter=25, alignment=1, textColor=colors.HexColor('#1A3C5E'), fontName='Helvetica-Bold')
        h2 = ParagraphStyle(name='Heading2', fontSize=16, leading=20, spaceBefore=16, spaceAfter=10, textColor=colors.HexColor('#2E5A87'), fontName='Helvetica-Bold')
        h3 = ParagraphStyle(name='Heading3', fontSize=12, leading=16, spaceBefore=12, spaceAfter=8, textColor=colors.HexColor('#4A6FA5'), fontName='Helvetica')
        body_text = ParagraphStyle(name='BodyText', parent=styles['Normal'], fontSize=10, leading=14, spaceAfter=10, fontName='Helvetica')
        bullet_style = ParagraphStyle(name='Bullet', parent=body_text, leftIndent=25, bulletIndent=12, fontName='Helvetica')

        story = []

        def header_footer(canvas, doc):
            """Draw the per-page footer line, header rule, title, and date."""
            canvas.saveState()
            canvas.setFont('Helvetica', 9)
            canvas.setFillColor(colors.HexColor('#666666'))
            canvas.drawString(doc.leftMargin, 0.5 * inch, f"Page {doc.page} | EvalBot HR Interview Report | Confidential")
            canvas.setStrokeColor(colors.HexColor('#2E5A87'))
            canvas.setLineWidth(1.2)
            canvas.line(doc.leftMargin, doc.height + 0.9*inch, doc.width + doc.leftMargin, doc.height + 0.9*inch)
            canvas.setFont('Helvetica-Bold', 11)
            canvas.drawString(doc.leftMargin, doc.height + 0.95*inch, "Candidate Interview Analysis")
            canvas.setFillColor(colors.HexColor('#666666'))
            canvas.drawRightString(doc.width + doc.leftMargin, doc.height + 0.95*inch, time.strftime('%B %d, %Y'))
            canvas.restoreState()

        # ----- Title page -----
        story.append(Paragraph("Candidate Interview Analysis", h1))
        story.append(Paragraph(f"Generated: {time.strftime('%B %d, %Y')}", ParagraphStyle(name='Date', alignment=1, fontSize=11, textColor=colors.HexColor('#666666'), fontName='Helvetica')))
        story.append(Spacer(1, 0.6 * inch))
        acceptance_prob = analysis_data.get('acceptance_probability')
        if acceptance_prob is not None:
            story.append(Paragraph("Hiring Suitability Overview", h2))
            # Green >= 80, orange >= 60, red below.
            prob_color = colors.HexColor('#2E7D32') if acceptance_prob >= 80 else (colors.HexColor('#F57C00') if acceptance_prob >= 60 else colors.HexColor('#D32F2F'))
            story.append(Paragraph(f"Hiring Suitability Score: <font size=18 color='{prob_color.hexval()}'><b>{acceptance_prob:.2f}%</b></font>",
                                   ParagraphStyle(name='Prob', fontSize=14, spaceAfter=15, alignment=1, fontName='Helvetica-Bold')))
            if acceptance_prob >= 80:
                story.append(Paragraph("<b>HR Verdict:</b> Outstanding candidate, highly recommended for immediate advancement.", body_text))
            elif acceptance_prob >= 60:
                # NOTE(review): this branch's text was elided in the source
                # rendering; reconstructed to match the prompt's verdict tiers
                # — confirm exact wording against the original file.
                story.append(Paragraph("<b>HR Verdict:</b> Strong candidate, recommended for advancement with minor development areas.", body_text))
            elif acceptance_prob >= 40:
                story.append(Paragraph("<b>HR Verdict:</b> Moderate potential, requires additional assessment and skill-building.", body_text))
            else:
                story.append(Paragraph("<b>HR Verdict:</b> Limited fit, significant improvement needed for role alignment.", body_text))
        story.append(Spacer(1, 0.4 * inch))

        # Key-metrics summary table.
        table_data = [
            ['Key Metrics', 'Value'],
            ['Interview Length', f"{analysis_data['text_analysis']['total_duration']:.2f} seconds"],
            ['Speaker Turns', f"{analysis_data['text_analysis']['speaker_turns']}"],
            ['Participants', ', '.join(analysis_data['speakers'])]
        ]
        table = Table(table_data, colWidths=[2.5*inch, 4*inch])
        table.setStyle(TableStyle([
            ('BACKGROUND', (0, 0), (-1, 0), colors.HexColor('#2E5A87')),
            ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
            ('ALIGN', (0, 0), (-1, -1), 'LEFT'),
            ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
            ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
            ('FONTSIZE', (0, 0), (-1, -1), 10),
            ('BOTTOMPADDING', (0, 0), (-1, 0), 12),
            ('TOPPADDING', (0, 0), (-1, 0), 12),
            ('BACKGROUND', (0, 1), (-1, -1), colors.HexColor('#F5F7FA')),
            ('GRID', (0, 0), (-1, -1), 1, colors.HexColor('#DDE4EB'))
        ]))
        story.append(table)
        story.append(Spacer(1, 0.5 * inch))
        story.append(Paragraph("Prepared by: EvalBot - AI-Powered HR Analysis System", body_text))
        story.append(PageBreak())

        # ----- Detailed analysis -----
        story.append(Paragraph("Detailed Candidate Profile", h1))

        story.append(Paragraph("1. Communication & Vocal Dynamics", h2))
        voice_analysis = analysis_data.get('voice_analysis', {})
        if voice_analysis and 'error' not in voice_analysis:
            table_data = [
                ['Metric', 'Value', 'HR Insight'],
                ['Speaking Rate', f"{voice_analysis.get('speaking_rate', 0):.2f} words/sec", 'Benchmark: 2.0-3.0 wps; affects clarity, poise'],
                ['Filler Word Frequency', f"{voice_analysis.get('filler_ratio', 0) * 100:.1f}%", 'Excess use impacts polish, credibility'],
                ['Anxiety Indicator', voice_analysis.get('interpretation', {}).get('anxiety_level', 'N/A'), f"Score: {voice_analysis.get('composite_scores', {}).get('anxiety', 0):.3f}; shows stress response"],
                ['Confidence Indicator', voice_analysis.get('interpretation', {}).get('confidence_level', 'N/A'), f"Score: {voice_analysis.get('composite_scores', {}).get('confidence', 0):.3f}; reflects vocal strength"],
                ['Fluency Rating', voice_analysis.get('interpretation', {}).get('fluency_level', 'N/A'), 'Drives engagement, message impact']
            ]
            table = Table(table_data, colWidths=[1.9*inch, 1.3*inch, 3.3*inch])
            table.setStyle(TableStyle([
                ('BACKGROUND', (0, 0), (-1, 0), colors.HexColor('#2E5A87')),
                ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
                ('ALIGN', (0, 0), (-1, -1), 'LEFT'),
                ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
                ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
                ('FONTSIZE', (0, 0), (-1, -1), 9),
                ('BOTTOMPADDING', (0, 0), (-1, 0), 12),
                ('TOPPADDING', (0, 0), (-1, 0), 12),
                ('BACKGROUND', (0, 1), (-1, -1), colors.HexColor('#F5F7FA')),
                ('GRID', (0, 0), (-1, -1), 1, colors.HexColor('#DDE4EB'))
            ]))
            story.append(table)
            story.append(Spacer(1, 0.3 * inch))
            # Render the anxiety/confidence chart into an in-memory PNG buffer.
            chart_buffer = io.BytesIO()
            generate_anxiety_confidence_chart(voice_analysis.get('composite_scores', {}), chart_buffer)
            chart_buffer.seek(0)
            img = Image(chart_buffer, width=5*inch, height=3*inch)
            img.hAlign = 'CENTER'
            story.append(img)
        else:
            story.append(Paragraph("Vocal analysis unavailable due to processing constraints.", body_text))
        story.append(Spacer(1, 0.4 * inch))

        # ----- Parse the Gemini narrative into named sections -----
        section_titles = ["Executive Summary", "Communication and Vocal Dynamics",
                          "Competency and Content Evaluation",
                          "Role Fit and Growth Potential", "Strategic HR Recommendations"]
        sections = {title: [] for title in section_titles}

        # Split on bold numbered headings like "**1. Executive Summary**";
        # the capturing group keeps the headings in the result list.
        report_parts = re.split(r'(\s*\*\*\s*\d\.\s*.*?\s*\*\*)', gemini_report_text)
        current_section = None
        for part in report_parts:
            if not part.strip():
                continue
            is_heading = False
            for title in section_titles:
                if title.lower() in part.lower():
                    current_section = title
                    is_heading = True
                    break
            if not is_heading and current_section:
                # NOTE(review): each appended element may span multiple text
                # lines, but the render loops below treat elements as single
                # lines — preserved as-is; confirm intended granularity.
                sections[current_section].append(part.strip())

        # ----- Executive Summary -----
        story.append(Paragraph("2. Executive Summary", h2))
        if sections['Executive Summary']:
            # NOTE(review): loop body elided in the source rendering;
            # reconstructed to mirror the Role Fit section below.
            for line in sections['Executive Summary']:
                if line.startswith(('-', '•', '*')):
                    story.append(Paragraph(line.lstrip('-•* ').strip(), bullet_style))
                else:
                    story.append(Paragraph(line, body_text))
        else:
            story.append(Paragraph("Executive summary unavailable.", body_text))
        story.append(Spacer(1, 0.4 * inch))

        # ----- Competency and Content -----
        story.append(Paragraph("3. Competency & Content Evaluation", h2))
        if sections['Competency and Content Evaluation']:
            story.append(Paragraph("Strengths", h3))
            strengths_found = False
            for line in sections['Competency and Content Evaluation']:
                # Keyword heuristic: lines mentioning strengths/competencies.
                if 'strength' in line.lower() or any(k in line.lower() for k in ['leadership', 'problem-solving', 'communication', 'adaptability']):
                    story.append(Paragraph(line.lstrip('-•* ').strip(), bullet_style))
                    strengths_found = True
            if not strengths_found:
                story.append(Paragraph("No specific strengths identified.", body_text))
            story.append(Spacer(1, 0.2 * inch))
            story.append(Paragraph("Growth Areas", h3))
            growth_found = False
            for line in sections['Competency and Content Evaluation']:
                if 'improve' in line.lower() or 'weak' in line.lower() or 'challenge' in line.lower():
                    story.append(Paragraph(line.lstrip('-•* ').strip(), bullet_style))
                    growth_found = True
            if not growth_found:
                story.append(Paragraph("No specific growth areas identified.", body_text))
        else:
            story.append(Paragraph("Competency and content evaluation unavailable.", body_text))
        story.append(PageBreak())

        # ----- Role Fit -----
        story.append(Paragraph("4. Role Fit & Growth Potential", h2))
        if sections['Role Fit and Growth Potential']:
            for line in sections['Role Fit and Growth Potential']:
                if line.startswith(('-', '•', '*')):
                    story.append(Paragraph(line.lstrip('-•* ').strip(), bullet_style))
                else:
                    story.append(Paragraph(line, body_text))
        else:
            story.append(Paragraph("Role fit and potential analysis unavailable.", body_text))
        story.append(Spacer(1, 0.4 * inch))

        # ----- HR Recommendations -----
        story.append(Paragraph("5. Strategic HR Recommendations", h2))
        if sections['Strategic HR Recommendations']:
            story.append(Paragraph("Development Priorities", h3))
            dev_found = False
            for line in sections['Strategic HR Recommendations']:
                if any(k in line.lower() for k in ['communication', 'clarity', 'depth', 'presence', 'improve']):
                    story.append(Paragraph(line.lstrip('-•* ').strip(), bullet_style))
                    dev_found = True
            if not dev_found:
                story.append(Paragraph("No development priorities specified.", body_text))
            story.append(Spacer(1, 0.2 * inch))
            story.append(Paragraph("Next Steps for Hiring Managers", h3))
            steps_found = False
            for line in sections['Strategic HR Recommendations']:
                if any(k in line.lower() for k in ['advance', 'train', 'assess', 'next step']):
                    story.append(Paragraph(line.lstrip('-•* ').strip(), bullet_style))
                    steps_found = True
            if not steps_found:
                story.append(Paragraph("No specific next steps provided.", body_text))
        else:
            story.append(Paragraph("Strategic recommendations unavailable.", body_text))
        story.append(Spacer(1, 0.3 * inch))
        story.append(Paragraph("This report delivers a comprehensive, data-driven evaluation to guide hiring decisions and candidate development.", body_text))

        doc.build(story, onFirstPage=header_footer, onLaterPages=header_footer)
        return True
    except Exception as e:
        logger.error(f"Enhanced PDF creation failed: {str(e)}", exc_info=True)
        return False
 
689
  def convert_to_serializable(obj):