# NOTE(review): the lines below were Hugging Face Space page banner text
# ("Spaces / Sleeping") captured by the scraper — not part of the program.
| from crewai import Crew, Process | |
| from langchain_core.tools import tool | |
| import json | |
| from pydantic import BaseModel, Field | |
| from typing import Dict, List, Any, Type | |
| from .agents import report_generator_agent, cv_section_splitter_agent, skills_extractor_agent, experience_extractor_agent, project_extractor_agent, education_extractor_agent, ProfileBuilderAgent, informations_personnelle_agent, reconversion_detector_agent | |
| from .tasks import generate_report_task, task_split_cv_sections, task_extract_skills, task_extract_experience, task_extract_projects, task_extract_education, task_build_profile, task_extract_informations, task_detect_reconversion | |
| from src.deep_learning_analyzer import MultiModelInterviewAnalyzer | |
| from src.rag_handler import RAGHandler | |
| from langchain_core.tools import BaseTool | |
| def run_interview_analysis(conversation_history: list, job_description_text: list) -> str: | |
| """ | |
| Appelle cet outil à la toute fin d'un entretien d'embauche pour analyser | |
| l'intégralité de la conversation et générer un rapport de feedback. | |
| Ne l'utilise PAS pour répondre à une question normale, mais seulement pour conclure et analyser l'entretien. | |
| """ | |
| analyzer = MultiModelInterviewAnalyzer() | |
| structured_analysis = analyzer.run_full_analysis(conversation_history, job_description_text) | |
| rag_handler = RAGHandler() | |
| rag_feedback = [] | |
| if structured_analysis.get("intent_analysis"): | |
| for intent in structured_analysis["intent_analysis"]: | |
| query = f"Conseils pour un candidat qui cherche à {intent['labels'][0]}" | |
| rag_feedback.extend(rag_handler.get_relevant_feedback(query)) | |
| if structured_analysis.get("sentiment_analysis"): | |
| for sentiment_group in structured_analysis["sentiment_analysis"]: | |
| for sentiment in sentiment_group: | |
| if sentiment['label'] == 'stress' and sentiment['score'].item() > 0.6: | |
| rag_feedback.extend(rag_handler.get_relevant_feedback("gestion du stress en entretien")) | |
| unique_feedback = list(set(rag_feedback)) | |
| interview_crew = Crew( | |
| agents=[report_generator_agent], | |
| tasks=[generate_report_task], | |
| process=Process.sequential, | |
| verbose=False, | |
| telemetry=False | |
| ) | |
| final_report = interview_crew.kickoff(inputs={ | |
| 'structured_analysis_data': json.dumps(structured_analysis, indent=2), | |
| 'rag_contextual_feedback': "\n".join(unique_feedback) | |
| }) | |
| return final_report | |
class CVSectionExtractor:
    """
    Section extractor that consumes the CVSectionSplitterAgent output and
    hands the appropriate content to each specialized agent.
    """

    def __init__(self, sections_data: dict):
        # Mapping of section name -> raw section text, as produced by the
        # splitter agent (or by the fallback path in analyse_cv).
        self.sections = sections_data

    def _section(self, key: str) -> str:
        """Return the named section's text, or '' when the key is absent."""
        return self.sections.get(key, "")

    def get_contact_section(self) -> str:
        return self._section("contact")

    def get_experiences_section(self) -> str:
        return self._section("experiences")

    def get_projects_section(self) -> str:
        return self._section("projects")

    def get_education_section(self) -> str:
        return self._section("education")

    def get_skills_section(self) -> str:
        return self._section("skills")

    def get_skills_context(self) -> str:
        """Combine the sections relevant to skills extraction."""
        return f"""
Section Expériences:
{self.get_experiences_section()}
Section Projets:
{self.get_projects_section()}
Section Compétences:
{self.get_skills_section()}
"""
def _extract_json_payload(raw_output: str) -> str:
    """Strip Markdown code fences from an LLM response, returning the bare JSON text."""
    cleaned = raw_output.strip()
    if '```' in raw_output:
        if '```json' in raw_output:
            cleaned = raw_output.split('```json')[1].split('```')[0].strip()
        else:
            parts = raw_output.split('```')
            # A fenced block yields at least three parts: before/inside/after.
            if len(parts) >= 3:
                cleaned = parts[1].strip()
    return cleaned


def analyse_cv(cv_content: str) -> Any:
    """
    Analyze a CV end-to-end: split it into sections, then run the
    specialized extraction agents over those sections to build a profile.

    Args:
        cv_content: Raw text of the CV.

    Returns:
        The CrewAI result of the profile-building pipeline.
        (Note: the previous annotation `-> json` was invalid — `json` is a
        module, not a type.)
    """
    section_splitting_crew = Crew(
        agents=[cv_section_splitter_agent],
        tasks=[task_split_cv_sections],
        process=Process.sequential,
        verbose=False,
        telemetry=False
    )
    sections_result = section_splitting_crew.kickoff(inputs={"cv_content": cv_content})

    try:
        # CrewAI result objects expose the raw LLM text on `.raw`.
        if hasattr(sections_result, 'raw'):
            sections_json = sections_result.raw
        else:
            sections_json = str(sections_result)
        sections_data = json.loads(_extract_json_payload(sections_json))
        extractor = CVSectionExtractor(sections_data)
    except Exception:
        # Best-effort fallback: if the splitter output is not parseable JSON,
        # feed the whole CV to every agent rather than failing the pipeline.
        sections_data = {
            "contact": cv_content[:500],
            "experiences": cv_content,
            "projects": cv_content,
            "education": cv_content,
            "skills": cv_content,
            "other": ""
        }
        extractor = CVSectionExtractor(sections_data)

    main_crew = Crew(
        agents=[
            informations_personnelle_agent,
            skills_extractor_agent,
            experience_extractor_agent,
            project_extractor_agent,
            education_extractor_agent,
            reconversion_detector_agent,
            ProfileBuilderAgent
        ],
        tasks=[
            task_extract_informations,
            task_extract_skills,
            task_extract_experience,
            task_extract_projects,
            task_extract_education,
            task_detect_reconversion,
            task_build_profile
        ],
        process=Process.sequential,
        verbose=False,
        telemetry=False
    )
    main_inputs = {
        "contact": extractor.get_contact_section(),
        "experiences": extractor.get_experiences_section(),
        "projects": extractor.get_projects_section(),
        "education": extractor.get_education_section(),
        "skills": extractor.get_skills_section()
    }
    result = main_crew.kickoff(inputs=main_inputs)
    return result