from crewai import Crew, Process
import json
from .agents import (
    report_generator_agent,
    cv_section_splitter_agent,
    skills_extractor_agent,
    experience_extractor_agent,
    project_extractor_agent,
    education_extractor_agent,
    ProfileBuilderAgent,
    informations_personnelle_agent,
    reconversion_detector_agent,
)
from .tasks import (
    generate_report_task,
    task_split_cv_sections,
    task_extract_skills,
    task_extract_experience,
    task_extract_projects,
    task_extract_education,
    task_build_profile,
    task_extract_informations,
    task_detect_reconversion,
)
from src.deep_learning_analyzer import MultiModelInterviewAnalyzer
from src.rag_handler import RAGHandler

def run_interview_analysis(conversation_history: list, job_description_text: list) -> str:
    """
    Call this tool at the very end of a job interview to analyze the
    entire conversation and generate a feedback report.
    Do NOT use it to answer a regular question; use it only to conclude
    and analyze the interview.
    """
    # Run the deep-learning analysis (intent + sentiment) over the whole conversation.
    analyzer = MultiModelInterviewAnalyzer()
    structured_analysis = analyzer.run_full_analysis(conversation_history, job_description_text)

    # Enrich the analysis with contextual advice retrieved from the RAG store.
    rag_handler = RAGHandler()
    rag_feedback = []
    if structured_analysis.get("intent_analysis"):
        for intent in structured_analysis["intent_analysis"]:
            # Query kept in French to match the RAG corpus:
            # "Advice for a candidate who is trying to <detected intent>"
            query = f"Conseils pour un candidat qui cherche à {intent['labels'][0]}"
            rag_feedback.extend(rag_handler.get_relevant_feedback(query))
    if structured_analysis.get("sentiment_analysis"):
        for sentiment_group in structured_analysis["sentiment_analysis"]:
            for sentiment in sentiment_group:
                # The score may be a torch tensor; float() handles both tensors and plain floats.
                if sentiment['label'] == 'stress' and float(sentiment['score']) > 0.6:
                    # Query: "managing stress during an interview"
                    rag_feedback.extend(rag_handler.get_relevant_feedback("gestion du stress en entretien"))
    # Deduplicate the retrieved snippets (note: set() does not preserve order).
    unique_feedback = list(set(rag_feedback))

    # A single-agent crew turns the structured analysis into a readable report.
    interview_crew = Crew(
        agents=[report_generator_agent],
        tasks=[generate_report_task],
        process=Process.sequential,
        verbose=False,
        telemetry=False
    )
    final_report = interview_crew.kickoff(inputs={
        'structured_analysis_data': json.dumps(structured_analysis, indent=2),
        'rag_contextual_feedback': "\n".join(unique_feedback)
    })
    # kickoff() may return a CrewOutput object rather than a plain string.
    return final_report.raw if hasattr(final_report, 'raw') else str(final_report)
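
# Usage sketch (the message format below is illustrative; the exact schema
# expected by MultiModelInterviewAnalyzer.run_full_analysis is defined in
# src/deep_learning_analyzer.py):
#
#     history = [
#         {"role": "interviewer", "content": "Tell me about yourself."},
#         {"role": "candidate", "content": "I spent five years as a data analyst..."},
#     ]
#     report = run_interview_analysis(history, ["Job description text..."])
#     print(report)
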
class CVSectionExtractor:
    """
    Section extractor that consumes the output of the CVSectionSplitterAgent
    and routes the relevant content to each specialized agent.
    """
    def __init__(self, sections_data: dict):
        self.sections = sections_data

    def get_contact_section(self) -> str:
        return self.sections.get("contact", "")

    def get_experiences_section(self) -> str:
        return self.sections.get("experiences", "")

    def get_projects_section(self) -> str:
        return self.sections.get("projects", "")

    def get_education_section(self) -> str:
        return self.sections.get("education", "")

    def get_skills_section(self) -> str:
        return self.sections.get("skills", "")

    def get_skills_context(self) -> str:
        """Combine the relevant sections for skills extraction.

        Headings are kept in French to match the French-language prompts
        used by the downstream agents.
        """
        return f"""
Section Expériences:
{self.get_experiences_section()}
Section Projets:
{self.get_projects_section()}
Section Compétences:
{self.get_skills_section()}
"""
def analyse_cv(cv_content: str) -> str:
    # Step 1: a single-agent crew splits the raw CV into named sections.
    section_splitting_crew = Crew(
        agents=[cv_section_splitter_agent],
        tasks=[task_split_cv_sections],
        process=Process.sequential,
        verbose=False,
        telemetry=False
    )
    sections_result = section_splitting_crew.kickoff(inputs={"cv_content": cv_content})

    try:
        # kickoff() may return a CrewOutput object; fall back to str() otherwise.
        if hasattr(sections_result, 'raw'):
            sections_json = sections_result.raw
        else:
            sections_json = str(sections_result)

        # The LLM often wraps its JSON in Markdown code fences; strip them.
        sections_json_cleaned = sections_json.strip()
        if '```' in sections_json:
            if '```json' in sections_json:
                sections_json_cleaned = sections_json.split('```json')[1].split('```')[0].strip()
            else:
                parts = sections_json.split('```')
                if len(parts) >= 3:
                    sections_json_cleaned = parts[1].strip()

        sections_data = json.loads(sections_json_cleaned)
        extractor = CVSectionExtractor(sections_data)
    except Exception:
        # Fallback: if the splitter output cannot be parsed as JSON, feed the
        # full CV to every extractor so the pipeline still produces a result.
        sections_data = {
            "contact": cv_content[:500],
            "experiences": cv_content,
            "projects": cv_content,
            "education": cv_content,
            "skills": cv_content,
            "other": ""
        }
        extractor = CVSectionExtractor(sections_data)
    # Step 2: run the specialized extractors sequentially, then build the profile.
    main_crew = Crew(
        agents=[
            informations_personnelle_agent,
            skills_extractor_agent,
            experience_extractor_agent,
            project_extractor_agent,
            education_extractor_agent,
            reconversion_detector_agent,
            ProfileBuilderAgent
        ],
        tasks=[
            task_extract_informations,
            task_extract_skills,
            task_extract_experience,
            task_extract_projects,
            task_extract_education,
            task_detect_reconversion,
            task_build_profile
        ],
        process=Process.sequential,
        verbose=False,
        telemetry=False
    )
    main_inputs = {
        "contact": extractor.get_contact_section(),
        "experiences": extractor.get_experiences_section(),
        "projects": extractor.get_projects_section(),
        "education": extractor.get_education_section(),
        "skills": extractor.get_skills_section()
    }
    result = main_crew.kickoff(inputs=main_inputs)
    # As above, normalize CrewOutput to a plain string.
    return result.raw if hasattr(result, 'raw') else str(result)
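
# Minimal smoke-test sketch ("cv.txt" is a hypothetical local file; the agents,
# tasks, and configured LLM from .agents/.tasks must be available at runtime):
if __name__ == "__main__":
    with open("cv.txt", encoding="utf-8") as f:
        profile = analyse_cv(f.read())
    print(profile)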