Spaces:
Running
Running
quentinL52 committed on
Commit ·
6da2b52
0
Parent(s):
Initial commit
Browse files- .gitignore +37 -0
- main.py +92 -0
- requirements.txt +22 -0
- src/config/agents.yaml +83 -0
- src/config/app_config.py +51 -0
- src/config/tasks.yaml +145 -0
- src/parser_flow/CV_agent_flow.py +232 -0
- src/services/cv_service.py +14 -0
.gitignore
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Environnement
|
| 2 |
+
.env
|
| 3 |
+
.env.*
|
| 4 |
+
!.env.example
|
| 5 |
+
venv/
|
| 6 |
+
env/
|
| 7 |
+
ENV/
|
| 8 |
+
.venv/
|
| 9 |
+
|
| 10 |
+
# Python
|
| 11 |
+
__pycache__/
|
| 12 |
+
*.py[cod]
|
| 13 |
+
*.pyc
|
| 14 |
+
*$py.class
|
| 15 |
+
*.so
|
| 16 |
+
.Python
|
| 17 |
+
|
| 18 |
+
# Tests
|
| 19 |
+
.pytest_cache/
|
| 20 |
+
.coverage
|
| 21 |
+
htmlcov/
|
| 22 |
+
|
| 23 |
+
# IDE
|
| 24 |
+
.vscode/
|
| 25 |
+
.idea/
|
| 26 |
+
*.swp
|
| 27 |
+
|
| 28 |
+
# Distribution
|
| 29 |
+
dist/
|
| 30 |
+
build/
|
| 31 |
+
*.egg-info/
|
| 32 |
+
|
| 33 |
+
# OS
|
| 34 |
+
.DS_Store
|
| 35 |
+
Thumbs.db
|
| 36 |
+
|
| 37 |
+
nul
|
main.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
import logging
import tempfile
import uuid

from fastapi import FastAPI, UploadFile, File, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from langtrace_python_sdk import langtrace, inject_additional_attributes

from src.services.cv_service import parse_cv

# Initialise tracing before any instrumented code runs.
langtrace.init(api_key=os.getenv("LANGTRACE_API_KEY"))

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(
    title="CV Parser API",
    description="parsing de CV agentique",
    version="2.0.0",
    docs_url="/docs",
    redoc_url="/redoc",
)

# Comma-separated allow-list, e.g. CORS_ORIGINS="https://a.com,https://b.com".
ALLOWED_ORIGINS = os.getenv("CORS_ORIGINS", "http://localhost:8000").split(",")

app.add_middleware(
    CORSMiddleware,
    allow_origins=ALLOWED_ORIGINS,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


class HealthCheck(BaseModel):
    # Minimal liveness payload returned by the root endpoint.
    status: str = "ok"


@app.get("/", response_model=HealthCheck, tags=["Status"])
async def health_check():
    """Liveness probe: always returns {"status": "ok"}."""
    return HealthCheck()


@app.post("/parse-cv/", tags=["CV Parsing"])
async def parse_cv_endpoint(
    file: UploadFile = File(...)
):
    """
    Parse an uploaded CV (PDF) and return the extracted data.

    Raises:
        HTTPException 400: if the upload is not a PDF.
        HTTPException 500: if parsing fails or produces no data.
    """
    if file.content_type != "application/pdf":
        raise HTTPException(status_code=400, detail="PDF file required")

    contents = await file.read()

    # The parser works on file paths, so spill the upload to a temp file.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp:
        tmp.write(contents)
        tmp_path = tmp.name

    try:
        # One session id per request so traces can be grouped together.
        session_id = str(uuid.uuid4())
        attributes = {
            "session.id": session_id,
            "user_id": session_id
        }

        async def _traced_parse():
            return await parse_cv(tmp_path)

        result = await inject_additional_attributes(
            _traced_parse,
            attributes
        )
    except HTTPException:
        # Do not wrap an intentional HTTP error into a generic 500.
        raise
    except Exception as e:
        logger.error(f"Error processing CV: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Internal Server Error: {str(e)}")
    finally:
        # Always remove the temp file, on success and on failure alike.
        if os.path.exists(tmp_path):
            os.remove(tmp_path)

    if not result:
        raise HTTPException(status_code=500, detail="Failed to extract data from CV.")

    return result


if __name__ == "__main__":
    import uvicorn

    port = int(os.getenv("PORT", 8001))
    uvicorn.run(app, host="0.0.0.0", port=port)
|
requirements.txt
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi
|
| 2 |
+
uvicorn[standard]
|
| 3 |
+
pydantic
|
| 4 |
+
python-multipart
|
| 5 |
+
|
| 6 |
+
langchain-core
|
| 7 |
+
langchain-community
|
| 8 |
+
langchain-openai
|
| 9 |
+
langchain_groq
|
| 10 |
+
langchain-huggingface
|
| 11 |
+
crewai
|
| 12 |
+
crewai-tools
|
| 13 |
+
accelerate
|
| 14 |
+
pymupdf4llm
|
| 15 |
+
python-dotenv
|
| 16 |
+
requests
|
| 17 |
+
litellm
|
| 18 |
+
|
| 19 |
+
httpx==0.28.1
|
| 20 |
+
langtrace-python-sdk
|
| 21 |
+
sentence-transformers
|
| 22 |
+
setuptools<70.0.0
|
src/config/agents.yaml
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
cv_splitter:
|
| 2 |
+
role: >
|
| 3 |
+
Expert en Structure Documentaire
|
| 4 |
+
goal: >
|
| 5 |
+
Analyser et découper le CV en sections logiques.
|
| 6 |
+
backstory: >
|
| 7 |
+
Tu es un algorithme de parsing de haute précision capable de structurer n'importe quel document non structuré en format JSON clair.
|
| 8 |
+
Ta priorité est la fidélité de l'extraction et la séparation propre des sections.
|
| 9 |
+
verbose: false
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
skills_extractor:
|
| 13 |
+
role: >
|
| 14 |
+
Analyste de Compétences (Semantic Matcher)
|
| 15 |
+
goal: >
|
| 16 |
+
Identifier les compétences techniques et comportementales avec preuves et niveau de maîtrise
|
| 17 |
+
backstory: >
|
| 18 |
+
Expert technique capable de distinguer le simple "Name Dropping" de la compétence réelle.
|
| 19 |
+
Tu cherches des preuves d'utilisation (contexte) pour valider chaque compétence, si une compétence est mentionnée sans contexte indique sans contexte, si une compétence est presente sur projet et experience indique les 2.
|
| 20 |
+
verbose: false
|
| 21 |
+
|
| 22 |
+
experience_extractor:
|
| 23 |
+
role: >
|
| 24 |
+
Extracteur d'expériences
|
| 25 |
+
goal: >
|
| 26 |
+
Extraire les expériences professionnelles
|
| 27 |
+
backstory: >
|
| 28 |
+
Expert en analyse de parcours professionnels.
|
| 29 |
+
verbose: false
|
| 30 |
+
|
| 31 |
+
project_extractor:
|
| 32 |
+
role: >
|
| 33 |
+
Extracteur de projets
|
| 34 |
+
goal: >
|
| 35 |
+
Identifier projets professionnels et personnels
|
| 36 |
+
backstory: >
|
| 37 |
+
Spécialiste en identification de projets significatifs.
|
| 38 |
+
verbose: false
|
| 39 |
+
|
| 40 |
+
education_extractor:
|
| 41 |
+
role: >
|
| 42 |
+
Extracteur de formations
|
| 43 |
+
goal: >
|
| 44 |
+
Extraire formations et diplômes
|
| 45 |
+
backstory: >
|
| 46 |
+
Expert en analyse de parcours académiques.
|
| 47 |
+
verbose: false
|
| 48 |
+
|
| 49 |
+
reconversion_detector:
|
| 50 |
+
role: >
|
| 51 |
+
Détecteur de reconversion
|
| 52 |
+
goal: >
|
| 53 |
+
Detecter les changements de carrière
|
| 54 |
+
backstory: >
|
| 55 |
+
Conseiller d'orientation expert en transitions de carrière.
|
| 56 |
+
verbose: false
|
| 57 |
+
|
| 58 |
+
etudiant_detector:
|
| 59 |
+
role: >
|
| 60 |
+
Détecteur d'étudiant
|
| 61 |
+
goal: >
|
| 62 |
+
Detecter les candidats étudiants
|
| 63 |
+
backstory: >
|
| 64 |
+
spécialisé dans la détection d'études en cours.
|
| 65 |
+
verbose: false
|
| 66 |
+
|
| 67 |
+
language_extractor:
|
| 68 |
+
role: >
|
| 69 |
+
Extracteur de langues
|
| 70 |
+
goal: >
|
| 71 |
+
Identifier les langues parlées et leur niveau de maîtrise
|
| 72 |
+
backstory: >
|
| 73 |
+
Expert en évaluation linguistique capable d'identifier les langues et compétences linguistiques.
|
| 74 |
+
verbose: false
|
| 75 |
+
|
| 76 |
+
identity_extractor:
|
| 77 |
+
role: >
|
| 78 |
+
Extracteur d'identité
|
| 79 |
+
goal: >
|
| 80 |
+
Extraire les informations d'identité du candidat
|
| 81 |
+
backstory: >
|
| 82 |
+
Expert en identification de personnes.
|
| 83 |
+
verbose: false
|
src/config/app_config.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
"""Application configuration: environment loading, PDF helper and LLM factories."""

import os

from dotenv import load_dotenv

# Load .env before any API key is read below.
load_dotenv()

import pymupdf4llm
import litellm
from langchain_groq import ChatGroq
from langchain_openai import ChatOpenAI

# Silence litellm's per-request logging.
litellm.set_verbose = False

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
GROQ_API_KEY = os.getenv("GROQ_API_KEY")


def load_pdf(pdf_path: str) -> str:
    """Convert the PDF at *pdf_path* to markdown text."""
    return pymupdf4llm.to_markdown(pdf_path)


def get_big_llm() -> ChatOpenAI:
    """GPT-4o for complex tasks."""
    return ChatOpenAI(
        model="gpt-4o",
        temperature=0.0,
        api_key=OPENAI_API_KEY
    )


def get_small_llm() -> ChatOpenAI:
    """GPT-4o-mini for extraction, capped at 1500 output tokens."""
    return ChatOpenAI(
        model="gpt-4o-mini",
        temperature=0.0,
        max_tokens=1500,
        api_key=OPENAI_API_KEY
    )


def get_fast_llm() -> ChatGroq:
    """Groq llama-3.1-8b — the fastest option, capped at 1500 output tokens."""
    return ChatGroq(
        model="groq/llama-3.1-8b-instant",
        temperature=0.0,
        max_tokens=1500,
        groq_api_key=GROQ_API_KEY
    )


def get_openai_small_llm() -> ChatOpenAI:
    """GPT-4o-mini fallback — unlike get_small_llm, no max_tokens cap."""
    return ChatOpenAI(
        model="gpt-4o-mini",
        temperature=0.0,
        api_key=OPENAI_API_KEY
    )
|
src/config/tasks.yaml
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
split_cv_task:
|
| 2 |
+
description: >
|
| 3 |
+
ANALYSE EXTRÊMEMENT RAPIDE REQUISE.
|
| 4 |
+
Ton objectif unique est de découper le texte brut du CV en blocs JSON.
|
| 5 |
+
Ne résume pas. Ne reformule pas. Copie-colle le texte brut dans les sections correspondantes.
|
| 6 |
+
|
| 7 |
+
TEXTE DU CV :
|
| 8 |
+
"{cv_content}"
|
| 9 |
+
|
| 10 |
+
RÈGLES STRICTES :
|
| 11 |
+
1. "experiences": Contient UNIQUEMENT les blocs parlant d'historique professionnel (Entreprise, Poste, Dates).
|
| 12 |
+
2. "projects": Contient UNIQUEMENT les sections explicitement titrées "Projets" ou les descriptions de projets académiques/personnels distincts. SI PAS DE SECTION PROJET, LAISSER VIDE. Ne pas inventer.
|
| 13 |
+
3. "skills": Contient les listes de compétences, langages, outils.
|
| 14 |
+
4. "education": Contient les diplômes et formations.
|
| 15 |
+
5. "languages": Contient les langues mentionnées (Français, Anglais, etc.) avec éventuellement leur niveau.
|
| 16 |
+
6. "personal_info": Contient les informations personnelles (Prénom).
|
| 17 |
+
expected_output: >
|
| 18 |
+
Un objet JSON valide strictement structuré ainsi :
|
| 19 |
+
{{
|
| 20 |
+
"experiences": "texte brut...",
|
| 21 |
+
"projects": "texte brut...",
|
| 22 |
+
"education": "texte brut...",
|
| 23 |
+
"skills": "texte brut...",
|
| 24 |
+
"languages": "texte brut...",
|
| 25 |
+
"personal_info": "texte brut..."
|
| 26 |
+
}}
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
skills_task:
|
| 30 |
+
description: >
|
| 31 |
+
Analyse les sections suivantes pour lister les compétences avec leur contexte d'utilisation.
|
| 32 |
+
Expériences : {experiences}
|
| 33 |
+
Projets : {projects}
|
| 34 |
+
academique : {education}
|
| 35 |
+
Skills Explicit : {skills}
|
| 36 |
+
|
| 37 |
+
RÈGLES DE CLASSIFICATION :
|
| 38 |
+
1. 'hard_skills' : Outils, langages, technos (ex: Python, SQL, Excel, React, FastAPI, LangChain).
|
| 39 |
+
IMPORTANT: Inclure TOUTES les technologies mentionnées dans les projets, expériences et académique.
|
| 40 |
+
2. 'soft_skills' : Qualités humaines (ex: Leadership, Communication, Rigueur).
|
| 41 |
+
3. NE PAS INVENTER. Si ce n'est pas écrit, ne l'ajoute pas.
|
| 42 |
+
4. Pour chaque skill, indique si elle est présente dans une expérience, un projet, academique, sans contexte, si elle est presente dans plusieurs section indique le .
|
| 43 |
+
expected_output: >
|
| 44 |
+
JSON : {{
|
| 45 |
+
"hard_skills": ["Python", "SQL", "FastAPI"],
|
| 46 |
+
"soft_skills": ["Rigueur", "Leadership"],
|
| 47 |
+
"skills_with_context": [
|
| 48 |
+
{{"skill": "Python", "context": "projet"}},
|
| 49 |
+
{{"skill": "FastAPI", "context": "entreprise"}}
|
| 50 |
+
]
|
| 51 |
+
}}
|
| 52 |
+
|
| 53 |
+
experience_task:
|
| 54 |
+
description: >
|
| 55 |
+
Tu es un extracteur de données strict. Analyse ce texte d'expérience :
|
| 56 |
+
"{experiences}"
|
| 57 |
+
|
| 58 |
+
Pour CHAQUE poste identifié :
|
| 59 |
+
1. Titre du poste
|
| 60 |
+
2. Nom de l'entreprise
|
| 61 |
+
3. Dates (Début - Fin)
|
| 62 |
+
4. Description (Liste des tâches/responsabilités)
|
| 63 |
+
|
| 64 |
+
RÈGLE : Ne confonds PAS les projets scolaires avec des expériences pro. Les stages et alternances SONT des expériences.
|
| 65 |
+
expected_output: >
|
| 66 |
+
Liste JSON : [{{"Poste": "...", "Entreprise": "...", "start_date": "...", "end_date": "...", "responsabilités": ["task 1", "task 2"]}}]
|
| 67 |
+
|
| 68 |
+
project_task:
|
| 69 |
+
description: >
|
| 70 |
+
Analyse ce texte de projets : "{projects}"
|
| 71 |
+
|
| 72 |
+
RÈGLES STRICTES DE STRUCTURE :
|
| 73 |
+
Chaque projet DOIT avoir exactement ces clés :
|
| 74 |
+
- "title" : Titre du projet
|
| 75 |
+
- "technologies" : Liste de strings (Ex: ["Python", "React"])
|
| 76 |
+
- "outcomes" : Liste de strings décrivant les résultats ou fonctionnalités (Ex: ["Appli déployée", "User base x2"])
|
| 77 |
+
- "domaine metier" : (exemple un projet en lien avec la recommandation d'emploi est classé comme recrutement/RH)
|
| 78 |
+
|
| 79 |
+
Sépare :
|
| 80 |
+
- "professional" : Projets réalisés en entreprise (distincts du simple poste).
|
| 81 |
+
- "personal" : Projets perso, Github, Hackathons, Écoles.
|
| 82 |
+
|
| 83 |
+
Si une liste est vide, renvoie [].
|
| 84 |
+
expected_output: >
|
| 85 |
+
JSON : {{
|
| 86 |
+
"professional": [
|
| 87 |
+
{{ "title": "Data Jobs", "technologies": ["Python", "API"], "outcomes": ["App crée", "Optimisation X"], "domaine metier": "recrutement/RH" }}
|
| 88 |
+
],
|
| 89 |
+
"personal": []
|
| 90 |
+
}}
|
| 91 |
+
|
| 92 |
+
education_task:
|
| 93 |
+
description: >
|
| 94 |
+
Analyse la section education : "{education}"
|
| 95 |
+
Extrait les diplômes.
|
| 96 |
+
expected_output: >
|
| 97 |
+
Liste JSON : [{{"degree": "...", "institution": "...", "start_date": "...", "end_date": "..."}}]
|
| 98 |
+
|
| 99 |
+
reconversion_task:
|
| 100 |
+
description: >
|
| 101 |
+
Analyse les expériences : "{experiences}"
|
| 102 |
+
Et "{education}"
|
| 103 |
+
Le candidat est-il en reconversion ? (Changement majeur de domaine récent).
|
| 104 |
+
indique aussi le contexte (de quel domaine ou poste vient-il et quelle est sa réorientation)
|
| 105 |
+
pour le contexte de reconversion tu dois prendre en compte les expériences et les formations.
|
| 106 |
+
expected_output: >
|
| 107 |
+
JSON : {{"reconversion_analysis": {{"is_reconversion": true/false, "context": "..."}}}}
|
| 108 |
+
|
| 109 |
+
etudiant_task:
|
| 110 |
+
description: >
|
| 111 |
+
Analyse la section education : "{education}"
|
| 112 |
+
Nous sommes le {current_date}.
|
| 113 |
+
Le candidat est-il ACTUELLEMENT étudiant ?
|
| 114 |
+
CRITÈRES :
|
| 115 |
+
1. Regarde les dates de fin des formations.
|
| 116 |
+
2. Identifie la date de fin de la formation la plus récente.
|
| 117 |
+
3. Si cette date est FUTURE par rapport à {current_date} ou si c'est écrit "En cours" / "Présent", alors is_etudiant = true.
|
| 118 |
+
4. Récupère explicitement cette date sous le champ 'latest_education_end_date' (format YYYY-MM-DD ou MM/YYYY ou "Present").
|
| 119 |
+
5. indique aussi le niveau d'études (ex: bac+2, bac+5) du diplome le plus proche de {current_date}.
|
| 120 |
+
6. indique la spécialité, exemple : ingénieur IA, data analyste, développeur frontend, etc.
|
| 121 |
+
expected_output: >
|
| 122 |
+
JSON : {{"etudiant_analysis": {{"is_etudiant": true/false, "niveau_etudes": "bac+5", "specialite": "data analyste", "latest_education_end_date": "YYYY-MM-DD"}}}}
|
| 123 |
+
|
| 124 |
+
language_task:
|
| 125 |
+
description: >
|
| 126 |
+
Analyse le texte suivant pour identifier les langues parlées par le candidat.
|
| 127 |
+
SECTION LANGUES : "{languages}"
|
| 128 |
+
|
| 129 |
+
RÈGLES :
|
| 130 |
+
1. Identifie chaque langue mentionnée dans la SECTION LANGUES.
|
| 131 |
+
2. Si la SECTION LANGUES est vide : le candidat parle au moins la LANGUE DU CV.
|
| 132 |
+
expected_output: >
|
| 133 |
+
JSON : {{"langues": [{{"langue": "Anglais"}}]}}
|
| 134 |
+
|
| 135 |
+
identity_task:
|
| 136 |
+
description: >
|
| 137 |
+
Analyse le texte suivant pour extraire UNIQUEMENT le prénom du candidat.
|
| 138 |
+
SECTION INFO PERSO : "{personal_info}"
|
| 139 |
+
|
| 140 |
+
RÈGLES :
|
| 141 |
+
1. Identifie le prénom du candidat.
|
| 142 |
+
2. Si plusieurs noms sont présents, essaie de déduire le prénom principal.
|
| 143 |
+
3. Si introuvable, renvoie une chaîne vide ou null.
|
| 144 |
+
expected_output: >
|
| 145 |
+
JSON : {{"first_name": "Jean"}}
|
src/parser_flow/CV_agent_flow.py
ADDED
|
@@ -0,0 +1,232 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
import yaml
|
| 5 |
+
import asyncio
|
| 6 |
+
from datetime import datetime
|
| 7 |
+
from typing import Dict, Any
|
| 8 |
+
from crewai import Agent, Task, Crew, Process
|
| 9 |
+
from src.config.app_config import get_small_llm, get_big_llm
|
| 10 |
+
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class CVAgentOrchestrator:
    """Orchestrates CrewAI agents to split a CV into sections and extract
    structured candidate data (skills, experiences, projects, education, ...)."""

    def __init__(self):
        self.llm = get_small_llm()
        self.big_llm = get_big_llm()
        self.agents_config = self._load_yaml("agents.yaml")
        self.tasks_config = self._load_yaml("tasks.yaml")
        self._create_agents()

    def _load_yaml(self, filename: str) -> Dict:
        """Load a YAML configuration file from src/config/."""
        base_path = os.path.dirname(os.path.dirname(__file__))
        config_path = os.path.join(base_path, "config", filename)
        with open(config_path, 'r', encoding='utf-8') as f:
            return yaml.safe_load(f)

    def _create_agents(self):
        """Instantiate one Agent per entry of agents.yaml."""
        def make_agent(name, llm_override=None):
            # Common settings shared by every agent; the LLM can be swapped.
            return Agent(
                config=self.agents_config[name],
                llm=llm_override or self.llm,
                allow_delegation=False,
                verbose=False,
                max_iter=1,
                respect_context_window=True
            )

        # The splitter gets the big model; every other agent uses the small one.
        self.cv_splitter = make_agent('cv_splitter', llm_override=self.big_llm)
        self.skills_extractor = make_agent('skills_extractor')
        self.experience_extractor = make_agent('experience_extractor')
        self.project_extractor = make_agent('project_extractor')
        self.education_extractor = make_agent('education_extractor')
        self.reconversion_detector = make_agent('reconversion_detector')
        self.language_extractor = make_agent('language_extractor')
        self.etudiant_detector = make_agent('etudiant_detector')
        self.identity_extractor = make_agent('identity_extractor')

    async def split_cv_sections(self, cv_content: str) -> Dict[str, str]:
        """Split the raw CV text into named sections via the splitter agent.

        Returns a dict such as {"experiences": ..., "skills": ...}; an empty
        dict when the agent output cannot be parsed as JSON.
        """
        task_config = self.tasks_config['split_cv_task'].copy()
        # Truncate to keep the prompt within the model's context window.
        task_config['description'] = task_config['description'].format(cv_content=cv_content[:20000])

        task = Task(
            config=task_config,
            agent=self.cv_splitter
        )
        crew = Crew(
            agents=[self.cv_splitter],
            tasks=[task],
            process=Process.sequential,
            verbose=False
        )
        result = await crew.kickoff_async()
        return self._parse_json_output(result, default_structure={})

    async def extract_all_sections(self, sections: Dict[str, str]) -> Dict[str, Any]:
        """Run all extraction tasks concurrently over the split sections."""
        def create_task_async(task_key, agent, **kwargs):
            # Build a single-task crew; return (key, kickoff coroutine).
            t_config = self.tasks_config[task_key].copy()
            t_config['description'] = t_config['description'].format(**kwargs)
            task = Task(config=t_config, agent=agent)
            c = Crew(agents=[agent], tasks=[task], verbose=False)
            return (task_key, c.kickoff_async())

        tasks_def = [
            ('skills_task', self.skills_extractor, {
                'experiences': sections.get('experiences', ''),
                'projects': sections.get('projects', ''),
                'skills': sections.get('skills', ''),
                'education': sections.get('education', '')
            }),
            ('experience_task', self.experience_extractor, {'experiences': sections.get('experiences', '')}),
            ('project_task', self.project_extractor, {'projects': sections.get('projects', '')}),
            ('education_task', self.education_extractor, {'education': sections.get('education', '')}),
            ('reconversion_task', self.reconversion_detector, {
                'experiences': sections.get('experiences', ''),
                'education': sections.get('education', '')
            }),
            ('language_task', self.language_extractor, {
                'languages': sections.get('languages', '')
            }),
            ('etudiant_task', self.etudiant_detector, {
                'education': sections.get('education', ''),
                'current_date': datetime.now().strftime("%Y-%m-%d")
            }),
            ('identity_task', self.identity_extractor, {
                'personal_info': sections.get('personal_info', '')
            })
        ]
        task_pairs = [create_task_async(key, agent, **kwargs) for key, agent, kwargs in tasks_def]
        keys = [pair[0] for pair in task_pairs]
        coroutines = [pair[1] for pair in task_pairs]
        results_list = await asyncio.gather(*coroutines, return_exceptions=True)

        # Keep successful results only; log and drop failed tasks so one
        # failing extractor does not sink the whole request.
        results_map = {}
        for key, result in zip(keys, results_list):
            if isinstance(result, Exception):
                logger.error(f"Task '{key}' failed: {result}")
            else:
                results_map[key] = result

        return self._aggregate_results(results_map)

    def _aggregate_results(self, results_map: Dict[str, Any]) -> Dict[str, Any]:
        """Merge the per-task outputs into the final candidate structure."""

        def get_parsed(key, default=None):
            if key not in results_map:
                return default
            return self._parse_json_output(results_map[key], default)

        def get_parsed_dict(key):
            # Guard: the LLM occasionally returns a list instead of an object;
            # calling .get on that would raise, so coerce to an empty dict.
            parsed = get_parsed(key, {})
            return parsed if isinstance(parsed, dict) else {}

        competences = get_parsed('skills_task', {"hard_skills": [], "soft_skills": []})
        experiences = get_parsed('experience_task', [])
        projets = get_parsed('project_task', {"professional": [], "personal": []})
        formations = get_parsed('education_task', [])
        reconversion = get_parsed_dict('reconversion_task').get("reconversion_analysis", {})
        etudiant_data = get_parsed_dict('etudiant_task').get("etudiant_analysis", {})
        if not isinstance(etudiant_data, dict):
            etudiant_data = {}
        latest_end_date = etudiant_data.get("latest_education_end_date")
        if latest_end_date:
            # Recompute the student flag from the date rather than trusting
            # the LLM's own is_etudiant judgement.
            etudiant_data["is_etudiant"] = self._is_still_student(latest_end_date)

        langues_raw = get_parsed('language_task', {})

        if isinstance(competences, dict):
            # Deduplicate hard_skills case-insensitively, preserving order.
            seen = set()
            unique_skills = []
            for skill in competences.get("hard_skills", []):
                dedup_key = str(skill).lower()
                if dedup_key not in seen:
                    seen.add(dedup_key)
                    unique_skills.append(skill)
            competences["hard_skills"] = unique_skills

        identity = get_parsed('identity_task', {})

        return {
            "candidat": {
                "first_name": identity.get("first_name") if isinstance(identity, dict) else None,
                "compétences": competences,
                "expériences": experiences,
                "reconversion": reconversion,
                "projets": projets,
                "formations": formations,
                "etudiant": etudiant_data,
                "langues": langues_raw.get("langues", []) if isinstance(langues_raw, dict) else [],
            }
        }

    def _is_still_student(self, date_str: str) -> bool:
        """Return True when *date_str* denotes an ongoing or future end of studies.

        Accepts "YYYY-MM-DD", "YYYY-MM", "MM/YYYY", "MM/YY", bare "YYYY",
        or textual markers ("present", "en cours", ...). Unknown formats
        return False.
        """
        if not date_str:
            return False
        date_str = str(date_str).lower().strip()
        # Textual "still studying" markers, French and English.
        ongoing_keywords = ["present", "présent", "current", "cours", "aujourd'hui", "now"]
        if any(keyword in date_str for keyword in ongoing_keywords):
            return True

        try:
            now = datetime.now()
            end_date = None
            if len(date_str) == 10 and date_str[4] == '-' and date_str[7] == '-':
                end_date = datetime.strptime(date_str, "%Y-%m-%d")
            elif len(date_str) == 7 and date_str[4] == '-':
                end_date = datetime.strptime(date_str, "%Y-%m")
            elif '/' in date_str:
                parts = date_str.split('/')
                if len(parts) == 2:
                    m, y = parts
                    if len(y) == 4:
                        end_date = datetime.strptime(date_str, "%m/%Y")
                    elif len(y) == 2:
                        end_date = datetime.strptime(date_str, "%m/%y")

            elif len(date_str) == 4 and date_str.isdigit():
                # Bare year: count the candidate as a student until year end.
                end_date = datetime.strptime(date_str, "%Y")
                end_date = end_date.replace(month=12, day=31)

            if end_date:
                return end_date >= now

            return False

        except (ValueError, IndexError):
            logger.warning(f"Date parsing failed for: {date_str}")
            return False

    def _parse_json_output(self, crew_output, default_structure=None) -> Any:
        """Best-effort JSON extraction from a crew output (or plain string).

        Strips markdown code fences and a BOM, then falls back to locating
        the first {...} or [...] span. Returns *default_structure* (or {})
        when nothing parses.
        """
        raw = crew_output.raw if hasattr(crew_output, 'raw') else str(crew_output)

        if '```json' in raw:
            raw = raw.split('```json')[1].split('```')[0].strip()
        elif '```' in raw:
            parts = raw.split('```')
            if len(parts) >= 3:
                raw = parts[1].strip()

        # Clean common LLM artifacts
        raw = raw.strip().lstrip('\ufeff')  # BOM

        try:
            return json.loads(raw)
        except json.JSONDecodeError:
            # Try to find the first JSON object or array in the output
            for start_char, end_char in [('{', '}'), ('[', ']')]:
                start_idx = raw.find(start_char)
                end_idx = raw.rfind(end_char)
                if start_idx != -1 and end_idx > start_idx:
                    try:
                        return json.loads(raw[start_idx:end_idx + 1])
                    except json.JSONDecodeError:
                        continue

            logger.error(f"JSON Parse Error (after cleanup): {raw[:200]}")
            return default_structure if default_structure is not None else {}
|
src/services/cv_service.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from typing import Dict, Any
|
| 3 |
+
from src.config.app_config import load_pdf
|
| 4 |
+
from src.parser_flow.CV_agent_flow import CVAgentOrchestrator
|
| 5 |
+
|
| 6 |
+
logger = logging.getLogger(__name__)
|
| 7 |
+
|
| 8 |
+
async def parse_cv(pdf_path: str, user_id: str = None) -> Dict[str, Any]:
    """Parse the PDF CV at *pdf_path* into structured candidate data.

    Args:
        pdf_path: Path of the PDF file on disk.
        user_id: Currently unused; accepted for caller compatibility.

    Returns:
        The aggregated candidate data produced by CVAgentOrchestrator.
    """
    orchestrator = CVAgentOrchestrator()
    cv_text = load_pdf(pdf_path)
    sections = await orchestrator.split_cv_sections(cv_text)
    cv_data = await orchestrator.extract_all_sections(sections)
    return cv_data
|
| 14 |
+
|