Spaces:
Sleeping
Sleeping
Upload folder using huggingface_hub
Browse files- exemples/01_predict_single_employee.json +44 -0
- exemples/02_predict_batch_eval.csv +11 -0
- exemples/02_predict_batch_sirh.csv +11 -0
- exemples/02_predict_batch_sondage.csv +11 -0
- exemples/GUIDE_DEMO.txt +89 -0
- exemples/README.md +138 -0
- exemples/demo_batch.py +160 -0
- exemples/demo_batch_hf.py +119 -0
- exemples/demo_unitaire.py +146 -0
- exemples/demo_unitaire_hf.py +131 -0
- exemples/lancer_api.sh +44 -0
- exemples/predictions_batch_20260111_235739.csv +11 -0
- exemples/requirements.txt +3 -0
- src/gradio_ui.py +115 -0
exemples/01_predict_single_employee.json
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"description": "🔮 Exemple de prédiction unitaire - Prédit le risque de départ pour UN employé",
|
| 3 |
+
"api_endpoint": "POST /predict",
|
| 4 |
+
"authentication": "Header: X-API-Key: your-secret-key",
|
| 5 |
+
"response_includes": [
|
| 6 |
+
"prediction (0=Will Stay, 1=Will Leave)",
|
| 7 |
+
"probability_stay",
|
| 8 |
+
"probability_leave",
|
| 9 |
+
"risk_level (Low/Medium/High)"
|
| 10 |
+
],
|
| 11 |
+
"employee_data": {
|
| 12 |
+
"nombre_participation_pee": 1,
|
| 13 |
+
"nb_formations_suivies": 3,
|
| 14 |
+
"nombre_employee_sous_responsabilite": 1,
|
| 15 |
+
"distance_domicile_travail": 8,
|
| 16 |
+
"niveau_education": 1,
|
| 17 |
+
"domaine_etude": "Infra & Cloud",
|
| 18 |
+
"ayant_enfants": "Y",
|
| 19 |
+
"frequence_deplacement": "Frequent",
|
| 20 |
+
"annees_depuis_la_derniere_promotion": 1,
|
| 21 |
+
"annes_sous_responsable_actuel": 7,
|
| 22 |
+
"satisfaction_employee_environnement": 3,
|
| 23 |
+
"note_evaluation_precedente": 2,
|
| 24 |
+
"niveau_hierarchique_poste": 2,
|
| 25 |
+
"satisfaction_employee_nature_travail": 2,
|
| 26 |
+
"satisfaction_employee_equipe": 4,
|
| 27 |
+
"satisfaction_employee_equilibre_pro_perso": 3,
|
| 28 |
+
"note_evaluation_actuelle": 4,
|
| 29 |
+
"heure_supplementaires": "Non",
|
| 30 |
+
"augementation_salaire_precedente": 23,
|
| 31 |
+
"age": 49,
|
| 32 |
+
"genre": "M",
|
| 33 |
+
"revenu_mensuel": 5130,
|
| 34 |
+
"statut_marital": "Marié(e)",
|
| 35 |
+
"departement": "Consulting",
|
| 36 |
+
"poste": "Assistant de Direction",
|
| 37 |
+
"nombre_experiences_precedentes": 1,
|
| 38 |
+
"nombre_heures_travailless": 80,
|
| 39 |
+
"annee_experience_totale": 10,
|
| 40 |
+
"annees_dans_l_entreprise": 10,
|
| 41 |
+
"annees_dans_le_poste_actuel": 7
|
| 42 |
+
},
|
| 43 |
+
"curl_example": "curl -X POST http://localhost:8000/predict \\ -H 'X-API-Key: your-secret-key' \\ -H 'Content-Type: application/json' \\ -d @01_predict_single_employee.json"
|
| 44 |
+
}
|
exemples/02_predict_batch_eval.csv
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
satisfaction_employee_environnement,note_evaluation_precedente,niveau_hierarchique_poste,satisfaction_employee_nature_travail,satisfaction_employee_equipe,satisfaction_employee_equilibre_pro_perso,eval_number,note_evaluation_actuelle,heure_supplementaires,augementation_salaire_precedente
|
| 2 |
+
2,3,2,4,1,1,1,3,Oui,11 %
|
| 3 |
+
3,2,2,2,4,3,2,4,Non,23 %
|
| 4 |
+
4,2,1,3,2,3,3,3,Oui,15 %
|
| 5 |
+
4,3,1,3,3,3,4,3,Oui,11 %
|
| 6 |
+
2,2,2,3,2,2,5,3,Non,8 %
|
| 7 |
+
3,3,1,4,4,3,6,4,Oui,20 %
|
| 8 |
+
4,2,2,3,3,2,7,3,Non,12 %
|
| 9 |
+
2,3,1,2,2,1,8,3,Oui,18 %
|
| 10 |
+
3,2,2,4,3,3,9,3,Non,14 %
|
| 11 |
+
4,4,1,4,4,4,10,4,Non,25 %
|
exemples/02_predict_batch_sirh.csv
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
id_employee,age,genre,revenu_mensuel,statut_marital,departement,poste,nombre_experiences_precedentes,nombre_heures_travailless,annee_experience_totale,annees_dans_l_entreprise,annees_dans_le_poste_actuel
|
| 2 |
+
1,41,F,5993,Célibataire,Commercial,Cadre Commercial,8,80,8,6,4
|
| 3 |
+
2,49,M,5130,Marié(e),Consulting,Assistant de Direction,1,80,10,10,7
|
| 4 |
+
3,37,M,2090,Célibataire,Consulting,Consultant,6,80,7,0,0
|
| 5 |
+
4,33,F,2909,Marié(e),Consulting,Assistant de Direction,1,80,8,8,7
|
| 6 |
+
5,45,M,4500,Célibataire,Commercial,Cadre Commercial,5,80,12,5,3
|
| 7 |
+
6,38,F,3200,Marié(e),Ressources Humaines,Ressources Humaines,2,80,6,6,2
|
| 8 |
+
7,52,M,6200,Marié(e),Consulting,Tech Lead,7,80,15,12,8
|
| 9 |
+
8,28,F,2500,Célibataire,Commercial,Représentant Commercial,1,80,2,2,1
|
| 10 |
+
9,44,M,7000,Marié(e),Consulting,Senior Manager,6,80,18,10,5
|
| 11 |
+
10,35,F,3800,Divorcé(e),Ressources Humaines,Manager,3,80,9,4,2
|
exemples/02_predict_batch_sondage.csv
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nombre_participation_pee,nb_formations_suivies,nombre_employee_sous_responsabilite,code_sondage,distance_domicile_travail,niveau_education,domaine_etude,ayant_enfants,frequence_deplacement,annees_depuis_la_derniere_promotion,annes_sous_responsable_actuel
|
| 2 |
+
0,0,1,1,1,2,Infra & Cloud,Y,Occasionnel,0,5
|
| 3 |
+
1,3,1,2,8,1,Infra & Cloud,Y,Frequent,1,7
|
| 4 |
+
0,3,1,3,2,2,Autre,Y,Occasionnel,0,0
|
| 5 |
+
0,3,1,4,3,4,Infra & Cloud,Y,Frequent,3,0
|
| 6 |
+
2,2,1,5,5,3,Marketing,N,Aucun,2,3
|
| 7 |
+
1,1,1,6,10,2,Transformation Digitale,Y,Frequent,0,8
|
| 8 |
+
0,2,1,7,4,1,Ressources Humaines,Y,Occasionnel,1,2
|
| 9 |
+
2,4,1,8,7,4,Infra & Cloud,N,Frequent,2,6
|
| 10 |
+
1,2,1,9,6,2,Entrepreunariat,Y,Aucun,3,4
|
| 11 |
+
3,3,1,10,9,3,Autre,Y,Frequent,1,5
|
exemples/GUIDE_DEMO.txt
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
╔══════════════════════════════════════════════════════════════════╗
|
| 2 |
+
║ ║
|
| 3 |
+
║ 🚀 GUIDE DÉMONSTRATION - Employee Turnover API ║
|
| 4 |
+
║ Par défaut : API locale http://127.0.0.1:7860 ║
|
| 5 |
+
║ ║
|
| 6 |
+
╚══════════════════════════════════════════════════════════════════╝
|
| 7 |
+
|
| 8 |
+
⚙️ CONFIGURATION DE L'URL
|
| 9 |
+
──────────────────────────────────────────────────────────────────
|
| 10 |
+
Par défaut, les scripts utilisent l'API locale : http://127.0.0.1:7860
|
| 11 |
+
|
| 12 |
+
Pour utiliser l'API Hugging Face Spaces, utilisez les scripts dédiés :
|
| 13 |
+
- demo_unitaire_hf.py
|
| 14 |
+
- demo_batch_hf.py
|
| 15 |
+
|
| 16 |
+
Optionnel : surcharger l'URL via la variable d'environnement :
|
| 17 |
+
HF_API_URL="https://asi-engineer-oc-p5.hf.space"
|
| 18 |
+
Optionnel : si la Space protège les endpoints, ajouter une API key :
|
| 19 |
+
HF_API_KEY="votre-cle"
|
| 20 |
+
|
| 21 |
+
Si FastAPI n'est pas exposé sur la Space, le script batch tentera automatiquement l'API Gradio `/api/predict_batch` (nécessite l'onglet Batch activé dans l'interface).
|
| 22 |
+
|
| 23 |
+
📋 INSTALLATION (une seule fois)
|
| 24 |
+
──────────────────────────────────────────────────────────────────
|
| 25 |
+
pip install requests pandas
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
🚀 LANCER L'API LOCALE (nécessaire pour la démo)
|
| 29 |
+
──────────────────────────────────────────────────────────────────
|
| 30 |
+
./lancer_api.sh
|
| 31 |
+
|
| 32 |
+
Ou manuellement depuis le dossier racine :
|
| 33 |
+
cd ..
|
| 34 |
+
poetry run uvicorn api:app --host 127.0.0.1 --port 7860
|
| 35 |
+
|
| 36 |
+
L'API sera accessible sur http://127.0.0.1:7860
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
🔮 DÉMO 1 : PRÉDICTION UNITAIRE (1 employé)
|
| 40 |
+
──────────────────────────────────────────────────────────────────
|
| 41 |
+
python demo_unitaire.py
|
| 42 |
+
|
| 43 |
+
→ Le script pose des questions sur l'employé
|
| 44 |
+
→ Affiche directement le résultat de prédiction
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
📦 DÉMO 2 : PRÉDICTION BATCH (plusieurs employés)
|
| 48 |
+
──────────────────────────────────────────────────────────────────
|
| 49 |
+
python demo_batch.py
|
| 50 |
+
|
| 51 |
+
→ Demande 3 fichiers CSV (sondage, eval, sirh)
|
| 52 |
+
→ Génère un fichier CSV avec tous les résultats
|
| 53 |
+
→ Nom du fichier : predictions_batch_YYYYMMDD_HHMMSS.csv
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
✅ TEST RAPIDE (avec fichiers d'exemple fournis)
|
| 57 |
+
──────────────────────────────────────────────────────────────────
|
| 58 |
+
Quand demo_batch.py demande si vous voulez utiliser les fichiers
|
| 59 |
+
d'exemple, tapez simplement "O" ou appuyez sur Entrée.
|
| 60 |
+
|
| 61 |
+
Les 3 fichiers d'exemple (10 employés) seront utilisés automatiquement :
|
| 62 |
+
- 02_predict_batch_sondage.csv
|
| 63 |
+
- 02_predict_batch_eval.csv
|
| 64 |
+
- 02_predict_batch_sirh.csv
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
🎯 JOUR J - CHECKLIST
|
| 68 |
+
──────────────────────────────────────────────────────────────────
|
| 69 |
+
□ Lancer l'API locale : poetry run uvicorn api:app --host 127.0.0.1 --port 7860
|
| 70 |
+
(depuis le dossier racine du projet, ou ./lancer_api.sh depuis exemples/)
|
| 71 |
+
□ Ou configurer l'URL HF Spaces dans les scripts
|
| 72 |
+
(via HF_API_URL avec demo_unitaire_hf.py / demo_batch_hf.py)
|
| 73 |
+
□ Préparer les 3 fichiers CSV batch si nécessaire
|
| 74 |
+
□ Tester : python demo_batch.py
|
| 75 |
+
□ Vérifier que le CSV de sortie est bien généré
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
📄 FICHIERS DANS CE DOSSIER
|
| 79 |
+
──────────────────────────────────────────────────────────────────
|
| 80 |
+
lancer_api.sh → Lance l'API locale facilement
|
| 81 |
+
demo_unitaire.py → Script démo prédiction 1 employé
|
| 82 |
+
demo_batch.py → Script démo prédiction batch
|
| 83 |
+
demo_unitaire_hf.py → Script démo unitaire via Hugging Face
|
| 84 |
+
demo_batch_hf.py → Script démo batch via Hugging Face
|
| 85 |
+
02_predict_batch_*.csv → Fichiers d'exemple (10 employés)
|
| 86 |
+
README.md → Documentation détaillée
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
C'EST TOUT ! 🎉
|
exemples/README.md
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 🚀 DÉMONSTRATION API Employee Turnover
|
| 2 |
+
|
| 3 |
+
**Par défaut** : API locale `http://127.0.0.1:7860`
|
| 4 |
+
**Production** : Hugging Face Spaces `https://asi-engineer-oc-p5.hf.space`
|
| 5 |
+
|
| 6 |
+
## ⚙️ Configuration
|
| 7 |
+
|
| 8 |
+
Les scripts locaux utilisent par défaut l'API locale. Pour la Space Hugging Face, des scripts dédiés sont fournis et acceptent `HF_API_URL` comme variable d'environnement.
|
| 9 |
+
|
| 10 |
+
## 📋 Installation
|
| 11 |
+
|
| 12 |
+
```bash
|
| 13 |
+
pip install requests pandas
|
| 14 |
+
```
|
| 15 |
+
|
| 16 |
+
## 🚀 Lancer l'API locale
|
| 17 |
+
|
| 18 |
+
**Option 1** : Script automatique
|
| 19 |
+
```bash
|
| 20 |
+
./lancer_api.sh
|
| 21 |
+
```
|
| 22 |
+
|
| 23 |
+
**Option 2** : Commande manuelle
|
| 24 |
+
```bash
|
| 25 |
+
cd .. # Retour au dossier racine
|
| 26 |
+
poetry run uvicorn api:app --host 127.0.0.1 --port 7860
|
| 27 |
+
```
|
| 28 |
+
|
| 29 |
+
L'API sera disponible sur `http://127.0.0.1:7860`
|
| 30 |
+
|
| 31 |
+
## 🔮 Prédiction UNITAIRE (1 employé)
|
| 32 |
+
|
| 33 |
+
**Usage ultra-simple** : Le script pose toutes les questions une par une.
|
| 34 |
+
|
| 35 |
+
```bash
|
| 36 |
+
python demo_unitaire.py
|
| 37 |
+
```
|
| 38 |
+
|
| 39 |
+
Le script demande les informations de l'employé, interroge l'API et affiche le résultat immédiatement.
|
| 40 |
+
|
| 41 |
+
**Exemple de sortie** :
|
| 42 |
+
```
|
| 43 |
+
📊 RÉSULTAT
|
| 44 |
+
══════════════════════════════════════════════════════════
|
| 45 |
+
✅ PRÉDICTION: L'EMPLOYÉ VA RESTER
|
| 46 |
+
🎯 Niveau de risque: Low
|
| 47 |
+
Probabilité de rester: 85.2%
|
| 48 |
+
Probabilité de partir: 14.8%
|
| 49 |
+
```
|
| 50 |
+
|
| 51 |
+
---
|
| 52 |
+
|
| 53 |
+
## 📦 Prédiction BATCH (fichiers CSV)
|
| 54 |
+
|
| 55 |
+
**Usage ultra-simple** : Fournit 3 fichiers CSV, obtient 1 CSV de résultats.
|
| 56 |
+
|
| 57 |
+
```bash
|
| 58 |
+
python demo_batch.py
|
| 59 |
+
```
|
| 60 |
+
|
| 61 |
+
Le script demande les chemins des 3 fichiers CSV :
|
| 62 |
+
1. Fichier sondage
|
| 63 |
+
2. Fichier évaluation
|
| 64 |
+
3. Fichier SIRH
|
| 65 |
+
|
| 66 |
+
**Il génère automatiquement** : `predictions_batch_YYYYMMDD_HHMMSS.csv` dans le dossier courant.
|
| 67 |
+
|
| 68 |
+
**Exemple de sortie** :
|
| 69 |
+
```
|
| 70 |
+
📊 RÉSUMÉ
|
| 71 |
+
══════════════════════════════════════════════════════════
|
| 72 |
+
✅ Employés qui vont RESTER: 8
|
| 73 |
+
🏃 Employés qui vont PARTIR: 2
|
| 74 |
+
🔴 Risque ÉLEVÉ: 1
|
| 75 |
+
🟡 Risque MOYEN: 2
|
| 76 |
+
🟢 Risque FAIBLE: 7
|
| 77 |
+
|
| 78 |
+
💾 Résultats sauvegardés dans: predictions_batch_20260111_234530.csv
|
| 79 |
+
```
|
| 80 |
+
|
| 81 |
+
---
|
| 82 |
+
|
| 83 |
+
## ☁️ Utiliser l'API Hugging Face (Space)
|
| 84 |
+
|
| 85 |
+
Deux scripts ciblent directement la Space HF:
|
| 86 |
+
|
| 87 |
+
```bash
|
| 88 |
+
python demo_unitaire_hf.py
|
| 89 |
+
python demo_batch_hf.py
|
| 90 |
+
```
|
| 91 |
+
|
| 92 |
+
Optionnel: surcharger l'URL via `HF_API_URL`:
|
| 93 |
+
|
| 94 |
+
```bash
|
| 95 |
+
HF_API_URL="https://asi-engineer-oc-p5.hf.space" python demo_batch_hf.py
|
| 96 |
+
```
|
| 97 |
+
|
| 98 |
+
Optionnel: si la Space protège les endpoints, ajouter une API key:
|
| 99 |
+
|
| 100 |
+
```bash
|
| 101 |
+
HF_API_KEY="votre-cle" python demo_unitaire_hf.py
|
| 102 |
+
HF_API_KEY="votre-cle" python demo_batch_hf.py
|
| 103 |
+
```
|
| 104 |
+
|
| 105 |
+
Note: si la Space n'expose pas FastAPI, le script batch basculera automatiquement sur l'API Gradio (`/api/predict_batch`) si l'onglet Batch est activé. Sinon, utilisez l'API locale avec `lancer_api.sh`.
|
| 106 |
+
|
| 107 |
+
---
|
| 108 |
+
|
| 109 |
+
## 📂 Fichiers d'exemple fournis
|
| 110 |
+
|
| 111 |
+
Pour tester rapidement, 4 fichiers d'exemple sont fournis :
|
| 112 |
+
|
| 113 |
+
- **`01_predict_single_employee.json`** - Exemple d'employé pour test unitaire
|
| 114 |
+
- **`02_predict_batch_sondage.csv`** - 10 employés (données sondage)
|
| 115 |
+
- **`02_predict_batch_eval.csv`** - 10 employés (données évaluation)
|
| 116 |
+
- **`02_predict_batch_sirh.csv`** - 10 employés (données SIRH)
|
| 117 |
+
|
| 118 |
+
**Utilisation** : Indiquez simplement ces chemins quand `demo_batch.py` vous les demande.
|
| 119 |
+
|
| 120 |
+
---
|
| 121 |
+
|
| 122 |
+
## 🎯 Jour J - Checklist
|
| 123 |
+
|
| 124 |
+
1. ✅ Installer les dépendances : `pip install requests pandas`
|
| 125 |
+
2. ✅ Tester unitaire : `python demo_unitaire.py`
|
| 126 |
+
3. ✅ Tester batch : `python demo_batch.py` (utiliser les fichiers `02_predict_batch_*.csv`)
|
| 127 |
+
4. ✅ Vérifier que les CSV de résultats sont générés
|
| 128 |
+
|
| 129 |
+
**C'est tout !** 🎉
|
| 130 |
+
|
| 131 |
+
---
|
| 132 |
+
|
| 133 |
+
## 📖 Documentation complète
|
| 134 |
+
|
| 135 |
+
Pour plus d'informations sur l'API, les formats de données, etc., voir :
|
| 136 |
+
- [API Documentation](../docs/api_documentation.md)
|
| 137 |
+
- [Architecture](../docs/architecture.md)
|
| 138 |
+
|
exemples/demo_batch.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#!/usr/bin/env python3
"""
📦 Batch prediction demo — the simplest possible client.

Sends three CSV files (survey / evaluation / HRIS) to the API's
``/predict/batch`` endpoint and writes the predictions to a
timestamped CSV file saved next to this script.

Usage: python demo_batch.py
"""

import os
from contextlib import ExitStack
from datetime import datetime

import pandas as pd
import requests

# ═══════════════════════════════════════════════════════════════
# CONFIGURATION
# ═══════════════════════════════════════════════════════════════

# To use the Hugging Face Spaces API, change the URL below
# API_URL = "https://asi-engineer-oc-p5.hf.space"
API_URL = "http://127.0.0.1:7860"  # local API by default
API_KEY = None  # no API key needed in local DEBUG mode

print("╔══════════════════════════════════════════════════════════╗")
print("║ 📦 PRÉDICTION BATCH - Traitement CSV ║")
print("╚══════════════════════════════════════════════════════════╝\n")

# ═══════════════════════════════════════════════════════════════
# INPUT FILE COLLECTION
# ═══════════════════════════════════════════════════════════════

# Directory containing this script; reused below for the output file
# (the original recomputed it — twice — before saving).
script_dir = os.path.dirname(os.path.abspath(__file__))

# Default paths to the bundled example files
default_sondage = os.path.join(script_dir, "02_predict_batch_sondage.csv")
default_eval = os.path.join(script_dir, "02_predict_batch_eval.csv")
default_sirh = os.path.join(script_dir, "02_predict_batch_sirh.csv")

print("📋 Voulez-vous utiliser les fichiers d'exemple du dossier ?")
print(f" - {os.path.basename(default_sondage)}")
print(f" - {os.path.basename(default_eval)}")
print(f" - {os.path.basename(default_sirh)}")
use_defaults = input("\nUtiliser ces fichiers ? (O/n): ").strip().lower()

if use_defaults in ["", "o", "oui", "y", "yes"]:
    sondage_path = default_sondage
    eval_path = default_eval
    sirh_path = default_sirh
    print("\n✅ Utilisation des fichiers d'exemple")
else:
    print("\nVeuillez fournir les 3 fichiers CSV:\n")
    sondage_path = input("📋 Chemin du fichier SONDAGE (.csv): ").strip()
    eval_path = input("📊 Chemin du fichier ÉVALUATION (.csv): ").strip()
    sirh_path = input("💼 Chemin du fichier SIRH (.csv): ").strip()

# Abort early if any input file is missing.
for path in [sondage_path, eval_path, sirh_path]:
    if not os.path.exists(path):
        print(f"\n❌ ERREUR: Fichier introuvable: {path}")
        raise SystemExit(1)

print("\n✅ Fichiers chargés:")
print(f" - Sondage: {os.path.basename(sondage_path)}")
print(f" - Évaluation: {os.path.basename(eval_path)}")
print(f" - SIRH: {os.path.basename(sirh_path)}")

# ═══════════════════════════════════════════════════════════════
# API CALL
# ═══════════════════════════════════════════════════════════════

print("\n⏳ Envoi des fichiers à l'API...")

headers = {}
if API_KEY:
    headers["X-API-Key"] = API_KEY

try:
    # ExitStack guarantees every handle is closed even when a later
    # open() or the HTTP request itself raises (the original opened
    # all three files before the try and could leak handles).
    with ExitStack() as stack:
        files = {
            "sondage_file": stack.enter_context(open(sondage_path, "rb")),
            "eval_file": stack.enter_context(open(eval_path, "rb")),
            "sirh_file": stack.enter_context(open(sirh_path, "rb")),
        }
        response = requests.post(
            f"{API_URL}/predict/batch", files=files, headers=headers, timeout=120
        )
    response.raise_for_status()
    result = response.json()

    # ═══════════════════════════════════════════════════════════
    # BUILD THE OUTPUT CSV
    # ═══════════════════════════════════════════════════════════

    print("\n✅ Prédictions reçues!")
    print(f" Total employés traités: {result['total_employees']}")

    # One row per employee, with human-readable labels alongside raw codes.
    predictions_data = [
        {
            "employee_id": pred["employee_id"],
            "prediction": "VA PARTIR" if pred["prediction"] == 1 else "VA RESTER",
            "prediction_code": pred["prediction"],
            "risk_level": pred["risk_level"],
            "probability_stay": f"{pred['probability_stay']:.2%}",
            "probability_leave": f"{pred['probability_leave']:.2%}",
        }
        for pred in result["predictions"]
    ]
    df_results = pd.DataFrame(predictions_data)

    # Timestamped output file, saved next to this script.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_path = os.path.join(script_dir, f"predictions_batch_{timestamp}.csv")
    # utf-8-sig so Excel opens accented French text correctly.
    df_results.to_csv(output_path, index=False, encoding="utf-8-sig")

    # ═══════════════════════════════════════════════════════════
    # SUMMARY DISPLAY
    # ═══════════════════════════════════════════════════════════

    print("\n" + "═" * 60)
    print(" 📊 RÉSUMÉ")
    print("═" * 60)

    summary = result["summary"]
    print(f"\n✅ Employés qui vont RESTER: {summary['total_stay']}")
    print(f"🏃 Employés qui vont PARTIR: {summary['total_leave']}")
    print(f"\n🔴 Risque ÉLEVÉ: {summary['high_risk_count']}")
    print(f"🟡 Risque MOYEN: {summary['medium_risk_count']}")
    print(f"🟢 Risque FAIBLE: {summary['low_risk_count']}")

    print("\n" + "═" * 60)
    print("💾 Résultats sauvegardés dans:")
    print(f" {output_path}")
    print("═" * 60)

    # Show a small sample of the results
    print("\n📋 Aperçu des 5 premiers résultats:")
    print(df_results.head(5).to_string(index=False))

except requests.exceptions.RequestException as e:
    print(f"\n❌ ERREUR API: {e}")
    # RequestException always carries a .response attribute (may be None).
    if e.response is not None:
        print(f"Détails: {e.response.text}")
except Exception as e:
    print(f"\n❌ ERREUR: {e}")
exemples/demo_batch_hf.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#!/usr/bin/env python3
"""
📦 Batch prediction via the Hugging Face Space API.

Usage: python demo_batch_hf.py
- Uses the bundled example CSVs from this folder by default
- Sends the 3 files to the HF Space
- Saves a results CSV next to this script

Options: set HF_API_URL to override the default Space URL, and
HF_API_KEY to authenticate if the Space protects its endpoints.
"""

import os
from contextlib import ExitStack
from datetime import datetime

import pandas as pd
import requests

API_URL = os.getenv("HF_API_URL", "https://asi-engineer-oc-p5.hf.space")

print("╔══════════════════════════════════════════════════════════╗")
print("║ 📦 Prédiction BATCH - API Hugging Face ║")
print("╚══════════════════════════════════════════════════════════╝\n")
print(f"🌐 API: {API_URL}\n")

# Example CSVs live next to this script; results are written there too.
script_dir = os.path.dirname(os.path.abspath(__file__))
sondage_path = os.path.join(script_dir, "02_predict_batch_sondage.csv")
eval_path = os.path.join(script_dir, "02_predict_batch_eval.csv")
sirh_path = os.path.join(script_dir, "02_predict_batch_sirh.csv")

# Abort early if any example file is missing.
for path in [sondage_path, eval_path, sirh_path]:
    if not os.path.exists(path):
        print(f"❌ Fichier introuvable: {path}")
        raise SystemExit(1)

print("✅ Fichiers d'exemple détectés:")
print(f" - {os.path.basename(sondage_path)}")
print(f" - {os.path.basename(eval_path)}")
print(f" - {os.path.basename(sirh_path)}\n")

print("⏳ Envoi des fichiers à l'API HF...")

headers = {}
api_key = os.getenv("HF_API_KEY")
if api_key:
    headers["X-API-Key"] = api_key

# ExitStack closes every handle even if one open() raises part-way.
# (The original opened the files before assigning `files`, so a failed
# open leaked the earlier handles AND crashed the finally block with a
# NameError on `files`.)
with ExitStack() as stack:
    files = {
        "sondage_file": stack.enter_context(open(sondage_path, "rb")),
        "eval_file": stack.enter_context(open(eval_path, "rb")),
        "sirh_file": stack.enter_context(open(sirh_path, "rb")),
    }

    # 1) Try FastAPI (if the Space exposes it)
    r = requests.post(
        f"{API_URL}/predict/batch", files=files, headers=headers, timeout=90
    )
    if r.status_code == 404:
        # 2) Fallback: Gradio API endpoint.
        # Rewind the handles first: requests already consumed them during
        # the first attempt, so re-posting without seek(0) would upload
        # empty file bodies.
        for f in files.values():
            f.seek(0)
        print(
            "\nℹ️ Endpoint FastAPI indisponible, tentative via Gradio API (/api/predict_batch)..."
        )
        r = requests.post(
            f"{API_URL}/api/predict_batch", files=files, headers=headers, timeout=90
        )
        if r.status_code == 404:
            print(
                "\n❌ Endpoint HF introuvable (/predict/batch et /api/predict_batch)."
            )
            print(
                " Vérifiez que la Space expose l'API FastAPI ou l'onglet Batch Gradio."
            )
            print(" Sinon, utilisez l'API locale (lancer_api.sh).")
            raise SystemExit(1)

r.raise_for_status()
result = r.json()

# Build the output CSV: one row per employee with readable labels.
predictions_data = [
    {
        "employee_id": pred.get("employee_id"),
        "prediction": (
            "VA PARTIR" if pred.get("prediction") == 1 else "VA RESTER"
        ),
        "prediction_code": pred.get("prediction"),
        "risk_level": pred.get("risk_level"),
        "probability_stay": f"{pred.get('probability_stay', 0):.2%}",
        "probability_leave": f"{pred.get('probability_leave', 0):.2%}",
    }
    for pred in result.get("predictions", [])
]

df = pd.DataFrame(predictions_data)
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
output_path = os.path.join(script_dir, f"predictions_batch_hf_{timestamp}.csv")
# utf-8-sig so Excel opens accented French text correctly.
df.to_csv(output_path, index=False, encoding="utf-8-sig")

# Summary display
summary = result.get("summary", {})
print("\n" + "═" * 60)
print(" 📊 RÉSULTAT (HF)")
print("═" * 60)
print(
    f"\n✅ Traités: {result.get('total_employees')} | RESTER: {summary.get('total_stay')} | PARTIR: {summary.get('total_leave')}"
)
print(
    f"🔴 High: {summary.get('high_risk_count')} 🟡 Medium: {summary.get('medium_risk_count')} 🟢 Low: {summary.get('low_risk_count')}\n"
)
print("📄 Aperçu:")
print(df.head(5).to_string(index=False))
print(f"\n💾 Sauvegardé: {output_path}")
exemples/demo_unitaire.py
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""
🔮 Prédiction UNITAIRE - Le plus simple possible

Usage: python demo_unitaire.py

Asks one question per feature, builds the /predict payload and prints
the model's verdict. Numeric answers are validated and re-asked on a
typo instead of crashing the whole interactive session.
"""

import requests

# ═══════════════════════════════════════════════════════════════
# CONFIGURATION
# ═══════════════════════════════════════════════════════════════

# Pour utiliser l'API Hugging Face Spaces, changez l'URL ci-dessous
# API_URL = "https://asi-engineer-oc-p5.hf.space"
API_URL = "http://127.0.0.1:7860"  # API locale par défaut
API_KEY = None  # Pas besoin d'API key en mode DEBUG local


def _ask_int(prompt: str) -> int:
    """Prompt until the user types a valid integer.

    The previous bare ``int(input(...))`` pattern aborted the demo with a
    raw ValueError traceback on the first typo, losing every answer
    already entered.
    """
    while True:
        raw = input(prompt)
        try:
            return int(raw)
        except ValueError:
            print("⚠️  Veuillez entrer un nombre entier valide.")


def _ask_float(prompt: str) -> float:
    """Prompt until the user types a valid number (integer or decimal)."""
    while True:
        raw = input(prompt)
        try:
            return float(raw)
        except ValueError:
            print("⚠️  Veuillez entrer un nombre valide.")


print("╔══════════════════════════════════════════════════════════╗")
print("║ 🔮 PRÉDICTION UNITAIRE - Risque de départ employé ║")
print("╚══════════════════════════════════════════════════════════╝\n")

# ═══════════════════════════════════════════════════════════════
# COLLECTE DES DONNÉES
# ═══════════════════════════════════════════════════════════════

print("Veuillez renseigner les informations de l'employé:\n")

# === SONDAGE ===
print("📋 DONNÉES SONDAGE")
nombre_participation_pee = _ask_int("Nombre participations PEE (0-3): ")
nb_formations_suivies = _ask_int("Nombre formations suivies (0-6): ")
distance_domicile_travail = _ask_int("Distance domicile-travail en km (1-30): ")
niveau_education = _ask_int("Niveau d'éducation (1-5): ")
domaine_etude = input(
    "Domaine d'étude (Infra & Cloud, Transformation Digitale, Marketing, Entrepreunariat, Ressources Humaines, Autre): "
)
ayant_enfants = input("A des enfants? (Y/N): ").upper()
frequence_deplacement = input("Fréquence déplacement (Aucun, Occasionnel, Frequent): ")
annees_depuis_la_derniere_promotion = _ask_int("Années depuis dernière promotion: ")
annes_sous_responsable_actuel = _ask_int("Années sous responsable actuel (0-17): ")

# === ÉVALUATION ===
print("\n📊 DONNÉES ÉVALUATION")
satisfaction_employee_environnement = _ask_int("Satisfaction environnement (1-4): ")
note_evaluation_precedente = _ask_int("Note évaluation précédente (1-4): ")
niveau_hierarchique_poste = _ask_int("Niveau hiérarchique (1-5): ")
satisfaction_employee_nature_travail = _ask_int("Satisfaction nature travail (1-4): ")
satisfaction_employee_equipe = _ask_int("Satisfaction équipe (1-4): ")
satisfaction_employee_equilibre_pro_perso = _ask_int(
    "Satisfaction équilibre pro/perso (1-4): "
)
note_evaluation_actuelle = _ask_int("Note évaluation actuelle (3-4): ")
heure_supplementaires = input("Heures supplémentaires? (Oui/Non): ")
augementation_salaire_precedente = _ask_float(
    "Augmentation salaire précédente en % (0-100): "
)

# === SIRH ===
print("\n💼 DONNÉES RH")
age = _ask_int("Âge (18-60): ")
genre = input("Genre (M/F): ").upper()
revenu_mensuel = _ask_float("Revenu mensuel en € (1000-20000): ")
statut_marital = input("Statut marital (Célibataire, Marié(e), Divorcé(e)): ")
departement = input("Département (Commercial, Consulting, Ressources Humaines): ")
poste = input(
    "Poste (Cadre Commercial, Assistant de Direction, Consultant, Tech Lead, Manager, Senior Manager, Représentant Commercial, Directeur Technique, Ressources Humaines): "
)
nombre_experiences_precedentes = _ask_int("Nombre expériences précédentes (0-9): ")
annee_experience_totale = _ask_int("Années expérience totale: ")
annees_dans_l_entreprise = _ask_int("Années dans l'entreprise (0-40): ")
annees_dans_le_poste_actuel = _ask_int("Années dans le poste actuel (0-18): ")

# ═══════════════════════════════════════════════════════════════
# PRÉDICTION
# ═══════════════════════════════════════════════════════════════

# NOTE: the misspelled keys ("nombre_heures_travailless",
# "augementation_salaire_precedente") match the API schema — do not fix
# them here without changing the server side as well.
# "nombre_employee_sous_responsabilite" and "nombre_heures_travailless"
# are hard-coded defaults not asked interactively.
employee_data = {
    "nombre_participation_pee": nombre_participation_pee,
    "nb_formations_suivies": nb_formations_suivies,
    "nombre_employee_sous_responsabilite": 1,
    "distance_domicile_travail": distance_domicile_travail,
    "niveau_education": niveau_education,
    "domaine_etude": domaine_etude,
    "ayant_enfants": ayant_enfants,
    "frequence_deplacement": frequence_deplacement,
    "annees_depuis_la_derniere_promotion": annees_depuis_la_derniere_promotion,
    "annes_sous_responsable_actuel": annes_sous_responsable_actuel,
    "satisfaction_employee_environnement": satisfaction_employee_environnement,
    "note_evaluation_precedente": note_evaluation_precedente,
    "niveau_hierarchique_poste": niveau_hierarchique_poste,
    "satisfaction_employee_nature_travail": satisfaction_employee_nature_travail,
    "satisfaction_employee_equipe": satisfaction_employee_equipe,
    "satisfaction_employee_equilibre_pro_perso": satisfaction_employee_equilibre_pro_perso,
    "note_evaluation_actuelle": note_evaluation_actuelle,
    "heure_supplementaires": heure_supplementaires,
    "augementation_salaire_precedente": augementation_salaire_precedente,
    "age": age,
    "genre": genre,
    "revenu_mensuel": revenu_mensuel,
    "statut_marital": statut_marital,
    "departement": departement,
    "poste": poste,
    "nombre_experiences_precedentes": nombre_experiences_precedentes,
    "nombre_heures_travailless": 80,
    "annee_experience_totale": annee_experience_totale,
    "annees_dans_l_entreprise": annees_dans_l_entreprise,
    "annees_dans_le_poste_actuel": annees_dans_le_poste_actuel,
}

print("\n⏳ Envoi de la requête à l'API...")

headers = {"Content-Type": "application/json"}
if API_KEY:
    # Only attach the key when configured; the local DEBUG API needs none.
    headers["X-API-Key"] = API_KEY

try:
    response = requests.post(
        f"{API_URL}/predict", json=employee_data, headers=headers, timeout=30
    )
    response.raise_for_status()
    result = response.json()

    # ═══════════════════════════════════════════════════════════════
    # AFFICHAGE DU RÉSULTAT
    # ═══════════════════════════════════════════════════════════════

    print("\n" + "═" * 60)
    print(" 📊 RÉSULTAT")
    print("═" * 60)

    # API contract: prediction 1 = will leave, 0 = will stay.
    if result["prediction"] == 1:
        print("\n🏃 PRÉDICTION: L'EMPLOYÉ VA QUITTER L'ENTREPRISE")
    else:
        print("\n✅ PRÉDICTION: L'EMPLOYÉ VA RESTER")

    print(f"\n🎯 Niveau de risque: {result['risk_level']}")
    print(f"   Probabilité de rester: {result['probability_0']:.1%}")
    print(f"   Probabilité de partir: {result['probability_1']:.1%}")

    print("\n" + "═" * 60)

except requests.exceptions.RequestException as e:
    print(f"\n❌ ERREUR: {e}")
    if hasattr(e, "response") and e.response is not None:
        print(f"Détails: {e.response.text}")
|
exemples/demo_unitaire_hf.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""
🔮 Prédiction UNITAIRE via API Hugging Face

Usage: python demo_unitaire_hf.py
- Pose des questions interactives
- Envoie la requête à la Space HF
- Affiche la prédiction

Option: définir HF_API_URL pour surcharger l'URL par défaut.
"""

import os
import requests

# Remote Space URL by default; HF_API_URL env var overrides it.
API_URL = os.getenv("HF_API_URL", "https://asi-engineer-oc-p5.hf.space")

print("╔══════════════════════════════════════════════════════════╗")
print("║ 🔮 Prédiction UNITAIRE - API Hugging Face ║")
print("╚══════════════════════════════════════════════════════════╝\n")
print(f"🌐 API: {API_URL}\n")

# Minimal interactive collection of the required fields.
# NOTE(review): int()/float() on raw input raises ValueError on the first
# typo and aborts the script — no retry loop here.
print("Veuillez renseigner les informations de l'employé:\n")

# === SONDAGE ===
nombre_participation_pee = int(input("Nombre participations PEE (0-3): "))
nb_formations_suivies = int(input("Nombre formations suivies (0-6): "))
distance_domicile_travail = int(input("Distance domicile-travail (1-30): "))
niveau_education = int(input("Niveau d'éducation (1-5): "))
domaine_etude = input(
    "Domaine d'étude: (Infra & Cloud, Transformation Digitale, Marketing, Entrepreunariat, Ressources Humaines, Autre): "
)
ayant_enfants = input("A des enfants? (Y/N): ").upper()
frequence_deplacement = input("Fréquence déplacement (Aucun, Occasionnel, Frequent): ")
annees_depuis_la_derniere_promotion = int(input("Années depuis dernière promotion: "))
annes_sous_responsable_actuel = int(input("Années sous responsable actuel (0-17): "))

# === ÉVALUATION ===
satisfaction_employee_environnement = int(input("Satisfaction environnement (1-4): "))
note_evaluation_precedente = int(input("Note évaluation précédente (1-4): "))
niveau_hierarchique_poste = int(input("Niveau hiérarchique (1-5): "))
satisfaction_employee_nature_travail = int(input("Satisfaction nature travail (1-4): "))
satisfaction_employee_equipe = int(input("Satisfaction équipe (1-4): "))
satisfaction_employee_equilibre_pro_perso = int(
    input("Satisfaction équilibre pro/perso (1-4): ")
)
note_evaluation_actuelle = int(input("Note évaluation actuelle (3-4): "))
heure_supplementaires = input("Heures supplémentaires? (Oui/Non): ")
augementation_salaire_precedente = float(input("Augmentation salaire précédente (%): "))

# === SIRH ===
age = int(input("Âge (18-60): "))
genre = input("Genre (M/F): ").upper()
revenu_mensuel = float(input("Revenu mensuel (€): "))
statut_marital = input("Statut marital (Célibataire, Marié(e), Divorcé(e)): ")
departement = input("Département (Commercial, Consulting, Ressources Humaines): ")
poste = input("Poste: ")
nombre_experiences_precedentes = int(input("Nb expériences précédentes (0-9): "))
annee_experience_totale = int(input("Années expérience totale: "))
annees_dans_l_entreprise = int(input("Années dans l'entreprise (0-40): "))
annees_dans_le_poste_actuel = int(input("Années dans le poste actuel (0-18): "))

# Payload for POST /predict.
# NOTE: the misspelled keys ("nombre_heures_travailless",
# "augementation_salaire_precedente") match the API schema — do not "fix"
# them here. "nombre_employee_sous_responsabilite" (1) and
# "nombre_heures_travailless" (80) are hard-coded defaults, not asked.
employee_data = {
    "nombre_participation_pee": nombre_participation_pee,
    "nb_formations_suivies": nb_formations_suivies,
    "nombre_employee_sous_responsabilite": 1,
    "distance_domicile_travail": distance_domicile_travail,
    "niveau_education": niveau_education,
    "domaine_etude": domaine_etude,
    "ayant_enfants": ayant_enfants,
    "frequence_deplacement": frequence_deplacement,
    "annees_depuis_la_derniere_promotion": annees_depuis_la_derniere_promotion,
    "annes_sous_responsable_actuel": annes_sous_responsable_actuel,
    "satisfaction_employee_environnement": satisfaction_employee_environnement,
    "note_evaluation_precedente": note_evaluation_precedente,
    "niveau_hierarchique_poste": niveau_hierarchique_poste,
    "satisfaction_employee_nature_travail": satisfaction_employee_nature_travail,
    "satisfaction_employee_equipe": satisfaction_employee_equipe,
    "satisfaction_employee_equilibre_pro_perso": satisfaction_employee_equilibre_pro_perso,
    "note_evaluation_actuelle": note_evaluation_actuelle,
    "heure_supplementaires": heure_supplementaires,
    "augementation_salaire_precedente": augementation_salaire_precedente,
    "age": age,
    "genre": genre,
    "revenu_mensuel": revenu_mensuel,
    "statut_marital": statut_marital,
    "departement": departement,
    "poste": poste,
    "nombre_experiences_precedentes": nombre_experiences_precedentes,
    "nombre_heures_travailless": 80,
    "annee_experience_totale": annee_experience_totale,
    "annees_dans_l_entreprise": annees_dans_l_entreprise,
    "annees_dans_le_poste_actuel": annees_dans_le_poste_actuel,
}

print("\n⏳ Envoi de la requête à l'API HF...")
headers = {"Content-Type": "application/json"}
# Optional: API key, in case the Space protects its endpoints.
api_key = os.getenv("HF_API_KEY")
if api_key:
    headers["X-API-Key"] = api_key

try:
    # Longer timeout than the local demo: a sleeping Space may cold-start.
    r = requests.post(
        f"{API_URL}/predict", json=employee_data, headers=headers, timeout=45
    )
    # 404 means the Space serves only the Gradio UI, not the FastAPI routes.
    if r.status_code == 404:
        print(
            "\n❌ Endpoint HF introuvable (/predict). Vérifiez que la Space expose l'API FastAPI."
        )
        print("   Sinon, utilisez l'API locale (lancer_api.sh) ou GRADIO.")
        raise SystemExit(1)
    r.raise_for_status()
    result = r.json()

    print("\n" + "═" * 60)
    print(" 📊 RÉSULTAT (HF)")
    print("═" * 60)
    # API contract: prediction 0 = stays, 1 = leaves; .get() defaults keep
    # the display alive even if a field is missing from the response.
    print(
        "\n✅ PRÉDICTION: "
        + ("VA RESTER" if result.get("prediction", 0) == 0 else "VA PARTIR")
    )
    print(f"🎯 Niveau de risque: {result.get('risk_level')}")
    print(f"   Prob rester: {result.get('probability_0', 0):.1%}")
    print(f"   Prob partir: {result.get('probability_1', 0):.1%}")

except requests.exceptions.RequestException as e:
    print(f"\n❌ ERREUR API HF: {e}")
    if getattr(e, "response", None) is not None:
        print(f"Détails: {e.response.text}")
|
exemples/lancer_api.sh
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
#
# 🚀 Script de lancement de l'API locale pour la démo
#
# Usage: ./lancer_api.sh
#

# Move to the project root (parent of exemples/).
# Guard the cd (ShellCheck SC2164): without it, a failed cd would let the
# script go on and start uvicorn from the wrong directory.
cd "$(dirname "$0")/.." || exit 1

echo "╔══════════════════════════════════════════════════════════╗"
echo "║ 🚀 Lancement de l'API Employee Turnover ║"
echo "╚══════════════════════════════════════════════════════════╝"
echo ""

# Vérifier que poetry est installé
if ! command -v poetry &> /dev/null; then
    echo "❌ poetry n'est pas installé"
    echo "   Installation : pip install poetry"
    exit 1
fi

# Vérifier que le fichier api.py existe
if [ ! -f "api.py" ]; then
    echo "❌ Fichier api.py introuvable"
    echo "   Assurez-vous d'être dans le bon dossier"
    exit 1
fi

echo "✅ Démarrage de l'API sur http://127.0.0.1:7860"
echo ""
echo "📖 Documentation disponible sur:"
echo "   - http://127.0.0.1:7860/docs (Swagger)"
echo "   - http://127.0.0.1:7860/redoc (ReDoc)"
echo ""
echo "🔮 Interface Gradio (si activée):"
echo "   - http://127.0.0.1:7860/"
echo ""
echo "💡 Pour arrêter l'API : Ctrl+C"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""

# Lancer l'API avec poetry en mode DEBUG (sans API key)
DEBUG=True poetry run uvicorn api:app --host 127.0.0.1 --port 7860
|
exemples/predictions_batch_20260111_235739.csv
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
employee_id,prediction,prediction_code,risk_level,probability_stay,probability_leave
|
| 2 |
+
1,VA PARTIR,1,High,16.41%,83.59%
|
| 3 |
+
2,VA RESTER,0,Low,88.46%,11.54%
|
| 4 |
+
3,VA PARTIR,1,Medium,35.19%,64.81%
|
| 5 |
+
4,VA PARTIR,1,High,24.39%,75.61%
|
| 6 |
+
5,VA PARTIR,1,Medium,32.16%,67.84%
|
| 7 |
+
6,VA RESTER,0,Low,95.30%,4.70%
|
| 8 |
+
7,VA RESTER,0,Low,81.61%,18.39%
|
| 9 |
+
8,VA PARTIR,1,High,20.77%,79.23%
|
| 10 |
+
9,VA RESTER,0,Low,96.22%,3.78%
|
| 11 |
+
10,VA RESTER,0,Low,92.47%,7.53%
|
exemples/requirements.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
requests>=2.31.0
|
| 2 |
+
pandas>=2.0.0
|
| 3 |
+
|
src/gradio_ui.py
CHANGED
|
@@ -9,6 +9,7 @@ Cette interface permet de:
|
|
| 9 |
"""
|
| 10 |
import os
|
| 11 |
from typing import cast
|
|
|
|
| 12 |
|
| 13 |
import gradio as gr
|
| 14 |
|
|
@@ -538,6 +539,120 @@ def create_gradio_interface():
|
|
| 538 |
api_name="predict",
|
| 539 |
)
|
| 540 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 541 |
# Onglet Documentation
|
| 542 |
with gr.TabItem("📚 Documentation API"):
|
| 543 |
gr.Markdown(API_DOCS)
|
|
|
|
| 9 |
"""
|
| 10 |
import os
|
| 11 |
from typing import cast
|
| 12 |
+
import pandas as pd
|
| 13 |
|
| 14 |
import gradio as gr
|
| 15 |
|
|
|
|
| 539 |
api_name="predict",
|
| 540 |
)
|
| 541 |
|
| 542 |
+
# Onglet Batch
|
| 543 |
+
with gr.TabItem("📦 Batch"):
|
| 544 |
+
gr.Markdown(
|
| 545 |
+
"### Prédictions batch à partir de 3 CSV (sondage, évaluation, SIRH)"
|
| 546 |
+
)
|
| 547 |
+
with gr.Column():
|
| 548 |
+
sondage_file = gr.File(
|
| 549 |
+
label="CSV Sondage", file_types=[".csv"], type="filepath"
|
| 550 |
+
)
|
| 551 |
+
eval_file = gr.File(
|
| 552 |
+
label="CSV Évaluation", file_types=[".csv"], type="filepath"
|
| 553 |
+
)
|
| 554 |
+
sirh_file = gr.File(
|
| 555 |
+
label="CSV SIRH", file_types=[".csv"], type="filepath"
|
| 556 |
+
)
|
| 557 |
+
batch_btn = gr.Button("📦 Prédire en batch", variant="primary")
|
| 558 |
+
batch_result = gr.JSON(label="Résultat batch")
|
| 559 |
+
|
| 560 |
+
def predict_batch_gradio(
|
| 561 |
+
sondage_path: str, eval_path: str, sirh_path: str
|
| 562 |
+
):
|
| 563 |
+
try:
|
| 564 |
+
# Lire CSV
|
| 565 |
+
sondage_df = pd.read_csv(sondage_path)
|
| 566 |
+
eval_df = pd.read_csv(eval_path)
|
| 567 |
+
sirh_df = pd.read_csv(sirh_path)
|
| 568 |
+
|
| 569 |
+
# Fusion
|
| 570 |
+
from src.preprocessing import (
|
| 571 |
+
merge_csv_dataframes,
|
| 572 |
+
preprocess_dataframe_for_prediction,
|
| 573 |
+
)
|
| 574 |
+
|
| 575 |
+
merged_df = merge_csv_dataframes(sondage_df, eval_df, sirh_df)
|
| 576 |
+
employee_ids = merged_df["original_employee_id"].tolist()
|
| 577 |
+
merged_df = merged_df.drop(columns=["original_employee_id"])
|
| 578 |
+
if "a_quitte_l_entreprise" in merged_df.columns:
|
| 579 |
+
merged_df = merged_df.drop(
|
| 580 |
+
columns=["a_quitte_l_entreprise"]
|
| 581 |
+
)
|
| 582 |
+
|
| 583 |
+
# Preprocessing
|
| 584 |
+
X = preprocess_dataframe_for_prediction(merged_df)
|
| 585 |
+
|
| 586 |
+
# Modèle et prédictions
|
| 587 |
+
from src.models import load_model
|
| 588 |
+
|
| 589 |
+
model = load_model()
|
| 590 |
+
predictions = model.predict(X.values)
|
| 591 |
+
probabilities = model.predict_proba(X.values)
|
| 592 |
+
|
| 593 |
+
results = []
|
| 594 |
+
risk_counts = {"Low": 0, "Medium": 0, "High": 0}
|
| 595 |
+
leave_count = 0
|
| 596 |
+
|
| 597 |
+
for i, emp_id in enumerate(employee_ids):
|
| 598 |
+
prob_stay = float(probabilities[i][0])
|
| 599 |
+
prob_leave = float(probabilities[i][1])
|
| 600 |
+
pred = int(predictions[i])
|
| 601 |
+
|
| 602 |
+
if prob_leave < 0.3:
|
| 603 |
+
risk = "Low"
|
| 604 |
+
elif prob_leave < 0.7:
|
| 605 |
+
risk = "Medium"
|
| 606 |
+
else:
|
| 607 |
+
risk = "High"
|
| 608 |
+
|
| 609 |
+
risk_counts[risk] += 1
|
| 610 |
+
if pred == 1:
|
| 611 |
+
leave_count += 1
|
| 612 |
+
|
| 613 |
+
results.append(
|
| 614 |
+
{
|
| 615 |
+
"employee_id": int(emp_id),
|
| 616 |
+
"prediction": pred,
|
| 617 |
+
"probability_stay": prob_stay,
|
| 618 |
+
"probability_leave": prob_leave,
|
| 619 |
+
"risk_level": risk,
|
| 620 |
+
}
|
| 621 |
+
)
|
| 622 |
+
|
| 623 |
+
summary = {
|
| 624 |
+
"total_stay": len(results) - leave_count,
|
| 625 |
+
"total_leave": leave_count,
|
| 626 |
+
"high_risk_count": risk_counts["High"],
|
| 627 |
+
"medium_risk_count": risk_counts["Medium"],
|
| 628 |
+
"low_risk_count": risk_counts["Low"],
|
| 629 |
+
}
|
| 630 |
+
|
| 631 |
+
return {
|
| 632 |
+
"total_employees": len(results),
|
| 633 |
+
"predictions": results,
|
| 634 |
+
"summary": summary,
|
| 635 |
+
}
|
| 636 |
+
except pd.errors.EmptyDataError:
|
| 637 |
+
return {
|
| 638 |
+
"error": "Empty CSV file",
|
| 639 |
+
"message": "Un des fichiers CSV est vide.",
|
| 640 |
+
}
|
| 641 |
+
except KeyError as e:
|
| 642 |
+
return {
|
| 643 |
+
"error": "Missing column",
|
| 644 |
+
"message": f"Colonne manquante dans les CSV: {e}",
|
| 645 |
+
}
|
| 646 |
+
except Exception as e:
|
| 647 |
+
return {"error": "Batch prediction failed", "message": str(e)}
|
| 648 |
+
|
| 649 |
+
batch_btn.click(
|
| 650 |
+
fn=predict_batch_gradio,
|
| 651 |
+
inputs=[sondage_file, eval_file, sirh_file],
|
| 652 |
+
outputs=batch_result,
|
| 653 |
+
api_name="predict_batch",
|
| 654 |
+
)
|
| 655 |
+
|
| 656 |
# Onglet Documentation
|
| 657 |
with gr.TabItem("📚 Documentation API"):
|
| 658 |
gr.Markdown(API_DOCS)
|