Upload train2_model.py
Browse files- train2_model.py +711 -0
train2_model.py
ADDED
|
@@ -0,0 +1,711 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import joblib
|
| 4 |
+
import datetime
|
| 5 |
+
import warnings
|
| 6 |
+
import requests
|
| 7 |
+
import numpy as np
|
| 8 |
+
import pandas as pd
|
| 9 |
+
from io import StringIO
|
| 10 |
+
from tqdm import tqdm
|
| 11 |
+
from xgboost import XGBClassifier
|
| 12 |
+
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold
|
| 13 |
+
from sklearn.preprocessing import StandardScaler
|
| 14 |
+
from sklearn.metrics import classification_report, roc_auc_score, brier_score_loss, confusion_matrix
|
| 15 |
+
from sklearn.pipeline import Pipeline
|
| 16 |
+
from sklearn.multioutput import MultiOutputClassifier
|
| 17 |
+
from sklearn.base import clone
|
| 18 |
+
from sklearn.calibration import CalibratedClassifierCV
|
| 19 |
+
from imblearn.over_sampling import SMOTE, ADASYN, BorderlineSMOTE
|
| 20 |
+
from imblearn.combine import SMOTEENN, SMOTETomek
|
| 21 |
+
from imblearn.pipeline import Pipeline as ImbPipeline
|
| 22 |
+
import matplotlib.pyplot as plt
|
| 23 |
+
import seaborn as sns
|
| 24 |
+
|
| 25 |
+
# Silence noisy UserWarnings from third-party libraries (sklearn/xgboost emit
# many during grid search and resampling).
warnings.filterwarnings("ignore", category=UserWarning)

# Constants
# League code -> human-readable name; codes match football-data.co.uk CSV names.
LEAGUES = {
    'F1': 'France Ligue 1', 'F2': 'France Ligue 2',
    'E0': 'England Premier League', 'E1': 'Championship',
    'D1': 'Germany Bundesliga', 'D2': '2. Bundesliga',
    'I1': 'Italy Serie A', 'I2': 'Serie B',
    'SP1': 'Spain La Liga', 'SP2': 'Segunda Division',
}
# Columns every downloaded CSV must contain to be usable downstream.
REQUIRED_COLS = ['Date', 'HomeTeam', 'AwayTeam', 'FTHG', 'FTAG', 'FTR']
# Root URL of the football-data.co.uk season archives.
BASE_URL = "https://www.football-data.co.uk/mmz4281"
# Prediction targets, in the order the multi-output model's estimators emit them.
TARGET_COLUMNS = ['Home_Win', 'Away_Win', 'Draw', 'Over2.5', 'BTTS']
|
| 39 |
+
|
| 40 |
+
def fetch_football_data():
    """Download match results for the major European leagues from football-data.co.uk.

    Covers the six most recent seasons (the original docstring said five, but the
    range below generates six season codes such as ``"1920"`` for 2019/2020).

    Returns:
        pd.DataFrame: all successfully downloaded matches, sorted by ``Date``,
        with ``Season``, ``League`` and ``League_Code`` columns appended.

    Raises:
        ValueError: if no league/season file could be downloaded and parsed.
    """
    current_year = datetime.datetime.now().year
    # Season codes like "1920" = 2019/2020 season.
    seasons = [f"{str(y-1)[-2:]}{str(y)[-2:]}" for y in range(current_year-6, current_year)]
    all_data = []

    for season in tqdm(seasons, desc="Chargement des saisons"):
        for league_code, league_name in LEAGUES.items():
            try:
                # NOTE: the original built the path as season_code[:2] + season_code[2:],
                # which is just `season` — use it directly.
                url = f"{BASE_URL}/{season}/{league_code}.csv"
                response = requests.get(url, timeout=15)
                response.raise_for_status()

                # BUG FIX: `response.text` is already a decoded str, so the old
                # loop that re-read StringIO(response.text) with different
                # `encoding=` arguments had no effect.  Decode the raw bytes
                # with each candidate encoding instead.
                for encoding in ['utf-8', 'latin1', 'iso-8859-1']:
                    try:
                        text = response.content.decode(encoding)
                        df = pd.read_csv(StringIO(text),
                                         parse_dates=['Date'], dayfirst=True,
                                         on_bad_lines='warn')
                        break
                    except Exception:
                        continue
                else:
                    # No candidate encoding produced a parseable file.
                    print(f"⚠️ Erreur d'encodage : {league_code} {season}")
                    continue

                # Skip files that lack the columns the pipeline depends on.
                if not all(col in df.columns for col in REQUIRED_COLS):
                    print(f"⚠️ Colonnes manquantes : {league_code} {season}")
                    continue

                df['Season'] = season
                df['League'] = league_name
                df['League_Code'] = league_code
                all_data.append(df)
            except Exception as e:
                # Best-effort download: a missing file for one league/season
                # should not abort the whole fetch.
                print(f"⚠️ Erreur {league_code} {season}: {str(e)}")
                continue

    if not all_data:
        raise ValueError("Aucune donnée valide chargée.")

    result_df = pd.concat(all_data, ignore_index=True).sort_values('Date')
    print(f"📊 Données chargées : {len(result_df)} matchs de {len(seasons)} saisons")
    return result_df
|
| 84 |
+
|
| 85 |
+
def preprocess_data(df):
    """Clean the raw match data and engineer additional features.

    Adds, in place on a copy of the frame: harmonised stat/odds columns, goal and
    discipline differentials, the ``BTTS``/``Over2.5`` targets, rolling-form
    features per team, implied bookmaker probabilities, points-based form, and a
    simulated pre-match league rank per team.

    Args:
        df: concatenated raw frame from ``fetch_football_data`` (must contain
            REQUIRED_COLS plus ``Season``/``League``).

    Returns:
        pd.DataFrame: the enriched frame, rows with unusable scores/dates dropped.
    """
    # Strip stray whitespace from column headers (varies between season files).
    df.columns = [col.strip() for col in df.columns]

    # Harmonisation map: canonical column -> candidate source columns, to cope
    # with notation changes between seasons.
    mapping = {
        'HST': ['HST', 'HS', 'HSTS'],  # shots (several historical variants)
        'AST': ['AST', 'AS', 'ASTS'],
        'HF': ['HF', 'HomeF', 'HFauls'],  # fouls
        'AF': ['AF', 'AwayF', 'AFauls'],
        'HY': ['HY', 'HomeY'],  # yellow cards
        'AY': ['AY', 'AwayY'],
        'HR': ['HR', 'HomeR'],  # red cards
        'AR': ['AR', 'AwayR'],
        'HC': ['HC'], 'AC': ['AC'],  # corners
        'B365H': ['B365H', 'BbHwin'], 'B365D': ['B365D', 'BbDwin'], 'B365A': ['B365A', 'BbAwin'],
        'B365O2.5': ['B365O2.5', 'BbOver'], 'B365U2.5': ['B365U2.5', 'BbUnder'],
        'B365GG': ['B365GG', 'BBBTS']  # both-teams-to-score odds
    }

    # First candidate present wins; values coerced to numeric (NaN on failure).
    for target, sources in mapping.items():
        for col in sources:
            if col in df.columns:
                df[target] = pd.to_numeric(df[col], errors='coerce')
                break

    # Basic per-match differentials.  NOTE(review): DataFrame.get returns the
    # scalar default (0) when the column is missing, so a missing stat yields a
    # constant-zero differential rather than NaN — confirm that is intended.
    df['Goal_Diff'] = df['FTHG'] - df['FTAG']
    df['Total_Goals'] = df['FTHG'] + df['FTAG']
    df['Shot_Diff'] = df.get('HST', 0) - df.get('AST', 0)
    df['Corners_Diff'] = df.get('HC', 0) - df.get('AC', 0)
    df['Fouls_Diff'] = df.get('HF', 0) - df.get('AF', 0)
    df['Yellow_Diff'] = df.get('HY', 0) - df.get('AY', 0)
    df['Red_Diff'] = df.get('HR', 0) - df.get('AR', 0)

    # Target variables: both teams scored, and over 2.5 total goals.
    df['BTTS'] = ((df['FTHG'] > 0) & (df['FTAG'] > 0)).astype(int)
    df['Over2.5'] = (df['Total_Goals'] > 2.5).astype(int)

    # Recent form: rolling means over the previous 3 and 5 matches per team.
    # `.shift(1)` excludes the current match so features are strictly pre-match.
    for team in ['Home', 'Away']:
        team_col = f'{team}Team'

        for stat in ['FTHG', 'FTAG', 'Total_Goals', 'BTTS', 'Over2.5']:
            if stat in df.columns:
                df[f'{team}_{stat}_Last3'] = df.groupby(team_col)[stat].transform(
                    lambda x: x.rolling(3, min_periods=1).mean().shift(1))

                df[f'{team}_{stat}_Last5'] = df.groupby(team_col)[stat].transform(
                    lambda x: x.rolling(5, min_periods=1).mean().shift(1))

        # Defensive form: goals conceded = opponent's goals column.
        if team == 'Home':
            df[f'{team}_Goals_Conceded_Last3'] = df.groupby(team_col)['FTAG'].transform(
                lambda x: x.rolling(3, min_periods=1).mean().shift(1))
            df[f'{team}_Goals_Conceded_Last5'] = df.groupby(team_col)['FTAG'].transform(
                lambda x: x.rolling(5, min_periods=1).mean().shift(1))
        else:
            df[f'{team}_Goals_Conceded_Last3'] = df.groupby(team_col)['FTHG'].transform(
                lambda x: x.rolling(3, min_periods=1).mean().shift(1))
            df[f'{team}_Goals_Conceded_Last5'] = df.groupby(team_col)['FTHG'].transform(
                lambda x: x.rolling(5, min_periods=1).mean().shift(1))

        # Card form.  NOTE(review): for the Home side both 'HY' and 'HR' pass
        # this check and write the SAME column, so Home_Cards_Last3 ends up
        # holding the red-card average only (last write wins) — confirm intended.
        for card_type in ['HY', 'AY', 'HR', 'AR']:
            if card_type in df.columns:
                prefix = card_type[0]  # 'H' or 'A'
                if (prefix == 'H' and team == 'Home') or (prefix == 'A' and team == 'Away'):
                    df[f'{team}_Cards_Last3'] = df.groupby(team_col)[card_type].transform(
                        lambda x: x.rolling(3, min_periods=1).mean().shift(1))

    # Implied probabilities from the bookmaker odds (1 / decimal odds).
    odds_columns = {
        'Implied_Prob_Home': 'B365H',
        'Implied_Prob_Draw': 'B365D',
        'Implied_Prob_Away': 'B365A',
        'Implied_Prob_Over2.5': 'B365O2.5',
        'Implied_Prob_BTTS': 'B365GG'
    }

    for prob_col, odds_col in odds_columns.items():
        if odds_col in df.columns:
            df[prob_col] = 1 / df[odds_col]
        else:
            df[prob_col] = 0.5  # neutral default when the odds are unavailable

    # Remove the bookmaker overround: normalise H/D/A probabilities to sum to 1.
    if all(col in df.columns for col in ['Implied_Prob_Home', 'Implied_Prob_Draw', 'Implied_Prob_Away']):
        total_prob = (df['Implied_Prob_Home'].fillna(0) +
                      df['Implied_Prob_Draw'].fillna(0) +
                      df['Implied_Prob_Away'].fillna(0))
        for prob_col in ['Implied_Prob_Home', 'Implied_Prob_Draw', 'Implied_Prob_Away']:
            df[prob_col] = df[prob_col] / total_prob

    # League points earned by each side in this match.
    df['Points_Home'] = df['FTR'].map({'H': 3, 'D': 1, 'A': 0})
    df['Points_Away'] = df['FTR'].map({'A': 3, 'D': 1, 'H': 0})

    # Points form over several windows (again shifted to stay pre-match).
    for period in [3, 5, 10]:
        df[f'Home_Form{period}'] = df.groupby('HomeTeam')['Points_Home'].transform(
            lambda x: x.rolling(period, min_periods=1).mean().shift(1))
        df[f'Away_Form{period}'] = df.groupby('AwayTeam')['Points_Away'].transform(
            lambda x: x.rolling(period, min_periods=1).mean().shift(1))

    # Final cleanup: drop rows missing the essentials, re-coerce dates.
    df = df.dropna(subset=['FTHG', 'FTAG', 'FTR', 'Date'])
    df['Date'] = pd.to_datetime(df['Date'], errors='coerce')
    df = df.dropna(subset=['Date']).reset_index(drop=True)

    # Calendar features.
    df['DayOfWeek'] = df['Date'].dt.dayofweek
    df['Month'] = df['Date'].dt.month

    # Simulated league table at match time: rank teams by points-per-match
    # accumulated so far within each (season, league).
    seasons = df['Season'].unique()
    leagues = df['League'].unique()

    for season in seasons:
        for league in leagues:
            season_league_mask = (df['Season'] == season) & (df['League'] == league)
            season_league_data = df[season_league_mask].sort_values('Date')

            # Running tallies of points and matches played per team.
            team_points = {}
            team_matches = {}

            # Walk the matches chronologically.
            # NOTE(review): this iterrows loop is O(n * teams log teams) and is
            # the slowest part of preprocessing — a vectorised cumulative-points
            # approach would be much faster; behaviour kept as-is here.
            for idx, row in season_league_data.iterrows():
                home_team = row['HomeTeam']
                away_team = row['AwayTeam']

                # First appearance of a team in this season/league.
                for team in [home_team, away_team]:
                    if team not in team_points:
                        team_points[team] = 0
                        team_matches[team] = 0

                # Record each side's rank BEFORE the match: sort (team, ppm)
                # pairs descending and locate this team's tuple.
                df.loc[idx, 'Home_Rank'] = sorted(
                    [(team, pts/max(1, matches)) for team, pts, matches in
                     zip(team_points.keys(), team_points.values(), team_matches.values())],
                    key=lambda x: x[1], reverse=True
                ).index((home_team, team_points[home_team]/max(1, team_matches[home_team]))) + 1

                df.loc[idx, 'Away_Rank'] = sorted(
                    [(team, pts/max(1, matches)) for team, pts, matches in
                     zip(team_points.keys(), team_points.values(), team_matches.values())],
                    key=lambda x: x[1], reverse=True
                ).index((away_team, team_points[away_team]/max(1, team_matches[away_team]))) + 1

                # Update points with the match outcome.
                if row['FTR'] == 'H':
                    team_points[home_team] += 3
                elif row['FTR'] == 'A':
                    team_points[away_team] += 3
                else:  # Draw
                    team_points[home_team] += 1
                    team_points[away_team] += 1

                # Update matches played.
                team_matches[home_team] += 1
                team_matches[away_team] += 1

    # Rank differential (negative = home side ranked higher).
    df['Rank_Diff'] = df['Home_Rank'] - df['Away_Rank']

    return df
|
| 258 |
+
|
| 259 |
+
def prepare_features(df):
    """Build the feature matrix and the multi-label target frame for training.

    Args:
        df: preprocessed match frame (output of ``preprocess_data``); must hold
            'FTR', 'FTHG', 'FTAG' and 'Total_Goals', plus whichever of the five
            predictor columns were computable.

    Returns:
        tuple[pd.DataFrame, pd.DataFrame]: ``(X, y)`` where X contains the
        available predictors (numeric, median-imputed) and y the five binary
        targets Home_Win / Away_Win / Draw / Over2.5 / BTTS.
    """
    # The five predictors fed to the model.
    wanted = [
        'Shot_Diff',               # offensive balance of power
        'Home_Total_Goals_Last5',  # recent attacking form (home)
        'Away_Total_Goals_Last5',  # recent attacking form (away)
        'Home_BTTS_Last5',         # tendency to score and concede
        'Implied_Prob_Home',       # bookmaker sentiment
    ]

    # Keep only the predictors actually present in the frame.
    usable = [name for name in wanted if name in df.columns]

    print(f"Features disponibles: {len(usable)}/{len(wanted)}")
    if len(usable) < len(wanted):
        print(f"Features manquantes: {set(wanted) - set(usable)}")

    # Work on a numeric copy; anything unparseable becomes NaN.
    feats = df[usable].copy().apply(pd.to_numeric, errors='coerce')

    # Median imputation: more robust to outliers than the mean.
    for name in feats.columns:
        feats[name] = feats[name].fillna(feats[name].median())

    # Binary targets derived from the full-time result and score lines.
    targets = pd.DataFrame({
        'Home_Win': (df['FTR'] == 'H').astype(int),
        'Away_Win': (df['FTR'] == 'A').astype(int),
        'Draw': (df['FTR'] == 'D').astype(int),
        'Over2.5': (df['Total_Goals'] > 2.5).astype(int),
        'BTTS': ((df['FTHG'] > 0) & (df['FTAG'] > 0)).astype(int),
    })

    # Report how balanced each target is.
    for name in targets.columns:
        positive_rate = targets[name].mean() * 100
        print(f"{name}: {positive_rate:.1f}% des cas")

    return feats, targets
|
| 302 |
+
|
| 303 |
+
def _extract_importance(estimator):
    """Return the feature_importances_ of an estimator, unwrapping calibration.

    CalibratedClassifierCV stores its wrapped model under ``estimator`` since
    scikit-learn 1.2 and under ``base_estimator`` before that; the original code
    only checked ``base_estimator`` and therefore failed silently on newer
    scikit-learn versions.  Returns None when no importances are available.
    """
    if hasattr(estimator, 'feature_importances_'):
        return estimator.feature_importances_
    for attr in ('estimator', 'base_estimator'):
        inner = getattr(estimator, attr, None)
        if inner is not None and hasattr(inner, 'feature_importances_'):
            return inner.feature_importances_
    return None

def plot_feature_importance(model, feature_names, targets, save_dir="ml/plots"):
    """Save a horizontal-bar chart of feature importances per target.

    Args:
        model: fitted Pipeline whose 'model' step is a MultiOutputClassifier
            (its estimators_ may be XGBClassifier or CalibratedClassifierCV).
        feature_names: ordered feature labels matching the training matrix.
        targets: target names, aligned with the estimators.
        save_dir: output directory for the PNGs (created if missing).
    """
    os.makedirs(save_dir, exist_ok=True)
    feature_names = list(feature_names)
    estimators = model.named_steps['model'].estimators_

    for target, estimator in zip(targets, estimators):
        importance = _extract_importance(estimator)
        if importance is None:
            print(f"L'estimateur pour {target} n'a pas d'attribut feature_importances_")
            continue

        # Sort features by descending importance, keep the top 15 at most.
        indices = np.argsort(importance)[::-1]
        top_k = min(15, len(feature_names))

        plt.figure(figsize=(10, 8))
        plt.title(f'Importance des features pour {target}')
        plt.barh(range(top_k), importance[indices][:top_k], align='center')
        plt.yticks(range(top_k), [feature_names[i] for i in indices[:top_k]])
        plt.xlabel('Importance relative')
        plt.tight_layout()
        plt.savefig(f"{save_dir}/feature_importance_{target}.png")
        plt.close()
|
| 337 |
+
|
| 338 |
+
def train_model(X, y):
    """Train one tuned XGBoost classifier per target and assemble a pipeline.

    For each of the five targets: resample the (scaled) training set to fight
    class imbalance, grid-search XGBoost hyper-parameters with stratified 5-fold
    CV on ROC AUC, refit with early stopping, and (for the harder targets)
    calibrate the probabilities.  The fitted per-target estimators are patched
    into a MultiOutputClassifier inside a scaler+model Pipeline.

    Args:
        X: feature matrix from ``prepare_features``.
        y: DataFrame of binary targets (columns = TARGET_COLUMNS order).

    Returns:
        tuple: (pipeline, X_test, y_test, best_params) — X_test is UNSCALED;
        the pipeline's scaler step handles scaling at predict time.
    """
    # Stratify on Draw, the most imbalanced target.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42, stratify=y['Draw'])

    # Base estimator cloned by the grid search.
    # NOTE(review): `use_label_encoder` is deprecated/removed in recent xgboost
    # releases — confirm the pinned xgboost version still accepts it.
    base_model = XGBClassifier(
        use_label_encoder=False,
        eval_metric='logloss',
        random_state=42,
        n_jobs=-1
    )

    # Per-target hyper-parameter search spaces.
    param_grids = {
        'Home_Win': {
            'n_estimators': [300, 400, 500],
            'max_depth': [3, 4, 5],
            'learning_rate': [0.01, 0.03],
            'subsample': [0.8, 0.9],
            'colsample_bytree': [0.8, 0.9]
        },
        'Away_Win': {
            'n_estimators': [300, 400, 500],
            'max_depth': [3, 4, 5],
            'learning_rate': [0.01, 0.03],
            'subsample': [0.8, 0.9],
            'colsample_bytree': [0.8, 0.9]
        },
        'Draw': {  # more robust settings for the minority class
            'n_estimators': [400, 500, 600],
            'max_depth': [4, 5, 6],  # slightly deeper to capture complex patterns
            'learning_rate': [0.005, 0.01, 0.02],  # lower learning rate
            'subsample': [0.8, 0.9],
            'colsample_bytree': [0.8, 0.9],
            'scale_pos_weight': [3]  # up-weight minority examples
        },
        'Over2.5': {
            'n_estimators': [300, 400, 500],
            'max_depth': [4, 5, 6],
            'learning_rate': [0.01, 0.03],
            'subsample': [0.8, 0.9],
            'colsample_bytree': [0.8, 0.9]
        },
        'BTTS': {
            'n_estimators': [300, 400, 500],
            'max_depth': [4, 5, 6],
            'learning_rate': [0.01, 0.03],
            'subsample': [0.8, 0.9],
            'colsample_bytree': [0.8, 0.9]
        }
    }

    # Resampling strategy per target; the most aggressive one for Draw.
    resampling_methods = {
        'Home_Win': SMOTE(random_state=42),
        'Away_Win': SMOTE(random_state=42),
        'Draw': SMOTETomek(random_state=42),  # strongest imbalance
        'Over2.5': SMOTEENN(random_state=42),
        'BTTS': SMOTEENN(random_state=42)
    }

    # Fit the scaler ONCE on the training split; estimators below are trained
    # on scaled features, so callers must scale before calling them directly.
    best_params = {}
    estimators = []
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)
    X_test_scaled = scaler.transform(X_test)

    print("\n🔍 Optimisation des hyperparamètres pour chaque cible...")

    for target in y_train.columns:
        print(f"\nOptimisation pour {target}...")

        # Grid search with stratified cross-validation on ROC AUC.
        grid_search = GridSearchCV(
            estimator=base_model,
            param_grid=param_grids[target],
            cv=StratifiedKFold(n_splits=5, shuffle=True, random_state=42),
            scoring='roc_auc',
            n_jobs=-1,
            verbose=0
        )

        # Rebalance the training data for this target before searching.
        resampler = resampling_methods[target]
        X_train_res, y_train_res = resampler.fit_resample(X_train_scaled, y_train[target])

        grid_search.fit(X_train_res, y_train_res)

        best_params[target] = grid_search.best_params_
        print(f"Meilleurs paramètres pour {target}: {best_params[target]}")
        print(f"Meilleur score: {grid_search.best_score_:.4f}")

        # Refit a fresh model with the winning parameters.
        best_model = XGBClassifier(**best_params[target], random_state=42, use_label_encoder=False)

        # Early stopping monitored on the held-out test split.
        # NOTE(review): using the test set for early stopping leaks it into
        # model selection, and `early_stopping_rounds` moved to the constructor
        # in xgboost >= 2.0 — confirm both against the pinned version.
        best_model.fit(
            X_train_res, y_train_res,
            eval_set=[(X_test_scaled, y_test[target])],
            early_stopping_rounds=20,
            verbose=False
        )

        # Calibrate probabilities for the targets where raw scores are least
        # reliable.  NOTE(review): calibrating on the (scaled) TRAINING data of
        # an already-fitted model is optimistic; a held-out calibration split
        # would be sounder.
        if target in ['Draw', 'Over2.5', 'BTTS']:
            print(f"Calibration des probabilités pour {target}...")
            calibrated_model = CalibratedClassifierCV(
                best_model,
                method='isotonic',  # or 'sigmoid' depending on the case
                cv='prefit'  # the model is already trained
            )
            calibrated_model.fit(X_train_scaled, y_train[target])
            estimators.append(calibrated_model)
        else:
            estimators.append(best_model)

    # Assemble the final pipeline: patch the fitted per-target estimators into
    # a MultiOutputClassifier shell (it is never fit() itself).
    multi_model = MultiOutputClassifier(base_model)
    multi_model.estimators_ = estimators

    pipeline = Pipeline([
        ('scaler', scaler),
        ('model', multi_model)
    ])

    # Feature-importance charts for each target.
    plot_feature_importance(pipeline, X.columns, y.columns)

    return pipeline, X_test, y_test, best_params
|
| 471 |
+
|
| 472 |
+
def evaluate_model(model, X_test, y_test):
    """Evaluate the trained pipeline on the test split and save diagnostic plots.

    Prints per-target classification reports, ROC AUC, Brier score and confusion
    matrices; saves confusion-matrix, probability-distribution and calibration
    plots under ml/plots2/.

    Args:
        model: fitted Pipeline with 'scaler' and 'model' steps.
        X_test: UNSCALED test feature matrix (as returned by ``train_model``).
        y_test: DataFrame of binary targets aligned with X_test.

    Returns:
        pd.DataFrame | None: per-match predicted probabilities (plus the
        highest-probability label/value), or None if evaluation failed.
    """
    try:
        # The pipeline scales internally for predict().
        y_pred = model.predict(X_test)

        # BUG FIX: the per-target estimators were fitted on SCALED features,
        # but the original code fed them raw X_test.  Apply the pipeline's
        # fitted scaler before querying predict_proba.
        X_test_scaled = model.named_steps['scaler'].transform(X_test)
        y_proba = [est.predict_proba(X_test_scaled)[:, 1]
                   for est in model.named_steps['model'].estimators_]

        # Output directory for the plots.
        os.makedirs("ml/plots2", exist_ok=True)

        # Per-target probability vectors, keyed by target name.
        proba_results = {}
        print("\n==== RÉSULTATS D'ÉVALUATION ====")

        plt.figure(figsize=(12, 10))

        for i, target in enumerate(y_test.columns):
            proba = y_proba[i]
            proba_results[target] = proba

            print(f"\n=== Évaluation pour {target} ===")
            print(classification_report(y_test[target], y_pred[:, i]))

            # Discrimination and calibration quality metrics.
            roc_auc = roc_auc_score(y_test[target], proba)
            brier = brier_score_loss(y_test[target], proba)
            print(f"ROC AUC: {roc_auc:.3f}")
            print(f"Brier Score: {brier:.3f}")

            cm = confusion_matrix(y_test[target], y_pred[:, i])
            print(f"Matrice de confusion:\n{cm}")

            # One subplot per target (2x3 grid fits the 5 targets).
            plt.subplot(2, 3, i+1)
            sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', cbar=False)
            plt.title(f'Matrice de confusion: {target}\nAUC: {roc_auc:.3f}')
            plt.ylabel('Réel')
            plt.xlabel('Prédit')

        plt.tight_layout()
        plt.savefig("ml/plots2/confusion_matrices.png")
        plt.close()

        # Probabilities indexed like X_test so rows can be joined back to df.
        proba_df = pd.DataFrame(proba_results, index=X_test.index)

        # Most confident prediction per match.
        proba_df['Highest_Proba'] = proba_df[y_test.columns].idxmax(axis=1)
        proba_df['Highest_Proba_Value'] = proba_df[y_test.columns].max(axis=1)

        print("\n=== Probabilités moyennes ===")
        mean_probas = proba_df[y_test.columns].mean().sort_values(ascending=False)
        print(mean_probas)

        # Probability histograms per target.
        plt.figure(figsize=(12, 8))
        for i, target in enumerate(y_test.columns):
            plt.subplot(2, 3, i+1)
            sns.histplot(proba_df[target], bins=20, kde=True)
            plt.axvline(0.5, color='r', linestyle='--')
            plt.title(f'Distribution des probabilités: {target}')
            plt.xlabel('Probabilité')
            plt.ylabel('Fréquence')

        plt.tight_layout()
        plt.savefig("ml/plots2/probability_distributions.png")
        plt.close()

        # Reliability (calibration) curves per target.
        plt.figure(figsize=(12, 8))

        for i, target in enumerate(y_test.columns):
            plt.subplot(2, 3, i+1)

            # Bucket predictions into 10 equal-width probability bins.
            n_bins = 10
            bins = np.linspace(0, 1, n_bins + 1)
            binned_preds = np.digitize(proba_df[target], bins) - 1
            bin_accs = np.zeros(n_bins)
            bin_confs = np.zeros(n_bins)
            bin_sizes = np.zeros(n_bins)

            for j in range(n_bins):
                bin_mask = binned_preds == j
                if np.sum(bin_mask) > 0:
                    bin_accs[j] = np.mean(y_test[target].values[bin_mask])
                    bin_confs[j] = np.mean(proba_df[target].values[bin_mask])
                    bin_sizes[j] = np.sum(bin_mask)

            # Observed frequency vs predicted probability; diagonal = perfect.
            plt.plot(bin_confs, bin_accs, marker='o', linewidth=2, label='Calibration')
            plt.plot([0, 1], [0, 1], linestyle='--', color='gray', label='Parfaite calibration')
            plt.title(f'Courbe de calibration: {target}')
            plt.xlabel('Probabilité prédite')
            plt.ylabel('Fréquence observée')
            plt.legend(loc='lower right')

            # Annotate each point with the bin population.
            for j in range(n_bins):
                if bin_sizes[j] > 0:
                    plt.text(bin_confs[j], bin_accs[j], f' {int(bin_sizes[j])}',
                             ha='left', va='center', fontsize=8)

        plt.tight_layout()
        plt.savefig("ml/plots2/calibration_curves.png")
        plt.close()

        # Most confident predictions overall.
        print("\n=== Top 5 des prédictions les plus confiantes ===")
        print(proba_df.sort_values('Highest_Proba_Value', ascending=False).head(5))

        # Worst mistakes: confident predictions that were wrong.
        print("\n=== Analyse des erreurs ===")

        for target in y_test.columns:
            # Most confident false positives.
            false_positives = proba_df[
                (y_test[target] == 0) & (proba_df[target] > 0.75)
            ].sort_values(target, ascending=False)

            if not false_positives.empty:
                print(f"\nFaux positifs les plus confiants pour {target}:")
                print(false_positives.head(3))

            # Most confident false negatives.
            false_negatives = proba_df[
                (y_test[target] == 1) & (proba_df[target] < 0.25)
            ].sort_values(target)

            if not false_negatives.empty:
                print(f"\nFaux négatifs les plus confiants pour {target}:")
                print(false_negatives.head(3))

        return proba_df

    except Exception as e:
        # Best-effort reporting: evaluation failure should not kill the run.
        print(f"\n❌ Erreur lors de l'évaluation : {str(e)}")
        import traceback
        traceback.print_exc()
        return None
|
| 614 |
+
|
| 615 |
+
def save_model_info(model, X, best_params, targets):
    """Persist the trained pipeline and its metadata under the ml/ directory.

    Args:
        model: fitted pipeline to serialise with joblib.
        X: training feature matrix (only its column names are stored).
        best_params: per-target hyper-parameters found by the grid search.
        targets: ordered list of target column names.
    """
    os.makedirs("ml", exist_ok=True)

    # Serialise the whole pipeline (scaler + estimators) in one artifact.
    joblib.dump(model, "ml/multi_output_model_5.joblib")

    # Companion metadata: everything needed to feed the model later.
    metadata = {
        "features": list(X.columns),
        "targets": targets,
        "best_params": best_params,
        "created_at": datetime.datetime.now().isoformat(),
    }

    with open("ml/model_info_5.json", "w") as handle:
        json.dump(metadata, handle, indent=2)

    print("\n✅ Modèle sauvegardé dans ml/multi_output_model_5.joblib")
    print("✅ Informations du modèle sauvegardées dans ml/model_info_5.json")
|
| 635 |
+
|
| 636 |
+
def predict_new_matches(model, features_df, feature_names):
    """Predict target probabilities for new matches.

    Args:
        model: fitted Pipeline with 'scaler' and 'model' steps.
        features_df: frame containing (at least) the training features, UNSCALED.
        feature_names: training feature names, in training order.

    Returns:
        pd.DataFrame: one column of probabilities per TARGET_COLUMNS entry plus
        'Highest_Proba' / 'Highest_Proba_Value', indexed like ``features_df``.
    """
    # Select the features in the exact order used at training time.
    X_new = features_df[list(feature_names)].copy()

    # BUG FIX: the per-target estimators were fitted on SCALED features, but
    # the original code called predict_proba on the raw values.  Run the
    # pipeline's fitted scaler first.
    X_scaled = model.named_steps['scaler'].transform(X_new)
    y_proba = [est.predict_proba(X_scaled)[:, 1]
               for est in model.named_steps['model'].estimators_]

    # Keep the caller's index so results can be concatenated back onto the
    # source frame (the original fresh RangeIndex made axis=1 concat misalign).
    results = pd.DataFrame(index=features_df.index)

    for i, target in enumerate(TARGET_COLUMNS):
        results[target] = y_proba[i]

    # Most confident outcome per match.
    results['Highest_Proba'] = results[TARGET_COLUMNS].idxmax(axis=1)
    results['Highest_Proba_Value'] = results[TARGET_COLUMNS].max(axis=1)

    return results
|
| 655 |
+
|
| 656 |
+
def main():
    """Run the full workflow: download, preprocess, train, evaluate, save.

    Any exception is caught, printed with its traceback, and swallowed so the
    script exits normally (exit code 0 even on failure).
    """
    try:
        print("🚀 Démarrage du processus d'analyse et modélisation...")

        # Step 1: download raw season files.
        print("\n⏳ Téléchargement des données...")
        df = fetch_football_data()

        # Step 2: clean and engineer features.
        print("\n🧹 Prétraitement des données...")
        df = preprocess_data(df)
        print(f"\n📊 {len(df)} matchs prêts à l'analyse.")

        # Step 3: build the feature matrix and targets.
        print("\n🔧 Préparation des features...")
        X, y = prepare_features(df)
        print(f"\nFeatures utilisées ({len(X.columns)}):")
        print(", ".join(X.columns))

        # Step 4: hyper-parameter search and training.
        print("\n🤖 Entraînement du modèle...")
        model, X_test, y_test, best_params = train_model(X, y)

        # Step 5: final evaluation on the held-out split.
        print("\n🔍 Évaluation finale...")
        evaluate_model(model, X_test, y_test)

        # Step 6: persist model + metadata.
        save_model_info(model, X, best_params, list(y.columns))

        # Bonus: keep the most recent matches around for future predictions.
        latest_data = df.sort_values('Date').tail(100)
        print(f"\n📝 Les {len(latest_data)} matchs les plus récents sont disponibles pour des prédictions futures.")

        # Demo: predict a handful of test-set matches.
        # NOTE(review): despite the message, these are 5 test-split rows, not
        # literally the 5 most recent matches.
        print("\n🔮 Démonstration de prédiction pour les 5 derniers matchs:")
        sample_matches = X_test.head(5)
        predictions = predict_new_matches(model, sample_matches, X.columns)

        # Attach team names when available.
        # NOTE(review): predict_new_matches returns a RangeIndex frame while
        # sample_info keeps df's index, so this axis=1 concat may misalign —
        # verify the displayed rows.
        if 'HomeTeam' in df.columns and 'AwayTeam' in df.columns:
            sample_info = df.loc[sample_matches.index, ['HomeTeam', 'AwayTeam']]
            predictions = pd.concat([sample_info, predictions], axis=1)

        print(predictions)

        print("\n✅ Processus terminé avec succès!")

    except Exception as e:
        print(f"\n❌ Erreur: {str(e)}")
        import traceback
        traceback.print_exc()

if __name__ == "__main__":
    main()
|