| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| | import os
|
| | import json
|
| | import optuna
|
| | import datetime
|
| | import numpy as np
|
| | import pandas as pd
|
| | from Settings import Config
|
| | import matplotlib.pyplot as plt
|
| | from scipy.stats import pearsonr
|
| | from optuna.samplers import TPESampler
|
| | from optuna.pruners import MedianPruner
|
| | from typing import Dict, Any, List, Callable
|
| | from sklearn.model_selection import cross_val_score, KFold
|
| |
|
| |
|
| |
|
class OptunaOptimizer:
    """Optuna-based hyperparameter search for a single learner.

    Each trial is scored by the mean Pearson correlation between predictions
    and targets over K-fold cross-validation, using TPE sampling and median
    pruning.
    """

    def __init__(self, model_name: str, config = Config):
        """
        Args:
            model_name: learner key as registered in config.get_learners()
                (matched case-insensitively).
            config: configuration object/class providing RANDOM_STATE,
                RESULTS_DIR and get_learners().
        """
        self.model_name = model_name.lower()
        self.config = config
        self.study = None        # set by optimize()
        self.best_params = None  # set by optimize()

    def create_objective(self, X: np.ndarray, y: np.ndarray, cv_folds: int = 3):
        """Return an Optuna objective closure over (X, y).

        The objective trains one model per CV fold, reports each fold's score
        so the pruner can stop unpromising trials early, and returns the mean
        Pearson correlation. Any non-pruning failure scores -1.0 instead of
        aborting the whole study.
        """
        def objective(trial):
            params = self._suggest_parameters(trial)

            try:
                model = self._create_model(params)
                scores = []

                kfold = KFold(n_splits = cv_folds, shuffle = True, random_state = self.config.RANDOM_STATE)
                for train_idx, val_idx in kfold.split(X):
                    X_train, X_val = X[train_idx], X[val_idx]
                    y_train, y_val = y[train_idx], y[val_idx]

                    # Boosting models receive the held-out fold for their
                    # built-in evaluation; LightGBM's fit() rejects the
                    # `verbose` kwarg, hence the per-model branches.
                    if self.model_name in ['xgb', 'lgb', 'cat']:
                        if self.model_name == 'xgb':
                            model.fit(X_train, y_train, eval_set = [(X_val, y_val)], verbose = False)
                        elif self.model_name == 'lgb':
                            model.fit(X_train, y_train, eval_set = [(X_val, y_val)])
                        elif self.model_name == 'cat':
                            model.fit(X_train, y_train, eval_set = [(X_val, y_val)], verbose = False)
                    else:
                        model.fit(X_train, y_train)

                    y_pred = model.predict(X_val)
                    score = pearsonr(y_val, y_pred)[0]
                    scores.append(score)

                    # Step index = fold number, so MedianPruner can compare
                    # partial CV results across trials.
                    trial.report(score, len(scores) - 1)
                    if trial.should_prune():
                        raise optuna.TrialPruned()
                # Mean across all folds — only reached when no fold pruned.
                return np.mean(scores)
            except optuna.TrialPruned:
                # BUGFIX: TrialPruned subclasses Exception, so the generic
                # handler below used to swallow it and return -1.0, which
                # silently disabled pruning. Re-raise so Optuna records the
                # trial as pruned.
                raise
            except Exception as e:
                print(f"Trial failed: {str(e)}")
                return -1.0

        return objective

    def _suggest_parameters(self, trial):
        """Sample a hyperparameter dict from this model's search space.

        Raises:
            ValueError: for model names without a defined search space.
        """
        if self.model_name == 'xgb':
            return {
                'n_estimators': trial.suggest_int('n_estimators', 500, 3000),
                'learning_rate': trial.suggest_float('learning_rate', 0.01, 0.1, log = True),
                'max_depth': trial.suggest_int('max_depth', 6, 25),
                'max_leaves': trial.suggest_int('max_leaves', 8, 50),
                'min_child_weight': trial.suggest_int('min_child_weight', 1, 50),
                'subsample': trial.suggest_float('subsample', 0.05, 1.0),
                'colsample_bytree': trial.suggest_float('colsample_bytree', 0.5, 1.0),
                'colsample_bylevel': trial.suggest_float('colsample_bylevel', 0.3, 1.0),
                'colsample_bynode': trial.suggest_float('colsample_bynode', 0.3, 1.0),
                'reg_alpha': trial.suggest_float('reg_alpha', 0.1, 100.0, log = True),
                'reg_lambda': trial.suggest_float('reg_lambda', 0.1, 100.0, log = True),
                'gamma': trial.suggest_float('gamma', 0.1, 10.0),
                'tree_method': 'hist',
                # CONSISTENCY FIX: read the flag from the injected config
                # object instead of the Config class directly, so a custom
                # config passed to __init__ is actually honored.
                'device': 'gpu' if getattr(self.config, 'USE_GPU', False) else 'cpu',
                'verbosity': 0,
                'random_state': self.config.RANDOM_STATE,
                'n_jobs': -1
            }
        elif self.model_name == 'lgb':
            return {
                'n_estimators': trial.suggest_int('n_estimators', 500, 3000),
                'learning_rate': trial.suggest_float('learning_rate', 0.01, 0.1, log = True),
                'max_depth': trial.suggest_int('max_depth', 6, 25),
                'num_leaves': trial.suggest_int('num_leaves', 15, 200),
                'min_child_samples': trial.suggest_int('min_child_samples', 5, 100),
                'subsample': trial.suggest_float('subsample', 0.4, 1.0),
                'colsample_bytree': trial.suggest_float('colsample_bytree', 0.4, 1.0),
                'reg_alpha': trial.suggest_float('reg_alpha', 0.1, 100.0, log = True),
                'reg_lambda': trial.suggest_float('reg_lambda', 0.1, 100.0, log = True),
                'feature_fraction': trial.suggest_float('feature_fraction', 0.4, 1.0),
                'bagging_fraction': trial.suggest_float('bagging_fraction', 0.4, 1.0),
                'bagging_freq': trial.suggest_int('bagging_freq', 1, 7),
                'objective': 'regression',
                'metric': 'rmse',
                'boosting_type': 'gbdt',
                'verbose': -1,
                'random_state': self.config.RANDOM_STATE,
                'n_jobs': -1
            }
        elif self.model_name == 'cat':
            return {
                'iterations': trial.suggest_int('iterations', 500, 3000),
                'learning_rate': trial.suggest_float('learning_rate', 0.01, 0.1),
                'depth': trial.suggest_int('depth', 4, 12),
                'l2_leaf_reg': trial.suggest_float('l2_leaf_reg', 1, 10),
                'bootstrap_type': trial.suggest_categorical('bootstrap_type', ['Bayesian', 'Bernoulli', 'MVS']),
                'random_seed': self.config.RANDOM_STATE,
                'verbose': False,
                'allow_writing_files': False
            }
        elif self.model_name == 'rf':
            return {
                'n_estimators': trial.suggest_int('n_estimators', 100, 1000),
                'max_depth': trial.suggest_int('max_depth', 5, 30),
                'min_samples_split': trial.suggest_int('min_samples_split', 2, 20),
                'min_samples_leaf': trial.suggest_int('min_samples_leaf', 1, 10),
                'max_features': trial.suggest_categorical('max_features', ['sqrt', 'log2', None]),
                'bootstrap': trial.suggest_categorical('bootstrap', [True, False]),
                'random_state': self.config.RANDOM_STATE,
                'n_jobs': -1
            }
        else:
            raise ValueError(f"不支持的模型类型: {self.model_name}")

    def _create_model(self, params):
        """Instantiate the estimator registered in the config for this model.

        Raises:
            ValueError: when the config defines no learner with this name.
        """
        learners = self.config.get_learners()
        for learner in learners:
            if learner['name'] == self.model_name:
                return learner['estimator'](**params)
        raise ValueError(f"未找到模型: {self.model_name}")

    def optimize(self, X: np.ndarray, y: np.ndarray, n_trials: int = 100, cv_folds: int = 3, study_name: str = None) -> Dict[str, Any]:
        """Run the Optuna study and record the best parameters.

        Returns:
            dict with 'best_params', 'best_score', the 'study' object and
            'n_trials' (trials actually run, including pruned/failed ones).
        """
        study_name = study_name or f"{self.model_name}_optimization"
        self.study = optuna.create_study(
            direction = 'maximize',  # maximize mean Pearson correlation
            sampler = TPESampler(seed = self.config.RANDOM_STATE),
            pruner = MedianPruner(n_startup_trials = 10, n_warmup_steps = 5),
            study_name = study_name
        )

        objective = self.create_objective(X, y, cv_folds)
        print(f"Optimizing {self.model_name} hyperParameter...")
        print(f"trail: {n_trials}, fold: {cv_folds}")

        self.study.optimize(objective, n_trials = n_trials, show_progress_bar = True)
        self.best_params = self.study.best_params
        best_score = self.study.best_value
        print(f"Optimized score: {best_score:.3f}\nBest Parameters: {self.best_params}")
        res = {'best_params': self.best_params, 'best_score': best_score, 'study': self.study, 'n_trials': len(self.study.trials)}
        return res

    def save_results(self, save_path: str = None):
        """Write the best parameters and study metadata to a JSON file.

        Returns:
            The path the results were written to.

        Raises:
            ValueError: when called before optimize().
        """
        if self.best_params is None:
            raise ValueError("Can't save before optimized.")

        # CONSISTENCY FIX: use the injected config (not the Config class),
        # and make sure the target directory exists before writing.
        save_path = save_path or os.path.join(self.config.RESULTS_DIR, f"{self.model_name}_best_params.json")
        os.makedirs(os.path.dirname(save_path) or '.', exist_ok = True)
        result = {
            'model_name': self.model_name,
            'best_params': self.best_params,
            'best_score': self.study.best_value,
            'optimization_time': str(pd.Timestamp.now()),
            'n_trials': len(self.study.trials)
        }
        with open(save_path, 'w', encoding = 'utf-8') as f:
            json.dump(result, f, indent = 2, ensure_ascii = False)

        print(f"optimized result saved in {save_path}")
        return save_path
|
| |
|
| |
|
class HyperparameterManager:
    """Coordinates hyperparameter optimization across every configured model.

    Owns one OptunaOptimizer per model name, collects their results, and can
    splice the tuned parameters back into the Settings.py config file.
    """
    def __init__(self, config = Config):
        self.config = config
        self.optimizers = {}  # model_name -> OptunaOptimizer
        self.results = {}     # model_name -> result dict returned by optimize()

    def register_optimizer(self, model_name: str, optimizer_type: str = 'optuna'):
        """Create and store an optimizer backend for *model_name*.

        Raises:
            ValueError: if *optimizer_type* is not a supported backend.
        """
        if optimizer_type == 'optuna':
            self.optimizers[model_name] = OptunaOptimizer(model_name, self.config)
        else:
            raise ValueError(f"Unsupported optimizer: {optimizer_type}")

    def optimize_all_models(self, X: np.ndarray, y: np.ndarray, n_trials: int = 50, cv_folds: int = 3) -> Dict[str, Any]:
        """Run optimization for every learner returned by the config.

        A failure in one model is logged and skipped so the remaining models
        still get optimized.

        Returns:
            {model_name: result dict} for the models that succeeded.
        """
        learners = self.config.get_learners()
        model_names = [learner['name'] for learner in learners]

        print(f"Starting hyperparameter optimization for {len(model_names)} models")
        print(f"Model list: {model_names}")

        for model_name in model_names:
            print(f"\n{'='*50}")
            print(f"Optimizing model: {model_name.upper()}")
            print(f"{'='*50}")

            self.register_optimizer(model_name)

            try:
                result = self.optimizers[model_name].optimize(X, y, n_trials, cv_folds)
                self.results[model_name] = result

                # Persist best params immediately so a later model's failure
                # cannot lose the finished models' results.
                self.optimizers[model_name].save_results()

            except Exception as e:
                # Deliberate best-effort: log and continue with the rest.
                print(f"Model {model_name} optimization failed: {str(e)}")
                continue

        return self.results

    def update_config(self, config_path: str = 'Settings.py'):
        """Rewrite get_learners() in *config_path* with the tuned parameters.

        Backs up the original file first. No-op when there are no results yet.
        """
        if not self.results:
            return

        with open(config_path, 'r', encoding = 'utf-8') as f:
            config_content = f.read()
        new_learners_config = self._generate_learners_config()

        # Timestamped copy of the untouched config, e.g. Settings_backup_0131-1542.py
        backup_path = config_path.replace('.py', f'_backup_{datetime.datetime.today().strftime("%m%d-%H%M")}.py')
        with open(backup_path, 'w', encoding = 'utf-8') as f:
            f.write(config_content)
        print(f"Original config backed up to: {backup_path}")

        updated_content = self._update_learners_in_config(config_content, new_learners_config)

        with open(config_path, 'w', encoding = 'utf-8') as f:
            f.write(updated_content)

        print(f"Config file updated: {config_path}")

    def _generate_learners_config(self) -> str:
        """Render a replacement get_learners() classmethod as source text.

        Models present in self.results use their best_params; all others keep
        the parameters currently in the config.
        """
        config_lines = [" @classmethod", " def get_learners(cls):", ' """获取配置好的学习器列表"""', " return ["]
        learners = self.config.get_learners()
        for learner in learners:
            model_name = learner['name']
            config_lines.append(f" {{")
            config_lines.append(f" 'name': '{model_name}',")
            config_lines.append(f" 'estimator': {learner['estimator'].__name__},")
            config_lines.append(f" 'params': {{")

            if model_name in self.results:
                params = self.results[model_name]['best_params']
                print(f" Using optimized parameters for {model_name}")
            else:
                params = learner['params']
                print(f" Keeping original parameters for {model_name}")

            # Emit each parameter as a dict entry. Strings are quoted;
            # everything else relies on its Python repr being valid source
            # (True/False/None/numbers all are).
            for key, value in params.items():
                if isinstance(value, str):
                    config_lines.append(f' "{key}": "{value}",')
                else:
                    config_lines.append(f' "{key}": {value},')

            config_lines.append(f" }},")
            config_lines.append(f" }},")

        config_lines.append(" ]")

        return '\n'.join(config_lines)

    def _update_learners_in_config(self, content: str, new_config: str) -> str:
        """Splice *new_config* over the existing get_learners() body in *content*.

        Falls back to appending when the method is missing, and returns the
        content unchanged when the end marker cannot be located.
        NOTE(review): this marker-based splicing assumes Settings.py keeps the
        exact whitespace in start_marker/end_marker — verify against the real
        config file before relying on it.
        """
        start_marker = "@classmethod\n def get_learners(cls):"
        end_marker = " ]"

        start_idx = content.find(start_marker)
        if start_idx == -1:
            print("get_learners method not found, appending new config")
            return content + "\n\n" + new_config

        # Search for the closing bracket only after the method start, so an
        # earlier " ]" elsewhere in the file is not matched.
        temp_content = content[start_idx:]
        end_idx = temp_content.find(end_marker)
        if end_idx == -1:
            print("get_learners method end position not found")
            return content

        before = content[:start_idx]
        after = content[start_idx + end_idx + len(end_marker):]

        return before + new_config + after

    def plot_optimization_history(self, save_path: str = None):
        """Plot per-trial objective values for up to four optimized models.

        Saves the figure when *save_path* is given, otherwise shows it.
        """
        if not self.results:
            print("No optimization results to plot")
            return

        fig, axes = plt.subplots(2, 2, figsize = (15, 10))
        axes = axes.flatten()

        for i, (model_name, result) in enumerate(self.results.items()):
            if i >= 4:
                # Only a 2x2 grid is allocated; extra models are skipped.
                break

            study = result['study']
            trials = study.trials

            # Pruned/failed trials have value None and are excluded.
            values = [trial.value for trial in trials if trial.value is not None]
            axes[i].plot(values)
            axes[i].set_title(f'{model_name.upper()} Optimization History')
            axes[i].set_xlabel('Trial Number')
            axes[i].set_ylabel('Pearson Correlation')
            axes[i].grid(True)

        plt.tight_layout()

        if save_path:
            plt.savefig(save_path, dpi = 300, bbox_inches = 'tight')
            print(f"Optimization history plot saved to: {save_path}")
        else:
            plt.show()
|
| |
|
| |
|
| |
|
def quick_optimize_single_model(model_name: str, X: np.ndarray, y: np.ndarray, n_trials: int = 100) -> Dict[str, Any]:
    """Convenience wrapper: tune one model and persist its best parameters.

    Builds a throwaway OptunaOptimizer (default config), runs the search,
    writes the results JSON, and returns the optimization summary dict.
    """
    tuner = OptunaOptimizer(model_name)
    outcome = tuner.optimize(X, y, n_trials = n_trials)
    tuner.save_results()
    return outcome
|
| |
|
| |
|
| |
|
if __name__ == "__main__":
    # Smoke test on synthetic regression data: the target depends linearly
    # on the first two features plus small Gaussian noise.
    np.random.seed(42)
    X = np.random.randn(1000, 10)
    y = X[:, 0] + 0.5 * X[:, 1] + np.random.randn(1000) * 0.1

    hp_manager = HyperparameterManager()
    all_results = hp_manager.optimize_all_models(X, y, n_trials = 10)

    # Persist the per-model optimization curves alongside the other results.
    plot_file = os.path.join(Config.RESULTS_DIR, 'optimization_history.png')
    hp_manager.plot_optimization_history(plot_file)
|
| |
|
| |
|