| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| |
|
| | import os |
| | import torch |
| | import random |
| | import numpy as np |
| | import pandas as pd |
| | import torch.nn as nn |
| | import torch.optim as optim |
| | from Settings import Config |
| | from itertools import product |
| | from scipy.stats import pearsonr |
| | from xgboost import XGBRegressor |
| | from lightgbm import LGBMRegressor |
| | from sklearn.linear_model import Ridge |
| | from catboost import CatBoostRegressor |
| | from sklearn.model_selection import KFold |
| | from sklearn.preprocessing import StandardScaler |
| | from sklearn.ensemble import RandomForestRegressor |
| | from sklearn.model_selection import cross_val_score |
| | from sklearn.model_selection import train_test_split |
| | from sklearn.metrics import mean_squared_error as MSE |
| | from torch.utils.data import DataLoader, TensorDataset |
| |
|
| |
|
| |
|
class MLP(nn.Module):
    """Simple fully-connected regressor with dropout between hidden layers.

    Args:
        layers: sizes of consecutive Linear layers, e.g. (in, hidden..., out).
        activation: name of the hidden activation ('relu'/'tanh'/'sigmoid').
        last_activation: optional activation name applied to the final output.
        dropout_rate: dropout probability applied after each hidden activation.
    """

    # Fix: the default was a mutable list ([128, 64]); a tuple default avoids
    # the shared-mutable-default-argument pitfall while behaving identically
    # for indexing and len().
    def __init__(self, layers = (128, 64), activation = 'relu', last_activation = None, dropout_rate = 0.6):
        super(MLP, self).__init__()
        self.activation = get_activation(activation)
        self.last_activation = get_activation(last_activation)

        # Fix: the original built the ModuleList via a list comprehension used
        # only for its side effect; pass a generator to ModuleList instead.
        self.linears = nn.ModuleList(
            nn.Linear(layers[i], layers[i + 1]) for i in range(len(layers) - 1)
        )
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x):
        """Apply hidden layers (activation + dropout), then the output layer."""
        for i in range(len(self.linears) - 1):
            x = self.activation(self.linears[i](x))
            x = self.dropout(x)
        x = self.linears[-1](x)
        if self.last_activation is not None:
            x = self.last_activation(x)
        return x
| |
|
| |
|
class CheckPointer:
    """Persist the best model (by Pearson score) seen so far.

    Calling the instance with a new score saves the model's state dict
    whenever the score improves on the best recorded one; `load` restores
    that checkpoint into a given model.
    """

    def __init__(self, path = None):
        # Default checkpoint location lives inside the configured results dir.
        self.path = path if path is not None else os.path.join(Config.RESULTS_DIR, 'best_model.pt')
        self.best_pearson = -np.inf
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    def load(self, model):
        """Load the best checkpoint into `model` and return it."""
        state = torch.load(self.path, map_location = self.device)
        model.load_state_dict(state)
        print(f'load model from {self.path} with Pearson: {self.best_pearson:.4f}')
        return model

    def __call__(self, pearson_coef, model):
        """Save `model` if `pearson_coef` beats the best score so far."""
        if pearson_coef <= self.best_pearson:
            return
        self.best_pearson = pearson_coef
        torch.save(model.state_dict(), self.path)
        print(f'save better model with Pearson:{self.best_pearson:.4f}')
| |
|
| |
|
def set_seed(seed = 23):
    """Seed every RNG used in this project for reproducibility.

    Covers Python's `random`, NumPy, PyTorch (CPU and all CUDA devices),
    the hash seed, and switches cuDNN into deterministic mode.
    """
    random.seed(seed)
    np.random.seed(seed)
    # Bug fix: the CPU-side PyTorch RNG was never seeded (only the CUDA ones),
    # so runs without CUDA were not reproducible.
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    os.environ['PYTHONHASHSEED'] = str(seed)
    torch.backends.cudnn.deterministic = True
| |
|
| |
|
def get_activation(func):
    """Map an activation name to an `nn.Module` instance.

    Args:
        func: activation name (case-insensitive) or None.

    Returns:
        The activation module, or None when `func` is None.

    Raises:
        ValueError: if `func` names an unsupported activation.
    """
    # Fix: compare against None with `is`, not `==`.
    if func is None:
        return None
    func = func.lower()
    if func == 'relu':
        return nn.ReLU()
    if func == 'tanh':
        return nn.Tanh()
    if func == 'sigmoid':
        return nn.Sigmoid()
    raise ValueError(f'Unsupported activation function: {func}')
| |
|
| |
|
def get_model(model):
    """Return a fresh tree-ensemble regressor by short name.

    Args:
        model: one of 'rf', 'xgb', 'lgb', 'cat' (case-insensitive), or None.

    Returns:
        A configured, unfitted regressor, or None when `model` is None.

    Raises:
        ValueError: for an unrecognised model name.
    """
    # Fix: compare against None with `is`, not `==`.
    if model is None:
        return None
    model = model.lower()
    if model == 'rf':
        return RandomForestRegressor(n_estimators = 100, max_depth = 10, random_state = Config.RANDOM_STATE, n_jobs = -1)
    if model == 'xgb':
        return XGBRegressor(n_estimators = 50, max_depth = 6, random_state = Config.RANDOM_STATE, verbosity = 0, n_jobs = -1)
    if model == 'lgb':
        return LGBMRegressor(n_estimators = 50, max_depth = 6, random_state = Config.RANDOM_STATE, verbose = -1, n_jobs = -1)
    if model == 'cat':
        return CatBoostRegressor(n_estimators = 50, max_depth = 6, random_state = Config.RANDOM_STATE, verbose = -1, allow_writing_files = False)
    raise ValueError(f'Unsupported model: {model}')
| |
|
| |
|
def get_time_decay_weights(n, k = 0.9):
    """Exponential time-decay sample weights for `n` chronologically ordered rows.

    Before normalisation the oldest row gets weight k and the newest weight 1;
    the result is rescaled so the weights sum to `n` (mean weight 1).
    """
    positions = np.arange(n)
    frac = positions / (n - 1) if n > 1 else positions
    raw = np.power(k, 1.0 - frac)
    return raw * n / raw.sum()
| |
|
| |
|
def detect_outlier_weights(X, y, sample_weights, outlier_fraction = 0.001, strategy = 'none', model = 'rf'):
    """Detect high-residual outliers and adjust their sample weights.

    Fits a quick model, flags the `outlier_fraction` samples with the largest
    absolute residuals, and reweights them per `strategy`:
    'none' - no change; 'reduce' - scale weight down by residual size;
    'remove' - zero the weight; 'double' - double the weight.

    Returns:
        (outlier_mask, adjusted_weights): boolean mask of flagged rows and a
        (possibly adjusted) copy of `sample_weights`.
    """
    # Bug fix: the early return previously yielded (weights, mask) — the
    # reverse of the main return's (mask, weights) order — silently swapping
    # the two values at every call site taking the 'none'/small-data path.
    if strategy == 'none' or len(y) < 100:
        return np.zeros(len(y), dtype = bool), sample_weights

    n_outlier = max(1, int(len(y) * outlier_fraction))
    model = get_model(model)
    model.fit(X, y, sample_weight = sample_weights)
    pred = model.predict(X)
    residuals = np.abs(y - pred)

    # Threshold at the n-th largest residual; ties can push the flagged count
    # above n_outlier (handled below).
    sorted_res = np.sort(residuals)
    residual_threshold = sorted_res[-n_outlier] if n_outlier <= len(y) else sorted_res[-1]
    outlier_mask = residuals >= residual_threshold

    if np.sum(outlier_mask) > n_outlier:
        outlier_idx = np.where(outlier_mask)[0]
        # Bug fix: `np.random_state(23)` is not a NumPy function and raised
        # AttributeError; use a seeded RandomState for a reproducible draw.
        rng = np.random.RandomState(23)
        select_idx = rng.choice(outlier_idx, n_outlier, replace = False)
        outlier_mask = np.zeros(len(y), dtype = bool)
        outlier_mask[select_idx] = True

    adjusted_w = sample_weights.copy()
    if outlier_mask.any():
        if strategy == 'reduce':
            outlier_res = residuals[outlier_mask]
            min_res, max_res = outlier_res.min(), outlier_res.max()
            norm_res = (outlier_res - min_res) / (max_res - min_res) if max_res > min_res else np.ones_like(outlier_res)
            # Map normalised residual [0, 1] -> weight factor [0.8, 0.2].
            w_factors = 0.8 - 0.6 * norm_res
            adjusted_w[outlier_mask] *= w_factors
        elif strategy == 'remove':
            adjusted_w[outlier_mask] = 0
        elif strategy == 'double':
            adjusted_w[outlier_mask] *= 2.0
        print(f" Strategy '{strategy}': Adjusted {n_outlier} outliers ({outlier_fraction*100:.1f}% of data)")

    return outlier_mask, adjusted_w
| |
|
| |
|
def get_slices_and_weights(n):
    """Build slice configs (base + outlier-adjusted copies) and their blend weights.

    Each config in Config.SLICE_CONFIGS gets a concrete `anchor` row index
    derived from its `anchor_ratio`; a second set duplicates every slice with
    `adjust_outlier=True`. Weights come from Config.SLICE_WEIGHTS, normalised
    to sum to 1, and must match the total slice count.

    Returns:
        (slices, weights): list of slice dicts and the normalised weight array.
    """
    base_slices = []
    for config in Config.SLICE_CONFIGS:
        # Fix: renamed the loop variable from `slice`, which shadowed the builtin.
        spec = config.copy()
        spec['anchor'] = int(n * config['anchor_ratio']) if config['anchor_ratio'] > 0 else 0
        base_slices.append(spec)

    adjusted_slices = []
    for base in base_slices:
        spec = base.copy()
        spec['name'] = f"{spec['name']}_adjust_outlier"
        spec['adjust_outlier'] = True
        adjusted_slices.append(spec)

    weights = np.array(Config.SLICE_WEIGHTS)
    weights = weights / weights.sum()
    assert len(base_slices + adjusted_slices) == len(weights)

    return base_slices + adjusted_slices, weights
| |
|
| |
|
def analyze_outliers(train):
    """Run outlier detection on the full training frame and report statistics.

    Returns the integer row indices of detected outliers.
    """
    X = train[Config.FEATURES].values
    y = train[Config.TARGET].values
    sample_weights = get_time_decay_weights(len(train))
    outlier_mask, _ = detect_outlier_weights(X, y, sample_weights, outlier_fraction = Config.OUTLIER_FRACTION, strategy = 'remove')

    outlier_idx = np.where(outlier_mask)[0]
    n_outlier = len(outlier_idx)
    print(f"outlier detected: {n_outlier} ({n_outlier / len(train) * 100:.2f}%)")

    if n_outlier == 0:
        print('no outliers detected with current threshold. consider adjusting outlier_fraction value.')
    else:
        _ = analyze_outliers_statistical(train, y, outlier_mask, outlier_idx)
    return outlier_idx
| |
|
| |
|
def analyze_outliers_statistical(train, y, outlier_mask, outlier_idx):
    """Print target-value and feature-distribution stats for outliers vs. normal rows.

    Returns:
        List of (feature, relative_diff, outlier_mean, normal_mean) tuples,
        sorted by relative difference descending.
    """
    # Target distribution per group. Bug fix: the Std format spec was `:4f`
    # (type 'f' with minimum field width 4) instead of the intended `:.4f`.
    normal_y, outlier_y = y[~outlier_mask], y[outlier_mask]
    print(f"Normal samples - Min {normal_y.min():.4f} Max {normal_y.max():.4f} Mean {normal_y.mean():.4f} Std {normal_y.std():.4f}")
    print(f"outlier samples - Min {outlier_y.min():.4f} Max {outlier_y.max():.4f} Mean {outlier_y.mean():.4f} Std {outlier_y.std():.4f}")

    # Compare per-feature means between the two groups.
    features = Config.FEATURES
    normal_features, outlier_features = train.iloc[~outlier_mask][features], train.iloc[outlier_idx][features]
    feature_diffs = []
    for feat in features:
        normal_mean, outlier_mean = normal_features[feat].mean(), outlier_features[feat].mean()
        # Skip features whose normal-group mean is exactly zero (relative
        # difference undefined).
        if normal_mean != 0:
            relative_diff = abs(outlier_mean - normal_mean) / abs(normal_mean)
            feature_diffs.append((feat, relative_diff, outlier_mean, normal_mean))

    feature_diffs.sort(key = lambda x: x[1], reverse = True)
    print(f"Top 10 most different features:")
    for feat, diff, _, __ in feature_diffs[:10]:
        print(f" {feat}: {diff * 100:.1f}% difference")

    print(f" Features with >50% difference: {sum(1 for t in feature_diffs if t[1] > 0.5)}")
    print(f" Features with >100% difference: {sum(1 for t in feature_diffs if t[1] > 1.0)}")
    return feature_diffs
| |
|
| |
|
| | from sklearn.model_selection import KFold |
| | import numpy as np |
| |
|
def train2compare_outlier_strategy(train, test, mode='single'):
    """Cross-validated comparison of outlier-handling strategies over data slices.

    For every strategy in Config.OUTLIER_STRATEGIES, trains each configured
    learner on each time slice within a non-shuffled KFold, builds
    out-of-fold (OOF) and averaged test predictions, then scores either the
    blended ensemble (`mode='ensemble'`) or each single model (`mode='single'`).

    Args:
        train: training DataFrame (assumed chronologically ordered — the
            time-decay weights and non-shuffled KFold rely on row order).
        test: test DataFrame holding Config.FEATURES columns.
        mode: 'ensemble' or 'single' — how strategies are scored.

    Returns:
        (best_oof_pred, best_test_pred, strategy_res, best_strategy,
         best_combination)
    """
    # Drop rows with non-finite / missing target and reindex from zero so
    # KFold indices line up with positional indexing below.
    train = train.replace([np.inf, -np.inf], np.nan).dropna(subset=[Config.TARGET]).reset_index(drop=True)
    n = len(train)

    # Result accumulator keyed by strategy (ensemble mode) or
    # "<strategy>_<learner>" (single mode).
    if mode == 'ensemble':
        strategy_res = {s: {'oof_scores': [], 'slice_scores': []}
                        for s in Config.OUTLIER_STRATEGIES}
    else:
        strategy_res = {
            f"{s}_{l['name']}": {'oof_scores': [], 'slice_scores': []}
            for s in Config.OUTLIER_STRATEGIES
            for l in Config.get_learners()
        }

    best_strategy, best_score = 'reduce', -np.inf
    best_oof_pred = best_test_pred = best_combination = None

    # Time-decay weights over the full training range; sliced per-slice below.
    base_weight = get_time_decay_weights(n)

    # shuffle=False keeps folds chronological.
    folds = KFold(n_splits=Config.N_FOLDS, shuffle=False)

    for strategy in Config.OUTLIER_STRATEGIES:
        print(f'Comparing {strategy.upper()}')
        slices, slice_weights = get_slices_and_weights(n)

        # Per-learner, per-slice prediction buffers.
        oof_pred = {l['name']: {sl['name']: np.zeros(n) for sl in slices}
                    for l in Config.get_learners()}
        test_pred = {l['name']: {sl['name']: np.zeros(len(test)) for sl in slices}
                     for l in Config.get_learners()}

        for fold, (train_i, valid_i) in enumerate(folds.split(train), 1):
            print(f'Fold {fold}/{Config.N_FOLDS}')
            valid_x = train.iloc[valid_i][Config.FEATURES]
            valid_y = train.iloc[valid_i][Config.TARGET]

            for sl in slices:
                sl_name, anchor, after, adjust = (
                    sl['name'], sl['anchor'], sl['after'],
                    sl.get('adjust_outlier', False)
                )

                # Restrict the fold's training indices to the slice's range
                # and shift them into the cut frame's coordinate system.
                if after:
                    cut_df = train.iloc[anchor:].reset_index(drop=True)
                    idx_in_slice = train_i[(train_i >= anchor)] - anchor
                else:
                    cut_df = train.iloc[:anchor].reset_index(drop=True)
                    idx_in_slice = train_i[train_i < anchor]

                if len(idx_in_slice) == 0:
                    continue

                train_x = cut_df.iloc[idx_in_slice][Config.FEATURES]
                train_y = cut_df.iloc[idx_in_slice][Config.TARGET]
                # Slice the global decay weights the same way as the rows.
                weight = base_weight[anchor:][idx_in_slice] if after else base_weight[:anchor][idx_in_slice]

                # Outlier-adjusted slices reweight samples using the current
                # strategy (detect_outlier_weights no-ops below 100 rows).
                if adjust and len(train_y) > 100:
                    _, weight = detect_outlier_weights(
                        train_x.values, train_y.values, weight,
                        Config.OUTLIER_FRACTION, strategy)

                for learner in Config.get_learners():
                    model = learner['estimator'](**learner['params'])
                    # Debug output: shapes/dtypes of the fit inputs.
                    print(learner['name'], type(model))
                    print(train_x.shape[0], len(train_y), len(weight))
                    print(type(train_x), train_x.dtypes.unique())
                    print(type(train_y), train_y.dtype)
                    print(type(weight), weight.dtype)
                    fit_kwargs = dict(
                        X=train_x,
                        y=train_y,
                        sample_weight=weight
                    )

                    # Library-specific eval-set plumbing.
                    if learner['name'] == 'xgb':
                        fit_kwargs.update(eval_set=[(valid_x, valid_y)], verbose=False)
                    elif learner['name'] == 'cat':
                        fit_kwargs.update(eval_set=[(valid_x, valid_y)], verbose=False)
                    elif learner['name'] == 'lgb':
                        fit_kwargs['eval_set'] = [(valid_x, valid_y)]

                    model.fit(**fit_kwargs)

                    if after:
                        # Only validation rows inside the slice get a real
                        # prediction; rows before the anchor fall back to the
                        # full-range slice's OOF values.
                        mask = valid_i >= anchor
                        if mask.any():
                            idx = valid_i[mask]
                            oof_pred[learner['name']][sl_name][idx] = \
                                model.predict(train.iloc[idx][Config.FEATURES])
                        if anchor and (~mask).any():
                            # NOTE(review): assumes slices named 'full' /
                            # 'full_adjust_outlier' exist in Config.SLICE_CONFIGS
                            # and are processed before anchored slices — verify.
                            fallback = 'full_adjust_outlier' if adjust else 'full'
                            oof_pred[learner['name']][sl_name][valid_i[~mask]] = \
                                oof_pred[learner['name']][fallback][valid_i[~mask]]
                    else:
                        oof_pred[learner['name']][sl_name][valid_i] = \
                            model.predict(train.iloc[valid_i][Config.FEATURES])

                    # Accumulate test predictions across folds (averaged below).
                    test_pred[learner['name']][sl_name] += \
                        model.predict(test[Config.FEATURES])

        # Average the fold-summed test predictions.
        for l_name in test_pred:
            for sl_name in test_pred[l_name]:
                test_pred[l_name][sl_name] /= Config.N_FOLDS

        # Score this strategy and track the running best.
        if mode == 'ensemble':
            ensemble_oof, ensemble_test = evaluate_ensemble_strategy(
                oof_pred, test_pred, train, strategy, strategy_res, slice_weights)
            if strategy_res[strategy]['ensemble_score'] > best_score:
                best_score = strategy_res[strategy]['ensemble_score']
                best_strategy, best_combination = strategy, f'Ensemble + {strategy}'
                best_oof_pred, best_test_pred = ensemble_oof, ensemble_test
        else:
            best_score, best_strategy, best_oof_pred, best_test_pred, best_combination = \
                evaluate_single_model_strategy(
                    oof_pred, test_pred, train, strategy, strategy_res, slice_weights,
                    best_score, best_strategy, best_oof_pred, best_test_pred, best_combination)

    return best_oof_pred, best_test_pred, strategy_res, best_strategy, best_combination
| |
|
def evaluate_ensemble_strategy(oof_pred, test_pred, train, strategy, strategy_res, slice_weights, method = 'grid'):
    """Blend per-slice predictions per learner, search model blend weights,
    and record the ensemble result for `strategy` in `strategy_res`.

    Args:
        oof_pred / test_pred: {learner_name: {slice_name: ndarray}} predictions.
        train: training frame (only the target column is read here).
        strategy: outlier-strategy key being evaluated.
        strategy_res: results dict, updated in place under `strategy`.
        slice_weights: normalised per-slice blend weights.
        method: 'grid' (two-stage grid search) or 'stacking' (Ridge).

    Returns:
        (ensemble_oof, ensemble_test) blended prediction arrays.

    Raises:
        ValueError: for an unknown `method`.
    """
    print('\nEvaluating ensemble strategy starting...')
    dic, model_oof_res, model_test_res, model_scores = {}, {}, {}, {}
    learner_names = [learner['name'] for learner in Config.get_learners()]

    # Collapse each learner's per-slice predictions into one vector via the
    # slice weights and score it against the training target.
    for learner_name in learner_names:
        model_oof = pd.DataFrame(oof_pred[learner_name]).values @ slice_weights
        model_test = pd.DataFrame(test_pred[learner_name]).values @ slice_weights
        model_score = pearsonr(train[Config.TARGET], model_oof)[0]

        model_oof_res[learner_name], model_test_res[learner_name] = model_oof, model_test
        model_scores[learner_name] = model_score
        print(f"\t{learner_name} score: {model_score:.4f}")

    true = train[Config.TARGET].values
    # Column-reindex fixes a stable learner ordering for the weight search.
    model_oof_df, model_test_df = pd.DataFrame(model_oof_res)[learner_names], pd.DataFrame(model_test_res)[learner_names]

    if method == 'grid':
        print('\nTwo-stage grid search for model weights...')
        model_weights, ensemble_score, info = weightSearch_grid(model_oof_df, true)
    elif method == 'stacking':
        print('\nStacking Ridge fitting model weights...')
        # NOTE(review): weightSearch_stacking returns (weights, score, info);
        # the middle value here is a score despite the `ensemble_weights` name.
        model_weights, ensemble_weights, info = weightSearch_stacking(model_oof_df, true)
    else: raise ValueError(f'Unsupport model weight search method: {method}')
    dic['info'] = info  # re-assigned with the same value below; harmless

    # Final blend across learners with the searched weights.
    ensemble_oof = model_oof_df.values @ pd.Series(model_weights)[learner_names].values
    ensemble_test = model_test_df.values @ pd.Series(model_weights)[learner_names].values
    final_score = pearsonr(true, ensemble_oof)[0]
    print(f"strategy {strategy} final result:\n\tmethod: {method}\n\tscore: {final_score:.4f}")

    dic['ensemble_score'], dic['oof_pred'], dic['test_pred'], dic['weight_method'] = final_score, ensemble_oof, ensemble_test, method
    dic['info'], dic['model_weights'], dic['model_scores'], dic['slice_weights'] = info, model_weights, model_scores, slice_weights
    strategy_res[strategy] = dic

    return ensemble_oof, ensemble_test
| |
|
| |
|
def weightSearch_grid(model_oof_df, true, stride1 = 0.1, stride2 = 0.025):
    """Two-stage grid search for model blend weights, minimising MSE.

    Stage 1 scans a coarse simplex grid (step `stride1`, weights summing to 1);
    stage 2 refines around the stage-1 optimum with step `stride2`, allowing
    the raw sum to drift within [0.8, 1.2] before renormalising.

    Returns:
        (weights_dict, best_mse, search_info)
    """
    model_names, n_models = model_oof_df.columns.tolist(), len(model_oof_df.columns)
    print('\nStage 1: Coarse search')
    ranges = [round(i * stride1, 1) for i in range(int(1 / stride1) + 1)]
    # Bug fix: MSE is an error metric, so the search must MINIMISE it. The
    # original initialised best to -inf and kept the LARGEST MSE, i.e. it
    # systematically selected the worst weight combination.
    best_score, best_weights, search_times = np.inf, None, 0

    for weights in product(ranges, repeat = n_models):
        # Keep only proper convex combinations (sum == 1, not all zero).
        if abs(sum(weights) - 1) > 1e-6: continue
        if all(w == 0 for w in weights): continue

        search_times += 1
        ensemble_pred = model_oof_df @ weights
        score = MSE(true, ensemble_pred)
        if score < best_score:
            best_score, best_weights = score, weights
        if search_times % 1000 == 0:
            print(f" Tested {search_times} combinations, current best: {best_score:.4f}")

    print(f"Stage 1 completed: {best_score:.4f}")
    print(f"Best weights: {[f'{w:.1f}' for w in best_weights]}")

    print('Stage 2 starting...')
    # Build a fine grid of candidates around each coarse-stage weight,
    # clipped to [0, 1].
    fine_ranges = []
    for i in range(n_models):
        center = best_weights[i]
        min_val, max_val = max(0.0, center - stride2 * 2), min(1.0, center + stride2 * 2)
        candidates, current = [], min_val
        while current <= max_val + 1e-6:
            candidates += [round(current, 3)]
            current += stride2
        fine_ranges += [candidates]

    print("Fine search range:")
    for model_name, candidates in zip(model_names, fine_ranges):
        print(f" {model_name}: {len(candidates)} candidates [{candidates[0]:.3f}, {candidates[-1]:.3f}]")

    best_fine_score, best_fine_weights, fine_times = best_score, list(best_weights), 0
    for weights_fine in product(*fine_ranges):
        weights_fine = np.array(weights_fine)
        weights_sum = sum(weights_fine)
        if weights_sum < 0.8 or weights_sum > 1.2: continue
        weights_fine = weights_fine / weights_sum
        fine_times += 1

        ensemble_pred_fine = model_oof_df @ weights_fine
        score_fine = MSE(true, ensemble_pred_fine)
        # Same fix: keep the smaller MSE.
        if score_fine < best_fine_score:
            best_fine_score, best_fine_weights = score_fine, weights_fine.tolist()
        if fine_times % 500 == 0:
            print(f" Tested {fine_times} combinations, current best: {best_fine_score:.4f}")

    print(f"Fine search completed: {best_fine_score:.4f}")
    # With minimisation, improvement is how much the MSE dropped in stage 2.
    print(f"Performance improvement: {best_score - best_fine_score:.4f}")

    best_weights_dict = dict(zip(model_names, best_fine_weights))
    search_info = {"search_times": search_times, "fine_times": fine_times,
                   "final_score": best_fine_score, "improvement": best_score - best_fine_score}
    return best_weights_dict, best_fine_score, search_info
| |
|
| |
|
def weightSearch_stacking(model_oof_df, true):
    """Fit a Ridge meta-learner on OOF predictions to derive non-negative,
    normalised model blend weights.

    Returns:
        (weight_dict, ensemble_score, search_info)
    """
    print('\nStacking weight search...')
    model_names = model_oof_df.columns.tolist()
    n_models = len(model_oof_df.columns)

    meta_learner = Ridge(alpha = 1.0, random_state = Config.RANDOM_STATE)
    meta_learner.fit(model_oof_df, true)

    # Clip negative coefficients to zero, then normalise; fall back to
    # uniform weights when everything was clipped away.
    weights = np.maximum(meta_learner.coef_, 0)
    weights = weights / weights.sum() if weights.sum() > 0 else np.ones(n_models) / n_models

    ensemble_pred = model_oof_df @ weights
    ensemble_score = pearsonr(true, ensemble_pred)[0]

    # 3-fold CV of the meta-learner, used only as a stability diagnostic.
    cv_scores = cross_val_score(meta_learner, model_oof_df, true, cv = 3, scoring = 'neg_mean_squared_error')
    cv_std = cv_scores.std()

    print(f"Stacking result: {ensemble_score:.4f}")
    print(f"CV stability (std): {cv_std:.4f}")
    print(f"Model weights: {[f'{w:.3f}' for w in weights]}")

    weight_dict = dict(zip(model_names, weights))
    search_info = {"method": "stacking", "meta_learner": "Ridge", "cv_stability": cv_std, "ensemble_score": ensemble_score}
    return weight_dict, ensemble_score, search_info
| |
|
| |
|
def evaluate_single_model_strategy(oof_pred, test_pred, train, strategy, strategy_res, slice_weights,
                                   best_score, best_strategy, best_oof_pred, best_test_pred, best_combination):
    """Score each learner's slice-blended OOF predictions for one outlier strategy.

    Updates `strategy_res` in place under "<strategy>_<learner>" keys and
    returns the (possibly updated) running best result tuple.

    Returns:
        (best_score, best_strategy, best_oof_pred, best_test_pred,
         best_combination)
    """
    for learner in Config.get_learners():
        learner_name = learner['name']
        print(f"{strategy} single model: {learner_name}")
        key = f"{strategy}_{learner_name}"

        # Blend this learner's per-slice predictions with the slice weights.
        oof = pd.DataFrame(oof_pred[learner_name]).values @ slice_weights
        test = pd.DataFrame(test_pred[learner_name]).values @ slice_weights
        score = pearsonr(train[Config.TARGET], oof)[0]
        print(f"\t score: {score:.4f}")

        strategy_res[key]['ensemble_score'] = score
        strategy_res[key]['oof_pred'] = oof
        strategy_res[key]['test_pred'] = test

        if score > best_score:
            best_score, best_strategy = score, key
            best_oof_pred, best_test_pred = oof, test
            best_combination = f"{learner_name.upper()} {strategy}"

    return best_score, best_strategy, best_oof_pred, best_test_pred, best_combination
| |
|
| |
|
def print_strategy_comparison(strategy_res, mode, best_combination):
    """Pretty-print final scores per strategy.

    Returns the single-model results sorted by score descending when
    `mode != 'ensemble'`, otherwise None.
    """
    print(f"\nFINAL RESULTS - MODE: {mode.upper()}")
    if mode == 'ensemble':
        print("Ensemble Results:")
        for strategy in Config.OUTLIER_STRATEGIES:
            score = strategy_res[strategy]['ensemble_score']
            print(f"\t{strategy}: {score:.4f}")
            for model_name, model_score in strategy_res[strategy]['model_scores'].items():
                print(f"\t\t{model_name}: {model_score:.4f}")
        single_res = None
    else:
        print("Single Results:")
        single_res = sorted(((k, v['ensemble_score']) for k, v in strategy_res.items()),
                            key = lambda item: item[1], reverse = True)
        # Show only the ten best combinations.
        for combination, score in single_res[:10]:
            print(f"\t{combination}: {score:.4f}")

    print(f"\nBest Combination: {best_combination}")
    return single_res
| |
|
| |
|
| |
|
| |
|
| |
|
def train_mlp_model(train, test, config = None):
    """Train the MLP on standardised features with early stopping.

    Uses an 80/20 train/validation split (shuffle=False, i.e. chronological),
    Huber loss, Adam, and checkpoints the epoch with the best validation
    Pearson coefficient; the best checkpoint is reloaded for test prediction.

    Args:
        train / test: DataFrames holding Config.MLP_FEATURES (and, for train,
            Config.TARGET).
        config: hyper-parameter dict; falls back to Config.MLP_CONFIG.

    Returns:
        (predictions, best_val_score): test-set predictions from the best
        checkpoint and the best validation Pearson coefficient.
    """
    if config is None:
        config = Config.MLP_CONFIG

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    X_train_full = train[Config.MLP_FEATURES].values
    y_train_full = train[Config.TARGET].values
    # NOTE(review): with shuffle=False the split is chronological and the
    # random_state argument has no effect.
    X_train, X_val, y_train, y_val = train_test_split(X_train_full, y_train_full, test_size = 0.2, shuffle = False, random_state = Config.RANDOM_STATE)

    # Fit the scaler on the training split only to avoid leakage.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_val = scaler.transform(X_val)
    X_test = scaler.transform(test[Config.MLP_FEATURES].values)

    # Targets gain a trailing dim to match the model's (batch, 1) output.
    train_dataset = TensorDataset(torch.tensor(X_train, dtype = torch.float32), torch.tensor(y_train, dtype = torch.float32).unsqueeze(1))
    val_dataset = TensorDataset(torch.tensor(X_val, dtype = torch.float32), torch.tensor(y_val, dtype = torch.float32).unsqueeze(1))
    test_dataset = TensorDataset(torch.tensor(X_test, dtype = torch.float32))
    train_loader = DataLoader(train_dataset, batch_size = config['batch_size'], shuffle = True)
    val_loader = DataLoader(val_dataset, batch_size = config['batch_size'], shuffle = False)
    test_loader = DataLoader(test_dataset, batch_size = config['batch_size'], shuffle = False)

    model = MLP(layers = config['layers'], activation = config['activation'], last_activation = config['last_activation'], dropout_rate = config['dropout_rate']).to(device)
    criterion = nn.HuberLoss(delta = 5.0, reduction = 'mean')
    optimizer = optim.Adam(model.parameters(), lr = config['learning_rate'])
    checkpointer = CheckPointer(path = os.path.join(Config.RESULTS_DIR, 'best_mlp_model.pt'))

    print(f"Starting MLP model training, epochs: {config['epochs']}")
    best_val_score = -np.inf
    patience_counter = 0
    patience = config.get('patience', 10)

    for epoch in range(config['epochs']):
        # --- training pass ---
        model.train()
        running_loss = 0.0

        for inputs, targets in train_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()

        # --- validation pass (Pearson between predictions and targets) ---
        model.eval()
        val_preds, val_trues = [], []
        with torch.no_grad():
            for inputs, targets in val_loader:
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = model(inputs)
                val_preds += [outputs.cpu().numpy()]
                val_trues += [targets.cpu().numpy()]

        val_preds = np.concatenate(val_preds).flatten()
        val_trues = np.concatenate(val_trues).flatten()
        # Pearson is symmetric, so the (preds, trues) argument order is fine.
        val_score = pearsonr(val_preds, val_trues)[0]
        print(f"Epoch {epoch+1}/{config['epochs']}: Train Loss: {running_loss/len(train_loader):.4f}, Val Score: {val_score:.4f}")

        # Checkpoint on improvement; otherwise count towards early stopping.
        if val_score > best_val_score:
            best_val_score = val_score
            patience_counter = 0
            checkpointer(val_score, model)
        else: patience_counter += 1

        if patience_counter >= patience:
            print(f"Early stopping at epoch {epoch+1}")
            break

    # Reload the best checkpoint and predict on the test set.
    model = checkpointer.load(model)
    model.eval()
    predictions = []
    with torch.no_grad():
        for inputs, in test_loader:
            inputs = inputs.to(device)
            outputs = model(inputs)
            predictions += [outputs.cpu().numpy()]

    predictions = np.concatenate(predictions).flatten()
    return predictions, best_val_score
| |
|
| |
|
def create_ensemble_submission(ml_predictions, mlp_predictions, submission, ml_weight = 0.8, mlp_weight = 0.2, strategy = 'ensemble'):
    """Blend ML and MLP predictions, write the submission CSV, return (pred, path).

    Raises:
        ValueError: if the two prediction arrays differ in length.
    """
    if len(ml_predictions) != len(mlp_predictions):
        raise ValueError(f"预测长度不匹配: ML({len(ml_predictions)}) vs MLP({len(mlp_predictions)})")

    # Weighted blend of the two prediction vectors.
    blended = ml_predictions * ml_weight + mlp_predictions * mlp_weight
    out = submission.copy()
    out[Config.TARGET] = blended

    filename = f"submission_ensemble_{strategy}_{ml_weight:.1f}ml_{mlp_weight:.1f}mlp.csv"
    filepath = os.path.join(Config.SUBMISSION_DIR, filename)
    out.to_csv(filepath, index = False)
    print(f"Ensemble submission file saved: {filepath}")
    return blended, filepath
| |
|
| |
|
def save2csv(submission_, predictions, score, models = "ML"):
    """Write `predictions` into a copy of the submission frame and save it as CSV.

    The filename embeds the model tag and its score; returns the file path.
    """
    out = submission_.copy()
    out[Config.TARGET] = predictions
    filepath = os.path.join(Config.SUBMISSION_DIR, f"submission_{models}_{score:.4f}.csv")
    out.to_csv(filepath, index = False)
    print(f"{models} submission saved to {filepath}")
    return filepath
| |
|
| |
|
def create_multiple_submissions(train, ml_predictions, mlp_predictions, submission, best_strategy, ml_score, mlp_score):
    """Write individual ML/MLP submissions plus several blended variants.

    Returns:
        (best predictions, best filename) — "best" is whichever of the two
        single models scored higher.
    """
    ml_filename = save2csv(submission, ml_predictions, ml_score, 'ML')
    mlp_filename = save2csv(submission, mlp_predictions, mlp_score, 'MLP')

    # (ml_weight, mlp_weight, label) for each blended submission file.
    ensemble_files = []
    for ml_w, mlp_w, desc in ((0.9, 0.1, "conservative"), (0.7, 0.3, "balanced"), (0.5, 0.5, "equal")):
        _, path = create_ensemble_submission(ml_predictions, mlp_predictions, submission, ml_w, mlp_w, f"{best_strategy}_{desc}")
        ensemble_files.append(path)

    if ml_score > mlp_score:
        best_final_pred, best_filename, best_type = ml_predictions, ml_filename, "ML"
    else:
        best_final_pred, best_filename, best_type = mlp_predictions, mlp_filename, "MLP"

    print(f"\nRecommended submission: {best_filename} ({best_type})")
    print(f"All generated files:")
    for ef in ensemble_files:
        print(f" - {ef}")

    return best_final_pred, best_filename
| |
|
| |
|
| |
|
| |
|