File size: 29,536 Bytes
c687548
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
# -*- coding: utf-8 -*-
# @Time : 2025/7/4 19:53
# @Author : Lukax
# @Email : Lukarxiang@gmail.com
# @File : Utils.py
# @Software: PyCharm


import os
import torch
import random
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.optim as optim
from Settings import Config
from itertools import product
from scipy.stats import pearsonr
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from sklearn.linear_model import Ridge
from catboost import CatBoostRegressor
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error as MSE
from torch.utils.data import DataLoader, TensorDataset



class MLP(nn.Module):
    """Plain feed-forward network: Linear -> activation -> dropout per hidden layer.

    Parameters
    ----------
    layers : list[int] | None
        Layer widths [in, hidden, ..., out]; defaults to [128, 64].
    activation : str
        Activation after every hidden linear layer (resolved via get_activation).
    last_activation : str | None
        Optional, possibly different, activation after the final linear layer;
        None leaves the output linear.
    dropout_rate : float
        Dropout probability applied after each hidden activation.
    """

    def __init__(self, layers = None, activation = 'relu', last_activation = None, dropout_rate = 0.6):
        super(MLP, self).__init__()
        # Bug fix: the default used to be the mutable literal [128, 64], which is
        # shared across all calls; use a None sentinel instead.
        if layers is None:
            layers = [128, 64]
        self.activation = get_activation(activation)
        # The last linear layer may use a different activation than the hidden ones.
        self.last_activation = get_activation(last_activation)
        # Idiom fix: build the ModuleList from a generator instead of a
        # list-comprehension executed purely for its append side effects.
        self.linears = nn.ModuleList(
            nn.Linear(layers[i], layers[i + 1]) for i in range(len(layers) - 1)
        )
        self.dropout = nn.Dropout(dropout_rate)  # applied after each hidden activation

    def forward(self, x):
        # Hidden layers: linear -> activation -> dropout.
        for i in range(len(self.linears) - 1):
            x = self.activation(self.linears[i](x))
            x = self.dropout(x)
        # Output layer: linear, optionally followed by last_activation.
        x = self.linears[-1](x)
        if self.last_activation is not None:
            x = self.last_activation(x)
        return x


class CheckPointer:
    """Persist the model state with the highest validation Pearson seen so far."""

    def __init__(self, path = None):
        # Default checkpoint location lives under the configured results directory.
        self.path = path if path is not None else os.path.join(Config.RESULTS_DIR, 'best_model.pt')
        self.best_pearson = -np.inf
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    def load(self, model):
        """Restore the best checkpoint into `model` and return it."""
        state = torch.load(self.path, map_location = self.device)
        model.load_state_dict(state)
        print(f'load model from {self.path} with Pearson: {self.best_pearson:.4f}')
        return model

    def __call__(self, pearson_coef, model):
        """Save `model` whenever `pearson_coef` strictly beats the best so far."""
        if pearson_coef <= self.best_pearson:
            return
        self.best_pearson = pearson_coef
        torch.save(model.state_dict(), self.path)
        print(f'save better model with Pearson:{self.best_pearson:.4f}')


def set_seed(seed = 23):
    """Seed every RNG used by the project so runs are reproducible.

    Covers Python's `random`, NumPy, PyTorch (CPU and all CUDA devices),
    pins cuDNN to deterministic kernels, and fixes PYTHONHASHSEED.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)            # bug fix: the CPU generator was never seeded
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    os.environ['PYTHONHASHSEED'] = str(seed)
    torch.backends.cudnn.deterministic = True


def get_activation(func):
    """Return a torch activation module for the name `func` (case-insensitive).

    None passes through as None so callers can disable the activation.
    Supported names: 'relu', 'tanh', 'sigmoid'; anything else raises ValueError.
    """
    if func is None: return None  # idiom fix: compare to None with 'is', not '=='
    func = func.lower()
    if func == 'relu': return nn.ReLU()
    elif func == 'tanh': return nn.Tanh()
    elif func == 'sigmoid': return nn.Sigmoid()
    else: raise ValueError(f'Unsupported activation function: {func}')


def get_model(model):
    """Return a small, fast tree regressor by short name (used for outlier detection).

    `model` may be None (returns None) or one of 'rf', 'xgb', 'lgb', 'cat'
    (case-insensitive). Raises ValueError for any other name.
    """
    if model is None:  return None  # idiom fix: identity comparison with None
    model = model.lower()
    if model == 'rf': return RandomForestRegressor(n_estimators = 100, max_depth = 10, random_state = Config.RANDOM_STATE, n_jobs = -1)
    elif model == 'xgb': return XGBRegressor(n_estimators = 50, max_depth = 6, random_state = Config.RANDOM_STATE, verbosity = 0, n_jobs = -1)
    elif model == 'lgb': return LGBMRegressor(n_estimators = 50, max_depth = 6, random_state = Config.RANDOM_STATE, verbose = -1, n_jobs = -1)
    elif model == 'cat': return CatBoostRegressor(n_estimators = 50, max_depth = 6, random_state = Config.RANDOM_STATE, verbose = -1, allow_writing_files = False)
    else: raise ValueError(f'Unsupported model: {model}')


def get_time_decay_weights(n, k = 0.9):
    """Exponential time-decay sample weights, normalised to sum to `n`.

    Later positions (more recent samples) receive larger weights; `k` in
    (0, 1) controls how strongly older samples are discounted.
    """
    positions = np.arange(n)
    if n > 1:
        scaled = positions / (n - 1)
    else:
        scaled = positions
    raw = k ** (1.0 - scaled)
    return raw * n / raw.sum()


def detect_outlier_weights(X, y, sample_weights, outlier_fraction = 0.001, strategy = 'none', model = 'rf'):
    """Flag the samples with the largest residuals and adjust their weights.

    A light tree model (see get_model) is fitted on (X, y); the
    `outlier_fraction` share of samples with the biggest absolute residuals
    is marked as outliers. Their weights are then modified per `strategy`:
    'reduce' scales them down by residual magnitude, 'remove' zeroes them,
    'double' doubles them, 'none' leaves everything untouched.

    Returns
    -------
    (outlier_mask, adjusted_weights) : boolean mask over `y` plus the weight
    array after applying the strategy.
    """
    if strategy == 'none' or len(y) < 100:
        # Bug fix: this early exit used to return (weights, mask) while the
        # main path returns (mask, weights); callers unpack the latter order.
        return np.zeros(len(y), dtype = bool), sample_weights

    n_outlier = max(1, int(len(y) * outlier_fraction))
    model = get_model(model)
    model.fit(X, y, sample_weight = sample_weights)
    pred = model.predict(X)
    residuals = np.abs(y - pred)

    # Threshold at the n_outlier-th largest residual.
    sorted_res = np.sort(residuals)
    residual_threshold = sorted_res[-n_outlier] if n_outlier <= len(y) else sorted_res[-1]
    outlier_mask = residuals >= residual_threshold

    # Ties at the threshold can select more than n_outlier rows; sample the
    # excess away with a fixed seed so the selection is reproducible.
    if np.sum(outlier_mask) > n_outlier:
        outlier_idx = np.where(outlier_mask)[0]
        # Bug fix: np.random_state(23) does not exist (AttributeError at
        # runtime); use a seeded Generator for the tie-break instead.
        rng = np.random.default_rng(23)
        select_idx = rng.choice(outlier_idx, n_outlier, replace = False)
        outlier_mask = np.zeros(len(y), dtype = bool)
        outlier_mask[select_idx] = True

    adjusted_w = sample_weights.copy()
    if outlier_mask.any():
        if strategy == 'reduce':
            # Scale outlier weights between 0.2 and 0.8 — harsher for larger residuals.
            outlier_res = residuals[outlier_mask]
            min_res, max_res = outlier_res.min(), outlier_res.max()
            norm_res = (outlier_res - min_res) / (max_res - min_res) if max_res > min_res else np.ones_like(outlier_res)
            w_factors = 0.8 - 0.6 * norm_res
            adjusted_w[outlier_mask] *= w_factors

        elif strategy == 'remove': adjusted_w[outlier_mask] = 0
        elif strategy == 'double': adjusted_w[outlier_mask] *= 2.0
        print(f"    Strategy '{strategy}': Adjusted {n_outlier} outliers ({outlier_fraction*100:.1f}% of data)")

    return outlier_mask, adjusted_w


def get_slices_and_weights(n):
    """Build slice definitions (base + outlier-adjusted copies) and their weights.

    Every entry of Config.SLICE_CONFIGS gets a concrete 'anchor' row index
    derived from its 'anchor_ratio' and the training length `n`; a second
    copy of each slice is appended with adjust_outlier=True and a suffixed
    name. The returned weights come from Config.SLICE_WEIGHTS, normalised
    to sum to 1 — one weight per slice is asserted.
    """
    base_slices = []
    # Idiom fix: the loop variable used to shadow the builtin `slice`.
    for cfg in Config.SLICE_CONFIGS:
        spec = cfg.copy()
        spec['anchor'] = int(n * cfg['anchor_ratio']) if cfg['anchor_ratio'] > 0 else 0
        base_slices.append(spec)

    adjusted_slices = []
    for base in base_slices:
        spec = base.copy()
        spec['name'] = f"{spec['name']}_adjust_outlier"
        spec['adjust_outlier'] = True
        adjusted_slices.append(spec)

    weights = np.array(Config.SLICE_WEIGHTS)
    weights = weights / weights.sum()
    # Config.SLICE_WEIGHTS must provide exactly one weight per (base + adjusted) slice.
    assert len(base_slices + adjusted_slices) == len(weights)

    return base_slices + adjusted_slices, weights


def analyze_outliers(train):
    """Detect residual-based outliers in `train`, report counts, return their indices."""
    features = train[Config.FEATURES].values
    target = train[Config.TARGET].values
    weights = get_time_decay_weights(len(train))
    # Only the mask matters here; 'remove' is just a cheap strategy choice —
    # the adjusted weights are discarded.
    outlier_mask, _ = detect_outlier_weights(
        features, target, weights,
        outlier_fraction = Config.OUTLIER_FRACTION, strategy = 'remove')
    outlier_idx = np.where(outlier_mask)[0]
    n_outlier = len(outlier_idx)
    print(f"outlier detected: {n_outlier} ({n_outlier / len(train) * 100:.2f}%)")

    if n_outlier == 0:
        print('no outliers detected with current threshold. consider adjusting outlier_fraction value.')
    else:
        # Descriptive statistics of the flagged rows (return value unused here).
        analyze_outliers_statistical(train, target, outlier_mask, outlier_idx)
    return outlier_idx


def analyze_outliers_statistical(train, y, outlier_mask, outlier_idx):
    """Print summary statistics contrasting outlier rows against normal rows.

    Compares the target distribution of the two groups, then ranks features
    by the relative difference of their means. Returns the sorted list of
    (feature, rel_diff, outlier_mean, normal_mean) tuples.
    """
    # Target distribution per group.
    normal_y, outlier_y = y[~outlier_mask], y[outlier_mask]
    # Bug fix: the Std format spec was ':4f' (minimum width 4), not ':.4f'
    # (4 decimal places) as in every other figure on these lines.
    print(f"Normal samples - Min {normal_y.min():.4f} Max {normal_y.max():.4f} Mean {normal_y.mean():.4f} Std {normal_y.std():.4f}")
    print(f"outlier samples - Min {outlier_y.min():.4f} Max {outlier_y.max():.4f} Mean {outlier_y.mean():.4f} Std {outlier_y.std():.4f}")

    # Per-feature mean comparison; features whose normal mean is exactly 0 are
    # skipped because the relative difference would be undefined.
    features = Config.FEATURES
    normal_features, outlier_features = train.iloc[~outlier_mask][features], train.iloc[outlier_idx][features]
    feature_diffs = []
    for feat in features:
        normal_mean, outlier_mean = normal_features[feat].mean(), outlier_features[feat].mean()
        if normal_mean != 0:
            relative_diff = abs(outlier_mean - normal_mean) / abs(normal_mean)
            feature_diffs.append((feat, relative_diff, outlier_mean, normal_mean))

    feature_diffs.sort(key = lambda item: item[1], reverse = True)
    print(f"Top 10 most different features:")
    for feat, diff, _, __ in feature_diffs[:10]:
        print(f"  {feat}: {diff * 100:.1f}% difference")

    print(f"  Features with >50% difference: {sum(1 for t in feature_diffs if t[1] > 0.5)}")
    print(f"  Features with >100% difference: {sum(1 for t in feature_diffs if t[1] > 1.0)}")
    return feature_diffs


from sklearn.model_selection import KFold
import numpy as np

def train2compare_outlier_strategy(train, test, mode='single'):
    """Cross-validated comparison of outlier-weighting strategies over data slices.

    For every strategy in Config.OUTLIER_STRATEGIES the training frame is cut
    into the slices defined by Config.SLICE_CONFIGS (plus outlier-adjusted
    copies), each learner from Config.get_learners() is fitted per K-fold
    split, and the out-of-fold / test predictions are blended by slice weight.

    Parameters
    ----------
    train, test : DataFrames holding Config.FEATURES and (for train) Config.TARGET.
    mode : 'ensemble' blends all learners per strategy; any other value scores
        each (strategy, learner) pair independently.

    Returns
    -------
    (best_oof_pred, best_test_pred, strategy_res, best_strategy, best_combination)
    """
    # Drop rows whose target is inf/NaN so Pearson scoring stays defined.
    train = train.replace([np.inf, -np.inf], np.nan).dropna(subset=[Config.TARGET]).reset_index(drop=True)
    n = len(train)

    # 1. Initialise the result containers: one entry per strategy in
    #    'ensemble' mode, one per (strategy, learner) pair otherwise.
    if mode == 'ensemble':
        strategy_res = {s: {'oof_scores': [], 'slice_scores': []}
                        for s in Config.OUTLIER_STRATEGIES}
    else:
        strategy_res = {
            f"{s}_{l['name']}": {'oof_scores': [], 'slice_scores': []}
            for s in Config.OUTLIER_STRATEGIES
            for l in Config.get_learners()
        }

    best_strategy, best_score = 'reduce', -np.inf
    best_oof_pred = best_test_pred = best_combination = None

    # 2. Full-length time-decay weights; re-sliced per data slice below.
    base_weight = get_time_decay_weights(n)

    # shuffle=False keeps the chronological order across folds.
    folds = KFold(n_splits=Config.N_FOLDS, shuffle=False)

    for strategy in Config.OUTLIER_STRATEGIES:
        print(f'Comparing {strategy.upper()}')
        slices, slice_weights = get_slices_and_weights(n)

        # 3. Initialise oof / test prediction caches: learner -> slice -> array.
        oof_pred = {l['name']: {sl['name']: np.zeros(n) for sl in slices}
                    for l in Config.get_learners()}
        test_pred = {l['name']: {sl['name']: np.zeros(len(test)) for sl in slices}
                     for l in Config.get_learners()}

        for fold, (train_i, valid_i) in enumerate(folds.split(train), 1):
            print(f'Fold {fold}/{Config.N_FOLDS}')
            valid_x = train.iloc[valid_i][Config.FEATURES]
            valid_y = train.iloc[valid_i][Config.TARGET]

            for sl in slices:
                sl_name, anchor, after, adjust = (
                    sl['name'], sl['anchor'], sl['after'],
                    sl.get('adjust_outlier', False)
                )

                # 4. Build this slice's DataFrame and the fold indices that fall inside it.
                if after:
                    cut_df = train.iloc[anchor:].reset_index(drop=True)
                    idx_in_slice = train_i[(train_i >= anchor)] - anchor
                else:
                    cut_df = train.iloc[:anchor].reset_index(drop=True)
                    idx_in_slice = train_i[train_i < anchor]

                if len(idx_in_slice) == 0:
                    continue   # skip empty slices

                # 5. Slice X, y and weight in lockstep: all three must keep the same length.
                train_x = cut_df.iloc[idx_in_slice][Config.FEATURES]
                train_y = cut_df.iloc[idx_in_slice][Config.TARGET]
                weight  = base_weight[anchor:][idx_in_slice] if after else base_weight[:anchor][idx_in_slice]

                # 6. Optionally override the weights with the outlier strategy
                #    (expects detect_outlier_weights to return (mask, weights),
                #    with the adjusted weights the same length as train_y).
                if adjust and len(train_y) > 100:
                    _, weight = detect_outlier_weights(
                        train_x.values, train_y.values, weight,
                        Config.OUTLIER_FRACTION, strategy)

                # 7. Fit every learner on this slice and predict.
                for learner in Config.get_learners():
                    model = learner['estimator'](**learner['params'])
                    # NOTE(review): the five prints below look like leftover
                    # debugging output; consider removing or demoting to logging.
                    print(learner['name'], type(model))
                    print(train_x.shape[0], len(train_y), len(weight))
                    print(type(train_x), train_x.dtypes.unique())
                    print(type(train_y), train_y.dtype)
                    print(type(weight), weight.dtype)
                    fit_kwargs = dict(
                        X=train_x,
                        y=train_y,
                        sample_weight=weight
                    )

                    # Only XGB / CatBoost accept eval_set together with verbose here.
                    if learner['name'] == 'xgb':
                        fit_kwargs.update(eval_set=[(valid_x, valid_y)], verbose=False)
                    elif learner['name'] == 'cat':
                        fit_kwargs.update(eval_set=[(valid_x, valid_y)], verbose=False)
                    elif learner['name'] == 'lgb':
                        fit_kwargs['eval_set'] = [(valid_x, valid_y)]   # LightGBM's fit() takes no verbose kwarg
                    # RandomForest gets no extra fit kwargs.

                    model.fit(**fit_kwargs)

                    # 8. Record oof / test predictions for this slice.
                    if after:
                        mask = valid_i >= anchor
                        if mask.any():
                            idx = valid_i[mask]
                            oof_pred[learner['name']][sl_name][idx] = \
                                model.predict(train.iloc[idx][Config.FEATURES])
                        if anchor and (~mask).any():
                            # Validation rows before the anchor are outside this slice;
                            # fall back to the corresponding 'full' slice's oof values.
                            fallback = 'full_adjust_outlier' if adjust else 'full'
                            oof_pred[learner['name']][sl_name][valid_i[~mask]] = \
                                oof_pred[learner['name']][fallback][valid_i[~mask]]
                    else:
                        oof_pred[learner['name']][sl_name][valid_i] = \
                            model.predict(train.iloc[valid_i][Config.FEATURES])

                    test_pred[learner['name']][sl_name] += \
                        model.predict(test[Config.FEATURES])

        # 9. Average the accumulated test predictions over the folds.
        for l_name in test_pred:
            for sl_name in test_pred[l_name]:
                test_pred[l_name][sl_name] /= Config.N_FOLDS

        # 10. Score this strategy and update the running best combination.
        if mode == 'ensemble':
            ensemble_oof, ensemble_test = evaluate_ensemble_strategy(
                oof_pred, test_pred, train, strategy, strategy_res, slice_weights)
            if strategy_res[strategy]['ensemble_score'] > best_score:
                best_score = strategy_res[strategy]['ensemble_score']
                best_strategy, best_combination = strategy, f'Ensemble + {strategy}'
                best_oof_pred, best_test_pred = ensemble_oof, ensemble_test
        else:
            best_score, best_strategy, best_oof_pred, best_test_pred, best_combination = \
                evaluate_single_model_strategy(
                    oof_pred, test_pred, train, strategy, strategy_res, slice_weights,
                    best_score, best_strategy, best_oof_pred, best_test_pred, best_combination)

    return best_oof_pred, best_test_pred, strategy_res, best_strategy, best_combination

def evaluate_ensemble_strategy(oof_pred, test_pred, train, strategy, strategy_res, slice_weights, method = 'grid'):
    """Blend per-slice predictions for each learner, then search for model weights.

    Per learner, slice predictions are combined with `slice_weights`; the
    resulting per-model OOF columns are then weighted either by a two-stage
    grid search or by Ridge stacking (`method`). Everything is stored in
    strategy_res[strategy]; returns (ensemble_oof, ensemble_test).
    """
    print('\nEvaluating ensemble strategy starting...')
    dic, model_oof_res, model_test_res, model_scores = {}, {}, {}, {}
    learner_names = [learner['name'] for learner in Config.get_learners()]
    
    # Collapse the slice dimension: (n_samples, n_slices) @ slice_weights.
    for learner_name in learner_names:
        model_oof = pd.DataFrame(oof_pred[learner_name]).values @ slice_weights
        model_test = pd.DataFrame(test_pred[learner_name]).values @ slice_weights
        model_score = pearsonr(train[Config.TARGET], model_oof)[0]

        model_oof_res[learner_name], model_test_res[learner_name] = model_oof, model_test
        model_scores[learner_name] = model_score
        print(f"\t{learner_name} score: {model_score:.4f}")

    true = train[Config.TARGET].values
    # Column order is pinned to learner_names so the weights line up below.
    model_oof_df, model_test_df = pd.DataFrame(model_oof_res)[learner_names], pd.DataFrame(model_test_res)[learner_names]
    
    if method == 'grid':
        print('\nTwo-stage grid search for model weights...')
        model_weights, ensemble_score, info = weightSearch_grid(model_oof_df, true)
    elif method == 'stacking':
        print('\nStacking Ridge fitting model weights...')
        # NOTE(review): the second return value here is actually the ensemble
        # score, not weights — the name 'ensemble_weights' is misleading (unused).
        model_weights, ensemble_weights, info = weightSearch_stacking(model_oof_df, true)
    else: raise ValueError(f'Unsupport model weight search method: {method}')
    dic['info'] = info  # NOTE(review): redundantly assigned again below with the same value

    # Apply the found model weights to both OOF and test matrices.
    ensemble_oof = model_oof_df.values @ pd.Series(model_weights)[learner_names].values
    ensemble_test = model_test_df.values @ pd.Series(model_weights)[learner_names].values
    final_score = pearsonr(true, ensemble_oof)[0]
    print(f"strategy {strategy} final result:\n\tmethod: {method}\n\tscore: {final_score:.4f}")

    dic['ensemble_score'], dic['oof_pred'], dic['test_pred'], dic['weight_method'] = final_score, ensemble_oof, ensemble_test, method
    dic['info'], dic['model_weights'], dic['model_scores'], dic['slice_weights'] = info, model_weights, model_scores, slice_weights
    strategy_res[strategy] = dic

    return ensemble_oof, ensemble_test


def weightSearch_grid(model_oof_df, true, stride1 = 0.1, stride2 = 0.025):
    """Two-stage grid search for ensemble weights that minimise the OOF MSE.

    Stage 1 scans coarse weight combinations (step `stride1`) whose sum is
    exactly 1; stage 2 refines around the coarse optimum with step `stride2`,
    allowing the raw sum to drift in [0.8, 1.2] before renormalising.

    Returns (weights_dict, best_mse, search_info).
    """
    model_names, n_models = model_oof_df.columns.tolist(), len(model_oof_df.columns)
    print('\nStage 1: Coarse search')
    ranges = [round(i * stride1, 1) for i in range(int(1 / stride1) + 1)]
    # Bug fix: MSE is an error metric (lower is better). The previous code
    # initialised the best score to -inf and kept the LARGEST MSE, i.e. it
    # systematically selected the WORST weight combination.
    best_score, best_weights, search_times = np.inf, None, 0

    for weights in product(ranges, repeat = n_models):
        if abs(sum(weights) - 1) > 1e-6: continue  # weights must sum to 1
        if all(w == 0 for w in weights): continue

        search_times += 1
        ensemble_pred = model_oof_df @ weights
        # Plain-numpy MSE — equivalent to sklearn's mean_squared_error but
        # without the extra dependency.
        score = np.mean((true - ensemble_pred) ** 2)
        if score < best_score:
            best_score, best_weights = score, weights
        if search_times % 1000 == 0:
            print(f"  Tested {search_times} combinations, current best: {best_score:.4f}")

    print(f"Stage 1 completed: {best_score:.4f}")
    print(f"Best weights: {[f'{w:.1f}' for w in best_weights]}")


    print('Stage 2 starting...')
    # Candidate values per model: coarse optimum ± 2 * stride2, clipped to [0, 1].
    fine_ranges = []
    for i in range(n_models):
        center = best_weights[i]
        min_val, max_val = max(0.0, center - stride2 * 2), min(1.0, center + stride2 * 2)
        candidates, current = [], min_val
        while current <= max_val + 1e-6:  # small epsilon guards against float drift
            candidates += [round(current, 3)]
            current += stride2
        fine_ranges += [candidates]

    print("Fine search range:")
    for model_name, candidates in zip(model_names, fine_ranges):
        print(f"  {model_name}: {len(candidates)} candidates [{candidates[0]:.3f}, {candidates[-1]:.3f}]")

    best_fine_score, best_fine_weights, fine_times = best_score, list(best_weights), 0
    for weights_fine in product(*fine_ranges):
        weights_fine = np.array(weights_fine)
        weights_sum = weights_fine.sum()
        if weights_sum < 0.8 or weights_sum > 1.2: continue  # sum too far from 1, skip
        weights_fine = weights_fine / weights_sum  # renormalise to sum 1
        fine_times += 1

        ensemble_pred_fine = model_oof_df @ weights_fine
        score_fine = np.mean((true - ensemble_pred_fine) ** 2)
        if score_fine < best_fine_score:  # bug fix: minimise, don't maximise
            best_fine_score, best_fine_weights = score_fine, weights_fine.tolist()
        if fine_times % 500 == 0:
            print(f"  Tested {fine_times} combinations, current best: {best_fine_score:.4f}")

    print(f"Fine search completed: {best_fine_score:.4f}")
    # Improvement = how much the error dropped from stage 1 to stage 2.
    print(f"Performance improvement: {best_score - best_fine_score:.4f}")

    # Final weight dictionary keyed by model name.
    best_weights_dict = dict(zip(model_names, best_fine_weights))
    search_info = {"search_times": search_times, "fine_times": fine_times,
                   "final_score": best_fine_score, "improvement": best_score - best_fine_score}
    return best_weights_dict, best_fine_score, search_info


def weightSearch_stacking(model_oof_df, true):
    """Fit a Ridge meta-learner on the OOF matrix to derive ensemble weights.

    Negative Ridge coefficients are clipped to zero; if everything gets
    clipped the weights fall back to a uniform split, otherwise they are
    renormalised to sum to 1. Returns (weight_dict, ensemble_score, info).
    """
    print('\nStacking weight search...')
    model_names = model_oof_df.columns.tolist()
    n_models = len(model_names)

    meta_learner = Ridge(alpha = 1.0, random_state = Config.RANDOM_STATE)
    meta_learner.fit(model_oof_df, true)

    # Clip negative coefficients, then renormalise (uniform fallback if all zero).
    weights = np.maximum(meta_learner.coef_, 0)
    total = weights.sum()
    if total > 0:
        weights = weights / total
    else:
        weights = np.ones(n_models) / n_models

    ensemble_pred = model_oof_df @ weights
    ensemble_score = pearsonr(true, ensemble_pred)[0]

    # 3-fold CV of the meta-learner serves as a stability indicator only.
    cv_scores = cross_val_score(meta_learner, model_oof_df, true, cv = 3, scoring = 'neg_mean_squared_error')
    cv_std = cv_scores.std()

    print(f"Stacking result: {ensemble_score:.4f}")
    print(f"CV stability (std): {cv_std:.4f}")
    print(f"Model weights: {[f'{w:.3f}' for w in weights]}")

    weight_dict = dict(zip(model_names, weights))
    search_info = {"method": "stacking", "meta_learner": "Ridge", "cv_stability": cv_std, "ensemble_score": ensemble_score}

    return weight_dict, ensemble_score, search_info


def evaluate_single_model_strategy(oof_pred, test_pred, train, strategy, strategy_res, slice_weights,
                                   best_score, best_strategy, best_oof_pred, best_test_pred, best_combination):
    """Score every learner individually under `strategy` and track the best combo.

    OOF/test slice predictions are blended with `slice_weights`, scored by
    Pearson correlation against the target, and stored in `strategy_res`
    under the '<strategy>_<learner>' key. Returns the updated best-so-far
    tuple so the caller can keep threading it through the strategies.
    """
    target = train[Config.TARGET]
    for learner in Config.get_learners():
        learner_name = learner['name']
        print(f"{strategy} single model: {learner_name}")
        key = f"{strategy}_{learner_name}"

        # Collapse the slice dimension with the normalised slice weights.
        blended_oof = pd.DataFrame(oof_pred[learner_name]).values @ slice_weights
        blended_test = pd.DataFrame(test_pred[learner_name]).values @ slice_weights
        score = pearsonr(target, blended_oof)[0]
        print(f"\t score: {score:.4f}")

        entry = strategy_res[key]
        entry['ensemble_score'] = score
        entry['oof_pred'], entry['test_pred'] = blended_oof, blended_test

        if score > best_score:
            best_score = score
            best_strategy = key
            best_oof_pred, best_test_pred = blended_oof, blended_test
            best_combination = f"{learner_name.upper()} {strategy}"

    return best_score, best_strategy, best_oof_pred, best_test_pred, best_combination


def print_strategy_comparison(strategy_res, mode, best_combination):
    """Pretty-print the strategy comparison; return the ranked single results.

    In 'ensemble' mode per-strategy and per-model scores are listed and None
    is returned; otherwise the (combination, score) pairs are ranked by score
    and the sorted list is returned (top 10 printed).
    """
    print(f"\nFINAL RESULTS - MODE: {mode.upper()}")
    single_res = None
    if mode == 'ensemble':
        print("Ensemble Results:")
        for strategy in Config.OUTLIER_STRATEGIES:
            score = strategy_res[strategy]['ensemble_score']
            print(f"\t{strategy}: {score:.4f}")
            for model_name, model_score in strategy_res[strategy]['model_scores'].items():
                print(f"\t\t{model_name}: {model_score:.4f}")
    else:
        print("Single Results:")
        single_res = sorted(
            ((key, res['ensemble_score']) for key, res in strategy_res.items()),
            key = lambda kv: kv[1], reverse = True)
        for combination, score in single_res[:10]: # Top 10
            print(f"\t{combination}: {score:.4f}")

    print(f"\nBest Combination: {best_combination}")
    return single_res





def train_mlp_model(train, test, config = None):
    """Train the MLP on Config.MLP_FEATURES with early stopping, then predict on `test`.

    The last 20% of `train` (chronological order preserved) is held out for
    validation; features are standardised with a StandardScaler fitted on the
    training portion only. The epoch with the best validation Pearson score
    is checkpointed and restored before predicting.

    Returns
    -------
    (predictions, best_val_score) : test-set predictions (flat numpy array)
    and the best validation Pearson reached during training.
    """
    if config is None:
        config = Config.MLP_CONFIG
    
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    X_train_full = train[Config.MLP_FEATURES].values
    y_train_full = train[Config.TARGET].values
    # NOTE(review): random_state has no effect when shuffle=False — the split
    # is simply the last 20% of rows (a chronological holdout).
    X_train, X_val, y_train, y_val = train_test_split(X_train_full, y_train_full, test_size = 0.2, shuffle = False, random_state = Config.RANDOM_STATE)

    # Fit the scaler on the training portion only to avoid validation leakage.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_val = scaler.transform(X_val)
    X_test = scaler.transform(test[Config.MLP_FEATURES].values)

    # Targets get a trailing dim so they match the model's (batch, 1) output.
    train_dataset = TensorDataset(torch.tensor(X_train, dtype = torch.float32), torch.tensor(y_train, dtype = torch.float32).unsqueeze(1))
    val_dataset = TensorDataset(torch.tensor(X_val, dtype = torch.float32), torch.tensor(y_val, dtype = torch.float32).unsqueeze(1))
    test_dataset = TensorDataset(torch.tensor(X_test, dtype = torch.float32))
    train_loader = DataLoader(train_dataset, batch_size = config['batch_size'], shuffle = True)
    val_loader = DataLoader(val_dataset, batch_size = config['batch_size'], shuffle = False)
    test_loader = DataLoader(test_dataset, batch_size = config['batch_size'], shuffle = False)

    model = MLP(layers = config['layers'], activation = config['activation'], last_activation = config['last_activation'], dropout_rate = config['dropout_rate']).to(device)
    # Huber loss is less sensitive to target outliers than a plain MSE.
    criterion = nn.HuberLoss(delta = 5.0, reduction = 'mean')
    optimizer = optim.Adam(model.parameters(), lr = config['learning_rate'])
    checkpointer = CheckPointer(path = os.path.join(Config.RESULTS_DIR, 'best_mlp_model.pt'))

    print(f"Starting MLP model training, epochs: {config['epochs']}")
    best_val_score = -np.inf
    patience_counter = 0
    patience = config.get('patience', 10)
    
    for epoch in range(config['epochs']):
        model.train()
        running_loss = 0.0
        
        for inputs, targets in train_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        
        # Validation pass: collect predictions to score with Pearson correlation.
        model.eval()
        val_preds, val_trues = [], []
        with torch.no_grad():
            for inputs, targets in val_loader:
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = model(inputs)
                val_preds += [outputs.cpu().numpy()]
                val_trues += [targets.cpu().numpy()]
        
        val_preds = np.concatenate(val_preds).flatten()
        val_trues = np.concatenate(val_trues).flatten()
        val_score = pearsonr(val_preds, val_trues)[0]
        print(f"Epoch {epoch+1}/{config['epochs']}: Train Loss: {running_loss/len(train_loader):.4f}, Val Score: {val_score:.4f}")

        # Early stopping on the validation Pearson score; the checkpointer
        # persists the model only when the score improves.
        if val_score > best_val_score:
            best_val_score = val_score
            patience_counter = 0
            checkpointer(val_score, model)
        else: patience_counter += 1
            
        if patience_counter >= patience:
            print(f"Early stopping at epoch {epoch+1}")
            break
    
    # Reload the best checkpoint and predict on the test loader.
    model = checkpointer.load(model)
    model.eval()
    predictions = []
    with torch.no_grad():
        for inputs, in test_loader:
            inputs = inputs.to(device)
            outputs = model(inputs)
            predictions += [outputs.cpu().numpy()]
    
    predictions = np.concatenate(predictions).flatten()
    return predictions, best_val_score


def create_ensemble_submission(ml_predictions, mlp_predictions, submission, ml_weight = 0.8, mlp_weight = 0.2, strategy = 'ensemble'):
    """Blend ML and MLP predictions and write the ensemble submission CSV.

    Raises ValueError when the two prediction arrays differ in length.
    Returns (ensemble_pred, filepath).
    """
    if len(ml_predictions) != len(mlp_predictions):
        raise ValueError(f"预测长度不匹配: ML({len(ml_predictions)}) vs MLP({len(mlp_predictions)})")

    blended = ml_weight * ml_predictions + mlp_weight * mlp_predictions
    out = submission.copy()
    out[Config.TARGET] = blended

    # Weights and strategy are encoded into the file name for traceability.
    ensemble_filename = f"submission_ensemble_{strategy}_{ml_weight:.1f}ml_{mlp_weight:.1f}mlp.csv"
    ensemble_filepath = os.path.join(Config.SUBMISSION_DIR, ensemble_filename)
    out.to_csv(ensemble_filepath, index = False)
    print(f"Ensemble submission file saved: {ensemble_filepath}")

    return blended, ensemble_filepath


def save2csv(submission_, predictions, score, models = "ML"):
    """Write `predictions` into a copy of the submission frame and save it.

    The model tag and score are embedded in the file name; returns the path.
    """
    out = submission_.copy()
    out[Config.TARGET] = predictions
    filepath = os.path.join(Config.SUBMISSION_DIR, f"submission_{models}_{score:.4f}.csv")
    out.to_csv(filepath, index = False)
    print(f"{models} submission saved to {filepath}")
    return filepath


def create_multiple_submissions(train, ml_predictions, mlp_predictions, submission, best_strategy, ml_score, mlp_score):
    """Save ML, MLP and several blended submissions; recommend the stronger one.

    `train` is currently unused but kept for interface compatibility.
    Returns (best_predictions, best_filename), picked by validation score.
    """
    ml_filename = save2csv(submission, ml_predictions, ml_score, 'ML')
    mlp_filename = save2csv(submission, mlp_predictions, mlp_score, 'MLP')

    # (ml_weight, mlp_weight, label) for each blended variant.
    ensemble_configs = [
        (0.9, 0.1, "conservative"),  # conservative: lean mostly on the ML models
        (0.7, 0.3, "balanced"),      # balanced blend
        (0.5, 0.5, "equal"),         # equal weighting
    ]

    ensemble_files = []
    for ml_w, mlp_w, desc in ensemble_configs:
        _, ensemble_file = create_ensemble_submission(
            ml_predictions, mlp_predictions, submission, ml_w, mlp_w, f"{best_strategy}_{desc}")
        ensemble_files.append(ensemble_file)

    # Recommend whichever single-model submission scored higher.
    if ml_score > mlp_score:
        best_final_pred, best_filename, best_type = ml_predictions, ml_filename, "ML"
    else:
        best_final_pred, best_filename, best_type = mlp_predictions, mlp_filename, "MLP"

    print(f"\nRecommended submission: {best_filename} ({best_type})")
    print(f"All generated files:")
    for ef in ensemble_files:
        print(f"  - {ef}")

    return best_final_pred, best_filename