# File size: 2,458 Bytes
# c687548
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from functools import lru_cache

import numpy as np
import optuna
import pandas as pd
from scipy.stats import pearsonr
from sklearn.model_selection import KFold, cross_val_score
from xgboost import XGBRegressor

# Configuration
class Config:
    """Static settings for the tuning run: data location, feature set, CV setup."""

    # Parquet file holding the training frame.
    TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/train.parquet"
    # Model input columns, in the order they are fed to the regressor.
    FEATURES = [
        "X863", "X856", "X598", "X862", "X385",
        "X852", "X603", "X860", "X674", "X345",
        "X855", "X302", "X178", "X168", "X612",
        "sell_qty", "bid_qty", "ask_qty", "buy_qty", "volume",
    ]
    # Target column name.
    LABEL_COLUMN = "label"
    # Number of cross-validation folds.
    N_FOLDS = 3
    # Seed shared by the CV splitter and the model.
    RANDOM_STATE = 42

def pearson_scorer(y_true, y_pred):
    """Return the Pearson correlation coefficient between targets and predictions.

    NOTE(review): currently unused — `objective` scores with "r2"; confirm
    whether Pearson was the intended CV metric.
    """
    corr, _p_value = pearsonr(y_true, y_pred)
    return corr

@lru_cache(maxsize=1)
def _load_training_data():
    """Load the training features/label once and reuse them across trials.

    The original implementation re-read the full parquet file from disk on
    every Optuna trial; the data never changes between trials, so it is read
    once and cached for the lifetime of the process.

    Returns:
        (X, y): feature DataFrame (Config.FEATURES columns, in order) and
        label Series (Config.LABEL_COLUMN).
    """
    train_df = pd.read_parquet(
        Config.TRAIN_PATH, columns=Config.FEATURES + [Config.LABEL_COLUMN]
    )
    return train_df[Config.FEATURES], train_df[Config.LABEL_COLUMN]


def objective(trial):
    """Optuna objective: mean K-fold CV score for one sampled XGBoost config.

    Args:
        trial: optuna.trial.Trial used to sample hyperparameters.

    Returns:
        Mean cross-validated score across Config.N_FOLDS folds, capped at 0
        when it exceeds 0.25 (anti-overfitting guard), to be maximized.
    """
    X, y = _load_training_data()

    params = {
        "tree_method": "hist",
        "device": "gpu",
        "colsample_bylevel": trial.suggest_float("colsample_bylevel", 0.2, 1.0),
        "colsample_bynode": trial.suggest_float("colsample_bynode", 0.2, 1.0),
        "colsample_bytree": trial.suggest_float("colsample_bytree", 0.2, 1.0),
        "gamma": trial.suggest_float("gamma", 0, 5),
        "learning_rate": trial.suggest_float("learning_rate", 0.01, 0.05, log=True),
        "max_depth": trial.suggest_int("max_depth", 3, 24),
        "max_leaves": trial.suggest_int("max_leaves", 4, 32),
        "min_child_weight": trial.suggest_int("min_child_weight", 1, 32),
        "n_estimators": trial.suggest_int("n_estimators", 300, 2000),
        "subsample": trial.suggest_float("subsample", 0.05, 1.0),
        "reg_alpha": trial.suggest_float("reg_alpha", 0, 50),
        "reg_lambda": trial.suggest_float("reg_lambda", 0, 100),
        "verbosity": 0,
        "random_state": Config.RANDOM_STATE,
        "n_jobs": -1,
    }

    model = XGBRegressor(**params)
    kf = KFold(n_splits=Config.N_FOLDS, shuffle=True, random_state=Config.RANDOM_STATE)
    # NOTE(review): scoring is "r2" even though `pearson_scorer` is defined
    # above and never used — confirm which metric is actually intended.
    scores = cross_val_score(model, X, y, cv=kf, scoring="r2", n_jobs=-1)
    mean_score = np.mean(scores)

    # Cap suspiciously high scores to steer the search away from
    # overfit configurations.
    if mean_score > 0.25:
        return 0  # alternatives considered: return -1
    return mean_score

if __name__ == "__main__":
    study = optuna.create_study(direction="maximize")
    study.optimize(objective, n_trials=15)  # 可根据算力调整n_trials
    print("最优参数:", study.best_params)
    print("最优得分:", study.best_value)