# pipeline_xgb.py
import sys
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from sklearn.linear_model import (
HuberRegressor, RANSACRegressor, TheilSenRegressor,
Lasso, ElasticNet, Ridge
)
from sklearn.cross_decomposition import PLSRegression
from sklearn.preprocessing import StandardScaler, RobustScaler
from sklearn.ensemble import RandomForestRegressor
from scipy.stats import pearsonr
import warnings
from sklearn.decomposition import PCA
warnings.filterwarnings('ignore')
# ===== Feature Engineering =====
def feature_engineering(df):
"""Original features plus new robust features"""
# Original features
df['volume_weighted_sell'] = df['sell_qty'] * df['volume']
df['buy_sell_ratio'] = df['buy_qty'] / (df['sell_qty'] + 1e-8)
df['selling_pressure'] = df['sell_qty'] / (df['volume'] + 1e-8)
df['effective_spread_proxy'] = np.abs(df['buy_qty'] - df['sell_qty']) / (df['volume'] + 1e-8)
# New robust features
df['log_volume'] = np.log1p(df['volume'])
df['bid_ask_imbalance'] = (df['bid_qty'] - df['ask_qty']) / (df['bid_qty'] + df['ask_qty'] + 1e-8)
df['order_flow_imbalance'] = (df['buy_qty'] - df['sell_qty']) / (df['buy_qty'] + df['sell_qty'] + 1e-8)
df['liquidity_ratio'] = (df['bid_qty'] + df['ask_qty']) / (df['volume'] + 1e-8)
# Handle infinities and NaN
df = df.replace([np.inf, -np.inf], np.nan)
# For each column, replace NaN with median for robustness
for col in df.columns:
if df[col].isna().any():
median_val = df[col].median()
df[col] = df[col].fillna(median_val if not pd.isna(median_val) else 0)
return df
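# Minimal sanity-check sketch for feature_engineering (not called anywhere in the
# pipeline). The tiny frame and its values below are made-up illustrations, not real
# market data; the second row exercises the zero-volume / zero-quantity path that the
# 1e-8 stabilizers and the median fill are meant to handle.
def _demo_feature_engineering():
    demo = pd.DataFrame({
        "buy_qty":  [10.0, 0.0],
        "sell_qty": [5.0, 0.0],
        "volume":   [100.0, 0.0],
        "bid_qty":  [40.0, 1.0],
        "ask_qty":  [60.0, 1.0],
    })
    out = feature_engineering(demo.copy())
    print(out[["buy_sell_ratio", "order_flow_imbalance", "liquidity_ratio"]])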
# ===== Configuration =====
class Config:
ORIGIN_TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/train.parquet"
ORIGIN_TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/test.parquet"
TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/ZMJ/threshold_6_29/train_final.parquet"
TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/ZMJ/threshold_6_29/test_final.parquet"
SUBMISSION_PATH = "/AI4M/users/mjzhang/workspace/DRW/ZMJ/threshold_6_29/sample_submission.csv"
# Original features plus additional market features
FEATURES = [
"X863", "X856", "X598", "X862", "X385", "X852", "X603", "X860", "X674",
"X415", "X345", "X855", "X174", "X302", "X178", "X168", "X612",
"buy_qty", "sell_qty", "volume", "X888", "X421", "X333",
"bid_qty", "ask_qty"
]
MERGE = True
LABEL_COLUMN = "label"
N_FOLDS = 5
RANDOM_STATE = 42
    # PCA-related settings
    USE_PCA = True           # whether to apply PCA dimensionality reduction
    PCA_N_COMPONENTS = 20    # target number of components; note that load_data() currently ignores this and keeps 95% explained variance instead
# ===== Model Parameters =====
# Only the XGBoost parameters are kept
XGB_PARAMS = {
"tree_method": "hist",
"device": "gpu",
"colsample_bylevel": 0.4778,
"colsample_bynode": 0.3628,
"colsample_bytree": 0.7107,
"gamma": 1.7095,
"learning_rate": 0.02213,
"max_depth": 20,
"max_leaves": 12,
"min_child_weight": 16,
"n_estimators": 1667,
"subsample": 0.06567,
"reg_alpha": 39.3524,
"reg_lambda": 75.4484,
"verbosity": 0,
"random_state": Config.RANDOM_STATE,
"n_jobs": -1
}
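# Alternative, more conservative XGBoost parameter set kept below for reference (unused).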
# XGB_PARAMS = {
# "tree_method": "hist",
# "device": "gpu",
# "colsample_bylevel": 0.3,
# "colsample_bynode": 0.25,
# "colsample_bytree": 0.5,
# "gamma": 2.5,
# "learning_rate": 0.015,
# "max_depth": 12,
# "max_leaves": 8,
# "min_child_weight": 25,
# "n_estimators": 2000,
# "subsample": 0.7,
# "reg_alpha": 50,
# "reg_lambda": 100,
# "verbosity": 0,
# "random_state": Config.RANDOM_STATE,
# "n_jobs": -1
# }
# Only XGBoost is used
LEARNERS = [
{"name": "xgb_baseline", "Estimator": XGBRegressor, "params": XGB_PARAMS, "need_scale": False},
]
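# With need_scale=False for the single XGBoost learner, the RobustScaler branch in
# train_single_model below is never exercised under this configuration.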
# ===== Data Loading =====
def create_time_decay_weights(n: int, decay: float = 0.9) -> np.ndarray:
"""Create time decay weights for more recent data importance"""
positions = np.arange(n)
    normalized = positions / max(n - 1, 1)  # guard against n == 1
weights = decay ** (1.0 - normalized)
return weights * n / weights.sum()
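# How the decay weights behave: with decay = 0.9 the oldest sample is weighted in
# proportion to 0.9**1 = 0.9 and the newest to 0.9**0 = 1.0, with a smooth geometric
# ramp in between; the final rescaling makes the weights sum to n, so the effective
# sample size is unchanged. A hypothetical check (values rounded):
#   create_time_decay_weights(5)  ->  roughly [0.95, 0.97, 1.00, 1.03, 1.05]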
def merge_origin_and_df(origin_df, df, features):
"""
合并 origin_df 和 df,仅使用 origin_df 中指定的列,并保留一个 label 列。
参数:
origin_df (pd.DataFrame): 原始数据 DataFrame。
df (pd.DataFrame): 处理后的数据 DataFrame。
features (list): 需要从 origin_df 中提取的列名列表。
返回:
pd.DataFrame: 合并后的 DataFrame。
"""
# 确保两个 DataFrame 的索引一致
assert origin_df.index.equals(df.index), "两个 DataFrame 的索引必须一致"
# 筛选 origin_df 中的指定列
origin_selected = origin_df[features]
# # 删除 df 中的 label 列(避免重复)
# df_cleaned = df.drop(columns=['label'], errors='ignore')
# 横向合并 origin_df 的指定列 和 df 的其余列
merged_df = pd.concat([origin_selected, df], axis=1)
return merged_df
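# Caution: nothing is dropped from df before the concat, so any column name that appears
# both in the selected origin_df features and in df will be duplicated in the result
# (the commented-out df_cleaned line above was one way to guard against that for 'label').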
def load_data():
"""Load and preprocess data"""
origin_train_df = pd.read_parquet(Config.ORIGIN_TRAIN_PATH)
origin_test_df = pd.read_parquet(Config.ORIGIN_TEST_PATH)
train_df = pd.read_parquet(Config.TRAIN_PATH)
test_df = pd.read_parquet(Config.TEST_PATH)
submission_df = pd.read_csv(Config.SUBMISSION_PATH)
Config.AGGREGATE_FEATURES = [col for col in train_df.columns.tolist() if col != 'label']
    if Config.MERGE:
# Apply feature engineering
origin_train_df = feature_engineering(origin_train_df)
origin_test_df = feature_engineering(origin_test_df)
# Update features list with engineered features
engineered_features = [
"volume_weighted_sell", "buy_sell_ratio", "selling_pressure",
"effective_spread_proxy", "log_volume", "bid_ask_imbalance",
"order_flow_imbalance", "liquidity_ratio"
]
Config.FEATURES = list(set(Config.FEATURES + engineered_features))
merged_train_df = merge_origin_and_df(origin_train_df, train_df, Config.FEATURES)
merged_test_df = merge_origin_and_df(origin_test_df, test_df, Config.FEATURES)
Config.FEATURES = [col for col in merged_train_df.columns.tolist() if col != 'label']
else:
Config.FEATURES = Config.AGGREGATE_FEATURES
merged_train_df = train_df
merged_test_df = test_df
    # ====== PCA dimensionality reduction ======
    if getattr(Config, 'USE_PCA', False):
        print("Applying PCA to capture 95% variance...")
        pca = PCA(n_components=0.95, random_state=Config.RANDOM_STATE)
X_train = merged_train_df[Config.FEATURES].values
X_test = merged_test_df[Config.FEATURES].values
X_train_pca = pca.fit_transform(X_train)
X_test_pca = pca.transform(X_test)
n_pca = X_train_pca.shape[1]
print(f"PCA reduced features to {n_pca} dimensions (95% variance)")
pca_feature_names = [f"PCA_{i}" for i in range(n_pca)]
merged_train_df = pd.DataFrame(X_train_pca, columns=pca_feature_names)
merged_test_df = pd.DataFrame(X_test_pca, columns=pca_feature_names)
        # Keep the label column
merged_train_df[Config.LABEL_COLUMN] = train_df[Config.LABEL_COLUMN].values
if Config.LABEL_COLUMN in merged_test_df.columns:
merged_test_df = merged_test_df.drop(columns=[Config.LABEL_COLUMN])
Config.FEATURES = pca_feature_names
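        # Note: PCA is fit on the full training set before the CV split, so fold
        # validation scores can be slightly optimistic; fitting PCA inside each fold
        # would avoid that leakage at extra compute cost.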
print(f"Loaded data - Train: {merged_train_df.shape}, Test: {merged_test_df.shape}, Submission: {submission_df.shape}")
print(f"Total features: {len(Config.FEATURES)}")
return merged_train_df.reset_index(drop=True), merged_test_df.reset_index(drop=True), submission_df
# ===== Model Training =====
def get_model_slices(n_samples: int):
"""Define different data slices for training"""
return [
{"name": "full_data", "cutoff": 0},
{"name": "last_75pct", "cutoff": int(0.25 * n_samples)},
{"name": "last_50pct", "cutoff": int(0.50 * n_samples)},
]
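# Each slice trains on a progressively more recent window: "full_data" uses every row,
# "last_75pct" drops the oldest 25%, and "last_50pct" keeps only the newer half. The
# per-slice out-of-fold and test predictions are averaged later in create_submissions.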
def train_single_model(X_train, y_train, X_valid, y_valid, X_test, learner, sample_weights=None):
"""Train a single model with appropriate scaling if needed"""
if learner["need_scale"]:
scaler = RobustScaler() # More robust to outliers than StandardScaler
X_train_scaled = scaler.fit_transform(X_train)
X_valid_scaled = scaler.transform(X_valid)
X_test_scaled = scaler.transform(X_test)
else:
X_train_scaled = X_train
X_valid_scaled = X_valid
X_test_scaled = X_test
model = learner["Estimator"](**learner["params"])
# Handle different model training approaches
if learner["name"] in ["xgb_baseline"]:
model.fit(X_train_scaled, y_train, sample_weight=sample_weights,
eval_set=[(X_valid_scaled, y_valid)], verbose=False)
else:
        # Fallback for models that don't support sample weights (e.g., RANSAC, TheilSen, PLS)
model.fit(X_train_scaled, y_train)
valid_pred = model.predict(X_valid_scaled)
test_pred = model.predict(X_test_scaled)
return valid_pred, test_pred
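# Note: eval_set is passed to XGBoost only for monitoring; no early_stopping_rounds is
# configured, so each model trains for the full n_estimators boosting rounds.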
def train_and_evaluate(train_df, test_df):
"""只训练XGBoost模型,交叉验证"""
n_samples = len(train_df)
model_slices = get_model_slices(n_samples)
    # Initialize prediction dictionaries
oof_preds = {
"xgb_baseline": {s["name"]: np.zeros(n_samples) for s in model_slices}
}
test_preds = {
"xgb_baseline": {s["name"]: np.zeros(len(test_df)) for s in model_slices}
}
full_weights = create_time_decay_weights(n_samples)
kf = KFold(n_splits=Config.N_FOLDS, shuffle=False)
for fold, (train_idx, valid_idx) in enumerate(kf.split(train_df), start=1):
print(f"\n--- Fold {fold}/{Config.N_FOLDS} ---")
X_valid = train_df.iloc[valid_idx][Config.FEATURES]
y_valid = train_df.iloc[valid_idx][Config.LABEL_COLUMN]
X_test = test_df[Config.FEATURES]
for s in model_slices:
cutoff = s["cutoff"]
slice_name = s["name"]
subset = train_df.iloc[cutoff:].reset_index(drop=True)
rel_idx = train_idx[train_idx >= cutoff] - cutoff
if len(rel_idx) == 0:
continue
X_train = subset.iloc[rel_idx][Config.FEATURES]
y_train = subset.iloc[rel_idx][Config.LABEL_COLUMN]
sw = create_time_decay_weights(len(subset))[rel_idx] if cutoff > 0 else full_weights[train_idx]
print(f" Training slice: {slice_name}, samples: {len(X_train)}")
            # Only train XGBoost
learner = LEARNERS[0]
try:
valid_pred, test_pred = train_single_model(
X_train, y_train, X_valid, y_valid, X_test, learner, sw
)
# Store OOF predictions
mask = valid_idx >= cutoff
if mask.any():
idxs = valid_idx[mask]
oof_preds[learner["name"]][slice_name][idxs] = valid_pred[mask]
if cutoff > 0 and (~mask).any():
oof_preds[learner["name"]][slice_name][valid_idx[~mask]] = \
oof_preds[learner["name"]]["full_data"][valid_idx[~mask]]
test_preds[learner["name"]][slice_name] += test_pred
except Exception as e:
print(f" Error training {learner['name']}: {str(e)}")
continue
# Normalize test predictions
for slice_name in test_preds["xgb_baseline"]:
test_preds["xgb_baseline"][slice_name] /= Config.N_FOLDS
return oof_preds, test_preds, model_slices
# ===== Ensemble and Submission =====
def create_submissions(train_df, oof_preds, test_preds, submission_df):
"""只生成XGBoost提交文件"""
all_submissions = {}
    # Keep only XGBoost
if "xgb_baseline" in oof_preds:
xgb_oof = np.mean(list(oof_preds["xgb_baseline"].values()), axis=0)
xgb_test = np.mean(list(test_preds["xgb_baseline"].values()), axis=0)
xgb_score = pearsonr(train_df[Config.LABEL_COLUMN], xgb_oof)[0]
print(f"\nXGBoost Baseline Score: {xgb_score:.4f}")
submission_xgb = submission_df.copy()
submission_xgb["prediction"] = xgb_test
submission_xgb.to_csv("/AI4M/users/mjzhang/workspace/DRW/ZMJ/threshold_6_30/submission_xgb_baseline_59_pca.csv", index=False)
all_submissions["xgb_baseline"] = xgb_score
print("\n" + "="*50)
print("SUBMISSION SUMMARY:")
print("="*50)
for name, score in sorted(all_submissions.items(), key=lambda x: x[1], reverse=True):
print(f"{name:25s}: {score:.4f}")
return all_submissions
# ===== Main Execution =====
if __name__ == "__main__":
print("Loading data...")
train_df, test_df, submission_df = load_data()
print("\nTraining models...")
oof_preds, test_preds, model_slices = train_and_evaluate(train_df, test_df)
print("\nCreating submissions...")
submission_scores = create_submissions(train_df, oof_preds, test_preds, submission_df)
print("\nAll submissions created successfully!")
print("Files created:")
print("- submission_xgb_baseline.csv (original baseline)")