# ChanceuxMJ's picture
# Upload folder using huggingface_hub
# c687548 verified
import sys
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from sklearn.linear_model import (
HuberRegressor, RANSACRegressor, TheilSenRegressor,
Lasso, ElasticNet, Ridge
)
from sklearn.cross_decomposition import PLSRegression
from sklearn.preprocessing import StandardScaler, RobustScaler
from sklearn.ensemble import RandomForestRegressor
from scipy.stats import pearsonr
import warnings
import torch
import matplotlib.pyplot as plt
import seaborn as sns
from concurrent.futures import ThreadPoolExecutor, as_completed
from itertools import combinations
import time
# Hard-coded dataset locations (DRW competition data on a shared cluster path).
TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/new_data/train.parquet"
TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/new_data/test.parquet"
# Load raw train/test frames; requires a parquet engine (pyarrow/fastparquet).
train_df = pd.read_parquet(TRAIN_PATH)
test_df = pd.read_parquet(TEST_PATH)
# ===== Feature Engineering =====
def feature_engineering(df):
    """Add order-flow / liquidity features and impute missing values.

    Expects columns 'buy_qty', 'sell_qty', 'volume', 'bid_qty', 'ask_qty'.
    Returns a new DataFrame with the engineered columns added, +/-inf
    normalized to NaN, and NaN imputed per column with that column's
    median (0 when the median itself is NaN, e.g. an all-NaN column).

    Works on a copy: the previous version added columns in place on the
    caller's frame but then rebound `df` via `replace`, so the NaN fill
    happened on a copy while the new columns leaked into the caller's
    DataFrame -- a confusing half-mutation. Callers reassign the return
    value, so copying first is backward-compatible.
    """
    df = df.copy()
    # Original features
    df['volume_weighted_sell'] = df['sell_qty'] * df['volume']
    # 1e-8 guards against division by zero on empty-volume rows.
    df['buy_sell_ratio'] = df['buy_qty'] / (df['sell_qty'] + 1e-8)
    df['selling_pressure'] = df['sell_qty'] / (df['volume'] + 1e-8)
    df['effective_spread_proxy'] = np.abs(df['buy_qty'] - df['sell_qty']) / (df['volume'] + 1e-8)
    # New robust features
    df['log_volume'] = np.log1p(df['volume'])
    df['bid_ask_imbalance'] = (df['bid_qty'] - df['ask_qty']) / (df['bid_qty'] + df['ask_qty'] + 1e-8)
    df['order_flow_imbalance'] = (df['buy_qty'] - df['sell_qty']) / (df['buy_qty'] + df['sell_qty'] + 1e-8)
    df['liquidity_ratio'] = (df['bid_qty'] + df['ask_qty']) / (df['volume'] + 1e-8)
    # The ratios above can still overflow to +/-inf; normalize to NaN first
    # so the median imputation below covers them too.
    df = df.replace([np.inf, -np.inf], np.nan)
    # Per-column median imputation; fall back to 0 for all-NaN columns.
    # NOTE(review): medians are computed per-DataFrame, so train and test
    # are imputed with different statistics -- confirm this is intended.
    for col in df.columns:
        if df[col].isna().any():
            median_val = df[col].median()
            df[col] = df[col].fillna(median_val if not pd.isna(median_val) else 0)
    return df
# Apply the feature pipeline to both splits.
train_df = feature_engineering(train_df)
test_df = feature_engineering(test_df)
LABEL_COLUMN = 'label'
# Every non-label column is treated as a model feature.
feature_cols = [col for col in train_df.columns if col != LABEL_COLUMN]
train_len = len(train_df)
# Stacked train+test frame. NOTE(review): assumes test_df shares train_df's
# columns; if test lacks 'label', the concat leaves NaN labels for test rows.
# `df`, `X` and `y` are not used in this chunk -- presumably consumed later.
df = pd.concat([train_df, test_df], axis=0)
X = train_df[feature_cols].values
y = train_df[LABEL_COLUMN].values
from sklearn.preprocessing import StandardScaler  # NOTE(review): redundant, already imported at top of file
import joblib
def clip_by_median_mad(df, n=3):
    """Winsorize numeric columns to the range median +/- n * MAD.

    MAD here is the raw median absolute deviation (no consistency
    constant). Non-numeric columns pass through unchanged; they are
    re-attached after the numeric ones, so column order may differ
    from the input when mixed dtypes are present.
    """
    numeric = df.select_dtypes(include=[np.number])
    center = numeric.median()
    spread = (numeric - center).abs().median()
    clipped = numeric.clip(lower=center - n * spread,
                           upper=center + n * spread, axis=1)
    # Re-attach any non-numeric columns dropped by the dtype filter.
    for name in df.columns:
        if name not in clipped.columns:
            clipped[name] = df[name]
    return clipped
# Winsorize features AND the label together with median +/- 3*MAD bounds.
all_features = feature_cols + [LABEL_COLUMN]
train_df[all_features] = clip_by_median_mad(train_df[all_features])
# NOTE(review): this raises KeyError if test_df has no 'label' column -- confirm.
test_df[all_features] = clip_by_median_mad(test_df[all_features])
scaler = StandardScaler()
# NOTE(review): the scaler is fit on the label column as well, so any model
# trained on these frames predicts in scaled label units; downstream code
# must inverse-transform predictions -- confirm this is handled.
train_df[all_features] = scaler.fit_transform(train_df[all_features])
test_df[all_features] = scaler.transform(test_df[all_features])
# Persist the fitted scaler and processed frames for later pipeline stages.
joblib.dump(scaler, 'scaler.pkl')
train_df.to_pickle('train_df.pkl')
test_df.to_pickle('test_df.pkl')