import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.feature_selection import SelectKBest, f_regression, RFE
from sklearn.ensemble import RandomForestRegressor
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from scipy.stats import pearsonr, spearmanr
import warnings
warnings.filterwarnings('ignore')
# Configure matplotlib fonts so Chinese (CJK) axis labels and titles render
# correctly; DejaVu Sans is the fallback when SimHei is unavailable.
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False  # keep minus signs readable with CJK fonts
# ===== Configuration =====
class Config:
    """Static configuration: competition data file locations and target column."""
    # Parquet/CSV paths on the shared workspace
    TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/new_data/train.parquet"
    TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/new_data/test.parquet"
    SUBMISSION_PATH = "/AI4M/users/mjzhang/workspace/DRW/new_data/sample_submission.csv"
    # Name of the prediction target column in the training set
    LABEL_COLUMN = "label"
def load_data():
    """Load the training set described by :class:`Config`.

    Bug fix: the original passed ``Config.FEATURES + [Config.LABEL_COLUMN]``
    as the column list, but ``Config`` does not define ``FEATURES``, so the
    call raised ``AttributeError`` before any data was read. We now use an
    explicit feature list only when one is configured; otherwise all columns
    are loaded.

    Returns:
        pd.DataFrame: the training data.
    """
    print("正在加载数据...")
    feature_cols = getattr(Config, "FEATURES", None)
    # None → pandas loads every column of the parquet file
    columns = feature_cols + [Config.LABEL_COLUMN] if feature_cols else None
    train_df = pd.read_parquet(Config.TRAIN_PATH, columns=columns)
    print(f"数据加载完成,训练集形状: {train_df.shape}")
    return train_df
def feature_engineering(df):
    """Add derived market-microstructure features (same set as the original pipeline).

    New columns are added to *df* in place; the returned frame is a copy in
    which +/-inf values have been replaced and every NaN imputed with the
    column median (or 0 when the median itself is NaN).
    """
    eps = 1e-8  # guards all ratio denominators against division by zero

    # Original features
    df['volume_weighted_sell'] = df['sell_qty'] * df['volume']
    df['buy_sell_ratio'] = df['buy_qty'] / (df['sell_qty'] + eps)
    df['selling_pressure'] = df['sell_qty'] / (df['volume'] + eps)
    df['effective_spread_proxy'] = (df['buy_qty'] - df['sell_qty']).abs() / (df['volume'] + eps)

    # New robust features
    df['log_volume'] = np.log1p(df['volume'])
    book_depth = df['bid_qty'] + df['ask_qty']
    trade_flow = df['buy_qty'] + df['sell_qty']
    df['bid_ask_imbalance'] = (df['bid_qty'] - df['ask_qty']) / (book_depth + eps)
    df['order_flow_imbalance'] = (df['buy_qty'] - df['sell_qty']) / (trade_flow + eps)
    df['liquidity_ratio'] = book_depth / (df['volume'] + eps)

    # Turn infinities into NaN so a single imputation pass handles both
    df = df.replace([np.inf, -np.inf], np.nan)
    # Median imputation per column; fall back to 0 for all-NaN columns
    for column in df.columns:
        if df[column].isna().any():
            med = df[column].median()
            df[column] = df[column].fillna(0 if pd.isna(med) else med)
    return df
def analyze_feature_label_correlation(df, features, label_col):
    """Rank features by their correlation with the label column.

    For every feature present in *df*, computes the Pearson and Spearman
    correlation (with p-values) against ``df[label_col]``, prints a table
    sorted by |Pearson| descending, and returns the underlying DataFrame.
    """
    print("\n=== 特征与标签相关性分析 ===")
    label = df[label_col]
    records = []
    for name in features:
        if name not in df.columns:
            continue  # silently skip features missing from the frame
        p_corr, p_pval = pearsonr(df[name], label)
        s_corr, s_pval = spearmanr(df[name], label)
        records.append({
            'feature': name,
            'pearson_corr': p_corr,
            'pearson_p': p_pval,
            'spearman_corr': s_corr,
            'spearman_p': s_pval,
            'abs_pearson': abs(p_corr),
            'abs_spearman': abs(s_corr),
        })

    corr_df = pd.DataFrame(records).sort_values('abs_pearson', ascending=False)

    print("\n特征与标签相关性排序(按Pearson相关系数绝对值):")
    print("=" * 80)
    print(f"{'特征名':<20} {'Pearson':<10} {'P值':<10} {'Spearman':<10} {'P值':<10}")
    print("=" * 80)
    for _, rec in corr_df.iterrows():
        print(f"{rec['feature']:<20} {rec['pearson_corr']:<10.4f} {rec['pearson_p']:<10.4f} "
              f"{rec['spearman_corr']:<10.4f} {rec['spearman_p']:<10.4f}")
    return corr_df
def analyze_feature_correlations(df, features, top_n=10):
    """Report the strongest pairwise correlations among *features*.

    Computes the full correlation matrix over ``df[features]``, enumerates
    each unordered feature pair once, prints the ``top_n`` strongest by
    absolute correlation, and returns ``(corr_matrix, corr_df)``.

    Fix: the original also built an ``upper_tri`` masked matrix via
    ``np.triu`` that was never used anywhere — dead code and wasted work;
    removed here.
    """
    print(f"\n=== 特征间相关性分析(显示前{top_n}个最强相关) ===")
    corr_matrix = df[features].corr()

    # Walk the upper triangle so each pair appears exactly once
    correlations = []
    for i, feat1 in enumerate(features):
        for feat2 in features[i + 1:]:
            corr_val = corr_matrix.loc[feat1, feat2]
            if not pd.isna(corr_val):  # skip pairs with undefined correlation
                correlations.append({
                    'feature1': feat1,
                    'feature2': feat2,
                    'correlation': corr_val,
                    'abs_correlation': abs(corr_val),
                })

    corr_df = pd.DataFrame(correlations)
    corr_df = corr_df.sort_values('abs_correlation', ascending=False)

    print(f"\n特征间相关性排序(前{top_n}个):")
    print("=" * 60)
    print(f"{'特征1':<15} {'特征2':<15} {'相关性':<10} {'绝对值':<10}")
    print("=" * 60)
    for _, row in corr_df.head(top_n).iterrows():
        print(f"{row['feature1']:<15} {row['feature2']:<15} {row['correlation']:<10.4f} {row['abs_correlation']:<10.4f}")
    return corr_matrix, corr_df
def plot_correlation_heatmap(corr_matrix, title="特征相关性热力图", figsize=(12, 10)):
    """Draw a lower-triangle annotated heatmap of *corr_matrix* and save it as PNG.

    The figure is written to ``<title with spaces replaced by '_'>.png`` and
    then shown interactively.
    """
    plt.figure(figsize=figsize)
    # Mask the redundant upper triangle (the matrix is symmetric)
    upper_mask = np.triu(np.ones_like(corr_matrix, dtype=bool))
    sns.heatmap(
        corr_matrix,
        mask=upper_mask,
        annot=True,
        cmap='RdBu_r',
        center=0,
        square=True,
        linewidths=0.5,
        cbar_kws={"shrink": .8},
    )
    plt.title(title, fontsize=16, pad=20)
    plt.tight_layout()
    plt.savefig(f"{title.replace(' ', '_')}.png", dpi=300, bbox_inches='tight')
    plt.show()
def plot_feature_label_correlation(corr_df, title="特征与标签相关性", figsize=(12, 8)):
    """Grouped bar chart of Pearson vs. Spearman correlation for the top 20 features.

    *corr_df* is expected to be the frame returned by
    ``analyze_feature_label_correlation`` (already sorted by importance).
    The figure is saved as PNG and shown.
    """
    plt.figure(figsize=figsize)
    top20 = corr_df.head(20)
    positions = range(len(top20))
    bar_width = 0.35

    # Two bars per feature, offset left/right of the tick position
    plt.bar([p - bar_width / 2 for p in positions], top20['pearson_corr'].values,
            bar_width, label='Pearson', alpha=0.8)
    plt.bar([p + bar_width / 2 for p in positions], top20['spearman_corr'].values,
            bar_width, label='Spearman', alpha=0.8)

    plt.xlabel('特征')
    plt.ylabel('相关系数')
    plt.title(title)
    plt.xticks(positions, top20['feature'], rotation=45, ha='right')
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(f"{title.replace(' ', '_')}.png", dpi=300, bbox_inches='tight')
    plt.show()
def analyze_feature_importance_by_correlation(corr_df, threshold=0.01):
    """Filter statistically significant features and print the strong ones.

    Keeps rows of *corr_df* whose Pearson p-value is below *threshold*,
    sorted by |Pearson| descending; prints the subset with |corr| > 0.05
    and returns the full significant subset.
    """
    print(f"\n=== 基于相关性的特征重要性分析(P值阈值: {threshold}) ===")

    significant = corr_df.loc[corr_df['pearson_p'] < threshold].copy()
    significant = significant.sort_values('abs_pearson', ascending=False)

    print(f"\n显著相关的特征数量: {len(significant)}")
    print("\n高重要性特征(|相关系数| > 0.05):")
    strong = significant.loc[significant['abs_pearson'] > 0.05]
    for _, rec in strong.iterrows():
        print(f"- {rec['feature']}: Pearson={rec['pearson_corr']:.4f}, P={rec['pearson_p']:.4f}")
    return significant
def find_high_correlations(corr_matrix, threshold=0.8):
    """Return the feature pairs of *corr_matrix* with |correlation| above *threshold*.

    Only the upper triangle is scanned, so each unordered pair appears once.
    Returns a DataFrame with columns feature1/feature2/correlation (empty if
    no pair qualifies).
    """
    cols = corr_matrix.columns
    pairs = [
        {
            'feature1': cols[i],
            'feature2': cols[j],
            'correlation': corr_matrix.iloc[i, j],
        }
        for i in range(len(cols))
        for j in range(i + 1, len(cols))
        if abs(corr_matrix.iloc[i, j]) > threshold
    ]
    return pd.DataFrame(pairs)
def main():
    """Run the full exploratory feature-analysis pipeline.

    Steps: load the training data, plot sample feature distributions, draw a
    correlation heatmap, list highly correlated pairs, rank features by
    target correlation, run Incremental PCA and t-SNE, perform univariate /
    random-forest / Lasso-RFE feature selection, cluster with K-means, and
    print a final summary including the features selected by all methods.

    Fixes vs. the original: removed the leftover ``breakpoint()`` debugger
    traps; stopped clobbering ``anonymized_features`` with the 200-column
    subset (and replaced the hard-coded feature count of 890); corrected the
    summary/label strings that disagreed with the code (threshold 0.98, not
    0.7; 50 target-correlated features, not 20); ``high_target_corr`` now
    truly holds 50 features; dropped a duplicate ``y`` assignment and
    redundant re-imports of names already imported at module level.
    """
    print("开始特征相关性分析...")
    # Load the full training set
    train = pd.read_parquet(Config.TRAIN_PATH)
    # Columns 5 .. n-2 are the anonymized features (leading columns are raw
    # market fields, the last column is the label) — TODO confirm layout
    anonymized_features = list(train.columns[list(range(5, train.shape[1] - 1))])
    target = 'label'

    # --- Distributions of 6 randomly chosen anonymized features ---
    fig, axes = plt.subplots(2, 3, figsize=(15, 10))
    fig.suptitle('Distribution of Random 6 Anonymized Features', fontsize=16)
    import random
    for i, feature in enumerate(random.sample(list(anonymized_features), 6)):
        row, col = i // 3, i % 3
        train[feature].hist(bins=50, ax=axes[row, col], alpha=0.7)
        axes[row, col].set_title(f'{feature}')
        axes[row, col].set_xlabel('Value')
        axes[row, col].set_ylabel('Frequency')
    plt.tight_layout()
    plt.show()

    # --- Heatmap of correlations among the first 50 features + target ---
    subset_features = anonymized_features[:50]
    correlation_matrix = train[subset_features + [target]].corr()
    plt.figure(figsize=(32, 20))
    sns.heatmap(correlation_matrix, cmap='coolwarm', center=0,
                square=True, fmt='.2f', annot=True, cbar_kws={'shrink': 0.8})
    plt.title('Correlation Matrix - First 50 Anonymized Features + Target')
    plt.tight_layout()
    plt.show()

    high_corr_df = find_high_correlations(correlation_matrix, threshold=0.98)
    print("High correlation pairs (|corr| > 0.98):")
    print(high_corr_df.sort_values('correlation', key=abs, ascending=False).head(50))

    # --- Correlation with the target variable ---
    target_correlations = train[anonymized_features + [target]].corr()[target].abs().sort_values(ascending=False)
    print("\nTop 50 features most correlated with target:")
    print(target_correlations.head(51)[1:])  # index 0 is the target itself

    # --- Incremental PCA on the first 200 numeric features ---
    from sklearn.decomposition import IncrementalPCA
    X_numeric = train[anonymized_features].select_dtypes(include=[np.number]).iloc[:, :200]
    # Fill missing values with the column median
    X_clean = X_numeric.fillna(X_numeric.median())
    # Downcast to float32 to save memory
    X_clean = X_clean.astype(np.float32)
    # Standardize features before PCA
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X_clean)
    print("Performing Incremental PCA...")
    ipca = IncrementalPCA(n_components=50, batch_size=200)
    X_pca = ipca.fit_transform(X_scaled)
    # Explained-variance analysis
    explained_variance_ratio = ipca.explained_variance_ratio_
    cumsum_variance = np.cumsum(explained_variance_ratio)
    n_components_90 = np.argmax(cumsum_variance >= 0.90) + 1
    n_components_95 = np.argmax(cumsum_variance >= 0.95) + 1
    print(f"Number of components needed for 90% variance: {n_components_90}")
    print(f"Number of components needed for 95% variance: {n_components_95}")

    # --- t-SNE visualization on a sample (too expensive on the full set) ---
    print("Performing t-SNE on sample data...")
    sample_size = min(5000, len(X_clean))
    sample_indices = np.random.choice(len(X_clean), sample_size, replace=False)
    X_sample = X_scaled[sample_indices]
    y = train[target]
    y_sample = y.iloc[sample_indices]
    # Reduce with PCA first so t-SNE runs on 50 dimensions
    pca_pre = PCA(n_components=50)
    X_pca_sample = pca_pre.fit_transform(X_sample)
    tsne = TSNE(n_components=2, random_state=42, perplexity=30)
    X_tsne = tsne.fit_transform(X_pca_sample)

    # --- Feature selection ---
    # Univariate f_regression scoring is essentially a Pearson ranking
    print("\n" + "="*50)
    print("4. FEATURE SELECTION")
    print("="*50)
    print("Performing univariate feature selection...")
    selector_univariate = SelectKBest(score_func=f_regression, k=100)
    X_selected_univariate = selector_univariate.fit_transform(X_clean, y)
    # Names of the 200-column subset actually scored; keep the full
    # anonymized_features list intact for the final summary
    subset_feature_names = list(X_clean.columns)
    feature_scores = pd.DataFrame({
        'feature': subset_feature_names,
        'score': selector_univariate.scores_
    }).sort_values('score', ascending=False)
    print("Top 20 features by univariate selection:")
    print(feature_scores.head(20))

    print("\nTraining Random Forest for feature importance (with speed-up)...")
    rf = RandomForestRegressor(
        n_estimators=100,
        max_depth=10,          # limit depth to prevent very deep trees
        max_features='sqrt',   # use sqrt of features at each split
        n_jobs=-1,             # use all CPU cores
        random_state=42
    )
    rf.fit(X_clean, y)
    feature_importance = pd.DataFrame({
        'feature': subset_feature_names,
        'importance': rf.feature_importances_
    }).sort_values('importance', ascending=False)
    print("Top 20 features by Random Forest importance:")
    print(feature_importance.head(20))

    from sklearn.linear_model import Lasso, LassoCV
    # Lasso + RFE accounts for correlations between features, unlike the
    # univariate f_regression ranking. A larger alpha shrinks more
    # coefficients to zero; even if none vanish, RFE still iteratively drops
    # the weakest features. Note RFE's ranking records elimination order,
    # not coefficient magnitude, and tends to be more robust than raw Lasso
    # coefficients for weakly correlated features.
    print("\nPerforming Recursive Feature Elimination with Lasso...")
    # Pick the regularization strength by cross-validation
    lasso_cv = LassoCV(cv=5, max_iter=10000, random_state=42)
    lasso_cv.fit(X_clean, y)
    print(f"Best alpha via CV: {lasso_cv.alpha_}")
    lasso = Lasso(alpha=lasso_cv.alpha_, max_iter=10000, random_state=42)
    # step=50 removes 50 features per iteration to speed things up
    rfe = RFE(estimator=lasso, n_features_to_select=100, step=50)
    rfe.fit(X_clean, y)
    rfe_features = pd.DataFrame({
        'feature': subset_feature_names,
        'selected': rfe.support_,
        'ranking': rfe.ranking_
    }).sort_values('ranking')
    selected_features_rfe = rfe_features[rfe_features['selected']]['feature'].tolist()
    print(f"RFE selected {len(selected_features_rfe)} features")
    print("Top 20 RFE selected features:")
    print(rfe_features.head(20))

    # --- K-means clustering on PCA-reduced data ---
    print("Performing K-means clustering...")
    n_clusters_range = range(2, 11)
    inertias = []  # elbow-curve inertia per k
    for k in n_clusters_range:
        kmeans = KMeans(n_clusters=k, random_state=42)
        kmeans.fit(X_pca[:, :50])  # use the first 50 PCA components
        inertias.append(kmeans.inertia_)
    optimal_k = 5  # adjust based on the elbow curve above
    kmeans_final = KMeans(n_clusters=optimal_k, random_state=42)
    clusters = kmeans_final.fit_predict(X_pca[:, :50])
    # Per-cluster target statistics
    cluster_analysis = pd.DataFrame({
        'cluster': clusters,
        'target': y
    })
    print(f"\nCluster analysis with {optimal_k} clusters:")
    cluster_stats = cluster_analysis.groupby('cluster')['target'].agg(['count', 'mean', 'std'])
    print(cluster_stats)

    # --- Summary ---
    print(f"1. Dataset contains {len(anonymized_features)} anonymized features")
    print(f"2. {n_components_90} components explain 90% of variance, {n_components_95} explain 95%")
    print(f"3. Top correlated feature with target: {target_correlations.index[1]} (corr: {target_correlations.iloc[1]:.4f})")
    print(f"4. {len(high_corr_df)} feature pairs have high correlation (>0.98)")
    print(f"5. Optimal number of clusters appears to be around {optimal_k}")
    # Save important features for further analysis
    important_features = {
        'top_univariate': feature_scores.head(50)['feature'].tolist(),
        'top_rf_importance': feature_importance.head(50)['feature'].tolist(),
        'rfe_selected': selected_features_rfe,
        # head(51) then drop index 0 (the target itself) → 50 features
        'high_target_corr': target_correlations.head(51).index[1:].tolist()
    }
    common_features = set(important_features['top_univariate']) & \
                      set(important_features['top_rf_importance']) & \
                      set(important_features['rfe_selected'])
    print(f" Common Features selected by all 3 methods: {(common_features)}")
# Script entry point: run the analysis only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()