# Personal_Code / ZMJ / analyze.py
# (file uploaded via huggingface_hub by ChanceuxMJ, revision c687548)
import pandas as pd
import numpy as np
from scipy.stats import pearsonr
import warnings
from concurrent.futures import ThreadPoolExecutor, as_completed
import time
warnings.filterwarnings('ignore')
# ===== Configuration =====
class Config:
    """Central configuration for the IC analysis run: data paths, parallelism, and output options."""
    # Data path configuration
    TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/train.parquet"
    TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/data/test.parquet"
    # Paths used when USE_AGGREGATED_DATA is True (pre-aggregated parquet files)
    AGGREGATED_TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/ZMJ/max_IC_mixed/train_aggregated.parquet"
    AGGREGATED_TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/ZMJ/max_IC_mixed/test_aggregated.parquet"
    LABEL_COLUMN = "label"
    # Performance configuration
    MAX_WORKERS = 4  # number of worker threads for parallel IC computation
    USE_AGGREGATED_DATA = True  # whether to load the aggregated data instead of the raw data
    # Output configuration
    OUTPUT_DIR = "./ic_analysis_results"
    SAVE_DETAILED_RESULTS = True  # whether to also write the detailed per-feature CSV report
def fast_ic_calculation(df, features, label_col, max_workers=4):
    """
    Compute the IC (Pearson correlation with the label) of each feature,
    fanned out over a thread pool.

    Parameters
    ----------
    df : pd.DataFrame
        Data containing the feature columns and the label column.
    features : list
        Names of the feature columns to evaluate.
    label_col : str
        Name of the label column.
    max_workers : int
        Number of worker threads for the parallel computation.

    Returns
    -------
    ic_values : pd.Series
        Pearson correlation of each feature with the label, indexed by feature.
    p_values : pd.Series
        Two-sided p-value of each correlation, indexed by feature.
        (Fix: the original docstring claimed a single return value, but the
        function has always returned both series.)
    """
    print(f"开始计算特征IC值 (特征数量: {len(features)})")
    start_time = time.time()

    def calculate_ic(feature):
        """Return (feature, ic, p_value); fall back to (0.0, 1.0) on failure
        or a NaN correlation (e.g. a constant feature column)."""
        try:
            ic, p_value = pearsonr(df[feature], df[label_col])
            # pearsonr yields NaN for constant input; treat it like the error
            # path so NaNs do not leak into downstream ranks/means.
            if np.isnan(ic):
                return feature, 0.0, 1.0
            return feature, ic, p_value
        except Exception as e:
            print(f"计算特征 {feature} 的IC值时出错: {e}")
            return feature, 0.0, 1.0

    # Compute ICs in parallel; results are collected as futures complete.
    ic_dict = {}
    p_value_dict = {}
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_feature = {executor.submit(calculate_ic, feature): feature
                             for feature in features}
        completed = 0
        for future in as_completed(future_to_feature):
            feature, ic, p_value = future.result()
            ic_dict[feature] = ic
            p_value_dict[feature] = p_value
            completed += 1
            # Progress heartbeat every 50 features.
            if completed % 50 == 0:
                print(f"IC计算进度: {completed}/{len(features)} ({completed/len(features)*100:.1f}%)")

    ic_values = pd.Series(ic_dict)
    p_values = pd.Series(p_value_dict)
    print(f"IC值计算耗时: {time.time() - start_time:.2f}秒")
    return ic_values, p_values
def calculate_feature_statistics(df, features, label_col):
    """
    Compute per-feature descriptive statistics.

    Parameters
    ----------
    df : pd.DataFrame
        Data containing the feature columns.
    features : list
        Names of the feature columns to summarize.
    label_col : str
        Name of the label column. Kept for interface compatibility; the
        statistics themselves do not depend on the label. (Fix: the original
        read ``df[label_col]`` into an unused local, which could needlessly
        fail a feature's stats when the label column was absent.)

    Returns
    -------
    stats_df : pd.DataFrame
        One row per feature with columns: feature, mean, std, min, max,
        missing_count, missing_ratio, zero_count, zero_ratio,
        outlier_count, outlier_ratio. Failed features get NaN values.
    """
    print("计算特征统计信息...")
    stats_data = []
    for feature in features:
        try:
            feature_data = df[feature]
            n = len(feature_data)
            # Basic statistics
            mean_val = feature_data.mean()
            std_val = feature_data.std()
            min_val = feature_data.min()
            max_val = feature_data.max()
            # Missing-value statistics
            missing_count = feature_data.isna().sum()
            missing_ratio = missing_count / n
            # Zero-value statistics
            zero_count = (feature_data == 0).sum()
            zero_ratio = zero_count / n
            # Outliers: values more than 3 standard deviations from the mean
            outlier_count = ((feature_data - mean_val).abs() > 3 * std_val).sum()
            outlier_ratio = outlier_count / n
            stats_data.append({
                'feature': feature,
                'mean': mean_val,
                'std': std_val,
                'min': min_val,
                'max': max_val,
                'missing_count': missing_count,
                'missing_ratio': missing_ratio,
                'zero_count': zero_count,
                'zero_ratio': zero_ratio,
                'outlier_count': outlier_count,
                'outlier_ratio': outlier_ratio
            })
        except Exception as e:
            # Keep the row so the output always has one entry per feature.
            print(f"计算特征 {feature} 统计信息时出错: {e}")
            stats_data.append({
                'feature': feature,
                'mean': np.nan,
                'std': np.nan,
                'min': np.nan,
                'max': np.nan,
                'missing_count': np.nan,
                'missing_ratio': np.nan,
                'zero_count': np.nan,
                'zero_ratio': np.nan,
                'outlier_count': np.nan,
                'outlier_ratio': np.nan
            })
    return pd.DataFrame(stats_data)
def create_ic_analysis_report(ic_values, p_values, stats_df, output_dir):
    """
    Assemble the IC analysis report, write it to CSV files, and print a
    console summary.

    Parameters
    ----------
    ic_values : pd.Series
        Per-feature IC values, indexed by feature name.
    p_values : pd.Series
        Per-feature p-values, indexed by feature name.
    stats_df : pd.DataFrame
        Per-feature statistics (must have a 'feature' column).
    output_dir : str
        Directory where the CSV files are written (created if missing).

    Returns
    -------
    pd.DataFrame
        The full report, sorted by |IC| descending with an 'ic_rank' column.
    """
    print("创建IC分析报告...")
    import os
    os.makedirs(output_dir, exist_ok=True)

    # One row per feature: IC, |IC|, p-value, and a 5%-level significance flag.
    report_df = pd.DataFrame({
        'feature': ic_values.index,
        'ic_value': ic_values.values,
        'ic_abs': ic_values.abs().values,
        'p_value': p_values.values,
        'is_significant': (p_values < 0.05).values,
    })

    # Attach the descriptive statistics, then rank by |IC| (largest first).
    report_df = report_df.merge(stats_df, on='feature', how='left')
    report_df = report_df.sort_values('ic_abs', ascending=False)
    report_df['ic_rank'] = report_df['ic_abs'].rank(ascending=False, method='min')

    # Persist the full report only when configured to do so.
    if Config.SAVE_DETAILED_RESULTS:
        detailed_path = os.path.join(output_dir, 'detailed_ic_analysis.csv')
        report_df.to_csv(detailed_path, index=False)
        print(f"详细IC分析报告已保存: {detailed_path}")

    # Always persist the condensed summary and the raw statistics.
    summary_cols = ['feature', 'ic_value', 'ic_abs', 'ic_rank', 'p_value', 'is_significant']
    simple_df = report_df[summary_cols].copy()
    simple_path = os.path.join(output_dir, 'ic_analysis_summary.csv')
    simple_df.to_csv(simple_path, index=False)
    print(f"IC分析摘要已保存: {simple_path}")

    stats_path = os.path.join(output_dir, 'feature_statistics.csv')
    stats_df.to_csv(stats_path, index=False)
    print(f"特征统计信息已保存: {stats_path}")

    def _print_rows(rows):
        """Print one formatted line per report row, marking significant ICs."""
        for _, row in rows.iterrows():
            significance = "***" if row['is_significant'] else ""
            print(f" {row['ic_rank']:2.0f}. {row['feature']:20s} IC={row['ic_value']:6.4f} (p={row['p_value']:.4f}) {significance}")

    # Console summary.
    print("\n" + "="*60)
    print("IC分析摘要")
    print("="*60)
    print(f"总特征数量: {len(ic_values)}")
    print(f"平均IC值: {ic_values.mean():.4f}")
    print(f"IC值标准差: {ic_values.std():.4f}")
    print(f"最大IC值: {ic_values.max():.4f}")
    print(f"最小IC值: {ic_values.min():.4f}")
    print(f"显著特征数量 (p < 0.05): {(p_values < 0.05).sum()}")
    print(f"正IC值特征数量: {(ic_values > 0).sum()}")
    print(f"负IC值特征数量: {(ic_values < 0).sum()}")

    print(f"\nTop 10 最高IC值特征:")
    _print_rows(report_df.head(10))

    print(f"\nBottom 10 最低IC值特征:")
    _print_rows(report_df.tail(10))

    return report_df
def main():
    """Run the end-to-end IC analysis: load data, preprocess, compute ICs
    and statistics, write the report, and save the raw IC values."""
    print("="*60)
    print("开始IC值分析")
    print("="*60)

    # 1. Load data: prefer the aggregated parquet, fall back to the raw one.
    print("\n1. 加载数据...")
    if Config.USE_AGGREGATED_DATA:
        try:
            train_df = pd.read_parquet(Config.AGGREGATED_TRAIN_PATH)
            print(f"使用聚合后的训练数据: {train_df.shape}")
        except FileNotFoundError:
            print("聚合数据文件不存在,使用原始数据...")
            train_df = pd.read_parquet(Config.TRAIN_PATH)
            print(f"使用原始训练数据: {train_df.shape}")
    else:
        train_df = pd.read_parquet(Config.TRAIN_PATH)
        print(f"使用原始训练数据: {train_df.shape}")

    # 2. Every column except the label is treated as a feature.
    print("\n2. 获取特征列表...")
    features = [col for col in train_df.columns if col != Config.LABEL_COLUMN]
    print(f"特征数量: {len(features)}")

    # 3. Preprocess. Fix: convert +/-inf to NaN FIRST, then impute once with
    # the column median (falling back to 0 for all-NaN columns). The original
    # median-filled NaNs before replacing infs, so medians could be skewed by
    # infinite values and every column was imputed twice.
    print("\n3. 数据预处理...")
    train_df = train_df.replace([np.inf, -np.inf], np.nan)
    for col in features + [Config.LABEL_COLUMN]:
        if train_df[col].isna().any():
            median_val = train_df[col].median()
            train_df[col] = train_df[col].fillna(median_val if not pd.isna(median_val) else 0)
    print(f"预处理后数据形状: {train_df.shape}")

    # 4. Compute per-feature IC values (parallelized).
    print("\n4. 计算IC值...")
    ic_values, p_values = fast_ic_calculation(train_df, features, Config.LABEL_COLUMN, Config.MAX_WORKERS)

    # 5. Compute per-feature descriptive statistics.
    print("\n5. 计算特征统计信息...")
    stats_df = calculate_feature_statistics(train_df, features, Config.LABEL_COLUMN)

    # 6. Build and save the analysis report (also creates OUTPUT_DIR).
    print("\n6. 创建分析报告...")
    report_df = create_ic_analysis_report(ic_values, p_values, stats_df, Config.OUTPUT_DIR)

    # 7. Save the raw IC values alongside the report.
    print("\n7. 保存原始IC值...")
    ic_df = pd.DataFrame({
        'feature': ic_values.index,
        'ic_value': ic_values.values,
        'p_value': p_values.values
    })
    ic_path = f"{Config.OUTPUT_DIR}/ic_values.csv"
    ic_df.to_csv(ic_path, index=False)
    print(f"IC值已保存: {ic_path}")

    print("\n" + "="*60)
    print("IC值分析完成!")
    print("="*60)
    print(f"所有结果已保存到目录: {Config.OUTPUT_DIR}")
    print("生成的文件:")
    print("- ic_values.csv: 原始IC值")
    print("- ic_analysis_summary.csv: IC分析摘要")
    print("- detailed_ic_analysis.csv: 详细IC分析报告")
    print("- feature_statistics.csv: 特征统计信息")
# Script entry point: run the full IC analysis when executed directly.
if __name__ == "__main__":
    main()