# Personal_Code / ZMJ / pipeline_mlp.py
# Uploaded by ChanceuxMJ using huggingface_hub (commit c687548, verified)
import sys
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import KFold
from sklearn.preprocessing import RobustScaler
from scipy.stats import pearsonr
import warnings
warnings.filterwarnings('ignore')
# ===== Feature Engineering =====
def feature_engineering(df):
    """Identity pass-through kept for interface compatibility.

    The actual feature engineering is already done in feature.py, so the
    frame is returned untouched.
    """
    return df
# ===== Configuration =====
class Config:
    """Central configuration: data paths, CV setup, and MLP hyper-parameters."""

    # --- Input/output file locations ---
    TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/ZMJ/max_IC_mixed/train_aggregated.parquet"
    TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/ZMJ/max_IC_mixed/test_aggregated.parquet"
    SUBMISSION_PATH = "/AI4M/users/mjzhang/workspace/DRW/ZMJ/max_IC_mixed/sample_submission.csv"

    # --- Data schema / CV ---
    LABEL_COLUMN = "label"
    N_FOLDS = 3
    RANDOM_STATE = 42

    # --- Training hyper-parameters ---
    DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    BATCH_SIZE = 128
    EPOCHS = 20
    LEARNING_RATE = 1e-3
# ===== MLP Model Definition =====
class MLP(nn.Module):
    """Feed-forward regressor: input_dim -> 256 -> 128 -> 1 with ReLU activations."""

    def __init__(self, input_dim):
        super().__init__()
        hidden_widths = [256, 128]
        layers = []
        prev_width = input_dim
        for width in hidden_widths:
            layers.append(nn.Linear(prev_width, width))
            layers.append(nn.ReLU())
            prev_width = width
        # Single-output regression head.
        layers.append(nn.Linear(prev_width, 1))
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Return predictions of shape (batch, 1) for input of shape (batch, input_dim)."""
        return self.net(x)
# ===== Data Loading =====
def load_data():
    """Load train/test parquet files and the sample-submission CSV.

    Side effect: populates Config.FEATURES with every train column except
    the label column.

    Returns:
        (train_df, test_df, submission_df) with integer indices reset on
        the train and test frames.
    """
    train_df = pd.read_parquet(Config.TRAIN_PATH)
    test_df = pd.read_parquet(Config.TEST_PATH)
    submission_df = pd.read_csv(Config.SUBMISSION_PATH)
    all_columns = train_df.columns.tolist()
    Config.FEATURES = [c for c in all_columns if c != Config.LABEL_COLUMN]
    print(f"Loaded data - Train: {train_df.shape}, Test: {test_df.shape}, Submission: {submission_df.shape}")
    print(f"Total features: {len(Config.FEATURES)}")
    return train_df.reset_index(drop=True), test_df.reset_index(drop=True), submission_df
# ===== Model Training =====
def train_mlp(X_train, y_train, X_valid, y_valid, X_test, scaler):
    """Train an MLP with early stopping on the validation Pearson correlation.

    Args:
        X_train, X_valid, X_test: feature frames/arrays (scaled in here).
        y_train, y_valid: label Series aligned with the feature sets.
        scaler: an already-fitted scaler applied to all three feature sets.

    Returns:
        (valid_pred, test_pred): 1-D numpy predictions from the best epoch's
        model (snapshot with the highest validation correlation).
    """
    X_train = scaler.transform(X_train)
    X_valid = scaler.transform(X_valid)
    X_test = scaler.transform(X_test)
    X_train = torch.tensor(X_train, dtype=torch.float32, device=Config.DEVICE)
    y_train = torch.tensor(y_train.values, dtype=torch.float32, device=Config.DEVICE).view(-1, 1)
    X_valid = torch.tensor(X_valid, dtype=torch.float32, device=Config.DEVICE)
    y_valid = torch.tensor(y_valid.values, dtype=torch.float32, device=Config.DEVICE).view(-1, 1)
    X_test = torch.tensor(X_test, dtype=torch.float32, device=Config.DEVICE)
    model = MLP(X_train.shape[1]).to(Config.DEVICE)
    optimizer = optim.AdamW(model.parameters(), lr=Config.LEARNING_RATE, weight_decay=1e-4)  # L2 regularization
    criterion = nn.MSELoss()
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=Config.EPOCHS)
    best_model = None
    best_score = -np.inf
    patience = 7  # early-stopping tolerance (epochs without improvement)
    patience_counter = 0
    for epoch in range(Config.EPOCHS):
        model.train()
        # Shuffle indices once per epoch for mini-batch SGD.
        idx = np.random.permutation(len(X_train))
        for i in range(0, len(X_train), Config.BATCH_SIZE):
            batch_idx = idx[i:i + Config.BATCH_SIZE]
            xb = X_train[batch_idx]
            yb = y_train[batch_idx]
            optimizer.zero_grad()
            pred = model(xb)
            loss = criterion(pred, yb)
            loss.backward()
            optimizer.step()
        scheduler.step()
        # Validation: Pearson correlation between predictions and labels.
        model.eval()
        with torch.no_grad():
            val_pred = model(X_valid).cpu().numpy().flatten()
        val_score = np.corrcoef(val_pred, y_valid.cpu().numpy().flatten())[0, 1]
        if val_score > best_score:
            best_score = val_score
            # Snapshot the current weights into a fresh model instance.
            best_model = MLP(X_train.shape[1]).to(Config.DEVICE)
            best_model.load_state_dict(model.state_dict())
            patience_counter = 0
        else:
            patience_counter += 1
            if patience_counter >= patience:
                print(f"Early stopping at epoch {epoch+1}, best valid corr: {best_score:.4f}")
                break
    # Bug fix: if the validation correlation was NaN every epoch (e.g. the
    # model emitted constant predictions so np.corrcoef returned nan, and
    # nan > -inf is False), best_model was never assigned and the original
    # code crashed with AttributeError on best_model.eval(). Fall back to
    # the last trained model in that case.
    if best_model is None:
        best_model = model
    # Predict with the best snapshot.
    best_model.eval()
    with torch.no_grad():
        valid_pred = best_model(X_valid).cpu().numpy().flatten()
        test_pred = best_model(X_test).cpu().numpy().flatten()
    return valid_pred, test_pred
def train_and_evaluate(train_df, test_df):
    """Fit the MLP on the full training set and predict on train and test.

    NOTE(review): there is no cross-validation here — the "validation" set
    passed to train_mlp is the training set itself, so the reported score
    is in-sample, not out-of-fold.

    Returns:
        (oof_preds, test_preds): in-sample train predictions and test
        predictions as 1-D numpy arrays.
    """
    feature_cols = Config.FEATURES
    X_train = train_df[feature_cols]
    y_train = train_df[Config.LABEL_COLUMN]
    X_test = test_df[feature_cols]
    scaler = RobustScaler().fit(X_train)
    # Train directly on the full dataset (validation == training data).
    valid_pred, test_pred = train_mlp(X_train, y_train, X_train, y_train, X_test, scaler)
    score = pearsonr(y_train, valid_pred)[0]
    print(f"Train PearsonR (no CV): {score:.4f}")
    return valid_pred, test_pred
# ===== Submission =====
def create_submission(train_df, oof_preds, test_preds, submission_df):
    """Score the train predictions and write the submission CSV.

    Args:
        train_df: training frame containing Config.LABEL_COLUMN.
        oof_preds: in-sample train predictions (same length as train_df).
        test_preds: test predictions written into the submission.
        submission_df: sample submission template (copied, not mutated).

    Returns:
        The Pearson correlation between labels and oof_preds.
    """
    score = pearsonr(train_df[Config.LABEL_COLUMN], oof_preds)[0]
    print(f"\nMLP OOF PearsonR: {score:.4f}")
    submission = submission_df.copy()
    submission["prediction"] = test_preds
    out_path = "/AI4M/users/mjzhang/workspace/DRW/ZMJ/max_IC_mixed/submission_mlp_new.csv"
    submission.to_csv(out_path, index=False)
    # Bug fix: the original message claimed "submission_mlp.csv" was saved,
    # but the file actually written is submission_mlp_new.csv — report the
    # real path to avoid misleading the operator.
    print(f"Saved: {out_path}")
    return score
# ===== Main Execution =====
def _main():
    """Run the full pipeline: load data -> train MLP -> write submission."""
    print("Loading data...")
    train_df, test_df, submission_df = load_data()

    print("\nTraining MLP model...")
    oof_preds, test_preds = train_and_evaluate(train_df, test_df)

    print("\nCreating submission...")
    score = create_submission(train_df, oof_preds, test_preds, submission_df)
    print(f"\nAll done! MLP OOF PearsonR: {score:.4f}")


if __name__ == "__main__":
    _main()