| | import sys |
| | import pandas as pd |
| | import numpy as np |
| | import torch |
| | import torch.nn as nn |
| | import torch.optim as optim |
| | from sklearn.model_selection import KFold |
| | from sklearn.preprocessing import RobustScaler |
| | from scipy.stats import pearsonr |
| | import warnings |
| | import pickle |
| | warnings.filterwarnings('ignore') |
| |
|
| | |
def feature_engineering(df):
    """Feature-engineering hook.

    Currently a no-op placeholder that returns *df* unchanged; derived
    columns can be added here before training without touching callers.
    """
    return df
| |
|
| | |
class Config:
    """Central configuration: data paths, CV setup and training hyper-parameters."""

    # Input data locations (pre-aggregated parquet features + submission template).
    TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/ZMJ/max_IC_mixed/train_aggregated.parquet"
    TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/ZMJ/max_IC_mixed/test_aggregated.parquet"
    SUBMISSION_PATH = "/AI4M/users/mjzhang/workspace/DRW/ZMJ/max_IC_mixed/sample_submission.csv"
    # Name of the regression target column in the train parquet.
    LABEL_COLUMN = "label"
    # Cross-validation setup.
    N_FOLDS = 5
    RANDOM_STATE = 42
    # Training runs on GPU when available, otherwise CPU.
    DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Optimisation hyper-parameters.
    BATCH_SIZE = 256
    EPOCHS = 100
    LEARNING_RATE = 5e-4
    WEIGHT_DECAY = 1e-3
    # Early-stopping patience: epochs without validation improvement.
    PATIENCE = 15
    # NOTE(review): Config.FEATURES is attached dynamically by load_data().
| |
|
| | |
class MLP(nn.Module):
    """Feed-forward regressor: input_dim -> 256 -> 128 -> 1.

    Two hidden ReLU layers with 10% dropout each; all linear layers are
    Xavier-uniform initialised with zero biases.
    """

    def __init__(self, input_dim):
        super().__init__()
        layers = [
            nn.Linear(input_dim, 256),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(128, 1),
        ]
        self.net = nn.Sequential(*layers)
        # Re-initialise every linear layer deterministically w.r.t. scheme
        # (Xavier weights, zero bias).
        self.apply(self._init_weights)

    def _init_weights(self, module):
        # Only linear layers carry learnable parameters here.
        if not isinstance(module, nn.Linear):
            return
        nn.init.xavier_uniform_(module.weight)
        if module.bias is not None:
            nn.init.zeros_(module.bias)

    def forward(self, x):
        """Return the (batch, 1) regression output for input x of shape (batch, input_dim)."""
        return self.net(x)
| |
|
| | |
def load_data():
    """Load train/test parquet frames and the sample submission CSV.

    Side effect: populates ``Config.FEATURES`` with every train column
    except the label column. Returns (train_df, test_df, submission_df)
    with the two frames re-indexed from 0.
    """
    train_df = pd.read_parquet(Config.TRAIN_PATH)
    test_df = pd.read_parquet(Config.TEST_PATH)
    submission_df = pd.read_csv(Config.SUBMISSION_PATH)

    feature_cols = []
    for col in train_df.columns.tolist():
        if col != Config.LABEL_COLUMN:
            feature_cols.append(col)
    Config.FEATURES = feature_cols

    print(f"Loaded data - Train: {train_df.shape}, Test: {test_df.shape}, Submission: {submission_df.shape}")
    print(f"Total features: {len(Config.FEATURES)}")
    return train_df.reset_index(drop=True), test_df.reset_index(drop=True), submission_df
| |
|
| | |
def train_mlp(X_train, y_train, X_valid, y_valid, X_test, scaler):
    """Train one MLP on a single CV fold with early stopping.

    Parameters
    ----------
    X_train, X_valid, X_test : pd.DataFrame
        Raw (unscaled) feature frames; scaled here with *scaler*.
    y_train, y_valid : pd.Series
        Regression targets for the train/validation split.
    scaler : fitted sklearn scaler
        Must already be fit on the training fold only (no leakage).

    Returns
    -------
    (valid_pred, test_pred, best_score)
        Numpy predictions for the validation and test sets from the
        best-validation-correlation epoch, plus that correlation.
    """
    X_train = scaler.transform(X_train)
    X_valid = scaler.transform(X_valid)
    X_test = scaler.transform(X_test)
    X_train = torch.tensor(X_train, dtype=torch.float32, device=Config.DEVICE)
    y_train = torch.tensor(y_train.values, dtype=torch.float32, device=Config.DEVICE).view(-1, 1)
    X_valid = torch.tensor(X_valid, dtype=torch.float32, device=Config.DEVICE)
    y_valid = torch.tensor(y_valid.values, dtype=torch.float32, device=Config.DEVICE).view(-1, 1)
    X_test = torch.tensor(X_test, dtype=torch.float32, device=Config.DEVICE)

    model = MLP(X_train.shape[1]).to(Config.DEVICE)
    optimizer = optim.AdamW(model.parameters(), lr=Config.LEARNING_RATE, weight_decay=Config.WEIGHT_DECAY)
    criterion = nn.MSELoss()
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=Config.EPOCHS)

    best_model_state = None
    best_score = -np.inf
    patience_counter = 0

    for epoch in range(Config.EPOCHS):
        # --- training pass over shuffled mini-batches ---
        model.train()
        idx = np.random.permutation(len(X_train))
        total_loss = 0
        num_batches = 0

        for i in range(0, len(X_train), Config.BATCH_SIZE):
            batch_idx = idx[i:i+Config.BATCH_SIZE]
            xb = X_train[batch_idx]
            yb = y_train[batch_idx]
            optimizer.zero_grad()
            pred = model(xb)
            loss = criterion(pred, yb)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
            num_batches += 1

        # Guard against an empty training fold (num_batches == 0).
        avg_loss = total_loss / max(num_batches, 1)
        scheduler.step()

        # --- validation: Pearson correlation of predictions vs targets ---
        model.eval()
        with torch.no_grad():
            val_pred = model(X_valid).cpu().numpy().flatten()
        val_score = np.corrcoef(val_pred, y_valid.cpu().numpy().flatten())[0, 1]

        # np.corrcoef returns NaN for constant predictions; never let a
        # NaN comparison silently skip checkpointing logic.
        if np.isfinite(val_score) and val_score > best_score:
            best_score = val_score
            # BUG FIX: state_dict().copy() is a shallow dict copy whose
            # values are live tensor references, so the "best" snapshot
            # would keep mutating as training continues. Clone each
            # tensor to truly freeze the best epoch's weights.
            best_model_state = {k: v.detach().clone() for k, v in model.state_dict().items()}
            patience_counter = 0
        else:
            patience_counter += 1

        if (epoch + 1) % 10 == 0:
            print(f" Epoch {epoch+1}/{Config.EPOCHS} - Loss: {avg_loss:.4f} - Val Corr: {val_score:.4f} - Best: {best_score:.4f}")

        if patience_counter >= Config.PATIENCE:
            print(f"Early stopping at epoch {epoch+1}, best valid corr: {best_score:.4f}")
            break

    # Restore the best checkpoint; fall back to the final weights if the
    # validation correlation was never finite (no snapshot taken).
    if best_model_state is not None:
        model.load_state_dict(best_model_state)
    model.eval()

    with torch.no_grad():
        valid_pred = model(X_valid).cpu().numpy().flatten()
        test_pred = model(X_test).cpu().numpy().flatten()

    return valid_pred, test_pred, best_score
| |
|
def train_and_evaluate(train_df, test_df):
    """Train the MLP with K-fold cross-validation.

    For each fold: fit a RobustScaler on the training split only, train
    one MLP via ``train_mlp``, collect out-of-fold predictions and
    accumulate the fold's test predictions (simple average over folds).
    Saves a summary pickle of the best fold and scores.

    Returns
    -------
    (oof_preds, test_preds, fold_scores, best_fold_score)
    """
    X_train = train_df[Config.FEATURES]
    y_train = train_df[Config.LABEL_COLUMN]
    X_test = test_df[Config.FEATURES]

    n_samples = len(train_df)
    oof_preds = np.zeros(n_samples)
    test_preds = np.zeros(len(test_df))

    kf = KFold(n_splits=Config.N_FOLDS, shuffle=True, random_state=Config.RANDOM_STATE)

    fold_scores = []
    best_fold_score = -np.inf
    best_fold_model = None
    # NOTE: removed dead locals best_fold_scaler / best_model_state that
    # were assigned but never used or returned.

    print(f"开始 {Config.N_FOLDS} 折交叉验证...")

    for fold, (train_idx, valid_idx) in enumerate(kf.split(train_df), 1):
        print(f"\n--- Fold {fold}/{Config.N_FOLDS} ---")

        X_fold_train = X_train.iloc[train_idx]
        y_fold_train = y_train.iloc[train_idx]
        X_fold_valid = X_train.iloc[valid_idx]
        y_fold_valid = y_train.iloc[valid_idx]

        # Fit the scaler on this fold's training split only (no leakage).
        scaler = RobustScaler()
        scaler.fit(X_fold_train)

        valid_pred, test_pred, fold_score = train_mlp(
            X_fold_train, y_fold_train, X_fold_valid, y_fold_valid, X_test, scaler
        )

        oof_preds[valid_idx] = valid_pred
        test_preds += test_pred / Config.N_FOLDS

        fold_scores.append(fold_score)
        print(f"Fold {fold} 验证集相关系数: {fold_score:.4f}")

        # Track which fold produced the best validation correlation.
        if fold_score > best_fold_score:
            best_fold_score = fold_score
            best_fold_model = fold
            print(f" -> 新的最佳模型 (Fold {fold})")

    # Overall out-of-fold score across all samples.
    overall_score = pearsonr(y_train, oof_preds)[0]

    print(f"\n=== 交叉验证结果 ===")
    print(f"各折验证集相关系数: {[f'{score:.4f}' for score in fold_scores]}")
    print(f"平均验证集相关系数: {np.mean(fold_scores):.4f} ± {np.std(fold_scores):.4f}")
    print(f"最佳验证集相关系数: {best_fold_score:.4f} (Fold {best_fold_model})")
    print(f"整体OOF相关系数: {overall_score:.4f}")

    # Persist a small summary for later inspection (scores + config only).
    model_info = {
        'best_fold': best_fold_model,
        'best_score': best_fold_score,
        'fold_scores': fold_scores,
        'overall_score': overall_score,
        'config': {
            'n_folds': Config.N_FOLDS,
            'epochs': Config.EPOCHS,
            'learning_rate': Config.LEARNING_RATE,
            'batch_size': Config.BATCH_SIZE
        }
    }

    with open('/AI4M/users/mjzhang/workspace/DRW/ZMJ/max_IC_mixed/mlp_best_model_info.pkl', 'wb') as f:
        pickle.dump(model_info, f)
    print(f"最佳模型信息已保存: mlp_best_model_info.pkl")

    return oof_preds, test_preds, fold_scores, best_fold_score
| |
|
| | |
def create_submission(train_df, oof_preds, test_preds, submission_df, fold_scores, best_fold_score):
    """Print the final CV summary and write averaged test predictions to CSV.

    Returns the overall out-of-fold Pearson correlation.
    """
    labels = train_df[Config.LABEL_COLUMN]
    overall_score, _ = pearsonr(labels, oof_preds)

    per_fold = [f'{score:.4f}' for score in fold_scores]
    mean_corr = np.mean(fold_scores)
    std_corr = np.std(fold_scores)
    print(f"\n=== MLP 最终结果 ===")
    print(f"各折验证集相关系数: {per_fold}")
    print(f"平均验证集相关系数: {mean_corr:.4f} ± {std_corr:.4f}")
    print(f"最佳验证集相关系数: {best_fold_score:.4f}")
    print(f"整体OOF相关系数: {overall_score:.4f}")

    out = submission_df.copy()
    out["prediction"] = test_preds
    out.to_csv("/AI4M/users/mjzhang/workspace/DRW/ZMJ/max_IC_mixed/submission_mlp_cv.csv", index=False)
    print("Saved: submission_mlp_cv.csv")
    return overall_score
| |
|
| | |
if __name__ == "__main__":
    # Pipeline: load data -> K-fold MLP training -> write submission CSV.
    print("Loading data...")
    train_df, test_df, submission_df = load_data()
    print("\nTraining MLP model...")
    oof_preds, test_preds, fold_scores, best_fold_score = train_and_evaluate(train_df, test_df)
    print("\nCreating submission...")
    score = create_submission(train_df, oof_preds, test_preds, submission_df, fold_scores, best_fold_score)
    print(f"\nAll done! MLP OOF PearsonR: {score:.4f}")