# -*- coding: utf-8 -*-
# @Time    : 2025/7/4 18:48
# @Author  : Lukax
# @Email   : Lukarxiang@gmail.com
# @File    : Settings.py
# @Project : PyCharm
"""Central configuration for the training/prediction pipeline.

Exposes a single ready-to-use ``Config`` instance (the name deliberately
shadows the class at the bottom of this module) holding directory paths,
feature lists, cross-validation / outlier / ensemble settings, time-slice
definitions and base-learner factories.
"""
import os


class Config:
    """Project-wide settings container.

    Heavy third-party libraries (torch, xgboost, lightgbm, catboost,
    sklearn) are imported lazily inside :meth:`get_learners` so that
    importing this module stays cheap and does not require the full
    ML stack to be installed.
    """

    # ------------------------------------------------------------------
    # Directory layout — created eagerly at class-definition time so that
    # downstream code can rely on the directories existing.
    # ------------------------------------------------------------------
    ROOT_PATH = os.getcwd()
    DATA_DIR = os.path.join(ROOT_PATH, 'data')
    SUBMISSION_DIR = os.path.join(ROOT_PATH, 'submission')
    RESULTS_DIR = os.path.join(ROOT_PATH, 'results')

    os.makedirs(DATA_DIR, exist_ok=True)
    os.makedirs(SUBMISSION_DIR, exist_ok=True)
    os.makedirs(RESULTS_DIR, exist_ok=True)

    TRAIN_PATH = os.path.join(DATA_DIR, 'train.parquet')
    TEST_PATH = os.path.join(DATA_DIR, 'test.parquet')
    SUBMISSION_PATH = os.path.join(DATA_DIR, 'sample_submission.csv')

    # Feature subset fed to the gradient-boosted tree models.
    FEATURES = [
        "bid_qty", "ask_qty", "buy_qty", "sell_qty", "volume",
        "X598", "X385", "X603", "X674", "X415",
        "X345", "X174", "X302", "X178", "X168",
        "X612", "X421", "X333", "X586", "X292",
    ]

    # Feature subset fed to the MLP; the input width of
    # MLP_CONFIG['layers'] is derived from its length below.
    MLP_FEATURES = [
        "bid_qty", "ask_qty", "buy_qty", "sell_qty", "volume",
        "X344", "X598", "X385", "X603", "X674",
        "X415", "X345", "X137", "X174", "X302",
        "X178", "X532", "X168", "X612",
    ]

    TARGET = 'label'

    # Cross-validation / reproducibility.
    N_FOLDS = 5
    RANDOM_STATE = 23

    # Fraction of samples treated as outliers and the handling strategies
    # to evaluate.
    OUTLIER_FRACTION = 0.001
    OUTLIER_STRATEGIES = ['reduce', 'remove', 'double', 'none']

    # Ensemble blending options and the grid-search step sizes
    # (coarse pass, then fine pass).
    ENSEMBLE_METHODS = ['grid', 'stacking']
    GRID_SEARCH_STRIDE1 = 0.1
    GRID_SEARCH_STRIDE2 = 0.025

    # Time-slice definitions: 'anchor_ratio' is the split point within the
    # (chronologically ordered) data and 'after' selects which side of the
    # split is kept.
    SLICE_CONFIGS = [
        {'name': 'full', 'anchor_ratio': 0, 'after': True, 'adjust_outlier': False},
        {'name': 'recent_90', 'anchor_ratio': 0.1, 'after': True, 'adjust_outlier': False},
        {'name': 'recent_85', 'anchor_ratio': 0.15, 'after': True, 'adjust_outlier': False},
        {'name': 'recent_80', 'anchor_ratio': 0.2, 'after': True, 'adjust_outlier': False},
        {'name': 'first_25', 'anchor_ratio': 0.25, 'after': False, 'adjust_outlier': False},
    ]

    # NOTE(review): 10 weights for 5 SLICE_CONFIGS — presumably each slice
    # is duplicated with an outlier-adjusted variant by the consumer of
    # these weights; confirm the ordering matches that consumer.
    SLICE_WEIGHTS = [
        1.0,   # full_data
        1.0,   # last_90pct
        1.0,   # last_85pct
        1.0,   # last_80pct
        0.25,  # oldest_25pct
        0.9,   # full_data_outlier_adj
        0.9,   # last_90pct_outlier_adj
        0.9,   # last_85pct_outlier_adj
        0.9,   # last_80pct_outlier_adj
        0.2,   # oldest_25pct_outlier_adj
    ]

    # MLP architecture / training hyper-parameters; the first layer width
    # tracks the MLP feature count automatically.
    MLP_CONFIG = {
        'layers': [len(MLP_FEATURES), 128, 64, 1],
        'activation': 'relu',
        'last_activation': None,
        'dropout_rate': 0.6,
        'learning_rate': 0.001,
        'batch_size': 1024,
        'epochs': 100,
        'patience': 10,
    }

    @classmethod
    def get_learners(cls):
        """Return the base-learner specifications.

        Each entry is a dict with 'name', 'estimator' (the regressor
        class, not an instance) and 'params' (constructor kwargs).
        The third-party imports are deliberately local so that merely
        importing the settings module never requires them.
        """
        import torch
        from xgboost import XGBRegressor
        from lightgbm import LGBMRegressor
        from catboost import CatBoostRegressor
        from sklearn.ensemble import RandomForestRegressor

        return [
            {
                'name': 'xgb',
                'estimator': XGBRegressor,
                'params': {
                    "tree_method": "hist",
                    # Run on GPU only when one is actually available.
                    "device": "gpu" if torch.cuda.is_available() else "cpu",
                    "colsample_bylevel": 0.4778,
                    "colsample_bynode": 0.3628,
                    "colsample_bytree": 0.7107,
                    "gamma": 1.7095,
                    "learning_rate": 0.02213,
                    "max_depth": 20,
                    "max_leaves": 12,
                    "min_child_weight": 16,
                    "n_estimators": 1667,
                    "subsample": 0.06567,
                    "reg_alpha": 39.3524,
                    "reg_lambda": 75.4484,
                    "verbosity": 0,
                    "random_state": cls.RANDOM_STATE,
                    "n_jobs": -1,
                },
            },
            {
                'name': 'lgb',
                'estimator': LGBMRegressor,
                'params': {
                    "objective": "regression",
                    "metric": "rmse",
                    "boosting_type": "gbdt",
                    "num_leaves": 31,
                    "learning_rate": 0.05,
                    "feature_fraction": 0.9,
                    "bagging_fraction": 0.8,
                    "bagging_freq": 5,
                    "verbose": -1,
                    "random_state": cls.RANDOM_STATE,
                    "n_estimators": 1000,
                },
            },
            {
                'name': 'cat',
                'estimator': CatBoostRegressor,
                'params': {
                    "iterations": 1000,
                    "learning_rate": 0.03,
                    "depth": 6,
                    "l2_leaf_reg": 3,
                    "random_seed": cls.RANDOM_STATE,
                    "verbose": False,
                    # Avoid CatBoost dumping catboost_info/ into the cwd.
                    "allow_writing_files": False,
                },
            },
            {
                'name': 'rf',
                'estimator': RandomForestRegressor,
                'params': {
                    "n_estimators": 200,
                    "max_depth": 15,
                    "min_samples_split": 5,
                    "min_samples_leaf": 2,
                    "random_state": cls.RANDOM_STATE,
                    "n_jobs": -1,
                },
            },
        ]

    @property
    def LEARNERS(self):
        """Instance-level alias for :meth:`get_learners` (this module
        exports ``Config`` as an instance, so attribute-style access works)."""
        return self.get_learners()

    @classmethod
    def print_config_summary(cls):
        """Print a short human-readable summary of the key settings."""
        print("=" * 50)
        print(f"GBDT feature nums: {len(cls.FEATURES)}")
        print(f"MLP feature nums: {len(cls.MLP_FEATURES)}")
        print(f"n_cv: {cls.N_FOLDS}")
        print(f"outlier_fraction: {cls.OUTLIER_FRACTION}")
        print(f"outlier_strategies: {cls.OUTLIER_STRATEGIES}")
        print(f"learners: {[l['name'] for l in cls.get_learners()]}")
        print("=" * 50)


# Export a ready-to-use singleton; the name deliberately shadows the class
# so existing call sites (Config.FEATURES, Config.LEARNERS, ...) keep working.
Config = Config()