| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| | import torch
|
| | import numpy as np
|
| | import pandas as pd
|
| | from Settings import Config
|
| | from torch.utils.data import DataLoader, TensorDataset
|
| |
|
| |
|
def getDataLoader(X, Y, hparams, device, shuffle = True):
    """Wrap features (and optional targets) in a reproducibly-seeded DataLoader.

    Parameters
    ----------
    X : array-like or pandas object
        Feature matrix; coerced to a float32 tensor on ``device``.
    Y : array-like, pandas Series, or None
        Targets. When None the loader yields feature-only batches.
    hparams : dict
        Must contain ``'batch_size'`` and ``'seed'``.
    device : str or torch.device
        Device the tensors are created on.
    shuffle : bool
        Whether to reshuffle each epoch (order fixed by the seeded generator).

    Returns
    -------
    torch.utils.data.DataLoader
    """
    # FIX: apply the same pandas `.values` coercion to X that was already
    # applied to Y — the original called torch.tensor(X, ...) directly, which
    # fails for a DataFrame.  Plain numpy arrays/lists are unaffected.
    X = torch.tensor(X.values if hasattr(X, 'values') else X,
                     dtype = torch.float32, device = device)
    if Y is None:
        dataset = TensorDataset(X)
    else:
        # unsqueeze(1) -> shape (N, 1) so targets match a single-output head.
        Y = torch.tensor(Y.values if hasattr(Y, 'values') else Y,
                         dtype = torch.float32, device = device).unsqueeze(1)
        dataset = TensorDataset(X, Y)

    # A dedicated generator makes the shuffle order reproducible without
    # touching torch's global RNG state.
    dataloader = DataLoader(dataset, batch_size = hparams['batch_size'], shuffle = shuffle,
                            generator = torch.Generator().manual_seed(hparams['seed']))
    return dataloader
|
| |
|
class Config:
    """Static experiment configuration: data paths, feature lists, CV settings.

    NOTE(review): this class shadows the ``Config`` imported from ``Settings``
    at the top of the file — confirm the shadowing is intentional.
    NOTE(review): ``load_data`` references ``Config.MLP_FEATURES``, which is
    not defined here — verify whether ``EX_FEATURES`` was meant instead.
    """
    # Input data locations (train/test parquet plus the sample-submission CSV).
    TRAIN_PATH = "/AI4M/users/mjzhang/workspace/DRW/new_data/train.parquet"
    TEST_PATH = "/AI4M/users/mjzhang/workspace/DRW/new_data/test.parquet"
    SUBMISSION_PATH = "/AI4M/users/mjzhang/workspace/DRW/new_data/sample_submission.csv"

    # Base feature columns read from the parquet files.  Rebound by
    # load_data() to the full post-engineering column list.
    FEATURES = [
        "X175", "X198", "X179", "X173", "X169", "X181", "X94",
        "X197", "X137", "X133", "X163", "X196", "sell_qty",
        "bid_qty", "ask_qty", "buy_qty", "volume"]
    # Extra raw feature columns (purpose not shown in this file).
    EX_FEATURES = [
        'X598', 'X385', 'X603', 'X674', 'X415', 'X345', 'X174',
        'X302', 'X178', 'X168', 'X612', 'X421', 'X333', 'X586', 'X292'
    ]
    # Name of the target column in the train set.
    TARGET = "label"
    # Cross-validation fold count and RNG seed.
    N_FOLDS = 3
    RANDOM_STATE = 42
|
| |
|
def add_featrues1(df):
    """Add engineered order-flow/liquidity features from the base columns
    ``bid_qty``, ``ask_qty``, ``buy_qty``, ``sell_qty`` and ``volume``.

    Adds columns to ``df`` in place, then returns a cleaned frame in which
    inf/-inf (from the ratio features) and NaN are replaced by 0.  Note the
    caller's original frame gains the new columns but is NOT inf-cleaned —
    only the returned frame is.

    The ``1e-10`` terms guard every division against a zero denominator.
    """
    # --- pairwise quantity interactions ---
    df['bid_ask_interaction'] = df['bid_qty'] * df['ask_qty']
    df['bid_buy_interaction'] = df['bid_qty'] * df['buy_qty']
    df['bid_sell_interaction'] = df['bid_qty'] * df['sell_qty']
    df['ask_buy_interaction'] = df['ask_qty'] * df['buy_qty']
    df['ask_sell_interaction'] = df['ask_qty'] * df['sell_qty']

    # --- volume-scaled sell-side features and log transform ---
    df['volume_weighted_sell'] = df['sell_qty'] * df['volume']
    df['buy_sell_ratio'] = df['buy_qty'] / (df['sell_qty'] + 1e-10)
    df['selling_pressure'] = df['sell_qty'] / (df['volume'] + 1e-10)
    df['log_volume'] = np.log1p(df['volume'])

    # --- spread / imbalance proxies ---
    df['effective_spread_proxy'] = np.abs(df['buy_qty'] - df['sell_qty']) / (df['volume'] + 1e-10)
    df['bid_ask_imbalance'] = (df['bid_qty'] - df['ask_qty']) / (df['bid_qty'] + df['ask_qty'] + 1e-10)
    df['order_flow_imbalance'] = (df['buy_qty'] - df['sell_qty']) / (df['buy_qty'] + df['sell_qty'] + 1e-10)
    df['liquidity_ratio'] = (df['bid_qty'] + df['ask_qty']) / (df['volume'] + 1e-10)

    # --- net order flow and buy-side counterparts ---
    df['net_order_flow'] = df['buy_qty'] - df['sell_qty']
    df['normalized_net_flow'] = df['net_order_flow'] / (df['volume'] + 1e-10)
    df['buying_pressure'] = df['buy_qty'] / (df['volume'] + 1e-10)
    df['volume_weighted_buy'] = df['buy_qty'] * df['volume']

    # --- book-depth features (total_depth is reused by many features below) ---
    df['total_depth'] = df['bid_qty'] + df['ask_qty']
    df['depth_imbalance'] = (df['bid_qty'] - df['ask_qty']) / (df['total_depth'] + 1e-10)
    df['relative_spread'] = np.abs(df['bid_qty'] - df['ask_qty']) / (df['total_depth'] + 1e-10)
    df['log_depth'] = np.log1p(df['total_depth'])

    # --- price-impact / toxicity proxies (names suggest Kyle-lambda style
    # measures; formulas are volume-normalized flow magnitudes) ---
    df['kyle_lambda'] = np.abs(df['net_order_flow']) / (df['volume'] + 1e-10)
    df['flow_toxicity'] = np.abs(df['order_flow_imbalance']) * df['volume']
    df['aggressive_flow_ratio'] = (df['buy_qty'] + df['sell_qty']) / (df['total_depth'] + 1e-10)

    # --- activity ratios and per-column log transforms ---
    df['volume_depth_ratio'] = df['volume'] / (df['total_depth'] + 1e-10)
    df['activity_intensity'] = (df['buy_qty'] + df['sell_qty']) / (df['volume'] + 1e-10)
    df['log_buy_qty'] = np.log1p(df['buy_qty'])
    df['log_sell_qty'] = np.log1p(df['sell_qty'])
    df['log_bid_qty'] = np.log1p(df['bid_qty'])
    df['log_ask_qty'] = np.log1p(df['ask_qty'])

    # --- additional spread/impact proxies ---
    df['realized_spread_proxy'] = 2 * np.abs(df['net_order_flow']) / (df['volume'] + 1e-10)
    df['price_impact_proxy'] = df['net_order_flow'] / (df['total_depth'] + 1e-10)
    df['quote_volatility_proxy'] = np.abs(df['depth_imbalance'])

    # --- cross-feature interactions and absolute spreads ---
    df['flow_depth_interaction'] = df['net_order_flow'] * df['total_depth']
    df['imbalance_volume_interaction'] = df['order_flow_imbalance'] * df['volume']
    df['depth_volume_interaction'] = df['total_depth'] * df['volume']
    df['buy_sell_spread'] = np.abs(df['buy_qty'] - df['sell_qty'])
    df['bid_ask_spread'] = np.abs(df['bid_qty'] - df['ask_qty'])

    # --- informativeness / execution-cost proxies ---
    df['trade_informativeness'] = df['net_order_flow'] / (df['bid_qty'] + df['ask_qty'] + 1e-10)
    df['execution_shortfall_proxy'] = df['buy_sell_spread'] / (df['volume'] + 1e-10)
    df['adverse_selection_proxy'] = df['net_order_flow'] / (df['total_depth'] + 1e-10) * df['volume']

    # --- fill/efficiency ratios ---
    df['fill_probability'] = df['volume'] / (df['buy_qty'] + df['sell_qty'] + 1e-10)
    df['execution_rate'] = (df['buy_qty'] + df['sell_qty']) / (df['total_depth'] + 1e-10)
    df['market_efficiency'] = df['volume'] / (df['bid_ask_spread'] + 1e-10)

    # --- simple nonlinear transforms ---
    df['sqrt_volume'] = np.sqrt(df['volume'])
    df['sqrt_depth'] = np.sqrt(df['total_depth'])
    df['volume_squared'] = df['volume'] ** 2
    df['imbalance_squared'] = df['order_flow_imbalance'] ** 2

    # --- side shares of depth and of traded quantity ---
    df['bid_ratio'] = df['bid_qty'] / (df['total_depth'] + 1e-10)
    df['ask_ratio'] = df['ask_qty'] / (df['total_depth'] + 1e-10)
    df['buy_ratio'] = df['buy_qty'] / (df['buy_qty'] + df['sell_qty'] + 1e-10)
    df['sell_ratio'] = df['sell_qty'] / (df['buy_qty'] + df['sell_qty'] + 1e-10)

    # --- stress / depletion measures ---
    df['liquidity_consumption'] = (df['buy_qty'] + df['sell_qty']) / (df['total_depth'] + 1e-10)
    df['market_stress'] = df['volume'] / (df['total_depth'] + 1e-10) * np.abs(df['order_flow_imbalance'])
    df['depth_depletion'] = df['volume'] / (df['bid_qty'] + df['ask_qty'] + 1e-10)

    # --- signed / directional volume features ---
    df['net_buying_ratio'] = df['net_order_flow'] / (df['volume'] + 1e-10)
    df['directional_volume'] = df['net_order_flow'] * np.log1p(df['volume'])
    df['signed_volume'] = np.sign(df['net_order_flow']) * df['volume']

    # Replace infinities produced by the divisions, and any NaN, with 0 so
    # downstream models never see non-finite values.  Note: this rebinds
    # `df` to a new frame; only the returned frame is cleaned.
    df = df.replace([np.inf, -np.inf], 0).fillna(0)

    return df
|
| |
|
| |
|
def load_data():
    """Load the train/test parquet files, run feature engineering, and return
    ``(train, test, submission)``.

    Side effects
    ------------
    Rebinds ``Config.FEATURES`` to the full post-engineering test column list.

    Returns
    -------
    tuple(pd.DataFrame, pd.DataFrame, pd.DataFrame)
        ``train`` (with the label column), ``test``, and the sample submission.
    """
    # BUGFIX: the local Config class defines EX_FEATURES but not MLP_FEATURES,
    # so the original `Config.FEATURES + Config.MLP_FEATURES` raised
    # AttributeError.  Fall back to EX_FEATURES when MLP_FEATURES is absent
    # (presumably the intended extra feature list — confirm with the author).
    extra = getattr(Config, 'MLP_FEATURES', Config.EX_FEATURES)
    features = list(set(Config.FEATURES + extra))
    train = pd.read_parquet(Config.TRAIN_PATH, columns = features + [Config.TARGET])
    # Rows without a label are unusable for supervised training.
    train = train.dropna(subset=[Config.TARGET]).reset_index(drop=True)
    assert not train[Config.TARGET].isna().any(), "label still has NaN"
    test = pd.read_parquet(Config.TEST_PATH, columns = features)
    submission = pd.read_csv(Config.SUBMISSION_PATH)
    print(f'Origin: train {train.shape}, test {test.shape}, submission {submission.shape}')

    train, test = add_featrues1(train), add_featrues1(test)
    # Widen Config.FEATURES to every engineered column present in test.
    Config.FEATURES = test.columns.tolist()

    return train.reset_index(drop = True), test.reset_index(drop = True), submission
|
| |
|
| |
|
| |
|