Buckets:

matthewkenney's picture
download
raw
17.6 kB
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "ja_OLyiH0Gne"
   },
   "outputs": [],
   "source": [
    "!git clone https://github.com/frinkleko/FinReport.git\n",
    "%cd FinReport/src\n",
    "!tar -xvf \"/content/FinReport/src/18-21_SRl&Factor&SDPG_CN_data.csv.tar.gz\"\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true,
    "id": "5JHR8nIQzxcU"
   },
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.metrics import accuracy_score\n",
    "from tqdm import tqdm\n",
    "import torch\n",
    "import ast\n",
    "\n",
    "# Use the GPU when available, otherwise fall back to the CPU.\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "if device.type == \"cuda\":\n",
    "    print(\"代码将在GPU上计算\")\n",
    "else:\n",
    "    print(\"代码将在CPU上计算\")\n",
    "\n",
    "# Load the raw factor dataset.\n",
    "data = pd.read_csv('/content/FinReport/src/18-21_SRl&Factor&SDPG_CN_data.csv')\n",
    "\n",
    "# Candidate factors are the numeric columns, minus the target itself.\n",
    "# NOTE(review): 'pct_chg' scored a perfect 1.0 accuracy in earlier runs,\n",
    "# which indicates the label is derived from it (target leakage), so it\n",
    "# is excluded from the candidate factors as well.\n",
    "numeric_cols = data.select_dtypes(include=np.number).columns\n",
    "factor_columns = numeric_cols.drop(['label', 'pct_chg'], errors='ignore')\n",
    "\n",
    "# Date-based train / test split.\n",
    "train_data = data[data['trade_date'].between('2018-07-01', '2020-12-31')]\n",
    "test_data = data[data['trade_date'].between('2021-01-01', '2021-11-30')]\n",
    "\n",
    "train_features = train_data[factor_columns]\n",
    "test_features = test_data[factor_columns]\n",
    "train_labels = train_data['label'].astype(int)\n",
    "test_labels = test_data['label'].astype(int)\n",
    "\n",
    "# Collect one record per factor and build the result frame once at the\n",
    "# end: growing a DataFrame with pd.concat inside the loop is quadratic\n",
    "# and raises a FutureWarning for empty/all-NA frames.\n",
    "results = []\n",
    "\n",
    "# Run a single-factor test for every candidate factor.\n",
    "for factor_str in tqdm(factor_columns, desc='Performing Single Factor Test'):\n",
    "    # Column names may be string representations of tuples; recover them.\n",
    "    try:\n",
    "        factor = ast.literal_eval(factor_str)\n",
    "    except (ValueError, SyntaxError):\n",
    "        factor = factor_str\n",
    "\n",
    "    # Extract the train/test features for the current factor.\n",
    "    if factor in train_features.columns and factor in test_features.columns:\n",
    "        train_feature = train_features[[factor]]\n",
    "        test_feature = test_features[[factor]]\n",
    "    elif factor_str in train_features.columns and factor_str in test_features.columns:\n",
    "        train_feature = train_features[[factor_str]]\n",
    "        test_feature = test_features[[factor_str]]\n",
    "        factor = factor_str\n",
    "    else:\n",
    "        print(f\"Warning: Factor '{factor_str}' not found in features. Skipping.\")\n",
    "        continue\n",
    "\n",
    "    # Fit a one-feature logistic regression; max_iter is raised because\n",
    "    # the factors are unscaled and the default 100 iterations may not\n",
    "    # converge.\n",
    "    model = LogisticRegression(max_iter=1000)\n",
    "    model.fit(train_feature, train_labels)\n",
    "\n",
    "    # Score out-of-sample accuracy on the test period.\n",
    "    predictions = model.predict(test_feature)\n",
    "    accuracy = accuracy_score(test_labels, predictions)\n",
    "\n",
    "    results.append({'Factor': factor, 'Accuracy': accuracy})\n",
    "\n",
    "result_df = pd.DataFrame(results, columns=['Factor', 'Accuracy'])\n",
    "\n",
    "# Keep the top-n factors by accuracy (adjust n as needed).\n",
    "n = 15\n",
    "selected_factors = result_df.sort_values('Accuracy', ascending=False).head(n)\n",
    "\n",
    "# Persist and display the selection.\n",
    "selected_factors.to_csv('18-21SelectedFactors.csv', index=False)\n",
    "\n",
    "print(\"选出的优质因子:\")\n",
    "print(selected_factors)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true,
    "id": "Huhz7dUHzxcW"
   },
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import accuracy_score\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from tqdm import tqdm\n",
    "\n",
    "# Seed every RNG in play for reproducibility.\n",
    "seed = 42\n",
    "np.random.seed(seed)\n",
    "torch.manual_seed(seed)\n",
    "torch.cuda.manual_seed_all(seed)\n",
    "\n",
    "# Load the data and keep the 50 factor columns plus label and date.\n",
    "df = pd.read_csv('18-21_SRl&Factor&SDPG_CN_data.csv')\n",
    "selected_cols = list(df.columns[18:68]) + ['label', 'trade_date']\n",
    "df = df[selected_cols]\n",
    "\n",
    "# Date-based train / test split.\n",
    "train_df = df[df['trade_date'].between('2018-07-01', '2020-12-31')]\n",
    "test_df = df[df['trade_date'].between('2021-01-01', '2021-11-30')]\n",
    "\n",
    "# Hold out 20% of the training period for validation.\n",
    "train_df, val_df = train_test_split(train_df, test_size=0.2, random_state=seed)\n",
    "\n",
    "class CustomDataset(Dataset):\n",
    "    \"\"\"Yields {'factor_data': float32 tensor, 'label': long scalar} per row.\"\"\"\n",
    "\n",
    "    def __init__(self, df):\n",
    "        self.df = df\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.df)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        row = self.df.iloc[index]\n",
    "        # All columns except the trailing 'label' and 'trade_date'.\n",
    "        factor_data = torch.from_numpy(row[selected_cols[:-2]].values.astype(np.float32))\n",
    "        label = torch.tensor(int(row['label']), dtype=torch.long)\n",
    "        return {'factor_data': factor_data, 'label': label}\n",
    "\n",
    "# Hyper-parameters.\n",
    "batch_size = 16\n",
    "hidden_size = 128\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "num_epochs = 20\n",
    "learning_rate = 0.001\n",
    "\n",
    "train_dataset = CustomDataset(train_df)\n",
    "val_dataset = CustomDataset(val_df)\n",
    "test_dataset = CustomDataset(test_df)\n",
    "\n",
    "train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n",
    "val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)\n",
    "test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n",
    "\n",
    "class MLPClassifier(nn.Module):\n",
    "    \"\"\"Two-hidden-layer MLP with ReLU and dropout for classification.\"\"\"\n",
    "\n",
    "    def __init__(self, input_size, hidden_size, num_classes):\n",
    "        super(MLPClassifier, self).__init__()\n",
    "        self.fc1 = nn.Linear(input_size, hidden_size)\n",
    "        self.relu = nn.ReLU()\n",
    "        self.dropout = nn.Dropout(0.2)\n",
    "        self.fc2 = nn.Linear(hidden_size, hidden_size)\n",
    "        self.fc3 = nn.Linear(hidden_size, num_classes)\n",
    "\n",
    "    def forward(self, x):\n",
    "        out = self.fc1(x)\n",
    "        out = self.relu(out)\n",
    "        out = self.dropout(out)\n",
    "        out = self.fc2(out)\n",
    "        out = self.relu(out)\n",
    "        out = self.dropout(out)\n",
    "        out = self.fc3(out)\n",
    "        return out\n",
    "\n",
    "input_size = len(selected_cols[:-2])\n",
    "# Derive the class count from the data instead of hard-coding 3.\n",
    "# Assumes labels are consecutive integers 0..K-1 (required by\n",
    "# CrossEntropyLoss targets anyway).\n",
    "num_classes = int(df['label'].max()) + 1\n",
    "model = MLPClassifier(input_size, hidden_size, num_classes)\n",
    "model = model.to(device)\n",
    "\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)\n",
    "\n",
    "# Training loop: checkpoint whenever validation accuracy improves.\n",
    "best_val_acc = 0.0\n",
    "for epoch in range(num_epochs):\n",
    "    train_loss = 0.0\n",
    "    train_preds = []\n",
    "    train_labels = []\n",
    "\n",
    "    progress_bar = tqdm(train_loader, desc='Epoch {:1d}'.format(epoch+1), leave=False)\n",
    "\n",
    "    model.train()\n",
    "    for batch in progress_bar:\n",
    "        factor_data = batch['factor_data'].to(device)\n",
    "        labels = batch['label'].to(device)\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "        logits = model(factor_data)\n",
    "        loss = criterion(logits, labels)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        train_loss += loss.item()\n",
    "        preds = torch.argmax(logits, dim=1)\n",
    "        train_preds.extend(preds.detach().cpu().numpy())\n",
    "        train_labels.extend(labels.detach().cpu().numpy())\n",
    "\n",
    "    train_loss /= len(train_loader)\n",
    "    train_acc = accuracy_score(train_labels, train_preds)\n",
    "\n",
    "    # Validation pass (no gradients).\n",
    "    val_loss = 0.0\n",
    "    val_preds = []\n",
    "    val_labels = []\n",
    "\n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        for batch in val_loader:\n",
    "            factor_data = batch['factor_data'].to(device)\n",
    "            labels = batch['label'].to(device)\n",
    "\n",
    "            logits = model(factor_data)\n",
    "            preds = torch.argmax(logits, dim=1)\n",
    "            val_preds.extend(preds.cpu().numpy())\n",
    "            val_labels.extend(labels.cpu().numpy())\n",
    "            val_loss += criterion(logits, labels).item()\n",
    "\n",
    "    val_loss /= len(val_loader)\n",
    "    val_acc = accuracy_score(val_labels, val_preds)\n",
    "\n",
    "    print(f\"Epoch {epoch+1}/{num_epochs}\")\n",
    "    print(f\"Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f}\")\n",
    "    print(f\"Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.4f}\\n\")\n",
    "\n",
    "    if val_acc > best_val_acc:\n",
    "        best_val_acc = val_acc\n",
    "        torch.save(model.state_dict(), 'Factor_best_model.pt')\n",
    "\n",
    "# Evaluate the best checkpoint on the test set. map_location keeps the\n",
    "# load working even if the checkpoint was written on a different device.\n",
    "test_loss = 0.0\n",
    "test_preds = []\n",
    "test_labels = []\n",
    "\n",
    "model.load_state_dict(torch.load('Factor_best_model.pt', map_location=device))\n",
    "model.eval()\n",
    "with torch.no_grad():\n",
    "    for batch in test_loader:\n",
    "        factor_data = batch['factor_data'].to(device)\n",
    "        labels = batch['label'].to(device)\n",
    "\n",
    "        logits = model(factor_data)\n",
    "        preds = torch.argmax(logits, dim=1)\n",
    "        test_preds.extend(preds.cpu().numpy())\n",
    "        test_labels.extend(labels.cpu().numpy())\n",
    "        test_loss += criterion(logits, labels).item()\n",
    "\n",
    "test_loss /= len(test_loader)\n",
    "test_acc = accuracy_score(test_labels, test_preds)\n",
    "\n",
    "print(\"------------------------\")\n",
    "print(\"Final Evaluation\")\n",
    "print(\"------------------------\")\n",
    "print(f\"Test Loss: {test_loss:.4f}\")\n",
    "print(f\"Test Acc: {test_acc:.4f}\")\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "name": "python3"
  },
  "language_info": {
   "name": "python"
  },
  "colab": {
   "provenance": [],
   "gpuType": "T4"
  },
  "accelerator": "GPU"
 },
 "nbformat": 4,
 "nbformat_minor": 0
}

Xet Storage Details

Size:
17.6 kB
·
Xet hash:
88043c48b5b194a79e4222f2c8bd1f4cda4dac33fa757bc4f62829ce353a3e41

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.