RichardVR committed on
Commit
696bfeb
·
verified ·
1 Parent(s): f9be9fe

Upload 9 files

Browse files
.gitattributes CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ Stock_Benchmark[[:space:]]Analysis/unique_companies.csv filter=lfs diff=lfs merge=lfs -text
Stock_Benchmark Analysis/Stock_Benchmark Analysis copy.ipynb ADDED
@@ -0,0 +1,682 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "id": "b48026f1",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "import pandas as pd\n",
11
+ "# Load the CSV file\n",
12
+ "df = pd.read_csv('unique_companies.csv')\n",
13
+ "# Filter rows where 'Industry' is 'Copper' or 'Diversified Metals & Mining'\n",
14
+ "filtered_df = df[df['Industry'].isin(['Copper', 'Diversified Metals & Mining'])]\n",
15
+ "# Save the result to a new CSV\n",
16
+ "filtered_df.to_csv('unique_companies_copper_diversified.csv', index=False)"
17
+ ]
18
+ },
19
+ {
20
+ "cell_type": "code",
21
+ "execution_count": 26,
22
+ "id": "261ce11e",
23
+ "metadata": {},
24
+ "outputs": [
25
+ {
26
+ "name": "stdout",
27
+ "output_type": "stream",
28
+ "text": [
29
+ "Found 150 tickers (including HG=F).\n",
30
+ "Downloading batch 1: 50 tickers\n",
31
+ "Downloading batch 2: 50 tickers\n",
32
+ "Downloading batch 3: 50 tickers\n",
33
+ "df_prices shape: (1307, 150)\n",
34
+ "Columns: ['HG=F', '000630.SZ', '000737.SZ', '000878.SZ', '002203.SZ', '005810.KS', '08W.F', '2009.TW', '2IK.F', '300618.SZ', '300697.SZ', '301511.SZ', '381.F', '3N4.SG', '4989.TW', '5PMA.F', '600255.SS', '600362.SS', '600490.SS', '601137.SS', '601609.SS', '603124.SS', '688102.SS', '688388.SS', '7GI.F', '7LY0.F', '97E0.F', '9CM0.F', 'ACMDY', 'ALM.AX', 'ANFGF', 'AR1.AX', 'ARJN.V', 'ARJNF', 'ARREF', 'ASCU.TO', 'ASCUF', 'ATCUF', 'ATYM.L', 'AXO.V', 'BCU.V', 'BCUFF', 'BFGFF', 'BHAGYANGR.NS', 'BP60.F', 'BRVRF', 'BZDLF', 'C730.F', 'CAEN', 'CAML.L', 'CAMLF', 'CFV0.F', 'COPR', 'CPCPF', 'CPER.V', 'CPFXF', 'CPO.AX', 'CPORF', 'CPPKF', 'CPPMF', 'CPR.JO', 'CS.TO', 'CSC.AX', 'CSCCF', 'CUBEXTUB.NS', 'CUU.V', 'CVV.AX', 'CYM.AX', 'CYPMF', 'E2E1.F', 'E9E.F', 'EMTRF', 'ERO', 'FCX', 'FCXO34.SA', 'FDY.TO', 'FG1.F', 'FPMB.F', 'FQVLF', 'GCUMF', 'GRX.AX', 'GRX.L', 'GSCU.L', 'H6F.F', 'HBM', 'HBM.TO', 'HCH.V', 'HDRSF', 'HGO.AX', 'HHLKF', 'HI.V', 'HIN.MU', 'HINDCOPPER.NS', 'HLGVF', 'HNCUF', 'IE', 'IE.TO', 'INUMF', 'IPMLF', 'JGRRF', 'JIX.F', 'KCC.V', 'KGH.WA', 'KGHPF', 'LA.V', 'LSANF', 'LUNMF', 'MAC.AX', 'MARI.TO', 'MARIF', 'MCL.NS', 'MMLTF', 'MTAL', 'MTJ3.F', 'NFM.AX', 'NRX.AX', 'NTM.AX', 'NU0.F', 'OCKA.F', 'OUW0.F', 'PMAM3.SA', 'PNTZF', 'PSGR', 'PUCOBRE.SN', 'Q.V', 'QCCUF', 'RAJMET.NS', 'RDS.AX', 'RE8.F', 'RRR.AX', 'SAGARDEEP.NS', 'SARKY.IS', 'SCCO', 'SFR.AX', 'SFRRF', 'SLMFF', 'TFM.V', 'TGB', 'TKO.L', 'TNC.AX', 'TRRCF', 'TVCCF', 'TWO.V', 'TWOSF', 'USCUF', 'VCUFF', 'WA1.AX', 'WAORF', 'WCUFF', 'XXIX.V']\n",
35
+ "Saved to 'df_prices.csv'.\n"
36
+ ]
37
+ }
38
+ ],
39
+ "source": [
40
+ "# ------------------------------------------------------------\n",
41
+ "# Build df_prices.csv for HG=F + tickers in unique_companies_copper.csv\n",
42
+ "# • period=\"5y\" (more reliable than start/end for some venues)\n",
43
+ "# • Prefer 'Adj Close', fallback to 'Close'\n",
44
+ "# • Re-download single tickers that are all-NaN in batch (e.g., 2IK.F)\n",
45
+ "# ------------------------------------------------------------\n",
46
+ "# pip install yfinance pandas\n",
47
+ "\n",
48
+ "import pandas as pd\n",
49
+ "import yfinance as yf\n",
50
+ "\n",
51
+ "CSV_PATH = \"unique_companies_copper.csv\"\n",
52
+ "TICKER_COL = \"PrimaryTicker\"\n",
53
+ "UNDERLYING = \"HG=F\"\n",
54
+ "BATCH_SIZE = 50\n",
55
+ "OUT_CSV = \"df_prices.csv\"\n",
56
+ "\n",
57
+ "# --- Read tickers ---\n",
58
+ "tickers = (\n",
59
+ " pd.read_csv(CSV_PATH, usecols=[TICKER_COL])[TICKER_COL]\n",
60
+ " .dropna().astype(str).str.strip().str.upper().tolist()\n",
61
+ ")\n",
62
+ "tickers = sorted(set(tickers))\n",
63
+ "if UNDERLYING not in tickers:\n",
64
+ " tickers = [UNDERLYING] + tickers\n",
65
+ "\n",
66
+ "print(f\"Found {len(tickers)} tickers (including {UNDERLYING}).\")\n",
67
+ "\n",
68
+ "def _extract_adj_or_close(df_multi: pd.DataFrame) -> tuple[pd.DataFrame, list[str]]:\n",
69
+ " \"\"\"From yfinance multi-ticker frame, prefer 'Adj Close', else 'Close' per ticker.\"\"\"\n",
70
+ " if not isinstance(df_multi.columns, pd.MultiIndex):\n",
71
+ " raise ValueError(\"Expected MultiIndex columns for multi-ticker download.\")\n",
72
+ " fields = set(df_multi.columns.get_level_values(-1))\n",
73
+ " adj = df_multi.xs(\"Adj Close\", axis=1, level=-1, drop_level=True) if \"Adj Close\" in fields else pd.DataFrame(index=df_multi.index)\n",
74
+ " clo = df_multi.xs(\"Close\", axis=1, level=-1, drop_level=True) if \"Close\" in fields else pd.DataFrame(index=df_multi.index)\n",
75
+ "\n",
76
+ " cols = sorted(set(adj.columns).union(clo.columns))\n",
77
+ " out = pd.DataFrame(index=df_multi.index, columns=cols, dtype=\"float64\")\n",
78
+ " used_close = []\n",
79
+ "\n",
80
+ " for t in cols:\n",
81
+ " a = adj[t] if t in adj.columns else None\n",
82
+ " c = clo[t] if t in clo.columns else None\n",
83
+ " if a is not None and not a.dropna().empty:\n",
84
+ " out[t] = a\n",
85
+ " elif c is not None and not c.dropna().empty:\n",
86
+ " out[t] = c\n",
87
+ " used_close.append(t)\n",
88
+ " return out, used_close\n",
89
+ "\n",
90
+ "def _download_batch(batch):\n",
91
+ " df = yf.download(\n",
92
+ " tickers=batch,\n",
93
+ " period=\"5y\",\n",
94
+ " interval=\"1d\",\n",
95
+ " auto_adjust=False,\n",
96
+ " actions=False,\n",
97
+ " progress=False,\n",
98
+ " group_by=\"ticker\",\n",
99
+ " threads=True\n",
100
+ " )\n",
101
+ " if isinstance(df.columns, pd.MultiIndex):\n",
102
+ " return _extract_adj_or_close(df)\n",
103
+ " else:\n",
104
+ " # Single-ticker shape\n",
105
+ " tkr = batch[0]\n",
106
+ " adj = df.get(\"Adj Close\")\n",
107
+ " clo = df.get(\"Close\")\n",
108
+ " used_close = []\n",
109
+ " if adj is not None and not adj.dropna().empty:\n",
110
+ " out = adj.rename(tkr).to_frame()\n",
111
+ " elif clo is not None and not clo.dropna().empty:\n",
112
+ " out = clo.rename(tkr).to_frame()\n",
113
+ " used_close.append(tkr)\n",
114
+ " else:\n",
115
+ " out = pd.DataFrame(index=df.index, columns=[tkr], dtype=\"float64\")\n",
116
+ " return out, used_close\n",
117
+ "\n",
118
+ "def _download_single(tkr: str) -> pd.Series:\n",
119
+ " \"\"\"Single-ticker repair path; prefer Adj Close, else Close.\"\"\"\n",
120
+ " df = yf.download(\n",
121
+ " tickers=tkr,\n",
122
+ " period=\"5y\",\n",
123
+ " interval=\"1d\",\n",
124
+ " auto_adjust=False,\n",
125
+ " actions=False,\n",
126
+ " progress=False\n",
127
+ " )\n",
128
+ " s = df.get(\"Adj Close\")\n",
129
+ " if s is None or s.dropna().empty:\n",
130
+ " s = df.get(\"Close\")\n",
131
+ " if s is None:\n",
132
+ " return pd.Series(dtype=\"float64\", name=tkr)\n",
133
+ " return s.rename(tkr)\n",
134
+ "\n",
135
+ "# --- Batch download + merge ---\n",
136
+ "frames, used_close_all = [], []\n",
137
+ "for i in range(0, len(tickers), BATCH_SIZE):\n",
138
+ " batch = tickers[i:i+BATCH_SIZE]\n",
139
+ " print(f\"Downloading batch {i//BATCH_SIZE + 1}: {len(batch)} tickers\")\n",
140
+ " part, used_close = _download_batch(batch)\n",
141
+ " frames.append(part)\n",
142
+ " used_close_all.extend(used_close)\n",
143
+ "\n",
144
+ "df_prices = pd.concat(frames, axis=1)\n",
145
+ "df_prices = df_prices.loc[:, ~df_prices.columns.duplicated()].sort_index()\n",
146
+ "\n",
147
+ "# --- Repair tickers that are NaN-only or missing after batch ---\n",
148
+ "to_repair = [t for t in tickers if (t in df_prices.columns and df_prices[t].dropna().empty) or (t not in df_prices.columns)]\n",
149
+ "to_repair = sorted(set(to_repair))\n",
150
+ "if to_repair:\n",
151
+ " print(f\"Repairing via single-ticker fetch: {to_repair}\")\n",
152
+ " for t in to_repair:\n",
153
+ " s = _download_single(t)\n",
154
+ " if not s.dropna().empty:\n",
155
+ " df_prices = df_prices.reindex(df_prices.index.union(s.index)).sort_index()\n",
156
+ " df_prices[t] = s.reindex(df_prices.index)\n",
157
+ "\n",
158
+ "# --- Order columns; drop all-NaN tickers ---\n",
159
+ "ordered_cols = [UNDERLYING] + [t for t in tickers if t != UNDERLYING and t in df_prices.columns]\n",
160
+ "df_prices = df_prices.reindex(columns=ordered_cols)\n",
161
+ "all_nan_cols = [c for c in df_prices.columns if df_prices[c].dropna().empty]\n",
162
+ "if all_nan_cols:\n",
163
+ " print(f\"Dropping tickers with no usable data: {all_nan_cols}\")\n",
164
+ " df_prices = df_prices.drop(columns=all_nan_cols)\n",
165
+ "\n",
166
+ "# --- Report fallback usage ---\n",
167
+ "used_close_all = sorted(set([t for t in used_close_all if t in df_prices.columns]))\n",
168
+ "if used_close_all:\n",
169
+ " print(f\"Used 'Close' fallback for: {used_close_all}\")\n",
170
+ "\n",
171
+ "print(\"df_prices shape:\", df_prices.shape)\n",
172
+ "print(\"Columns:\", list(df_prices.columns))\n",
173
+ "\n",
174
+ "# --- Save ---\n",
175
+ "df_prices.to_csv(OUT_CSV, index_label=\"Date\", float_format=\"%.6f\")\n",
176
+ "print(f\"Saved to '{OUT_CSV}'.\")\n"
177
+ ]
178
+ },
179
+ {
180
+ "cell_type": "code",
181
+ "execution_count": 29,
182
+ "id": "11079562",
183
+ "metadata": {},
184
+ "outputs": [
185
+ {
186
+ "name": "stdout",
187
+ "output_type": "stream",
188
+ "text": [
189
+ "=== Missing % by ticker ===\n",
190
+ " missing_pct\n",
191
+ "AXO.V 97.016067\n",
192
+ "603124.SS 93.037490\n",
193
+ "NFM.AX 87.299158\n",
194
+ "WAORF 85.233359\n",
195
+ "ASCUF 81.637337\n",
196
+ "... ...\n",
197
+ "OUW0.F 2.371844\n",
198
+ "9CM0.F 2.371844\n",
199
+ "5PMA.F 2.371844\n",
200
+ "3N4.SG 2.371844\n",
201
+ "E2E1.F 2.371844\n",
202
+ "\n",
203
+ "[150 rows x 1 columns]\n",
204
+ "\n",
205
+ "Dropping 45 tickers (> 10% missing): ['AXO.V', '603124.SS', 'NFM.AX', 'WAORF', 'ASCUF', 'CSC.AX', 'MAC.AX', 'CPPMF', 'JGRRF', '301511.SZ', 'HNCUF', 'TRRCF', 'CPR.JO', 'CYPMF', '381.F', 'CPORF', 'IE', 'IE.TO', 'CPCPF', '688102.SS', 'INUMF', 'CSCCF', 'WA1.AX', '7LY0.F', 'CPER.V', 'NU0.F', 'HCH.V', 'GSCU.L', 'E9E.F', 'Q.V', 'ASCU.TO', 'H6F.F', 'AR1.AX', 'VCUFF', '97E0.F', 'MTAL', 'RRR.AX', 'CAMLF', 'CPO.AX', 'WCUFF', 'EMTRF', '7GI.F', 'QCCUF', '2IK.F', 'CPPKF']\n",
206
+ "\n",
207
+ "Shapes:\n",
208
+ "Before: (1307, 150) After: (1307, 105)\n",
209
+ "Saved to 'df_prices_final.csv'.\n"
210
+ ]
211
+ }
212
+ ],
213
+ "source": [
214
+ "# ------------------------------------------------------------\n",
215
+ "# Load df_prices.csv, compute missing % per ticker,\n",
216
+ "# drop columns with >10% missing, save df_prices_final.csv\n",
217
+ "# ------------------------------------------------------------\n",
218
+ "import pandas as pd\n",
219
+ "\n",
220
+ "IN_CSV = \"df_prices.csv\"\n",
221
+ "OUT_CSV = \"df_prices_final.csv\"\n",
222
+ "THRESH = 10.0 # percent\n",
223
+ "\n",
224
+ "df_prices = pd.read_csv(IN_CSV, parse_dates=[\"Date\"], index_col=\"Date\")\n",
225
+ "\n",
226
+ "# Missing % over the full DataFrame index\n",
227
+ "missing_pct = df_prices.isna().mean() * 100.0\n",
228
+ "report = (\n",
229
+ " pd.DataFrame({\"missing_pct\": missing_pct})\n",
230
+ " .sort_values(\"missing_pct\", ascending=False)\n",
231
+ ")\n",
232
+ "print(\"=== Missing % by ticker ===\")\n",
233
+ "print(report)\n",
234
+ "\n",
235
+ "# Drop tickers with >10% missing (see THRESH)\n",
236
+ "to_drop = report.index[report[\"missing_pct\"] > THRESH].tolist()\n",
237
+ "print(f\"\\nDropping {len(to_drop)} tickers (> {THRESH:.0f}% missing): {to_drop}\")\n",
238
+ "\n",
239
+ "df_prices_final = df_prices.drop(columns=to_drop, errors=\"ignore\")\n",
240
+ "\n",
241
+ "print(\"\\nShapes:\")\n",
242
+ "print(\"Before:\", df_prices.shape, \"After:\", df_prices_final.shape)\n",
243
+ "\n",
244
+ "df_prices_final.to_csv(OUT_CSV, index_label=\"Date\", float_format=\"%.6f\")\n",
245
+ "print(f\"Saved to '{OUT_CSV}'.\")\n"
246
+ ]
247
+ },
248
+ {
249
+ "cell_type": "code",
250
+ "execution_count": 30,
251
+ "id": "8dc7673c",
252
+ "metadata": {},
253
+ "outputs": [
254
+ {
255
+ "name": "stdout",
256
+ "output_type": "stream",
257
+ "text": [
258
+ "Saved clean, rectangular prices to 'df_prices_final.csv' with shape (1208, 105).\n"
259
+ ]
260
+ }
261
+ ],
262
+ "source": [
263
+ "# ------------------------------------------------------------\n",
264
+ "# Clean df_prices_final: common window + bfill→ffill + final NA drop\n",
265
+ "# Input : df_prices_final.csv (your current file with some missing)\n",
266
+ "# Output: df_prices_final.csv (overwritten, rectangular, NA-free)\n",
267
+ "# ------------------------------------------------------------\n",
268
+ "import pandas as pd\n",
269
+ "\n",
270
+ "IN_CSV = \"df_prices_final.csv\"\n",
271
+ "OUT_CSV = \"df_prices_final.csv\" # overwrite in place\n",
272
+ "\n",
273
+ "df = pd.read_csv(IN_CSV, parse_dates=[\"Date\"], index_col=\"Date\").sort_index()\n",
274
+ "\n",
275
+ "# 1) Common window (everyone has started and not yet delisted)\n",
276
+ "first_valid = df.apply(pd.Series.first_valid_index)\n",
277
+ "last_valid = df.apply(pd.Series.last_valid_index)\n",
278
+ "\n",
279
+ "common_start = max(first_valid.dropna())\n",
280
+ "common_end = min(last_valid.dropna())\n",
281
+ "\n",
282
+ "df = df.loc[common_start:common_end].copy()\n",
283
+ "\n",
284
+ "# 2) Business-day index to harmonize calendars\n",
285
+ "bidx = pd.date_range(df.index.min(), df.index.max(), freq=\"B\")\n",
286
+ "df = df.reindex(bidx)\n",
287
+ "\n",
288
+ "# 3) Fill:\n",
289
+ "# - Backfill once to seed the first business day for tickers closed on common_start\n",
290
+ "# - Forward-fill for holiday gaps etc.\n",
291
+ "df = df.bfill(limit=None).ffill(limit=None)\n",
292
+ "\n",
293
+ "# 4) Final sanity check: drop any rare rows still containing NA\n",
294
+ "before_rows = df.shape[0]\n",
295
+ "df = df.dropna(how=\"any\")\n",
296
+ "after_rows = df.shape[0]\n",
297
+ "if before_rows != after_rows:\n",
298
+ " print(f\"Dropped {before_rows - after_rows} rows that still had NAs after filling.\")\n",
299
+ "\n",
300
+ "# 5) Save\n",
301
+ "df.to_csv(OUT_CSV, index_label=\"Date\", float_format=\"%.6f\")\n",
302
+ "print(f\"Saved clean, rectangular prices to '{OUT_CSV}' with shape {df.shape}.\")\n"
303
+ ]
304
+ },
305
+ {
306
+ "cell_type": "code",
307
+ "execution_count": 40,
308
+ "id": "8cdec19b",
309
+ "metadata": {},
310
+ "outputs": [
311
+ {
312
+ "name": "stdout",
313
+ "output_type": "stream",
314
+ "text": [
315
+ "✓ Saved corr_beta.csv, excess_summary.csv (with corr_to_copper)\n"
316
+ ]
317
+ }
318
+ ],
319
+ "source": [
320
+ "# ============================================================\n",
321
+ "# Copper-linked stocks vs COMEX copper (HG=F)\n",
322
+ "# • Correlation & beta\n",
323
+ "# • Rolling correlation\n",
324
+ "# • β-hedged alpha / excess-return summary\n",
325
+ "# Outputs:\n",
326
+ "# corr_beta.csv, rolling_corr_long.csv, excess_summary.csv\n",
327
+ "# (excess_summary.csv now includes corr_to_copper)\n",
328
+ "# ============================================================\n",
329
+ "\n",
330
+ "import pandas as pd\n",
331
+ "import numpy as np\n",
332
+ "import statsmodels.api as sm\n",
333
+ "\n",
334
+ "# ---------- PARAMETERS ----------\n",
335
+ "IN_CSV = \"df_prices_final.csv\" # prices, Date index\n",
336
+ "BENCH = \"HG=F\" # underlying metal\n",
337
+ "FREQ = \"W-FRI\" # 'D' for daily or 'W-FRI'\n",
338
+ "ROLL_WINDOW = None # 26 weeks ≈ 6 months; set None to skip\n",
339
+ "USE_LOG_RET = True # True = log-returns, False = pct-change\n",
340
+ "ALPHA_T_CUT = 0.9 # |t| threshold for alpha significance\n",
341
+ "SHARPE_CUT = 0.0 # >0 required for outperform label\n",
342
+ "\n",
343
+ "# Annualisation factor\n",
344
+ "PER_YEAR = 52 if FREQ.startswith(\"W\") else 252\n",
345
+ "\n",
346
+ "# ---------- LOAD PRICES ----------\n",
347
+ "px = (pd.read_csv(IN_CSV, parse_dates=[\"Date\"], index_col=\"Date\")\n",
348
+ " .sort_index())\n",
349
+ "assert BENCH in px.columns, f\"{BENCH} not found in columns.\"\n",
350
+ "\n",
351
+ "# Optional resample (weekly smooths roll/holiday noise)\n",
352
+ "if FREQ != \"D\":\n",
353
+ " px = px.resample(FREQ).last()\n",
354
+ "\n",
355
+ "# ---------- RETURNS ----------\n",
356
+ "rets = np.log(px).diff() if USE_LOG_RET else px.pct_change()\n",
357
+ "rets = rets.dropna(how=\"all\")\n",
358
+ "\n",
359
+ "bench = rets[BENCH]\n",
360
+ "stocks = [c for c in rets.columns if c != BENCH]\n",
361
+ "\n",
362
+ "# ---------- 1) CORRELATION & BETA ----------\n",
363
+ "corr = rets[stocks].corrwith(bench) # Series: index=ticker\n",
364
+ "var_b = bench.var(ddof=1)\n",
365
+ "beta = rets[stocks].apply(lambda s: s.cov(bench)) / var_b\n",
366
+ "\n",
367
+ "corr_beta = (pd.DataFrame({\"corr_to_copper\": corr,\n",
368
+ " \"beta_to_copper\": beta})\n",
369
+ " .sort_values(\"corr_to_copper\", ascending=False))\n",
370
+ "corr_beta.index.name = \"ticker\"\n",
371
+ "corr_beta.to_csv(\"corr_beta.csv\", float_format=\"%.6f\")\n",
372
+ "\n",
373
+ "# Keep maps for reuse in excess_summary\n",
374
+ "corr_map = corr.to_dict()\n",
375
+ "beta_map = beta.to_dict()\n",
376
+ "\n",
377
+ "# ---------- OPTIONAL ROLLING CORRELATION ----------\n",
378
+ "if ROLL_WINDOW:\n",
379
+ " rolling_long = (\n",
380
+ " rets[stocks]\n",
381
+ " .rolling(ROLL_WINDOW)\n",
382
+ " .corr(bench) # wide: Date × tickers\n",
383
+ " .stack() # -> Series with MultiIndex (Date, ticker)\n",
384
+ " .rename(\"rolling_corr\")\n",
385
+ " .rename_axis([\"Date\", \"ticker\"])\n",
386
+ " .reset_index()\n",
387
+ " .dropna(subset=[\"rolling_corr\"])\n",
388
+ " )\n",
389
+ " rolling_long.to_csv(\"rolling_corr_long.csv\",\n",
390
+ " index=False, float_format=\"%.6f\")\n",
391
+ "\n",
392
+ "# ---------- 2) β-HEDGED EXCESS & SELECTION ----------\n",
393
+ "def _nw_lags(freq: str) -> int:\n",
394
+ " return 4 if freq.startswith(\"W\") else 21 # ≈1 month of obs\n",
395
+ "\n",
396
+ "hac_lags = _nw_lags(FREQ)\n",
397
+ "\n",
398
+ "rows = []\n",
399
+ "for s in stocks:\n",
400
+ " # align & drop NA\n",
401
+ " df_xy = rets[[s, BENCH]].dropna()\n",
402
+ " if df_xy.empty:\n",
403
+ " continue\n",
404
+ "\n",
405
+ " y = df_xy[s]\n",
406
+ " X = sm.add_constant(df_xy[BENCH])\n",
407
+ " model = sm.OLS(y, X).fit(\n",
408
+ " cov_type=\"HAC\",\n",
409
+ " cov_kwds={\"maxlags\": hac_lags}\n",
410
+ " )\n",
411
+ "\n",
412
+ " alpha = model.params[\"const\"]\n",
413
+ " beta_i = model.params[BENCH]\n",
414
+ " t_alpha = model.tvalues[\"const\"]\n",
415
+ " alpha_ann = alpha * PER_YEAR\n",
416
+ "\n",
417
+ " # β-hedged excess series\n",
418
+ " r_excess = y - beta_i * df_xy[BENCH]\n",
419
+ " mu, sd = r_excess.mean(), r_excess.std(ddof=1)\n",
420
+ " hedged_sharpe = (mu / sd) * np.sqrt(PER_YEAR) if sd > 0 else np.nan\n",
421
+ " cum_excess_beta_log = r_excess.cumsum().iloc[-1]\n",
422
+ "\n",
423
+ " # Simple 1× copper cumulative log excess\n",
424
+ " cum_excess_1x_log = y.cumsum().iloc[-1] - df_xy[BENCH].cumsum().iloc[-1]\n",
425
+ "\n",
426
+ " # Label\n",
427
+ " if (alpha_ann > 0) and (t_alpha > ALPHA_T_CUT) and (hedged_sharpe > SHARPE_CUT):\n",
428
+ " label = \"Outperform\"\n",
429
+ " elif (alpha_ann < 0) and (t_alpha < -ALPHA_T_CUT) and (hedged_sharpe < -SHARPE_CUT):\n",
430
+ " label = \"Underperform\"\n",
431
+ " else:\n",
432
+ " label = \"Neutral\"\n",
433
+ "\n",
434
+ " rows.append({\n",
435
+ " \"ticker\": s,\n",
436
+ " \"corr_to_copper\": corr_map.get(s, np.nan), # <-- added\n",
437
+ " \"beta_to_copper\": beta_map.get(s, np.nan), # keep for consistency\n",
438
+ " \"alpha_ann\": alpha_ann,\n",
439
+ " \"t_alpha\": t_alpha,\n",
440
+ " \"hedged_sharpe\": hedged_sharpe,\n",
441
+ " \"cum_excess_beta_log\": cum_excess_beta_log,\n",
442
+ " \"cum_excess_1x_log\": cum_excess_1x_log,\n",
443
+ " \"label\": label\n",
444
+ " })\n",
445
+ "\n",
446
+ "excess_summary = (pd.DataFrame(rows)\n",
447
+ " .set_index(\"ticker\")\n",
448
+ " .sort_values([\"label\", \"alpha_ann\", \"hedged_sharpe\"],\n",
449
+ " ascending=[True, False, False]))\n",
450
+ "excess_summary.to_csv(\"excess_summary.csv\", float_format=\"%.6f\")\n",
451
+ "\n",
452
+ "print(\"✓ Saved corr_beta.csv,\",\n",
453
+ " \"rolling_corr_long.csv,\" if ROLL_WINDOW else \"\",\n",
454
+ " \"excess_summary.csv (with corr_to_copper)\")\n"
455
+ ]
456
+ },
457
+ {
458
+ "cell_type": "code",
459
+ "execution_count": 42,
460
+ "id": "a651e4d4",
461
+ "metadata": {},
462
+ "outputs": [
463
+ {
464
+ "name": "stdout",
465
+ "output_type": "stream",
466
+ "text": [
467
+ "✓ Saved corr_beta.csv, excess_summary.csv (with corr_to_copper, vol_ann, bench_vol_ann)\n"
468
+ ]
469
+ }
470
+ ],
471
+ "source": [
472
+ "# ============================================================\n",
473
+ "# Copper-linked stocks vs COMEX copper (HG=F)\n",
474
+ "# • Correlation & beta\n",
475
+ "# • Rolling correlation\n",
476
+ "# • β-hedged alpha / excess-return summary\n",
477
+ "# • Annualized volatility (each stock + HG=F)\n",
478
+ "# Outputs:\n",
479
+ "# corr_beta.csv, rolling_corr_long.csv, excess_summary.csv\n",
480
+ "# ============================================================\n",
481
+ "\n",
482
+ "import pandas as pd\n",
483
+ "import numpy as np\n",
484
+ "import statsmodels.api as sm\n",
485
+ "\n",
486
+ "# ---------- PARAMETERS ----------\n",
487
+ "IN_CSV = \"df_prices_final.csv\" # prices, Date index\n",
488
+ "BENCH = \"HG=F\" # underlying metal\n",
489
+ "FREQ = \"W-FRI\" # 'D' for daily or 'W-FRI' (recommended)\n",
490
+ "ROLL_WINDOW = None # 26 weeks ≈ 6 months; set None to skip\n",
491
+ "USE_LOG_RET = True # True = log-returns, False = pct-change\n",
492
+ "ALPHA_T_CUT = 0.8 # |t| threshold for alpha significance\n",
493
+ "SHARPE_CUT = 0.0 # >0 required for outperform label\n",
494
+ "\n",
495
+ "# Annualisation factor\n",
496
+ "PER_YEAR = 52 if FREQ.startswith(\"W\") else 252\n",
497
+ "\n",
498
+ "# ---------- LOAD PRICES ----------\n",
499
+ "px = (pd.read_csv(IN_CSV, parse_dates=[\"Date\"], index_col=\"Date\")\n",
500
+ " .sort_index())\n",
501
+ "assert BENCH in px.columns, f\"{BENCH} not found in columns.\"\n",
502
+ "\n",
503
+ "# Optional resample (weekly smooths roll/holiday noise)\n",
504
+ "if FREQ != \"D\":\n",
505
+ " px = px.resample(FREQ).last()\n",
506
+ "\n",
507
+ "# ---------- RETURNS ----------\n",
508
+ "rets = np.log(px).diff() if USE_LOG_RET else px.pct_change()\n",
509
+ "rets = rets.dropna(how=\"all\")\n",
510
+ "\n",
511
+ "bench = rets[BENCH]\n",
512
+ "stocks = [c for c in rets.columns if c != BENCH]\n",
513
+ "\n",
514
+ "# ---------- 1) CORRELATION & BETA ----------\n",
515
+ "corr = rets[stocks].corrwith(bench)\n",
516
+ "var_b = bench.var(ddof=1)\n",
517
+ "beta = rets[stocks].apply(lambda s: s.cov(bench)) / var_b\n",
518
+ "\n",
519
+ "corr_beta = (pd.DataFrame({\"corr_to_copper\": corr,\n",
520
+ " \"beta_to_copper\": beta})\n",
521
+ " .sort_values(\"corr_to_copper\", ascending=False))\n",
522
+ "corr_beta.index.name = \"ticker\"\n",
523
+ "corr_beta.to_csv(\"corr_beta.csv\", float_format=\"%.6f\")\n",
524
+ "\n",
525
+ "# Maps to reuse\n",
526
+ "corr_map = corr.to_dict()\n",
527
+ "beta_map = beta.to_dict()\n",
528
+ "\n",
529
+ "# ---------- OPTIONAL ROLLING CORRELATION ----------\n",
530
+ "if ROLL_WINDOW:\n",
531
+ " rolling_long = (\n",
532
+ " rets[stocks]\n",
533
+ " .rolling(ROLL_WINDOW)\n",
534
+ " .corr(bench) # wide: Date × tickers\n",
535
+ " .stack() # -> Series with MultiIndex (Date, ticker)\n",
536
+ " .rename(\"rolling_corr\")\n",
537
+ " .rename_axis([\"Date\", \"ticker\"])\n",
538
+ " .reset_index()\n",
539
+ " .dropna(subset=[\"rolling_corr\"])\n",
540
+ " )\n",
541
+ " rolling_long.to_csv(\"rolling_corr_long.csv\",\n",
542
+ " index=False, float_format=\"%.6f\")\n",
543
+ "\n",
544
+ "# ---------- 2) β-HEDGED EXCESS, VOLATILITY & SELECTION ----------\n",
545
+ "def _nw_lags(freq: str) -> int:\n",
546
+ " return 4 if freq.startswith(\"W\") else 21 # ≈1 month of obs\n",
547
+ "\n",
548
+ "hac_lags = _nw_lags(FREQ)\n",
549
+ "\n",
550
+ "# Benchmark annualized volatility (on full return series at chosen frequency)\n",
551
+ "bench_vol_ann = bench.std(ddof=1) * np.sqrt(PER_YEAR)\n",
552
+ "\n",
553
+ "rows = []\n",
554
+ "for s in stocks:\n",
555
+ " # Align series to common non-NA dates for regression & stats\n",
556
+ " df_xy = rets[[s, BENCH]].dropna()\n",
557
+ " if df_xy.empty:\n",
558
+ " continue\n",
559
+ "\n",
560
+ " y = df_xy[s]\n",
561
+ " X = sm.add_constant(df_xy[BENCH])\n",
562
+ " model = sm.OLS(y, X).fit(\n",
563
+ " cov_type=\"HAC\",\n",
564
+ " cov_kwds={\"maxlags\": hac_lags}\n",
565
+ " )\n",
566
+ "\n",
567
+ " alpha = model.params[\"const\"]\n",
568
+ " beta_i = model.params[BENCH]\n",
569
+ " t_alpha = model.tvalues[\"const\"]\n",
570
+ " alpha_ann = alpha * PER_YEAR\n",
571
+ "\n",
572
+ " # β-hedged excess series\n",
573
+ " r_excess = y - beta_i * df_xy[BENCH]\n",
574
+ " mu, sd = r_excess.mean(), r_excess.std(ddof=1)\n",
575
+ " hedged_sharpe = (mu / sd) * np.sqrt(PER_YEAR) if sd > 0 else np.nan\n",
576
+ " cum_excess_beta_log = r_excess.cumsum().iloc[-1]\n",
577
+ "\n",
578
+ " # Simple 1× copper cumulative log excess\n",
579
+ " cum_excess_1x_log = y.cumsum().iloc[-1] - df_xy[BENCH].cumsum().iloc[-1]\n",
580
+ "\n",
581
+ " # Annualized volatility for this stock (aligned sample)\n",
582
+ " vol_ann = y.std(ddof=1) * np.sqrt(PER_YEAR)\n",
583
+ "\n",
584
+ " # Label\n",
585
+ " if (alpha_ann > 0) and (t_alpha > ALPHA_T_CUT) and (hedged_sharpe > SHARPE_CUT):\n",
586
+ " label = \"Outperform\"\n",
587
+ " elif (alpha_ann < 0) and (t_alpha < -ALPHA_T_CUT) and (hedged_sharpe < -SHARPE_CUT):\n",
588
+ " label = \"Underperform\"\n",
589
+ " else:\n",
590
+ " label = \"Neutral\"\n",
591
+ "\n",
592
+ " rows.append({\n",
593
+ " \"ticker\": s,\n",
594
+ " \"corr_to_copper\": corr_map.get(s, np.nan),\n",
595
+ " \"beta_to_copper\": beta_map.get(s, np.nan),\n",
596
+ " \"alpha_ann\": alpha_ann,\n",
597
+ " \"t_alpha\": t_alpha,\n",
598
+ " \"hedged_sharpe\": hedged_sharpe,\n",
599
+ " \"cum_excess_beta_log\": cum_excess_beta_log,\n",
600
+ " \"cum_excess_1x_log\": cum_excess_1x_log,\n",
601
+ " \"vol_ann\": vol_ann, # stock annualized volatility\n",
602
+ " \"bench_vol_ann\": bench_vol_ann, # HG=F annualized volatility (same for all rows)\n",
603
+ " \"label\": label\n",
604
+ " })\n",
605
+ "\n",
606
+ "excess_summary = (pd.DataFrame(rows)\n",
607
+ " .set_index(\"ticker\")\n",
608
+ " .sort_values([\"label\", \"alpha_ann\", \"hedged_sharpe\"],\n",
609
+ " ascending=[True, False, False]))\n",
610
+ "excess_summary.to_csv(\"excess_summary.csv\", float_format=\"%.6f\")\n",
611
+ "\n",
612
+ "print(\"✓ Saved corr_beta.csv,\",\n",
613
+ " \"rolling_corr_long.csv,\" if ROLL_WINDOW else \"\",\n",
614
+ " \"excess_summary.csv (with corr_to_copper, vol_ann, bench_vol_ann)\")\n"
615
+ ]
616
+ },
617
+ {
618
+ "cell_type": "code",
619
+ "execution_count": 43,
620
+ "id": "13fb4bda",
621
+ "metadata": {},
622
+ "outputs": [
623
+ {
624
+ "name": "stdout",
625
+ "output_type": "stream",
626
+ "text": [
627
+ "Found 20 outperformers; price matrix shape: (1208, 20)\n"
628
+ ]
629
+ }
630
+ ],
631
+ "source": [
632
+ "import pandas as pd\n",
633
+ "\n",
634
+ "# Load prices and labels\n",
635
+ "prices = pd.read_csv(\"df_prices_final.csv\", parse_dates=[\"Date\"], index_col=\"Date\")\n",
636
+ "labels = pd.read_csv(\"excess_summary.csv\", index_col=0)\n",
637
+ "\n",
638
+ "# Select tickers labeled as OUTPERFORM (case-insensitive)\n",
639
+ "op_tickers = (\n",
640
+ " labels.assign(_lab=labels[\"label\"].astype(str).str.upper())\n",
641
+ " .query(\"_lab == 'OUTPERFORM'\")\n",
642
+ " .index.tolist()\n",
643
+ ")\n",
644
+ "\n",
645
+ "# Keep only those tickers that exist in the price matrix\n",
646
+ "op_tickers = [t for t in op_tickers if t in prices.columns]\n",
647
+ "\n",
648
+ "# Slice prices → new DataFrame\n",
649
+ "outperforming_stocks = prices.loc[:, op_tickers].copy()\n",
650
+ "\n",
651
+ "print(f\"Found {len(op_tickers)} outperformers; price matrix shape: {outperforming_stocks.shape}\")\n",
652
+ "\n",
653
+ "# Optional: include the benchmark column too\n",
654
+ "# outperforming_with_bench = prices.loc[:, ['HG=F'] + op_tickers].copy()\n",
655
+ "\n",
656
+ "# Save to CSV (optional)\n",
657
+ "outperforming_stocks.to_csv(\"outperforming_stocks.csv\", index_label=\"Date\", float_format=\"%.6f\")\n"
658
+ ]
659
+ }
660
+ ],
661
+ "metadata": {
662
+ "kernelspec": {
663
+ "display_name": ".venv",
664
+ "language": "python",
665
+ "name": "python3"
666
+ },
667
+ "language_info": {
668
+ "codemirror_mode": {
669
+ "name": "ipython",
670
+ "version": 3
671
+ },
672
+ "file_extension": ".py",
673
+ "mimetype": "text/x-python",
674
+ "name": "python",
675
+ "nbconvert_exporter": "python",
676
+ "pygments_lexer": "ipython3",
677
+ "version": "3.12.11"
678
+ }
679
+ },
680
+ "nbformat": 4,
681
+ "nbformat_minor": 5
682
+ }
Stock_Benchmark Analysis/Stock_Benchmark Analysis.ipynb ADDED
@@ -0,0 +1,694 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 5,
6
+ "id": "811bdabd",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stderr",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "/var/folders/6b/p92_dgbd07ldpbpq29vbpd_m0000gn/T/ipykernel_75834/244845463.py:4: DtypeWarning: Columns (11,13,23,24,75,76,79,80,83,84,90) have mixed types. Specify dtype option on import or set low_memory=False.\n",
14
+ " df = pd.read_csv('unique_companies.csv')\n"
15
+ ]
16
+ }
17
+ ],
18
+ "source": [
19
+ "import pandas as pd\n",
20
+ "\n",
21
+ "# Load the CSV file\n",
22
+ "df = pd.read_csv('unique_companies.csv')\n",
23
+ "\n",
24
+ "# Filter rows where 'Industry' is 'Copper'\n",
25
+ "filtered_df = df[df['Industry'] == 'Copper']\n",
26
+ "\n",
27
+ "# If you want to save the result to a new CSV\n",
28
+ "filtered_df.to_csv('unique_companies_copper.csv', index=False)\n"
29
+ ]
30
+ },
31
+ {
32
+ "cell_type": "code",
33
+ "execution_count": 26,
34
+ "id": "261ce11e",
35
+ "metadata": {},
36
+ "outputs": [
37
+ {
38
+ "name": "stdout",
39
+ "output_type": "stream",
40
+ "text": [
41
+ "Found 150 tickers (including HG=F).\n",
42
+ "Downloading batch 1: 50 tickers\n",
43
+ "Downloading batch 2: 50 tickers\n",
44
+ "Downloading batch 3: 50 tickers\n",
45
+ "df_prices shape: (1307, 150)\n",
46
+ "Columns: ['HG=F', '000630.SZ', '000737.SZ', '000878.SZ', '002203.SZ', '005810.KS', '08W.F', '2009.TW', '2IK.F', '300618.SZ', '300697.SZ', '301511.SZ', '381.F', '3N4.SG', '4989.TW', '5PMA.F', '600255.SS', '600362.SS', '600490.SS', '601137.SS', '601609.SS', '603124.SS', '688102.SS', '688388.SS', '7GI.F', '7LY0.F', '97E0.F', '9CM0.F', 'ACMDY', 'ALM.AX', 'ANFGF', 'AR1.AX', 'ARJN.V', 'ARJNF', 'ARREF', 'ASCU.TO', 'ASCUF', 'ATCUF', 'ATYM.L', 'AXO.V', 'BCU.V', 'BCUFF', 'BFGFF', 'BHAGYANGR.NS', 'BP60.F', 'BRVRF', 'BZDLF', 'C730.F', 'CAEN', 'CAML.L', 'CAMLF', 'CFV0.F', 'COPR', 'CPCPF', 'CPER.V', 'CPFXF', 'CPO.AX', 'CPORF', 'CPPKF', 'CPPMF', 'CPR.JO', 'CS.TO', 'CSC.AX', 'CSCCF', 'CUBEXTUB.NS', 'CUU.V', 'CVV.AX', 'CYM.AX', 'CYPMF', 'E2E1.F', 'E9E.F', 'EMTRF', 'ERO', 'FCX', 'FCXO34.SA', 'FDY.TO', 'FG1.F', 'FPMB.F', 'FQVLF', 'GCUMF', 'GRX.AX', 'GRX.L', 'GSCU.L', 'H6F.F', 'HBM', 'HBM.TO', 'HCH.V', 'HDRSF', 'HGO.AX', 'HHLKF', 'HI.V', 'HIN.MU', 'HINDCOPPER.NS', 'HLGVF', 'HNCUF', 'IE', 'IE.TO', 'INUMF', 'IPMLF', 'JGRRF', 'JIX.F', 'KCC.V', 'KGH.WA', 'KGHPF', 'LA.V', 'LSANF', 'LUNMF', 'MAC.AX', 'MARI.TO', 'MARIF', 'MCL.NS', 'MMLTF', 'MTAL', 'MTJ3.F', 'NFM.AX', 'NRX.AX', 'NTM.AX', 'NU0.F', 'OCKA.F', 'OUW0.F', 'PMAM3.SA', 'PNTZF', 'PSGR', 'PUCOBRE.SN', 'Q.V', 'QCCUF', 'RAJMET.NS', 'RDS.AX', 'RE8.F', 'RRR.AX', 'SAGARDEEP.NS', 'SARKY.IS', 'SCCO', 'SFR.AX', 'SFRRF', 'SLMFF', 'TFM.V', 'TGB', 'TKO.L', 'TNC.AX', 'TRRCF', 'TVCCF', 'TWO.V', 'TWOSF', 'USCUF', 'VCUFF', 'WA1.AX', 'WAORF', 'WCUFF', 'XXIX.V']\n",
47
+ "Saved to 'df_prices.csv'.\n"
48
+ ]
49
+ }
50
+ ],
51
+ "source": [
52
+ "# ------------------------------------------------------------\n",
53
+ "# Build df_prices.csv for HG=F + tickers in unique_companies_copper.csv\n",
54
+ "# • period=\"5y\" (more reliable than start/end for some venues)\n",
55
+ "# • Prefer 'Adj Close', fallback to 'Close'\n",
56
+ "# • Re-download single tickers that are all-NaN in batch (e.g., 2IK.F)\n",
57
+ "# ------------------------------------------------------------\n",
58
+ "# pip install yfinance pandas\n",
59
+ "\n",
60
+ "import pandas as pd\n",
61
+ "import yfinance as yf\n",
62
+ "\n",
63
+ "CSV_PATH = \"unique_companies_copper.csv\"\n",
64
+ "TICKER_COL = \"PrimaryTicker\"\n",
65
+ "UNDERLYING = \"HG=F\"\n",
66
+ "BATCH_SIZE = 50\n",
67
+ "OUT_CSV = \"df_prices.csv\"\n",
68
+ "\n",
69
+ "# --- Read tickers ---\n",
70
+ "tickers = (\n",
71
+ " pd.read_csv(CSV_PATH, usecols=[TICKER_COL])[TICKER_COL]\n",
72
+ " .dropna().astype(str).str.strip().str.upper().tolist()\n",
73
+ ")\n",
74
+ "tickers = sorted(set(tickers))\n",
75
+ "if UNDERLYING not in tickers:\n",
76
+ " tickers = [UNDERLYING] + tickers\n",
77
+ "\n",
78
+ "print(f\"Found {len(tickers)} tickers (including {UNDERLYING}).\")\n",
79
+ "\n",
80
+ "def _extract_adj_or_close(df_multi: pd.DataFrame) -> tuple[pd.DataFrame, list[str]]:\n",
81
+ " \"\"\"From yfinance multi-ticker frame, prefer 'Adj Close', else 'Close' per ticker.\"\"\"\n",
82
+ " if not isinstance(df_multi.columns, pd.MultiIndex):\n",
83
+ " raise ValueError(\"Expected MultiIndex columns for multi-ticker download.\")\n",
84
+ " fields = set(df_multi.columns.get_level_values(-1))\n",
85
+ " adj = df_multi.xs(\"Adj Close\", axis=1, level=-1, drop_level=True) if \"Adj Close\" in fields else pd.DataFrame(index=df_multi.index)\n",
86
+ " clo = df_multi.xs(\"Close\", axis=1, level=-1, drop_level=True) if \"Close\" in fields else pd.DataFrame(index=df_multi.index)\n",
87
+ "\n",
88
+ " cols = sorted(set(adj.columns).union(clo.columns))\n",
89
+ " out = pd.DataFrame(index=df_multi.index, columns=cols, dtype=\"float64\")\n",
90
+ " used_close = []\n",
91
+ "\n",
92
+ " for t in cols:\n",
93
+ " a = adj[t] if t in adj.columns else None\n",
94
+ " c = clo[t] if t in clo.columns else None\n",
95
+ " if a is not None and not a.dropna().empty:\n",
96
+ " out[t] = a\n",
97
+ " elif c is not None and not c.dropna().empty:\n",
98
+ " out[t] = c\n",
99
+ " used_close.append(t)\n",
100
+ " return out, used_close\n",
101
+ "\n",
102
+ "def _download_batch(batch):\n",
103
+ " df = yf.download(\n",
104
+ " tickers=batch,\n",
105
+ " period=\"5y\",\n",
106
+ " interval=\"1d\",\n",
107
+ " auto_adjust=False,\n",
108
+ " actions=False,\n",
109
+ " progress=False,\n",
110
+ " group_by=\"ticker\",\n",
111
+ " threads=True\n",
112
+ " )\n",
113
+ " if isinstance(df.columns, pd.MultiIndex):\n",
114
+ " return _extract_adj_or_close(df)\n",
115
+ " else:\n",
116
+ " # Single-ticker shape\n",
117
+ " tkr = batch[0]\n",
118
+ " adj = df.get(\"Adj Close\")\n",
119
+ " clo = df.get(\"Close\")\n",
120
+ " used_close = []\n",
121
+ " if adj is not None and not adj.dropna().empty:\n",
122
+ " out = adj.rename(tkr).to_frame()\n",
123
+ " elif clo is not None and not clo.dropna().empty:\n",
124
+ " out = clo.rename(tkr).to_frame()\n",
125
+ " used_close.append(tkr)\n",
126
+ " else:\n",
127
+ " out = pd.DataFrame(index=df.index, columns=[tkr], dtype=\"float64\")\n",
128
+ " return out, used_close\n",
129
+ "\n",
130
+ "def _download_single(tkr: str) -> pd.Series:\n",
131
+ " \"\"\"Single-ticker repair path; prefer Adj Close, else Close.\"\"\"\n",
132
+ " df = yf.download(\n",
133
+ " tickers=tkr,\n",
134
+ " period=\"5y\",\n",
135
+ " interval=\"1d\",\n",
136
+ " auto_adjust=False,\n",
137
+ " actions=False,\n",
138
+ " progress=False\n",
139
+ " )\n",
140
+ " s = df.get(\"Adj Close\")\n",
141
+ " if s is None or s.dropna().empty:\n",
142
+ " s = df.get(\"Close\")\n",
143
+ " if s is None:\n",
144
+ " return pd.Series(dtype=\"float64\", name=tkr)\n",
145
+ " return s.rename(tkr)\n",
146
+ "\n",
147
+ "# --- Batch download + merge ---\n",
148
+ "frames, used_close_all = [], []\n",
149
+ "for i in range(0, len(tickers), BATCH_SIZE):\n",
150
+ " batch = tickers[i:i+BATCH_SIZE]\n",
151
+ " print(f\"Downloading batch {i//BATCH_SIZE + 1}: {len(batch)} tickers\")\n",
152
+ " part, used_close = _download_batch(batch)\n",
153
+ " frames.append(part)\n",
154
+ " used_close_all.extend(used_close)\n",
155
+ "\n",
156
+ "df_prices = pd.concat(frames, axis=1)\n",
157
+ "df_prices = df_prices.loc[:, ~df_prices.columns.duplicated()].sort_index()\n",
158
+ "\n",
159
+ "# --- Repair tickers that are NaN-only or missing after batch ---\n",
160
+ "to_repair = [t for t in tickers if (t in df_prices.columns and df_prices[t].dropna().empty) or (t not in df_prices.columns)]\n",
161
+ "to_repair = sorted(set(to_repair))\n",
162
+ "if to_repair:\n",
163
+ " print(f\"Repairing via single-ticker fetch: {to_repair}\")\n",
164
+ " for t in to_repair:\n",
165
+ " s = _download_single(t)\n",
166
+ " if not s.dropna().empty:\n",
167
+ " df_prices = df_prices.reindex(df_prices.index.union(s.index)).sort_index()\n",
168
+ " df_prices[t] = s.reindex(df_prices.index)\n",
169
+ "\n",
170
+ "# --- Order columns; drop all-NaN tickers ---\n",
171
+ "ordered_cols = [UNDERLYING] + [t for t in tickers if t != UNDERLYING and t in df_prices.columns]\n",
172
+ "df_prices = df_prices.reindex(columns=ordered_cols)\n",
173
+ "all_nan_cols = [c for c in df_prices.columns if df_prices[c].dropna().empty]\n",
174
+ "if all_nan_cols:\n",
175
+ " print(f\"Dropping tickers with no usable data: {all_nan_cols}\")\n",
176
+ " df_prices = df_prices.drop(columns=all_nan_cols)\n",
177
+ "\n",
178
+ "# --- Report fallback usage ---\n",
179
+ "used_close_all = sorted(set([t for t in used_close_all if t in df_prices.columns]))\n",
180
+ "if used_close_all:\n",
181
+ " print(f\"Used 'Close' fallback for: {used_close_all}\")\n",
182
+ "\n",
183
+ "print(\"df_prices shape:\", df_prices.shape)\n",
184
+ "print(\"Columns:\", list(df_prices.columns))\n",
185
+ "\n",
186
+ "# --- Save ---\n",
187
+ "df_prices.to_csv(OUT_CSV, index_label=\"Date\", float_format=\"%.6f\")\n",
188
+ "print(f\"Saved to '{OUT_CSV}'.\")\n"
189
+ ]
190
+ },
191
+ {
192
+ "cell_type": "code",
193
+ "execution_count": 29,
194
+ "id": "11079562",
195
+ "metadata": {},
196
+ "outputs": [
197
+ {
198
+ "name": "stdout",
199
+ "output_type": "stream",
200
+ "text": [
201
+ "=== Missing % by ticker ===\n",
202
+ " missing_pct\n",
203
+ "AXO.V 97.016067\n",
204
+ "603124.SS 93.037490\n",
205
+ "NFM.AX 87.299158\n",
206
+ "WAORF 85.233359\n",
207
+ "ASCUF 81.637337\n",
208
+ "... ...\n",
209
+ "OUW0.F 2.371844\n",
210
+ "9CM0.F 2.371844\n",
211
+ "5PMA.F 2.371844\n",
212
+ "3N4.SG 2.371844\n",
213
+ "E2E1.F 2.371844\n",
214
+ "\n",
215
+ "[150 rows x 1 columns]\n",
216
+ "\n",
217
+ "Dropping 45 tickers (> 10% missing): ['AXO.V', '603124.SS', 'NFM.AX', 'WAORF', 'ASCUF', 'CSC.AX', 'MAC.AX', 'CPPMF', 'JGRRF', '301511.SZ', 'HNCUF', 'TRRCF', 'CPR.JO', 'CYPMF', '381.F', 'CPORF', 'IE', 'IE.TO', 'CPCPF', '688102.SS', 'INUMF', 'CSCCF', 'WA1.AX', '7LY0.F', 'CPER.V', 'NU0.F', 'HCH.V', 'GSCU.L', 'E9E.F', 'Q.V', 'ASCU.TO', 'H6F.F', 'AR1.AX', 'VCUFF', '97E0.F', 'MTAL', 'RRR.AX', 'CAMLF', 'CPO.AX', 'WCUFF', 'EMTRF', '7GI.F', 'QCCUF', '2IK.F', 'CPPKF']\n",
218
+ "\n",
219
+ "Shapes:\n",
220
+ "Before: (1307, 150) After: (1307, 105)\n",
221
+ "Saved to 'df_prices_final.csv'.\n"
222
+ ]
223
+ }
224
+ ],
225
+ "source": [
226
+ "# ------------------------------------------------------------\n",
227
+ "# Load df_prices.csv, compute missing % per ticker,\n",
228
+ "# drop columns with >50% missing, save df_prices_final.csv\n",
229
+ "# ------------------------------------------------------------\n",
230
+ "import pandas as pd\n",
231
+ "\n",
232
+ "IN_CSV = \"df_prices.csv\"\n",
233
+ "OUT_CSV = \"df_prices_final.csv\"\n",
234
+ "THRESH = 10.0 # percent\n",
235
+ "\n",
236
+ "df_prices = pd.read_csv(IN_CSV, parse_dates=[\"Date\"], index_col=\"Date\")\n",
237
+ "\n",
238
+ "# Missing % over the full DataFrame index\n",
239
+ "missing_pct = df_prices.isna().mean() * 100.0\n",
240
+ "report = (\n",
241
+ " pd.DataFrame({\"missing_pct\": missing_pct})\n",
242
+ " .sort_values(\"missing_pct\", ascending=False)\n",
243
+ ")\n",
244
+ "print(\"=== Missing % by ticker ===\")\n",
245
+ "print(report)\n",
246
+ "\n",
247
+ "# Drop tickers with >50% missing\n",
248
+ "to_drop = report.index[report[\"missing_pct\"] > THRESH].tolist()\n",
249
+ "print(f\"\\nDropping {len(to_drop)} tickers (> {THRESH:.0f}% missing): {to_drop}\")\n",
250
+ "\n",
251
+ "df_prices_final = df_prices.drop(columns=to_drop, errors=\"ignore\")\n",
252
+ "\n",
253
+ "print(\"\\nShapes:\")\n",
254
+ "print(\"Before:\", df_prices.shape, \"After:\", df_prices_final.shape)\n",
255
+ "\n",
256
+ "df_prices_final.to_csv(OUT_CSV, index_label=\"Date\", float_format=\"%.6f\")\n",
257
+ "print(f\"Saved to '{OUT_CSV}'.\")\n"
258
+ ]
259
+ },
260
+ {
261
+ "cell_type": "code",
262
+ "execution_count": 30,
263
+ "id": "8dc7673c",
264
+ "metadata": {},
265
+ "outputs": [
266
+ {
267
+ "name": "stdout",
268
+ "output_type": "stream",
269
+ "text": [
270
+ "Saved clean, rectangular prices to 'df_prices_final.csv' with shape (1208, 105).\n"
271
+ ]
272
+ }
273
+ ],
274
+ "source": [
275
+ "# ------------------------------------------------------------\n",
276
+ "# Clean df_prices_final: common window + bfill→ffill + final NA drop\n",
277
+ "# Input : df_prices_final.csv (your current file with some missing)\n",
278
+ "# Output: df_prices_final.csv (overwritten, rectangular, NA-free)\n",
279
+ "# ------------------------------------------------------------\n",
280
+ "import pandas as pd\n",
281
+ "\n",
282
+ "IN_CSV = \"df_prices_final.csv\"\n",
283
+ "OUT_CSV = \"df_prices_final.csv\" # overwrite in place\n",
284
+ "\n",
285
+ "df = pd.read_csv(IN_CSV, parse_dates=[\"Date\"], index_col=\"Date\").sort_index()\n",
286
+ "\n",
287
+ "# 1) Common window (everyone has started and not yet delisted)\n",
288
+ "first_valid = df.apply(pd.Series.first_valid_index)\n",
289
+ "last_valid = df.apply(pd.Series.last_valid_index)\n",
290
+ "\n",
291
+ "common_start = max(first_valid.dropna())\n",
292
+ "common_end = min(last_valid.dropna())\n",
293
+ "\n",
294
+ "df = df.loc[common_start:common_end].copy()\n",
295
+ "\n",
296
+ "# 2) Business-day index to harmonize calendars\n",
297
+ "bidx = pd.date_range(df.index.min(), df.index.max(), freq=\"B\")\n",
298
+ "df = df.reindex(bidx)\n",
299
+ "\n",
300
+ "# 3) Fill:\n",
301
+ "# - Backfill once to seed the first business day for tickers closed on common_start\n",
302
+ "# - Forward-fill for holiday gaps etc.\n",
303
+ "df = df.bfill(limit=None).ffill(limit=None)\n",
304
+ "\n",
305
+ "# 4) Final sanity check: drop any rare rows still containing NA\n",
306
+ "before_rows = df.shape[0]\n",
307
+ "df = df.dropna(how=\"any\")\n",
308
+ "after_rows = df.shape[0]\n",
309
+ "if before_rows != after_rows:\n",
310
+ " print(f\"Dropped {before_rows - after_rows} rows that still had NAs after filling.\")\n",
311
+ "\n",
312
+ "# 5) Save\n",
313
+ "df.to_csv(OUT_CSV, index_label=\"Date\", float_format=\"%.6f\")\n",
314
+ "print(f\"Saved clean, rectangular prices to '{OUT_CSV}' with shape {df.shape}.\")\n"
315
+ ]
316
+ },
317
+ {
318
+ "cell_type": "code",
319
+ "execution_count": 40,
320
+ "id": "8cdec19b",
321
+ "metadata": {},
322
+ "outputs": [
323
+ {
324
+ "name": "stdout",
325
+ "output_type": "stream",
326
+ "text": [
327
+ "✓ Saved corr_beta.csv, excess_summary.csv (with corr_to_copper)\n"
328
+ ]
329
+ }
330
+ ],
331
+ "source": [
332
+ "# ============================================================\n",
333
+ "# Copper-linked stocks vs COMEX copper (HG=F)\n",
334
+ "# • Correlation & beta\n",
335
+ "# • Rolling correlation\n",
336
+ "# • β-hedged alpha / excess-return summary\n",
337
+ "# Outputs:\n",
338
+ "# corr_beta.csv, rolling_corr_long.csv, excess_summary.csv\n",
339
+ "# (excess_summary.csv now includes corr_to_copper)\n",
340
+ "# ============================================================\n",
341
+ "\n",
342
+ "import pandas as pd\n",
343
+ "import numpy as np\n",
344
+ "import statsmodels.api as sm\n",
345
+ "\n",
346
+ "# ---------- PARAMETERS ----------\n",
347
+ "IN_CSV = \"df_prices_final.csv\" # prices, Date index\n",
348
+ "BENCH = \"HG=F\" # underlying metal\n",
349
+ "FREQ = \"W-FRI\" # 'D' for daily or 'W-FRI'\n",
350
+ "ROLL_WINDOW = None # 26 weeks ≈ 6 months; set None to skip\n",
351
+ "USE_LOG_RET = True # True = log-returns, False = pct-change\n",
352
+ "ALPHA_T_CUT = 0.9 # |t| threshold for alpha significance\n",
353
+ "SHARPE_CUT = 0.0 # >0 required for outperform label\n",
354
+ "\n",
355
+ "# Annualisation factor\n",
356
+ "PER_YEAR = 52 if FREQ.startswith(\"W\") else 252\n",
357
+ "\n",
358
+ "# ---------- LOAD PRICES ----------\n",
359
+ "px = (pd.read_csv(IN_CSV, parse_dates=[\"Date\"], index_col=\"Date\")\n",
360
+ " .sort_index())\n",
361
+ "assert BENCH in px.columns, f\"{BENCH} not found in columns.\"\n",
362
+ "\n",
363
+ "# Optional resample (weekly smooths roll/holiday noise)\n",
364
+ "if FREQ != \"D\":\n",
365
+ " px = px.resample(FREQ).last()\n",
366
+ "\n",
367
+ "# ---------- RETURNS ----------\n",
368
+ "rets = np.log(px).diff() if USE_LOG_RET else px.pct_change()\n",
369
+ "rets = rets.dropna(how=\"all\")\n",
370
+ "\n",
371
+ "bench = rets[BENCH]\n",
372
+ "stocks = [c for c in rets.columns if c != BENCH]\n",
373
+ "\n",
374
+ "# ---------- 1) CORRELATION & BETA ----------\n",
375
+ "corr = rets[stocks].corrwith(bench) # Series: index=ticker\n",
376
+ "var_b = bench.var(ddof=1)\n",
377
+ "beta = rets[stocks].apply(lambda s: s.cov(bench)) / var_b\n",
378
+ "\n",
379
+ "corr_beta = (pd.DataFrame({\"corr_to_copper\": corr,\n",
380
+ " \"beta_to_copper\": beta})\n",
381
+ " .sort_values(\"corr_to_copper\", ascending=False))\n",
382
+ "corr_beta.index.name = \"ticker\"\n",
383
+ "corr_beta.to_csv(\"corr_beta.csv\", float_format=\"%.6f\")\n",
384
+ "\n",
385
+ "# Keep maps for reuse in excess_summary\n",
386
+ "corr_map = corr.to_dict()\n",
387
+ "beta_map = beta.to_dict()\n",
388
+ "\n",
389
+ "# ---------- OPTIONAL ROLLING CORRELATION ----------\n",
390
+ "if ROLL_WINDOW:\n",
391
+ " rolling_long = (\n",
392
+ " rets[stocks]\n",
393
+ " .rolling(ROLL_WINDOW)\n",
394
+ " .corr(bench) # wide: Date × tickers\n",
395
+ " .stack() # -> Series with MultiIndex (Date, ticker)\n",
396
+ " .rename(\"rolling_corr\")\n",
397
+ " .rename_axis([\"Date\", \"ticker\"])\n",
398
+ " .reset_index()\n",
399
+ " .dropna(subset=[\"rolling_corr\"])\n",
400
+ " )\n",
401
+ " rolling_long.to_csv(\"rolling_corr_long.csv\",\n",
402
+ " index=False, float_format=\"%.6f\")\n",
403
+ "\n",
404
+ "# ---------- 2) β-HEDGED EXCESS & SELECTION ----------\n",
405
+ "def _nw_lags(freq: str) -> int:\n",
406
+ " return 4 if freq.startswith(\"W\") else 21 # ≈1 month of obs\n",
407
+ "\n",
408
+ "hac_lags = _nw_lags(FREQ)\n",
409
+ "\n",
410
+ "rows = []\n",
411
+ "for s in stocks:\n",
412
+ " # align & drop NA\n",
413
+ " df_xy = rets[[s, BENCH]].dropna()\n",
414
+ " if df_xy.empty:\n",
415
+ " continue\n",
416
+ "\n",
417
+ " y = df_xy[s]\n",
418
+ " X = sm.add_constant(df_xy[BENCH])\n",
419
+ " model = sm.OLS(y, X).fit(\n",
420
+ " cov_type=\"HAC\",\n",
421
+ " cov_kwds={\"maxlags\": hac_lags}\n",
422
+ " )\n",
423
+ "\n",
424
+ " alpha = model.params[\"const\"]\n",
425
+ " beta_i = model.params[BENCH]\n",
426
+ " t_alpha = model.tvalues[\"const\"]\n",
427
+ " alpha_ann = alpha * PER_YEAR\n",
428
+ "\n",
429
+ " # β-hedged excess series\n",
430
+ " r_excess = y - beta_i * df_xy[BENCH]\n",
431
+ " mu, sd = r_excess.mean(), r_excess.std(ddof=1)\n",
432
+ " hedged_sharpe = (mu / sd) * np.sqrt(PER_YEAR) if sd > 0 else np.nan\n",
433
+ " cum_excess_beta_log = r_excess.cumsum().iloc[-1]\n",
434
+ "\n",
435
+ " # Simple 1× copper cumulative log excess\n",
436
+ " cum_excess_1x_log = y.cumsum().iloc[-1] - df_xy[BENCH].cumsum().iloc[-1]\n",
437
+ "\n",
438
+ " # Label\n",
439
+ " if (alpha_ann > 0) and (t_alpha > ALPHA_T_CUT) and (hedged_sharpe > SHARPE_CUT):\n",
440
+ " label = \"Outperform\"\n",
441
+ " elif (alpha_ann < 0) and (t_alpha < -ALPHA_T_CUT) and (hedged_sharpe < -SHARPE_CUT):\n",
442
+ " label = \"Underperform\"\n",
443
+ " else:\n",
444
+ " label = \"Neutral\"\n",
445
+ "\n",
446
+ " rows.append({\n",
447
+ " \"ticker\": s,\n",
448
+ " \"corr_to_copper\": corr_map.get(s, np.nan), # <-- added\n",
449
+ " \"beta_to_copper\": beta_map.get(s, np.nan), # keep for consistency\n",
450
+ " \"alpha_ann\": alpha_ann,\n",
451
+ " \"t_alpha\": t_alpha,\n",
452
+ " \"hedged_sharpe\": hedged_sharpe,\n",
453
+ " \"cum_excess_beta_log\": cum_excess_beta_log,\n",
454
+ " \"cum_excess_1x_log\": cum_excess_1x_log,\n",
455
+ " \"label\": label\n",
456
+ " })\n",
457
+ "\n",
458
+ "excess_summary = (pd.DataFrame(rows)\n",
459
+ " .set_index(\"ticker\")\n",
460
+ " .sort_values([\"label\", \"alpha_ann\", \"hedged_sharpe\"],\n",
461
+ " ascending=[True, False, False]))\n",
462
+ "excess_summary.to_csv(\"excess_summary.csv\", float_format=\"%.6f\")\n",
463
+ "\n",
464
+ "print(\"✓ Saved corr_beta.csv,\",\n",
465
+ " \"rolling_corr_long.csv,\" if ROLL_WINDOW else \"\",\n",
466
+ " \"excess_summary.csv (with corr_to_copper)\")\n"
467
+ ]
468
+ },
469
+ {
470
+ "cell_type": "code",
471
+ "execution_count": 42,
472
+ "id": "a651e4d4",
473
+ "metadata": {},
474
+ "outputs": [
475
+ {
476
+ "name": "stdout",
477
+ "output_type": "stream",
478
+ "text": [
479
+ "✓ Saved corr_beta.csv, excess_summary.csv (with corr_to_copper, vol_ann, bench_vol_ann)\n"
480
+ ]
481
+ }
482
+ ],
483
+ "source": [
484
+ "# ============================================================\n",
485
+ "# Copper-linked stocks vs COMEX copper (HG=F)\n",
486
+ "# • Correlation & beta\n",
487
+ "# • Rolling correlation\n",
488
+ "# • β-hedged alpha / excess-return summary\n",
489
+ "# • Annualized volatility (each stock + HG=F)\n",
490
+ "# Outputs:\n",
491
+ "# corr_beta.csv, rolling_corr_long.csv, excess_summary.csv\n",
492
+ "# ============================================================\n",
493
+ "\n",
494
+ "import pandas as pd\n",
495
+ "import numpy as np\n",
496
+ "import statsmodels.api as sm\n",
497
+ "\n",
498
+ "# ---------- PARAMETERS ----------\n",
499
+ "IN_CSV = \"df_prices_final.csv\" # prices, Date index\n",
500
+ "BENCH = \"HG=F\" # underlying metal\n",
501
+ "FREQ = \"W-FRI\" # 'D' for daily or 'W-FRI' (recommended)\n",
502
+ "ROLL_WINDOW = None # 26 weeks ≈ 6 months; set None to skip\n",
503
+ "USE_LOG_RET = True # True = log-returns, False = pct-change\n",
504
+ "ALPHA_T_CUT = 0.8 # |t| threshold for alpha significance\n",
505
+ "SHARPE_CUT = 0.0 # >0 required for outperform label\n",
506
+ "\n",
507
+ "# Annualisation factor\n",
508
+ "PER_YEAR = 52 if FREQ.startswith(\"W\") else 252\n",
509
+ "\n",
510
+ "# ---------- LOAD PRICES ----------\n",
511
+ "px = (pd.read_csv(IN_CSV, parse_dates=[\"Date\"], index_col=\"Date\")\n",
512
+ " .sort_index())\n",
513
+ "assert BENCH in px.columns, f\"{BENCH} not found in columns.\"\n",
514
+ "\n",
515
+ "# Optional resample (weekly smooths roll/holiday noise)\n",
516
+ "if FREQ != \"D\":\n",
517
+ " px = px.resample(FREQ).last()\n",
518
+ "\n",
519
+ "# ---------- RETURNS ----------\n",
520
+ "rets = np.log(px).diff() if USE_LOG_RET else px.pct_change()\n",
521
+ "rets = rets.dropna(how=\"all\")\n",
522
+ "\n",
523
+ "bench = rets[BENCH]\n",
524
+ "stocks = [c for c in rets.columns if c != BENCH]\n",
525
+ "\n",
526
+ "# ---------- 1) CORRELATION & BETA ----------\n",
527
+ "corr = rets[stocks].corrwith(bench)\n",
528
+ "var_b = bench.var(ddof=1)\n",
529
+ "beta = rets[stocks].apply(lambda s: s.cov(bench)) / var_b\n",
530
+ "\n",
531
+ "corr_beta = (pd.DataFrame({\"corr_to_copper\": corr,\n",
532
+ " \"beta_to_copper\": beta})\n",
533
+ " .sort_values(\"corr_to_copper\", ascending=False))\n",
534
+ "corr_beta.index.name = \"ticker\"\n",
535
+ "corr_beta.to_csv(\"corr_beta.csv\", float_format=\"%.6f\")\n",
536
+ "\n",
537
+ "# Maps to reuse\n",
538
+ "corr_map = corr.to_dict()\n",
539
+ "beta_map = beta.to_dict()\n",
540
+ "\n",
541
+ "# ---------- OPTIONAL ROLLING CORRELATION ----------\n",
542
+ "if ROLL_WINDOW:\n",
543
+ " rolling_long = (\n",
544
+ " rets[stocks]\n",
545
+ " .rolling(ROLL_WINDOW)\n",
546
+ " .corr(bench) # wide: Date × tickers\n",
547
+ " .stack() # -> Series with MultiIndex (Date, ticker)\n",
548
+ " .rename(\"rolling_corr\")\n",
549
+ " .rename_axis([\"Date\", \"ticker\"])\n",
550
+ " .reset_index()\n",
551
+ " .dropna(subset=[\"rolling_corr\"])\n",
552
+ " )\n",
553
+ " rolling_long.to_csv(\"rolling_corr_long.csv\",\n",
554
+ " index=False, float_format=\"%.6f\")\n",
555
+ "\n",
556
+ "# ---------- 2) β-HEDGED EXCESS, VOLATILITY & SELECTION ----------\n",
557
+ "def _nw_lags(freq: str) -> int:\n",
558
+ " return 4 if freq.startswith(\"W\") else 21 # ≈1 month of obs\n",
559
+ "\n",
560
+ "hac_lags = _nw_lags(FREQ)\n",
561
+ "\n",
562
+ "# Benchmark annualized volatility (on full return series at chosen frequency)\n",
563
+ "bench_vol_ann = bench.std(ddof=1) * np.sqrt(PER_YEAR)\n",
564
+ "\n",
565
+ "rows = []\n",
566
+ "for s in stocks:\n",
567
+ " # Align series to common non-NA dates for regression & stats\n",
568
+ " df_xy = rets[[s, BENCH]].dropna()\n",
569
+ " if df_xy.empty:\n",
570
+ " continue\n",
571
+ "\n",
572
+ " y = df_xy[s]\n",
573
+ " X = sm.add_constant(df_xy[BENCH])\n",
574
+ " model = sm.OLS(y, X).fit(\n",
575
+ " cov_type=\"HAC\",\n",
576
+ " cov_kwds={\"maxlags\": hac_lags}\n",
577
+ " )\n",
578
+ "\n",
579
+ " alpha = model.params[\"const\"]\n",
580
+ " beta_i = model.params[BENCH]\n",
581
+ " t_alpha = model.tvalues[\"const\"]\n",
582
+ " alpha_ann = alpha * PER_YEAR\n",
583
+ "\n",
584
+ " # β-hedged excess series\n",
585
+ " r_excess = y - beta_i * df_xy[BENCH]\n",
586
+ " mu, sd = r_excess.mean(), r_excess.std(ddof=1)\n",
587
+ " hedged_sharpe = (mu / sd) * np.sqrt(PER_YEAR) if sd > 0 else np.nan\n",
588
+ " cum_excess_beta_log = r_excess.cumsum().iloc[-1]\n",
589
+ "\n",
590
+ " # Simple 1× copper cumulative log excess\n",
591
+ " cum_excess_1x_log = y.cumsum().iloc[-1] - df_xy[BENCH].cumsum().iloc[-1]\n",
592
+ "\n",
593
+ " # Annualized volatility for this stock (aligned sample)\n",
594
+ " vol_ann = y.std(ddof=1) * np.sqrt(PER_YEAR)\n",
595
+ "\n",
596
+ " # Label\n",
597
+ " if (alpha_ann > 0) and (t_alpha > ALPHA_T_CUT) and (hedged_sharpe > SHARPE_CUT):\n",
598
+ " label = \"Outperform\"\n",
599
+ " elif (alpha_ann < 0) and (t_alpha < -ALPHA_T_CUT) and (hedged_sharpe < -SHARPE_CUT):\n",
600
+ " label = \"Underperform\"\n",
601
+ " else:\n",
602
+ " label = \"Neutral\"\n",
603
+ "\n",
604
+ " rows.append({\n",
605
+ " \"ticker\": s,\n",
606
+ " \"corr_to_copper\": corr_map.get(s, np.nan),\n",
607
+ " \"beta_to_copper\": beta_map.get(s, np.nan),\n",
608
+ " \"alpha_ann\": alpha_ann,\n",
609
+ " \"t_alpha\": t_alpha,\n",
610
+ " \"hedged_sharpe\": hedged_sharpe,\n",
611
+ " \"cum_excess_beta_log\": cum_excess_beta_log,\n",
612
+ " \"cum_excess_1x_log\": cum_excess_1x_log,\n",
613
+ " \"vol_ann\": vol_ann, # stock annualized volatility\n",
614
+ " \"bench_vol_ann\": bench_vol_ann, # HG=F annualized volatility (same for all rows)\n",
615
+ " \"label\": label\n",
616
+ " })\n",
617
+ "\n",
618
+ "excess_summary = (pd.DataFrame(rows)\n",
619
+ " .set_index(\"ticker\")\n",
620
+ " .sort_values([\"label\", \"alpha_ann\", \"hedged_sharpe\"],\n",
621
+ " ascending=[True, False, False]))\n",
622
+ "excess_summary.to_csv(\"excess_summary.csv\", float_format=\"%.6f\")\n",
623
+ "\n",
624
+ "print(\"✓ Saved corr_beta.csv,\",\n",
625
+ " \"rolling_corr_long.csv,\" if ROLL_WINDOW else \"\",\n",
626
+ " \"excess_summary.csv (with corr_to_copper, vol_ann, bench_vol_ann)\")\n"
627
+ ]
628
+ },
629
+ {
630
+ "cell_type": "code",
631
+ "execution_count": 43,
632
+ "id": "13fb4bda",
633
+ "metadata": {},
634
+ "outputs": [
635
+ {
636
+ "name": "stdout",
637
+ "output_type": "stream",
638
+ "text": [
639
+ "Found 20 outperformers; price matrix shape: (1208, 20)\n"
640
+ ]
641
+ }
642
+ ],
643
+ "source": [
644
+ "import pandas as pd\n",
645
+ "\n",
646
+ "# Load prices and labels\n",
647
+ "prices = pd.read_csv(\"df_prices_final.csv\", parse_dates=[\"Date\"], index_col=\"Date\")\n",
648
+ "labels = pd.read_csv(\"excess_summary.csv\", index_col=0)\n",
649
+ "\n",
650
+ "# Select tickers labeled as OUTPERFORM (case-insensitive)\n",
651
+ "op_tickers = (\n",
652
+ " labels.assign(_lab=labels[\"label\"].astype(str).str.upper())\n",
653
+ " .query(\"_lab == 'OUTPERFORM'\")\n",
654
+ " .index.tolist()\n",
655
+ ")\n",
656
+ "\n",
657
+ "# Keep only those tickers that exist in the price matrix\n",
658
+ "op_tickers = [t for t in op_tickers if t in prices.columns]\n",
659
+ "\n",
660
+ "# Slice prices → new DataFrame\n",
661
+ "outperforming_stocks = prices.loc[:, op_tickers].copy()\n",
662
+ "\n",
663
+ "print(f\"Found {len(op_tickers)} outperformers; price matrix shape: {outperforming_stocks.shape}\")\n",
664
+ "\n",
665
+ "# Optional: include the benchmark column too\n",
666
+ "# outperforming_with_bench = prices.loc[:, ['HG=F'] + op_tickers].copy()\n",
667
+ "\n",
668
+ "# Save to CSV (optional)\n",
669
+ "outperforming_stocks.to_csv(\"outperforming_stocks.csv\", index_label=\"Date\", float_format=\"%.6f\")\n"
670
+ ]
671
+ }
672
+ ],
673
+ "metadata": {
674
+ "kernelspec": {
675
+ "display_name": ".venv",
676
+ "language": "python",
677
+ "name": "python3"
678
+ },
679
+ "language_info": {
680
+ "codemirror_mode": {
681
+ "name": "ipython",
682
+ "version": 3
683
+ },
684
+ "file_extension": ".py",
685
+ "mimetype": "text/x-python",
686
+ "name": "python",
687
+ "nbconvert_exporter": "python",
688
+ "pygments_lexer": "ipython3",
689
+ "version": "3.12.11"
690
+ }
691
+ },
692
+ "nbformat": 4,
693
+ "nbformat_minor": 5
694
+ }
Stock_Benchmark Analysis/corr_beta.csv ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ticker,corr_to_copper,beta_to_copper
2
+ SCCO,0.751106,1.169829
3
+ FCX,0.744135,1.426671
4
+ KGH.WA,0.732039,1.197883
5
+ CS.TO,0.731330,1.730688
6
+ FPMB.F,0.715914,1.326046
7
+ HBM,0.699505,1.612239
8
+ HBM.TO,0.694932,1.508307
9
+ FCXO34.SA,0.690445,1.266068
10
+ TGB,0.682023,1.598263
11
+ LUNMF,0.668351,1.278764
12
+ ERO,0.667417,1.451800
13
+ ARREF,0.663149,1.258081
14
+ FQVLF,0.631128,1.608453
15
+ OCKA.F,0.607377,1.362314
16
+ ANFGF,0.604987,1.130759
17
+ OUW0.F,0.587710,1.451457
18
+ FG1.F,0.586101,0.937223
19
+ 600362.SS,0.578169,0.859542
20
+ RE8.F,0.558737,0.930441
21
+ 000878.SZ,0.551521,0.736491
22
+ 000630.SZ,0.514417,0.750244
23
+ SFR.AX,0.512234,0.893583
24
+ CAML.L,0.508801,0.666887
25
+ TKO.L,0.486790,0.908630
26
+ ATYM.L,0.470796,0.713631
27
+ 2009.TW,0.459276,0.934166
28
+ IPMLF,0.394290,0.790985
29
+ 000737.SZ,0.382355,0.744536
30
+ HINDCOPPER.NS,0.380606,0.851744
31
+ HGO.AX,0.365913,0.891063
32
+ MARI.TO,0.364093,0.608576
33
+ JIX.F,0.359048,0.845307
34
+ 601609.SS,0.357557,0.417161
35
+ FDY.TO,0.332434,0.867706
36
+ 9CM0.F,0.321625,0.854389
37
+ 600490.SS,0.319510,0.648428
38
+ SFRRF,0.309463,0.617418
39
+ 3N4.SG,0.308499,0.985685
40
+ 300697.SZ,0.301865,0.581731
41
+ 002203.SZ,0.290713,0.322780
42
+ USCUF,0.290002,1.475519
43
+ CVV.AX,0.288097,0.897164
44
+ XXIX.V,0.287216,0.782133
45
+ CPFXF,0.284255,1.126958
46
+ PUCOBRE.SN,0.275522,0.266430
47
+ 300618.SZ,0.263318,0.526491
48
+ CUU.V,0.258176,0.975104
49
+ GCUMF,0.241195,0.706705
50
+ BCU.V,0.238980,0.954954
51
+ CYM.AX,0.237302,0.894108
52
+ E2E1.F,0.235437,0.396147
53
+ 005810.KS,0.233232,0.308425
54
+ MARIF,0.233169,0.419449
55
+ NRX.AX,0.201323,0.821103
56
+ 4989.TW,0.198282,0.334545
57
+ GRX.AX,0.192572,0.500298
58
+ HDRSF,0.182560,0.582387
59
+ GRX.L,0.181102,0.358387
60
+ 601137.SS,0.178591,0.352066
61
+ BCUFF,0.169038,0.735712
62
+ HI.V,0.168562,0.481719
63
+ 600255.SS,0.165257,0.313489
64
+ PNTZF,0.157047,0.715470
65
+ CUBEXTUB.NS,0.141331,0.384223
66
+ MCL.NS,0.137765,0.342360
67
+ ALM.AX,0.131943,0.506486
68
+ BHAGYANGR.NS,0.131430,0.266450
69
+ 688388.SS,0.129130,0.283810
70
+ 5PMA.F,0.111442,0.332867
71
+ PMAM3.SA,0.101593,0.236565
72
+ HHLKF,0.094041,0.329713
73
+ HIN.MU,0.090223,0.097573
74
+ BFGFF,0.083922,0.685144
75
+ C730.F,0.081191,0.517667
76
+ RDS.AX,0.079513,0.302310
77
+ LA.V,0.079408,0.135596
78
+ NTM.AX,0.070476,0.282849
79
+ MTJ3.F,0.069175,0.098631
80
+ KCC.V,0.059441,0.239155
81
+ ATCUF,0.055618,0.189407
82
+ SARKY.IS,0.054537,0.108979
83
+ LSANF,0.051172,0.096407
84
+ SAGARDEEP.NS,0.050789,0.118154
85
+ BRVRF,0.049157,0.282762
86
+ TWOSF,0.040633,0.220326
87
+ TFM.V,0.033252,0.216380
88
+ 08W.F,0.032241,0.289057
89
+ CFV0.F,0.030216,0.478409
90
+ MMLTF,0.023101,0.055416
91
+ TVCCF,0.016415,0.386299
92
+ TNC.AX,0.015794,0.153874
93
+ ACMDY,0.012986,0.031652
94
+ TWO.V,0.012275,0.047806
95
+ HLGVF,0.010366,0.060247
96
+ KGHPF,0.010355,0.018364
97
+ SLMFF,0.009290,0.142938
98
+ BZDLF,0.002504,0.016546
99
+ ARJN.V,-0.010778,-0.056995
100
+ BP60.F,-0.017995,-0.174410
101
+ RAJMET.NS,-0.020866,-0.054317
102
+ PSGR,-0.044813,-0.319164
103
+ CAEN,-0.050007,-0.123762
104
+ COPR,-0.071854,-1.133700
105
+ ARJNF,-0.203482,-0.983979
Stock_Benchmark Analysis/df_prices.csv ADDED
The diff for this file is too large to render. See raw diff
 
Stock_Benchmark Analysis/df_prices_final.csv ADDED
The diff for this file is too large to render. See raw diff
 
Stock_Benchmark Analysis/excess_summary.csv ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ticker,corr_to_copper,beta_to_copper,alpha_ann,t_alpha,hedged_sharpe,cum_excess_beta_log,cum_excess_1x_log,vol_ann,bench_vol_ann,label
2
+ COPR,-0.071854,-1.133700,0.517289,0.449278,0.131217,2.397436,1.401024,3.952470,0.250506,Neutral
3
+ RAJMET.NS,-0.020866,-0.054317,0.275785,0.785795,0.423001,1.278157,0.785804,0.652115,0.250506,Neutral
4
+ 5PMA.F,0.111442,0.332867,0.207392,0.799005,0.278909,0.961181,0.649638,0.748242,0.250506,Neutral
5
+ PSGR,-0.044813,-0.319164,0.140210,0.253978,0.078666,0.649821,0.033788,1.784135,0.250506,Neutral
6
+ TKO.L,0.486790,0.908630,0.136683,0.797193,0.334639,0.633471,0.590803,0.467589,0.250506,Neutral
7
+ SFRRF,0.309463,0.617418,0.128597,0.633124,0.270583,0.595996,0.417335,0.499792,0.250506,Neutral
8
+ TGB,0.682023,1.598263,0.126734,0.775979,0.295197,0.587363,0.866744,0.587040,0.250506,Neutral
9
+ 000737.SZ,0.382355,0.744536,0.121576,0.597471,0.269730,0.563456,0.444157,0.487795,0.250506,Neutral
10
+ 600255.SS,0.165257,0.313489,0.117316,0.578922,0.250317,0.543717,0.223125,0.475205,0.250506,Neutral
11
+ HI.V,0.168562,0.481719,0.112686,0.406562,0.159690,0.522257,0.280227,0.715901,0.250506,Neutral
12
+ ATCUF,0.055618,0.189407,0.101662,0.339563,0.119353,0.471165,0.092628,0.853097,0.250506,Neutral
13
+ MARIF,0.233169,0.419449,0.096767,0.626419,0.220821,0.448479,0.177369,0.450637,0.250506,Neutral
14
+ MMLTF,0.023101,0.055416,0.091871,0.348528,0.152923,0.425788,-0.015321,0.600930,0.250506,Neutral
15
+ E2E1.F,0.235437,0.396147,0.087337,0.581520,0.213197,0.404773,0.122781,0.421502,0.250506,Neutral
16
+ MARI.TO,0.364093,0.608576,0.086713,0.596685,0.222354,0.401882,0.219092,0.418718,0.250506,Neutral
17
+ HIN.MU,0.090223,0.097573,0.078546,0.637151,0.291117,0.364032,-0.057391,0.270914,0.250506,Neutral
18
+ HDRSF,0.182560,0.582387,0.073606,0.265038,0.093681,0.341137,0.146117,0.799142,0.250506,Neutral
19
+ SCCO,0.751106,1.169829,0.070963,0.662415,0.275507,0.328888,0.408197,0.390157,0.250506,Neutral
20
+ ACMDY,0.012986,0.031652,0.070372,0.263062,0.115258,0.326145,-0.126061,0.610606,0.250506,Neutral
21
+ 601137.SS,0.178591,0.352066,0.067975,0.301107,0.139895,0.315037,0.012460,0.493837,0.250506,Neutral
22
+ CUU.V,0.258176,0.975104,0.060114,0.190617,0.065766,0.278607,0.266981,0.946136,0.250506,Neutral
23
+ FPMB.F,0.715914,1.326046,0.055928,0.464070,0.172639,0.259204,0.411463,0.463998,0.250506,Neutral
24
+ C730.F,0.081191,0.517667,0.054750,0.093825,0.034392,0.253747,0.028504,1.597207,0.250506,Neutral
25
+ FCXO34.SA,0.690445,1.266068,0.054267,0.422590,0.163313,0.251508,0.375759,0.459353,0.250506,Neutral
26
+ FG1.F,0.586101,0.937223,0.049986,0.404243,0.154009,0.231665,0.202350,0.400580,0.250506,Neutral
27
+ KGHPF,0.010355,0.018364,0.049922,0.245796,0.112378,0.231370,-0.227042,0.444257,0.250506,Neutral
28
+ FCX,0.744135,1.426671,0.039917,0.334655,0.124413,0.184998,0.384248,0.480276,0.250506,Neutral
29
+ CPFXF,0.284255,1.126958,0.036005,0.110617,0.037813,0.166872,0.226159,0.993156,0.250506,Neutral
30
+ 002203.SZ,0.290713,0.322780,0.032944,0.268439,0.123793,0.152684,-0.163569,0.278137,0.250506,Neutral
31
+ ANFGF,0.604987,1.130759,0.029458,0.223128,0.079016,0.136525,0.197588,0.468211,0.250506,Neutral
32
+ OCKA.F,0.607377,1.362314,0.012027,0.074320,0.026946,0.055742,0.224938,0.561872,0.250506,Neutral
33
+ 000630.SZ,0.514417,0.750244,0.010539,0.083426,0.033639,0.048844,-0.067789,0.365347,0.250506,Neutral
34
+ HBM.TO,0.694932,1.508307,0.009032,0.059625,0.023101,0.041859,0.279232,0.543708,0.250506,Neutral
35
+ LUNMF,0.668351,1.278764,0.005465,0.035919,0.015328,0.025327,0.155506,0.479296,0.250506,Neutral
36
+ 600362.SS,0.578169,0.859542,0.004120,0.036685,0.013559,0.019095,-0.046497,0.372418,0.250506,Neutral
37
+ CVV.AX,0.288097,0.897164,0.003916,0.011738,0.005243,0.018151,-0.029872,0.780101,0.250506,Neutral
38
+ 300697.SZ,0.301865,0.581731,-0.002608,-0.013438,-0.005666,-0.012085,-0.207411,0.482757,0.250506,Neutral
39
+ IPMLF,0.394290,0.790985,-0.006833,-0.032261,-0.014795,-0.031668,-0.129275,0.502541,0.250506,Neutral
40
+ HBM,0.699505,1.612239,-0.009536,-0.061583,-0.023113,-0.044198,0.241711,0.577374,0.250506,Neutral
41
+ LSANF,0.051172,0.096407,-0.012109,-0.056337,-0.025691,-0.056120,-0.478087,0.471953,0.250506,Neutral
42
+ SAGARDEEP.NS,0.050789,0.118154,-0.018762,-0.065170,-0.032236,-0.086954,-0.498766,0.582763,0.250506,Neutral
43
+ LA.V,0.079408,0.135596,-0.032739,-0.175741,-0.076778,-0.151733,-0.555399,0.427760,0.250506,Neutral
44
+ 600490.SS,0.319510,0.648428,-0.040850,-0.178563,-0.084797,-0.189325,-0.353504,0.508388,0.250506,Neutral
45
+ MCL.NS,0.137765,0.342360,-0.044606,-0.168211,-0.072342,-0.206732,-0.513841,0.622532,0.250506,Neutral
46
+ 4989.TW,0.198282,0.334545,-0.060966,-0.335245,-0.147167,-0.282556,-0.593315,0.422658,0.250506,Neutral
47
+ USCUF,0.290002,1.475519,-0.075437,-0.176058,-0.061844,-0.349621,-0.127559,1.274568,0.250506,Neutral
48
+ CAEN,-0.050007,-0.123762,-0.076369,-0.282749,-0.123334,-0.353939,-0.878722,0.619978,0.250506,Neutral
49
+ FQVLF,0.631128,1.608453,-0.082618,-0.339056,-0.166833,-0.382901,-0.098760,0.638424,0.250506,Neutral
50
+ PNTZF,0.157047,0.715470,-0.108809,-0.222571,-0.096540,-0.504289,-0.637161,1.141246,0.250506,Neutral
51
+ HGO.AX,0.365913,0.891063,-0.116983,-0.476673,-0.206058,-0.542173,-0.593046,0.610027,0.250506,Neutral
52
+ ERO,0.667417,1.451800,-0.130054,-0.716827,-0.320497,-0.602749,-0.391764,0.544914,0.250506,Neutral
53
+ TNC.AX,0.015794,0.153874,-0.176595,-0.160029,-0.072367,-0.818449,-1.213580,2.440585,0.250506,Neutral
54
+ XXIX.V,0.287216,0.782133,-0.185069,-0.690849,-0.283230,-0.857723,-0.959464,0.682166,0.250506,Neutral
55
+ TFM.V,0.033252,0.216380,-0.219508,-0.398677,-0.134731,-1.017337,-1.383278,1.630131,0.250506,Neutral
56
+ BRVRF,0.049157,0.282762,-0.265536,-0.556172,-0.184499,-1.230659,-1.565600,1.440969,0.250506,Neutral
57
+ TWOSF,0.040633,0.220326,-0.267789,-0.648638,-0.197306,-1.241101,-1.605199,1.358348,0.250506,Neutral
58
+ TWO.V,0.012275,0.047806,-0.272738,-0.774504,-0.279577,-1.264038,-1.708701,0.975615,0.250506,Neutral
59
+ CFV0.F,0.030216,0.478409,-0.277935,-0.220005,-0.070107,-1.288122,-1.531698,3.966237,0.250506,Neutral
60
+ 08W.F,0.032241,0.289057,-0.300310,-0.377124,-0.133782,-1.391822,-1.723824,2.245939,0.250506,Neutral
61
+ ARJNF,-0.203482,-0.983979,-0.300538,-0.729700,-0.253399,-1.392878,-2.319372,1.211372,0.250506,Neutral
62
+ BZDLF,0.002504,0.016546,-0.388271,-0.585466,-0.234512,-1.799486,-2.258747,1.655661,0.250506,Neutral
63
+ BP60.F,-0.017995,-0.174410,-0.431102,-0.551721,-0.177583,-1.997994,-2.546429,2.428006,0.250506,Neutral
64
+ HLGVF,0.010366,0.060247,-0.440024,-0.640713,-0.302238,-2.039340,-2.478193,1.455962,0.250506,Neutral
65
+ BFGFF,0.083922,0.685144,-0.638709,-0.724760,-0.313411,-2.960170,-3.107204,2.045144,0.250506,Neutral
66
+ SLMFF,0.009290,0.142938,-0.722036,-0.510444,-0.187347,-3.346360,-3.746597,3.854169,0.250506,Neutral
67
+ 3N4.SG,0.308499,0.985685,0.771863,2.347572,1.013805,3.577288,3.570603,0.800392,0.250506,Outperform
68
+ CUBEXTUB.NS,0.141331,0.384223,0.449954,1.727216,0.667396,2.085361,1.797801,0.681029,0.250506,Outperform
69
+ JIX.F,0.359048,0.845307,0.445271,1.866527,0.808936,2.063658,1.991418,0.589766,0.250506,Outperform
70
+ HINDCOPPER.NS,0.380606,0.851744,0.364786,1.531268,0.703668,1.690643,1.621409,0.560599,0.250506,Outperform
71
+ SARKY.IS,0.054537,0.108979,0.359428,1.683551,0.719093,1.665813,1.249717,0.500581,0.250506,Outperform
72
+ RE8.F,0.558737,0.930441,0.294804,1.992390,0.852114,1.366304,1.333821,0.417158,0.250506,Outperform
73
+ BHAGYANGR.NS,0.131430,0.266450,0.293131,1.419577,0.582246,1.358548,1.015989,0.507854,0.250506,Outperform
74
+ FDY.TO,0.332434,0.867706,0.259834,1.047747,0.421347,1.204230,1.142450,0.653861,0.250506,Outperform
75
+ 9CM0.F,0.321625,0.854389,0.246569,0.958671,0.391314,1.142754,1.074755,0.665463,0.250506,Outperform
76
+ GRX.AX,0.192572,0.500298,0.227764,0.884677,0.356647,1.055600,0.822245,0.650809,0.250506,Outperform
77
+ ARREF,0.663149,1.258081,0.225933,1.501482,0.635153,1.047110,1.167631,0.475243,0.250506,Outperform
78
+ MTJ3.F,0.069175,0.098631,0.211207,1.454970,0.592745,0.978865,0.557937,0.357176,0.250506,Outperform
79
+ GRX.L,0.181102,0.358387,0.198418,0.913302,0.406982,0.919593,0.619967,0.495733,0.250506,Outperform
80
+ 005810.KS,0.233232,0.308425,0.191605,1.338640,0.594803,0.888016,0.565059,0.331268,0.250506,Outperform
81
+ 2009.TW,0.459276,0.934166,0.189113,0.806763,0.417827,0.876468,0.845724,0.509529,0.250506,Outperform
82
+ OUW0.F,0.587710,1.451457,0.163790,0.869680,0.327220,0.759102,0.969927,0.618671,0.250506,Outperform
83
+ CS.TO,0.731330,1.730688,0.162900,0.981190,0.402899,0.754977,1.096199,0.592822,0.250506,Outperform
84
+ SFR.AX,0.512234,0.893583,0.142969,0.921112,0.380927,0.662608,0.612912,0.437004,0.250506,Outperform
85
+ PUCOBRE.SN,0.275522,0.266430,0.126627,1.279036,0.543780,0.586865,0.244297,0.242240,0.250506,Outperform
86
+ ATYM.L,0.470796,0.713631,0.125949,0.849829,0.375963,0.583723,0.449992,0.379717,0.250506,Outperform
87
+ 000878.SZ,0.551521,0.736491,-0.096890,-0.817530,-0.347221,-0.449048,-0.572104,0.334521,0.250506,Underperform
88
+ CAML.L,0.508801,0.666887,-0.102868,-0.826752,-0.363926,-0.476754,-0.632314,0.328339,0.250506,Underperform
89
+ KGH.WA,0.732039,1.197883,-0.115414,-0.942098,-0.413282,-0.534901,-0.442492,0.409920,0.250506,Underperform
90
+ 601609.SS,0.357557,0.417161,-0.142637,-1.261420,-0.522587,-0.661067,-0.933246,0.292265,0.250506,Underperform
91
+ 688388.SS,0.129130,0.283810,-0.196729,-0.831598,-0.360332,-0.911763,-1.246215,0.550576,0.250506,Underperform
92
+ 300618.SZ,0.263318,0.526491,-0.207924,-0.937295,-0.430308,-0.963648,-1.184771,0.500874,0.250506,Underperform
93
+ GCUMF,0.241195,0.706705,-0.310318,-1.136020,-0.435646,-1.438204,-1.575169,0.733986,0.250506,Underperform
94
+ NRX.AX,0.201323,0.821103,-0.340847,-0.971002,-0.340582,-1.579696,-1.663239,1.021698,0.250506,Underperform
95
+ RDS.AX,0.079513,0.302310,-0.362839,-1.117052,-0.382171,-1.681620,-2.007433,0.952431,0.250506,Underperform
96
+ BCU.V,0.238980,0.954954,-0.395339,-1.025668,-0.406724,-1.832246,-1.853282,1.001014,0.250506,Underperform
97
+ PMAM3.SA,0.101593,0.236565,-0.398886,-1.309932,-0.687379,-1.848682,-2.205197,0.583318,0.250506,Underperform
98
+ HHLKF,0.094041,0.329713,-0.403220,-1.393698,-0.461143,-1.868770,-2.181786,0.878285,0.250506,Underperform
99
+ KCC.V,0.059441,0.239155,-0.410701,-1.280651,-0.408206,-1.903442,-2.258747,1.007894,0.250506,Underperform
100
+ BCUFF,0.169038,0.735712,-0.413650,-1.033868,-0.384933,-1.917108,-2.040527,1.090292,0.250506,Underperform
101
+ ALM.AX,0.131943,0.506486,-0.418863,-1.372710,-0.439427,-1.941271,-2.171736,0.961611,0.250506,Underperform
102
+ CYM.AX,0.237302,0.894108,-0.516695,-1.284740,-0.563525,-2.394685,-2.444135,0.943860,0.250506,Underperform
103
+ ARJN.V,-0.010778,-0.056995,-0.728125,-1.492287,-0.549668,-3.374582,-3.868185,1.324742,0.250506,Underperform
104
+ NTM.AX,0.070476,0.282849,-0.786178,-2.409327,-0.783916,-3.643633,-3.978533,1.005386,0.250506,Underperform
105
+ TVCCF,0.016415,0.386299,-1.610456,-1.012340,-0.273220,-7.463845,-7.750436,5.895146,0.250506,Underperform
Stock_Benchmark Analysis/outperforming_stocks.csv ADDED
The diff for this file is too large to render. See raw diff
 
Stock_Benchmark Analysis/unique_companies.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:565a691b35feb08aad7a3912ebdb795bccee61ce6786bb33fff58cc4d59e3b37
3
+ size 17769205
Stock_Benchmark Analysis/unique_companies_copper.csv ADDED
The diff for this file is too large to render. See raw diff