{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "811bdabd", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/var/folders/6b/p92_dgbd07ldpbpq29vbpd_m0000gn/T/ipykernel_66700/3907943495.py:4: DtypeWarning: Columns (11,13,23,24,75,76,79,80,83,84,90) have mixed types. Specify dtype option on import or set low_memory=False.\n", " df = pd.read_csv('unique_companies.csv')\n" ] } ], "source": [ "import pandas as pd\n", "\n", "# Load the CSV file\n", "df = pd.read_csv('unique_companies.csv')\n", "\n", "# Apply the filter\n", "filtered_df = df[\n", " (df['GicSubIndustry'].isin(['Copper', 'Diversified Metals & Mining'])) |\n", " (df['Industry'] == 'Copper')\n", "]\n", "\n", "# Further filter to rows where 'Industry' is 'Other Industrial Metals & Mining' or 'Copper'\n", "final_df = filtered_df[filtered_df['Industry'].isin(['Other Industrial Metals & Mining', 'Copper'])]\n", "\n", "# Save the result to a new CSV if needed\n", "final_df.to_csv('unique_companies_copper.csv', index=False)\n", "\n" ] }, { "cell_type": "code", "execution_count": 2, "id": "7a7c854a", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Ticker transformation complete. Saved as unique_companies_copper_updated.csv\n" ] } ], "source": [ "import pandas as pd\n", "\n", "# Read the CSV\n", "df = pd.read_csv(\"unique_companies_copper.csv\")\n", "\n", "# Mapping dictionary\n", "mapping = {\n", " \"US\": \"\",\n", " \"LSE\": \".L\",\n", " \"STU\": \".SG\",\n", " \"KO\": \".KS\",\n", " \"WAR\": \".WA\",\n", " \"SHG\": \".SS\",\n", " \"NSE\": \".NS\",\n", " \"AU\": \".AX\",\n", " \"JSE\": \".JO\",\n", " \"KLSE\": \".KL\",\n", " \"SHE\": \".SZ\"\n", "}\n", "\n", "def convert_ticker(ticker):\n", " try:\n", " base, suffix = ticker.split(\".\")\n", " if suffix in mapping:\n", " return base + mapping[suffix]\n", " else:\n", " return ticker # leave unchanged if no match\n", " except ValueError:\n", " return ticker # in case ticker doesn't contain \".\"\n", " \n", "# Apply transformation\n", "df[\"PrimaryTicker\"] = df[\"PrimaryTicker\"].apply(convert_ticker)\n", "\n", "# Save the updated CSV\n", "df.to_csv(\"unique_companies_copper_updated.csv\", index=False)\n", "\n", "print(\"Ticker transformation complete. 
Saved as unique_companies_copper_updated.csv\")\n" ] }, { "cell_type": "code", "execution_count": 17, "id": "261ce11e", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found 1830 tickers (including HG=F).\n", "Downloading batch 1: 50 tickers\n", "Downloading batch 2: 50 tickers\n", "Downloading batch 3: 50 tickers\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "1 Failed download:\n", "['5RJ.F']: YFPricesMissingError('possibly delisted; no price data found (period=6y) (Yahoo error = \"No data found, symbol may be delisted\")')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading batch 4: 50 tickers\n", "Downloading batch 5: 50 tickers\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "1 Failed download:\n", "['ACLHF']: YFPricesMissingError('possibly delisted; no price data found (period=6y) (Yahoo error = \"No data found, symbol may be delisted\")')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading batch 6: 50 tickers\n", "Downloading batch 7: 50 tickers\n", "Downloading batch 8: 50 tickers\n", "Downloading batch 9: 50 tickers\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "2 Failed downloads:\n", "['BR0.F', 'BMOOD']: YFPricesMissingError('possibly delisted; no price data found (period=6y) (Yahoo error = \"No data found, symbol may be delisted\")')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading batch 10: 50 tickers\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "1 Failed download:\n", "['C0O.F']: YFPricesMissingError('possibly delisted; no price data found (period=6y) (Yahoo error = \"No data found, symbol may be delisted\")')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading batch 11: 50 tickers\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "2 Failed downloads:\n", "['CMETF', 'CMDRF']: YFPricesMissingError('possibly delisted; no price data found (period=6y) (Yahoo error = \"No data found, symbol may be delisted\")')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading batch 12: 50 tickers\n", "Downloading batch 13: 50 tickers\n", "Downloading batch 14: 50 tickers\n", "Downloading batch 15: 50 tickers\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "2 Failed downloads:\n", "['ERW.AX', 'EROSF']: YFPricesMissingError('possibly delisted; no price data found (period=6y) (Yahoo error = \"No data found, symbol may be delisted\")')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading batch 16: 50 tickers\n", "Downloading batch 17: 50 tickers\n", "Downloading batch 18: 50 tickers\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "1 Failed download:\n", "['GRFX']: YFPricesMissingError('possibly delisted; no price data found (period=6y) (Yahoo error = \"No data found, symbol may be delisted\")')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading batch 19: 50 tickers\n", "Downloading batch 20: 50 tickers\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "1 Failed download:\n", "['JRV.V']: YFPricesMissingError('possibly delisted; no price data found (period=6y) (Yahoo error = \"No data found, symbol may be delisted\")')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading batch 21: 50 tickers\n", "Downloading batch 22: 50 tickers\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "1 Failed 
download:\n", "['LLI.AX']: YFPricesMissingError('possibly delisted; no price data found (period=6y) (Yahoo error = \"No data found, symbol may be delisted\")')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading batch 23: 50 tickers\n", "Downloading batch 24: 50 tickers\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "1 Failed download:\n", "['MLYF']: YFPricesMissingError('possibly delisted; no price data found (period=6y) (Yahoo error = \"No data found, symbol may be delisted\")')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading batch 25: 50 tickers\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "2 Failed downloads:\n", "['NALA.TA', 'NGLD.F']: YFPricesMissingError('possibly delisted; no price data found (period=6y) (Yahoo error = \"No data found, symbol may be delisted\")')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading batch 26: 50 tickers\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "3 Failed downloads:\n", "['NSMCF', 'NOCRF', 'NVLIF']: YFPricesMissingError('possibly delisted; no price data found (period=6y) (Yahoo error = \"No data found, symbol may be delisted\")')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading batch 27: 50 tickers\n", "Downloading batch 28: 50 tickers\n", "Downloading batch 29: 50 tickers\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "1 Failed download:\n", "['RCZRF']: YFPricesMissingError('possibly delisted; no price data found (period=6y) (Yahoo error = \"No data found, symbol may be delisted\")')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading batch 30: 50 tickers\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "1 Failed download:\n", "['RLL.AX']: YFPricesMissingError('possibly delisted; no price data found (period=6y) (Yahoo error = \"No data found, symbol may be delisted\")')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading batch 31: 50 tickers\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "1 Failed download:\n", "['SIE.V']: YFPricesMissingError('possibly delisted; no price data found (period=6y) (Yahoo error = \"No data found, symbol may be delisted\")')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading batch 32: 50 tickers\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "1 Failed download:\n", "['SMCDF']: YFPricesMissingError('possibly delisted; no price data found (period=6y) (Yahoo error = \"No data found, symbol may be delisted\")')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading batch 33: 50 tickers\n", "Downloading batch 34: 50 tickers\n", "Downloading batch 35: 50 tickers\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "2 Failed downloads:\n", "['VLV.V', 'V6Y.F']: YFPricesMissingError('possibly delisted; no price data found (period=6y) (Yahoo error = \"No data found, symbol may be delisted\")')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Downloading batch 36: 50 tickers\n", "Downloading batch 37: 30 tickers\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "1 Failed download:\n", "['5RJ.F']: YFRateLimitError('Too Many Requests. Rate limited. 
Try after a while.')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Repairing via single-ticker fetch: ['5RJ.F', 'ACLHF', 'BMOOD', 'BR0.F', 'C0O.F', 'CMDRF', 'CMETF', 'EROSF', 'ERW.AX', 'GRFX', 'JRV.V', 'LLI.AX', 'MLYF', 'NALA.TA', 'NGLD.F', 'NOCRF', 'NSMCF', 'NVLIF', 'RCZRF', 'RLL.AX', 'SIE.V', 'SMCDF', 'V6Y.F', 'VLV.V']\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "1 Failed download:\n", "['ACLHF']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['BMOOD']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['BR0.F']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['C0O.F']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['CMDRF']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['CMETF']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['EROSF']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['ERW.AX']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['GRFX']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['JRV.V']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['LLI.AX']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['MLYF']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['NALA.TA']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['NGLD.F']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['NOCRF']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['NSMCF']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['NVLIF']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['RCZRF']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['RLL.AX']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['SIE.V']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['SMCDF']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['V6Y.F']: YFRateLimitError('Too Many Requests. Rate limited. Try after a while.')\n", "\n", "1 Failed download:\n", "['VLV.V']: YFRateLimitError('Too Many Requests. Rate limited. 
Try after a while.')\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Dropping tickers with no usable data: ['5RJ.F', 'ACLHF', 'BMOOD', 'BR0.F', 'C0O.F', 'CMDRF', 'CMETF', 'EROSF', 'ERW.AX', 'GRFX', 'JRV.V', 'LLI.AX', 'MLYF', 'NALA.TA', 'NGLD.F', 'NOCRF', 'NSMCF', 'NVLIF', 'RCZRF', 'RLL.AX', 'SIE.V', 'SMCDF', 'V6Y.F', 'VLV.V']\n", "df_prices shape: (1570, 1806)\n", "Columns: ['HG=F', '000060.SZ', '000426.SZ', '000603.SZ', '000630.SZ', '000657.SZ', '000737.SZ', '000751.SZ', '000758.SZ', '000762.SZ', '000795.SZ', '000878.SZ', '000960.SZ', '000962.SZ', '000969.SZ', '002114.SZ', '002149.SZ', '002155.SZ', '002171.SZ', '002203.SZ', '002378.SZ', '002428.SZ', '002716.SZ', '002738.SZ', '002842.SZ', '002978.SZ', '005810.KS', '00D0.F', '010130.KS', '08W.F', '0CM.F', '0ED.F', '0N4.F', '0PA.F', '0WL.F', '0XX.F', '14S.F', '1AJ2.F', '1D4.F', '1JWA.F', '1K4.F', '1MC.AX', '1OI.F', '1QV.F', '1RR1.F', '1SN.L', '1SZ.F', '1TG.F', '1TT0.F', '1VU0.F', '2009.TW', '20MICRONS.NS', '2220.F', '22Q0.F', '26A1.F', '2CC2.F', '2DK.F', '2I7.F', '2IK.F', '2JCA.F', '2NZ.F', '2QD0.F', '2QPA.F', '2REA.F', '2SO0.F', '2XM.F', '300618.SZ', '300697.SZ', '300930.SZ', '301026.SZ', '301219.SZ', '301511.SZ', '30Z.F', '30Z0.F', '31WN.F', '338B.F', '33X1.F', '34S0.F', '37N.F', '381.F', '39G.F', '3B4.F', '3F9.F', '3FL.F', '3FNA.F', '3FR0.F', '3GE.F', '3GRE.F', '3JC0.F', '3N4.SG', '3NY1.F', '3PM.F', '3S7.F', '3WQ0.F', '40A.F', '432.F', '47GA.F', '48U0.F', '4989.TW', '4A0.F', '4AP.F', '4U50.F', '4V9.F', '4W0.F', '4XH.F', '4XJ.F', '4YW.F', '5298.KL', '540.F', '55D.F', '5E8.F', '5EG.F', '5FG.F', '5GE2.F', '5HR.F', '5IP.F', '5KG0.F', '5KP.F', '5N91.F', '5P0.F', '5PE.F', '5PMA.F', '5V8.F', '600111.SS', '600255.SS', '600259.SS', '600331.SS', '600338.SS', '600362.SS', '600392.SS', '600456.SS', '600490.SS', '600497.SS', '600531.SS', '600549.SS', '600711.SS', '600961.SS', '601020.SS', '601061.SS', '601121.SS', '601137.SS', '601168.SS', '601212.SS', '601609.SS', '601958.SS', '603124.SS', '603132.SS', '603399.SS', '603799.SS', '603979.SS', '603993.SS', '64O.F', '688102.SS', '688231.SS', '688388.SS', '6AY.F', '6D71.F', '6E9.F', '6GS.F', '6ND1.F', '6Q5.F', '6S3A.F', '6TH.MU', '77C.F', '7C6A.F', '7CX.F', '7GI.F', '7JL.F', '7KO.F', '7LY0.F', '7S50.F', '7UE.F', '7WV.F', '7WW.F', '7Y2.F', '7Z00.F', '862.F', '87K.F', '8AMA.F', '8BY1.F', '8CD1.F', '8CHA.F', '8EE.F', '8GCA.F', '8L10.F', '8N6.F', '8OH.F', '8XJ.F', '90AA.F', '92G.MU', '97E0.F', '97G0.F', '9927.TW', '9CM0.F', '9D00.F', '9ED.F', '9EU.F', '9IA.F', '9PZ.F', '9S70.F', '9SC.F', '9TI.F', 'A1G.AX', 'A1S.F', 'A2R.F', 'AABB', 'AAGFF', 'AAL.L', 'AB-H.V', 'ABAT', 'ABCFF', 'ABEPF', 'ABRMF', 'ABZ.V', 'ACMDY', 'ACNE', 'ACP.AX', 'ACRL', 'ADBRF', 'ADC.AX', 'ADMLF', 'ADT1.L', 'ADTLF', 'ADV.AX', 'ADXDF', 'AEV.AX', 'AFM.V', 'AFMJF', 'AFP.L', 'AFR.V', 'AGR.AX', 'AGREF', 'AGXPF', 'AGY.AX', 'AHG.F', 'AHN.AX', 'AHR.V', 'AHS.F', 'AIS.AX', 'AIV.AX', 'AKN.AX', 'AKO.AX', 'AL9.F', 'ALB.AX', 'ALBA.L', 'ALBAF', 'ALEEF', 'ALI0.F', 'ALL.L', 'ALLIF', 'ALM.AX', 'ALMTF', 'ALR.AX', 'ALT.V', 'ALTAF', 'ALTPF', 'AM7.AX', 'AMC.TO', 'AMD.AX', 'AME.V', 'AMG.AS', 'AMI.AX', 'AMLM', 'AMN.AX', 'AMR.V', 'AMRRY', 'AMSLF', 'AMVMF', 'AMYZF', 'ANDMF', 'ANFGF', 'ANGCF', 'ANLBF', 'ANSNF', 'ANX.AX', 'AOA.AX', 'AON.AX', 'APC.AX', 'APCOF', 'APH.JO', 'AQD.AX', 'AQX.AX', 'AR1.AX', 'AR3.AX', 'ARAAF', 'ARAFF', 'ARB3.F', 'ARCM.L', 'ARCT.ST', 'ARD.AX', 'ARDDF', 'ARDNF', 'ARENTERP.NS', 'ARI.JO', 'ARIZF', 'ARJN.V', 'ARJNF', 'ARK.L', 'ARN.AX', 'ARR.AX', 'ARREF', 'ARRRF', 'ARS.L', 'ARSMF', 'ARSRF', 'ARTTF', 'ARU.AX', 'ARXRF', 'ARYMF', 
'ASCU.TO', 'ASCUF', 'ASE.AX', 'ASHAPURMIN.NS', 'ASHXF', 'ASKE', 'ASMMF', 'ASN.AX', 'ATBHF', 'ATCMF', 'ATCUF', 'ATM.L', 'ATMMF', 'ATN.L', 'ATOM.V', 'ATOXF', 'ATR.AX', 'ATUSF', 'ATVVF', 'ATY.F', 'ATYM.L', 'AU31.F', 'AU5.F', 'AUE.AX', 'AUG.AX', 'AUMC', 'AUMTF', 'AUN1.F', 'AUR-H.V', 'AUR.AX', 'AURWF', 'AUSDF', 'AUZ.AX', 'AVL.AX', 'AVL.TO', 'AVLNF', 'AVPMF', 'AVRTF', 'AVX.V', 'AW1.AX', 'AWLIF', 'AWMLF', 'AX8.AX', 'AXN.AX', 'AXO.V', 'AXREF', 'AXVEF', 'AYM.L', 'AYT.AX', 'AZ3.F', 'AZ9.AX', 'AZI.AX', 'AZL.AX', 'AZLAF', 'AZM.V', 'AZMCF', 'AZMTF', 'AZZTF', 'B.V', 'B4T0.F', 'B9S.F', 'BAG.V', 'BAJFF', 'BAL-H.V', 'BAPA.F', 'BATXF', 'BBBMF', 'BCA.AX', 'BCMRF', 'BCU.V', 'BCUFF', 'BCW1.F', 'BDD.F', 'BDNNY', 'BDRGF', 'BELMF', 'BEM.L', 'BENZF', 'BER-H.V', 'BFGFF', 'BHAGYANGR.NS', 'BHL.L', 'BHLIF', 'BHP', 'BHP.AX', 'BHPN.MX', 'BHSIF', 'BITTF', 'BJ4.F', 'BKI.TO', 'BKM.V', 'BKMNF', 'BKT.AX', 'BKTPF', 'BLIDF', 'BLLYF', 'BLNMF', 'BLSTF', 'BLTH', 'BM.V', 'BM8.AX', 'BMG.AX', 'BML.AX', 'BMM.AX', 'BMM.V', 'BMOOF', 'BMTLF', 'BMVVF', 'BNTRF', 'BNZ.AX', 'BOA.AX', 'BOCA.V', 'BOL.ST', 'BONE.V', 'BORK', 'BOTX.ST', 'BP60.F', 'BRES.L', 'BRGC', 'BRIOF', 'BRKCF', 'BRL-H.V', 'BRMS.JK', 'BRTN.F', 'BRUZF', 'BRVRF', 'BRVVF', 'BRYGF', 'BSG0.F', 'BSHVF', 'BSSMF', 'BTRMF', 'BTRYF', 'BUR.AX', 'BUR.F', 'BUS.AX', 'BUX.AX', 'BVLDF', 'BVR.AX', 'BWJ0.F', 'BWNAF', 'BWR.V', 'BYDMF', 'BYH.AX', 'BZDLF', 'C1Z.F', 'C29.AX', 'C2U.F', 'C3F.F', 'C5C.F', 'C730.F', 'C7A.AX', 'CAEN', 'CAML.L', 'CAMLF', 'CAMZF', 'CAPFF', 'CASXF', 'CATTF', 'CAULF', 'CAV.AX', 'CAXA.F', 'CAXPF', 'CB82.F', 'CBA.V', 'CBBHF', 'CBBLF', 'CBE.AX', 'CBEEF', 'CBI.V', 'CBRSF', 'CBULF', 'CC9.AX', 'CCCFF', 'CCD.V', 'CCE.V', 'CCEDF', 'CCOB', 'CCOOF', 'CCWOF', 'CCY.F', 'CDBDF', 'CDBMF', 'CDL.L', 'CDMNF', 'CEO-H.V', 'CFV0.F', 'CGMLF', 'CHF.L', 'CHKMF', 'CHR.AX', 'CHW.AX', 'CHZQ', 'CLA.AX', 'CLA.L', 'CLE.AX', 'CLGMF', 'CLLXF', 'CLRSF', 'CLZ.AX', 'CMCLF', 'CMD.AX', 'CMET.L', 'CMG.AX', 'CMIM', 'CMLGF', 'CMO-H.V', 'CMO.AX', 'CMP', 'CMRZF', 'CMTNF', 'CNIKF', 'CNJ.AX', 'CNMTF', 'CNOBF', 'CNRSF', 'CNSUF', 'CNTO', 'CNX.V', 'COD.AX', 'CODMF', 'COH.F', 'COMCF', 'CONE.V', 'CONXF', 'COPAF', 'COPR', 'COSAF', 'CPAU.V', 'CPCPF', 'CPEFF', 'CPER.V', 'CPFXF', 'CPM.AX', 'CPN.AX', 'CPO.AX', 'CPORF', 'CPPKF', 'CPPMF', 'CPR.JO', 'CPYCF', 'CR1.AX', 'CR9.AX', 'CRB.AX', 'CRD.V', 'CRECF', 'CREQF', 'CRGP', 'CRI.V', 'CRICF', 'CRML', 'CRS.AX', 'CRSTF', 'CRTM.L', 'CRUCF', 'CS.TO', 'CSC.AX', 'CSCCF', 'CSRNF', 'CST.AX', 'CTJHY', 'CTL.L', 'CTLHF', 'CTN.V', 'CTTZF', 'CTXDF', 'CUAUF', 'CUBEXTUB.NS', 'CUF.AX', 'CUIRF', 'CUL.AX', 'CUSN.L', 'CUU.V', 'CVKMD.IS', 'CVLB.F', 'CVV.AX', 'CVVRF', 'CWA0.F', 'CWX.AX', 'CXOXF', 'CXXMF', 'CYDVF', 'CYGGF', 'CYLYF', 'CYM.AX', 'CYPMF', 'CZN.AX', 'CZR.F', 'D6V1.F', 'D860.F', 'DAL.AX', 'DANR', 'DAUGF', 'DBGF', 'DBLVF', 'DCNNF', 'DECXF', 'DES.AX', 'DETRF', 'DFMTF', 'DFPP.F', 'DFU.F', 'DFXN.F', 'DFYA.F', 'DGV2.F', 'DIAH', 'DJ5.F', 'DKFT.JK', 'DKM.AX', 'DKN.F', 'DLM.AX', 'DLPRF', 'DM1.AX', 'DMDD', 'DME.AX', 'DMM.AX', 'DMNKF', 'DMX.V', 'DMXCF', 'DRE.AX', 'DRR.AX', 'DRRSF', 'DRX.AX', 'DTWOF', 'DUVNF', 'DVP.AX', 'DW8.F', 'DWMNF', 'DY6.AX', 'DYM.AX', 'E06A.F', 'E25.AX', 'E2E1.F', 'E7Q.F', 'E9E.F', 'EAM.V', 'EB8.F', 'EB9.F', 'EBM.F', 'ECGFF', 'ECOR.TO', 'ECORF', 'ECRAF', 'EDDY.V', 'EDDYF', 'EE1.AX', 'EEL.AX', 'EELFF', 'EEMMF', 'EFF.V', 'EFRMF', 'EG1.AX', 'EGDFF', 'EGM.V', 'EGMMF', 'EGPLF', 'EGR.AX', 'EK7C.F', 'EKWX', 'ELBM', 'ELECF', 'ELMTF', 'ELT.AX', 'ELTLF', 'EM2.AX', 'EMC.AX', 'EMGDF', 'EMH.AX', 'EMHLF', 'EMOTF', 'EMT.AX', 'EMTRF', 'EMU.AX', 'EMUCA.AX', 'EMX', 'ENR.AX', 'ENRG.V', 'ENV.AX', 
'EO0.F', 'EON.V', 'EOXFF', 'EP.V', 'EPL.V', 'EQR.AX', 'ER70.F', 'ERA.PA', 'ERD.TO', 'ERDCF', 'ERMAF', 'ERMAY', 'ERO', 'ERR.F', 'ERVFF', 'ESKYF', 'ESPN.V', 'ESR.AX', 'ESVNF', 'ESXMF', 'ETL.V', 'ETM.AX', 'ETPHF', 'ETRUF', 'EUEMF', 'EULIF', 'EUMNF', 'EUR.AX', 'EUUNF', 'EUZ.L', 'EV1.AX', 'EVG.AX', 'EVGUF', 'EVKRF', 'EVNIF', 'EVR.AX', 'EVX.V', 'F12.F', 'F5J0.F', 'F7E1.F', 'FAR.L', 'FBM.AX', 'FCLIF', 'FCM.L', 'FCSMF', 'FCX', 'FCXO34.SA', 'FDY.TO', 'FEERF', 'FEXXF', 'FFM.AX', 'FG1.F', 'FIN.AX', 'FKC0.F', 'FKM.V', 'FKMCF', 'FL1.AX', 'FLM1.F', 'FLMCF', 'FLMTF', 'FMCXF', 'FMK.F', 'FNCJF', 'FNICF', 'FPMB.F', 'FPOCF', 'FPRGF', 'FQVLF', 'FRACF', 'FRB.AX', 'FRG.L', 'FRMA', 'FRPMF', 'FRSPF', 'FSUGY', 'FSUMF', 'FSVEF', 'FSY.TO', 'FTJ.V', 'FTMDF', 'FUR.F', 'FURY', 'FUSEF', 'FUST', 'FVJ.F', 'FWEDF', 'FWL.SG', 'FX8.F', 'FXRVF', 'FYIRF', 'G12.F', 'G4U.F', 'G6A.F', 'G6D2.F', 'G6M.AX', 'G7PA.F', 'G88.AX', 'GAL.AX', 'GALOF', 'GAR-H.V', 'GARWF', 'GBBGF', 'GBDX', 'GBE.AX', 'GBEI', 'GBLEF', 'GBMIF', 'GCCFF', 'GCRCF', 'GCRIF', 'GCUMF', 'GCXXF', 'GDLNF', 'GDMIF', 'GDQMF', 'GDSKF', 'GDTRF', 'GELEF', 'GELGF', 'GEM.OL', 'GEN.AX', 'GENM.TO', 'GENMF', 'GEODF', 'GERFF', 'GES.AX', 'GESI', 'GFM.L', 'GG7.SG', 'GGI.V', 'GGIFF', 'GGTHF', 'GHVNF', 'GIGA.V', 'GIGGF', 'GJST', 'GL1.AX', 'GLATF', 'GLBXF', 'GLCNF', 'GLIIF', 'GLIOF', 'GLKIF', 'GLN.AX', 'GLNCY', 'GLNLF', 'GLR.L', 'GMA.V', 'GMBXF', 'GMWA.F', 'GMX.TO', 'GNG.AX', 'GNG.V', 'GNHRF', 'GNRGF', 'GNTOF', 'GOCOF', 'GODZF', 'GOJ1.F', 'GOLXF', 'GOMRF', 'GORAF', 'GPHOF', 'GPMTF', 'GPPRF', 'GQMLF', 'GR.V', 'GRCMF', 'GRD.V', 'GRDAF', 'GRE.AX', 'GRK.F', 'GRLRF', 'GRLVF', 'GROC.L', 'GRSLF', 'GRX.AX', 'GRX.L', 'GSCCF', 'GSCU.L', 'GSM', 'GSML', 'GSR.F', 'GSS.V', 'GSSRF', 'GT1.AX', 'GTC.V', 'GTCDF', 'GTE.AX', 'GTMLF', 'GWMGF', 'GWMO.L', 'GWRRF', 'GZDIF', 'H3N.F', 'H6F.F', 'HANCF', 'HANNF', 'HAS.AX', 'HBKRF', 'HBM', 'HBM.TO', 'HCH.V', 'HDG0.F', 'HDRSF', 'HECOF', 'HF2.F', 'HGGCF', 'HGO.AX', 'HGO.F', 'HHLKF', 'HHSRF', 'HI.V', 'HILL.JK', 'HIN.MU', 'HINDCOPPER.NS', 'HINDZINC.NS', 'HIO.AX', 'HLGVF', 'HLX.AX', 'HNCUF', 'HOR.AX', 'HPQ.V', 'HPQFF', 'HPYCF', 'HRE.AX', 'HREEF', 'HRFEF', 'HSRMF', 'HT9.F', 'HTM.AX', 'HTRC', 'HUSIF', 'HVY.AX', 'HZLIF', 'HZNM', 'IBATF', 'IBC-H.V', 'ICG.AX', 'ID4.F', 'IDZ1.F', 'IE', 'IE.TO', 'IFSH.JK', 'IG6.AX', 'IGFFF', 'IGO.AX', 'IHB1.F', 'IHS.F', 'IIDDY', 'ILHMF', 'ILI.V', 'ILKAF', 'ILKAY', 'ILU.AX', 'IMA.AX', 'IMC.F', 'IMC.L', 'IMI.AX', 'IMIMF', 'IMPEXFERRO.NS', 'IMTCF', 'INCO.JK', 'INF.AX', 'INR.AX', 'INTR.V', 'INUMF', 'IONGF', 'IONR', 'IPMLF', 'IPOAF', 'IPT.AX', 'IPX', 'IPX.AX', 'ITM.AX', 'ITMIF', 'IVPAF', 'IVR.AX', 'IXRRF', 'IYAA.F', 'J0G.F', 'J1Q0.F', 'JAN.L', 'JAV.AX', 'JBY.AX', 'JD1.F', 'JGRRF', 'JI4.F', 'JIX.F', 'JLL.AX', 'JLRRF', 'JMS.AX', 'JMXXF', 'JNDAF', 'JNOMF', 'JORFF', 'JR1.F', 'JR9.F', 'JUBPF', 'JUGRF', 'JXMNF', 'JZRIF', 'K2I.F', 'K4L.F', 'KAV.L', 'KCC.V', 'KCCFF', 'KDM.F', 'KDNC.L', 'KEN.L', 'KGH.WA', 'KGHPF', 'KGLLF', 'KLDCF', 'KLI.AX', 'KMGLF', 'KMR.L', 'KMRPF', 'KNG.AX', 'KNGRF', 'KNI.AX', 'KOD.L', 'KOGMF', 'KOR.AX', 'KOZAA.IS', 'KP1.F', 'KP10.F', 'KP2.L', 'KRS.L', 'KTR.V', 'KVGOF', 'KWGBF', 'L1M.AX', 'L3L2.F', 'LA.V', 'LAC', 'LANRF', 'LAR', 'LBNKF', 'LBSR', 'LCE.V', 'LCGMF', 'LEG.AX', 'LEL.AX', 'LEMIF', 'LEX.AX', 'LEXTF', 'LGM.AX', 'LGO', 'LGU.F', 'LI.V', 'LIF.F', 'LIFFF', 'LILIF', 'LIN.AX', 'LINRF', 'LIRC.TO', 'LIS.V', 'LISMF', 'LITM', 'LITOF', 'LITRF', 'LITSF', 'LKE.AX', 'LKMNF', 'LKY1.F', 'LLKKF', 'LLL.AX', 'LLLAF', 'LM1.AX', 'LM8.AX', 'LMG.AX', 'LML.AX', 'LMRMF', 'LMSQF', 'LND.L', 'LNDLF', 'LNR.AX', 'LOMEF', 'LOT.AX', 'LPD.AX', 'LPDNF', 
'LPM.AX', 'LQK2.F', 'LQRCF', 'LR8.F', 'LRA.V', 'LRAXF', 'LRD.AX', 'LSA.AX', 'LSANF', 'LSLCF', 'LTH.V', 'LTHCF', 'LTHIF', 'LTMCF', 'LTRBF', 'LTSRF', 'LTUM', 'LUCMF', 'LUNMF', 'LXENF', 'LYC.AX', 'LYIC.F', 'LYK.AX', 'LYSCF', 'LYSDY', 'LZ61.F', 'LZM', 'M0G.F', 'M0LY.F', 'M1C1.F', 'M1W.F', 'M24.AX', 'M2R.AX', 'MAC.AX', 'MACQF', 'MAH.AX', 'MALRF', 'MALRY', 'MANVF', 'MARI.TO', 'MARIF', 'MARVF', 'MAU.AX', 'MAXXF', 'MAZ-H.V', 'MCHHF', 'MCL.NS', 'MCREF', 'MDI.TO', 'MDKA.JK', 'MDMN', 'MDNGF', 'MDSQF', 'MEDAF', 'MEEEF', 'MEG.AX', 'MERKO.AT', 'MET1.L', 'MFRVF', 'MGA.AX', 'MGCV', 'MGL.AX', 'MGMNF', 'MGPHF', 'MGT.AX', 'MGU.AX', 'MGXMF', 'MHIFF', 'MHK.AX', 'MIDLF', 'MIMTF', 'MIN.AX', 'MINE.V', 'MIO.AX', 'MJDLF', 'MKNGF', 'MKR.V', 'MKRIF', 'MLG.AX', 'MLGAF', 'MLGCF', 'MLKKF', 'MLLOF', 'MLN.F', 'MLPNF', 'MLSUM.PA', 'MLX.AX', 'MLXEF', 'MLYCF', 'MLZAM.PA', 'MMILF', 'MMLTF', 'MMMKF', 'MNMRF', 'MNXMF', 'MNXXF', 'MOLYMET.SN', 'MOON.V', 'MP', 'MQM.V', 'MQMIF', 'MQR.AX', 'MRD.AX', 'MREYF', 'MRF.JO', 'MRL.AX', 'MRQ.AX', 'MRR.AX', 'MRRDF', 'MSMGF', 'MSV.AX', 'MT1.F', 'MTAL', 'MTB.AX', 'MTBMF', 'MTC.AX', 'MTH.AX', 'MTJ3.F', 'MTLFF', 'MTLI', 'MTRN', 'MTS.V', 'MTU.JO', 'MUNMF', 'MURCF', 'MURMF', 'MXB0.F', 'MXTRF', 'MYRLF', 'MYRUF', 'N3EN.F', 'N6D.F', 'N8HP.F', 'N9SA.F', 'NAK', 'NATUF', 'NB', 'NBLC.V', 'NBM.V', 'NBMFF', 'NBMLF', 'NBRFF', 'NBTRF', 'NBYCF', 'NC1.AX', 'NC3.F', 'NCFFF', 'NCKL.JK', 'NCPCF', 'NDMCF', 'NE0.F', 'NEV.V', 'NEXA', 'NFM.AX', 'NGLOY', 'NGPHF', 'NGX.AX', 'NGXLF', 'NGXXF', 'NIC.AX', 'NICL.JK', 'NICLF', 'NICMF', 'NICN.V', 'NICOF', 'NILA', 'NILIF', 'NIM.AX', 'NIO.ST', 'NIOBW', 'NIOCF', 'NKGFF', 'NKLXF', 'NLGCF', 'NLIBF', 'NLPXF', 'NM5.F', 'NM9A.F', 'NMG', 'NMNZF', 'NMR.AX', 'NMREF', 'NMTLF', 'NNL.AX', 'NNX.V', 'NOAL.V', 'NOM.OL', 'NOURF', 'NPTH', 'NQMLF', 'NRGYF', 'NRHI', 'NRN.V', 'NRRMF', 'NRVTF', 'NRX.AX', 'NSHRF', 'NSRCF', 'NTCPF', 'NTM.AX', 'NTU.AX', 'NU0.F', 'NVA', 'NVA.AX', 'NVLHF', 'NVRA.F', 'NVSGF', 'NWC.AX', 'NWCBF', 'NWCCF', 'NWPG', 'NWX.V', 'NWXPF', 'NXGM', 'NYM.AX', 'NYR.BR', 'NYRSY', 'NZN.V', 'NZP.V', 'O08.F', 'O2R2.F', 'OARFF', 'OBGRF', 'OC.V', 'OCKA.F', 'OCN.AX', 'OD6.AX', 'ODX-H.V', 'OGD.TO', 'OLY.AX', 'OM.V', 'OM1.AX', 'OMH.AX', 'OMHLF', 'OMZNF', 'ORESF', 'ORISSAMINE.NS', 'ORMNF', 'ORN.JO', 'ORQ1.F', 'OTMN', 'OTS-H.V', 'OU5A.F', 'OUW0.F', 'OW3.F', 'OWRDF', 'P4G.F', 'P77.F', 'PAANF', 'PAT.AX', 'PBL.AX', 'PBMLF', 'PCRCF', 'PE&OLES.MX', 'PE.V', 'PEIMF', 'PEK.AX', 'PEMC.V', 'PEMIF', 'PEMSF', 'PEX.AX', 'PFE.AX', 'PFFOF', 'PGE.V', 'PGEZF', 'PGP.V', 'PGPGF', 'PGX.V', 'PGXFF', 'PGZ.V', 'PGZFF', 'PHO.AX', 'PIHN', 'PILBF', 'PIM.AX', 'PJXRF', 'PKREF', 'PLL', 'PLLTL', 'PLN.AX', 'PLS.AX', 'PLSR.V', 'PLY.V', 'PLYFF', 'PMAM3.SA', 'PMDRF', 'PMETF', 'PML.V', 'PMMCF', 'PMOMF', 'PMYLF', 'PNGM', 'PNN.AX', 'PNPNF', 'PNTZF', 'PNX.F', 'POD.AX', 'POROF', 'POW.L', 'PPZRF', 'PR1.AX', 'PRCK', 'PRE.L', 'PREM.L', 'PRMMF', 'PRMNF', 'PRN.AX', 'PSC.AX', 'PSGR', 'PSL.AX', 'PSRHF', 'PTNDY', 'PTRO.JK', 'PUCOBRE.SN', 'PUR.AX', 'PURE.JK', 'PUTKF', 'PUTKY', 'PVT.AX', 'PWCRF', 'PWMCF', 'PWRLF', 'PWRMF', 'PX0.F', 'PXC.L', 'PXCLF', 'PXMFF', 'PXX.AX', 'PXXXF', 'PYV.F', 'Q.V', 'QCCUF', 'QFB.F', 'QGL.AX', 'QMCQF', 'QNBMF', 'QNICF', 'QQREF', 'QR20.F', 'QUEXF', 'QURI.V', 'QXR.AX', 'QZMRF', 'R1EA.F', 'R2FA.F', 'R2TA.F', 'R8L.F', 'R9GA.F', 'RADR', 'RAJMET.NS', 'RAREF', 'RARMF', 'RATHF', 'RBMTF', 'RBWRF', 'RCKTF', 'RDM.AX', 'RDS.AX', 'RDTMF', 'RE8.F', 'REB.DU', 'REC.AX', 'RECHF', 'REE.AX', 'REE.V', 'REEEF', 'REEMF', 'REMRF', 'REZZF', 'RFC4.F', 'RFLXF', 'RFXRF', 'RGLSF', 'RGM-H.V', 'RGVNF', 'RIE.AX', 'RIGMF', 'RIO', 'RIO.AX', 'RIO.BA', 
'RIO.L', 'RKMSF', 'RKR.V', 'RLC.AX', 'RM9.F', 'RMESF', 'RMI.AX', 'RMNXF', 'RNU.AX', 'RNX.AX', 'ROYIF', 'RR1.F', 'RRMLF', 'RRR.AX', 'RRR.L', 'RTNTF', 'RTR.AX', 'RTRFF', 'RTTGF', 'RVT.AX', 'RXF.F', 'RYOOF', 'RZT.F', 'S0G.F', 'S2R.AX', 'S32.AX', 'S32.L', 'S35A.F', 'S5M.F', 'S5WA.F', 'S6Q1.F', 'S8QN.F', 'SAGARDEEP.NS', 'SAGGF', 'SAIDF', 'SAL.F', 'SALT.V', 'SAM.TO', 'SAMMF', 'SAO.V', 'SARKY.IS', 'SAU.TO', 'SAV.F', 'SAVNF', 'SBR.AX', 'SBWFF', 'SCCFF', 'SCCO', 'SCLTF', 'SCVFF', 'SCYYF', 'SCZ.V', 'SEHKF', 'SER.AX', 'SFR.AX', 'SFRRF', 'SFX.AX', 'SGA.AX', 'SGML', 'SGPTF', 'SGQ.AX', 'SGZ.V', 'SHCMF', 'SHERF', 'SHL.V', 'SHN.AX', 'SHP.AX', 'SIERF', 'SILEF', 'SJRNF', 'SKE', 'SKKRF', 'SKP.V', 'SLB.AX', 'SLCO', 'SLGGF', 'SLI', 'SLMFF', 'SLMLF', 'SLROF', 'SLS.AX', 'SLSDF', 'SLTFF', 'SLVDF', 'SLZ.AX', 'SLZNF', 'SMC.TO', 'SMDZF', 'SME.V', 'SMM.AX', 'SMMYY', 'SMRVF', 'SMTSF', 'SNLGF', 'SNTAF', 'SOUHY', 'SPC.V', 'SPCNF', 'SPNRF', 'SPQ.AX', 'SQH.F', 'SQX.AX', 'SRAFF', 'SRCAF', 'SRCGF', 'SRCX', 'SRGMF', 'SRGXF', 'SRHYY', 'SRKZF', 'SRLZF', 'SRN.AX', 'SRQRF', 'SRR0.F', 'SRSLF', 'SRSN', 'SRSR', 'SRZ.AX', 'SS5.F', 'SSEBF', 'SSYRF', 'STACF', 'STGDF', 'STHFF', 'STKXF', 'STLNF', 'STMNF', 'STRPF', 'STS.V', 'STSBF', 'STTSY', 'STUD.V', 'STUV.V', 'STXPF', 'SUH.AX', 'SUIFF', 'SULMF', 'SVBL', 'SVMFF', 'SVML.L', 'SVMLF', 'SVY.AX', 'SWNLF', 'SXOOF', 'SYA.AX', 'SYAXF', 'SYHBF', 'SZ71.F', 'T4W.F', 'T5MB.F', 'T62.F', 'T7Y1.F', 'TAOFF', 'TASEF', 'TCVNF', 'TDG.V', 'TEA.V', 'TECK', 'TECK-A.TO', 'TELHF', 'TEM.AX', 'TFM.V', 'TG50.F', 'TG6.AX', 'TGB', 'TGN.AX', 'TGOLF', 'THORF', 'THR.AX', 'THSGF', 'THURF', 'TI.TO', 'TIH1.F', 'TIMCF', 'TIMNF', 'TINO', 'TINS.JK', 'TKM.AX', 'TKO.L', 'TKRFF', 'TLGRF', 'TLOFF', 'TM1.L', 'TMASF', 'TMC', 'TMCWW', 'TMD0.F', 'TMET.V', 'TMG.AX', 'TMGLF', 'TMQ', 'TMRC', 'TMRFF', 'TMS.AX', 'TMSA.F', 'TNC.AX', 'TNGZF', 'TNR.V', 'TNREF', 'TNTMF', 'TOEYF', 'TON.AX', 'TORVF', 'TR3.F', 'TRBMF', 'TRCTF', 'TREVQ', 'TRKUF', 'TROUF', 'TRRCF', 'TRRGF', 'TRRXF', 'TTEXF', 'TTIRF', 'TTLXF', 'TTSRF', 'TUN.L', 'TVCCF', 'TVER', 'TVIPF', 'TVN.AX', 'TWO.V', 'TWOSF', 'TWRFF', 'TYM.L', 'TZN.AX', 'U40.F', 'U7N1.F', 'U9V.MU', 'UAMY', 'UD7C.F', 'UDE0.F', 'UH7A.F', 'UKI.F', 'ULTHF', 'ULTXF', 'UMR.L', 'UPCO', 'URANF', 'URCFF', 'UREKF', 'UREQF', 'URPLF', 'USAR', 'USCUF', 'USGO', 'USGOW', 'USHAF', 'USREF', 'UURAF', 'UUSAF', 'V44.F', 'V5W.F', 'V8V.F', 'VANAF', 'VAND.V', 'VANTF', 'VAR.AX', 'VBAMF', 'VC1.F', 'VCT.V', 'VCUFF', 'VCVVF', 'VDO-H.V', 'VDOMF', 'VDTAF', 'VEDL.NS', 'VERT.V', 'VHM.AX', 'VLT.V', 'VLTLF', 'VLTMF', 'VM3.F', 'VML.AX', 'VMXXF', 'VOP.F', 'VOY0.F', 'VR8.AX', 'VRBFF', 'VRC.AX', 'VRCFF', 'VRCV', 'VRRCF', 'VRX.AX', 'VTMLF', 'VTMXF', 'VUL.AX', 'VUL.F', 'VUL.V', 'VULMF', 'VULNF', 'VZLA', 'W2F.F', 'W5E.F', 'WA1.AX', 'WA8.AX', 'WAK.AX', 'WALRF', 'WAORF', 'WARAF', 'WCUFF', 'WDFCF', 'WDGNF', 'WESMF', 'WEZ.JO', 'WHY.V', 'WIN.AX', 'WKT.AX', 'WLFFF', 'WMK.V', 'WML.V', 'WMLLF', 'WMNNF', 'WN9.F', 'WOLV', 'WR1.AX', 'WRC.F', 'WRE1.F', 'WRN', 'WRSLF', 'WSZWF', 'WTCRF', 'WTCZF', 'WX0.F', 'WYX.AX', 'X0V.F', 'X28.F', 'XCPT', 'XJC.F', 'XPL', 'XT3B.F', 'XTC.AX', 'XTERF', 'XTPT', 'XVALO.MC', 'XXIX.V', 'Y0L.F', 'Y33.F', 'YAR.AX', 'YBB.F', 'YORKF', 'YT1.F', 'YWRLF', 'Z25.F', 'ZBNI.V', 'ZBNIF', 'ZCB.F', 'ZCC-H.V', 'ZE0.F', 'ZEO.AX', 'ZMI.AX', 'ZNC.AX', 'ZNCXF', 'ZNWD.L', 'ZNWLF', 'ZYR1.F', 'ZZZOF']\n", "Saved to 'df_prices.csv'.\n" ] } ], "source": [ "# ------------------------------------------------------------\n", "# Build df_prices.csv for HG=F + tickers in unique_companies_copper.csv\n", "# • period=\"5y\" (more reliable than start/end for some 
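venues)\n", "# NOTE: the batch path below actually uses period=\"6y\"; only the single-ticker\n", "# repair path uses \"5y\", and tickers come from unique_companies_copper_updated.csv\n", "# (period avoids start/end quirks seen for some 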
venues)\n", "# • Prefer 'Adj Close', fallback to 'Close'\n", "# • Re-download single tickers that are all-NaN in batch (e.g., 2IK.F)\n", "# ------------------------------------------------------------\n", "# pip install yfinance pandas\n", "\n", "import pandas as pd\n", "import yfinance as yf\n", "\n", "CSV_PATH = \"unique_companies_copper_updated.csv\"\n", "TICKER_COL = \"PrimaryTicker\"\n", "UNDERLYING = \"HG=F\"\n", "BATCH_SIZE = 50\n", "OUT_CSV = \"df_prices.csv\"\n", "\n", "# --- Read tickers ---\n", "tickers = (\n", " pd.read_csv(CSV_PATH, usecols=[TICKER_COL])[TICKER_COL]\n", " .dropna().astype(str).str.strip().str.upper().tolist()\n", ")\n", "tickers = sorted(set(tickers))\n", "if UNDERLYING not in tickers:\n", " tickers = [UNDERLYING] + tickers\n", "\n", "print(f\"Found {len(tickers)} tickers (including {UNDERLYING}).\")\n", "\n", "def _extract_adj_or_close(df_multi: pd.DataFrame) -> tuple[pd.DataFrame, list[str]]:\n", " \"\"\"From yfinance multi-ticker frame, prefer 'Adj Close', else 'Close' per ticker.\"\"\"\n", " if not isinstance(df_multi.columns, pd.MultiIndex):\n", " raise ValueError(\"Expected MultiIndex columns for multi-ticker download.\")\n", " fields = set(df_multi.columns.get_level_values(-1))\n", " adj = df_multi.xs(\"Adj Close\", axis=1, level=-1, drop_level=True) if \"Adj Close\" in fields else pd.DataFrame(index=df_multi.index)\n", " clo = df_multi.xs(\"Close\", axis=1, level=-1, drop_level=True) if \"Close\" in fields else pd.DataFrame(index=df_multi.index)\n", "\n", " cols = sorted(set(adj.columns).union(clo.columns))\n", " out = pd.DataFrame(index=df_multi.index, columns=cols, dtype=\"float64\")\n", " used_close = []\n", "\n", " for t in cols:\n", " a = adj[t] if t in adj.columns else None\n", " c = clo[t] if t in clo.columns else None\n", " if a is not None and not a.dropna().empty:\n", " out[t] = a\n", " elif c is not None and not c.dropna().empty:\n", " out[t] = c\n", " used_close.append(t)\n", " return out, used_close\n", "\n", "def _download_batch(batch):\n", " df = yf.download(\n", " tickers=batch,\n", " period=\"6y\",\n", " interval=\"1d\",\n", " auto_adjust=False,\n", " actions=False,\n", " progress=False,\n", " group_by=\"ticker\",\n", " threads=True\n", " )\n", " if isinstance(df.columns, pd.MultiIndex):\n", " return _extract_adj_or_close(df)\n", " else:\n", " # Single-ticker shape\n", " tkr = batch[0]\n", " adj = df.get(\"Adj Close\")\n", " clo = df.get(\"Close\")\n", " used_close = []\n", " if adj is not None and not adj.dropna().empty:\n", " out = adj.rename(tkr).to_frame()\n", " elif clo is not None and not clo.dropna().empty:\n", " out = clo.rename(tkr).to_frame()\n", " used_close.append(tkr)\n", " else:\n", " out = pd.DataFrame(index=df.index, columns=[tkr], dtype=\"float64\")\n", " return out, used_close\n", "\n", "def _download_single(tkr: str) -> pd.Series:\n", " \"\"\"Single-ticker repair path; prefer Adj Close, else Close.\"\"\"\n", " df = yf.download(\n", " tickers=tkr,\n", " period=\"5y\",\n", " interval=\"1d\",\n", " auto_adjust=False,\n", " actions=False,\n", " progress=False\n", " )\n", " s = df.get(\"Adj Close\")\n", " if s is None or s.dropna().empty:\n", " s = df.get(\"Close\")\n", " if s is None:\n", " return pd.Series(dtype=\"float64\", name=tkr)\n", " return s.rename(tkr)\n", "\n", "# --- Batch download + merge ---\n", "frames, used_close_all = [], []\n", "for i in range(0, len(tickers), BATCH_SIZE):\n", " batch = tickers[i:i+BATCH_SIZE]\n", " print(f\"Downloading batch {i//BATCH_SIZE + 1}: {len(batch)} tickers\")\n", " part, 
used_close = _download_batch(batch)\n", " frames.append(part)\n", " used_close_all.extend(used_close)\n", "\n", "df_prices = pd.concat(frames, axis=1)\n", "df_prices = df_prices.loc[:, ~df_prices.columns.duplicated()].sort_index()\n", "\n", "# --- Repair tickers that are NaN-only or missing after batch ---\n", "to_repair = [t for t in tickers if (t in df_prices.columns and df_prices[t].dropna().empty) or (t not in df_prices.columns)]\n", "to_repair = sorted(set(to_repair))\n", "if to_repair:\n", " print(f\"Repairing via single-ticker fetch: {to_repair}\")\n", " for t in to_repair:\n", " s = _download_single(t)\n", " if not s.dropna().empty:\n", " df_prices = df_prices.reindex(df_prices.index.union(s.index)).sort_index()\n", " df_prices[t] = s.reindex(df_prices.index)\n", "\n", "# --- Order columns; drop all-NaN tickers ---\n", "ordered_cols = [UNDERLYING] + [t for t in tickers if t != UNDERLYING and t in df_prices.columns]\n", "df_prices = df_prices.reindex(columns=ordered_cols)\n", "all_nan_cols = [c for c in df_prices.columns if df_prices[c].dropna().empty]\n", "if all_nan_cols:\n", " print(f\"Dropping tickers with no usable data: {all_nan_cols}\")\n", " df_prices = df_prices.drop(columns=all_nan_cols)\n", "\n", "# --- Report fallback usage ---\n", "used_close_all = sorted(set([t for t in used_close_all if t in df_prices.columns]))\n", "if used_close_all:\n", " print(f\"Used 'Close' fallback for: {used_close_all}\")\n", "\n", "print(\"df_prices shape:\", df_prices.shape)\n", "print(\"Columns:\", list(df_prices.columns))\n", "\n", "# --- Save ---\n", "df_prices.to_csv(OUT_CSV, index_label=\"Date\", float_format=\"%.6f\")\n", "print(f\"Saved to '{OUT_CSV}'.\")\n" ] }, { "cell_type": "code", "execution_count": 24, "id": "11079562", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "=== Missing % by ticker ===\n", " missing_pct\n", "NIOBW 99.936306\n", "PWMCF 99.936306\n", "USGOW 99.936306\n", "SSEBF 99.936306\n", "MEDAF 98.216561\n", "... 
...\n", "MLSUM.PA 1.974522\n", "NYR.BR 1.974522\n", "AMG.AS 1.974522\n", "ERA.PA 1.910828\n", "MLZAM.PA 1.910828\n", "\n", "[1806 rows x 1 columns]\n", "\n", "Dropping 555 tickers (> 10% missing): ['NIOBW', 'PWMCF', 'USGOW', 'SSEBF', 'MEDAF', 'AXO.V', 'BMOOF', 'APCOF', '603124.SS', 'LEXTF', 'MRD.AX', 'RECHF', 'NFM.AX', 'WAORF', 'GGTHF', 'WESMF', 'NKGFF', 'RCKTF', 'DETRF', 'AMRRY', 'LIFFF', 'ASCUF', 'NGXXF', 'PMETF', 'EMU.AX', 'NVA', '1MC.AX', 'EMH.AX', 'CSC.AX', 'IMTCF', 'ASE.AX', 'CWA0.F', 'MAC.AX', 'AWLIF', 'CPPMF', 'C7A.AX', 'PSRHF', 'JGRRF', 'CC9.AX', 'SRSLF', 'LAC', 'VMXXF', 'PLN.AX', 'DMXCF', '301511.SZ', 'NGXLF', 'JBY.AX', 'PLSR.V', 'SRQRF', 'MET1.L', 'USAR', 'Y0L.F', 'HNCUF', 'UREKF', 'TRRCF', 'CHW.AX', 'DY6.AX', 'LITSF', 'COSAF', 'HZLIF', 'NLIBF', 'FRSPF', 'NGX.AX', 'LITRF', 'NCKL.JK', 'EUEMF', 'FCLIF', 'ITMIF', 'CYGGF', 'SGPTF', 'VANAF', 'CNRSF', 'CPR.JO', '601061.SS', 'HILL.JK', 'USGO', 'CVKMD.IS', 'MYRUF', '601121.SS', 'EG1.AX', 'PRMMF', 'LM1.AX', 'NB', 'EELFF', 'LIRC.TO', 'LOMEF', 'FLMCF', 'NOAL.V', 'CTLHF', 'TMGLF', 'SQX.AX', 'MAXXF', 'QNBMF', 'CLA.L', 'VBAMF', 'HTM.AX', 'ADC.AX', 'DYM.AX', 'LTSRF', 'CYPMF', 'VHM.AX', 'H3N.F', 'WRSLF', '9EU.F', 'DES.AX', 'AWMLF', 'CRICF', 'RVT.AX', 'PAT.AX', 'BAPA.F', 'MGMNF', '7JL.F', 'L1M.AX', 'GLNLF', 'GNRGF', 'GTMLF', 'MEEEF', 'Y33.F', 'JR1.F', '862.F', 'WX0.F', 'RFLXF', 'JR9.F', 'UMR.L', 'LML.AX', '381.F', 'ATN.L', 'ANDMF', 'BUS.AX', 'VAND.V', 'CPORF', '48U0.F', 'HT9.F', 'CMG.AX', 'BYDMF', 'WARAF', 'STLNF', 'GRDAF', 'JI4.F', 'CMTNF', '7KO.F', 'FLM1.F', '688231.SS', 'LLLAF', 'X28.F', 'QURI.V', 'HRE.AX', 'ALEEF', '8CD1.F', 'SVMLF', 'LINRF', 'HF2.F', 'EVNIF', 'NICN.V', 'PMOMF', 'PWRLF', 'FCM.L', 'WN9.F', '5P0.F', 'CDBDF', '8L10.F', 'LTHCF', 'SGA.AX', 'IONR', 'IE', 'EO0.F', '2220.F', 'IE.TO', 'MGA.AX', 'OCN.AX', 'IPX', '30Z0.F', 'USREF', 'LLL.AX', 'OD6.AX', 'KNG.AX', 'UUSAF', 'ZNWLF', '0PA.F', 'VLTMF', 'NNL.AX', 'TG6.AX', 'BVR.AX', 'LBNKF', 'CDMNF', '9D00.F', 'CPCPF', 'GBMIF', 'FRPMF', 'KEN.L', 'SNTAF', '301219.SZ', '688102.SS', 'SLS.AX', 'UKI.F', 'JNDAF', 'LPM.AX', 'PR1.AX', 'INUMF', 'CODMF', 'NYM.AX', 'P77.F', 'CSCCF', 'TR3.F', '1SN.L', 'ESPN.V', '603132.SS', 'LRD.AX', 'IG6.AX', 'GDTRF', 'ER70.F', 'PIM.AX', 'CTL.L', 'SLB.AX', 'ABRMF', 'OM1.AX', '9TI.F', 'CHKMF', 'BHLIF', 'CRML', 'NICOF', 'P4G.F', 'KLI.AX', 'WA1.AX', 'VZLA', '4V9.F', 'CMO.AX', '7LY0.F', 'ZBNIF', 'CPER.V', 'TRCTF', 'NC1.AX', 'NU0.F', 'HCH.V', 'ARAAF', 'GRE.AX', 'IMI.AX', '4XJ.F', 'JD1.F', 'GSCU.L', 'K2I.F', 'GRLRF', 'LZM', 'SVML.L', 'AW1.AX', 'TTLXF', 'E9E.F', 'HUSIF', 'J1Q0.F', 'YT1.F', 'QNICF', 'GELEF', 'WDGNF', 'WR1.AX', 'MNMRF', 'PBL.AX', 'LITM', 'NVLHF', 'Q.V', 'ASCU.TO', 'NIM.AX', 'H6F.F', 'CPM.AX', 'TM1.L', 'EV1.AX', 'FRG.L', 'LTRBF', 'GT1.AX', 'RTRFF', '8OH.F', 'ALLIF', 'AR1.AX', 'C29.AX', 'MMILF', '1QV.F', 'AUE.AX', 'JZRIF', '3FR0.F', '0ED.F', 'CCOOF', 'VCUFF', 'TUN.L', 'LYK.AX', 'ITM.AX', 'MANVF', 'BTRMF', '97E0.F', '540.F', 'REC.AX', '7CX.F', 'JNOMF', 'M0LY.F', '87K.F', 'GROC.L', 'DAL.AX', 'MTAL', 'EULIF', '97G0.F', 'RRR.AX', 'TMCWW', 'WIN.AX', 'NICL.JK', '9ED.F', 'TMC', 'CAMLF', '301026.SZ', 'NBTRF', 'HVY.AX', 'LGM.AX', 'CPO.AX', 'BMVVF', 'TAOFF', 'KNI.AX', 'ZBNI.V', 'SAGGF', 'TCVNF', '5298.KL', 'PFE.AX', 'HECOF', 'ORQ1.F', 'ALI0.F', 'BHL.L', 'CAPFF', 'WCUFF', 'BMM.AX', 'AGXPF', 'SPCNF', 'CHR.AX', 'ESXMF', 'BUR.AX', 'CRI.V', 'SCVFF', 'FNICF', 'REMRF', 'AR3.AX', 'VM3.F', 'AM7.AX', '7UE.F', 'SNLGF', 'LM8.AX', 'ANSNF', 'ECGFF', 'ADTLF', '7S50.F', 'CDL.L', 'AFP.L', 'ORMNF', 'EMTRF', '7Z00.F', 'LEL.AX', 'GCRCF', 'F5J0.F', '6Q5.F', '2QD0.F', 'IXRRF', 'GL1.AX', 
'IDZ1.F', 'BCA.AX', 'MLG.AX', 'NBLC.V', 'ALB.AX', 'SS5.F', 'TMRFF', 'CR9.AX', 'HSRMF', '7GI.F', 'BWJ0.F', 'LTH.V', 'HANCF', 'ESVNF', '5IP.F', 'MREYF', 'GEM.OL', '300930.SZ', 'FYIRF', 'MLPNF', 'YORKF', 'NRRMF', 'FRB.AX', 'SPC.V', 'THORF', '9PZ.F', 'NM5.F', 'REEEF', 'IIDDY', 'GEN.AX', '7Y2.F', '3WQ0.F', 'LANRF', 'RBWRF', '8BY1.F', 'ZE0.F', '2SO0.F', 'ELMTF', 'BELMF', 'TRKUF', 'CATTF', 'CUSN.L', 'DLPRF', '5PE.F', 'M24.AX', 'LPDNF', 'KLDCF', 'W2F.F', 'AZLAF', 'GSCCF', 'NICLF', 'PCRCF', 'GLIOF', 'ARRRF', 'IONGF', 'ULTHF', 'PWRMF', 'RKMSF', 'TMASF', 'SEHKF', 'LTMCF', 'EVGUF', 'CCWOF', 'AUMC', 'CPPKF', 'BENZF', 'SKKRF', 'NRVTF', 'EMHLF', 'QQREF', 'BBBMF', 'ALTPF', 'QCCUF', 'MTLFF', 'TRBMF', 'AZZTF', 'MNXXF', 'EUMNF', 'TTSRF', 'GESI', 'EEMMF', 'ADBRF', 'SYHBF', 'HANNF', 'GENMF', 'THSGF', 'ELBM', 'PNPNF', 'GPPRF', 'AZMCF', 'TIMCF', '2IK.F', 'NMG', 'JUGRF', 'EVKRF', 'DFMTF', 'GRSLF', 'LISMF', 'SLGGF', 'RGLSF', 'GHVNF', 'HPQFF', 'STKXF', 'BRKCF', 'GIGGF', 'IBATF', 'CMET.L', '1VU0.F', 'ARDNF', 'TTIRF', '7WW.F', 'ZYR1.F', 'KP10.F', 'ALBAF', '3JC0.F', 'BNZ.AX', 'EMGDF', 'MTBMF', 'DM1.AX', 'AKO.AX', 'EGMMF', '0WL.F', 'VULNF', '40A.F', 'HGO.F', 'SAVNF', 'WAK.AX', 'CPN.AX', '8XJ.F', 'MHK.AX', 'MKNGF', 'NMR.AX', 'TNC.AX', 'BM8.AX', 'JLRRF', 'COD.AX', 'MEG.AX', 'BTRYF', 'DRR.AX', 'SSYRF', 'M2R.AX', 'E06A.F', '77C.F', 'ELECF', 'PMYLF', 'USHAF', 'EMOTF', 'CRSTF', 'G6D2.F', 'ASMMF', 'CRTM.L', 'BATXF', '4YW.F', 'UDE0.F', 'GMWA.F', '30Z.F', 'GCXXF', 'ETRUF', 'GOLXF', 'SLVDF', 'WALRF', 'NBRFF', 'SULMF', 'ARDDF', 'VTMXF', 'WRE1.F', '5HR.F', '5KG0.F', 'CNIKF', 'EBM.F', 'STXPF', 'CLRSF', 'VOY0.F', '601609.SS', 'PRE.L', '002978.SZ', 'MP', '2REA.F', 'JLL.AX', '4XH.F', 'NDMCF', 'BLSTF', 'NICMF', '4W0.F', 'ERR.F', '4U50.F', 'GLIIF', 'N6D.F', 'IFSH.JK', 'ORESF', '5EG.F', 'DCNNF', 'KVGOF', 'CST.AX', 'IMIMF', '1OI.F', 'CBE.AX', 'SMM.AX', 'PURE.JK']\n", "\n", "Shapes:\n", "Before: (1570, 1806) After: (1570, 1251)\n", "Saved to 'df_prices_final.csv'.\n" ] } ], "source": [ "# ------------------------------------------------------------\n", "# Load df_prices.csv, compute missing % per ticker,\n", "# drop columns with >50% missing, save df_prices_final.csv\n", "# ------------------------------------------------------------\n", "import pandas as pd\n", "\n", "IN_CSV = \"df_prices.csv\"\n", "OUT_CSV = \"df_prices_final.csv\"\n", "THRESH = 10.0 # percent\n", "\n", "df_prices = pd.read_csv(IN_CSV, parse_dates=[\"Date\"], index_col=\"Date\")\n", "\n", "# Missing % over the full DataFrame index\n", "missing_pct = df_prices.isna().mean() * 100.0\n", "report = (\n", " pd.DataFrame({\"missing_pct\": missing_pct})\n", " .sort_values(\"missing_pct\", ascending=False)\n", ")\n", "print(\"=== Missing % by ticker ===\")\n", "print(report)\n", "\n", "# Drop tickers with >50% missing\n", "to_drop = report.index[report[\"missing_pct\"] > THRESH].tolist()\n", "print(f\"\\nDropping {len(to_drop)} tickers (> {THRESH:.0f}% missing): {to_drop}\")\n", "\n", "df_prices_final = df_prices.drop(columns=to_drop, errors=\"ignore\")\n", "\n", "print(\"\\nShapes:\")\n", "print(\"Before:\", df_prices.shape, \"After:\", df_prices_final.shape)\n", "\n", "df_prices_final.to_csv(OUT_CSV, index_label=\"Date\", float_format=\"%.6f\")\n", "print(f\"Saved to '{OUT_CSV}'.\")\n" ] }, { "cell_type": "code", "execution_count": 25, "id": "8dc7673c", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Saved clean, rectangular prices to 'df_prices_final.csv' with shape (1409, 1251).\n" ] } ], "source": [ "# 
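Quick illustration of the common-window step below (hypothetical dates):\n", "#   ticker A first trades 2019-08-12, ticker B 2020-01-06 -> common_start = 2020-01-06\n", "#   ticker A last trades 2025-06-27, ticker B 2025-05-30 -> common_end = 2025-05-30\n", "# i.e. max(first_valid)..min(last_valid) is the span where every column has data.\n", "# 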
------------------------------------------------------------\n", "# Clean df_prices_final: common window + bfill→ffill + final NA drop\n", "# Input : df_prices_final.csv (your current file with some missing)\n", "# Output: df_prices_final.csv (overwritten, rectangular, NA-free)\n", "# ------------------------------------------------------------\n", "import pandas as pd\n", "\n", "IN_CSV = \"df_prices_final.csv\"\n", "OUT_CSV = \"df_prices_final.csv\" # overwrite in place\n", "\n", "df = pd.read_csv(IN_CSV, parse_dates=[\"Date\"], index_col=\"Date\").sort_index()\n", "\n", "# 1) Common window (everyone has started and not yet delisted)\n", "first_valid = df.apply(pd.Series.first_valid_index)\n", "last_valid = df.apply(pd.Series.last_valid_index)\n", "\n", "common_start = max(first_valid.dropna())\n", "common_end = min(last_valid.dropna())\n", "\n", "df = df.loc[common_start:common_end].copy()\n", "\n", "# 2) Business-day index to harmonize calendars\n", "bidx = pd.date_range(df.index.min(), df.index.max(), freq=\"B\")\n", "df = df.reindex(bidx)\n", "\n", "# 3) Fill:\n", "# - Backfill once to seed the first business day for tickers closed on common_start\n", "# - Forward-fill for holiday gaps etc.\n", "df = df.bfill(limit=None).ffill(limit=None)\n", "\n", "# 4) Final sanity check: drop any rare rows still containing NA\n", "before_rows = df.shape[0]\n", "df = df.dropna(how=\"any\")\n", "after_rows = df.shape[0]\n", "if before_rows != after_rows:\n", " print(f\"Dropped {before_rows - after_rows} rows that still had NAs after filling.\")\n", "\n", "# 5) Save\n", "df.to_csv(OUT_CSV, index_label=\"Date\", float_format=\"%.6f\")\n", "print(f\"Saved clean, rectangular prices to '{OUT_CSV}' with shape {df.shape}.\")\n" ] }, { "cell_type": "code", "execution_count": 26, "id": "c5f250f5", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/Users/yanzhe0329/Desktop/Stock Selection/Stock_Benchmark Analysis copy/.venv/lib/python3.12/site-packages/pandas/core/internals/blocks.py:395: RuntimeWarning: invalid value encountered in log\n", " result = func(self.values, **kwargs)\n", "/Users/yanzhe0329/Desktop/Stock Selection/Stock_Benchmark Analysis copy/.venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.py:3065: RuntimeWarning: invalid value encountered in divide\n", " c /= stddev[:, None]\n", "/Users/yanzhe0329/Desktop/Stock Selection/Stock_Benchmark Analysis copy/.venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.py:3066: RuntimeWarning: invalid value encountered in divide\n", " c /= stddev[None, :]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "✓ Saved corr_beta.csv, excess_summary.csv (with corr_to_copper & |corr|>0.5 in labels)\n" ] } ], "source": [ "import pandas as pd\n", "import numpy as np\n", "import statsmodels.api as sm\n", "\n", "# ---------- PARAMETERS ----------\n", "IN_CSV = \"df_prices_final.csv\" # prices, Date index\n", "BENCH = \"HG=F\" # underlying metal\n", "FREQ = \"W-FRI\" # 'D' for daily or 'W-FRI'\n", "ROLL_WINDOW = None # 26 weeks ≈ 6 months; set None to skip\n", "USE_LOG_RET = True # True = log-returns, False = pct-change\n", "ALPHA_T_CUT = 0.0 # |t| threshold for alpha significance\n", "SHARPE_CUT = 0.0 # >0 required for outperform label\n", "\n", "# Annualisation factor\n", "PER_YEAR = 52 if FREQ.startswith(\"W\") else 252\n", "\n", "# ---------- LOAD PRICES ----------\n", "px = (pd.read_csv(IN_CSV, parse_dates=[\"Date\"], index_col=\"Date\")\n", " .sort_index())\n", "assert BENCH in px.columns, 
f\"{BENCH} not found in columns.\"\n", "\n", "# Optional resample (weekly smooths roll/holiday noise)\n", "if FREQ != \"D\":\n", " px = px.resample(FREQ).last()\n", "\n", "# ---------- RETURNS ----------\n", "rets = np.log(px).diff() if USE_LOG_RET else px.pct_change()\n", "rets = rets.dropna(how=\"all\")\n", "\n", "bench = rets[BENCH]\n", "stocks = [c for c in rets.columns if c != BENCH]\n", "\n", "# ---------- 1) CORRELATION & BETA ----------\n", "corr = rets[stocks].corrwith(bench) # Series: index=ticker\n", "var_b = bench.var(ddof=1)\n", "beta = rets[stocks].apply(lambda s: s.cov(bench)) / var_b\n", "\n", "corr_beta = (pd.DataFrame({\"corr_to_copper\": corr,\n", " \"beta_to_copper\": beta})\n", " .sort_values(\"corr_to_copper\", ascending=False))\n", "corr_beta.index.name = \"ticker\"\n", "corr_beta.to_csv(\"corr_beta.csv\", float_format=\"%.6f\")\n", "\n", "# Keep maps for reuse in excess_summary\n", "corr_map = corr.to_dict()\n", "beta_map = beta.to_dict()\n", "\n", "# ---------- OPTIONAL ROLLING CORRELATION ----------\n", "if ROLL_WINDOW:\n", " rolling_long = (\n", " rets[stocks]\n", " .rolling(ROLL_WINDOW)\n", " .corr(bench) # wide: Date × tickers\n", " .stack() # -> Series with MultiIndex (Date, ticker)\n", " .rename(\"rolling_corr\")\n", " .rename_axis([\"Date\", \"ticker\"])\n", " .reset_index()\n", " .dropna(subset=[\"rolling_corr\"])\n", " )\n", " rolling_long.to_csv(\"rolling_corr_long.csv\",\n", " index=False, float_format=\"%.6f\")\n", "\n", "# ---------- 2) β-HEDGED EXCESS & SELECTION ----------\n", "def _nw_lags(freq: str) -> int:\n", " return 4 if freq.startswith(\"W\") else 21 # ≈1 month of obs\n", "\n", "hac_lags = _nw_lags(FREQ)\n", "\n", "rows = []\n", "for s in stocks:\n", " # align & drop NA\n", " df_xy = rets[[s, BENCH]].dropna()\n", " if df_xy.empty:\n", " continue\n", "\n", " y = df_xy[s]\n", " X = sm.add_constant(df_xy[BENCH])\n", " model = sm.OLS(y, X).fit(\n", " cov_type=\"HAC\",\n", " cov_kwds={\"maxlags\": hac_lags}\n", " )\n", "\n", " alpha = model.params[\"const\"]\n", " beta_i = model.params[BENCH]\n", " t_alpha = model.tvalues[\"const\"]\n", " alpha_ann = alpha * PER_YEAR\n", "\n", " # β-hedged excess series\n", " r_excess = y - beta_i * df_xy[BENCH]\n", " mu, sd = r_excess.mean(), r_excess.std(ddof=1)\n", " hedged_sharpe = (mu / sd) * np.sqrt(PER_YEAR) if sd > 0 else np.nan\n", " cum_excess_beta_log = r_excess.cumsum().iloc[-1]\n", "\n", " # Simple 1× copper cumulative log excess\n", " cum_excess_1x_log = y.cumsum().iloc[-1] - df_xy[BENCH].cumsum().iloc[-1]\n", "\n", " # Label (added constraint: |corr_to_copper| > 0.5)\n", " corr_val = corr_map.get(s, np.nan)\n", " if (\n", " (alpha_ann > 0)\n", " and (t_alpha > ALPHA_T_CUT)\n", " and (hedged_sharpe > SHARPE_CUT)\n", " and (abs(corr_val) > 0.4)\n", " ):\n", " label = \"Outperform\"\n", " elif (\n", " (alpha_ann < 0)\n", " and (t_alpha < -ALPHA_T_CUT)\n", " and (hedged_sharpe < -SHARPE_CUT)\n", " and (abs(corr_val) > 0.4)\n", " ):\n", " label = \"Underperform\"\n", " else:\n", " label = \"Neutral\"\n", "\n", " rows.append({\n", " \"ticker\": s,\n", " \"corr_to_copper\": corr_val,\n", " \"beta_to_copper\": beta_map.get(s, np.nan),\n", " \"alpha_ann\": alpha_ann,\n", " \"t_alpha\": t_alpha,\n", " \"hedged_sharpe\": hedged_sharpe,\n", " \"cum_excess_beta_log\": cum_excess_beta_log,\n", " \"cum_excess_1x_log\": cum_excess_1x_log,\n", " \"label\": label\n", " })\n", "\n", "excess_summary = (pd.DataFrame(rows)\n", " .set_index(\"ticker\")\n", " .sort_values([\"label\", \"alpha_ann\", \"hedged_sharpe\"],\n", " 
ascending=[True, False, False]))\n", "excess_summary.to_csv(\"excess_summary.csv\", float_format=\"%.6f\")\n", "\n", "print(\"✓ Saved corr_beta.csv,\",\n", " \"rolling_corr_long.csv,\" if ROLL_WINDOW else \"\",\n", " \"excess_summary.csv (with corr_to_copper & |corr|>0.5 in labels)\")\n" ] }, { "cell_type": "code", "execution_count": 20, "id": "8cdec19b", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/Users/yanzhe0329/Desktop/Stock Selection/Stock_Benchmark Analysis copy/.venv/lib/python3.12/site-packages/pandas/core/internals/blocks.py:395: RuntimeWarning: invalid value encountered in log\n", " result = func(self.values, **kwargs)\n", "/Users/yanzhe0329/Desktop/Stock Selection/Stock_Benchmark Analysis copy/.venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.py:3065: RuntimeWarning: invalid value encountered in divide\n", " c /= stddev[:, None]\n", "/Users/yanzhe0329/Desktop/Stock Selection/Stock_Benchmark Analysis copy/.venv/lib/python3.12/site-packages/numpy/lib/_function_base_impl.py:3066: RuntimeWarning: invalid value encountered in divide\n", " c /= stddev[None, :]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "✓ Saved corr_beta.csv, excess_summary.csv (with corr_to_copper)\n" ] } ], "source": [ "# ============================================================\n", "# Copper-linked stocks vs COMEX copper (HG=F)\n", "# • Correlation & beta\n", "# • Rolling correlation\n", "# • β-hedged alpha / excess-return summary\n", "# Outputs:\n", "# corr_beta.csv, rolling_corr_long.csv, excess_summary.csv\n", "# (excess_summary.csv now includes corr_to_copper)\n", "# ============================================================\n", "\n", "import pandas as pd\n", "import numpy as np\n", "import statsmodels.api as sm\n", "\n", "# ---------- PARAMETERS ----------\n", "IN_CSV = \"df_prices_final.csv\" # prices, Date index\n", "BENCH = \"HG=F\" # underlying metal\n", "FREQ = \"W-FRI\" # 'D' for daily or 'W-FRI'\n", "ROLL_WINDOW = None # 26 weeks ≈ 6 months; set None to skip\n", "USE_LOG_RET = True # True = log-returns, False = pct-change\n", "ALPHA_T_CUT = 0.9 # |t| threshold for alpha significance\n", "SHARPE_CUT = 0.0 # >0 required for outperform label\n", "\n", "# Annualisation factor\n", "PER_YEAR = 52 if FREQ.startswith(\"W\") else 252\n", "\n", "# ---------- LOAD PRICES ----------\n", "px = (pd.read_csv(IN_CSV, parse_dates=[\"Date\"], index_col=\"Date\")\n", " .sort_index())\n", "assert BENCH in px.columns, f\"{BENCH} not found in columns.\"\n", "\n", "# Optional resample (weekly smooths roll/holiday noise)\n", "if FREQ != \"D\":\n", " px = px.resample(FREQ).last()\n", "\n", "# ---------- RETURNS ----------\n", "rets = np.log(px).diff() if USE_LOG_RET else px.pct_change()\n", "rets = rets.dropna(how=\"all\")\n", "\n", "bench = rets[BENCH]\n", "stocks = [c for c in rets.columns if c != BENCH]\n", "\n", "# ---------- 1) CORRELATION & BETA ----------\n", "corr = rets[stocks].corrwith(bench) # Series: index=ticker\n", "var_b = bench.var(ddof=1)\n", "beta = rets[stocks].apply(lambda s: s.cov(bench)) / var_b\n", "\n", "corr_beta = (pd.DataFrame({\"corr_to_copper\": corr,\n", " \"beta_to_copper\": beta})\n", " .sort_values(\"corr_to_copper\", ascending=False))\n", "corr_beta.index.name = \"ticker\"\n", "corr_beta.to_csv(\"corr_beta.csv\", float_format=\"%.6f\")\n", "\n", "# Keep maps for reuse in excess_summary\n", "corr_map = corr.to_dict()\n", "beta_map = beta.to_dict()\n", "\n", "# ---------- OPTIONAL ROLLING CORRELATION ----------\n", 
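"# A sketch of what the call below computes, assuming a 26-obs window and a\n", "# hypothetical column 'FCX': rets['FCX'].rolling(26).corr(bench) is one\n", "# rolling-correlation Series; DataFrame.rolling(w).corr(series) repeats this\n", "# for every column at once, giving the Date × ticker frame that stack() melts long.\n",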
"if ROLL_WINDOW:\n", " rolling_long = (\n", " rets[stocks]\n", " .rolling(ROLL_WINDOW)\n", " .corr(bench) # wide: Date × tickers\n", " .stack() # -> Series with MultiIndex (Date, ticker)\n", " .rename(\"rolling_corr\")\n", " .rename_axis([\"Date\", \"ticker\"])\n", " .reset_index()\n", " .dropna(subset=[\"rolling_corr\"])\n", " )\n", " rolling_long.to_csv(\"rolling_corr_long.csv\",\n", " index=False, float_format=\"%.6f\")\n", "\n", "# ---------- 2) β-HEDGED EXCESS & SELECTION ----------\n", "def _nw_lags(freq: str) -> int:\n", " return 4 if freq.startswith(\"W\") else 21 # ≈1 month of obs\n", "\n", "hac_lags = _nw_lags(FREQ)\n", "\n", "rows = []\n", "for s in stocks:\n", " # align & drop NA\n", " df_xy = rets[[s, BENCH]].dropna()\n", " if df_xy.empty:\n", " continue\n", "\n", " y = df_xy[s]\n", " X = sm.add_constant(df_xy[BENCH])\n", " model = sm.OLS(y, X).fit(\n", " cov_type=\"HAC\",\n", " cov_kwds={\"maxlags\": hac_lags}\n", " )\n", "\n", " alpha = model.params[\"const\"]\n", " beta_i = model.params[BENCH]\n", " t_alpha = model.tvalues[\"const\"]\n", " alpha_ann = alpha * PER_YEAR\n", "\n", " # β-hedged excess series\n", " r_excess = y - beta_i * df_xy[BENCH]\n", " mu, sd = r_excess.mean(), r_excess.std(ddof=1)\n", " hedged_sharpe = (mu / sd) * np.sqrt(PER_YEAR) if sd > 0 else np.nan\n", " cum_excess_beta_log = r_excess.cumsum().iloc[-1]\n", "\n", " # Simple 1× copper cumulative log excess\n", " cum_excess_1x_log = y.cumsum().iloc[-1] - df_xy[BENCH].cumsum().iloc[-1]\n", "\n", " # Label\n", " if (alpha_ann > 0) and (t_alpha > ALPHA_T_CUT) and (hedged_sharpe > SHARPE_CUT):\n", " label = \"Outperform\"\n", " elif (alpha_ann < 0) and (t_alpha < -ALPHA_T_CUT) and (hedged_sharpe < -SHARPE_CUT):\n", " label = \"Underperform\"\n", " else:\n", " label = \"Neutral\"\n", "\n", " rows.append({\n", " \"ticker\": s,\n", " \"corr_to_copper\": corr_map.get(s, np.nan), # <-- added\n", " \"beta_to_copper\": beta_map.get(s, np.nan), # keep for consistency\n", " \"alpha_ann\": alpha_ann,\n", " \"t_alpha\": t_alpha,\n", " \"hedged_sharpe\": hedged_sharpe,\n", " \"cum_excess_beta_log\": cum_excess_beta_log,\n", " \"cum_excess_1x_log\": cum_excess_1x_log,\n", " \"label\": label\n", " })\n", "\n", "excess_summary = (pd.DataFrame(rows)\n", " .set_index(\"ticker\")\n", " .sort_values([\"label\", \"alpha_ann\", \"hedged_sharpe\"],\n", " ascending=[True, False, False]))\n", "excess_summary.to_csv(\"excess_summary.csv\", float_format=\"%.6f\")\n", "\n", "print(\"✓ Saved corr_beta.csv,\",\n", " \"rolling_corr_long.csv,\" if ROLL_WINDOW else \"\",\n", " \"excess_summary.csv (with corr_to_copper)\")\n" ] }, { "cell_type": "code", "execution_count": 27, "id": "13fb4bda", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found 44 outperformers; price matrix shape: (1409, 44)\n" ] } ], "source": [ "import pandas as pd\n", "\n", "# Load prices and labels\n", "prices = pd.read_csv(\"df_prices_final.csv\", parse_dates=[\"Date\"], index_col=\"Date\")\n", "labels = pd.read_csv(\"excess_summary.csv\", index_col=0)\n", "\n", "# Select tickers labeled as OUTPERFORM (case-insensitive)\n", "op_tickers = (\n", " labels.assign(_lab=labels[\"label\"].astype(str).str.upper())\n", " .query(\"_lab == 'OUTPERFORM'\")\n", " .index.tolist()\n", ")\n", "\n", "# Keep only those tickers that exist in the price matrix\n", "op_tickers = [t for t in op_tickers if t in prices.columns]\n", "\n", "# Slice prices → new DataFrame\n", "outperforming_stocks = prices.loc[:, op_tickers].copy()\n", "\n", "print(f\"Found 
{len(op_tickers)} outperformers; price matrix shape: {outperforming_stocks.shape}\")\n", "\n", "# Optional: include the benchmark column too\n", "# outperforming_with_bench = prices.loc[:, ['HG=F'] + op_tickers].copy()\n", "\n", "# Save to CSV (optional)\n", "outperforming_stocks.to_csv(\"outperforming_stocks.csv\", index_label=\"Date\", float_format=\"%.6f\")\n" ] }, { "cell_type": "code", "execution_count": 43, "id": "7fdc653f", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Processing PYV.F ...\n", "Processing JIX.F ...\n", "Processing OUW0.F ...\n", "Processing CS.TO ...\n", "Processing 603979.SS ...\n", "Processing TKO.L ...\n", "Processing RE8.F ...\n", "Processing FVJ.F ...\n", "Processing SKE ...\n", "Processing FCXO34.SA ...\n", "Processing PILBF ...\n", "Processing 2009.TW ...\n", "Processing ARREF ...\n", "Processing LYSDY ...\n", "Processing PRMNF ...\n", "Processing TGB ...\n", "Processing LYSCF ...\n", "Processing 601168.SS ...\n", "Processing FSUGY ...\n", "Processing TECK-A.TO ...\n", "Processing ATUSF ...\n", "Processing FPMB.F ...\n", "Processing FCX ...\n", "Processing IPMLF ...\n", "Processing ATYM.L ...\n", "Processing FG1.F ...\n", "Processing SCCO ...\n", "Processing IYAA.F ...\n", "Processing ANFGF ...\n", "Processing SFR.AX ...\n", "Processing FSUMF ...\n", "Processing RTNTF ...\n", "Processing TECK ...\n", "Processing CMCLF ...\n", "Processing 603993.SS ...\n", "Processing IVPAF ...\n", "Processing OCKA.F ...\n", "Processing BHP.AX ...\n", "Processing HBM.TO ...\n", "Processing RIO.AX ...\n", "Processing GMBXF ...\n", "Processing 000630.SZ ...\n", "Processing MIN.AX ...\n",
"/var/folders/6b/p92_dgbd07ldpbpq29vbpd_m0000gn/T/ipykernel_66700/1873914036.py:192: FutureWarning: 'A-DEC' is deprecated and will be removed in a future version, please use 'YE-DEC' instead.\n", " return div.resample(\"A-DEC\").sum().sort_index()\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Processing HBM ...\n", "\n", "✓ Saved copper_outperformers_financials.csv with 211 rows.\n", "Note: If some tickers still have blank Dividends/Capex, FMP/Yahoo may not cover that listing;\n", " try the primary listing symbol in fmp_try_symbols() or map ADRs to locals.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/var/folders/6b/p92_dgbd07ldpbpq29vbpd_m0000gn/T/ipykernel_66700/1873914036.py:192: FutureWarning: 'A-DEC' is deprecated and will be removed in a future version, please use 'YE-DEC' instead.\n", " return div.resample(\"A-DEC\").sum().sort_index()\n" ] } ], "source": [ "# -*- coding: utf-8 -*-\n", "\"\"\"\n", "Build combined financial dataset for 44 outperformers\n", "- Input : outperforming_stocks.csv (first col=Date, next columns=tickers)\n", "- Output: copper_outperformers_financials.csv (one row per Ticker x FiscalPeriodEnd)\n", "\n", "Raw fields:\n", " Total_Debt, Long_Term_Debt, Short_Current_Debt, Shareholders_Equity,\n", " Cash, Retained_Earnings, Dividends_Paid, Capex_raw (usually negative), Capex_pos (abs)\n", "\n", "Derived fields:\n", " Debt_to_Equity, LT_Debt_Share, Delta_Debt, Delta_Capex_pos,\n", " Debt_Growth_Pct, Capex_Growth_Pct, Debt_Financed_Capex_Share\n", "\"\"\"\n", "\n", "import os\n", "import time\n", "import requests\n", "import pandas as pd\n", "import numpy as np\n", "import yfinance as yf\n", "from typing import List, Tuple\n", "\n", "# ==============================\n", "# CONFIG\n", "# ==============================\n", "INPUT_CSV = \"outperforming_stocks.csv\"\n", "OUTPUT_CSV = \"copper_outperformers_financials.csv\"\n", "\n", "# Your FMP API key (provided)\n", "FMP_API_KEY = os.environ.get(\"FMP_API_KEY\", \"Ij8othylV4h1m3G3NwIWF6lhO4edov0K\")\n", "\n", "# Align calendar-year series (e.g., dividends events) to fiscal via nearest match (max ~6.5 months)\n", "NEAREST_TOLERANCE = pd.Timedelta(\"200D\")\n", "\n", "# Be polite to FMP (free tier ~5 calls/min)\n", "FMP_SLEEP_SEC = 0.3\n", "\n", "\n", "# ==============================\n", "# HELPERS\n", "# ==============================\n", "def first_row(df: pd.DataFrame, labels: List[str]) -> pd.Series:\n", " \"\"\"Return the first available row among possible labels, else empty Series.\"\"\"\n", " if df is None or df.empty:\n", " return pd.Series(dtype=float)\n", " for lb in labels:\n", " if lb in df.index:\n", " return df.loc[lb]\n", " return pd.Series(dtype=float)\n", "\n", "def to_datetime_cols(df: pd.DataFrame) -> pd.DataFrame:\n", " \"\"\"Ensure columns are DatetimeIndex (fiscal period ends).\"\"\"\n", " if df is None or df.empty:\n", " return df\n", " out = df.copy()\n", " try:\n", " out.columns = pd.to_datetime(out.columns)\n", " except Exception:\n", " out.columns = pd.to_datetime([pd.to_datetime(c, errors=\"coerce\") for c in out.columns])\n", " return out\n", "\n", "def fmp_try_symbols(ticker: str) -> List[str]:\n", " \"\"\"\n", " Generate fallback symbol variants for FMP:\n", " - Original tk\n", " - Base without suffix (e.g., 'RIO.L' -> 'RIO')\n", " - Replace '.' 
with '-' (some APIs prefer hyphens)\n", " \"\"\"\n", " cands = [ticker]\n", " if \".\" in ticker:\n", " base = ticker.split(\".\")[0]\n", " cands.append(base)\n", " cands.append(ticker.replace(\".\", \"-\"))\n", " # de-duplicate while keeping order\n", " seen = set()\n", " uniq = []\n", " for s in cands:\n", " if s not in seen:\n", " uniq.append(s); seen.add(s)\n", " return uniq\n", "\n", "def fmp_get_json(url: str, params: dict) -> list:\n", " r = requests.get(url, params=params, timeout=20)\n", " r.raise_for_status()\n", " data = r.json()\n", " return data if isinstance(data, list) else data\n", "\n", "def fmp_capex_series(ticker: str, api_key: str, limit: int = 16) -> pd.Series:\n", " \"\"\"\n", " Capex from FMP cash-flow endpoint (annual).\n", " Tries multiple symbol variants and column names. Returns ascending Series by fiscal date.\n", " \"\"\"\n", " if not api_key:\n", " return pd.Series(dtype=float)\n", " for tk in fmp_try_symbols(ticker):\n", " try:\n", " url = f\"https://financialmodelingprep.com/api/v3/cash-flow-statement/{tk}\"\n", " params = {\"apikey\": api_key, \"limit\": limit}\n", " data = fmp_get_json(url, params)\n", " if not data:\n", " continue\n", " df = pd.DataFrame(data)\n", " if \"date\" not in df.columns:\n", " continue\n", " col = next((c for c in [\n", " \"capitalExpenditure\",\n", " \"investmentsInPropertyPlantAndEquipment\",\n", " \"capitalExpenditureFixedAssets\"\n", " ] if c in df.columns), None)\n", " if col is None:\n", " continue\n", " s = pd.Series(df[col].values, index=pd.to_datetime(df[\"date\"])).sort_index()\n", " time.sleep(FMP_SLEEP_SEC)\n", " if not s.empty:\n", " return s\n", " except Exception:\n", " continue\n", " return pd.Series(dtype=float)\n", "\n", "def yf_capex_series(t: yf.Ticker) -> pd.Series:\n", " \"\"\"Capex from Yahoo cash flow (annual). Returns Series indexed by fiscal period ends.\"\"\"\n", " try:\n", " cf = t.cashflow\n", " s = first_row(cf, [\"Capital Expenditures\", \"Capital Expenditure\"])\n", " return s if not s.empty else pd.Series(dtype=float)\n", " except Exception:\n", " return pd.Series(dtype=float)\n", "\n", "def fmp_dividends_cashflow_series(ticker: str, api_key: str, limit: int = 16) -> pd.Series:\n", " \"\"\"\n", " FMP cash-flow: 'dividendsPaid' (or 'dividendsPayments'), negative outflow -> flip to +paid.\n", " Indexed by fiscal 'date', ascending.\n", " \"\"\"\n", " if not api_key:\n", " return pd.Series(dtype=float)\n", " for tk in fmp_try_symbols(ticker):\n", " try:\n", " url = f\"https://financialmodelingprep.com/api/v3/cash-flow-statement/{tk}\"\n", " params = {\"apikey\": api_key, \"limit\": limit}\n", " data = fmp_get_json(url, params)\n", " if not data:\n", " continue\n", " df = pd.DataFrame(data)\n", " col = next((c for c in [\"dividendsPaid\", \"dividendsPayments\"] if c in df.columns), None)\n", " if col is None or \"date\" not in df.columns:\n", " continue\n", " s = pd.Series(df[col].values, index=pd.to_datetime(df[\"date\"])).sort_index()\n", " s = (-s) # make positive amount paid\n", " time.sleep(FMP_SLEEP_SEC)\n", " if not s.empty:\n", " return s\n", " except Exception:\n", " continue\n", " return pd.Series(dtype=float)\n", "\n", "def fmp_dividends_events_series(ticker: str, api_key: str, limit_years: int = 20) -> pd.Series:\n", " \"\"\"\n", " FMP stock dividend events endpoint. Sum to calendar years (Dec 31). 
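Example (symbol illustrative only): fmp_dividends_events_series('FCX', FMP_API_KEY)
    gives one value per calendar year, indexed at Dec-31 of that year.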
Returns ascending Series.\n", " \"\"\"\n", " if not api_key:\n", " return pd.Series(dtype=float)\n", " for tk in fmp_try_symbols(ticker):\n", " try:\n", " url = f\"https://financialmodelingprep.com/api/v3/historical-price-full/stock_dividend/{tk}\"\n", " params = {\"apikey\": api_key}\n", " r = requests.get(url, params=params, timeout=20)\n", " r.raise_for_status()\n", " data = r.json()\n", " hist = data.get(\"historical\", [])\n", " if not hist:\n", " continue\n", " df = pd.DataFrame(hist)\n", " if \"date\" not in df.columns or \"dividend\" not in df.columns:\n", " continue\n", " df[\"date\"] = pd.to_datetime(df[\"date\"])\n", " s = df.set_index(\"date\")[\"dividend\"].resample(\"A-DEC\").sum().sort_index()\n", " if limit_years:\n", " s = s.iloc[-limit_years:]\n", " time.sleep(FMP_SLEEP_SEC)\n", " if not s.empty:\n", " return s\n", " except Exception:\n", " continue\n", " return pd.Series(dtype=float)\n", "\n", "def yf_dividends_annual_series(t: yf.Ticker) -> pd.Series:\n", " \"\"\"Yahoo corporate actions -> annual (calendar).\"\"\"\n", " try:\n", " div = t.dividends\n", " if div is not None and not div.empty:\n", " return div.resample(\"A-DEC\").sum().sort_index()\n", " except Exception:\n", " pass\n", " return pd.Series(dtype=float)\n", "\n", "def yf_dividends_cashflow_series(t: yf.Ticker) -> pd.Series:\n", " \"\"\"Yahoo cash-flow row -> annual (fiscal), negative outflow -> +paid.\"\"\"\n", " try:\n", " cf = t.cashflow\n", " s = first_row(cf, [\"Cash Dividends Paid\", \"Dividends Paid\", \"Cash Dividends Paid By Company\"])\n", " if s is not None and not s.empty:\n", " s = (-s).copy()\n", " s.index = pd.to_datetime(s.index)\n", " return s.sort_index()\n", " except Exception:\n", " pass\n", " return pd.Series(dtype=float)\n", "\n", "def best_dividends_series(tk: str, t: yf.Ticker, api_key: str) -> Tuple[pd.Series, str]:\n", " \"\"\"\n", " Try multiple sources in order; return (Series, source_tag).\n", " Preference:\n", " 1) FMP cash-flow (fiscal)\n", " 2) FMP events (calendar)\n", " 3) Yahoo actions (calendar)\n", " 4) Yahoo cash-flow (fiscal)\n", " \"\"\"\n", " s = fmp_dividends_cashflow_series(tk, api_key)\n", " if not s.empty:\n", " return s, \"fmp_cashflow\"\n", " s = fmp_dividends_events_series(tk, api_key)\n", " if not s.empty:\n", " return s, \"fmp_events\"\n", " s = yf_dividends_annual_series(t)\n", " if not s.empty:\n", " return s, \"yf_actions\"\n", " s = yf_dividends_cashflow_series(t)\n", " if not s.empty:\n", " return s, \"yf_cashflow\"\n", " return pd.Series(dtype=float), \"none\"\n", "\n", "def safe_add(a: pd.Series, b: pd.Series) -> pd.Series:\n", " if a is None or a.empty:\n", " return b\n", " if b is None or b.empty:\n", " return a\n", " return a.add(b, fill_value=0)\n", "\n", "def compute_derivatives(per_ticker_df: pd.DataFrame) -> pd.DataFrame:\n", " \"\"\"\n", " Robust derivations that tolerate missing columns.\n", " \"\"\"\n", " df = per_ticker_df.sort_index().copy()\n", "\n", " def col(name):\n", " if name in df.columns:\n", " return df[name]\n", " else:\n", " return pd.Series(index=df.index, dtype=float)\n", "\n", " total_debt = col(\"Total_Debt\")\n", " equity = col(\"Shareholders_Equity\")\n", " lt_debt = col(\"Long_Term_Debt\")\n", " capex_pos = col(\"Capex_pos\")\n", "\n", " # Ratios\n", " df[\"Debt_to_Equity\"] = total_debt / equity.replace(0, np.nan)\n", " df[\"LT_Debt_Share\"] = lt_debt / total_debt.replace(0, np.nan)\n", "\n", " # Levels -> Deltas & growth (YoY)\n", " df[\"Delta_Debt\"] = total_debt.diff()\n", " df[\"Delta_Capex_pos\"] = 
capex_pos.diff()\n", " df[\"Debt_Growth_Pct\"] = total_debt.pct_change()\n", " df[\"Capex_Growth_Pct\"] = capex_pos.pct_change()\n", "\n", " # Debt-financed Capex share (only when ΔCapex_pos > 0)\n", " df[\"Debt_Financed_Capex_Share\"] = np.where(\n", " (df[\"Delta_Capex_pos\"] > 0) & df[\"Delta_Capex_pos\"].notna(),\n", " df[\"Delta_Debt\"] / df[\"Delta_Capex_pos\"],\n", " np.nan\n", " )\n", " return df\n", "\n", "\n", "# ==============================\n", "# MAIN\n", "# ==============================\n", "def main():\n", " # 1) Read tickers (skip first column 'Date')\n", " px = pd.read_csv(INPUT_CSV)\n", " tickers = [c for c in px.columns if c != \"Date\"]\n", " if not tickers:\n", " raise ValueError(\"No tickers found in outperforming_stocks.csv (columns after 'Date').\")\n", "\n", " all_rows = []\n", "\n", " for tk in tickers:\n", " print(f\"Processing {tk} ...\")\n", " t = yf.Ticker(tk)\n", "\n", " # --- Yahoo balance sheet & fiscal dates ---\n", " try:\n", " bs = t.balance_sheet\n", " except Exception:\n", " bs = pd.DataFrame()\n", " bs = to_datetime_cols(bs)\n", " fiscal_cols = [] if bs is None or bs.empty else list(bs.columns)\n", "\n", " # --- Core rows from Yahoo ---\n", " long_term_debt = first_row(bs, [\"Long Term Debt\"])\n", " short_debt = first_row(bs, [\"Short Long Term Debt\", \"Current Debt\", \"Short Term Debt\"])\n", " total_debt = first_row(bs, [\"Total Debt\"])\n", " if total_debt.empty:\n", " total_debt = safe_add(long_term_debt, short_debt)\n", "\n", " equity = first_row(bs, [\n", " \"Total Stockholder Equity\",\n", " \"Total Equity Gross Minority Interest\",\n", " \"Stockholders Equity\",\n", " \"Ordinary Shares\" # loose fallback\n", " ])\n", " cash = first_row(bs, [\"Cash\", \"Cash And Cash Equivalents\"])\n", " retained = first_row(bs, [\"Retained Earnings\"])\n", "\n", " # --- Capex: FMP primary, Yahoo fallback ---\n", " capex_fmp = fmp_capex_series(tk, FMP_API_KEY, limit=16)\n", " capex_yf = yf_capex_series(t)\n", " capex = capex_fmp if not capex_fmp.empty else capex_yf\n", "\n", " # --- Build fiscal index to align everything ---\n", " if not fiscal_cols:\n", " candidates = [s for s in [total_debt, equity, cash, retained, capex] if not s.empty]\n", " if not candidates:\n", " print(f\" ⚠ Skipping {tk}: no data available.\")\n", " continue\n", " union_idx = pd.Index(sorted(set().union(*[pd.to_datetime(c.index) for c in candidates])))\n", " fiscal_index = pd.DatetimeIndex(union_idx)\n", " else:\n", " fiscal_index = pd.DatetimeIndex(fiscal_cols)\n", "\n", " def align_to_fiscal(s: pd.Series) -> pd.Series:\n", " if s is None or s.empty:\n", " return pd.Series(index=fiscal_index, dtype=float)\n", " # try exact alignment\n", " try:\n", " return s.reindex(fiscal_index)\n", " except Exception:\n", " pass\n", " # nearest with tolerance\n", " try:\n", " return s.reindex(fiscal_index, method=\"nearest\", tolerance=NEAREST_TOLERANCE)\n", " except Exception:\n", " return s.reindex(fiscal_index)\n", "\n", " total_debt_a = align_to_fiscal(total_debt)\n", " long_debt_a = align_to_fiscal(long_term_debt)\n", " short_debt_a = align_to_fiscal(short_debt)\n", " equity_a = align_to_fiscal(equity)\n", " cash_a = align_to_fiscal(cash)\n", " retained_a = align_to_fiscal(retained)\n", "\n", " # --- Dividends: try multiple sources, then align to fiscal ---\n", " div_series, div_src = best_dividends_series(tk, t, FMP_API_KEY)\n", " dividends_a = align_to_fiscal(div_series)\n", "\n", " # --- Capex (raw usually negative), plus positive-spend ---\n", " capex_a = align_to_fiscal(capex)\n", " 
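"# Editor's aside — worked example for Debt_Financed_Capex_Share (synthetic numbers):\n",
"# with Delta_Debt = 50 and Delta_Capex_pos = 100 in a period, the share is 50/100 = 0.5,\n",
"# i.e. half of the incremental capex is matched by newly raised debt; the ratio is left\n",
"# NaN whenever Delta_Capex_pos <= 0, exactly as compute_derivatives() above encodes.\n",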
capex_pos_a = capex_a.abs()\n", "\n", " # --- Build per-ticker frame (index = fiscal period end) ---\n", " per = pd.DataFrame({\n", " \"Total_Debt\": total_debt_a,\n", " \"Long_Term_Debt\": long_debt_a,\n", " \"Short_Current_Debt\": short_debt_a,\n", " \"Shareholders_Equity\": equity_a,\n", " \"Cash\": cash_a,\n", " \"Retained_Earnings\": retained_a,\n", " \"Dividends_Paid\": dividends_a,\n", " \"Capex_raw\": capex_a, # negative outflow\n", " \"Capex_pos\": capex_pos_a # positive spend\n", " })\n", "\n", " # Ensure core columns exist even if all NaN (avoid KeyError downstream)\n", " CORE_COLS = [\n", " \"Total_Debt\", \"Long_Term_Debt\", \"Short_Current_Debt\",\n", " \"Shareholders_Equity\", \"Cash\", \"Retained_Earnings\",\n", " \"Dividends_Paid\", \"Capex_raw\", \"Capex_pos\"\n", " ]\n", " for c in CORE_COLS:\n", " if c not in per.columns:\n", " per[c] = pd.Series(index=per.index, dtype=float)\n", "\n", " # --- Derived metrics for Task 1 & 2 ---\n", " per = compute_derivatives(per)\n", "\n", " # --- Stack for long table ---\n", " per = per.reset_index().rename(columns={\"index\": \"FiscalPeriodEnd\"})\n", " per.insert(0, \"Ticker\", tk)\n", " all_rows.append(per)\n", "\n", " if not all_rows:\n", " raise RuntimeError(\"No data collected. Check tickers and API coverage.\")\n", "\n", " combined = pd.concat(all_rows, axis=0, ignore_index=True)\n", " combined = combined.sort_values([\"Ticker\", \"FiscalPeriodEnd\"]).reset_index(drop=True)\n", " combined.to_csv(OUTPUT_CSV, index=False, float_format=\"%.6f\")\n", "\n", " print(f\"\\n✓ Saved {OUTPUT_CSV} with {len(combined)} rows.\")\n", " print(\"Note: If some tickers still have blank Dividends/Capex, FMP/Yahoo may not cover that listing;\")\n", " print(\" try the primary listing symbol in fmp_try_symbols() or map ADRs to locals.\")\n", "\n", "if __name__ == \"__main__\":\n", " main()\n" ] }, { "cell_type": "code", "execution_count": 46, "id": "7c34df62", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ Saved task1_behavioral_scores.csv and task1_behavioral_shortlist.csv\n", " Task1_Behavior_Score Ideal_Flag\n", "Ticker \n", "ATUSF 1.366295 True\n", "2009.TW 0.998314 True\n", "PILBF 0.899130 False\n", "IYAA.F 0.644812 False\n", "IVPAF 0.644812 False\n", "FVJ.F 0.361688 False\n", "FSUMF 0.361688 False\n", "FSUGY 0.361688 False\n", "LYSCF 0.322783 True\n", "LYSDY 0.322783 True\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/Users/yanzhe0329/Desktop/Stock Selection/Stock_Benchmark Analysis copy/.venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.py:1214: RuntimeWarning: Mean of empty slice\n", " return np.nanmean(a, axis, out=out, keepdims=keepdims)\n", "/Users/yanzhe0329/Desktop/Stock Selection/Stock_Benchmark Analysis copy/.venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.py:1214: RuntimeWarning: Mean of empty slice\n", " return np.nanmean(a, axis, out=out, keepdims=keepdims)\n", "/var/folders/6b/p92_dgbd07ldpbpq29vbpd_m0000gn/T/ipykernel_66700/873942107.py:144: RuntimeWarning: All-NaN slice encountered\n", " dfs_med = np.nanmedian(g[\"Delta_Debt\"] / denom)\n", "/var/folders/6b/p92_dgbd07ldpbpq29vbpd_m0000gn/T/ipykernel_66700/873942107.py:147: RuntimeWarning: All-NaN slice encountered\n", " debt_to_capex_lvl_med = np.nanmedian(g[\"Delta_Debt\"] / g[\"Capex_pos\"].replace(0, np.nan))\n", "/var/folders/6b/p92_dgbd07ldpbpq29vbpd_m0000gn/T/ipykernel_66700/873942107.py:193: FutureWarning: DataFrameGroupBy.apply operated on the grouping columns. 
This behavior is deprecated, and in a future version of pandas the grouping columns will be excluded from the operation. Either pass `include_groups=False` to exclude the groupings or explicitly select the grouping columns after groupby to silence this warning.\n", " scores = fin.groupby(\"Ticker\", as_index=False).apply(summarize_company).reset_index(drop=True).set_index(\"Ticker\")\n" ] } ], "source": [ "# -*- coding: utf-8 -*-\n", "\"\"\"\n", "Task 1 — Debt-focused behavioral analysis with **equal-by-aspect** weighting.\n", "\n", "Inputs (must exist in the working directory):\n", " - copper_outperformers_financials.csv (Ticker, FiscalPeriodEnd, and metrics from your pipeline)\n", " - outperforming_stocks.csv (Date + 44 tickers of prices for the same names)\n", "\n", "Outputs:\n", " - task1_behavioral_scores.csv (all metrics + Behavior Score + Ideal_Flag)\n", " - task1_behavioral_shortlist.csv (sorted by Behavior Score, for review)\n", "\n", "What the Behavior Score measures (4 aspects, equal weight by aspect):\n", " 1) Prudence .............. low leverage now, falling leverage trend, low ΔDebt relative to Capex level\n", " 2) Capex financing ....... capex not mostly debt-financed, healthier LT-debt mix\n", " 3) Growth & reinvestment . capex growth, retained earnings growth (proxy for reinvestment)\n", " 4) Behavior vs stock ..... D/E falls when stock is up; ΔCapex rises when stock is up\n", "\n", "Each aspect gets equal weight **for that company** (if it has at least one usable signal);\n", "within an aspect, weight is split equally across available signals (NaN-robust).\n", "\"\"\"\n", "\n", "import pandas as pd\n", "import numpy as np\n", "\n", "# --------------------------\n", "# Config\n", "# --------------------------\n", "FIN_PATH = \"copper_outperformers_financials.csv\"\n", "PX_PATH = \"outperforming_stocks.csv\"\n", "\n", "N_YEARS_LOOKBACK = 5 # use the last N fiscal periods per ticker\n", "MIN_OBS_FOR_BETA = 3 # minimum paired observations to compute beta/corr\n", "PRICE_ALIGN_TOL_DAYS = 30 # align stock prices to fiscal dates within ±30 days\n", "\n", "\n", "# --------------------------\n", "# Helpers\n", "# --------------------------\n", "def zscore(s: pd.Series) -> pd.Series:\n", " s = s.astype(float)\n", " mu = s.mean(skipna=True)\n", " sd = s.std(skipna=True, ddof=1)\n", " if sd == 0 or np.isnan(sd):\n", " return pd.Series(np.nan, index=s.index)\n", " return (s - mu) / sd\n", "\n", "def slope_per_year(dates: pd.Series, y: pd.Series) -> float:\n", " \"\"\"OLS slope of y vs time, annualized (per year).\"\"\"\n", " df = pd.DataFrame({\"t\": dates, \"y\": y}).dropna()\n", " if df.shape[0] < 2 or df[\"t\"].nunique() < 2:\n", " return np.nan\n", " x = df[\"t\"].map(pd.Timestamp.toordinal).to_numpy(dtype=float)\n", " yv = df[\"y\"].to_numpy(dtype=float)\n", " try:\n", " b1_per_day = np.polyfit(x, yv, 1)[0]\n", " return b1_per_day * 365.0\n", " except Exception:\n", " return np.nan\n", "\n", "def align_stock_returns_to_fiscal(px: pd.Series, fiscal_dates: pd.DatetimeIndex, tol_days=30) -> pd.Series:\n", " \"\"\"\n", " For a price series (indexed by Date), find nearest price to each fiscal date (±tol_days),\n", " then compute period-over-period returns aligned to fiscal dates.\n", " \"\"\"\n", " p = px.dropna()\n", " if p.size < 2:\n", " return pd.Series(index=fiscal_dates, dtype=float)\n", "\n", " aligned_vals = []\n", " aligned_idx = []\n", " tol = pd.Timedelta(days=tol_days)\n", "\n", " for d in fiscal_dates:\n", " # nearest index (absolute difference)\n", " try:\n", " i_near = 
(p.index - d).abs().argmin()\n", " near_date = p.index[i_near]\n", " if abs(near_date - d) <= tol:\n", " aligned_idx.append(d)\n", " aligned_vals.append(p.loc[near_date])\n", " except Exception:\n", " pass\n", "\n", " if len(aligned_idx) < 2:\n", " return pd.Series(index=fiscal_dates, dtype=float)\n", "\n", " s = pd.Series(aligned_vals, index=pd.DatetimeIndex(aligned_idx)).sort_index()\n", " r = s.pct_change()\n", " return r.reindex(fiscal_dates)\n", "\n", "\n", "# --------------------------\n", "# Load data\n", "# --------------------------\n", "fin = pd.read_csv(FIN_PATH, parse_dates=[\"FiscalPeriodEnd\"]).sort_values([\"Ticker\", \"FiscalPeriodEnd\"])\n", "px = pd.read_csv(PX_PATH, parse_dates=[\"Date\"]).set_index(\"Date\").sort_index()\n", "\n", "# Ensure needed columns exist (if missing in CSV, create as NaN)\n", "need_cols = [\n", " \"Total_Debt\", \"Long_Term_Debt\", \"Short_Current_Debt\", \"Shareholders_Equity\",\n", " \"Cash\", \"Retained_Earnings\", \"Dividends_Paid\", \"Capex_raw\", \"Capex_pos\",\n", " \"Debt_to_Equity\", \"LT_Debt_Share\", \"Delta_Debt\", \"Delta_Capex_pos\",\n", " \"Debt_Growth_Pct\", \"Capex_Growth_Pct\", \"Debt_Financed_Capex_Share\"\n", "]\n", "for c in need_cols:\n", " if c not in fin.columns:\n", " fin[c] = np.nan\n", "\n", "\n", "# --------------------------\n", "# Summarize per ticker\n", "# --------------------------\n", "def summarize_company(g: pd.DataFrame) -> pd.Series:\n", " g = g.sort_values(\"FiscalPeriodEnd\").tail(N_YEARS_LOOKBACK)\n", " if g.empty:\n", " return pd.Series(dtype=float)\n", "\n", " last = g.iloc[-1]\n", " dates = g[\"FiscalPeriodEnd\"]\n", "\n", " # Latest leverage & mix\n", " latest_de = last.get(\"Debt_to_Equity\", np.nan)\n", "\n", " # Prefer existing LT_Debt_Share column; if missing, compute from last row\n", " latest_total_debt = last.get(\"Total_Debt\", np.nan)\n", " latest_lt_debt = last.get(\"Long_Term_Debt\", np.nan)\n", " if pd.notna(last.get(\"LT_Debt_Share\", np.nan)):\n", " lt_share = last.get(\"LT_Debt_Share\", np.nan)\n", " else:\n", " lt_share = (latest_lt_debt / latest_total_debt) if pd.notna(latest_total_debt) and latest_total_debt != 0 else np.nan\n", "\n", " # Trends / growth\n", " de_slope = slope_per_year(dates, g[\"Debt_to_Equity\"])\n", " capex_g_avg = g[\"Capex_Growth_Pct\"].mean(skipna=True)\n", " debt_g_avg = g[\"Debt_Growth_Pct\"].mean(skipna=True)\n", " re_g_avg = g[\"Retained_Earnings\"].pct_change().mean(skipna=True)\n", "\n", " # Financing of capex\n", " dfs_med = g[\"Debt_Financed_Capex_Share\"].median(skipna=True)\n", " if np.isnan(dfs_med):\n", " # Robust fallback: median of ΔDebt / ΔCapex_pos where ΔCapex_pos > 0\n", " denom = g[\"Delta_Capex_pos\"].replace(0, np.nan)\n", " dfs_med = np.nanmedian(g[\"Delta_Debt\"] / denom)\n", "\n", " # Guardrail: ΔDebt relative to Capex level\n", " debt_to_capex_lvl_med = np.nanmedian(g[\"Delta_Debt\"] / g[\"Capex_pos\"].replace(0, np.nan))\n", "\n", " # Behavior vs stock\n", " tk = g[\"Ticker\"].iloc[0]\n", " stock_ret = align_stock_returns_to_fiscal(px[tk] if tk in px.columns else pd.Series(dtype=float),\n", " g[\"FiscalPeriodEnd\"],\n", " tol_days=PRICE_ALIGN_TOL_DAYS)\n", "\n", " # β(D/E on stock returns): regression y = a + b * r_stock\n", " de_on_ret_beta = np.nan\n", " df_beta = pd.concat([g.set_index(\"FiscalPeriodEnd\")[\"Debt_to_Equity\"], stock_ret], axis=1).dropna()\n", " if df_beta.shape[0] >= MIN_OBS_FOR_BETA:\n", " try:\n", " X = np.vstack([np.ones(len(df_beta)), df_beta.iloc[:, 1].values]).T\n", " y = df_beta.iloc[:, 0].values\n", " b = 
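"# Editor's note: lstsq solves y ≈ X @ [a, b] — the ones column yields the intercept a and\n",
"# the return column the slope, so de_on_ret_beta (= b[1]) < 0 means D/E tends to fall in\n",
"# periods when the stock is up, the behaviour rewarded by z_DE_beta_stock below.\n",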
np.linalg.lstsq(X, y, rcond=None)[0]\n", " de_on_ret_beta = b[1]\n", " except Exception:\n", " de_on_ret_beta = np.nan\n", "\n", " # corr(stock returns, ΔCapex)\n", " capex_corr = np.nan\n", " if stock_ret.notna().sum() >= MIN_OBS_FOR_BETA and g[\"Delta_Capex_pos\"].notna().sum() >= MIN_OBS_FOR_BETA:\n", " capex_corr = pd.concat([stock_ret, g.set_index(\"FiscalPeriodEnd\")[\"Delta_Capex_pos\"]], axis=1).corr().iloc[0, 1]\n", "\n", " return pd.Series({\n", " # Levels / mix\n", " \"Latest_Debt_to_Equity\": latest_de,\n", " \"Latest_LT_Debt_Share\": lt_share,\n", "\n", " # Growth & trends\n", " \"Capex_Growth_Avg\": capex_g_avg,\n", " \"Debt_Growth_Avg\": debt_g_avg,\n", " \"RE_Growth_Avg\": re_g_avg,\n", " \"DE_Slope_per_year\": de_slope,\n", "\n", " # Financing of capex\n", " \"Debt_Financed_Capex_Share_med\": dfs_med,\n", " \"Debt_to_CapexLevel_med\": debt_to_capex_lvl_med,\n", "\n", " # Behavior vs stock\n", " \"DE_beta_on_StockRet\": de_on_ret_beta,\n", " \"Capex_vs_StockRet_corr\": capex_corr,\n", " })\n", "\n", "\n", "scores = fin.groupby(\"Ticker\", as_index=False).apply(summarize_company).reset_index(drop=True).set_index(\"Ticker\")\n", "\n", "\n", "# --------------------------\n", "# Z-scores for each signal\n", "# --------------------------\n", "Z = pd.DataFrame(index=scores.index)\n", "\n", "# Prudence\n", "Z[\"z_low_DE\"] = -zscore(scores[\"Latest_Debt_to_Equity\"]) # lower D/E better\n", "Z[\"z_falling_DE\"] = -zscore(scores[\"DE_Slope_per_year\"]) # more negative slope better\n", "Z[\"z_low_debt_to_capex\"]= -zscore(scores[\"Debt_to_CapexLevel_med\"]) # lower ΔDebt / Capex level better\n", "\n", "# Capex financing quality\n", "Z[\"z_low_debt_fin_share\"] = -zscore(scores[\"Debt_Financed_Capex_Share_med\"]) # lower better\n", "Z[\"z_LT_share\"] = zscore(scores[\"Latest_LT_Debt_Share\"]) # higher better\n", "\n", "# Growth & reinvestment\n", "Z[\"z_capex_growth\"] = zscore(scores[\"Capex_Growth_Avg\"]) # higher better\n", "Z[\"z_RE_growth\"] = zscore(scores[\"RE_Growth_Avg\"]) # higher better\n", "\n", "# Behavior vs stock\n", "Z[\"z_DE_beta_stock\"] = -zscore(scores[\"DE_beta_on_StockRet\"]) # more negative better\n", "Z[\"z_capex_vs_stock\"] = zscore(scores[\"Capex_vs_StockRet_corr\"]) # more positive better\n", "\n", "\n", "# --------------------------\n", "# Equal-by-aspect weighting (NaN-robust)\n", "# --------------------------\n", "aspects = {\n", " \"prudence\": [\"z_low_DE\", \"z_falling_DE\", \"z_low_debt_to_capex\"],\n", " \"capex_financing\": [\"z_low_debt_fin_share\", \"z_LT_share\"],\n", " \"growth_reinvest\": [\"z_capex_growth\", \"z_RE_growth\"],\n", " \"stock_behavior\": [\"z_DE_beta_stock\", \"z_capex_vs_stock\"],\n", "}\n", "\n", "# Availability mask per component\n", "A = ~Z.isna()\n", "\n", "# Which aspects have at least one available component for each ticker?\n", "aspect_has_data = pd.DataFrame({a: A[cols].any(axis=1) for a, cols in aspects.items()})\n", "\n", "# Equal weight across available aspects (each gets 1 / #available_aspects)\n", "n_aspects = aspect_has_data.sum(axis=1).replace(0, np.nan)\n", "aspect_w = aspect_has_data.div(n_aspects, axis=0) # per-row aspect weights\n", "\n", "# Within each aspect, split that aspect’s weight equally among its available components\n", "component_w = pd.DataFrame(0.0, index=Z.index, columns=Z.columns)\n", "for a, cols in aspects.items():\n", " if not cols:\n", " continue\n", " n_comp = A[cols].sum(axis=1).replace(0, np.nan) # available components in this aspect per row\n", " per_comp_w = A[cols].div(n_comp, 
axis=0).mul(aspect_w[a], axis=0) # (aspect weight) / (#available comps)\n", " component_w[cols] = per_comp_w\n", "\n", "# Final Behavior Score\n", "scores[\"Task1_Behavior_Score\"] = (Z.fillna(0) * component_w.fillna(0)).sum(axis=1)\n", "\n", "\n", "# --------------------------\n", "# Aspect-level Ideal flag\n", "# Pass ≥3 of 4 aspects (only counting aspects with data)\n", "# --------------------------\n", "# Base boolean checks\n", "checks = pd.DataFrame(index=scores.index)\n", "checks[\"low_DE_now\"] = scores[\"Latest_Debt_to_Equity\"] < np.nanmedian(scores[\"Latest_Debt_to_Equity\"])\n", "checks[\"falling_DE\"] = scores[\"DE_Slope_per_year\"] < 0\n", "checks[\"capex_growing\"] = scores[\"Capex_Growth_Avg\"] > 0\n", "checks[\"prudent_fin_share\"] = (scores[\"Debt_Financed_Capex_Share_med\"] < 0.6)\n", "checks[\"low_debt_to_capex\"] = (scores[\"Debt_to_CapexLevel_med\"] < np.nanmedian(scores[\"Debt_to_CapexLevel_med\"]))\n", "checks[\"LT_share_ok\"] = scores[\"Latest_LT_Debt_Share\"] > np.nanmedian(scores[\"Latest_LT_Debt_Share\"])\n", "checks[\"DE_drops_when_stock_up\"] = scores[\"DE_beta_on_StockRet\"] < 0\n", "checks[\"capex_moves_with_stock\"] = scores[\"Capex_vs_StockRet_corr\"] > 0\n", "\n", "# Map checks to aspects\n", "aspect_checks = {\n", " \"prudence\": [\"low_DE_now\", \"falling_DE\", \"low_debt_to_capex\"], # require >=2 of these\n", " \"capex_financing\": [\"prudent_fin_share\", \"LT_share_ok\"], # require >=1\n", " \"growth_reinvest\": [\"capex_growing\"], # require >=1 (RE growth optional if you add it)\n", " \"stock_behavior\": [\"DE_drops_when_stock_up\", \"capex_moves_with_stock\"]# require all available\n", "}\n", "\n", "def _aspect_pass_for_row(row: pd.Series, names, mode: str):\n", " vals = row[names]\n", " avail = vals.notna()\n", " if not avail.any():\n", " return np.nan\n", " trues = vals.fillna(False).sum()\n", " total = avail.sum()\n", " if mode == \"at_least_2\":\n", " return trues >= min(2, total)\n", " if mode == \"at_least_1\":\n", " return trues >= 1\n", " if mode == \"all_available\":\n", " return trues == total\n", " return False\n", "\n", "aspect_pass_df = pd.DataFrame(index=scores.index)\n", "aspect_pass_df[\"prudence\"] = checks.apply(lambda r: _aspect_pass_for_row(r, aspect_checks[\"prudence\"], \"at_least_2\"), axis=1)\n", "aspect_pass_df[\"capex_financing\"] = checks.apply(lambda r: _aspect_pass_for_row(r, aspect_checks[\"capex_financing\"], \"at_least_1\"), axis=1)\n", "aspect_pass_df[\"growth_reinvest\"] = checks.apply(lambda r: _aspect_pass_for_row(r, aspect_checks[\"growth_reinvest\"], \"at_least_1\"), axis=1)\n", "aspect_pass_df[\"stock_behavior\"] = checks.apply(lambda r: _aspect_pass_for_row(r, aspect_checks[\"stock_behavior\"], \"all_available\"), axis=1)\n", "\n", "n_avail_aspects = aspect_pass_df.notna().sum(axis=1)\n", "n_pass_aspects = aspect_pass_df.fillna(False).sum(axis=1)\n", "scores[\"Ideal_Flag\"] = (n_avail_aspects >= 3) & (n_pass_aspects >= 3)\n", "\n", "\n", "# --------------------------\n", "# Save outputs\n", "# --------------------------\n", "scores.reset_index().to_csv(\"task1_behavioral_scores.csv\", index=False, float_format=\"%.6f\")\n", "scores.reset_index().sort_values(\"Task1_Behavior_Score\", ascending=False)\\\n", " .to_csv(\"task1_behavioral_shortlist.csv\", index=False, float_format=\"%.6f\")\n", "\n", "print(\"✓ Saved task1_behavioral_scores.csv and task1_behavioral_shortlist.csv\")\n", "print(scores.sort_values(\"Task1_Behavior_Score\", ascending=False).head(10)[\n", " [\"Task1_Behavior_Score\", 
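"# Editor's example: under 'at_least_2', with all three prudence checks available\n",
"# min(2, 3) = 2, so two must pass; if only one check has data, min(2, 1) = 1 and that\n",
"# single check decides the aspect on its own.\n",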
\"Ideal_Flag\"]\n", "])\n" ] }, { "cell_type": "code", "execution_count": 47, "id": "3d274752", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ Saved task1_behavioral_scores.csv and task1_behavioral_shortlist.csv\n", " Task1_Behavior_Score Ideal_Flag DE_beta_on_StockRet \\\n", "Ticker \n", "ATUSF 1.147166 True 0.036891 \n", "2009.TW 0.887607 True -0.163638 \n", "PILBF 0.551326 False -0.017556 \n", "IVPAF 0.529693 True -0.089586 \n", "HBM 0.502202 True -0.475345 \n", "HBM.TO 0.494770 True -0.437697 \n", "OCKA.F 0.487074 True -0.413293 \n", "IYAA.F 0.476700 False -0.196286 \n", "FVJ.F 0.438831 False -0.112566 \n", "FSUGY 0.437584 False -0.091859 \n", "\n", " Capex_vs_StockRet_corr \n", "Ticker \n", "ATUSF 0.998645 \n", "2009.TW 0.599495 \n", "PILBF -0.594953 \n", "IVPAF 0.231537 \n", "HBM 0.919314 \n", "HBM.TO 0.968417 \n", "OCKA.F 0.983136 \n", "IYAA.F -0.347706 \n", "FVJ.F 0.896529 \n", "FSUGY 0.940414 \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/Users/yanzhe0329/Desktop/Stock Selection/Stock_Benchmark Analysis copy/.venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.py:1214: RuntimeWarning: Mean of empty slice\n", " return np.nanmean(a, axis, out=out, keepdims=keepdims)\n", "/Users/yanzhe0329/Desktop/Stock Selection/Stock_Benchmark Analysis copy/.venv/lib/python3.12/site-packages/numpy/lib/_nanfunctions_impl.py:1214: RuntimeWarning: Mean of empty slice\n", " return np.nanmean(a, axis, out=out, keepdims=keepdims)\n", "/var/folders/6b/p92_dgbd07ldpbpq29vbpd_m0000gn/T/ipykernel_66700/4292568386.py:135: RuntimeWarning: All-NaN slice encountered\n", " dfs_med = np.nanmedian(g[\"Delta_Debt\"] / denom)\n", "/var/folders/6b/p92_dgbd07ldpbpq29vbpd_m0000gn/T/ipykernel_66700/4292568386.py:136: RuntimeWarning: All-NaN slice encountered\n", " debt_to_capex_lvl_med = np.nanmedian(g[\"Delta_Debt\"] / g[\"Capex_pos\"].replace(0, np.nan))\n", "/var/folders/6b/p92_dgbd07ldpbpq29vbpd_m0000gn/T/ipykernel_66700/4292568386.py:177: FutureWarning: DataFrameGroupBy.apply operated on the grouping columns. This behavior is deprecated, and in a future version of pandas the grouping columns will be excluded from the operation. 
Either pass `include_groups=False` to exclude the groupings or explicitly select the grouping columns after groupby to silence this warning.\n", " scores = fin.groupby(\"Ticker\", as_index=False).apply(summarize_company).reset_index(drop=True).set_index(\"Ticker\")\n" ] } ], "source": [ "# -*- coding: utf-8 -*-\n", "\"\"\"\n", "Task 1 — Debt-focused behavioral analysis with equal-by-aspect weighting.\n", "Robust fix: align stock prices to fiscal dates with ±90d window (prefer prior date).\n", "\n", "Inputs:\n", " - copper_outperformers_financials.csv\n", " - outperforming_stocks.csv (Date + tickers)\n", "\n", "Outputs:\n", " - task1_behavioral_scores.csv\n", " - task1_behavioral_shortlist.csv\n", "\"\"\"\n", "\n", "import pandas as pd\n", "import numpy as np\n", "\n", "# --------------------------\n", "# Config\n", "# --------------------------\n", "FIN_PATH = \"copper_outperformers_financials.csv\"\n", "PX_PATH = \"outperforming_stocks.csv\"\n", "\n", "N_YEARS_LOOKBACK = 5 # use last N fiscal periods\n", "MIN_OBS_FOR_BETA = 3 # min paired obs for beta/corr\n", "PRICE_ALIGN_TOL_D = 90 # days tolerance for aligning prices to fiscal dates\n", "\n", "# --------------------------\n", "# Helpers\n", "# --------------------------\n", "def zscore(s: pd.Series) -> pd.Series:\n", " s = s.astype(float)\n", " mu = s.mean(skipna=True)\n", " sd = s.std(skipna=True, ddof=1)\n", " if not np.isfinite(sd) or sd == 0:\n", " return pd.Series(np.nan, index=s.index)\n", " return (s - mu) / sd\n", "\n", "def slope_per_year(dates: pd.Series, y: pd.Series) -> float:\n", " df = pd.DataFrame({\"t\": dates, \"y\": y}).dropna()\n", " if df.shape[0] < 2 or df[\"t\"].nunique() < 2:\n", " return np.nan\n", " x = df[\"t\"].map(pd.Timestamp.toordinal).to_numpy(float)\n", " yv = df[\"y\"].to_numpy(float)\n", " try:\n", " return np.polyfit(x, yv, 1)[0] * 365.0\n", " except Exception:\n", " return np.nan\n", "\n", "def nearest_price_level(p: pd.Series, d: pd.Timestamp, tol_days: int) -> float:\n", " \"\"\"\n", " Prefer last price on/before d within tol; else first after d within tol; else NaN.\n", " \"\"\"\n", " if p.empty:\n", " return np.nan\n", " tol = pd.Timedelta(days=tol_days)\n", " # last on/before\n", " before = p.loc[:d]\n", " if not before.empty:\n", " dt = d - before.index[-1]\n", " if dt <= tol:\n", " return float(before.iloc[-1])\n", " # first after\n", " after = p.loc[d:]\n", " if not after.empty:\n", " dt = after.index[0] - d\n", " if dt <= tol:\n", " return float(after.iloc[0])\n", " return np.nan\n", "\n", "def annual_returns_aligned(p: pd.Series, fiscal_dates: pd.DatetimeIndex, tol_days: int) -> pd.Series:\n", " \"\"\"\n", " Build a price level at each fiscal date using nearest_price_level, then compute YoY returns.\n", " Returns Series indexed by fiscal_dates.\n", " \"\"\"\n", " if p is None or p.empty:\n", " return pd.Series(index=fiscal_dates, dtype=float)\n", " levels = []\n", " idx = []\n", " for d in fiscal_dates:\n", " val = nearest_price_level(p, pd.Timestamp(d), tol_days)\n", " if np.isfinite(val):\n", " levels.append(val); idx.append(pd.Timestamp(d))\n", " if len(idx) < 2:\n", " return pd.Series(index=fiscal_dates, dtype=float)\n", " s = pd.Series(levels, index=pd.DatetimeIndex(idx)).sort_index()\n", " r = s.pct_change()\n", " return r.reindex(fiscal_dates)\n", "\n", "# --------------------------\n", "# Load data\n", "# --------------------------\n", "fin = pd.read_csv(FIN_PATH, parse_dates=[\"FiscalPeriodEnd\"]).sort_values([\"Ticker\",\"FiscalPeriodEnd\"])\n", "px = pd.read_csv(PX_PATH, 
parse_dates=[\"Date\"]).set_index(\"Date\").sort_index()\n", "\n", "need_cols = [\n", " \"Total_Debt\",\"Long_Term_Debt\",\"Short_Current_Debt\",\"Shareholders_Equity\",\n", " \"Cash\",\"Retained_Earnings\",\"Dividends_Paid\",\"Capex_raw\",\"Capex_pos\",\n", " \"Debt_to_Equity\",\"LT_Debt_Share\",\"Delta_Debt\",\"Delta_Capex_pos\",\n", " \"Debt_Growth_Pct\",\"Capex_Growth_Pct\",\"Debt_Financed_Capex_Share\"\n", "]\n", "for c in need_cols:\n", " if c not in fin.columns:\n", " fin[c] = np.nan\n", "\n", "# --------------------------\n", "# Per-ticker summary\n", "# --------------------------\n", "def summarize_company(g: pd.DataFrame) -> pd.Series:\n", " g = g.sort_values(\"FiscalPeriodEnd\").tail(N_YEARS_LOOKBACK)\n", " if g.empty:\n", " return pd.Series(dtype=float)\n", "\n", " last = g.iloc[-1]\n", " dates = g[\"FiscalPeriodEnd\"]\n", "\n", " # Latest leverage & mix\n", " latest_de = last.get(\"Debt_to_Equity\", np.nan)\n", " latest_total_debt = last.get(\"Total_Debt\", np.nan)\n", " latest_lt_debt = last.get(\"Long_Term_Debt\", np.nan)\n", " lt_share = last.get(\"LT_Debt_Share\", np.nan)\n", " if pd.isna(lt_share):\n", " lt_share = (latest_lt_debt / latest_total_debt) if pd.notna(latest_total_debt) and latest_total_debt != 0 else np.nan\n", "\n", " # Trends / growth\n", " de_slope = slope_per_year(dates, g[\"Debt_to_Equity\"])\n", " capex_g_avg = g[\"Capex_Growth_Pct\"].mean(skipna=True)\n", " debt_g_avg = g[\"Debt_Growth_Pct\"].mean(skipna=True)\n", " re_g_avg = g[\"Retained_Earnings\"].pct_change().mean(skipna=True)\n", "\n", " # Financing of capex\n", " dfs_med = g[\"Debt_Financed_Capex_Share\"].median(skipna=True)\n", " if np.isnan(dfs_med):\n", " denom = g[\"Delta_Capex_pos\"].replace(0, np.nan)\n", " dfs_med = np.nanmedian(g[\"Delta_Debt\"] / denom)\n", " debt_to_capex_lvl_med = np.nanmedian(g[\"Delta_Debt\"] / g[\"Capex_pos\"].replace(0, np.nan))\n", "\n", " # Behavior vs stock (robust alignment)\n", " tk = g[\"Ticker\"].iloc[0]\n", " if tk in px.columns:\n", " r_stock = annual_returns_aligned(px[tk], g[\"FiscalPeriodEnd\"], tol_days=PRICE_ALIGN_TOL_D)\n", " else:\n", " r_stock = pd.Series(index=g[\"FiscalPeriodEnd\"], dtype=float)\n", "\n", " # β(D/E on stock returns)\n", " de_on_ret_beta = np.nan\n", " df_beta = pd.concat([g.set_index(\"FiscalPeriodEnd\")[\"Debt_to_Equity\"], r_stock], axis=1).dropna()\n", " if df_beta.shape[0] >= MIN_OBS_FOR_BETA:\n", " try:\n", " X = np.vstack([np.ones(len(df_beta)), df_beta.iloc[:,1].values]).T\n", " y = df_beta.iloc[:,0].values\n", " b = np.linalg.lstsq(X, y, rcond=None)[0]\n", " de_on_ret_beta = float(b[1])\n", " except Exception:\n", " de_on_ret_beta = np.nan\n", "\n", " # corr(stock returns, ΔCapex)\n", " capex_corr = np.nan\n", " delta_capex = g.set_index(\"FiscalPeriodEnd\")[\"Delta_Capex_pos\"]\n", " paired = pd.concat([r_stock, delta_capex], axis=1).dropna()\n", " if paired.shape[0] >= MIN_OBS_FOR_BETA:\n", " capex_corr = paired.corr().iloc[0,1]\n", "\n", " return pd.Series({\n", " \"Latest_Debt_to_Equity\": latest_de,\n", " \"Latest_LT_Debt_Share\": lt_share,\n", " \"Capex_Growth_Avg\": capex_g_avg,\n", " \"Debt_Growth_Avg\": debt_g_avg,\n", " \"RE_Growth_Avg\": re_g_avg,\n", " \"DE_Slope_per_year\": de_slope,\n", " \"Debt_Financed_Capex_Share_med\": dfs_med,\n", " \"Debt_to_CapexLevel_med\": debt_to_capex_lvl_med,\n", " \"DE_beta_on_StockRet\": de_on_ret_beta,\n", " \"Capex_vs_StockRet_corr\": capex_corr,\n", " })\n", "\n", "scores = fin.groupby(\"Ticker\", 
as_index=False).apply(summarize_company).reset_index(drop=True).set_index(\"Ticker\")\n", "\n", "# --------------------------\n", "# Z-scores (signals)\n", "# --------------------------\n", "Z = pd.DataFrame(index=scores.index)\n", "# Prudence\n", "Z[\"z_low_DE\"] = -zscore(scores[\"Latest_Debt_to_Equity\"])\n", "Z[\"z_falling_DE\"] = -zscore(scores[\"DE_Slope_per_year\"])\n", "Z[\"z_low_debt_to_capex\"] = -zscore(scores[\"Debt_to_CapexLevel_med\"])\n", "# Capex financing quality\n", "Z[\"z_low_debt_fin_share\"]= -zscore(scores[\"Debt_Financed_Capex_Share_med\"])\n", "Z[\"z_LT_share\"] = zscore(scores[\"Latest_LT_Debt_Share\"])\n", "# Growth & reinvestment\n", "Z[\"z_capex_growth\"] = zscore(scores[\"Capex_Growth_Avg\"])\n", "Z[\"z_RE_growth\"] = zscore(scores[\"RE_Growth_Avg\"])\n", "# Behavior vs stock\n", "Z[\"z_DE_beta_stock\"] = -zscore(scores[\"DE_beta_on_StockRet\"])\n", "Z[\"z_capex_vs_stock\"] = zscore(scores[\"Capex_vs_StockRet_corr\"])\n", "\n", "# --------------------------\n", "# Equal-by-aspect weighting (NaN-robust)\n", "# --------------------------\n", "aspects = {\n", " \"prudence\": [\"z_low_DE\", \"z_falling_DE\", \"z_low_debt_to_capex\"],\n", " \"capex_financing\": [\"z_low_debt_fin_share\", \"z_LT_share\"],\n", " \"growth_reinvest\": [\"z_capex_growth\", \"z_RE_growth\"],\n", " \"stock_behavior\": [\"z_DE_beta_stock\", \"z_capex_vs_stock\"],\n", "}\n", "A = ~Z.isna()\n", "aspect_has_data = pd.DataFrame({a: A[cols].any(axis=1) for a, cols in aspects.items()})\n", "n_aspects = aspect_has_data.sum(axis=1).replace(0, np.nan)\n", "aspect_w = aspect_has_data.div(n_aspects, axis=0)\n", "\n", "component_w = pd.DataFrame(0.0, index=Z.index, columns=Z.columns)\n", "for a, cols in aspects.items():\n", " if not cols: continue\n", " n_comp = A[cols].sum(axis=1).replace(0, np.nan)\n", " per_comp_w = A[cols].div(n_comp, axis=0).mul(aspect_w[a], axis=0)\n", " component_w[cols] = per_comp_w\n", "\n", "scores[\"Task1_Behavior_Score\"] = (Z.fillna(0) * component_w.fillna(0)).sum(axis=1)\n", "\n", "# --------------------------\n", "# Aspect-level Ideal flag (≥3 of 4 aspects)\n", "# --------------------------\n", "checks = pd.DataFrame(index=scores.index)\n", "checks[\"low_DE_now\"] = scores[\"Latest_Debt_to_Equity\"] < np.nanmedian(scores[\"Latest_Debt_to_Equity\"])\n", "checks[\"falling_DE\"] = scores[\"DE_Slope_per_year\"] < 0\n", "checks[\"capex_growing\"] = scores[\"Capex_Growth_Avg\"] > 0\n", "checks[\"prudent_fin_share\"] = (scores[\"Debt_Financed_Capex_Share_med\"] < 0.6)\n", "checks[\"low_debt_to_capex\"] = (scores[\"Debt_to_CapexLevel_med\"] < np.nanmedian(scores[\"Debt_to_CapexLevel_med\"]))\n", "checks[\"LT_share_ok\"] = scores[\"Latest_LT_Debt_Share\"] > np.nanmedian(scores[\"Latest_LT_Debt_Share\"])\n", "checks[\"DE_drops_when_stock_up\"] = scores[\"DE_beta_on_StockRet\"] < 0\n", "checks[\"capex_moves_with_stock\"] = scores[\"Capex_vs_StockRet_corr\"] > 0\n", "\n", "aspect_checks = {\n", " \"prudence\": [\"low_DE_now\", \"falling_DE\", \"low_debt_to_capex\"], # ≥2\n", " \"capex_financing\": [\"prudent_fin_share\", \"LT_share_ok\"], # ≥1\n", " \"growth_reinvest\": [\"capex_growing\"], # ≥1\n", " \"stock_behavior\": [\"DE_drops_when_stock_up\", \"capex_moves_with_stock\"] # all available\n", "}\n", "\n", "def _aspect_pass_for_row(row: pd.Series, names, mode: str):\n", " vals = row[names]\n", " avail = vals.notna()\n", " if not avail.any():\n", " return np.nan\n", " trues = vals.fillna(False).sum()\n", " total = avail.sum()\n", " if mode == \"at_least_2\":\n", " return trues >= 
{ "cell_type": "code", "execution_count": 49, "id": "b85075be", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "=== Task 2 Equity Portfolio — Diagnostics ===\n", "OPT_DEBT_MODE: DE_CAP\n", "Weighted Avg D/E: 0.352 (cap: 0.352, 60th pct)\n", "Portfolio DFS (weighted avg): 1.123 (target >= 0.60)\n", "Weight in high-DFS names: 0.500 (target >= 0.50)\n", "Names in portfolio: 10 / 34\n", "\n", "✓ Saved task2_equity_portfolio_allocation.csv, task2_equity_inputs_snapshot.csv, task2_equity_diagnostics.txt\n" ] } ], "source": [
"# -*- coding: utf-8 -*-\n",
"\"\"\"\n",
"Task 2 (reinterpreted) — Optimize a STOCK portfolio's expected return\n",
"subject to portfolio-level DEBT constraints and \"capex mainly debt-financed\" preference.\n",
"\n",
"Inputs (must exist in working directory):\n",
" - copper_outperformers_financials.csv (long table from your pipeline)\n",
" - outperforming_stocks.csv (Date + ticker columns with prices)\n",
"\n",
"Outputs:\n",
" - task2_equity_portfolio_allocation.csv\n",
" - task2_equity_inputs_snapshot.csv\n",
" - task2_equity_diagnostics.txt\n",
"\n",
"Notes:\n",
" - Choose OPT_DEBT_MODE = \"DE_CAP\" (cap portfolio-weighted avg D/E) or \"DEBT_BUDGET_USD\" (cap total issuer debt dollars)\n",
" - Uses trailing equity CAGR as the expected-return proxy (RET_YEARS)\n",
" - Enforces \"capex mainly debt-financed\" via:\n",
"     (a) portfolio DFS floor (weighted avg DFS >= PORTFOLIO_DFS_MIN), and\n",
"     (b) minimum weight in high-DFS names (>= MIN_WEIGHT_HIGH_DFS)\n",
" - If SciPy is unavailable, falls back to a greedy heuristic.\n",
"\"\"\"\n",
"\n",
"import pandas as pd\n",
"import numpy as np\n",
"from pathlib import Path\n",
"\n",
"# Optional linear program solver; falls back to greedy if not available\n",
"try:\n",
"    from scipy.optimize import linprog\n",
"    SCIPY_OK = True\n",
"except Exception:\n",
"    SCIPY_OK = False\n",
"\n",
"import yfinance as yf\n",
"\n",
"FIN_CSV = \"copper_outperformers_financials.csv\"\n",
"PX_CSV = \"outperforming_stocks.csv\"\n",
"\n",
"# --------------------------\n",
"# Tunables\n",
"# --------------------------\n",
"# Expected return proxy\n",
"RET_YEARS = 3  # trailing CAGR horizon; fallback to 1 if needed\n",
"\n",
"# Portfolio constraints\n",
"MAX_NAME_WEIGHT = 0.12  # per-name cap (fraction of portfolio)\n",
"OPT_DEBT_MODE = \"DE_CAP\"  # \"DE_CAP\" or \"DEBT_BUDGET_USD\"\n",
"\n",
"# If OPT_DEBT_MODE == \"DE_CAP\": (scale-invariant)\n",
"DE_AVG_PCTL_CAP = 60  # cap weighted avg D/E at this cross-sectional percentile\n",
"\n",
"# If OPT_DEBT_MODE == \"DEBT_BUDGET_USD\": (dollar cap)\n",
"EQUITY_BUDGET_USD = 100_000_000.0  # dollars invested in equity\n",
"DEBT_BUDGET_USD = 2_000_000_000.0  # cap on sum_i (w_i * EQUITY_BUDGET * TotalDebt_i)\n",
"\n",
"# \"Capex mainly debt-financed\"\n",
"PORTFOLIO_DFS_MIN = 0.60  # weighted average DFS must be >= this\n",
"HIGH_DFS_THRESH = 0.60  # threshold for high-DFS bucket\n",
"MIN_WEIGHT_HIGH_DFS = 0.50  # at least this fraction in high-DFS names\n",
"\n",
"# Industry normalization gates (optional)\n",
"APPLY_INDUSTRY_GATES = True\n",
"IND_GATES = {\n",
"    \"Debt_to_Equity_pctl_max\": 90,  # drop if above 90th pct within industry\n",
"    \"Debt_Growth_Pct_pctl_max\": 90\n",
"}\n",
"\n",
"# --------------------------\n",
"# Helpers\n",
"# --------------------------\n",
"def trailing_cagr(prices: pd.Series, years=3):\n",
"    p = prices.dropna()\n",
"    if p.size < 2:\n",
"        return np.nan\n",
"    end = p.index.max()\n",
"    start_cut = end - pd.DateOffset(years=years)\n",
"    p0 = p.loc[:start_cut].iloc[-1] if (p.index <= start_cut).any() else p.iloc[0]\n",
"    p1 = p.iloc[-1]\n",
"    if p0 <= 0 or p1 <= 0:\n",
"        return np.nan\n",
"    return (p1 / p0) ** (1.0 / max(years, 1e-6)) - 1.0\n",
"\n",
"def latest_non_na(s: pd.Series):\n",
"    s = s.dropna()\n",
"    return s.iloc[-1] if not s.empty else np.nan\n",
"\n",
"def industry_for_tickers(tickers):\n",
"    out = {}\n",
"    for tk in tickers:\n",
"        try:\n",
"            info = yf.Ticker(tk).get_info()  # .info may be deprecated in some yfinance versions\n",
"            out[tk] = info.get(\"industry\") or info.get(\"sector\") or \"Unknown\"\n",
"        except Exception:\n",
"            out[tk] = \"Unknown\"\n",
"    return out\n",
"\n",
"def group_percentile(series, groups, p):\n",
"    return series.groupby(groups).transform(lambda s: np.nanpercentile(s.dropna(), p) if s.notna().any() else np.nan)\n",
"\n",
"# --------------------------\n",
"# Load data\n",
"# --------------------------\n",
"fin = pd.read_csv(FIN_CSV, parse_dates=[\"FiscalPeriodEnd\"]).sort_values([\"Ticker\",\"FiscalPeriodEnd\"])\n",
"px = pd.read_csv(PX_CSV, parse_dates=[\"Date\"]).set_index(\"Date\").sort_index()\n",
"\n",
"# Ensure needed columns exist\n",
"need = [\"Total_Debt\",\"Debt_to_Equity\",\"Debt_Financed_Capex_Share\",\n",
"        \"Capex_Growth_Pct\",\"Debt_Growth_Pct\"]\n",
"for c in need:\n",
"    if c not in fin.columns:\n",
"        fin[c] = np.nan\n",
"\n",
"# --------------------------\n",
"# Issuer snapshot (latest levels + average growth metrics)\n",
"# --------------------------\n",
"snap = (fin.sort_values(\"FiscalPeriodEnd\")\n",
"        .groupby(\"Ticker\")\n",
"        .agg(Total_Debt=(\"Total_Debt\", latest_non_na),\n",
"             Debt_to_Equity=(\"Debt_to_Equity\", latest_non_na),\n",
"             Debt_Financed_Capex_Share=(\"Debt_Financed_Capex_Share\",\n",
"                                        lambda s: s.dropna().median() if s.notna().any() else np.nan),\n",
"             Capex_Growth_Pct=(\"Capex_Growth_Pct\", lambda s: 
s.mean(skipna=True)),\n", " Debt_Growth_Pct=(\"Debt_Growth_Pct\", lambda s: s.mean(skipna=True))\n", " ))\n", "\n", "# Expected returns from price history\n", "def exp_ret(tk):\n", " if tk not in px.columns:\n", " return np.nan\n", " r3 = trailing_cagr(px[tk], years=RET_YEARS)\n", " return r3 if np.isfinite(r3) else trailing_cagr(px[tk], years=1)\n", "\n", "snap[\"Exp_Return\"] = [exp_ret(tk) for tk in snap.index]\n", "\n", "# Industry tags and optional gates\n", "ind_map = industry_for_tickers(snap.index)\n", "snap[\"Industry\"] = [ind_map.get(tk, \"Unknown\") for tk in snap.index]\n", "\n", "if APPLY_INDUSTRY_GATES:\n", " for col, pctl in IND_GATES.items():\n", " metric = col.replace(\"_pctl_max\",\"\")\n", " if metric in snap.columns and pctl is not None:\n", " thr = group_percentile(snap[metric], snap[\"Industry\"], pctl)\n", " snap = snap[(snap[metric].isna()) | (snap[metric] <= thr)]\n", "\n", "# Keep feasible names\n", "opt = snap.dropna(subset=[\"Exp_Return\",\"Debt_to_Equity\",\"Debt_Financed_Capex_Share\"]).copy()\n", "if opt.empty:\n", " raise RuntimeError(\"No tickers have Exp_Return, D/E, and DFS available for optimization.\")\n", "\n", "# DFS bucket\n", "opt[\"is_high_DFS\"] = (opt[\"Debt_Financed_Capex_Share\"] >= HIGH_DFS_THRESH).astype(float)\n", "\n", "# --------------------------\n", "# Build optimization problem\n", "# --------------------------\n", "tickers = opt.index.to_list()\n", "mu = opt[\"Exp_Return\"].to_numpy()\n", "DE = opt[\"Debt_to_Equity\"].to_numpy()\n", "DFS = opt[\"Debt_Financed_Capex_Share\"].to_numpy()\n", "HIGH = opt[\"is_high_DFS\"].to_numpy()\n", "n = len(tickers)\n", "\n", "# Equality: sum w_i = 1\n", "A_eq = np.ones((1, n))\n", "b_eq = np.array([1.0], dtype=float)\n", "\n", "# Inequalities\n", "A_ub_list = []\n", "b_ub_list = []\n", "\n", "# Per-name cap: w_i <= MAX_NAME_WEIGHT\n", "A_ub_list.extend(np.eye(n))\n", "b_ub_list.extend([MAX_NAME_WEIGHT] * n)\n", "\n", "# Debt constraint\n", "if OPT_DEBT_MODE.upper() == \"DE_CAP\":\n", " # cap weighted average D/E at DE_AVG_PCTL_CAP percentile\n", " DE_cap = np.nanpercentile(opt[\"Debt_to_Equity\"].dropna(), DE_AVG_PCTL_CAP)\n", " A_ub_list.append(DE) # sum w_i * DE_i <= DE_cap\n", " b_ub_list.append(DE_cap)\n", "elif OPT_DEBT_MODE.upper() == \"DEBT_BUDGET_USD\":\n", " # sum (w_i * EQUITY_BUDGET_USD) * Total_Debt_i <= DEBT_BUDGET_USD\n", " TD = opt[\"Total_Debt\"].fillna(0.0).to_numpy()\n", " A_ub_list.append(TD) # divide both sides by EQUITY_BUDGET_USD\n", " b_ub_list.append(DEBT_BUDGET_USD / EQUITY_BUDGET_USD)\n", "else:\n", " raise ValueError(\"OPT_DEBT_MODE must be 'DE_CAP' or 'DEBT_BUDGET_USD'.\")\n", "\n", "# DFS floor: sum w_i * DFS_i >= PORTFOLIO_DFS_MIN → -sum w_i * DFS_i <= -PORTFOLIO_DFS_MIN\n", "if np.isfinite(PORTFOLIO_DFS_MIN) and PORTFOLIO_DFS_MIN > 0:\n", " A_ub_list.append(-DFS)\n", " b_ub_list.append(-PORTFOLIO_DFS_MIN)\n", "\n", "# High-DFS weight floor: sum w_i * 1{high} >= MIN_WEIGHT_HIGH_DFS → -sum w_i * HIGH_i <= -MIN_WEIGHT_HIGH_DFS\n", "if np.isfinite(MIN_WEIGHT_HIGH_DFS) and MIN_WEIGHT_HIGH_DFS > 0:\n", " A_ub_list.append(-HIGH)\n", " b_ub_list.append(-MIN_WEIGHT_HIGH_DFS)\n", "\n", "A_ub = np.vstack(A_ub_list)\n", "b_ub = np.array(b_ub_list, dtype=float)\n", "bounds = [(0.0, MAX_NAME_WEIGHT) for _ in range(n)]\n", "\n", "# --------------------------\n", "# Solve (LP if available; greedy fallback otherwise)\n", "# --------------------------\n", "if SCIPY_OK:\n", " res = linprog(c=-mu, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,\n", " bounds=bounds, method=\"highs\")\n", " 
use_lp = res.success\n", "else:\n", " use_lp = False\n", "\n", "if not use_lp:\n", " # Greedy fallback: satisfy DFS floors first, then fill by return under caps (no 'nonlocal')\n", " w = np.zeros(n, dtype=float)\n", "\n", " def fill_floor(w_arr: np.ndarray, mask: np.ndarray, target: float) -> np.ndarray:\n", " \"\"\"Ensure sum(w * mask) >= target by topping up best candidates under caps.\"\"\"\n", " if target <= 0:\n", " return w_arr\n", " achieved = float(w_arr @ mask)\n", " remaining = target - achieved\n", " if remaining <= 1e-12:\n", " return w_arr\n", " idxs = np.where(mask > 0)[0]\n", " order = idxs[np.argsort(-mu[idxs])] # prefer higher expected return\n", " for i in order:\n", " if remaining <= 0:\n", " break\n", " cap_left = MAX_NAME_WEIGHT - w_arr[i]\n", " if cap_left <= 0:\n", " continue\n", " add = min(cap_left, remaining)\n", " if add > 0:\n", " w_arr[i] += add\n", " remaining -= add\n", " return w_arr\n", "\n", " # 1) Floor on weight in high-DFS names\n", " if np.isfinite(MIN_WEIGHT_HIGH_DFS) and MIN_WEIGHT_HIGH_DFS > 0:\n", " w = fill_floor(w, HIGH, MIN_WEIGHT_HIGH_DFS)\n", "\n", " # 2) Portfolio DFS floor: push weight toward names with high (DFS * mu)\n", " if np.isfinite(PORTFOLIO_DFS_MIN) and PORTFOLIO_DFS_MIN > 0:\n", " deficit = PORTFOLIO_DFS_MIN - float(w @ DFS)\n", " if deficit > 1e-12:\n", " order = np.argsort(-(DFS * mu)) # prefer high DFS and high return\n", " for i in order:\n", " if deficit <= 1e-12:\n", " break\n", " if DFS[i] <= 0:\n", " continue\n", " cap_left = MAX_NAME_WEIGHT - w[i]\n", " if cap_left <= 0:\n", " continue\n", " needed = deficit / max(DFS[i], 1e-9)\n", " add = min(cap_left, needed)\n", " if add > 0:\n", " w[i] += add\n", " deficit = PORTFOLIO_DFS_MIN - float(w @ DFS)\n", "\n", " # 3) Fill remaining weight purely by expected return\n", " rem = 1.0 - w.sum()\n", " if rem > 1e-9:\n", " order = np.argsort(-mu)\n", " for i in order:\n", " if rem <= 0:\n", " break\n", " cap_left = MAX_NAME_WEIGHT - w[i]\n", " if cap_left <= 0:\n", " continue\n", " add = min(cap_left, rem)\n", " if add > 0:\n", " w[i] += add\n", " rem -= add\n", "\n", " # Package result like linprog\n", " class _Res: pass\n", " res = _Res(); res.x = w\n", "\n", "# Normalize in case of tiny numerical drift\n", "w = pd.Series(res.x, index=tickers, name=\"Weight\")\n", "w = w.clip(lower=0.0, upper=MAX_NAME_WEIGHT)\n", "w = w / w.sum()\n", "\n", "# --------------------------\n", "# Diagnostics & save\n", "# --------------------------\n", "weighted_DE = float(np.nansum(w.to_numpy() * DE))\n", "weighted_DFS = float(np.nansum(w.to_numpy() * DFS))\n", "high_share = float(np.nansum(w.to_numpy() * HIGH))\n", "\n", "implied_debt_usd = np.nan\n", "if OPT_DEBT_MODE.upper() == \"DEBT_BUDGET_USD\":\n", " TD = opt[\"Total_Debt\"].fillna(0.0).to_numpy()\n", " implied_debt_usd = float(np.nansum(w.to_numpy() * TD) * EQUITY_BUDGET_USD)\n", "\n", "out = pd.DataFrame({\n", " \"Ticker\": tickers,\n", " \"Weight\": w.values,\n", " \"Exp_Return\": mu,\n", " \"Debt_to_Equity\": DE,\n", " \"Debt_Financed_Capex_Share\": DFS,\n", " \"High_DFS_flag\": (DFS >= HIGH_DFS_THRESH).astype(int),\n", " \"Total_Debt\": opt[\"Total_Debt\"].to_numpy(),\n", " \"Industry\": opt[\"Industry\"].to_numpy()\n", "}).sort_values(\"Weight\", ascending=False)\n", "\n", "snap_to_save = opt.reset_index()\n", "out.to_csv(\"task2_equity_portfolio_allocation.csv\", index=False, float_format=\"%.6f\")\n", "snap_to_save.to_csv(\"task2_equity_inputs_snapshot.csv\", index=False, float_format=\"%.6f\")\n", "\n", "diag_lines = []\n", 
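"\n",
"# Hedged editor's addition: cheap invariants the solution should satisfy by\n",
"# construction on both the LP and greedy paths. Silent when all is well, so\n",
"# the recorded diagnostics below are unchanged.\n",
"if abs(float(w.sum()) - 1.0) > 1e-6 or float(w.max()) > MAX_NAME_WEIGHT + 1e-6:\n",
"    print(f\"⚠️ Weight invariants violated: sum={w.sum():.6f}, max={w.max():.6f}\")\n",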
"diag_lines.append(f\"OPT_DEBT_MODE: {OPT_DEBT_MODE}\")\n", "if OPT_DEBT_MODE.upper() == \"DE_CAP\":\n", " de_cap_val = np.nanpercentile(opt['Debt_to_Equity'].dropna(), DE_AVG_PCTL_CAP)\n", " diag_lines.append(f\"Weighted Avg D/E: {weighted_DE:.3f} (cap: {de_cap_val:.3f}, {DE_AVG_PCTL_CAP}th pct)\")\n", "else:\n", " diag_lines.append(f\"Equity Budget (USD): {EQUITY_BUDGET_USD:,.0f}\")\n", " diag_lines.append(f\"Implied Debt Exposure (USD): {implied_debt_usd:,.0f} (cap: {DEBT_BUDGET_USD:,.0f})\")\n", "diag_lines.append(f\"Portfolio DFS (weighted avg): {weighted_DFS:.3f} (target >= {PORTFOLIO_DFS_MIN:.2f})\")\n", "diag_lines.append(f\"Weight in high-DFS names: {high_share:.3f} (target >= {MIN_WEIGHT_HIGH_DFS:.2f})\")\n", "diag_lines.append(f\"Names in portfolio: {(w > 1e-6).sum()} / {len(w)}\")\n", "\n", "print(\"\\n=== Task 2 Equity Portfolio — Diagnostics ===\")\n", "for line in diag_lines:\n", " print(line)\n", "\n", "Path(\"task2_equity_diagnostics.txt\").write_text(\"\\n\".join(diag_lines))\n", "print(\"\\n✓ Saved task2_equity_portfolio_allocation.csv, task2_equity_inputs_snapshot.csv, task2_equity_diagnostics.txt\")\n" ] }, { "cell_type": "code", "execution_count": 50, "id": "420631b2", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "=== Portfolio Performance Summary ===\n", " Start End PeriodsPerYear Total_Return CAGR Ann_Vol Sharpe Max_Drawdown Obs Freq RF_Annual\n", "2020-01-20 2025-06-11 252 6.132665 0.421388 0.319802 1.260158 -0.529606 1408 D 0.0\n", "\n", "Saved:\n", " - task2_equity_perf_summary.csv\n", " - task2_equity_nav.csv\n", " - task2_equity_rolling_sharpe.csv\n" ] } ], "source": [ "# -*- coding: utf-8 -*-\n", "\"\"\"\n", "Evaluate Task 2 equity portfolio performance (static weights).\n", "Inputs:\n", " - task2_equity_portfolio_allocation.csv (columns: Ticker, Weight, ...)\n", " - outperforming_stocks.csv (Date + ticker columns with prices)\n", "Outputs:\n", " - task2_equity_perf_summary.csv\n", " - task2_equity_nav.csv\n", " - task2_equity_rolling_sharpe.csv\n", "\"\"\"\n", "\n", "import pandas as pd\n", "import numpy as np\n", "\n", "# ------------------ CONFIG ------------------\n", "WEIGHTS_CSV = \"task2_equity_portfolio_allocation.csv\"\n", "PRICES_CSV = \"outperforming_stocks.csv\"\n", "\n", "# Optional evaluation window (ISO dates or None to use full overlap)\n", "START_DATE = None # e.g., \"2022-01-01\"\n", "END_DATE = None\n", "\n", "# Frequency: 'D' for daily, 'W-FRI' for weekly (Friday)\n", "FREQ = \"D\"\n", "\n", "# Annual risk-free rate for Sharpe (set to 0.0 if unknown)\n", "RISK_FREE_ANNUAL = 0.00\n", "\n", "# --------------------------------------------\n", "\n", "def periods_per_year(freq: str) -> int:\n", " return 52 if freq.startswith(\"W\") else 252\n", "\n", "def to_period_returns(px: pd.DataFrame, freq: str) -> pd.DataFrame:\n", " if freq == \"D\":\n", " return px.pct_change()\n", " else:\n", " # last price of each period (e.g., Friday) then pct_change\n", " return px.resample(freq).last().pct_change()\n", "\n", "def max_drawdown(nav: pd.Series) -> float:\n", " rolling_max = nav.cummax()\n", " dd = nav / rolling_max - 1.0\n", " return float(dd.min())\n", "\n", "def annualize_return(mean_period_ret: float, ppy: int) -> float:\n", " # If you want geometric CAGR from equity curve, we’ll compute that separately.\n", " return mean_period_ret * ppy\n", "\n", "def cagr_from_nav(nav: pd.Series, ppy: int) -> float:\n", " nav = nav.dropna()\n", " if nav.empty:\n", " return np.nan\n", " T = len(nav) / ppy\n", " if 
{ "cell_type": "code", "execution_count": 50, "id": "420631b2", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "=== Portfolio Performance Summary ===\n", " Start End PeriodsPerYear Total_Return CAGR Ann_Vol Sharpe Max_Drawdown Obs Freq RF_Annual\n", "2020-01-20 2025-06-11 252 6.132665 0.421388 0.319802 1.260158 -0.529606 1408 D 0.0\n", "\n", "Saved:\n", " - task2_equity_perf_summary.csv\n", " - task2_equity_nav.csv\n", " - task2_equity_rolling_sharpe.csv\n" ] } ], "source": [
"# -*- coding: utf-8 -*-\n",
"\"\"\"\n",
"Evaluate Task 2 equity portfolio performance (static weights).\n",
"Inputs:\n",
" - task2_equity_portfolio_allocation.csv (columns: Ticker, Weight, ...)\n",
" - outperforming_stocks.csv (Date + ticker columns with prices)\n",
"Outputs:\n",
" - task2_equity_perf_summary.csv\n",
" - task2_equity_nav.csv\n",
" - task2_equity_rolling_sharpe.csv\n",
"\"\"\"\n",
"\n",
"import pandas as pd\n",
"import numpy as np\n",
"\n",
"# ------------------ CONFIG ------------------\n",
"WEIGHTS_CSV = \"task2_equity_portfolio_allocation.csv\"\n",
"PRICES_CSV = \"outperforming_stocks.csv\"\n",
"\n",
"# Optional evaluation window (ISO dates or None to use full overlap)\n",
"START_DATE = None  # e.g., \"2022-01-01\"\n",
"END_DATE = None\n",
"\n",
"# Frequency: 'D' for daily, 'W-FRI' for weekly (Friday)\n",
"FREQ = \"D\"\n",
"\n",
"# Annual risk-free rate for Sharpe (set to 0.0 if unknown)\n",
"RISK_FREE_ANNUAL = 0.00\n",
"\n",
"# --------------------------------------------\n",
"\n",
"def periods_per_year(freq: str) -> int:\n",
"    return 52 if freq.startswith(\"W\") else 252\n",
"\n",
"def to_period_returns(px: pd.DataFrame, freq: str) -> pd.DataFrame:\n",
"    if freq == \"D\":\n",
"        return px.pct_change()\n",
"    else:\n",
"        # last price of each period (e.g., Friday) then pct_change\n",
"        return px.resample(freq).last().pct_change()\n",
"\n",
"def max_drawdown(nav: pd.Series) -> float:\n",
"    rolling_max = nav.cummax()\n",
"    dd = nav / rolling_max - 1.0\n",
"    return float(dd.min())\n",
"\n",
"def annualize_return(mean_period_ret: float, ppy: int) -> float:\n",
"    # Simple arithmetic annualization; the geometric CAGR is computed from\n",
"    # the equity curve in cagr_from_nav below. (Currently unused.)\n",
"    return mean_period_ret * ppy\n",
"\n",
"def cagr_from_nav(nav: pd.Series, ppy: int) -> float:\n",
"    # NAV is built from a base of 1.0, so nav.iloc[-1] is the total growth factor.\n",
"    nav = nav.dropna()\n",
"    if nav.empty:\n",
"        return np.nan\n",
"    T = len(nav) / ppy\n",
"    if nav.iloc[0] <= 0:\n",
"        return np.nan\n",
"    return nav.iloc[-1] ** (1.0 / max(T, 1e-9)) - 1.0\n",
"\n",
"def annualized_vol(std_period_ret: float, ppy: int) -> float:\n",
"    return std_period_ret * np.sqrt(ppy)\n",
"\n",
"def rolling_sharpe(returns: pd.Series, window: int, ppy: int, rf_annual: float) -> pd.Series:\n",
"    rf_period = rf_annual / ppy\n",
"    ex = returns - rf_period\n",
"    roll_mean = ex.rolling(window).mean()\n",
"    roll_std = returns.rolling(window).std(ddof=1)  # raw-return std; equals excess-return std for a constant rf\n",
"    rs = (roll_mean / roll_std) * np.sqrt(ppy)\n",
"    return rs\n",
"\n",
"# ---------- Load data ----------\n",
"weights_df = pd.read_csv(WEIGHTS_CSV)\n",
"px = pd.read_csv(PRICES_CSV, parse_dates=[\"Date\"]).set_index(\"Date\").sort_index()\n",
"\n",
"# Keep only tickers with positive weights and present in prices\n",
"w = weights_df.set_index(\"Ticker\")[\"Weight\"].astype(float)\n",
"w = w[w > 0]\n",
"avail = [tk for tk in w.index if tk in px.columns]\n",
"missing = sorted(set(w.index) - set(avail))\n",
"if missing:\n",
"    print(f\"Warning: dropping {len(missing)} tickers not found in price file: {missing[:5]}{'...' if len(missing)>5 else ''}\")\n",
"w = w.loc[avail]\n",
"w = w / w.sum()  # re-normalize to 1 after any drops\n",
"\n",
"# Subset price matrix to portfolio names\n",
"px_sub = px[avail].copy()\n",
"\n",
"# Optional date window\n",
"if START_DATE:\n",
"    px_sub = px_sub.loc[pd.Timestamp(START_DATE):]\n",
"if END_DATE:\n",
"    px_sub = px_sub.loc[:pd.Timestamp(END_DATE)]\n",
"\n",
"# Compute period returns\n",
"rets_asset = to_period_returns(px_sub, FREQ)\n",
"# Drop rows where every asset return is NaN (e.g., the first pct_change row)\n",
"rets_asset = rets_asset.dropna(how=\"all\")\n",
"\n",
"# Align weights to columns and compute portfolio returns (static weights)\n",
"w = w.reindex(rets_asset.columns).fillna(0.0)\n",
"port_ret = (rets_asset * w).sum(axis=1)\n",
"\n",
"# Build equity curve (start at 1.0)\n",
"nav = (1.0 + port_ret.fillna(0)).cumprod()\n",
"\n",
"# Metrics\n",
"PPY = periods_per_year(FREQ)\n",
"rf_period = RISK_FREE_ANNUAL / PPY\n",
"excess = port_ret - rf_period\n",
"\n",
"mean_period = port_ret.mean()\n",
"std_period = port_ret.std(ddof=1)\n",
"\n",
"cagr = cagr_from_nav(nav, PPY)  # geometric\n",
"ann_vol = annualized_vol(std_period, PPY)\n",
"sharpe = (excess.mean() / std_period) * np.sqrt(PPY) if std_period > 0 else np.nan\n",
"tot_return = float(nav.iloc[-1] - 1.0)\n",
"mdd = max_drawdown(nav)\n",
"\n",
"summary = pd.DataFrame([{\n",
"    \"Start\": nav.index.min(),\n",
"    \"End\": nav.index.max(),\n",
"    \"PeriodsPerYear\": PPY,\n",
"    \"Total_Return\": tot_return,\n",
"    \"CAGR\": cagr,\n",
"    \"Ann_Vol\": ann_vol,\n",
"    \"Sharpe\": sharpe,\n",
"    \"Max_Drawdown\": mdd,\n",
"    \"Obs\": int(port_ret.dropna().shape[0]),\n",
"    \"Freq\": FREQ,\n",
"    \"RF_Annual\": RISK_FREE_ANNUAL\n",
"}])\n",
"\n",
"# Rolling Sharpe (about 6 months for daily, 26 weeks for weekly)\n",
"roll_win = 126 if FREQ == \"D\" else 26\n",
"rolling_sh = rolling_sharpe(port_ret, roll_win, PPY, RISK_FREE_ANNUAL)\n",
"\n",
"# Save outputs\n",
"summary.to_csv(\"task2_equity_perf_summary.csv\", index=False, float_format=\"%.6f\")\n",
"# (Note: the comparison cell below overwrites task2_equity_nav.csv with a two-column NAV table.)\n",
"nav.rename(\"NAV\").to_csv(\"task2_equity_nav.csv\", header=True)\n",
"rolling_sh.rename(\"Rolling_Sharpe\").to_csv(\"task2_equity_rolling_sharpe.csv\", header=True)\n",
"\n",
"print(\"\\n=== Portfolio Performance Summary ===\")\n",
"print(summary.to_string(index=False))\n",
"\n",
"print(\"\\nSaved:\")\n",
"print(\" - task2_equity_perf_summary.csv\")\n",
"print(\" - task2_equity_nav.csv\")\n",
"print(\" - 
task2_equity_rolling_sharpe.csv\")\n" ] }, { "cell_type": "code", "execution_count": 54, "id": "eabd3717", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "=== Performance (aligned window) ===\n", " Name Start End Total_Return CAGR Ann_Vol Sharpe Max_Drawdown Obs\n", "Portfolio 2020-01-21 2025-06-11 6.132665 0.439910 0.326600 1.305427 -0.520011 1357\n", " HG=F 2020-01-21 2025-06-11 0.672822 0.100183 0.247592 0.528868 -0.348651 1357\n", "\n", "Saved:\n", " - task2_equity_nav_vs_HG.png\n", " - task2_equity_vs_HG_metrics.csv\n", " - task2_equity_nav.csv\n", " - task2_equity_rolling_sharpe.csv\n" ] } ], "source": [ "# -*- coding: utf-8 -*-\n", "\"\"\"\n", "Compare portfolio equity curve vs underlying (HG=F) and report metrics.\n", "\n", "Inputs:\n", " - task2_equity_portfolio_allocation.csv (columns: Ticker, Weight, ...)\n", " - outperforming_stocks.csv (Date + ticker columns with prices)\n", "\n", "Outputs:\n", " - task2_equity_nav_vs_HG.png\n", " - task2_equity_vs_HG_metrics.csv\n", " - task2_equity_nav.csv\n", " - task2_equity_rolling_sharpe.csv\n", "\"\"\"\n", "\n", "import pandas as pd\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "import yfinance as yf\n", "from pathlib import Path\n", "\n", "# ------------------ CONFIG ------------------\n", "WEIGHTS_CSV = \"task2_equity_portfolio_allocation.csv\"\n", "PRICES_CSV = \"outperforming_stocks.csv\"\n", "\n", "# Optional evaluation window\n", "START_DATE = None # e.g., \"2020-01-01\"\n", "END_DATE = None\n", "\n", "# Frequency: 'D' (daily) or 'W-FRI' (weekly, Friday close)\n", "FREQ = \"D\"\n", "\n", "# Annual risk-free rate for Sharpe\n", "RISK_FREE_ANNUAL = 0.00\n", "\n", "# Ticker for underlying copper\n", "UNDERLYING = \"HG=F\"\n", "\n", "# ------------------ HELPERS ------------------\n", "def periods_per_year(freq: str) -> int:\n", " return 52 if freq.startswith(\"W\") else 252\n", "\n", "def resample_prices(px: pd.DataFrame, freq: str) -> pd.DataFrame:\n", " return px if freq == \"D\" else px.resample(freq).last()\n", "\n", "def to_returns(px: pd.DataFrame, freq: str) -> pd.DataFrame:\n", " pxr = resample_prices(px, freq)\n", " return pxr.pct_change()\n", "\n", "def max_drawdown(nav: pd.Series) -> float:\n", " peak = nav.cummax()\n", " dd = nav / peak - 1.0\n", " return float(dd.min())\n", "\n", "def cagr_from_nav(nav: pd.Series, ppy: int) -> float:\n", " nav = nav.dropna()\n", " if nav.empty or nav.iloc[0] <= 0:\n", " return np.nan\n", " T_years = len(nav) / ppy\n", " return nav.iloc[-1] ** (1.0 / max(T_years, 1e-9)) - 1.0\n", "\n", "def perf_summary(name: str, nav: pd.Series, rets: pd.Series, ppy: int, rf_annual: float) -> dict:\n", " rf_period = rf_annual / ppy\n", " ex = rets - rf_period\n", " mu_p = rets.mean()\n", " sd_p = rets.std(ddof=1)\n", " return {\n", " \"Name\": name,\n", " \"Start\": nav.index.min(),\n", " \"End\": nav.index.max(),\n", " \"Total_Return\": float(nav.iloc[-1] - 1.0),\n", " \"CAGR\": float(cagr_from_nav(nav, ppy)),\n", " \"Ann_Vol\": float(sd_p * np.sqrt(ppy)) if np.isfinite(sd_p) else np.nan,\n", " \"Sharpe\": float((ex.mean() / sd_p) * np.sqrt(ppy)) if (sd_p and sd_p > 0) else np.nan,\n", " \"Max_Drawdown\": float(max_drawdown(nav)),\n", " \"Obs\": int(rets.dropna().shape[0]),\n", " }\n", "\n", "def strip_tz_index(obj):\n", " \"\"\"Return a copy with tz removed from DatetimeIndex (works for Series/DataFrame).\"\"\"\n", " out = obj.copy()\n", " if isinstance(out.index, pd.DatetimeIndex) and out.index.tz is not None:\n", " # Remove timezone 
(make the index tz-naive); tz_localize(None) keeps local wall times,\n",
"        # while tz_convert(None) would convert to UTC first.\n",
"        out.index = out.index.tz_localize(None)\n",
"    return out\n",
"\n",
"# ------------------ LOAD DATA ------------------\n",
"weights_df = pd.read_csv(WEIGHTS_CSV)\n",
"px_all = pd.read_csv(PRICES_CSV, parse_dates=[\"Date\"]).set_index(\"Date\").sort_index()\n",
"px_all = strip_tz_index(px_all)\n",
"\n",
"# Align date window\n",
"if START_DATE:\n",
"    px_all = px_all.loc[pd.Timestamp(START_DATE):]\n",
"if END_DATE:\n",
"    px_all = px_all.loc[:pd.Timestamp(END_DATE)]\n",
"\n",
"# Portfolio weights (drop tickers not in price file, then renormalize)\n",
"w = (weights_df.set_index(\"Ticker\")[\"Weight\"]\n",
"     .astype(float)\n",
"     .loc[lambda s: s > 0])\n",
"\n",
"present = [tk for tk in w.index if tk in px_all.columns]\n",
"if set(present) != set(w.index):\n",
"    missing = sorted(set(w.index) - set(present))\n",
"    if missing:\n",
"        print(f\"⚠️ Dropping {len(missing)} tickers not found in prices: {missing[:6]}{'...' if len(missing) > 6 else ''}\")\n",
"w = w.loc[present]\n",
"w = w / w.sum()\n",
"\n",
"# Subset prices to portfolio names\n",
"px_port = px_all[present].copy()\n",
"\n",
"# ------------------ PORTFOLIO RETURNS & NAV ------------------\n",
"asset_rets = to_returns(px_port, FREQ).dropna(how=\"all\")\n",
"# Ensure weight vector aligns to returns columns\n",
"w = w.reindex(asset_rets.columns).fillna(0.0)\n",
"port_rets = (asset_rets * w).sum(axis=1).dropna()\n",
"nav_port = (1.0 + port_rets).cumprod().rename(\"Portfolio_NAV\")\n",
"nav_port = strip_tz_index(nav_port)\n",
"\n",
"# ------------------ UNDERLYING (HG=F) ------------------\n",
"# If HG=F exists in px_all, use it; else fetch via yfinance over the same window\n",
"if UNDERLYING in px_all.columns:\n",
"    px_under = px_all[[UNDERLYING]].copy()\n",
"else:\n",
"    # Fetch with yfinance for the overlapping window\n",
"    if not asset_rets.empty:\n",
"        start = asset_rets.index.min() - pd.Timedelta(days=5)\n",
"        end = asset_rets.index.max() + pd.Timedelta(days=5)\n",
"    else:\n",
"        start = None\n",
"        end = None\n",
"    raw = yf.Ticker(UNDERLYING).history(start=start, end=end, auto_adjust=False)\n",
"    if raw.empty:\n",
"        raise RuntimeError(f\"Could not fetch {UNDERLYING} from yfinance.\")\n",
"    px_under = raw[[\"Close\"]].rename(columns={\"Close\": UNDERLYING})\n",
"    px_under.index.name = \"Date\"\n",
"\n",
"px_under = px_under.sort_index()\n",
"px_under = strip_tz_index(px_under)\n",
"\n",
"if START_DATE:\n",
"    px_under = px_under.loc[pd.Timestamp(START_DATE):]\n",
"if END_DATE:\n",
"    px_under = px_under.loc[:pd.Timestamp(END_DATE)]\n",
"\n",
"under_rets = to_returns(px_under, FREQ)[UNDERLYING].dropna()\n",
"nav_under = (1.0 + under_rets).cumprod().rename(\"HG=F_NAV\")\n",
"nav_under = strip_tz_index(nav_under)\n",
"\n",
"# ------------------ ALIGN & METRICS ------------------\n",
"# Align NAVs to common dates for plotting/metrics (both tz-naive now)\n",
"nav_df = pd.concat([nav_port, nav_under], axis=1).dropna()\n",
"# Re-base both curves to 1.0 at the first common date, so the aligned\n",
"# Total_Return/CAGR below and the \"start = 1.0\" plot label are exact.\n",
"nav_df = nav_df / nav_df.iloc[0]\n",
"nav_df.to_csv(\"task2_equity_nav.csv\")\n",
"\n",
"PPY = periods_per_year(FREQ)\n",
"port_aligned_rets = nav_df[\"Portfolio_NAV\"].pct_change().dropna()\n",
"under_aligned_rets = nav_df[\"HG=F_NAV\"].pct_change().dropna()\n",
"\n",
"# Metrics for both (aligned window)\n",
"m_port = perf_summary(\"Portfolio\", nav_df[\"Portfolio_NAV\"], port_aligned_rets, PPY, RISK_FREE_ANNUAL)\n",
"m_under = perf_summary(\"HG=F\", nav_df[\"HG=F_NAV\"], under_aligned_rets, PPY, RISK_FREE_ANNUAL)\n",
"\n",
"metrics_df = pd.DataFrame([m_port, m_under])\n",
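"\n",
"# Hedged editor's addition: return correlation and annualized tracking error\n",
"# of the portfolio vs the underlying on the aligned window, computed from the\n",
"# series defined above. Printed only, so the saved artifacts are unchanged.\n",
"ret_corr = float(port_aligned_rets.corr(under_aligned_rets))\n",
"track_err = float((port_aligned_rets - under_aligned_rets).std(ddof=1) * np.sqrt(PPY))\n",
"print(f\"Corr(Portfolio, {UNDERLYING}) = {ret_corr:.3f}; annualized tracking error = {track_err:.3f}\")\n",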
"metrics_df.to_csv(\"task2_equity_vs_HG_metrics.csv\", index=False, float_format=\"%.6f\")\n", "\n", "# ------------------ ROLLING SHARPE (portfolio) ------------------\n", "roll_win = 126 if FREQ == \"D\" else 26\n", "rf_period = RISK_FREE_ANNUAL / PPY\n", "port_excess = port_aligned_rets - rf_period\n", "rolling_sharpe = (port_excess.rolling(roll_win).mean() / port_aligned_rets.rolling(roll_win).std(ddof=1)) * np.sqrt(PPY)\n", "rolling_sharpe.rename(\"Rolling_Sharpe\").to_csv(\"task2_equity_rolling_sharpe.csv\", header=True)\n", "\n", "# ------------------ PLOT ------------------\n", "plt.figure(figsize=(10, 6))\n", "nav_df.plot(ax=plt.gca()) # both NAV lines normalized to 1\n", "plt.title(\"Equity Curve: Portfolio vs HG=F\")\n", "plt.xlabel(\"Date\")\n", "plt.ylabel(\"NAV (start = 1.0)\")\n", "plt.legend(loc=\"best\")\n", "plt.tight_layout()\n", "plt.savefig(\"task2_equity_nav_vs_HG.png\", dpi=150)\n", "plt.close()\n", "\n", "# ------------------ PRINT SUMMARY ------------------\n", "print(\"\\n=== Performance (aligned window) ===\")\n", "print(metrics_df.to_string(index=False))\n", "\n", "print(\"\\nSaved:\")\n", "print(\" - task2_equity_nav_vs_HG.png\")\n", "print(\" - task2_equity_vs_HG_metrics.csv\")\n", "print(\" - task2_equity_nav.csv\")\n", "print(\" - task2_equity_rolling_sharpe.csv\")\n" ] } ], "metadata": { "kernelspec": { "display_name": ".venv", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.11" } }, "nbformat": 4, "nbformat_minor": 5 }