Darshan03 commited on
Commit
96e7d53
·
verified ·
1 Parent(s): 0e754f2

Upload 3 files

Browse files
Files changed (3) hide show
  1. portfolio.py +268 -0
  2. scenario.py +145 -0
  3. simluation_data.py +172 -0
portfolio.py ADDED
@@ -0,0 +1,268 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import os
3
+ import json
4
+ import yfinance as yf
5
+ from langchain_core.output_parsers import JsonOutputParser
6
+ from pydantic import BaseModel, Field, ValidationError
7
+ from typing import List, Optional, Dict
8
+ from langchain_groq import ChatGroq
9
+ from dataclasses import dataclass, field
10
+ from dotenv import load_dotenv
11
+ import pickle
12
+
13
+ load_dotenv() # Load environment variables from .env
14
+
15
+
16
# Configuration: Move to configurations
class Config:
    """Central configuration: API keys, file locations, and the sector list."""

    # Secrets come from the environment (.env loaded at module import).
    ALPHA_VANTAGE_API_KEY = os.getenv("ALPHA_VANTAGE_API_KEY")
    GROQ_API_KEY = os.getenv("GROQ_API_KEY")

    # Where per-symbol CSVs and the final portfolio JSON are written.
    STOCK_DATA_DIR = "stock_data_NSE"
    OUTPUT_FILE = "output_files/portfolio.json"

    # Allowed sector labels used to constrain the LLM's classification.
    SECTORS = [
        "Communication Services",
        "Consumer Discretionary",
        "Consumer Staples",
        "Energy",
        "Financials",
        "Health Care",
        "Industrials",
        "Information Technology",
        "Materials",
        "Real Estate",
        "Utilities",
    ]
35
+
36
+
37
# Create the output directory if it doesn't exist.
# exist_ok=True avoids the check-then-create race of the previous
# os.path.exists() + os.makedirs() pair.
os.makedirs(Config.STOCK_DATA_DIR, exist_ok=True)
40
+
41
+
42
def fetch_stock_data(symbols: List[str]) -> Dict[str, pd.DataFrame | None]:
    """Fetch the full price history for each symbol from Yahoo Finance.

    Args:
        symbols: Stock symbols, e.g. ["RELIANCE.NS", "TCS.NS"].

    Returns:
        Mapping of symbol -> history DataFrame, or None when the fetch
        failed or returned no rows.
    """
    results: Dict[str, pd.DataFrame | None] = {}
    for sym in symbols:
        try:
            history = yf.Ticker(sym).history(period="max")
            if history.empty:
                print(f"Warning: No data found for symbol '{sym}'.")
                results[sym] = None
            else:
                results[sym] = history
        except Exception as e:
            # Network / ticker errors are reported and recorded as None
            # so one bad symbol does not abort the whole batch.
            print(f"Error fetching data for symbol '{sym}': {e}")
            results[sym] = None
    return results
66
+
67
+
68
def store_stock_data(stock_dataframes: Dict[str, pd.DataFrame | None],
                     output_path: str = Config.STOCK_DATA_DIR) -> None:
    """Persist each fetched DataFrame as a per-symbol CSV file.

    Args:
        stock_dataframes: Mapping of symbol -> DataFrame (or None when the
            fetch failed; such entries are skipped with a warning).
        output_path: Directory for the CSV files. Defaults to STOCK_DATA_DIR.
    """
    for sym, frame in stock_dataframes.items():
        if frame is None:
            print(f"Warning: No data available for '{sym}', skipping storage.")
            continue
        destination = os.path.join(output_path, f"{sym}_daily_data.csv")
        try:
            frame.to_csv(destination)
            print(f"Info: Data for '{sym}' saved to {destination}")
        except Exception as e:
            # Best-effort: report write failures but keep saving the rest.
            print(f"Error saving data for '{sym}' to {destination}: {e}")
87
+
88
+
89
def load_stock_data_and_extract_price(output_path_dir: str) -> Dict[str, Dict[str, float]]:
    """Read each per-symbol CSV and pull the most recent closing price.

    Args:
        output_path_dir: Directory containing the "<SYMBOL>_daily_data.csv" files.

    Returns:
        Mapping of symbol -> {"initial_price": last 'Close' value}; 0.0 is
        used when a file is empty or cannot be read.
    """
    suffix = "_daily_data.csv"
    all_stock_data: Dict[str, Dict[str, float]] = {}
    for entry in os.listdir(output_path_dir):
        if not entry.endswith(suffix):
            continue
        symbol = entry.replace(suffix, "")
        try:
            frame = pd.read_csv(os.path.join(output_path_dir, entry), index_col=0)
            if frame.empty:
                print(f"Warning: Empty dataframe for symbol '{symbol}'. Setting initial price to 0")
                all_stock_data[symbol] = {"initial_price": 0.0}
            else:
                # Last row = most recent trading day.
                all_stock_data[symbol] = {"initial_price": frame.iloc[-1]['Close']}
        except (IndexError, KeyError, FileNotFoundError) as e:
            print(f"Error occurred for reading {symbol}, due to: {e}")
            all_stock_data[symbol] = {"initial_price": 0.0}  # fall back to 0.0
    return all_stock_data
116
+
117
+
118
def merge_stock_data_with_price(stock_data: Dict, extracted_data: Dict) -> Dict:
    """Attach each stock's latest price to the base stock records.

    Args:
        stock_data: Mapping of position key -> {"name", "symbol", "quantity"}.
        extracted_data: Mapping of symbol -> {"initial_price": float}.

    Returns:
        A new dict with the same keys where every record additionally
        carries "initial_price" (0.0 when no price could be extracted).
    """
    merged: Dict = {}
    for key, record in stock_data.items():
        # Bug fix: the old `stock_data.copy()` was a shallow copy, so writing
        # "initial_price" mutated the caller's inner dicts. Copy each record.
        new_record = dict(record)
        price_info = extracted_data.get(record["symbol"])
        if price_info is not None:
            new_record["initial_price"] = price_info["initial_price"]
        else:
            new_record["initial_price"] = 0.0  # default when extraction failed
        merged[key] = new_record
    return merged
135
+
136
+
137
def generate_prompt(stock_data: Dict) -> str:
    """Build the LLM prompt embedding the merged stock data as JSON.

    Args:
        stock_data: Merged records (name, symbol, quantity, initial_price).

    Returns:
        The fully formatted prompt string.
    """
    template = """
    You are a financial analysis expert.
    Please provide a summary of the following stock data, including the company name, stock symbol, and initial purchase price.

    Stock Data:
    {stock_data}

    Summary:
    """
    # Serialise once so the prompt contains well-formed JSON.
    return template.format(stock_data=json.dumps(stock_data))
156
+
157
+
158
class Asset(BaseModel):
    """Represents a single holding (one stock position) within a portfolio."""
    quantity: int = Field(..., description="The number of shares or units held for this specific asset.")
    initial_price: float = Field(..., description="The initial purchase price per share or unit of this asset.")
    # Fix: the old description mixed labels not in Config.SECTORS ('consumer'),
    # which could steer the LLM to emit invalid sector names.
    sector: str = Field(..., description=f"""The economic sector of the asset, chosen from this exact list: {Config.SECTORS}.
        Classify based on the business nature of the company whose stock is traded. For example,
        'Financials' for HDFCBANK or JPM, 'Consumer Staples' for PG, 'Information Technology' for GOOG.""")
166
+
167
+
168
class Portfolio(BaseModel):
    """Represents an individual portfolio returned by the LLM."""
    name: str = Field(...,
                      description="The name given to this portfolio, for example 'Diversified Portfolio'. 'Aggressive Tech Portfolio' ")
    # Fix: the old example used 'finance'/'consumer', which are not valid
    # Config.SECTORS labels and contradicted the Asset.sector instruction.
    assets: Dict[str, Asset] = Field(..., description="""A dictionary containing the assets within this portfolio. The keys of the dictionary
        are the ticker symbols of the stocks (e.g., 'JPM', 'PG'), and the values are the corresponding
        'Asset' objects, which define the quantity, initial price, and sector for each asset.
        Example: {'JPM': {'quantity': 150, 'initial_price': 140, 'sector': 'Financials'},
                 'PG': {'quantity': 200, 'initial_price': 160, 'sector': 'Consumer Staples'}}"""
    )
178
+
179
+
180
def invoke_llm_for_portfolio(formatted_prompt: str) -> Portfolio:
    """Ask the Groq LLM to turn the prompt into a structured Portfolio.

    Args:
        formatted_prompt: Prompt produced by generate_prompt().

    Returns:
        The Portfolio parsed from the model's structured output.

    Raises:
        ValidationError: When the model output fails Portfolio validation.
        Exception: Any other failure from the LLM call, re-raised after logging.
    """
    structured_llm = ChatGroq(
        groq_api_key=Config.GROQ_API_KEY, model_name="llama-3.1-8b-instant"
    ).with_structured_output(Portfolio)
    try:
        return structured_llm.invoke(formatted_prompt)
    except ValidationError as e:
        print(f"Error during LLM invocation: {e}")
        raise
    except Exception as e:
        print(f"Unexpected error during LLM invocation {e}")
        raise
198
+
199
+
200
def portfolio_to_json(portfolio: Portfolio, output_file: str = Config.OUTPUT_FILE) -> None:
    """Serialize a Portfolio to pretty-printed JSON on disk.

    Args:
        portfolio: The validated Portfolio model to save.
        output_file: Destination path. Defaults to Config.OUTPUT_FILE.
    """
    try:
        # Fix: the default path lives under "output_files/", which was never
        # created here (scenario.py already does this for its own output).
        directory = os.path.dirname(output_file)
        if directory:
            os.makedirs(directory, exist_ok=True)
        with open(output_file, "w") as f:
            f.write(portfolio.model_dump_json(indent=4))
        print(f"Info: Portfolio saved to '{output_file}'")
    except Exception as e:
        print(f"Error saving JSON file {e}")
209
+
210
+
211
if __name__ == '__main__':
    # Sample stock data
    stock_data = {
        "stock1": {"name": "Reliance Industries Ltd.", "symbol": "RELIANCE.NS", "quantity": 10},
        "stock2": {"name": "Tata Consultancy Services Ltd.", "symbol": "TCS.NS", "quantity": 15},
        "stock3": {"name": "HDFC Bank Ltd.", "symbol": "HDFCBANK.NS", "quantity": 20},
        "stock4": {"name": "Infosys Ltd.", "symbol": "INFY.NS", "quantity": 12},
        "stock5": {"name": "Hindustan Unilever Ltd.", "symbol": "HINDUNILVR.NS", "quantity": 8}
    }

    # 1. Fetch stock data
    stock_symbols = [value["symbol"] for value in stock_data.values()]
    stock_dfs = fetch_stock_data(stock_symbols)

    # Keep the successfully fetched DataFrames for later reuse
    saved_dataframes = {}
    if stock_dfs:
        for symbol, df in stock_dfs.items():
            if df is not None:
                saved_dataframes[symbol] = df
                print(f"Data for '{symbol}' loaded into variable.")
            else:
                print(f"No data found for '{symbol}'")
    else:
        print("Error occurred during fetching data. DataFrames are not returned.")

    # Save the dictionary to a local file
    def save_dataframes(dataframes_dict, filename="output_files/saved_dataframes.pkl"):
        # Fix: "output_files/" may not exist yet, so create it; also report
        # the actual path (the old message printed a garbled placeholder).
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with open(filename, 'wb') as file:
            pickle.dump(dataframes_dict, file)
        print(f"DataFrames successfully saved to {filename}.")
    save_dataframes(saved_dataframes)

    # 2. Store data
    store_stock_data(stock_dfs)

    # 3. Load the last price
    extracted_data = load_stock_data_and_extract_price(Config.STOCK_DATA_DIR)

    # 4. Merge extracted price with the main dictionary
    merged_stock_data = merge_stock_data_with_price(stock_data, extracted_data)

    # 5. Generate prompt for LLM
    formatted_prompt = generate_prompt(merged_stock_data)
    print(formatted_prompt)

    # 6. Invoke LLM
    try:
        portfolio_output = invoke_llm_for_portfolio(formatted_prompt)
        print(portfolio_output)
    except Exception as e:
        print(f"An unexpected error occurred during the LLM invocation: {e}")
    else:
        # 7. Save portfolio output to JSON
        portfolio_to_json(portfolio_output)
scenario.py ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Import required modules
2
+ import requests
3
+ from bs4 import BeautifulSoup
4
+ import nest_asyncio
5
+ import asyncio
6
+ import json
7
+ import re
8
+ from crawl4ai import *
9
+ import os
10
+ from dotenv import load_dotenv
11
+ import google.generativeai as genai
12
+
13
+ # Load environment variables from a .env file
14
+ load_dotenv() # Make sure a .env file exists with GOOGLE_API_KEY=<your_api_key>
15
+ GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY") # Fetch the API key
16
+
17
+ # Apply nest_asyncio to enable asynchronous tasks in Jupyter/interactive environments
18
+ nest_asyncio.apply()
19
+
20
# Asynchronous function to extract text from a website
async def extract_text_from_website(url):
    """Crawl *url* with crawl4ai and return the page rendered as markdown."""
    async with AsyncWebCrawler() as crawler:
        return (await crawler.arun(url=url)).markdown
25
+
26
+
27
# Configure the generative AI model with the key loaded from .env
genai.configure(api_key=GOOGLE_API_KEY)

# Sampling parameters for the Gemini model
generation_config = {
    "temperature": 1,
    "top_p": 0.95,
    "top_k": 40,
    "max_output_tokens": 8192,
    "response_mime_type": "text/plain",
}

model = genai.GenerativeModel(
    model_name="gemini-2.0-flash-exp",
    generation_config=generation_config,
)

# A single chat session reused for the whole run
chat_session = model.start_chat()
47
+
48
# Function to get a response from the generative AI model
def get_response(llm, prompt):
    """Send *prompt* through the chat session *llm* and return its raw response."""
    return llm.send_message(prompt)
52
+
53
+
54
+
55
# Function to extract JSON content from the response
def extract_json_content(text):
    """Return the body of the first ```json ...``` fenced block in *text*, or None."""
    fenced = re.search(r"```json\n(.*?)```", text, re.DOTALL)
    return fenced.group(1).strip() if fenced else None
62
+
63
+
64
if __name__ == "__main__":
    # Extract market data from the given URL
    url = "https://www.livemint.com/market/stock-market-news/page-7"
    context_data = asyncio.run(extract_text_from_website(url))

    # Allowed sector labels for the model's output
    sectors = [
        "Communication Services",
        "Consumer Discretionary",
        "Consumer Staples",
        "Energy",
        "Financials",
        "Health Care",
        "Industrials",
        "Information Technology",
        "Materials",
        "Real Estate",
        "Utilities",
    ]

    prompt = f"""
    # TASK: Analyze market context and identify potential market scenarios.

    # CONTEXT:
    {context_data}
    # END CONTEXT

    # INSTRUCTION: Based on the provided market context, analyze and identify up to three plausible market scenarios.
    # For each scenario, determine its name (e.g., "Moderate Downturn"), the general market direction ("up" or "down"), a major trigger point that could cause the scenario to unfold, and a list of sectors that would be significantly impacted. Each 'sector_impact' list should have less than or equal to 4 sectors.

    # OUTPUT FORMAT: Provide the analysis in JSON format with the following structure.
    # Use the sector names provided:
    {sectors}

    # EXAMPLE:
    ```json
    {{
        "market_scenarios": {{
            "scenario1": {{
                "name": "Moderate Downturn",
                "direction": "down",
                "trigger": "Interest rate hike",
                "sector_impact": [
                    "Financials",
                    "Energy"
                ]
            }},
            "scenario2": {{
                "name": "Bullish Growth",
                "direction": "up",
                "trigger": "Successful vaccine rollout",
                "sector_impact": [
                    "Health Care",
                    "Information Technology"
                ]
            }}
        }}
    }}
    """

    # Generate the response
    answer = get_response(chat_session, prompt)

    # Extract the JSON output from the response
    json_output = extract_json_content(answer.text)

    # Define output file path
    output_file = "output_files/scenario.json"

    # Parse the output into a JSON object and save it to a file
    if json_output is None:
        # Fix: json.loads(None) would raise TypeError and surface as a vague
        # generic error; report the actual problem explicitly.
        print("Error: No ```json``` block found in the model response.")
    else:
        try:
            analysis_json = json.loads(json_output)
            os.makedirs(os.path.dirname(output_file), exist_ok=True)  # Ensure the output directory exists
            with open(output_file, "w") as f:
                json.dump(analysis_json, f, indent=4)  # Save JSON to a file with indentation
            print(f"Analysis saved to '{output_file}'")
        except json.JSONDecodeError:
            print("Error: Could not decode the output from the model into JSON format.")
        except Exception as e:
            print(f"Error: {e}")
simluation_data.py ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import numpy as np
3
+ import pandas as pd
4
+ import matplotlib.pyplot as plt
5
+ import pickle
6
# Function for Monte Carlo Simulation
def monte_carlo_simulation(portfolio_data, scenario_data, num_simulations=10000):
    """
    Performs a Monte Carlo simulation on a portfolio based on market scenarios.

    Each asset in an impacted sector receives a per-simulation price shock
    drawn from N(impact / 100, 0.1); assets in unimpacted sectors keep their
    initial price.

    Args:
        portfolio_data (dict): Portfolio with an "assets" mapping of
            symbol -> {"sector", "quantity", "initial_price"}.
        scenario_data (dict): {"market_scenarios": {id: {"name",
            "sector_impact", ...}}} where "sector_impact" maps sector name
            to an impact magnitude.
        num_simulations (int, optional): The number of simulations. Defaults to 10000.

    Returns:
        dict: Per-scenario results: simulated portfolio values, mean and
        std-dev of returns, and the 5/25/50/75/95 return percentiles.
    """
    scenarios = scenario_data["market_scenarios"]
    results = {}

    # The starting portfolio value is scenario-independent; it was previously
    # recomputed inside the scenario loop — hoist it out once.
    initial_portfolio_value = sum(
        asset["quantity"] * asset["initial_price"]
        for asset in portfolio_data["assets"].values()
    )

    for scenario_details in scenarios.values():
        scenario_name = scenario_details["name"]
        sector_impacts = scenario_details.get("sector_impact", {})
        portfolio_values = []

        for _ in range(num_simulations):
            portfolio_value = 0
            for asset_details in portfolio_data["assets"].values():
                sector = asset_details["sector"]
                quantity = asset_details["quantity"]
                initial_price = asset_details["initial_price"]

                # Shock only assets in impacted sectors; scale=0.1 adds noise.
                price_change_percentage = 0
                if sector in sector_impacts:
                    price_change_percentage = np.random.normal(
                        loc=sector_impacts[sector] / 100, scale=0.1
                    )

                portfolio_value += initial_price * (1 + price_change_percentage) * quantity
            portfolio_values.append(portfolio_value)

        returns = [
            (value - initial_portfolio_value) / initial_portfolio_value
            for value in portfolio_values
        ]
        results[scenario_name] = {
            "portfolio_values": portfolio_values,
            "average_return": np.mean(returns),
            "std_dev_return": np.std(returns),
            "percentiles": {p: np.percentile(returns, p) for p in (5, 25, 50, 75, 95)},
        }

    return results
+
73
if __name__ == "__main__":
    # Load input data
    with open("output_files/scenario.json") as f:
        scenario_data = json.load(f)

    with open("output_files/portfolio.json") as f:
        portfolio_data = json.load(f)

    # Load the dictionary of DataFrames from the local pickle file
    def load_dataframes(filename="output_files/saved_dataframes.pkl"):
        # Fix: the messages previously printed a garbled placeholder
        # instead of the actual file path.
        try:
            with open(filename, 'rb') as file:
                saved_dataframes = pickle.load(file)
            print(f"DataFrames successfully loaded from {filename}.")
            return saved_dataframes
        except FileNotFoundError:
            print(f"File {filename} not found.")
            return None

    saved_dataframes = load_dataframes()
    if saved_dataframes is None:
        # Fix: every magnitude lookup below would otherwise crash with a
        # TypeError; fail fast with an actionable message instead.
        raise SystemExit("Cannot continue without saved DataFrames; run portfolio.py first.")

    # Placeholder for storing results
    scenario_results = {}

    # Process each scenario
    for scenario_name, scenario_details in scenario_data["market_scenarios"].items():
        impacted_sectors = scenario_details["sector_impact"]

        # Filter assets in the impacted sectors
        relevant_assets = [
            symbol
            for symbol, details in portfolio_data["assets"].items()
            if details["sector"] in impacted_sectors
        ]

        # Calculate magnitudes for the scenario
        sector_magnitudes = {}
        for symbol in relevant_assets:
            df = saved_dataframes[symbol]
            sector = portfolio_data["assets"][symbol]["sector"]

            # Magnitude = absolute day-over-day move of the LAST TWO Close
            # prices. (The old comment claimed first-vs-last, which the code
            # never did — the code is kept, the comment corrected.)
            magnitude = abs(df["Close"].iloc[-2] - df["Close"].iloc[-1])

            # Aggregate by sector
            sector_magnitudes[sector] = sector_magnitudes.get(sector, 0) + magnitude

        # Calculate aggregated magnitude for the scenario
        aggregated_magnitude = sum(sector_magnitudes.values())

        # Store results
        scenario_results[scenario_name] = {
            "individual_magnitudes": sector_magnitudes,
            "aggregated_magnitude": aggregated_magnitude,
        }

    # Display results
    for scenario_name, results in scenario_results.items():
        print(f"\nScenario: {scenario_name}")
        print("Individual Sector Magnitudes:")
        for sector, magnitude in results["individual_magnitudes"].items():
            print(f"  {sector}: {magnitude:.2f}")
        print(f"Aggregated Magnitude: {results['aggregated_magnitude']:.2f}")

    # Integrate calculated results into scenario data.
    # BUG FIX: scenario_results is keyed by the scenario *name* ("Moderate
    # Downturn"), while market_scenarios is keyed by its id ("scenario1", ...).
    # The old loop indexed market_scenarios with the name and raised KeyError.
    name_to_id = {
        details["name"]: sid
        for sid, details in scenario_data["market_scenarios"].items()
    }
    for scenario_name, results in scenario_results.items():
        scenario_id = name_to_id[scenario_name]
        # Update the sector impacts to include individual magnitudes
        scenario_data["market_scenarios"][scenario_id]["sector_impact"] = results["individual_magnitudes"]
        # Update aggregated magnitude
        scenario_data["market_scenarios"][scenario_id]["aggregated_magnitude"] = results["aggregated_magnitude"]

    # Save the updated scenario data to a local JSON file
    output_file_path = "output_files/updated_scenario_data.json"
    with open(output_file_path, "w") as file:
        json.dump(scenario_data, file, indent=4)

    print(f"Updated scenario data saved to '{output_file_path}' successfully!")

    # Run Monte Carlo simulation
    simulation_results = monte_carlo_simulation(portfolio_data, scenario_data)

    # Save simulation results to a local JSON file
    simulation_results_file = "output_files/simulation_results.json"
    with open(simulation_results_file, "w") as file:
        json.dump(simulation_results, file, indent=4)

    print(f"Simulation results saved to '{simulation_results_file}' successfully!")

    # Print simulation results
    for scenario_name, results in simulation_results.items():
        print(f"Scenario: {scenario_name}")
        print(f"  Average Return: {results['average_return']:.4f}")
        print(f"  Std Dev Return: {results['std_dev_return']:.4f}")
        print("  Return Percentiles:")
        for percentile, value in results["percentiles"].items():
            print(f"    {percentile}th: {value:.4f}")
        print("-" * 40)