# Import required modules
import requests
from bs4 import BeautifulSoup
import nest_asyncio
import asyncio
import json
import re
from crawl4ai import *
import os
from dotenv import load_dotenv
import google.generativeai as genai

# Load environment variables from a .env file in the working directory.
load_dotenv()  # Requires a .env file containing GOOGLE_API_KEY=<your_api_key>
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")  # None if the variable is absent — genai.configure below will then fail

# Patch the running event loop so asyncio.run() works inside environments that
# already have a loop (Jupyter/IPython); harmless in a plain script.
nest_asyncio.apply()

# Asynchronously crawl a web page and return its content as markdown.
async def extract_text_from_website(url):
    """Fetch *url* with crawl4ai's AsyncWebCrawler and return the page rendered as markdown."""
    crawler = AsyncWebCrawler()
    async with crawler:
        page = await crawler.arun(url=url)
        return page.markdown


# Configure the Gemini client with the key loaded from the environment above.
# NOTE(review): GOOGLE_API_KEY may be None if the .env file is missing — confirm
# a key is present before running.
genai.configure(api_key=GOOGLE_API_KEY)  # Key loaded via load_dotenv()/os.getenv above

generation_config = {
    "temperature": 1,  # high sampling temperature -> more varied scenarios
    "top_p": 0.95,  # nucleus-sampling cutoff
    "top_k": 40,  # sample only from the 40 most likely tokens
    "max_output_tokens": 8192,  # upper bound on response length
    "response_mime_type": "text/plain",  # plain text; JSON is extracted by regex later
}

# Chat-capable Gemini model used for the market-scenario analysis.
model = genai.GenerativeModel(
    model_name="gemini-2.0-flash-exp",
    generation_config=generation_config,
)

# Single chat session; __main__ sends one prompt through it.
chat_session = model.start_chat()

# Thin wrapper so the LLM call site reads uniformly throughout the script.
def get_response(llm, prompt):
    """Send *prompt* through the given chat session *llm* and return the raw response object."""
    return llm.send_message(prompt)



def extract_json_content(text):
    """Extract the body of the first ```json fenced code block in *text*.

    Args:
        text: Raw model output that may contain a ```json ... ``` block.

    Returns:
        The stripped contents of the fenced block, or None when no block
        is present.
    """
    # \s* (instead of a literal "\n") tolerates trailing spaces after the
    # fence marker and CRLF line endings, and IGNORECASE accepts "```JSON";
    # the original exact "```json\n...```" format still matches.
    match = re.search(r"```json\s*(.*?)```", text, re.DOTALL | re.IGNORECASE)
    if match:
        return match.group(1).strip()
    return None


if __name__ == "__main__":
    # Crawl a live market-news page to use as grounding context for the LLM.
    url = "https://www.livemint.com/market/stock-market-news/page-7"
    context_data = asyncio.run(extract_text_from_website(url))

    # Sector vocabulary the model must restrict its 'sector_impact' lists to.
    sectors = [
        "Communication Services",
        "Consumer Discretionary",
        "Consumer Staples",
        "Energy",
        "Financials",
        "Health Care",
        "Industrials",
        "Information Technology",
        "Materials",
        "Real Estate",
        "Utilities",
    ]

    prompt = f"""

    # TASK: Analyze market context and identify potential market scenarios.



    # CONTEXT:

    {context_data}

    # END CONTEXT



    # INSTRUCTION: Based on the provided market context, analyze and identify up to three plausible market scenarios.

    # For each scenario, determine its name (e.g., "Moderate Downturn"), the general market direction ("up" or "down"), a major trigger point that could cause the scenario to unfold, and a list of sectors that would be significantly impacted. Each 'sector_impact' list should have less than or equal to 4 sectors.



    # OUTPUT FORMAT: Provide the analysis in JSON format with the following structure.

    # Use the sector names provided:

    {sectors}



    # EXAMPLE:

    ```json

    {{

        "market_scenarios": {{

            "scenario1": {{

                "name": "Moderate Downturn",

                "direction": "down",

                "trigger": "Interest rate hike",

                "sector_impact": [

                    "Financials",

                    "Energy"

                ]

            }},

            "scenario2": {{

                "name": "Bullish Growth",

                "direction": "up",

                "trigger": "Successful vaccine rollout",

                "sector_impact": [

                    "Health Care",

                    "Information Technology"

                ]

            }}

        }}

    }}

    """

    # Send the prompt through the shared chat session.
    answer = get_response(chat_session, prompt)

    # Pull the ```json fenced block out of the model's reply (None if absent).
    json_output = extract_json_content(answer.text)

    # Destination for the parsed scenario analysis.
    output_file = "output_files/scenario.json"

    if json_output is None:
        # Explicit guard: json.loads(None) would raise a confusing TypeError
        # that the generic handler below would report unhelpfully.
        print("Error: The model's response did not contain a ```json block.")
    else:
        # Parse the extracted text into a JSON object and save it to a file.
        try:
            analysis_json = json.loads(json_output)
            os.makedirs(os.path.dirname(output_file), exist_ok=True)  # Ensure the output directory exists
            with open(output_file, "w") as f:
                json.dump(analysis_json, f, indent=4)  # Save JSON to a file with indentation
            print(f"Analysis saved to '{output_file}'")
        except json.JSONDecodeError:
            print("Error: Could not decode the output from the model into JSON format.")
        except Exception as e:
            # Best-effort script: report (rather than crash on) filesystem errors etc.
            print(f"Error: {e}")