Spaces:
Build error
Build error
Upload folder using huggingface_hub
Browse files- common/aagents/google_agent.py +1 -0
- common/aagents/healthcare_agent.py +3 -2
- common/aagents/news_agent.py +7 -16
- common/aagents/search_agent.py +51 -0
- common/aagents/weather_agent.py +1 -4
- common/aagents/web_research_agent.py +1 -2
- common/aagents/yf_agent.py +17 -11
- common/mcp/tools/time_tools.py +1 -0
- common/mcp/tools/yf_tools.py +65 -0
- src/chatbot/aagents/__init__.py +0 -0
- src/chatbot/aagents/input_validation_agent.py +65 -0
- src/chatbot/aagents/orchestrator_agent.py +126 -0
- src/chatbot/app.py +12 -4
- src/chatbot/prompts/upcoming_earnings.txt +1 -1
common/aagents/google_agent.py
CHANGED
|
@@ -135,5 +135,6 @@ google_agent = Agent(
|
|
| 135 |
- Respect timeouts and handle errors gracefully
|
| 136 |
""",
|
| 137 |
)
|
|
|
|
| 138 |
|
| 139 |
__all__ = ["google_agent", "google_search", "google_search_recent", "duckduckgo_search", "fetch_page_content", "current_datetime"]
|
|
|
|
| 135 |
- Respect timeouts and handle errors gracefully
|
| 136 |
""",
|
| 137 |
)
|
| 138 |
+
google_agent.description = "A Google search agent that finds accurate, up-to-date information and recent news using Google Search."
|
| 139 |
|
| 140 |
__all__ = ["google_agent", "google_search", "google_search_recent", "duckduckgo_search", "fetch_page_content", "current_datetime"]
|
common/aagents/healthcare_agent.py
CHANGED
|
@@ -6,7 +6,7 @@ from openai import AsyncOpenAI
|
|
| 6 |
|
| 7 |
# Import tools
|
| 8 |
from mcp.tools.rag_tool import rag_search, UserContext
|
| 9 |
-
from mcp.tools.search_tools import duckduckgo_search
|
| 10 |
from mcp.tools.time_tools import current_datetime
|
| 11 |
|
| 12 |
|
|
@@ -34,7 +34,7 @@ groq_model = OpenAIChatCompletionsModel(model="groq/compound", openai_client=gro
|
|
| 34 |
healthcare_agent = Agent[UserContext](
|
| 35 |
name="HealthcareRAGAgent",
|
| 36 |
model=gemini_model,
|
| 37 |
-
tools=[rag_search, duckduckgo_search],
|
| 38 |
instructions="""
|
| 39 |
You are a healthcare information retrieval agent. You retrieve information from tools and synthesize it into well-formatted markdown responses.
|
| 40 |
|
|
@@ -96,5 +96,6 @@ healthcare_agent = Agent[UserContext](
|
|
| 96 |
- Accept useless RAG results without calling web search
|
| 97 |
""",
|
| 98 |
)
|
|
|
|
| 99 |
|
| 100 |
__all__ = ["healthcare_agent"]
|
|
|
|
| 6 |
|
| 7 |
# Import tools
|
| 8 |
from mcp.tools.rag_tool import rag_search, UserContext
|
| 9 |
+
from mcp.tools.search_tools import duckduckgo_search, fetch_page_content
|
| 10 |
from mcp.tools.time_tools import current_datetime
|
| 11 |
|
| 12 |
|
|
|
|
| 34 |
healthcare_agent = Agent[UserContext](
|
| 35 |
name="HealthcareRAGAgent",
|
| 36 |
model=gemini_model,
|
| 37 |
+
tools=[rag_search, duckduckgo_search, fetch_page_content],
|
| 38 |
instructions="""
|
| 39 |
You are a healthcare information retrieval agent. You retrieve information from tools and synthesize it into well-formatted markdown responses.
|
| 40 |
|
|
|
|
| 96 |
- Accept useless RAG results without calling web search
|
| 97 |
""",
|
| 98 |
)
|
| 99 |
+
healthcare_agent.description = "A healthcare agent that combines RAG (Retrieval Augmented Generation) with web search to answer medical questions."
|
| 100 |
|
| 101 |
__all__ = ["healthcare_agent"]
|
common/aagents/news_agent.py
CHANGED
|
@@ -3,7 +3,6 @@ import os
|
|
| 3 |
from agents import Agent, OpenAIChatCompletionsModel
|
| 4 |
from dotenv import load_dotenv
|
| 5 |
from mcp.tools.news_tools import get_top_headlines, search_news, get_news_by_category
|
| 6 |
-
from mcp.tools.search_tools import duckduckgo_search
|
| 7 |
from mcp.tools.time_tools import current_datetime
|
| 8 |
from openai import AsyncOpenAI
|
| 9 |
|
|
@@ -25,7 +24,7 @@ groq_model = OpenAIChatCompletionsModel(model="groq/compound", openai_client=gro
|
|
| 25 |
news_agent = Agent(
|
| 26 |
name="NewsAgent",
|
| 27 |
model=gemini_model,
|
| 28 |
-
tools=[current_datetime, get_top_headlines, search_news, get_news_by_category
|
| 29 |
instructions="""
|
| 30 |
You are a NewsAgent specialized in fetching and analyzing recent news articles and headlines.
|
| 31 |
Your role is to provide users with up-to-date, relevant news information from reliable sources.
|
|
@@ -46,13 +45,8 @@ news_agent = Agent(
|
|
| 46 |
- Categories: "business", "entertainment", "general", "health", "science", "sports", "technology"
|
| 47 |
- Input: { "category": "business", "country": "us", "num_results": 5 }
|
| 48 |
|
| 49 |
-
**FALLBACK TOOL (DuckDuckGo Search):**
|
| 50 |
-
4. 'duckduckgo_search': Use ONLY when NewsAPI tools fail or API key is missing
|
| 51 |
-
- Set search_type to "news" for news-specific results
|
| 52 |
-
- Input: { "query": "topic", "max_results": 5, "search_type": "news", "timelimit": "d" }
|
| 53 |
-
|
| 54 |
**TIME CONTEXT:**
|
| 55 |
-
|
| 56 |
- Input: { "format": "natural" }
|
| 57 |
|
| 58 |
## Workflow
|
|
@@ -62,14 +56,11 @@ news_agent = Agent(
|
|
| 62 |
- Topic-specific → use search_news
|
| 63 |
- Category-specific → use get_news_by_category
|
| 64 |
|
| 65 |
-
2. **
|
| 66 |
-
|
| 67 |
-
3. **Fallback if Needed**: If NewsAPI returns an error (missing API key, no results),
|
| 68 |
-
use duckduckgo_search with search_type="news"
|
| 69 |
|
| 70 |
-
|
| 71 |
|
| 72 |
-
|
| 73 |
- Headlines/titles
|
| 74 |
- Sources
|
| 75 |
- Publication dates
|
|
@@ -95,12 +86,12 @@ news_agent = Agent(
|
|
| 95 |
|
| 96 |
- Always cite sources and include publication dates
|
| 97 |
- Prioritize recent news (within last 7 days unless specified otherwise)
|
| 98 |
-
- If API key is missing, inform the user and use the fallback tool
|
| 99 |
- Never fabricate news or sources
|
| 100 |
- Present news objectively without bias
|
| 101 |
- Include URLs so users can read full articles
|
| 102 |
- Use current_datetime to ensure temporal accuracy
|
| 103 |
""",
|
| 104 |
)
|
|
|
|
| 105 |
|
| 106 |
-
__all__ = ["news_agent", "get_top_headlines", "search_news", "get_news_by_category", "
|
|
|
|
| 3 |
from agents import Agent, OpenAIChatCompletionsModel
|
| 4 |
from dotenv import load_dotenv
|
| 5 |
from mcp.tools.news_tools import get_top_headlines, search_news, get_news_by_category
|
|
|
|
| 6 |
from mcp.tools.time_tools import current_datetime
|
| 7 |
from openai import AsyncOpenAI
|
| 8 |
|
|
|
|
| 24 |
news_agent = Agent(
|
| 25 |
name="NewsAgent",
|
| 26 |
model=gemini_model,
|
| 27 |
+
tools=[current_datetime, get_top_headlines, search_news, get_news_by_category],
|
| 28 |
instructions="""
|
| 29 |
You are a NewsAgent specialized in fetching and analyzing recent news articles and headlines.
|
| 30 |
Your role is to provide users with up-to-date, relevant news information from reliable sources.
|
|
|
|
| 45 |
- Categories: "business", "entertainment", "general", "health", "science", "sports", "technology"
|
| 46 |
- Input: { "category": "business", "country": "us", "num_results": 5 }
|
| 47 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 48 |
**TIME CONTEXT:**
|
| 49 |
+
4. 'current_datetime': Use to provide current date/time context in your responses
|
| 50 |
- Input: { "format": "natural" }
|
| 51 |
|
| 52 |
## Workflow
|
|
|
|
| 56 |
- Topic-specific → use search_news
|
| 57 |
- Category-specific → use get_news_by_category
|
| 58 |
|
| 59 |
+
2. **Execute Search**: Use the appropriate NewsAPI tool.
|
|
|
|
|
|
|
|
|
|
| 60 |
|
| 61 |
+
3. **Include Time Context**: Use current_datetime to provide temporal context.
|
| 62 |
|
| 63 |
+
4. **Format Response**: Present news in a clear, organized format with:
|
| 64 |
- Headlines/titles
|
| 65 |
- Sources
|
| 66 |
- Publication dates
|
|
|
|
| 86 |
|
| 87 |
- Always cite sources and include publication dates
|
| 88 |
- Prioritize recent news (within last 7 days unless specified otherwise)
|
|
|
|
| 89 |
- Never fabricate news or sources
|
| 90 |
- Present news objectively without bias
|
| 91 |
- Include URLs so users can read full articles
|
| 92 |
- Use current_datetime to ensure temporal accuracy
|
| 93 |
""",
|
| 94 |
)
|
| 95 |
+
news_agent.description = "A news agent that fetches top headlines and searches for news articles by category or topic."
|
| 96 |
|
| 97 |
+
__all__ = ["news_agent", "get_top_headlines", "search_news", "get_news_by_category", "current_datetime"]
|
common/aagents/search_agent.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Search agent module for comprehensive web searches."""
|
| 2 |
+
import os
|
| 3 |
+
from agents import Agent, OpenAIChatCompletionsModel
|
| 4 |
+
from openai import AsyncOpenAI
|
| 5 |
+
from dotenv import load_dotenv
|
| 6 |
+
from mcp.tools.search_tools import duckduckgo_search, fetch_page_content
|
| 7 |
+
from mcp.tools.time_tools import current_datetime
|
| 8 |
+
|
| 9 |
+
# ---------------------------------------------------------
|
| 10 |
+
# Load environment variables
|
| 11 |
+
# ---------------------------------------------------------
|
| 12 |
+
load_dotenv()
|
| 13 |
+
|
| 14 |
+
GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
|
| 15 |
+
google_api_key = os.getenv('GOOGLE_API_KEY')
|
| 16 |
+
gemini_client = AsyncOpenAI(base_url=GEMINI_BASE_URL, api_key=google_api_key)
|
| 17 |
+
gemini_model = OpenAIChatCompletionsModel(model="gemini-2.0-flash-exp", openai_client=gemini_client)
|
| 18 |
+
|
| 19 |
+
search_agent = Agent(
|
| 20 |
+
name="Web Search Agent",
|
| 21 |
+
model=gemini_model,
|
| 22 |
+
tools=[current_datetime, duckduckgo_search, fetch_page_content],
|
| 23 |
+
instructions="""
|
| 24 |
+
You are a highly efficient and specialized **Web Search Agent** 🌐. Your sole function is to retrieve and analyze information from the internet using the **duckduckgo_search** and **fetch_page_content** functions. You must act as a digital librarian and researcher, providing synthesized, cited, and up-to-date answers.
|
| 25 |
+
|
| 26 |
+
## Core Directives & Priorities
|
| 27 |
+
1. **Time Awareness First:** ALWAYS invoke **current_datetime** at the very beginning of your execution to establish the current temporal context. This is crucial for answering questions about "today", "yesterday", or recent events.
|
| 28 |
+
2. **Search Strategy:**
|
| 29 |
+
* Analyze the user's request and construct 1-3 targeted search queries.
|
| 30 |
+
* Use **duckduckgo_search** to find relevant information. Use the 'news' type for current events.
|
| 31 |
+
* **Mandatory Deep Dive:** You MUST select the **top 3** most relevant search results and use **fetch_page_content** to retrieve their full text. *Do not rely solely on the short search snippets.*
|
| 32 |
+
3. **Synthesis & Answer Construction:**
|
| 33 |
+
* Read the fetched content thoroughly.
|
| 34 |
+
* Synthesize the information into a coherent answer.
|
| 35 |
+
* **Conflict Resolution:** If sources disagree, note the discrepancy and favor the most recent or authoritative source.
|
| 36 |
+
* **Citations:** You **must** cite your sources. At the end of your response, list the *Title* and *URL* of the pages you used.
|
| 37 |
+
4. **Clarity:** Use professional, plain language. Use headings and bullet points for readability.
|
| 38 |
+
5. **Data Gaps:** If you cannot find a conclusive answer after searching and fetching, state: **"A conclusive answer could not be verified by current web search results."**
|
| 39 |
+
|
| 40 |
+
## Workflow Example
|
| 41 |
+
1. Call `current_datetime()`.
|
| 42 |
+
2. Call `duckduckgo_search(query="...")`.
|
| 43 |
+
3. Loop through top 3 results: `fetch_page_content(url=...)`.
|
| 44 |
+
4. Synthesize findings into final answer.
|
| 45 |
+
|
| 46 |
+
**Crucially, never fabricate information. Your answer must be grounded in the text you have fetched.**
|
| 47 |
+
""",
|
| 48 |
+
)
|
| 49 |
+
search_agent.description = "A web search agent that retrieves information using DuckDuckGo and fetches page content for detailed answers."
|
| 50 |
+
|
| 51 |
+
__all__ = ["search_agent"]
|
common/aagents/weather_agent.py
CHANGED
|
@@ -32,7 +32,6 @@ groq_model = OpenAIChatCompletionsModel(model="groq/compound", openai_client=gro
|
|
| 32 |
weather_agent = Agent(
|
| 33 |
name="WeatherAgent",
|
| 34 |
model=gemini_model, #"gpt-4o-mini",
|
| 35 |
-
# description="An agent that can perform web searches using DuckDuckGo.",
|
| 36 |
tools=[current_datetime, get_weather_forecast, search_weather_fallback_ddgs, search_weather_fallback_bs],
|
| 37 |
instructions="""
|
| 38 |
You are a Weather Forecast agent who forecasts weather information ONLY.
|
|
@@ -61,9 +60,7 @@ weather_agent = Agent(
|
|
| 61 |
}.
|
| 62 |
]
|
| 63 |
""",
|
| 64 |
-
# output_type=AgentOutputSchema(list[searchResult], strict_json_schema=False),
|
| 65 |
-
# output_type=list[dict], # safer than list[searchResult],
|
| 66 |
-
# output_type=list[searchResult],
|
| 67 |
)
|
|
|
|
| 68 |
|
| 69 |
__all__ = ["weather_agent", "get_weather_forecast", "search_weather_fallback_ddgs", "search_weather_fallback_bs"]
|
|
|
|
| 32 |
weather_agent = Agent(
|
| 33 |
name="WeatherAgent",
|
| 34 |
model=gemini_model, #"gpt-4o-mini",
|
|
|
|
| 35 |
tools=[current_datetime, get_weather_forecast, search_weather_fallback_ddgs, search_weather_fallback_bs],
|
| 36 |
instructions="""
|
| 37 |
You are a Weather Forecast agent who forecasts weather information ONLY.
|
|
|
|
| 60 |
}.
|
| 61 |
]
|
| 62 |
""",
|
|
|
|
|
|
|
|
|
|
| 63 |
)
|
| 64 |
+
weather_agent.description = "A weather agent that provides current and forecasted weather information for specific cities."
|
| 65 |
|
| 66 |
__all__ = ["weather_agent", "get_weather_forecast", "search_weather_fallback_ddgs", "search_weather_fallback_bs"]
|
common/aagents/web_research_agent.py
CHANGED
|
@@ -29,9 +29,7 @@ groq_client = AsyncOpenAI(base_url=GROQ_BASE_URL, api_key=groq_api_key)
|
|
| 29 |
groq_model = OpenAIChatCompletionsModel(model="groq/compound", openai_client=groq_client)
|
| 30 |
|
| 31 |
web_research_agent = Agent(
|
| 32 |
-
name="WebResearchAgent",
|
| 33 |
model="gpt-4o-mini",
|
| 34 |
-
# description="An agent that can perform web searches using DuckDuckGo.",
|
| 35 |
tools=[duckduckgo_search, fetch_page_content],
|
| 36 |
instructions="""
|
| 37 |
You are WebResearchAgent — an advanced internet research assistant with two core abilities:
|
|
@@ -79,5 +77,6 @@ IMPORTANT RULES
|
|
| 79 |
"""
|
| 80 |
,
|
| 81 |
)
|
|
|
|
| 82 |
|
| 83 |
__all__ = ["web_research_agent", "duckduckgo_search", "fetch_page_content", "searchQuery", "searchResult"]
|
|
|
|
| 29 |
groq_model = OpenAIChatCompletionsModel(model="groq/compound", openai_client=groq_client)
|
| 30 |
|
| 31 |
web_research_agent = Agent(
|
|
|
|
| 32 |
model="gpt-4o-mini",
|
|
|
|
| 33 |
tools=[duckduckgo_search, fetch_page_content],
|
| 34 |
instructions="""
|
| 35 |
You are WebResearchAgent — an advanced internet research assistant with two core abilities:
|
|
|
|
| 77 |
"""
|
| 78 |
,
|
| 79 |
)
|
| 80 |
+
web_research_agent.description = "A deep research agent that performs extensive web searches and content fetching for complex research queries."
|
| 81 |
|
| 82 |
__all__ = ["web_research_agent", "duckduckgo_search", "fetch_page_content", "searchQuery", "searchResult"]
|
common/aagents/yf_agent.py
CHANGED
|
@@ -2,7 +2,7 @@
|
|
| 2 |
import os
|
| 3 |
from agents import Agent, OpenAIChatCompletionsModel
|
| 4 |
from dotenv import load_dotenv
|
| 5 |
-
from mcp.tools.yf_tools import get_summary, get_market_sentiment, get_history
|
| 6 |
from mcp.tools.time_tools import current_datetime
|
| 7 |
from openai import AsyncOpenAI
|
| 8 |
|
|
@@ -24,7 +24,7 @@ groq_model = OpenAIChatCompletionsModel(model="groq/compound", openai_client=gro
|
|
| 24 |
yf_agent = Agent(
|
| 25 |
name="YahooFinanceAgent",
|
| 26 |
model=gemini_model,
|
| 27 |
-
tools=[current_datetime, get_summary, get_market_sentiment, get_history],
|
| 28 |
instructions="""
|
| 29 |
You are a specialized **Financial Analysis Agent** 💰, expert in market research, financial data retrieval, and market analysis.
|
| 30 |
Your primary role is to provide *actionable*, *data-driven*, and *concise* financial reports based on the available tools.
|
|
@@ -35,14 +35,16 @@ yf_agent = Agent(
|
|
| 35 |
Financial data is extremely time-sensitive.
|
| 36 |
|
| 37 |
2. **Financial Data Integrity:** Use the Yahoo Finance tools for specific stock/index data:
|
| 38 |
-
- 'get_summary': Get latest summary information and intraday price data for a ticker
|
| 39 |
-
- 'get_market_sentiment': Analyze recent price changes and provide market sentiment (Bullish/Bearish/Neutral)
|
| 40 |
-
- 'get_history': Fetch historical price data for a given ticker
|
|
|
|
|
|
|
| 41 |
|
| 42 |
Be precise about the date range and data source.
|
| 43 |
|
| 44 |
-
3. **Synthesis and Analysis:** Do not just list data. You must **synthesize** financial data (prices, volume, sentiment)
|
| 45 |
-
to provide a complete analytical perspective (e.g., "Stock X is up 5% today driven by strong market momentum").
|
| 46 |
|
| 47 |
4. **Professional Clarity:** Present information in a clear, professional, and structured format.
|
| 48 |
Use numerical data and financial terminology correctly.
|
|
@@ -62,9 +64,12 @@ yf_agent = Agent(
|
|
| 62 |
|
| 63 |
Tool: get_market_sentiment
|
| 64 |
Input: { "symbol": "AAPL", "period": "1mo" }
|
| 65 |
-
|
| 66 |
-
Tool:
|
| 67 |
-
Input: { "symbol": "AAPL"
|
|
|
|
|
|
|
|
|
|
| 68 |
|
| 69 |
## Output Format Guidelines
|
| 70 |
|
|
@@ -74,5 +79,6 @@ yf_agent = Agent(
|
|
| 74 |
* Always include a disclaimer: "This analysis is for informational purposes only and is not financial advice."
|
| 75 |
""",
|
| 76 |
)
|
|
|
|
| 77 |
|
| 78 |
-
__all__ = ["yf_agent", "get_summary", "get_market_sentiment", "get_history", "current_datetime"]
|
|
|
|
| 2 |
import os
|
| 3 |
from agents import Agent, OpenAIChatCompletionsModel
|
| 4 |
from dotenv import load_dotenv
|
| 5 |
+
from mcp.tools.yf_tools import get_summary, get_market_sentiment, get_history, get_analyst_recommendations, get_earnings_calendar
|
| 6 |
from mcp.tools.time_tools import current_datetime
|
| 7 |
from openai import AsyncOpenAI
|
| 8 |
|
|
|
|
| 24 |
yf_agent = Agent(
|
| 25 |
name="YahooFinanceAgent",
|
| 26 |
model=gemini_model,
|
| 27 |
+
tools=[current_datetime, get_summary, get_market_sentiment, get_history, get_analyst_recommendations, get_earnings_calendar],
|
| 28 |
instructions="""
|
| 29 |
You are a specialized **Financial Analysis Agent** 💰, expert in market research, financial data retrieval, and market analysis.
|
| 30 |
Your primary role is to provide *actionable*, *data-driven*, and *concise* financial reports based on the available tools.
|
|
|
|
| 35 |
Financial data is extremely time-sensitive.
|
| 36 |
|
| 37 |
2. **Financial Data Integrity:** Use the Yahoo Finance tools for specific stock/index data:
|
| 38 |
+
- 'get_summary': Get latest summary information and intraday price data for a ticker.
|
| 39 |
+
- 'get_market_sentiment': Analyze recent price changes and provide market sentiment (Bullish/Bearish/Neutral).
|
| 40 |
+
- 'get_history': Fetch historical price data for a given ticker.
|
| 41 |
+
- 'get_analyst_recommendations': Fetch latest analyst ratings (Buy/Sell/Hold) for a symbol to provide **trading recommendations**.
|
| 42 |
+
- 'get_earnings_calendar': Fetch upcoming earnings dates for a symbol.
|
| 43 |
|
| 44 |
Be precise about the date range and data source.
|
| 45 |
|
| 46 |
+
3. **Synthesis and Analysis:** Do not just list data. You must **synthesize** financial data (prices, volume, sentiment, recommendations)
|
| 47 |
+
to provide a complete analytical perspective (e.g., "Stock X is up 5% today driven by strong market momentum and a generic 'Buy' rating from analysts").
|
| 48 |
|
| 49 |
4. **Professional Clarity:** Present information in a clear, professional, and structured format.
|
| 50 |
Use numerical data and financial terminology correctly.
|
|
|
|
| 64 |
|
| 65 |
Tool: get_market_sentiment
|
| 66 |
Input: { "symbol": "AAPL", "period": "1mo" }
|
| 67 |
+
|
| 68 |
+
Tool: get_analyst_recommendations
|
| 69 |
+
Input: { "symbol": "AAPL" }
|
| 70 |
+
|
| 71 |
+
Tool: get_earnings_calendar
|
| 72 |
+
Input: { "symbol": "AAPL" }
|
| 73 |
|
| 74 |
## Output Format Guidelines
|
| 75 |
|
|
|
|
| 79 |
* Always include a disclaimer: "This analysis is for informational purposes only and is not financial advice."
|
| 80 |
""",
|
| 81 |
)
|
| 82 |
+
yf_agent.description = "A financial analysis agent that provides stock summaries, market sentiment, and historical data using Yahoo Finance."
|
| 83 |
|
| 84 |
+
__all__ = ["yf_agent", "get_summary", "get_market_sentiment", "get_history", "get_analyst_recommendations", "get_earnings_calendar", "current_datetime"]
|
common/mcp/tools/time_tools.py
CHANGED
|
@@ -18,6 +18,7 @@ def current_datetime(format: str = "natural") -> str:
|
|
| 18 |
Returns:
|
| 19 |
str: Current date and time in the specified format
|
| 20 |
"""
|
|
|
|
| 21 |
now = datetime.now()
|
| 22 |
|
| 23 |
# Natural format options
|
|
|
|
| 18 |
Returns:
|
| 19 |
str: Current date and time in the specified format
|
| 20 |
"""
|
| 21 |
+
print(f"[DEBUG] current_datetime called with format='{format}'")
|
| 22 |
now = datetime.now()
|
| 23 |
|
| 24 |
# Natural format options
|
common/mcp/tools/yf_tools.py
CHANGED
|
@@ -37,6 +37,7 @@ def get_summary(symbol: str, period: str = "1d", interval: str = "1h") -> str:
|
|
| 37 |
- Volume
|
| 38 |
- Period and interval used
|
| 39 |
"""
|
|
|
|
| 40 |
try:
|
| 41 |
ticker = yf.Ticker(symbol)
|
| 42 |
|
|
@@ -109,6 +110,7 @@ def get_market_sentiment(symbol: str, period: str = "1mo") -> str:
|
|
| 109 |
str
|
| 110 |
A human-readable sentiment string including percentage change.
|
| 111 |
"""
|
|
|
|
| 112 |
try:
|
| 113 |
ticker = yf.Ticker(symbol)
|
| 114 |
|
|
@@ -164,6 +166,7 @@ def get_history(symbol: str, period: str = "1mo") -> str:
|
|
| 164 |
str
|
| 165 |
A formatted string showing the last 5 rows of historical prices (Open, High, Low, Close, Volume).
|
| 166 |
"""
|
|
|
|
| 167 |
try:
|
| 168 |
ticker = yf.Ticker(symbol)
|
| 169 |
|
|
@@ -190,3 +193,65 @@ def get_history(symbol: str, period: str = "1mo") -> str:
|
|
| 190 |
|
| 191 |
except Exception as e:
|
| 192 |
return f"Error fetching historical data for '{symbol}': {e}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
- Volume
|
| 38 |
- Period and interval used
|
| 39 |
"""
|
| 40 |
+
print(f"[DEBUG] get_summary called for symbol='{symbol}', period='{period}', interval='{interval}'")
|
| 41 |
try:
|
| 42 |
ticker = yf.Ticker(symbol)
|
| 43 |
|
|
|
|
| 110 |
str
|
| 111 |
A human-readable sentiment string including percentage change.
|
| 112 |
"""
|
| 113 |
+
print(f"[DEBUG] get_market_sentiment called for symbol='{symbol}', period='{period}'")
|
| 114 |
try:
|
| 115 |
ticker = yf.Ticker(symbol)
|
| 116 |
|
|
|
|
| 166 |
str
|
| 167 |
A formatted string showing the last 5 rows of historical prices (Open, High, Low, Close, Volume).
|
| 168 |
"""
|
| 169 |
+
print(f"[DEBUG] get_history called for symbol='{symbol}', period='{period}'")
|
| 170 |
try:
|
| 171 |
ticker = yf.Ticker(symbol)
|
| 172 |
|
|
|
|
| 193 |
|
| 194 |
except Exception as e:
|
| 195 |
return f"Error fetching historical data for '{symbol}': {e}"
|
| 196 |
+
|
| 197 |
+
@function_tool
|
| 198 |
+
def get_analyst_recommendations(symbol: str) -> str:
|
| 199 |
+
"""
|
| 200 |
+
Fetch analyst recommendations for a given ticker.
|
| 201 |
+
|
| 202 |
+
Parameters:
|
| 203 |
+
-----------
|
| 204 |
+
symbol : str
|
| 205 |
+
The ticker symbol.
|
| 206 |
+
|
| 207 |
+
Returns:
|
| 208 |
+
--------
|
| 209 |
+
str
|
| 210 |
+
Formatted string string of analyst recommendations.
|
| 211 |
+
"""
|
| 212 |
+
print(f"[DEBUG] get_analyst_recommendations called for symbol='{symbol}'")
|
| 213 |
+
try:
|
| 214 |
+
ticker = yf.Ticker(symbol)
|
| 215 |
+
recs = ticker.recommendations
|
| 216 |
+
if recs is None or recs.empty:
|
| 217 |
+
return f"No analyst recommendations found for {symbol}."
|
| 218 |
+
|
| 219 |
+
# Format the last few recommendations
|
| 220 |
+
latest = recs.tail(5)
|
| 221 |
+
return f"Analyst Recommendations for {symbol}:\n{latest.to_string()}"
|
| 222 |
+
except Exception as e:
|
| 223 |
+
return f"Error fetching recommendations for '{symbol}': {e}"
|
| 224 |
+
|
| 225 |
+
@function_tool
|
| 226 |
+
def get_earnings_calendar(symbol: str) -> str:
|
| 227 |
+
"""
|
| 228 |
+
Fetch the next earnings date for a ticker.
|
| 229 |
+
|
| 230 |
+
Parameters:
|
| 231 |
+
-----------
|
| 232 |
+
symbol : str
|
| 233 |
+
The ticker symbol.
|
| 234 |
+
|
| 235 |
+
Returns:
|
| 236 |
+
--------
|
| 237 |
+
str
|
| 238 |
+
Next earnings date info.
|
| 239 |
+
"""
|
| 240 |
+
print(f"[DEBUG] get_earnings_calendar called for symbol='{symbol}'")
|
| 241 |
+
try:
|
| 242 |
+
ticker = yf.Ticker(symbol)
|
| 243 |
+
calendar = ticker.calendar
|
| 244 |
+
if calendar is None:
|
| 245 |
+
return f"No earnings calendar found for {symbol}."
|
| 246 |
+
|
| 247 |
+
# Handle dict (new yfinance) or DataFrame (old yfinance)
|
| 248 |
+
if isinstance(calendar, dict):
|
| 249 |
+
if not calendar:
|
| 250 |
+
return f"No earnings calendar found for {symbol}."
|
| 251 |
+
elif hasattr(calendar, 'empty') and calendar.empty:
|
| 252 |
+
return f"No earnings calendar found for {symbol}."
|
| 253 |
+
|
| 254 |
+
return f"Earnings Calendar for {symbol}:\n{calendar}"
|
| 255 |
+
except Exception as e:
|
| 256 |
+
return f"Error fetching earnings calendar for '{symbol}': {e}"
|
| 257 |
+
|
src/chatbot/aagents/__init__.py
ADDED
|
File without changes
|
src/chatbot/aagents/input_validation_agent.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
from agents import Agent, OpenAIChatCompletionsModel, Runner, GuardrailFunctionOutput
|
| 5 |
+
from pydantic import BaseModel
|
| 6 |
+
from openai import AsyncOpenAI
|
| 7 |
+
from dotenv import load_dotenv
|
| 8 |
+
|
| 9 |
+
load_dotenv()
|
| 10 |
+
|
| 11 |
+
class ValidatedOutput(BaseModel):
|
| 12 |
+
is_valid: bool
|
| 13 |
+
reasoning: str
|
| 14 |
+
|
| 15 |
+
GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
|
| 16 |
+
google_api_key = os.getenv('GOOGLE_API_KEY')
|
| 17 |
+
gemini_client = AsyncOpenAI(base_url=GEMINI_BASE_URL, api_key=google_api_key)
|
| 18 |
+
gemini_model = OpenAIChatCompletionsModel(model="gemini-2.0-flash", openai_client=gemini_client)
|
| 19 |
+
|
| 20 |
+
input_validation_agent = Agent(
|
| 21 |
+
name="Guardrail Input Validation Agent",
|
| 22 |
+
instructions="""
|
| 23 |
+
You are a highly efficient and specialized **Agent** 🌐. Your sole function is to validate the user inputs.
|
| 24 |
+
|
| 25 |
+
## Core Directives & Priorities
|
| 26 |
+
1. You should flag if the user uses unparaliamentary language ONLY.
|
| 27 |
+
2. You MUST give reasoning for the same.
|
| 28 |
+
|
| 29 |
+
## Rules
|
| 30 |
+
- If it contains any of these, mark `"is_valid": false` and explain **why** in `"reasoning"`.
|
| 31 |
+
- Otherwise, mark `"is_valid": true` with reasoning like "The input follows respectful communication guidelines."
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
## Output Format (MANDATORY)
|
| 35 |
+
* Return a JSON object with the following structure:
|
| 36 |
+
{
|
| 37 |
+
"is_valid": <boolean>,
|
| 38 |
+
"reasoning": <string>
|
| 39 |
+
}
|
| 40 |
+
""",
|
| 41 |
+
model=gemini_model,
|
| 42 |
+
output_type=ValidatedOutput,
|
| 43 |
+
)
|
| 44 |
+
input_validation_agent.description = "A guardrail agent that validates user input for unparliamentary language."
|
| 45 |
+
|
| 46 |
+
async def input_validation_guardrail(ctx, agent, input_data):
|
| 47 |
+
result = await Runner.run(input_validation_agent, input_data, context=ctx.context)
|
| 48 |
+
raw_output = result.final_output
|
| 49 |
+
|
| 50 |
+
# Handle different return shapes gracefully
|
| 51 |
+
if isinstance(raw_output, ValidatedOutput):
|
| 52 |
+
final_output = raw_output
|
| 53 |
+
print("Parsed ValidatedOutput:", final_output)
|
| 54 |
+
else:
|
| 55 |
+
final_output = ValidatedOutput(
|
| 56 |
+
is_valid=False,
|
| 57 |
+
reasoning=f"Unexpected output type: {type(raw_output)}"
|
| 58 |
+
)
|
| 59 |
+
|
| 60 |
+
return GuardrailFunctionOutput(
|
| 61 |
+
output_info=final_output,
|
| 62 |
+
tripwire_triggered=not final_output.is_valid,
|
| 63 |
+
)
|
| 64 |
+
|
| 65 |
+
__all__ = ["input_validation_agent", "input_validation_guardrail", "ValidatedOutput"]
|
src/chatbot/aagents/orchestrator_agent.py
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import os
|
| 3 |
+
import asyncio
|
| 4 |
+
from common.aagents.search_agent import search_agent
|
| 5 |
+
from common.aagents.news_agent import news_agent
|
| 6 |
+
from common.aagents.yf_agent import yf_agent
|
| 7 |
+
from aagents.input_validation_agent import input_validation_guardrail
|
| 8 |
+
from agents import Agent, OpenAIChatCompletionsModel, Runner, function_tool
|
| 9 |
+
from openai import AsyncOpenAI
|
| 10 |
+
from dotenv import load_dotenv
|
| 11 |
+
|
| 12 |
+
load_dotenv()
|
| 13 |
+
|
| 14 |
+
# --- Model setup ---
|
| 15 |
+
GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
|
| 16 |
+
google_api_key = os.getenv("GOOGLE_API_KEY")
|
| 17 |
+
gemini_client = AsyncOpenAI(base_url=GEMINI_BASE_URL, api_key=google_api_key)
|
| 18 |
+
gemini_model = OpenAIChatCompletionsModel(
|
| 19 |
+
model="gemini-2.0-flash",
|
| 20 |
+
openai_client=gemini_client
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
# ----------------------------------------------------------
# PARALLEL EXECUTION TOOL
# ----------------------------------------------------------
@function_tool
async def broadcast_research(query: str) -> str:
    """
    Fan the query out to the specialized sub-agents (Yahoo Finance, News,
    Web Search) concurrently and aggregate their reports into one string.

    Args:
        query: The user's question or topic to research.

    Returns:
        Combined reports from all agents.
    """
    print(f"[DEBUG] broadcast_research called with query='{query}'")

    # One fresh, session-less Runner.run per agent so each lookup is
    # stateless and independent for this query.
    named_runs = [
        ("YahooFinanceAgent", Runner.run(yf_agent, query)),
        ("NewsAgent", Runner.run(news_agent, query)),
        ("WebSearchAgent", Runner.run(search_agent, query)),
    ]

    # Execute all three concurrently; return_exceptions=True keeps one
    # failing agent from discarding the others' results.
    outcomes = await asyncio.gather(
        *(run for _, run in named_runs), return_exceptions=True
    )

    # Render each outcome: exceptions become ❌ error lines, successes
    # become ✅ report sections.
    def render(outcome, agent_name):
        if isinstance(outcome, Exception):
            return f"❌ {agent_name} Error: {str(outcome)}"
        return f"✅ {agent_name} Report:\n{outcome.final_output}"

    out_yf = render(outcomes[0], "YahooFinanceAgent")
    out_news = render(outcomes[1], "NewsAgent")
    out_search = render(outcomes[2], "WebSearchAgent")

    return f"""
--- START OF AGENT REPORTS ---

{out_yf}

-----------------------------------

{out_news}

-----------------------------------

{out_search}

--- END OF AGENT REPORTS ---
"""
|
| 79 |
+
|
| 80 |
+
# System prompt for the orchestrator, kept as a module-level constant so the
# prompt text can be reviewed or tweaked without touching the Agent wiring.
_ORCHESTRATOR_INSTRUCTIONS = """
You are the **AI Market Research Orchestrator**.
Your goal is to provide a comprehensive, multi-perspective answer by synthesizing data from specialized sub-agents.

**Workflow**:
1. **Analyze Request**: Understand the user's question.
2. **Broadcast Query**: IMMEDIATELY call the `broadcast_research` tool with a relevant search query.
    * This tool runs the Finance, News, and Web Search agents in parallel.
3. **Synthesize Results**: Read the returned "Agent Reports".
    * Combine the financial data (prices, sentiment), news headlines, and general search context.
    * Compare and contrast findings if necessary.
    * Resolve conflicts by prioritizing specific data (e.g., Yahoo Finance for prices) over general text.
4. **Final Response**: Generate a clear, professional, and well-structured summary for the user. Do not simply paste the individual reports.

**Final Response Structure (MANDATORY)**:
You MUST structure your final response exactly as follows:

# [Market Analysis Title]

## 📊 Financial Snapshot
* **Price/Sentiment**: [Synthesized from Yahoo Finance]
* **Analyst Rating**: [Buy/Sell/Hold consensus]

## 📰 Key Developments
* [Headline 1] - [Source]
* [Headline 2] - [Source]

## 🔍 Web Insights
* [Key finding from general search, if any]

## ⚖️ Synthesis & Recommendation
* [Your comprehensive summary merging all data points. Highlight any conflicts.]

**Constraint**:
* Do NOT try to answer based on your own knowledge if live data is needed.
* ALWAYS use `broadcast_research` for queries requiring up-to-date information.
* If agents return "No data", explicitly state that in the relevant section.
"""

# Top-level agent exposed to the chatbot UI: it owns a single tool
# (broadcast_research) and synthesizes the sub-agents' parallel reports.
orchestrator_agent = Agent(
    name="AI Market Research Orchestrator",
    model=gemini_model,
    tools=[broadcast_research],
    instructions=_ORCHESTRATOR_INSTRUCTIONS,
)
# Description is set as an attribute post-construction, matching the pattern
# used by the other agents in this package (e.g. google_agent).
orchestrator_agent.description = "An intelligent orchestrator that queries Finance, News, and Search agents in parallel and synthesizes a comprehensive response."


__all__ = ["orchestrator_agent"]
|
src/chatbot/app.py
CHANGED
|
@@ -4,11 +4,14 @@ import glob
|
|
| 4 |
import asyncio
|
| 5 |
import sys
|
| 6 |
import uuid
|
| 7 |
-
|
| 8 |
# Add project root
|
| 9 |
-
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".")))
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
-
from
|
| 12 |
from agents import Runner, trace, SQLiteSession
|
| 13 |
from agents.exceptions import InputGuardrailTripwireTriggered
|
| 14 |
|
|
@@ -187,7 +190,7 @@ st.markdown("""
|
|
| 187 |
# -----------------------------
|
| 188 |
async def get_ai_response(prompt: str) -> str:
|
| 189 |
try:
|
| 190 |
-
agent =
|
| 191 |
# Ensure session is valid
|
| 192 |
current_session = st.session_state.ai_session
|
| 193 |
with trace("Chatbot Agent Run"):
|
|
@@ -216,6 +219,11 @@ with st.sidebar:
|
|
| 216 |
for idx, prompt_text in enumerate(prompts):
|
| 217 |
label = prompt_labels[idx] if idx < len(prompt_labels) else f"Prompt {idx+1}"
|
| 218 |
if st.button(label, key=f"sidebar_btn_{idx}", use_container_width=True):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 219 |
selected_prompt = prompt_text
|
| 220 |
|
| 221 |
st.markdown("---")
|
|
|
|
| 4 |
import asyncio
|
| 5 |
import sys
|
| 6 |
import uuid
|
| 7 |
+
from pathlib import Path
|
| 8 |
# Add project root
|
| 9 |
+
# sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".")))
|
| 10 |
+
# Add common directory to path for imports
|
| 11 |
+
project_root = Path(__file__).resolve().parent.parent.parent
|
| 12 |
+
sys.path.insert(0, str(project_root))
|
| 13 |
|
| 14 |
+
from aagents.orchestrator_agent import orchestrator_agent
|
| 15 |
from agents import Runner, trace, SQLiteSession
|
| 16 |
from agents.exceptions import InputGuardrailTripwireTriggered
|
| 17 |
|
|
|
|
| 190 |
# -----------------------------
|
| 191 |
async def get_ai_response(prompt: str) -> str:
|
| 192 |
try:
|
| 193 |
+
agent = orchestrator_agent
|
| 194 |
# Ensure session is valid
|
| 195 |
current_session = st.session_state.ai_session
|
| 196 |
with trace("Chatbot Agent Run"):
|
|
|
|
| 219 |
for idx, prompt_text in enumerate(prompts):
|
| 220 |
label = prompt_labels[idx] if idx < len(prompt_labels) else f"Prompt {idx+1}"
|
| 221 |
if st.button(label, key=f"sidebar_btn_{idx}", use_container_width=True):
|
| 222 |
+
# Reset conversation
|
| 223 |
+
st.session_state.messages = []
|
| 224 |
+
st.session_state.ai_session_id = str(uuid.uuid4())
|
| 225 |
+
# Recreate session object with new ID
|
| 226 |
+
st.session_state.ai_session = SQLiteSession(f"conversation_{st.session_state.ai_session_id}.db")
|
| 227 |
selected_prompt = prompt_text
|
| 228 |
|
| 229 |
st.markdown("---")
|
src/chatbot/prompts/upcoming_earnings.txt
CHANGED
|
@@ -1 +1 @@
|
|
| 1 |
-
|
|
|
|
| 1 |
+
Search for upcoming critical earnings in the stock market.
|