Spaces:
Sleeping
Sleeping
Faham
commited on
Commit
·
139d9d7
1
Parent(s):
31adc25
UPDATE: revamp frontend
Browse files- streamlit_app.py → Home.py +387 -107
- README.md +1 -1
- pages/System_Monitor.py +123 -0
- pyproject.toml +2 -0
- requirements.txt +4 -0
- resource_monitor.py +319 -0
- agent_client.py → terminal_client.py +1 -1
- test_prophet_accuracy.py +373 -0
- uv.lock +0 -0
streamlit_app.py → Home.py
RENAMED
|
@@ -3,17 +3,38 @@ import asyncio
|
|
| 3 |
import json
|
| 4 |
import re
|
| 5 |
import os
|
| 6 |
-
import pandas as pd
|
| 7 |
import plotly.graph_objects as go
|
| 8 |
-
from plotly.subplots import make_subplots
|
| 9 |
import yfinance as yf
|
| 10 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
from dotenv import load_dotenv
|
| 12 |
from openai import OpenAI
|
| 13 |
from mcp.client.session import ClientSession
|
| 14 |
from mcp.client.stdio import stdio_client
|
| 15 |
from mcp import StdioServerParameters, types
|
| 16 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 17 |
# Load environment variables
|
| 18 |
load_dotenv()
|
| 19 |
|
|
@@ -230,15 +251,222 @@ async def get_stock_data(ticker: str) -> str:
|
|
| 230 |
|
| 231 |
|
| 232 |
def create_stock_chart(ticker: str):
|
| 233 |
-
"""Create an interactive stock price chart for the given ticker."""
|
| 234 |
try:
|
| 235 |
-
#
|
| 236 |
-
|
| 237 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 238 |
|
| 239 |
if hist_data.empty:
|
| 240 |
st.warning(f"No data available for {ticker}")
|
| 241 |
-
return
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 242 |
|
| 243 |
# Create simple line chart
|
| 244 |
fig = go.Figure()
|
|
@@ -326,11 +554,32 @@ async def execute_tool_call(tool_call):
|
|
| 326 |
"""Execute a tool call using MCP servers."""
|
| 327 |
try:
|
| 328 |
tool_name = tool_call.function.name
|
| 329 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 330 |
ticker = arguments.get("ticker")
|
| 331 |
|
| 332 |
with st.status(
|
| 333 |
-
f"🛠️ Executing {tool_name} for {ticker}...", expanded=
|
| 334 |
) as status:
|
| 335 |
if tool_name == "get_latest_news":
|
| 336 |
result = await get_news_data(ticker)
|
|
@@ -353,9 +602,6 @@ async def execute_tool_call(tool_call):
|
|
| 353 |
else:
|
| 354 |
status.update(label=f"❌ Unknown tool: {tool_name}", state="error")
|
| 355 |
return f"Unknown tool: {tool_name}"
|
| 356 |
-
except json.JSONDecodeError as e:
|
| 357 |
-
st.error(f"❌ Invalid tool arguments format: {e}")
|
| 358 |
-
return f"Error: Invalid tool arguments format"
|
| 359 |
except Exception as e:
|
| 360 |
st.error(f"❌ Error executing tool {tool_call.function.name}: {e}")
|
| 361 |
return f"Error executing tool {tool_call.function.name}: {e}"
|
|
@@ -399,7 +645,7 @@ async def run_agent(user_query, selected_ticker):
|
|
| 399 |
|
| 400 |
try:
|
| 401 |
# Get initial response from the model
|
| 402 |
-
with st.spinner("🤖
|
| 403 |
response = client.chat.completions.create(
|
| 404 |
model=model,
|
| 405 |
messages=messages,
|
|
@@ -438,7 +684,7 @@ async def run_agent(user_query, selected_ticker):
|
|
| 438 |
)
|
| 439 |
|
| 440 |
# Get final response from the model
|
| 441 |
-
with st.spinner("🤖
|
| 442 |
final_response = client.chat.completions.create(
|
| 443 |
model="openai/gpt-4o-mini", # Try a different model
|
| 444 |
messages=messages,
|
|
@@ -475,16 +721,24 @@ def display_top_news(ticker: str):
|
|
| 475 |
clean_text = re.sub(r"\s+", " ", clean_text).strip()
|
| 476 |
return clean_text
|
| 477 |
|
| 478 |
-
#
|
| 479 |
-
|
| 480 |
-
|
| 481 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 482 |
|
| 483 |
if not articles:
|
| 484 |
st.info(f"No recent news found for {ticker}")
|
| 485 |
return
|
| 486 |
|
| 487 |
-
|
| 488 |
for i, article in enumerate(articles[:5], 1):
|
| 489 |
title = preprocess_text(article.get("title", ""))
|
| 490 |
url = article.get("url", "")
|
|
@@ -517,55 +771,28 @@ def test_server_availability():
|
|
| 517 |
# Test news server
|
| 518 |
news_server_path = os.path.join(current_dir, "news_server.py")
|
| 519 |
if not os.path.exists(news_server_path):
|
| 520 |
-
|
| 521 |
return False
|
| 522 |
|
| 523 |
# Test stock data server
|
| 524 |
stock_server_path = os.path.join(current_dir, "stock_data_server.py")
|
| 525 |
if not os.path.exists(stock_server_path):
|
| 526 |
-
|
| 527 |
return False
|
| 528 |
|
| 529 |
# Test if servers can be executed by checking if they can be imported
|
| 530 |
import sys
|
| 531 |
import importlib.util
|
| 532 |
|
| 533 |
-
# Initialize session state for notifications
|
| 534 |
-
if "notifications" not in st.session_state:
|
| 535 |
-
st.session_state.notifications = []
|
| 536 |
-
if "notification_times" not in st.session_state:
|
| 537 |
-
st.session_state.notification_times = {}
|
| 538 |
-
if "servers_importable_shown" not in st.session_state:
|
| 539 |
-
st.session_state.servers_importable_shown = False
|
| 540 |
-
|
| 541 |
-
current_time = time.time()
|
| 542 |
-
|
| 543 |
-
# Clean up old notifications (older than 10 seconds)
|
| 544 |
-
st.session_state.notifications = [
|
| 545 |
-
msg
|
| 546 |
-
for msg, timestamp in zip(
|
| 547 |
-
st.session_state.notifications, st.session_state.notification_times.values()
|
| 548 |
-
)
|
| 549 |
-
if current_time - timestamp < 10
|
| 550 |
-
]
|
| 551 |
-
st.session_state.notification_times = {
|
| 552 |
-
k: v
|
| 553 |
-
for k, v in st.session_state.notification_times.items()
|
| 554 |
-
if current_time - v < 10
|
| 555 |
-
}
|
| 556 |
-
|
| 557 |
try:
|
| 558 |
# Test if news_server can be imported
|
| 559 |
spec = importlib.util.spec_from_file_location("news_server", news_server_path)
|
| 560 |
if spec is None or spec.loader is None:
|
| 561 |
-
|
| 562 |
else:
|
| 563 |
-
|
| 564 |
-
if not st.session_state.servers_importable_shown:
|
| 565 |
-
st.success("✅ news_server.py is importable")
|
| 566 |
-
st.session_state.servers_importable_shown = True
|
| 567 |
except Exception as e:
|
| 568 |
-
|
| 569 |
|
| 570 |
try:
|
| 571 |
# Test if stock_data_server can be imported
|
|
@@ -573,26 +800,29 @@ def test_server_availability():
|
|
| 573 |
"stock_data_server", stock_server_path
|
| 574 |
)
|
| 575 |
if spec is None or spec.loader is None:
|
| 576 |
-
|
| 577 |
else:
|
| 578 |
-
|
| 579 |
-
if not st.session_state.servers_importable_shown:
|
| 580 |
-
st.success("✅ stock_data_server.py is importable")
|
| 581 |
-
st.session_state.servers_importable_shown = True
|
| 582 |
except Exception as e:
|
| 583 |
-
|
| 584 |
|
| 585 |
return True
|
| 586 |
|
| 587 |
|
| 588 |
def main():
|
| 589 |
-
st.set_page_config(page_title="
|
| 590 |
|
| 591 |
-
st.title("📈
|
| 592 |
st.markdown(
|
| 593 |
"Get comprehensive financial analysis and insights for your selected stocks."
|
| 594 |
)
|
| 595 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 596 |
# Initialize tools
|
| 597 |
initialize_tools()
|
| 598 |
|
|
@@ -622,6 +852,20 @@ def main():
|
|
| 622 |
placeholder="Select a ticker...",
|
| 623 |
)
|
| 624 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 625 |
# Main content area
|
| 626 |
if not selected_ticker:
|
| 627 |
st.info(
|
|
@@ -647,6 +891,11 @@ def main():
|
|
| 647 |
f"✅ Selected: {selected_ticker} - {available_tickers[selected_ticker]}"
|
| 648 |
)
|
| 649 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 650 |
# Stock Chart and News Section
|
| 651 |
st.header("📈 Stock Analysis")
|
| 652 |
|
|
@@ -655,59 +904,90 @@ def main():
|
|
| 655 |
|
| 656 |
with col1:
|
| 657 |
st.subheader("📈 Stock Price Chart")
|
| 658 |
-
#
|
| 659 |
-
|
| 660 |
-
|
| 661 |
-
|
| 662 |
-
|
| 663 |
-
|
| 664 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 665 |
|
| 666 |
with col2:
|
| 667 |
st.subheader("📰 Top News")
|
| 668 |
-
#
|
| 669 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 670 |
|
| 671 |
-
# Chat Section
|
| 672 |
st.header("💬 Chat with Financial Agent")
|
| 673 |
|
| 674 |
-
#
|
| 675 |
-
|
| 676 |
-
|
| 677 |
-
|
| 678 |
-
|
| 679 |
-
|
| 680 |
-
|
| 681 |
-
|
| 682 |
-
|
| 683 |
-
|
| 684 |
-
|
| 685 |
-
|
| 686 |
-
|
| 687 |
-
|
| 688 |
-
|
| 689 |
-
|
| 690 |
-
|
| 691 |
-
|
| 692 |
-
|
| 693 |
-
|
| 694 |
-
|
| 695 |
-
|
| 696 |
-
|
| 697 |
-
|
| 698 |
-
|
| 699 |
-
|
| 700 |
-
|
| 701 |
-
|
| 702 |
-
|
| 703 |
-
#
|
| 704 |
-
|
| 705 |
-
|
| 706 |
-
|
| 707 |
-
|
| 708 |
-
|
| 709 |
-
|
| 710 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 711 |
|
| 712 |
|
| 713 |
if __name__ == "__main__":
|
|
|
|
| 3 |
import json
|
| 4 |
import re
|
| 5 |
import os
|
|
|
|
| 6 |
import plotly.graph_objects as go
|
|
|
|
| 7 |
import yfinance as yf
|
| 8 |
+
import time
|
| 9 |
+
|
| 10 |
+
try:
|
| 11 |
+
from prophet import Prophet
|
| 12 |
+
except ImportError:
|
| 13 |
+
st.error("Prophet not installed. Please run: pip install prophet")
|
| 14 |
+
Prophet = None
|
| 15 |
from dotenv import load_dotenv
|
| 16 |
from openai import OpenAI
|
| 17 |
from mcp.client.session import ClientSession
|
| 18 |
from mcp.client.stdio import stdio_client
|
| 19 |
from mcp import StdioServerParameters, types
|
| 20 |
|
| 21 |
+
# Import resource monitoring
|
| 22 |
+
try:
|
| 23 |
+
from resource_monitor import (
|
| 24 |
+
start_resource_monitoring,
|
| 25 |
+
stop_resource_monitoring,
|
| 26 |
+
get_resource_stats,
|
| 27 |
+
create_resource_dashboard,
|
| 28 |
+
get_resource_summary,
|
| 29 |
+
export_resource_data,
|
| 30 |
+
resource_monitor,
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
RESOURCE_MONITORING_AVAILABLE = True
|
| 34 |
+
except ImportError:
|
| 35 |
+
RESOURCE_MONITORING_AVAILABLE = False
|
| 36 |
+
st.warning("Resource monitoring not available. Install psutil: pip install psutil")
|
| 37 |
+
|
| 38 |
# Load environment variables
|
| 39 |
load_dotenv()
|
| 40 |
|
|
|
|
| 251 |
|
| 252 |
|
| 253 |
def create_stock_chart(ticker: str):
|
| 254 |
+
"""Create an interactive stock price chart with Prophet predictions for the given ticker."""
|
| 255 |
try:
|
| 256 |
+
# Check if Prophet is available
|
| 257 |
+
if Prophet is None:
|
| 258 |
+
st.error("Prophet is not installed. Please install it with: uv add prophet")
|
| 259 |
+
return create_basic_stock_chart(ticker)
|
| 260 |
+
|
| 261 |
+
# Get stock data - 1 year for training Prophet
|
| 262 |
+
with st.spinner(f"📊 Fetching stock data for {ticker}..."):
|
| 263 |
+
stock = yf.Ticker(ticker)
|
| 264 |
+
hist_data = stock.history(period="1y")
|
| 265 |
+
|
| 266 |
+
# Track yfinance API call
|
| 267 |
+
if RESOURCE_MONITORING_AVAILABLE:
|
| 268 |
+
resource_monitor.increment_yfinance_calls()
|
| 269 |
|
| 270 |
if hist_data.empty:
|
| 271 |
st.warning(f"No data available for {ticker}")
|
| 272 |
+
return None
|
| 273 |
+
|
| 274 |
+
# Prepare data for Prophet with outlier removal
|
| 275 |
+
df = hist_data.reset_index()
|
| 276 |
+
|
| 277 |
+
# Remove outliers using IQR method for better model training
|
| 278 |
+
Q1 = df["Close"].quantile(0.25)
|
| 279 |
+
Q3 = df["Close"].quantile(0.75)
|
| 280 |
+
IQR = Q3 - Q1
|
| 281 |
+
lower_bound = Q1 - 1.5 * IQR
|
| 282 |
+
upper_bound = Q3 + 1.5 * IQR
|
| 283 |
+
|
| 284 |
+
# Filter out outliers
|
| 285 |
+
df = df[(df["Close"] >= lower_bound) & (df["Close"] <= upper_bound)]
|
| 286 |
+
|
| 287 |
+
# Remove timezone information from the Date column for Prophet compatibility
|
| 288 |
+
df["ds"] = df["Date"].dt.tz_localize(
|
| 289 |
+
None
|
| 290 |
+
) # Prophet requires timezone-naive dates
|
| 291 |
+
df["y"] = df["Close"] # Prophet requires 'y' column for values
|
| 292 |
+
|
| 293 |
+
# Train Prophet model with optimized configuration
|
| 294 |
+
start_time = time.time()
|
| 295 |
+
with st.spinner(f"Training Prophet model for {ticker}..."):
|
| 296 |
+
# Configure Prophet model with optimized parameters
|
| 297 |
+
model = Prophet(
|
| 298 |
+
yearly_seasonality=True,
|
| 299 |
+
weekly_seasonality=True,
|
| 300 |
+
daily_seasonality=False,
|
| 301 |
+
changepoint_prior_scale=0.01, # Reduced for smoother trends
|
| 302 |
+
seasonality_prior_scale=10.0, # Increased seasonality strength
|
| 303 |
+
seasonality_mode="multiplicative",
|
| 304 |
+
interval_width=0.8, # Tighter confidence intervals
|
| 305 |
+
mcmc_samples=0, # Disable MCMC for faster training
|
| 306 |
+
)
|
| 307 |
+
|
| 308 |
+
# Add custom seasonalities for better stock patterns
|
| 309 |
+
model.add_seasonality(name="monthly", period=30.5, fourier_order=5)
|
| 310 |
+
|
| 311 |
+
model.add_seasonality(name="quarterly", period=91.25, fourier_order=8)
|
| 312 |
+
|
| 313 |
+
model.fit(df[["ds", "y"]])
|
| 314 |
+
|
| 315 |
+
# Make predictions for next 30 days
|
| 316 |
+
future = model.make_future_dataframe(periods=30)
|
| 317 |
+
forecast = model.predict(future)
|
| 318 |
+
|
| 319 |
+
# Get the forecast data for the next 30 days (future predictions only)
|
| 320 |
+
# Find the last date in historical data
|
| 321 |
+
last_historical_date = df["ds"].max()
|
| 322 |
+
|
| 323 |
+
# Add one day to ensure we start from tomorrow
|
| 324 |
+
from datetime import timedelta
|
| 325 |
+
|
| 326 |
+
tomorrow = last_historical_date + timedelta(days=1)
|
| 327 |
+
|
| 328 |
+
# Filter for only future predictions (starting from tomorrow)
|
| 329 |
+
forecast_future = forecast[forecast["ds"] >= tomorrow].copy()
|
| 330 |
+
|
| 331 |
+
# Track Prophet training time
|
| 332 |
+
training_time = time.time() - start_time
|
| 333 |
+
if RESOURCE_MONITORING_AVAILABLE:
|
| 334 |
+
resource_monitor.add_prophet_training_time(training_time)
|
| 335 |
+
|
| 336 |
+
# Create interactive chart with historical data and predictions
|
| 337 |
+
fig = go.Figure()
|
| 338 |
+
|
| 339 |
+
# Add historical price data (full year for context)
|
| 340 |
+
# Ensure we only show actual historical data, not predictions
|
| 341 |
+
# Convert timezone-aware dates to timezone-naive for comparison
|
| 342 |
+
hist_data_filtered = hist_data[
|
| 343 |
+
hist_data.index.tz_localize(None) <= last_historical_date
|
| 344 |
+
]
|
| 345 |
+
fig.add_trace(
|
| 346 |
+
go.Scatter(
|
| 347 |
+
x=hist_data_filtered.index,
|
| 348 |
+
y=hist_data_filtered["Close"],
|
| 349 |
+
mode="lines+markers",
|
| 350 |
+
name=f"{ticker} Historical Price (Last Year)",
|
| 351 |
+
line=dict(color="#1f77b4", width=2),
|
| 352 |
+
marker=dict(size=4),
|
| 353 |
+
)
|
| 354 |
+
)
|
| 355 |
+
|
| 356 |
+
# Add Prophet predictions for next 30 days (starting from tomorrow)
|
| 357 |
+
fig.add_trace(
|
| 358 |
+
go.Scatter(
|
| 359 |
+
x=forecast_future["ds"],
|
| 360 |
+
y=forecast_future["yhat"],
|
| 361 |
+
mode="lines+markers",
|
| 362 |
+
name=f"{ticker} Future Predictions (Next 30 Days)",
|
| 363 |
+
line=dict(color="#ff7f0e", width=2, dash="dash"),
|
| 364 |
+
marker=dict(size=4),
|
| 365 |
+
)
|
| 366 |
+
)
|
| 367 |
+
|
| 368 |
+
# Add confidence intervals for future predictions
|
| 369 |
+
fig.add_trace(
|
| 370 |
+
go.Scatter(
|
| 371 |
+
x=forecast_future["ds"].tolist() + forecast_future["ds"].tolist()[::-1],
|
| 372 |
+
y=forecast_future["yhat_upper"].tolist()
|
| 373 |
+
+ forecast_future["yhat_lower"].tolist()[::-1],
|
| 374 |
+
fill="toself",
|
| 375 |
+
fillcolor="rgba(255, 127, 14, 0.3)",
|
| 376 |
+
line=dict(color="rgba(255, 127, 14, 0)"),
|
| 377 |
+
name="Prediction Confidence Interval",
|
| 378 |
+
showlegend=False,
|
| 379 |
+
)
|
| 380 |
+
)
|
| 381 |
+
|
| 382 |
+
# Update layout
|
| 383 |
+
fig.update_layout(
|
| 384 |
+
title=f"{ticker} Stock Price with Next 30-Day Predictions",
|
| 385 |
+
xaxis_title="Date",
|
| 386 |
+
yaxis_title="Price ($)",
|
| 387 |
+
height=500,
|
| 388 |
+
hovermode="x unified",
|
| 389 |
+
legend=dict(
|
| 390 |
+
orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1
|
| 391 |
+
),
|
| 392 |
+
)
|
| 393 |
+
|
| 394 |
+
# Update axes
|
| 395 |
+
fig.update_xaxes(
|
| 396 |
+
title_text="Date",
|
| 397 |
+
tickformat="%b %d",
|
| 398 |
+
tickangle=45,
|
| 399 |
+
)
|
| 400 |
+
fig.update_yaxes(title_text="Price ($)")
|
| 401 |
+
|
| 402 |
+
# Display prediction summary
|
| 403 |
+
current_price = hist_data["Close"].iloc[-1]
|
| 404 |
+
predicted_price_30d = forecast_future["yhat"].iloc[-1]
|
| 405 |
+
price_change = predicted_price_30d - current_price
|
| 406 |
+
price_change_pct = (price_change / current_price) * 100
|
| 407 |
+
|
| 408 |
+
# Calculate confidence interval
|
| 409 |
+
confidence_lower = forecast_future["yhat_lower"].iloc[-1]
|
| 410 |
+
confidence_upper = forecast_future["yhat_upper"].iloc[-1]
|
| 411 |
+
confidence_range = confidence_upper - confidence_lower
|
| 412 |
+
|
| 413 |
+
# Display detailed prediction information
|
| 414 |
+
col1, col2, col3 = st.columns([1, 1, 1])
|
| 415 |
+
|
| 416 |
+
with col1:
|
| 417 |
+
st.metric(
|
| 418 |
+
"Current Price",
|
| 419 |
+
f"${current_price:.2f}",
|
| 420 |
+
)
|
| 421 |
+
|
| 422 |
+
with col2:
|
| 423 |
+
st.metric(
|
| 424 |
+
"30-Day Prediction",
|
| 425 |
+
f"${predicted_price_30d:.2f}",
|
| 426 |
+
delta=f"{price_change_pct:+.2f}%",
|
| 427 |
+
)
|
| 428 |
+
|
| 429 |
+
with col3:
|
| 430 |
+
st.metric(
|
| 431 |
+
"Expected Change",
|
| 432 |
+
f"${price_change:.2f} ({price_change_pct:+.2f}%)",
|
| 433 |
+
)
|
| 434 |
+
|
| 435 |
+
# Additional prediction details
|
| 436 |
+
st.info(
|
| 437 |
+
f"""
|
| 438 |
+
**📊 30-Day Prediction Details for {ticker}:**
|
| 439 |
+
- **Current Price:** ${current_price:.2f}
|
| 440 |
+
- **Predicted Price (30 days):** ${predicted_price_30d:.2f}
|
| 441 |
+
- **Expected Change:** ${price_change:.2f} ({price_change_pct:+.2f}%)
|
| 442 |
+
- **Confidence Range:** ${confidence_lower:.2f} - ${confidence_upper:.2f} (±${confidence_range/2:.2f})
|
| 443 |
+
- **Model Training Time:** {training_time:.2f}s
|
| 444 |
+
|
| 445 |
+
⚠️ **Disclaimer**: Stock predictions have approximately 51% accuracy.
|
| 446 |
+
These forecasts are for informational purposes only and should not be used as
|
| 447 |
+
the sole basis for investment decisions. Always conduct your own research
|
| 448 |
+
and consider consulting with financial advisors.
|
| 449 |
+
"""
|
| 450 |
+
)
|
| 451 |
+
|
| 452 |
+
return fig
|
| 453 |
+
|
| 454 |
+
except Exception as e:
|
| 455 |
+
st.error(f"Error creating chart for {ticker}: {e}")
|
| 456 |
+
return create_basic_stock_chart(ticker)
|
| 457 |
+
|
| 458 |
+
|
| 459 |
+
def create_basic_stock_chart(ticker: str):
|
| 460 |
+
"""Create a basic stock price chart without Prophet predictions."""
|
| 461 |
+
try:
|
| 462 |
+
# Get stock data with loading state
|
| 463 |
+
with st.spinner(f"📊 Fetching basic stock data for {ticker}..."):
|
| 464 |
+
stock = yf.Ticker(ticker)
|
| 465 |
+
hist_data = stock.history(period="30d")
|
| 466 |
+
|
| 467 |
+
if hist_data.empty:
|
| 468 |
+
st.warning(f"No data available for {ticker}")
|
| 469 |
+
return None
|
| 470 |
|
| 471 |
# Create simple line chart
|
| 472 |
fig = go.Figure()
|
|
|
|
| 554 |
"""Execute a tool call using MCP servers."""
|
| 555 |
try:
|
| 556 |
tool_name = tool_call.function.name
|
| 557 |
+
|
| 558 |
+
# Clean and validate the arguments JSON
|
| 559 |
+
arguments_str = tool_call.function.arguments.strip()
|
| 560 |
+
|
| 561 |
+
# Try to extract valid JSON if there's extra content
|
| 562 |
+
try:
|
| 563 |
+
arguments = json.loads(arguments_str)
|
| 564 |
+
except json.JSONDecodeError:
|
| 565 |
+
# Try to find JSON within the string
|
| 566 |
+
import re
|
| 567 |
+
|
| 568 |
+
json_match = re.search(r"\{[^{}]*\}", arguments_str)
|
| 569 |
+
if json_match:
|
| 570 |
+
try:
|
| 571 |
+
arguments = json.loads(json_match.group())
|
| 572 |
+
except json.JSONDecodeError:
|
| 573 |
+
st.error(f"❌ Could not parse tool arguments: {arguments_str}")
|
| 574 |
+
return f"Error: Invalid tool arguments format"
|
| 575 |
+
else:
|
| 576 |
+
st.error(f"❌ Could not parse tool arguments: {arguments_str}")
|
| 577 |
+
return f"Error: Invalid tool arguments format"
|
| 578 |
+
|
| 579 |
ticker = arguments.get("ticker")
|
| 580 |
|
| 581 |
with st.status(
|
| 582 |
+
f"🛠️ Executing {tool_name} for {ticker}...", expanded=True
|
| 583 |
) as status:
|
| 584 |
if tool_name == "get_latest_news":
|
| 585 |
result = await get_news_data(ticker)
|
|
|
|
| 602 |
else:
|
| 603 |
status.update(label=f"❌ Unknown tool: {tool_name}", state="error")
|
| 604 |
return f"Unknown tool: {tool_name}"
|
|
|
|
|
|
|
|
|
|
| 605 |
except Exception as e:
|
| 606 |
st.error(f"❌ Error executing tool {tool_call.function.name}: {e}")
|
| 607 |
return f"Error executing tool {tool_call.function.name}: {e}"
|
|
|
|
| 645 |
|
| 646 |
try:
|
| 647 |
# Get initial response from the model
|
| 648 |
+
with st.spinner("🤖 Generating analysis..."):
|
| 649 |
response = client.chat.completions.create(
|
| 650 |
model=model,
|
| 651 |
messages=messages,
|
|
|
|
| 684 |
)
|
| 685 |
|
| 686 |
# Get final response from the model
|
| 687 |
+
with st.spinner("🤖 Finalizing analysis..."):
|
| 688 |
final_response = client.chat.completions.create(
|
| 689 |
model="openai/gpt-4o-mini", # Try a different model
|
| 690 |
messages=messages,
|
|
|
|
| 721 |
clean_text = re.sub(r"\s+", " ", clean_text).strip()
|
| 722 |
return clean_text
|
| 723 |
|
| 724 |
+
# Check if news is already cached
|
| 725 |
+
news_cache_key = f"news_data_{ticker}"
|
| 726 |
+
if news_cache_key in st.session_state:
|
| 727 |
+
articles = st.session_state[news_cache_key]
|
| 728 |
+
else:
|
| 729 |
+
# Get news data with loading state
|
| 730 |
+
with st.spinner(f"📰 Loading news for {ticker}..."):
|
| 731 |
+
google_news = gnews.GNews(language="en", country="US", period="7d")
|
| 732 |
+
search_query = f'"{ticker}" stock market news'
|
| 733 |
+
articles = google_news.get_news(search_query)
|
| 734 |
+
# Cache the articles
|
| 735 |
+
st.session_state[news_cache_key] = articles
|
| 736 |
|
| 737 |
if not articles:
|
| 738 |
st.info(f"No recent news found for {ticker}")
|
| 739 |
return
|
| 740 |
|
| 741 |
+
# Display top 5 articles
|
| 742 |
for i, article in enumerate(articles[:5], 1):
|
| 743 |
title = preprocess_text(article.get("title", ""))
|
| 744 |
url = article.get("url", "")
|
|
|
|
| 771 |
# Test news server
|
| 772 |
news_server_path = os.path.join(current_dir, "news_server.py")
|
| 773 |
if not os.path.exists(news_server_path):
|
| 774 |
+
print(f"❌ ERROR: news_server.py not found at {news_server_path}")
|
| 775 |
return False
|
| 776 |
|
| 777 |
# Test stock data server
|
| 778 |
stock_server_path = os.path.join(current_dir, "stock_data_server.py")
|
| 779 |
if not os.path.exists(stock_server_path):
|
| 780 |
+
print(f"❌ ERROR: stock_data_server.py not found at {stock_server_path}")
|
| 781 |
return False
|
| 782 |
|
| 783 |
# Test if servers can be executed by checking if they can be imported
|
| 784 |
import sys
|
| 785 |
import importlib.util
|
| 786 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 787 |
try:
|
| 788 |
# Test if news_server can be imported
|
| 789 |
spec = importlib.util.spec_from_file_location("news_server", news_server_path)
|
| 790 |
if spec is None or spec.loader is None:
|
| 791 |
+
print("⚠️ WARNING: Could not load news_server.py")
|
| 792 |
else:
|
| 793 |
+
print("✅ SUCCESS: news_server.py is importable")
|
|
|
|
|
|
|
|
|
|
| 794 |
except Exception as e:
|
| 795 |
+
print(f"⚠️ WARNING: Could not import news_server.py: {e}")
|
| 796 |
|
| 797 |
try:
|
| 798 |
# Test if stock_data_server can be imported
|
|
|
|
| 800 |
"stock_data_server", stock_server_path
|
| 801 |
)
|
| 802 |
if spec is None or spec.loader is None:
|
| 803 |
+
print("⚠️ WARNING: Could not load stock_data_server.py")
|
| 804 |
else:
|
| 805 |
+
print("✅ SUCCESS: stock_data_server.py is importable")
|
|
|
|
|
|
|
|
|
|
| 806 |
except Exception as e:
|
| 807 |
+
print(f"⚠️ WARNING: Could not import stock_data_server.py: {e}")
|
| 808 |
|
| 809 |
return True
|
| 810 |
|
| 811 |
|
| 812 |
def main():
|
| 813 |
+
st.set_page_config(page_title="QueryStockAI", page_icon="📈", layout="wide")
|
| 814 |
|
| 815 |
+
st.title("📈 QueryStockAI")
|
| 816 |
st.markdown(
|
| 817 |
"Get comprehensive financial analysis and insights for your selected stocks."
|
| 818 |
)
|
| 819 |
|
| 820 |
+
# Initialize resource monitoring
|
| 821 |
+
if RESOURCE_MONITORING_AVAILABLE:
|
| 822 |
+
if "resource_monitoring_started" not in st.session_state:
|
| 823 |
+
start_resource_monitoring()
|
| 824 |
+
st.session_state.resource_monitoring_started = True
|
| 825 |
+
|
| 826 |
# Initialize tools
|
| 827 |
initialize_tools()
|
| 828 |
|
|
|
|
| 852 |
placeholder="Select a ticker...",
|
| 853 |
)
|
| 854 |
|
| 855 |
+
# Clear cache when ticker changes
|
| 856 |
+
if (
|
| 857 |
+
"current_ticker" in st.session_state
|
| 858 |
+
and st.session_state.current_ticker != selected_ticker
|
| 859 |
+
):
|
| 860 |
+
# Clear all cached data for the previous ticker
|
| 861 |
+
for key in list(st.session_state.keys()):
|
| 862 |
+
if key.startswith("chart_") or key.startswith("news_"):
|
| 863 |
+
del st.session_state[key]
|
| 864 |
+
|
| 865 |
+
# Update current ticker
|
| 866 |
+
if selected_ticker:
|
| 867 |
+
st.session_state.current_ticker = selected_ticker
|
| 868 |
+
|
| 869 |
# Main content area
|
| 870 |
if not selected_ticker:
|
| 871 |
st.info(
|
|
|
|
| 891 |
f"✅ Selected: {selected_ticker} - {available_tickers[selected_ticker]}"
|
| 892 |
)
|
| 893 |
|
| 894 |
+
# Add loading state for initial page load
|
| 895 |
+
if "page_loaded" not in st.session_state:
|
| 896 |
+
with st.spinner("🔄 Loading application..."):
|
| 897 |
+
st.session_state.page_loaded = True
|
| 898 |
+
|
| 899 |
# Stock Chart and News Section
|
| 900 |
st.header("📈 Stock Analysis")
|
| 901 |
|
|
|
|
| 904 |
|
| 905 |
with col1:
|
| 906 |
st.subheader("📈 Stock Price Chart")
|
| 907 |
+
# Cache the chart to prevent rerendering
|
| 908 |
+
chart_key = f"chart_{selected_ticker}"
|
| 909 |
+
if chart_key not in st.session_state:
|
| 910 |
+
with st.spinner(f"📊 Loading chart for {selected_ticker}..."):
|
| 911 |
+
chart_fig = create_stock_chart(selected_ticker)
|
| 912 |
+
if chart_fig:
|
| 913 |
+
st.session_state[chart_key] = chart_fig
|
| 914 |
+
else:
|
| 915 |
+
st.session_state[chart_key] = None
|
| 916 |
+
|
| 917 |
+
# Display the cached chart
|
| 918 |
+
if st.session_state[chart_key]:
|
| 919 |
+
st.plotly_chart(st.session_state[chart_key], use_container_width=True)
|
| 920 |
+
else:
|
| 921 |
+
st.warning(f"Could not load chart for {selected_ticker}")
|
| 922 |
|
| 923 |
with col2:
|
| 924 |
st.subheader("📰 Top News")
|
| 925 |
+
# Cache the news to prevent rerendering
|
| 926 |
+
news_key = f"news_{selected_ticker}"
|
| 927 |
+
if news_key not in st.session_state:
|
| 928 |
+
st.session_state[news_key] = True # Mark as loaded
|
| 929 |
+
display_top_news(selected_ticker)
|
| 930 |
+
else:
|
| 931 |
+
# Re-display cached news without reloading
|
| 932 |
+
display_top_news(selected_ticker)
|
| 933 |
|
| 934 |
+
# Chat Section
|
| 935 |
st.header("💬 Chat with Financial Agent")
|
| 936 |
|
| 937 |
+
# Initialize chat history
|
| 938 |
+
if "messages" not in st.session_state:
|
| 939 |
+
st.session_state.messages = []
|
| 940 |
+
|
| 941 |
+
# Display existing chat messages using custom styling
|
| 942 |
+
for message in st.session_state.messages:
|
| 943 |
+
if message["role"] == "user":
|
| 944 |
+
st.markdown(
|
| 945 |
+
f"""
|
| 946 |
+
<div style="background-color: #e3f2fd; padding: 10px; border-radius: 10px; margin: 5px 0; border: 1px solid #bbdefb;">
|
| 947 |
+
<strong>You:</strong> {message["content"]}
|
| 948 |
+
</div>
|
| 949 |
+
""",
|
| 950 |
+
unsafe_allow_html=True,
|
| 951 |
+
)
|
| 952 |
+
else:
|
| 953 |
+
st.markdown(
|
| 954 |
+
f"""
|
| 955 |
+
<div style=" padding: 10px; border-radius: 10px; margin: 5px 0;">
|
| 956 |
+
<strong>Agent:</strong>
|
| 957 |
+
</div>
|
| 958 |
+
""",
|
| 959 |
+
unsafe_allow_html=True,
|
| 960 |
+
)
|
| 961 |
+
# Render the content as markdown for proper formatting
|
| 962 |
+
st.markdown(message["content"])
|
| 963 |
+
|
| 964 |
+
# Chat input with proper loading state
|
| 965 |
+
if prompt := st.chat_input(f"Ask about {selected_ticker}...", key="chat_input"):
|
| 966 |
+
# Track streamlit request
|
| 967 |
+
if RESOURCE_MONITORING_AVAILABLE:
|
| 968 |
+
resource_monitor.increment_streamlit_requests()
|
| 969 |
+
|
| 970 |
+
# Add user message to chat history
|
| 971 |
+
st.session_state.messages.append({"role": "user", "content": prompt})
|
| 972 |
+
|
| 973 |
+
# Display assistant response with spinner above input
|
| 974 |
+
with st.spinner("🤖 Analyzing your request..."):
|
| 975 |
+
response = asyncio.run(run_agent(prompt, selected_ticker))
|
| 976 |
+
st.session_state.messages.append(
|
| 977 |
+
{"role": "assistant", "content": response}
|
| 978 |
+
)
|
| 979 |
+
|
| 980 |
+
# Rerun to display the new message (charts and news are cached)
|
| 981 |
+
st.rerun()
|
| 982 |
+
|
| 983 |
+
# Clear chat button
|
| 984 |
+
# col1, col2 = st.columns([1, 4])
|
| 985 |
+
# with col1:
|
| 986 |
+
# if st.button("🗑️ Clear Chat History", key="clear_button"):
|
| 987 |
+
# st.session_state.messages = []
|
| 988 |
+
# st.rerun()
|
| 989 |
+
# with col2:
|
| 990 |
+
# st.markdown("*Chat history will be maintained during your session*")
|
| 991 |
|
| 992 |
|
| 993 |
if __name__ == "__main__":
|
README.md
CHANGED
|
@@ -1,4 +1,4 @@
|
|
| 1 |
-
#
|
| 2 |
|
| 3 |
A comprehensive financial analysis tool that provides real-time stock data and news analysis through an AI-powered chat interface.
|
| 4 |
|
|
|
|
| 1 |
+
# QueryStockAI
|
| 2 |
|
| 3 |
A comprehensive financial analysis tool that provides real-time stock data and news analysis through an AI-powered chat interface.
|
| 4 |
|
pages/System_Monitor.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
import time
|
| 3 |
+
|
| 4 |
+
# Import resource monitoring
|
| 5 |
+
try:
|
| 6 |
+
from resource_monitor import (
|
| 7 |
+
start_resource_monitoring,
|
| 8 |
+
stop_resource_monitoring,
|
| 9 |
+
get_resource_stats,
|
| 10 |
+
create_resource_dashboard,
|
| 11 |
+
get_resource_summary,
|
| 12 |
+
export_resource_data,
|
| 13 |
+
resource_monitor,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
RESOURCE_MONITORING_AVAILABLE = True
|
| 17 |
+
except ImportError:
|
| 18 |
+
RESOURCE_MONITORING_AVAILABLE = False
|
| 19 |
+
st.warning("Resource monitoring not available. Install psutil: pip install psutil")
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def main():
    """Render the System Resource Monitor page.

    Starts the shared background resource monitor once per Streamlit session,
    then displays live system metrics, application counters, summary
    statistics, and an export button. Falls back to a warning when the
    resource_monitor module (psutil) is unavailable.
    """
    st.set_page_config(page_title="System Monitor", page_icon="📊", layout="wide")

    st.title("📊 System Resource Monitor")
    st.markdown("Real-time monitoring of system resources and application metrics.")

    # Initialize resource monitoring
    if RESOURCE_MONITORING_AVAILABLE:
        # Start the sampler thread only once per session, not on every rerun.
        if "resource_monitoring_started" not in st.session_state:
            start_resource_monitoring()
            st.session_state.resource_monitoring_started = True

        # Current stats with loading state
        with st.spinner("📊 Loading resource statistics..."):
            current_stats = get_resource_stats()

        if "error" not in current_stats:
            # System Metrics
            st.subheader("🖥️ System Metrics")
            col1, col2, col3, col4 = st.columns(4)

            with col1:
                st.metric("CPU Usage", f"{current_stats['cpu_percent']:.1f}%")
                st.metric("Memory Usage", f"{current_stats['memory_percent']:.1f}%")

            with col2:
                st.metric("Memory (GB)", f"{current_stats['memory_gb']:.2f} GB")
                st.metric("Disk Usage", f"{current_stats['disk_usage_percent']:.1f}%")

            with col3:
                st.metric("Network Sent", f"{current_stats['network_sent_mb']:.1f} MB")
                st.metric("Network Recv", f"{current_stats['network_recv_mb']:.1f} MB")

            with col4:
                st.metric("Process Count", current_stats["process_count"])
                st.metric("Uptime", f"{current_stats['uptime_seconds']/60:.1f} min")

            # Application-specific metrics
            st.subheader("📈 Application Metrics")
            col1, col2, col3 = st.columns(3)

            with col1:
                st.metric("YFinance Calls", current_stats["yfinance_calls"])

            with col2:
                st.metric(
                    "Prophet Training Time",
                    f"{current_stats['prophet_training_time']:.2f}s",
                )

            with col3:
                st.metric("Streamlit Requests", current_stats["streamlit_requests"])

            # Summary statistics
            summary_stats = get_resource_summary()
            if summary_stats:
                st.subheader("📋 Summary Statistics")
                col1, col2 = st.columns(2)

                with col1:
                    st.write(
                        f"**Average CPU Usage:** {summary_stats.get('avg_cpu_percent', 0):.1f}%"
                    )
                    st.write(
                        f"**Max CPU Usage:** {summary_stats.get('max_cpu_percent', 0):.1f}%"
                    )
                    st.write(
                        f"**Average Memory Usage:** {summary_stats.get('avg_memory_percent', 0):.1f}%"
                    )
                    st.write(
                        f"**Max Memory Usage:** {summary_stats.get('max_memory_percent', 0):.1f}%"
                    )

                with col2:
                    st.write(
                        f"**Total Network Sent:** {summary_stats.get('total_network_sent_mb', 0):.1f} MB"
                    )
                    st.write(
                        f"**Total Network Recv:** {summary_stats.get('total_network_recv_mb', 0):.1f} MB"
                    )
                    st.write(
                        f"**Total Uptime:** {summary_stats.get('total_uptime_minutes', 0):.1f} minutes"
                    )
                    st.write(
                        f"**Total YFinance Calls:** {summary_stats.get('yfinance_calls', 0)}"
                    )

            # Export data button
            if st.button("📥 Export Resource Data"):
                filename = export_resource_data()
                # FIX: the success message previously did not interpolate the
                # captured path, so users never saw where the JSON was written.
                st.success(f"Resource data exported to: {filename}")

        else:
            st.error(f"Error getting resource stats: {current_stats['error']}")
    else:
        st.warning(
            "Resource monitoring is not available. Please install psutil: pip install psutil"
        )


if __name__ == "__main__":
    main()
|
pyproject.toml
CHANGED
|
@@ -12,6 +12,8 @@ dependencies = [
|
|
| 12 |
"openai>=1.97.1",
|
| 13 |
"pandas>=2.3.1",
|
| 14 |
"plotly>=5.17.0",
|
|
|
|
|
|
|
| 15 |
"python-dotenv>=1.1.1",
|
| 16 |
"sentence-transformers>=5.0.0",
|
| 17 |
"streamlit>=1.28.0",
|
|
|
|
| 12 |
"openai>=1.97.1",
|
| 13 |
"pandas>=2.3.1",
|
| 14 |
"plotly>=5.17.0",
|
| 15 |
+
"prophet>=1.1.7",
|
| 16 |
+
"psutil>=7.0.0",
|
| 17 |
"python-dotenv>=1.1.1",
|
| 18 |
"sentence-transformers>=5.0.0",
|
| 19 |
"streamlit>=1.28.0",
|
requirements.txt
CHANGED
|
@@ -1,5 +1,9 @@
|
|
| 1 |
beautifulsoup4>=4.13.4
|
| 2 |
fastmcp>=2.10.6
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
gnews>=0.4.1
|
| 4 |
mcp[cli]>=1.12.2
|
| 5 |
openai>=1.97.1
|
|
|
|
| 1 |
beautifulsoup4>=4.13.4
|
| 2 |
fastmcp>=2.10.6
|
| 3 |
+
prophet>=1.1.4
|
| 4 |
+
psutil>=5.9.0
|
| 5 |
+
scikit-learn>=1.3.0
|
| 6 |
+
numpy>=1.24.0
|
| 7 |
gnews>=0.4.1
|
| 8 |
mcp[cli]>=1.12.2
|
| 9 |
openai>=1.97.1
|
resource_monitor.py
ADDED
|
@@ -0,0 +1,319 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import psutil
|
| 2 |
+
import time
|
| 3 |
+
import threading
|
| 4 |
+
import streamlit as st
|
| 5 |
+
import pandas as pd
|
| 6 |
+
import plotly.graph_objects as go
|
| 7 |
+
from datetime import datetime, timedelta
|
| 8 |
+
import os
|
| 9 |
+
import sys
|
| 10 |
+
import asyncio
|
| 11 |
+
import yfinance as yf
|
| 12 |
+
from typing import Dict, List, Optional
|
| 13 |
+
import json
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class ResourceMonitor:
    """Monitor system resources for the QueryStockAI.

    A daemon thread samples CPU, memory, disk, network, and process counts
    every ~2 seconds into parallel lists, alongside scalar application
    counters (yfinance calls, Prophet training time, Streamlit requests).
    NOTE(review): the sample lists are appended from the monitor thread and
    read from the Streamlit thread without a lock — benign for display
    purposes, but confirm if stronger consistency is ever needed.
    """

    def __init__(self):
        # Flag polled by the background loop; set False to stop sampling.
        self.monitoring = False
        self.monitor_thread = None
        # Time-series samples (parallel lists keyed by "timestamps") plus
        # scalar app-level counters.
        self.resource_data = {
            "timestamps": [],
            "cpu_percent": [],
            "memory_percent": [],
            "memory_mb": [],
            "disk_usage_percent": [],
            "network_sent_mb": [],
            "network_recv_mb": [],
            "process_count": [],
            "yfinance_calls": 0,
            "prophet_training_time": 0,
            "streamlit_requests": 0,
        }
        self.start_time = None
        self.process = psutil.Process()

    def start_monitoring(self):
        """Start resource monitoring in a separate thread.

        Returns True when a new monitor thread was started, False when
        monitoring was already running (idempotent start).
        """
        if not self.monitoring:
            self.monitoring = True
            self.start_time = datetime.now()
            # Daemon thread so it never blocks interpreter shutdown.
            self.monitor_thread = threading.Thread(
                target=self._monitor_loop, daemon=True
            )
            self.monitor_thread.start()
            return True
        return False

    def stop_monitoring(self):
        """Stop resource monitoring.

        The loop sleeps between samples, so the 1s join may time out; the
        daemon thread still exits with the process.
        """
        self.monitoring = False
        if self.monitor_thread:
            self.monitor_thread.join(timeout=1)

    def _monitor_loop(self):
        """Main monitoring loop: sample system stats roughly every 2 seconds."""
        while self.monitoring:
            try:
                # Get current timestamp
                timestamp = datetime.now()

                # CPU usage (interval=1 blocks one second to measure a delta)
                cpu_percent = psutil.cpu_percent(interval=1)

                # Memory usage
                memory = psutil.virtual_memory()
                memory_percent = memory.percent
                memory_mb = memory.used / (1024 * 1024)  # Convert to MB

                # Disk usage
                disk = psutil.disk_usage("/")
                disk_usage_percent = disk.percent

                # Network usage — psutil counters are cumulative since boot.
                network = psutil.net_io_counters()
                network_sent_mb = network.bytes_sent / (1024 * 1024)
                network_recv_mb = network.bytes_recv / (1024 * 1024)

                # Process count
                process_count = len(psutil.pids())

                # Store data
                self.resource_data["timestamps"].append(timestamp)
                self.resource_data["cpu_percent"].append(cpu_percent)
                self.resource_data["memory_percent"].append(memory_percent)
                self.resource_data["memory_mb"].append(memory_mb)
                self.resource_data["disk_usage_percent"].append(disk_usage_percent)
                self.resource_data["network_sent_mb"].append(network_sent_mb)
                self.resource_data["network_recv_mb"].append(network_recv_mb)
                self.resource_data["process_count"].append(process_count)

                # Keep only last 1000 data points to prevent memory issues
                max_points = 1000
                if len(self.resource_data["timestamps"]) > max_points:
                    for key in self.resource_data:
                        if isinstance(self.resource_data[key], list):
                            self.resource_data[key] = self.resource_data[key][
                                -max_points:
                            ]

                time.sleep(2)  # Monitor every 2 seconds

            except Exception as e:
                # Keep the sampler alive on transient psutil errors; back off.
                print(f"Error in monitoring loop: {e}")
                time.sleep(5)

    def get_current_stats(self) -> Dict:
        """Return a one-shot snapshot of system and app metrics.

        On any psutil failure a dict of the form {"error": <message>} is
        returned instead, which callers check with `"error" in stats`.
        """
        try:
            memory = psutil.virtual_memory()
            disk = psutil.disk_usage("/")
            network = psutil.net_io_counters()

            return {
                "cpu_percent": psutil.cpu_percent(),
                "memory_percent": memory.percent,
                "memory_mb": memory.used / (1024 * 1024),
                "memory_gb": memory.used / (1024 * 1024 * 1024),
                "disk_usage_percent": disk.percent,
                "disk_free_gb": disk.free / (1024 * 1024 * 1024),
                "network_sent_mb": network.bytes_sent / (1024 * 1024),
                "network_recv_mb": network.bytes_recv / (1024 * 1024),
                "process_count": len(psutil.pids()),
                "uptime_seconds": (
                    (datetime.now() - self.start_time).total_seconds()
                    if self.start_time
                    else 0
                ),
                "yfinance_calls": self.resource_data["yfinance_calls"],
                "prophet_training_time": self.resource_data["prophet_training_time"],
                "streamlit_requests": self.resource_data["streamlit_requests"],
            }
        except Exception as e:
            return {"error": str(e)}

    def increment_yfinance_calls(self):
        """Increment yfinance API call counter."""
        self.resource_data["yfinance_calls"] += 1

    def add_prophet_training_time(self, seconds: float):
        """Add Prophet training time (accumulated total, in seconds)."""
        self.resource_data["prophet_training_time"] += seconds

    def increment_streamlit_requests(self):
        """Increment Streamlit request counter."""
        self.resource_data["streamlit_requests"] += 1

    def create_resource_dashboard(self) -> go.Figure:
        """Create a comprehensive resource dashboard.

        Returns a Plotly figure with CPU/memory/network traces on stacked
        y-axes, or None when no samples have been collected yet.
        """
        if not self.resource_data["timestamps"]:
            return None

        # Create subplots
        fig = go.Figure()

        # CPU Usage
        fig.add_trace(
            go.Scatter(
                x=self.resource_data["timestamps"],
                y=self.resource_data["cpu_percent"],
                mode="lines",
                name="CPU %",
                line=dict(color="red", width=2),
            )
        )

        # Memory Usage
        fig.add_trace(
            go.Scatter(
                x=self.resource_data["timestamps"],
                y=self.resource_data["memory_percent"],
                mode="lines",
                name="Memory %",
                line=dict(color="blue", width=2),
                yaxis="y2",
            )
        )

        # Memory Usage in MB
        fig.add_trace(
            go.Scatter(
                x=self.resource_data["timestamps"],
                y=self.resource_data["memory_mb"],
                mode="lines",
                name="Memory (MB)",
                line=dict(color="lightblue", width=2),
                yaxis="y3",
            )
        )

        # Network Usage
        fig.add_trace(
            go.Scatter(
                x=self.resource_data["timestamps"],
                y=self.resource_data["network_sent_mb"],
                mode="lines",
                name="Network Sent (MB)",
                line=dict(color="green", width=2),
                yaxis="y4",
            )
        )

        fig.add_trace(
            go.Scatter(
                x=self.resource_data["timestamps"],
                y=self.resource_data["network_recv_mb"],
                mode="lines",
                name="Network Recv (MB)",
                line=dict(color="orange", width=2),
                yaxis="y4",
            )
        )

        # Update layout
        # NOTE(review): yaxis3/yaxis4 set `position` but not `overlaying`;
        # confirm Plotly renders these as intended overlay axes.
        fig.update_layout(
            title="System Resource Usage",
            xaxis_title="Time",
            height=600,
            hovermode="x unified",
            legend=dict(
                orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1
            ),
            yaxis=dict(title="CPU %", side="left"),
            yaxis2=dict(title="Memory %", side="right", overlaying="y"),
            yaxis3=dict(title="Memory (MB)", side="right", position=0.95),
            yaxis4=dict(title="Network (MB)", side="right", position=0.9),
        )

        return fig

    def get_summary_stats(self) -> Dict:
        """Get summary statistics over all collected samples.

        Returns an empty dict when no samples exist (guards the divisions
        below, since all sample lists grow in lockstep with "timestamps").
        """
        if not self.resource_data["timestamps"]:
            return {}

        # NOTE(review): network_sent/recv samples are cumulative-since-boot
        # counters, so sum() over samples overstates the true totals —
        # confirm whether the last sample (or a delta) was intended.
        return {
            "total_uptime_minutes": (
                (datetime.now() - self.start_time).total_seconds() / 60
                if self.start_time
                else 0
            ),
            "avg_cpu_percent": sum(self.resource_data["cpu_percent"])
            / len(self.resource_data["cpu_percent"]),
            "max_cpu_percent": max(self.resource_data["cpu_percent"]),
            "avg_memory_percent": sum(self.resource_data["memory_percent"])
            / len(self.resource_data["memory_percent"]),
            "max_memory_percent": max(self.resource_data["memory_percent"]),
            "avg_memory_mb": sum(self.resource_data["memory_mb"])
            / len(self.resource_data["memory_mb"]),
            "max_memory_mb": max(self.resource_data["memory_mb"]),
            "total_network_sent_mb": sum(self.resource_data["network_sent_mb"]),
            "total_network_recv_mb": sum(self.resource_data["network_recv_mb"]),
            "yfinance_calls": self.resource_data["yfinance_calls"],
            "prophet_training_time": self.resource_data["prophet_training_time"],
            "streamlit_requests": self.resource_data["streamlit_requests"],
        }

    def export_data(self, filename: str = None):
        """Export monitoring data to JSON file.

        When *filename* is None a timestamped name is generated. Returns the
        filename actually written.
        """
        if filename is None:
            filename = (
                f"resource_monitor_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
            )

        export_data = {
            "summary_stats": self.get_summary_stats(),
            "monitoring_data": {
                # ISO strings so datetimes survive JSON serialization.
                "timestamps": [
                    ts.isoformat() for ts in self.resource_data["timestamps"]
                ],
                "cpu_percent": self.resource_data["cpu_percent"],
                "memory_percent": self.resource_data["memory_percent"],
                "memory_mb": self.resource_data["memory_mb"],
                "disk_usage_percent": self.resource_data["disk_usage_percent"],
                "network_sent_mb": self.resource_data["network_sent_mb"],
                "network_recv_mb": self.resource_data["network_recv_mb"],
                "process_count": self.resource_data["process_count"],
            },
        }

        with open(filename, "w") as f:
            json.dump(export_data, f, indent=2)

        return filename
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
# Global monitor instance shared by the module-level convenience wrappers
# below (and imported directly by pages/System_Monitor.py).
resource_monitor = ResourceMonitor()


def start_resource_monitoring():
    """Start resource monitoring; returns True only if a new thread started."""
    return resource_monitor.start_monitoring()


def stop_resource_monitoring():
    """Stop resource monitoring on the shared monitor."""
    resource_monitor.stop_monitoring()


def get_resource_stats():
    """Get current resource statistics (snapshot dict, or {"error": ...})."""
    return resource_monitor.get_current_stats()


def create_resource_dashboard():
    """Create the Plotly resource dashboard (None if no samples yet)."""
    return resource_monitor.create_resource_dashboard()


def get_resource_summary():
    """Get summary statistics over collected samples (empty dict if none)."""
    return resource_monitor.get_summary_stats()


def export_resource_data(filename=None):
    """Export resource data to JSON; returns the filename written."""
    return resource_monitor.export_data(filename)
|
agent_client.py → terminal_client.py
RENAMED
|
@@ -356,7 +356,7 @@ async def main():
|
|
| 356 |
# Predefined tickers
|
| 357 |
available_tickers = {"1": "AAPL", "2": "TSLA", "3": "MSFT", "4": "GOOG"}
|
| 358 |
|
| 359 |
-
print("===
|
| 360 |
print("Select a stock ticker to analyze:")
|
| 361 |
print("1. AAPL (Apple)")
|
| 362 |
print("2. TSLA (Tesla)")
|
|
|
|
| 356 |
# Predefined tickers
|
| 357 |
available_tickers = {"1": "AAPL", "2": "TSLA", "3": "MSFT", "4": "GOOG"}
|
| 358 |
|
| 359 |
+
print("=== QueryStockAI ===")
|
| 360 |
print("Select a stock ticker to analyze:")
|
| 361 |
print("1. AAPL (Apple)")
|
| 362 |
print("2. TSLA (Tesla)")
|
test_prophet_accuracy.py
ADDED
|
@@ -0,0 +1,373 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Prophet Accuracy Test Script
|
| 4 |
+
Trains Prophet model on given ticker data up to June 2025 and tests predictions for July 2025.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import yfinance as yf
|
| 8 |
+
import pandas as pd
|
| 9 |
+
from prophet import Prophet
|
| 10 |
+
from datetime import datetime, timedelta
|
| 11 |
+
import numpy as np
|
| 12 |
+
from sklearn.metrics import (
|
| 13 |
+
mean_absolute_error,
|
| 14 |
+
mean_squared_error,
|
| 15 |
+
mean_absolute_percentage_error,
|
| 16 |
+
)
|
| 17 |
+
import warnings
|
| 18 |
+
|
| 19 |
+
warnings.filterwarnings("ignore")
|
| 20 |
+
|
| 21 |
+
ticker = "AAPL"
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def get_aapl_data(symbol: str = "AAPL"):
    """Fetch two years of daily price history for *symbol* via yfinance.

    Generalized from a hard-coded "AAPL" lookup: the default keeps the
    original behavior, while callers can now test any ticker (e.g. the
    module-level `ticker` variable).

    Args:
        symbol: Ticker symbol to download (default "AAPL").

    Returns:
        A pandas DataFrame of daily OHLCV history indexed by date.

    Raises:
        ValueError: if yfinance returns an empty frame for the symbol.
    """
    print(f"📊 Fetching {symbol} historical data...")

    # Two years of history gives Prophet enough data for yearly seasonality.
    raw_data = yf.Ticker(symbol)
    data = raw_data.history(period="2y")

    if data.empty:
        raise ValueError(f"No data received for {symbol}")

    print(f"✅ Retrieved {len(data)} days of {symbol} data")
    print(f"📅 Date range: {data.index.min().date()} to {data.index.max().date()}")

    return data
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def prepare_prophet_data(data, end_date):
    """Build Prophet's training frame (columns 'ds', 'y') from closes up to end_date.

    Rows whose Close price falls outside the 1.5*IQR fence are dropped, and
    timestamps are made timezone-naive because Prophet rejects tz-aware dates.
    """
    print(f"\n🔧 Preparing Prophet data up to {end_date.date()}...")

    # Align the cutoff with the (possibly tz-aware) price index.
    if data.index.tz is not None:
        end_date = pd.Timestamp(end_date).tz_localize(data.index.tz)

    window = data[data.index <= end_date].copy()
    pre_filter_count = len(window)

    # 1.5 * IQR outlier fence on the closing price.
    q1 = window["Close"].quantile(0.25)
    q3 = window["Close"].quantile(0.75)
    iqr = q3 - q1
    window = window[window["Close"].between(q1 - 1.5 * iqr, q3 + 1.5 * iqr)]

    print(f"📊 Removed outliers: {pre_filter_count - len(window)} points")

    # Prophet requires exactly two columns: 'ds' (datetime) and 'y' (value).
    frame = window.reset_index()
    frame["ds"] = frame["Date"].dt.tz_localize(None)  # strip timezone
    frame["y"] = frame["Close"]
    frame = frame[["ds", "y"]]

    print(f"✅ Training data prepared: {len(frame)} days")
    print(f"📈 Price range: ${frame['y'].min():.2f} - ${frame['y'].max():.2f}")

    return frame
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def train_prophet_model(data):
    """Fit a Prophet model tuned for daily stock closes.

    Args:
        data: DataFrame with 'ds' (datetime) and 'y' (close price) columns.

    Returns:
        The fitted Prophet model.
    """
    print("\n🤖 Training Prophet model...")

    # Tuned configuration: smooth trend changepoints, strong multiplicative
    # seasonality, tighter intervals, and MAP fitting (no MCMC) for speed.
    config = dict(
        yearly_seasonality=True,
        weekly_seasonality=True,
        daily_seasonality=False,
        changepoint_prior_scale=0.01,  # smoother trends
        seasonality_prior_scale=10.0,  # stronger seasonality
        seasonality_mode="multiplicative",
        interval_width=0.8,  # tighter confidence intervals
        mcmc_samples=0,  # skip MCMC for faster training
    )
    model = Prophet(**config)

    # Extra cycles that stock prices commonly exhibit.
    for cycle_name, period, order in (
        ("monthly", 30.5, 5),
        ("quarterly", 91.25, 8),
    ):
        model.add_seasonality(name=cycle_name, period=period, fourier_order=order)

    model.fit(data)

    print("✅ Prophet model trained successfully")
    return model
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def make_predictions(model, start_date, end_date):
    """Forecast with *model* and return the rows falling in [start_date, end_date].

    Args:
        model: A fitted Prophet model.
        start_date, end_date: Inclusive datetime bounds of the test window.

    Returns:
        DataFrame slice of the Prophet forecast for the requested window.
    """
    print(f"\n🔮 Making predictions from {start_date.date()} to {end_date.date()}...")

    # Inclusive day count for the forecast horizon.
    horizon = (end_date - start_date).days + 1

    future = model.make_future_dataframe(periods=horizon)
    forecast = model.predict(future)

    # Keep only the requested window out of the full forecast.
    in_window = (forecast["ds"] >= start_date) & (forecast["ds"] <= end_date)
    predictions = forecast[in_window].copy()

    print(f"✅ Generated {len(predictions)} predictions for {horizon} days")

    return predictions
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def get_actual_july_data(data, start_date, end_date):
    """Return the rows of *data* whose index lies in [start_date, end_date].

    The bounds are localized to the index's timezone when it is tz-aware so
    the comparison does not mix naive and aware timestamps.
    """
    print(f"\n📊 Fetching actual July data...")

    # Align the bounds with a tz-aware index before comparing.
    tz = data.index.tz
    if tz is not None:
        start_date = pd.Timestamp(start_date).tz_localize(tz)
        end_date = pd.Timestamp(end_date).tz_localize(tz)

    mask = (data.index >= start_date) & (data.index <= end_date)
    window = data[mask].copy()

    print(f"✅ Retrieved {len(window)} days of actual July data")

    return window
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def calculate_accuracy_metrics(predictions, actual_data):
    """Compare Prophet predictions against actual closes and compute error metrics.

    Joins the two frames on date and returns a dict with mae/mse/rmse/mape,
    directional accuracy (% of days where the predicted day-over-day move had
    the same sign as the actual move), and the merged comparison frame.
    Returns None when the inputs share no dates.
    """
    print("\n📈 Calculating accuracy metrics...")

    # Strip timezone so actual dates merge cleanly with Prophet's naive 'ds'.
    actuals = actual_data.reset_index().copy()
    actuals["Date"] = actuals["Date"].dt.tz_localize(None)

    comparison = pd.merge(
        predictions[["ds", "yhat", "yhat_lower", "yhat_upper"]],
        actuals[["Date", "Close"]],
        left_on="ds",
        right_on="Date",
        how="inner",
    )

    # Guard clause: nothing to score without overlapping dates.
    if comparison.empty:
        print("❌ No overlapping data found for comparison")
        return None

    mae = mean_absolute_error(comparison["Close"], comparison["yhat"])
    mse = mean_squared_error(comparison["Close"], comparison["yhat"])
    rmse = np.sqrt(mse)
    mape = mean_absolute_percentage_error(comparison["Close"], comparison["yhat"]) * 100

    # Directional accuracy: did the forecast get the up/down move right?
    real_moves = comparison["Close"].diff().dropna()
    pred_moves = comparison["yhat"].diff().dropna()

    # Align both series to the same (trailing) length before comparing signs.
    span = min(len(real_moves), len(pred_moves))
    real_moves = real_moves.iloc[-span:]
    pred_moves = pred_moves.iloc[-span:]

    directional_accuracy = np.mean((real_moves > 0) == (pred_moves > 0)) * 100

    return {
        "mae": mae,
        "mse": mse,
        "rmse": rmse,
        "mape": mape,
        "directional_accuracy": directional_accuracy,
        "comparison_data": comparison,
    }
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def print_results(metrics, predictions, actual_data):
    """Pretty-print the accuracy report produced by calculate_accuracy_metrics.

    Prints a banner, the error metrics, the test-window summary, and up to
    five predicted-vs-actual rows. When *metrics* is None (no overlap), only
    an explanatory message is printed.
    """
    banner = "=" * 60
    print("\n" + banner)
    print("📊 PROPHET ACCURACY TEST RESULTS")
    print(banner)

    # Guard clause: nothing to report without overlapping data.
    if metrics is None:
        print("❌ No metrics available - insufficient data for comparison")
        return

    print(f"\n📈 Accuracy Metrics:")
    print(f" Mean Absolute Error (MAE): ${metrics['mae']:.2f}")
    print(f" Mean Squared Error (MSE): {metrics['mse']:.2f}")
    print(f" Root Mean Squared Error (RMSE): ${metrics['rmse']:.2f}")
    print(f" Mean Absolute Percentage Error (MAPE): {metrics['mape']:.2f}%")
    print(f" Directional Accuracy: {metrics['directional_accuracy']:.1f}%")

    print(f"\n📊 Prediction Summary:")
    print(f" Training Period: Up to June 30, 2025")
    print(f" Test Period: July 1-25, 2025")
    print(f" Test Days: {len(metrics['comparison_data'])}")

    # Show up to the first five predicted-vs-actual rows.
    comparison = metrics["comparison_data"]
    print(f"\n📋 Sample Predictions vs Actual (first 5 days):")
    print(f"{'Date':<12} {'Predicted':<12} {'Actual':<12} {'Error':<12}")
    print("-" * 50)

    for _, row in comparison.head(5).iterrows():
        gap = row["Close"] - row["yhat"]
        print(
            f"{row['ds'].strftime('%Y-%m-%d'):<12} "
            f"${row['yhat']:<11.2f} "
            f"${row['Close']:<11.2f} "
            f"${gap:<11.2f}"
        )

    print("\n" + banner)
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
def test_multiple_configurations(data, training_end, test_start, test_end):
    """Fit several Prophet configurations and pick the one with the lowest MAPE.

    Args:
        data: Historical price data passed through to the data-prep helpers.
        training_end: Last date included in the training window.
        test_start: First date of the held-out evaluation window.
        test_end: Last date of the held-out evaluation window.

    Returns:
        A (best_config_name, results) tuple. ``results`` is a list of
        ``{"config": name, "metrics": metrics}`` dicts for every configuration
        that produced valid metrics; ``best_config_name`` is None when none did.
    """
    print("\n🔬 Testing multiple Prophet configurations...")

    # (name, Prophet constructor kwargs) for each candidate configuration.
    candidates = [
        (
            "Default",
            {
                "yearly_seasonality": True,
                "weekly_seasonality": True,
                "daily_seasonality": False,
                "changepoint_prior_scale": 0.05,
                "seasonality_mode": "multiplicative",
            },
        ),
        (
            "Optimized",
            {
                "yearly_seasonality": True,
                "weekly_seasonality": True,
                "daily_seasonality": False,
                "changepoint_prior_scale": 0.01,
                "seasonality_prior_scale": 10.0,
                "seasonality_mode": "multiplicative",
                "interval_width": 0.8,
                "mcmc_samples": 0,
            },
        ),
        (
            "Conservative",
            {
                "yearly_seasonality": True,
                "weekly_seasonality": False,
                "daily_seasonality": False,
                "changepoint_prior_scale": 0.001,
                "seasonality_mode": "additive",
            },
        ),
    ]

    results = []
    best_config = None
    best_mape = float("inf")

    for name, params in candidates:
        print(f"\n🧪 Testing {name} configuration...")

        try:
            # Train only on data up to the cutoff so the test window is unseen.
            training_data = prepare_prophet_data(data, training_end)

            model = Prophet(**params)
            if name == "Optimized":
                # The tuned configuration also gets extra seasonal components.
                model.add_seasonality(name="monthly", period=30.5, fourier_order=5)
                model.add_seasonality(name="quarterly", period=91.25, fourier_order=8)
            model.fit(training_data)

            forecast = make_predictions(model, test_start, test_end)
            observed = get_actual_july_data(data, test_start, test_end)
            metrics = calculate_accuracy_metrics(forecast, observed)

            if not metrics:
                print(f"❌ {name}: No valid metrics")
                continue

            results.append({"config": name, "metrics": metrics})

            # Track the configuration with the lowest MAPE seen so far.
            if metrics["mape"] < best_mape:
                best_mape = metrics["mape"]
                best_config = name

            print(
                f"✅ {name}: MAPE = {metrics['mape']:.2f}%, Directional = {metrics['directional_accuracy']:.1f}%"
            )

        except Exception as e:
            # A failing configuration is reported but does not abort the sweep.
            print(f"❌ {name}: Error - {e}")

    return best_config, results
def main():
    """Entry point: run the Prophet accuracy test for AAPL and print a report.

    Trains on data up to 2025-06-30, evaluates predictions against
    2025-07-01..2025-07-25, sweeps the candidate configurations, and prints
    the detailed metrics for the best one. Any unexpected failure is caught,
    reported, and traced rather than propagated.
    """
    print("🚀 Starting Prophet Accuracy Test for AAPL")
    print("=" * 60)

    try:
        # Hold out July 2025 (through the 25th) as the evaluation window.
        cutoff = datetime(2025, 6, 30)
        eval_start = datetime(2025, 7, 1)
        eval_end = datetime(2025, 7, 25)

        print(f"📅 Training Period: Up to {cutoff.date()}")
        print(f"📅 Test Period: {eval_start.date()} to {eval_end.date()}")

        prices = get_aapl_data()

        winner, outcomes = test_multiple_configurations(
            prices, cutoff, eval_start, eval_end
        )

        if winner:
            print(f"\n🏆 Best configuration: {winner}")

            # Report the full metric breakdown for the winning configuration.
            winning = next(o for o in outcomes if o["config"] == winner)
            print_results(winning["metrics"], None, None)
        else:
            print("\n❌ No valid configurations found")

    except Exception as e:
        print(f"\n❌ Error during testing: {e}")
        import traceback

        traceback.print_exc()
# Run the accuracy test only when this file is executed as a script,
# not when it is imported as a module.
if __name__ == "__main__":
    main()
uv.lock
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
|
|