Spaces:
Sleeping
Sleeping
Faham
commited on
Commit
Β·
dff71a5
1
Parent(s):
031a6a1
UPDATE: mcps updated and deployed to ralway
Browse files- .gitignore +1 -0
- .python-version +1 -1
- Dockerfile +42 -0
- Home.py +382 -228
- RAILWAY_DEPLOYMENT.md +118 -0
- README.md +4 -7
- mcp_config.json +22 -0
- news_server.py β mcp_news_server.py +15 -5
- stock_data_server.py β mcp_stock_server.py +16 -8
- pages/System_Monitor.py +0 -162
- pyproject.toml +7 -3
- railway.toml +16 -0
- requirements.txt +0 -0
- resource_monitor.py +0 -386
- start_services.py +170 -0
- terminal_client.py +0 -454
- test_deployment.py +117 -0
- update_mcp_urls.py +59 -0
- uv.lock +0 -0
.gitignore
CHANGED
|
@@ -8,6 +8,7 @@ wheels/
|
|
| 8 |
|
| 9 |
# Virtual environments
|
| 10 |
.venv
|
|
|
|
| 11 |
|
| 12 |
# Environment variables
|
| 13 |
.env
|
|
|
|
| 8 |
|
| 9 |
# Virtual environments
|
| 10 |
.venv
|
| 11 |
+
.venv3122
|
| 12 |
|
| 13 |
# Environment variables
|
| 14 |
.env
|
.python-version
CHANGED
|
@@ -1 +1 @@
|
|
| 1 |
-
3.
|
|
|
|
| 1 |
+
3.12
|
Dockerfile
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Use Python 3.12 slim image for smaller size
|
| 2 |
+
FROM python:3.12-slim
|
| 3 |
+
|
| 4 |
+
# Set working directory
|
| 5 |
+
WORKDIR /app
|
| 6 |
+
|
| 7 |
+
# Set environment variables
|
| 8 |
+
ENV PYTHONUNBUFFERED=1
|
| 9 |
+
ENV PYTHONDONTWRITEBYTECODE=1
|
| 10 |
+
ENV STREAMLIT_SERVER_PORT=8501
|
| 11 |
+
ENV STREAMLIT_SERVER_ADDRESS=0.0.0.0
|
| 12 |
+
|
| 13 |
+
# Install system dependencies
|
| 14 |
+
RUN apt-get update && apt-get install -y \
|
| 15 |
+
gcc \
|
| 16 |
+
g++ \
|
| 17 |
+
curl \
|
| 18 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 19 |
+
|
| 20 |
+
# Copy requirements first for better caching
|
| 21 |
+
COPY requirements.txt .
|
| 22 |
+
|
| 23 |
+
# Install Python dependencies
|
| 24 |
+
RUN pip install --no-cache-dir -r requirements.txt
|
| 25 |
+
|
| 26 |
+
# Copy application code
|
| 27 |
+
COPY . .
|
| 28 |
+
|
| 29 |
+
# Create non-root user for security
|
| 30 |
+
RUN useradd -m -u 1000 streamlit && \
|
| 31 |
+
chown -R streamlit:streamlit /app
|
| 32 |
+
USER streamlit
|
| 33 |
+
|
| 34 |
+
# Expose port (Railway will set $PORT)
|
| 35 |
+
EXPOSE 8501
|
| 36 |
+
|
| 37 |
+
# Health check
|
| 38 |
+
HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
|
| 39 |
+
CMD curl -f http://localhost:$PORT/_stcore/health || exit 1
|
| 40 |
+
|
| 41 |
+
# Run the application (Railway will set $PORT)
|
| 42 |
+
CMD ["python", "start_services.py"]
|
Home.py
CHANGED
|
@@ -19,51 +19,41 @@ from sklearn.metrics import mean_squared_error, r2_score
|
|
| 19 |
from sklearn.linear_model import Ridge
|
| 20 |
from sklearn.model_selection import GridSearchCV
|
| 21 |
from dotenv import load_dotenv
|
| 22 |
-
from openai import OpenAI
|
| 23 |
-
from mcp.client.session import ClientSession
|
| 24 |
-
from mcp.client.stdio import stdio_client
|
| 25 |
-
from mcp import StdioServerParameters, types
|
| 26 |
from sklearn.preprocessing import StandardScaler
|
| 27 |
|
|
|
|
| 28 |
try:
|
| 29 |
-
from
|
| 30 |
-
start_resource_monitoring,
|
| 31 |
-
resource_monitor,
|
| 32 |
-
)
|
| 33 |
-
|
| 34 |
-
RESOURCE_MONITORING_AVAILABLE = True
|
| 35 |
except ImportError:
|
| 36 |
-
|
| 37 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 38 |
|
| 39 |
# Load environment variables
|
| 40 |
load_dotenv()
|
| 41 |
|
| 42 |
# Check if API key exists - support both .env and Streamlit secrets
|
| 43 |
-
|
| 44 |
-
|
| 45 |
|
| 46 |
-
if not
|
| 47 |
st.error(
|
| 48 |
-
"β Error:
|
| 49 |
)
|
| 50 |
st.stop()
|
| 51 |
|
| 52 |
-
if not
|
| 53 |
st.error(
|
| 54 |
"β Error: MODEL not found. Please set it in your environment variables or Streamlit secrets."
|
| 55 |
)
|
| 56 |
st.stop()
|
| 57 |
|
| 58 |
-
# Configure the client to connect to OpenRouter
|
| 59 |
-
client = OpenAI(
|
| 60 |
-
base_url="https://openrouter.ai/api/v1",
|
| 61 |
-
api_key=api_key,
|
| 62 |
-
)
|
| 63 |
-
|
| 64 |
-
# Global variable to store discovered tools
|
| 65 |
-
discovered_tools = []
|
| 66 |
-
|
| 67 |
|
| 68 |
@st.cache_data(ttl=3600) # Cache for 1 hour
|
| 69 |
def get_available_tickers():
|
|
@@ -265,137 +255,6 @@ def search_ticker(ticker_symbol):
|
|
| 265 |
return None
|
| 266 |
|
| 267 |
|
| 268 |
-
async def get_news_data(ticker: str) -> str:
|
| 269 |
-
"""Get news data by calling the news server via MCP."""
|
| 270 |
-
try:
|
| 271 |
-
# Set up MCP server parameters
|
| 272 |
-
|
| 273 |
-
current_dir = os.path.dirname(os.path.abspath(__file__))
|
| 274 |
-
news_server_path = os.path.join(current_dir, "news_server.py")
|
| 275 |
-
|
| 276 |
-
if not os.path.exists(news_server_path):
|
| 277 |
-
return f"Error: news_server.py not found at {news_server_path}"
|
| 278 |
-
|
| 279 |
-
# Use the same Python executable as the current process
|
| 280 |
-
python_executable = sys.executable
|
| 281 |
-
server_params = StdioServerParameters(
|
| 282 |
-
command=python_executable, args=[news_server_path]
|
| 283 |
-
)
|
| 284 |
-
|
| 285 |
-
# Connect to the MCP server
|
| 286 |
-
try:
|
| 287 |
-
async with stdio_client(server_params) as (read, write):
|
| 288 |
-
async with ClientSession(read, write) as session:
|
| 289 |
-
# Initialize the session
|
| 290 |
-
await session.initialize()
|
| 291 |
-
|
| 292 |
-
# Call the get_latest_news tool
|
| 293 |
-
with st.status(
|
| 294 |
-
f"π Fetching news data for {ticker}...", expanded=False
|
| 295 |
-
) as status:
|
| 296 |
-
try:
|
| 297 |
-
result = await asyncio.wait_for(
|
| 298 |
-
session.call_tool(
|
| 299 |
-
"get_latest_news", {"ticker": ticker}
|
| 300 |
-
),
|
| 301 |
-
timeout=30.0, # 30 second timeout
|
| 302 |
-
)
|
| 303 |
-
status.update(
|
| 304 |
-
label=f"β
News data fetched for {ticker}",
|
| 305 |
-
state="complete",
|
| 306 |
-
)
|
| 307 |
-
except asyncio.TimeoutError:
|
| 308 |
-
status.update(
|
| 309 |
-
label="β News data fetch timed out", state="error"
|
| 310 |
-
)
|
| 311 |
-
return f"Timeout getting news for {ticker}"
|
| 312 |
-
except Exception as e:
|
| 313 |
-
status.update(
|
| 314 |
-
label=f"β Error fetching news: {e}", state="error"
|
| 315 |
-
)
|
| 316 |
-
return f"Error getting news for {ticker}: {e}"
|
| 317 |
-
|
| 318 |
-
# Parse the result properly
|
| 319 |
-
if result.content:
|
| 320 |
-
for content in result.content:
|
| 321 |
-
if isinstance(content, types.TextContent):
|
| 322 |
-
return content.text
|
| 323 |
-
|
| 324 |
-
return f"No news data returned for {ticker}"
|
| 325 |
-
except Exception as e:
|
| 326 |
-
st.error(f"β Failed to connect to news server: {e}")
|
| 327 |
-
return f"Failed to connect to news server: {e}"
|
| 328 |
-
|
| 329 |
-
except Exception as e:
|
| 330 |
-
return f"Error getting news for {ticker}: {e}"
|
| 331 |
-
|
| 332 |
-
|
| 333 |
-
async def get_stock_data(ticker: str) -> str:
|
| 334 |
-
"""Get stock data by calling the stock server via MCP."""
|
| 335 |
-
try:
|
| 336 |
-
# Set up MCP server parameters
|
| 337 |
-
|
| 338 |
-
current_dir = os.path.dirname(os.path.abspath(__file__))
|
| 339 |
-
stock_server_path = os.path.join(current_dir, "stock_data_server.py")
|
| 340 |
-
|
| 341 |
-
if not os.path.exists(stock_server_path):
|
| 342 |
-
return f"Error: stock_data_server.py not found at {stock_server_path}"
|
| 343 |
-
|
| 344 |
-
# Use the same Python executable as the current process
|
| 345 |
-
python_executable = sys.executable
|
| 346 |
-
server_params = StdioServerParameters(
|
| 347 |
-
command=python_executable, args=[stock_server_path]
|
| 348 |
-
)
|
| 349 |
-
|
| 350 |
-
# Connect to the MCP server
|
| 351 |
-
try:
|
| 352 |
-
async with stdio_client(server_params) as (read, write):
|
| 353 |
-
async with ClientSession(read, write) as session:
|
| 354 |
-
# Initialize the session
|
| 355 |
-
await session.initialize()
|
| 356 |
-
|
| 357 |
-
# Call the get_historical_stock_data tool
|
| 358 |
-
with st.status(
|
| 359 |
-
f"π Fetching stock data for {ticker}...", expanded=False
|
| 360 |
-
) as status:
|
| 361 |
-
try:
|
| 362 |
-
result = await asyncio.wait_for(
|
| 363 |
-
session.call_tool(
|
| 364 |
-
"get_historical_stock_data", {"ticker": ticker}
|
| 365 |
-
),
|
| 366 |
-
timeout=30.0, # 30 second timeout
|
| 367 |
-
)
|
| 368 |
-
status.update(
|
| 369 |
-
label=f"β
Stock data fetched for {ticker}",
|
| 370 |
-
state="complete",
|
| 371 |
-
)
|
| 372 |
-
except asyncio.TimeoutError:
|
| 373 |
-
status.update(
|
| 374 |
-
label="β Stock data fetch timed out", state="error"
|
| 375 |
-
)
|
| 376 |
-
return f"Timeout getting stock data for {ticker}"
|
| 377 |
-
except Exception as e:
|
| 378 |
-
status.update(
|
| 379 |
-
label=f"β Error fetching stock data: {e}",
|
| 380 |
-
state="error",
|
| 381 |
-
)
|
| 382 |
-
return f"Error getting stock data for {ticker}: {e}"
|
| 383 |
-
|
| 384 |
-
# Parse the result properly
|
| 385 |
-
if result.content:
|
| 386 |
-
for content in result.content:
|
| 387 |
-
if isinstance(content, types.TextContent):
|
| 388 |
-
return content.text
|
| 389 |
-
|
| 390 |
-
return f"No stock data returned for {ticker}"
|
| 391 |
-
except Exception as e:
|
| 392 |
-
st.error(f"β Failed to connect to stock data server: {e}")
|
| 393 |
-
return f"Failed to connect to stock data server: {e}"
|
| 394 |
-
|
| 395 |
-
except Exception as e:
|
| 396 |
-
return f"Error getting stock data for {ticker}: {e}"
|
| 397 |
-
|
| 398 |
-
|
| 399 |
def calculate_rsi(data, window):
|
| 400 |
"""Calculate RSI (Relative Strength Index) for the given data."""
|
| 401 |
delta = data.diff()
|
|
@@ -417,10 +276,6 @@ def create_stock_chart(ticker: str):
|
|
| 417 |
stock = yf.Ticker(ticker)
|
| 418 |
hist_data = stock.history(period="5y")
|
| 419 |
|
| 420 |
-
# Track yfinance API call
|
| 421 |
-
if RESOURCE_MONITORING_AVAILABLE:
|
| 422 |
-
resource_monitor.increment_yfinance_calls()
|
| 423 |
-
|
| 424 |
if hist_data.empty:
|
| 425 |
st.warning(f"No data available for {ticker}")
|
| 426 |
return None
|
|
@@ -614,10 +469,6 @@ def create_stock_chart(ticker: str):
|
|
| 614 |
|
| 615 |
# Track training time
|
| 616 |
training_time = time.time() - start_time
|
| 617 |
-
if RESOURCE_MONITORING_AVAILABLE:
|
| 618 |
-
resource_monitor.add_ridge_training_time(
|
| 619 |
-
training_time
|
| 620 |
-
) # Updated method name
|
| 621 |
|
| 622 |
# Get the best alpha value for display
|
| 623 |
best_alpha = grid_search.best_params_["alpha"]
|
|
@@ -844,10 +695,6 @@ def create_stock_chart(ticker: str):
|
|
| 844 |
# Make predictions for the next 30 trading days
|
| 845 |
future_predictions = model.predict(X_future_scaled)
|
| 846 |
|
| 847 |
-
# Track ML predictions
|
| 848 |
-
if RESOURCE_MONITORING_AVAILABLE:
|
| 849 |
-
resource_monitor.increment_ml_predictions()
|
| 850 |
-
|
| 851 |
# Create interactive chart with historical data and future predictions
|
| 852 |
fig = go.Figure()
|
| 853 |
|
|
@@ -1015,48 +862,6 @@ def create_basic_stock_chart(ticker: str):
|
|
| 1015 |
return None
|
| 1016 |
|
| 1017 |
|
| 1018 |
-
def initialize_tools():
|
| 1019 |
-
"""Initialize the available tools."""
|
| 1020 |
-
global discovered_tools
|
| 1021 |
-
|
| 1022 |
-
discovered_tools = [
|
| 1023 |
-
{
|
| 1024 |
-
"type": "function",
|
| 1025 |
-
"function": {
|
| 1026 |
-
"name": "get_latest_news",
|
| 1027 |
-
"description": "Fetches recent news headlines and descriptions for a specific stock ticker. Use this when user asks about news, updates, or recent events about a company.",
|
| 1028 |
-
"parameters": {
|
| 1029 |
-
"type": "object",
|
| 1030 |
-
"properties": {
|
| 1031 |
-
"ticker": {
|
| 1032 |
-
"type": "string",
|
| 1033 |
-
"description": "The stock ticker symbol (e.g., 'AAPL', 'GOOG', 'TSLA'). Must be a valid stock symbol.",
|
| 1034 |
-
}
|
| 1035 |
-
},
|
| 1036 |
-
"required": ["ticker"],
|
| 1037 |
-
},
|
| 1038 |
-
},
|
| 1039 |
-
},
|
| 1040 |
-
{
|
| 1041 |
-
"type": "function",
|
| 1042 |
-
"function": {
|
| 1043 |
-
"name": "get_historical_stock_data",
|
| 1044 |
-
"description": "Fetches recent historical stock data (Open, High, Low, Close, Volume) for a given ticker. Use this when user asks about stock performance, price data, or market performance.",
|
| 1045 |
-
"parameters": {
|
| 1046 |
-
"type": "object",
|
| 1047 |
-
"properties": {
|
| 1048 |
-
"ticker": {
|
| 1049 |
-
"type": "string",
|
| 1050 |
-
"description": "The stock ticker symbol (e.g., 'AAPL', 'TSLA', 'MSFT'). Must be a valid stock symbol.",
|
| 1051 |
-
}
|
| 1052 |
-
},
|
| 1053 |
-
"required": ["ticker"],
|
| 1054 |
-
},
|
| 1055 |
-
},
|
| 1056 |
-
},
|
| 1057 |
-
]
|
| 1058 |
-
|
| 1059 |
-
|
| 1060 |
async def execute_tool_call(tool_call):
|
| 1061 |
"""Execute a tool call using MCP servers."""
|
| 1062 |
try:
|
|
@@ -1153,7 +958,7 @@ async def run_agent(user_query, selected_ticker):
|
|
| 1153 |
# Get initial response from the model
|
| 1154 |
with st.spinner("π€ Generating analysis..."):
|
| 1155 |
response = client.chat.completions.create(
|
| 1156 |
-
model=
|
| 1157 |
messages=messages,
|
| 1158 |
tools=discovered_tools,
|
| 1159 |
tool_choice="required",
|
|
@@ -1192,7 +997,7 @@ async def run_agent(user_query, selected_ticker):
|
|
| 1192 |
# Get final response from the model
|
| 1193 |
with st.spinner("π€ Finalizing analysis..."):
|
| 1194 |
final_response = client.chat.completions.create(
|
| 1195 |
-
model=
|
| 1196 |
messages=messages,
|
| 1197 |
)
|
| 1198 |
|
|
@@ -1211,6 +1016,286 @@ async def run_agent(user_query, selected_ticker):
|
|
| 1211 |
return "Please try again with a different question."
|
| 1212 |
|
| 1213 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1214 |
@st.cache_data(ttl=1800) # Cache for 30 minutes
|
| 1215 |
def display_top_news(ticker: str):
|
| 1216 |
"""Display top news headlines for the given ticker with clickable links."""
|
|
@@ -1357,14 +1442,62 @@ def main():
|
|
| 1357 |
"Get comprehensive financial analysis and insights for your selected stocks."
|
| 1358 |
)
|
| 1359 |
|
| 1360 |
-
# Initialize
|
| 1361 |
-
|
| 1362 |
-
|
| 1363 |
-
|
| 1364 |
-
st.
|
|
|
|
|
|
|
|
|
|
| 1365 |
|
| 1366 |
-
|
| 1367 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1368 |
|
| 1369 |
# Test server availability only once on startup
|
| 1370 |
if "servers_tested" not in st.session_state:
|
|
@@ -1504,29 +1637,50 @@ def main():
|
|
| 1504 |
else:
|
| 1505 |
st.markdown(
|
| 1506 |
f"""
|
| 1507 |
-
<div style=" padding: 10px; border-radius: 10px; margin: 5px 0;">
|
| 1508 |
<strong>Agent:</strong>
|
| 1509 |
</div>
|
| 1510 |
""",
|
| 1511 |
unsafe_allow_html=True,
|
| 1512 |
)
|
| 1513 |
-
# Render the content as markdown for proper formatting
|
| 1514 |
-
st.markdown(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1515 |
|
| 1516 |
# Chat input with proper loading state
|
| 1517 |
if prompt := st.chat_input(f"Ask about {selected_ticker}...", key="chat_input"):
|
| 1518 |
-
# Track streamlit request
|
| 1519 |
-
if RESOURCE_MONITORING_AVAILABLE:
|
| 1520 |
-
resource_monitor.increment_streamlit_requests()
|
| 1521 |
|
| 1522 |
# Add user message to chat history
|
| 1523 |
st.session_state.messages.append({"role": "user", "content": prompt})
|
| 1524 |
|
| 1525 |
# Display assistant response with spinner above input
|
| 1526 |
with st.spinner("π€ Analyzing your request..."):
|
| 1527 |
-
response = asyncio.run(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1528 |
st.session_state.messages.append(
|
| 1529 |
-
{"role": "assistant", "content":
|
| 1530 |
)
|
| 1531 |
|
| 1532 |
# Rerun to display the new message - the chart and news are cached in session state
|
|
|
|
| 19 |
from sklearn.linear_model import Ridge
|
| 20 |
from sklearn.model_selection import GridSearchCV
|
| 21 |
from dotenv import load_dotenv
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
from sklearn.preprocessing import StandardScaler
|
| 23 |
|
| 24 |
+
# Try different import approaches
|
| 25 |
try:
|
| 26 |
+
from langchain_mcp_adapters.client import MultiServerMCPClient
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
except ImportError:
|
| 28 |
+
try:
|
| 29 |
+
from langchain_mcp_adapters import MultiServerMCPClient
|
| 30 |
+
except ImportError:
|
| 31 |
+
# Fallback to basic MCP client
|
| 32 |
+
MultiServerMCPClient = None
|
| 33 |
+
from langgraph.prebuilt import create_react_agent
|
| 34 |
+
from langchain_groq import ChatGroq
|
| 35 |
+
from langchain_core.tools import tool
|
| 36 |
+
|
| 37 |
|
| 38 |
# Load environment variables
|
| 39 |
load_dotenv()
|
| 40 |
|
| 41 |
# Check if API key exists - support both .env and Streamlit secrets
|
| 42 |
+
os.environ["GROQ_API_KEY"] = os.getenv("GROQ_API_KEY") or st.secrets.get("GROQ_API_KEY")
|
| 43 |
+
model_name = os.getenv("MODEL") or st.secrets.get("MODEL")
|
| 44 |
|
| 45 |
+
if not os.environ["GROQ_API_KEY"]:
|
| 46 |
st.error(
|
| 47 |
+
"β Error: GROQ_API_KEY not found. Please set it in your environment variables or Streamlit secrets."
|
| 48 |
)
|
| 49 |
st.stop()
|
| 50 |
|
| 51 |
+
if not model_name:
|
| 52 |
st.error(
|
| 53 |
"β Error: MODEL not found. Please set it in your environment variables or Streamlit secrets."
|
| 54 |
)
|
| 55 |
st.stop()
|
| 56 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 57 |
|
| 58 |
@st.cache_data(ttl=3600) # Cache for 1 hour
|
| 59 |
def get_available_tickers():
|
|
|
|
| 255 |
return None
|
| 256 |
|
| 257 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 258 |
def calculate_rsi(data, window):
|
| 259 |
"""Calculate RSI (Relative Strength Index) for the given data."""
|
| 260 |
delta = data.diff()
|
|
|
|
| 276 |
stock = yf.Ticker(ticker)
|
| 277 |
hist_data = stock.history(period="5y")
|
| 278 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 279 |
if hist_data.empty:
|
| 280 |
st.warning(f"No data available for {ticker}")
|
| 281 |
return None
|
|
|
|
| 469 |
|
| 470 |
# Track training time
|
| 471 |
training_time = time.time() - start_time
|
|
|
|
|
|
|
|
|
|
|
|
|
| 472 |
|
| 473 |
# Get the best alpha value for display
|
| 474 |
best_alpha = grid_search.best_params_["alpha"]
|
|
|
|
| 695 |
# Make predictions for the next 30 trading days
|
| 696 |
future_predictions = model.predict(X_future_scaled)
|
| 697 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 698 |
# Create interactive chart with historical data and future predictions
|
| 699 |
fig = go.Figure()
|
| 700 |
|
|
|
|
| 862 |
return None
|
| 863 |
|
| 864 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 865 |
async def execute_tool_call(tool_call):
|
| 866 |
"""Execute a tool call using MCP servers."""
|
| 867 |
try:
|
|
|
|
| 958 |
# Get initial response from the model
|
| 959 |
with st.spinner("π€ Generating analysis..."):
|
| 960 |
response = client.chat.completions.create(
|
| 961 |
+
model=model_name,
|
| 962 |
messages=messages,
|
| 963 |
tools=discovered_tools,
|
| 964 |
tool_choice="required",
|
|
|
|
| 997 |
# Get final response from the model
|
| 998 |
with st.spinner("π€ Finalizing analysis..."):
|
| 999 |
final_response = client.chat.completions.create(
|
| 1000 |
+
model=model_name,
|
| 1001 |
messages=messages,
|
| 1002 |
)
|
| 1003 |
|
|
|
|
| 1016 |
return "Please try again with a different question."
|
| 1017 |
|
| 1018 |
|
| 1019 |
+
async def initialize_mcp_agent(model, tools):
|
| 1020 |
+
"""Initialize the MCP agent using LangGraph React agent"""
|
| 1021 |
+
try:
|
| 1022 |
+
# Create MCP agent using LangGraph React agent
|
| 1023 |
+
try:
|
| 1024 |
+
# Bind model with system message
|
| 1025 |
+
system_message = """You are a helpful financial assistant. You have access to tools that can fetch stock data and news.
|
| 1026 |
+
When asked about a stock, use the available tools to get the latest information and provide a comprehensive analysis.
|
| 1027 |
+
Always be helpful and provide detailed insights based on the data you gather."""
|
| 1028 |
+
|
| 1029 |
+
model_with_system = model.bind(system=system_message)
|
| 1030 |
+
|
| 1031 |
+
# Create React agent with tools
|
| 1032 |
+
agent = create_react_agent(
|
| 1033 |
+
model_with_system,
|
| 1034 |
+
tools,
|
| 1035 |
+
)
|
| 1036 |
+
print(f"β
Created agent with {len(tools)} tools")
|
| 1037 |
+
|
| 1038 |
+
except Exception as e:
|
| 1039 |
+
st.error(f"β Failed to create MCP agent: {str(e)}")
|
| 1040 |
+
print(f"β MCP agent creation error: {str(e)}")
|
| 1041 |
+
import traceback
|
| 1042 |
+
|
| 1043 |
+
print(f"β MCP agent creation traceback: {traceback.format_exc()}")
|
| 1044 |
+
return None
|
| 1045 |
+
|
| 1046 |
+
# Test the agent with LangGraph
|
| 1047 |
+
try:
|
| 1048 |
+
# Use LangGraph ainvoke method for async tools
|
| 1049 |
+
test_result = await agent.ainvoke(
|
| 1050 |
+
{
|
| 1051 |
+
"messages": [
|
| 1052 |
+
{"role": "user", "content": "What tools do you have available?"}
|
| 1053 |
+
]
|
| 1054 |
+
}
|
| 1055 |
+
)
|
| 1056 |
+
print(f"π Test result: {test_result}")
|
| 1057 |
+
except Exception as e:
|
| 1058 |
+
st.warning(f"β οΈ MCP agent test failed: {str(e)}")
|
| 1059 |
+
print(f"β MCP agent test error: {str(e)}")
|
| 1060 |
+
import traceback
|
| 1061 |
+
|
| 1062 |
+
print(f"β MCP agent test traceback: {traceback.format_exc()}")
|
| 1063 |
+
|
| 1064 |
+
return agent
|
| 1065 |
+
|
| 1066 |
+
except Exception as e:
|
| 1067 |
+
st.error(f"β Error initializing MCP agent: {str(e)}")
|
| 1068 |
+
st.error(f"Error type: {type(e).__name__}")
|
| 1069 |
+
import traceback
|
| 1070 |
+
|
| 1071 |
+
st.error(f"Full traceback: {traceback.format_exc()}")
|
| 1072 |
+
return None
|
| 1073 |
+
|
| 1074 |
+
|
| 1075 |
+
async def run_agent_with_mcp(user_query: str, selected_ticker: str = None):
|
| 1076 |
+
"""Run the agent using LangGraph React agent"""
|
| 1077 |
+
try:
|
| 1078 |
+
# Get tools and model from session state
|
| 1079 |
+
if "mcp_tools" not in st.session_state or "mcp_model" not in st.session_state:
|
| 1080 |
+
return "β MCP tools and model not initialized. Please restart the application."
|
| 1081 |
+
|
| 1082 |
+
tools = st.session_state.mcp_tools
|
| 1083 |
+
model = st.session_state.mcp_model
|
| 1084 |
+
|
| 1085 |
+
# Initialize agent if not already done
|
| 1086 |
+
if "mcp_agent" not in st.session_state or st.session_state.mcp_agent is None:
|
| 1087 |
+
agent = await initialize_mcp_agent(model, tools)
|
| 1088 |
+
if not agent:
|
| 1089 |
+
return "Failed to initialize MCP agent"
|
| 1090 |
+
st.session_state.mcp_agent = agent
|
| 1091 |
+
else:
|
| 1092 |
+
agent = st.session_state.mcp_agent
|
| 1093 |
+
|
| 1094 |
+
# Construct the query with system instructions
|
| 1095 |
+
if selected_ticker:
|
| 1096 |
+
full_query = f"""You are a financial assistant. Use the available tools to get data for {selected_ticker} and then provide a comprehensive analysis.
|
| 1097 |
+
|
| 1098 |
+
AVAILABLE TOOLS:
|
| 1099 |
+
- get_latest_news: Get recent news for a ticker
|
| 1100 |
+
- get_historical_stock_data: Get stock performance data for a ticker
|
| 1101 |
+
|
| 1102 |
+
INSTRUCTIONS:
|
| 1103 |
+
1. Call get_latest_news with {{"ticker": "{selected_ticker}"}}
|
| 1104 |
+
2. Call get_historical_stock_data with {{"ticker": "{selected_ticker}"}}
|
| 1105 |
+
3. After getting the data, provide a comprehensive analysis answering: {user_query}
|
| 1106 |
+
|
| 1107 |
+
IMPORTANT: After calling the tools, you MUST provide a final analysis with insights, trends, and recommendations. Do not just show the tool calls.
|
| 1108 |
+
|
| 1109 |
+
Question: {user_query} for {selected_ticker}"""
|
| 1110 |
+
else:
|
| 1111 |
+
full_query = user_query
|
| 1112 |
+
|
| 1113 |
+
# Run the agent with LangGraph
|
| 1114 |
+
with st.spinner("π€ Processing with MCP agent..."):
|
| 1115 |
+
try:
|
| 1116 |
+
# Use LangGraph ainvoke method for async tools
|
| 1117 |
+
result = await agent.ainvoke(
|
| 1118 |
+
{"messages": [{"role": "user", "content": full_query}]}
|
| 1119 |
+
)
|
| 1120 |
+
|
| 1121 |
+
# Debug: Log the result
|
| 1122 |
+
print(f"π MCP Agent Result: {result}")
|
| 1123 |
+
|
| 1124 |
+
# Extract the final answer from the result
|
| 1125 |
+
if isinstance(result, dict) and "output" in result:
|
| 1126 |
+
final_response = result["output"]
|
| 1127 |
+
elif isinstance(result, str):
|
| 1128 |
+
final_response = result
|
| 1129 |
+
else:
|
| 1130 |
+
final_response = str(result)
|
| 1131 |
+
|
| 1132 |
+
# Debug: Print the result type and structure
|
| 1133 |
+
print(f"π Result type: {type(result)}")
|
| 1134 |
+
if isinstance(result, dict):
|
| 1135 |
+
print(f"π Result keys: {list(result.keys())}")
|
| 1136 |
+
print(f"π Final response type: {type(final_response)}")
|
| 1137 |
+
|
| 1138 |
+
# If the response contains multiple messages, extract only the final AI response
|
| 1139 |
+
if "AIMessage" in final_response:
|
| 1140 |
+
# Debug: Print the response structure
|
| 1141 |
+
print(f"π Response structure: {final_response[:500]}...")
|
| 1142 |
+
|
| 1143 |
+
# Look for the last AIMessage with content
|
| 1144 |
+
ai_messages = re.findall(
|
| 1145 |
+
r"AIMessage\(content=\'(.*?)\',", final_response, re.DOTALL
|
| 1146 |
+
)
|
| 1147 |
+
if ai_messages:
|
| 1148 |
+
final_response = ai_messages[-1] # Get the last AI message
|
| 1149 |
+
print(f"β
Extracted AI message: {final_response[:100]}...")
|
| 1150 |
+
else:
|
| 1151 |
+
# Try alternative pattern without the comma
|
| 1152 |
+
ai_messages = re.findall(
|
| 1153 |
+
r"AIMessage\(content=\'(.*?)\'", final_response, re.DOTALL
|
| 1154 |
+
)
|
| 1155 |
+
if ai_messages:
|
| 1156 |
+
final_response = ai_messages[-1]
|
| 1157 |
+
print(
|
| 1158 |
+
f"β
Extracted AI message (alt): {final_response[:100]}..."
|
| 1159 |
+
)
|
| 1160 |
+
else:
|
| 1161 |
+
print("β No AI messages found in response")
|
| 1162 |
+
# Try to find any content after the last ToolMessage
|
| 1163 |
+
if "ToolMessage" in final_response:
|
| 1164 |
+
# Split by ToolMessage and take the last part
|
| 1165 |
+
parts = final_response.split("ToolMessage")
|
| 1166 |
+
if len(parts) > 1:
|
| 1167 |
+
last_part = parts[-1]
|
| 1168 |
+
# Look for AIMessage in the last part
|
| 1169 |
+
ai_match = re.search(
|
| 1170 |
+
r"AIMessage\(content=\'(.*?)\'",
|
| 1171 |
+
last_part,
|
| 1172 |
+
re.DOTALL,
|
| 1173 |
+
)
|
| 1174 |
+
if ai_match:
|
| 1175 |
+
final_response = ai_match.group(1)
|
| 1176 |
+
print(
|
| 1177 |
+
f"β
Extracted from last part: {final_response[:100]}..."
|
| 1178 |
+
)
|
| 1179 |
+
|
| 1180 |
+
# Clean up the final response to remove escaped characters
|
| 1181 |
+
final_response = (
|
| 1182 |
+
final_response.replace("\\n", "\n")
|
| 1183 |
+
.replace("\\'", "'")
|
| 1184 |
+
.replace('\\"', '"')
|
| 1185 |
+
)
|
| 1186 |
+
|
| 1187 |
+
# Check if response is incomplete (only shows tool calls)
|
| 1188 |
+
if "[TOOL_CALLS" in final_response and (
|
| 1189 |
+
"Final Answer:" not in final_response
|
| 1190 |
+
and "Based on" not in final_response
|
| 1191 |
+
):
|
| 1192 |
+
st.warning("β οΈ Agent response incomplete - only tool calls detected")
|
| 1193 |
+
st.info("π Trying to get complete response...")
|
| 1194 |
+
# Try a simpler query to get the final analysis
|
| 1195 |
+
try:
|
| 1196 |
+
simple_query = f"Provide a comprehensive analysis of {selected_ticker} based on the latest news and stock data."
|
| 1197 |
+
simple_result = await agent.ainvoke(
|
| 1198 |
+
{"messages": [{"role": "user", "content": simple_query}]}
|
| 1199 |
+
)
|
| 1200 |
+
|
| 1201 |
+
if (
|
| 1202 |
+
isinstance(simple_result, dict)
|
| 1203 |
+
and "output" in simple_result
|
| 1204 |
+
):
|
| 1205 |
+
simple_response = simple_result["output"]
|
| 1206 |
+
else:
|
| 1207 |
+
simple_response = str(simple_result)
|
| 1208 |
+
|
| 1209 |
+
if len(simple_response) > 100:
|
| 1210 |
+
return simple_response
|
| 1211 |
+
else:
|
| 1212 |
+
return f"Tool execution started but final analysis incomplete. Please try again."
|
| 1213 |
+
except Exception as e2:
|
| 1214 |
+
return f"Tool execution started but final analysis failed: {str(e2)}"
|
| 1215 |
+
|
| 1216 |
+
# Look for the final answer section
|
| 1217 |
+
if "Final Answer:" in final_response:
|
| 1218 |
+
# Extract everything after "Final Answer:"
|
| 1219 |
+
final_answer = final_response.split("Final Answer:")[-1].strip()
|
| 1220 |
+
return final_answer
|
| 1221 |
+
elif "Thought:" in final_response and "Action:" in final_response:
|
| 1222 |
+
# If it contains thought process, try to extract the final analysis
|
| 1223 |
+
# Look for the last meaningful paragraph
|
| 1224 |
+
lines = final_response.split("\n")
|
| 1225 |
+
final_lines = []
|
| 1226 |
+
for line in reversed(lines):
|
| 1227 |
+
if (
|
| 1228 |
+
line.strip()
|
| 1229 |
+
and not line.startswith("Thought:")
|
| 1230 |
+
and not line.startswith("Action:")
|
| 1231 |
+
and not line.startswith("Observation:")
|
| 1232 |
+
):
|
| 1233 |
+
final_lines.insert(0, line)
|
| 1234 |
+
elif line.strip() and (
|
| 1235 |
+
"Based on" in line
|
| 1236 |
+
or "Recommendation:" in line
|
| 1237 |
+
or "Conclusion:" in line
|
| 1238 |
+
):
|
| 1239 |
+
final_lines.insert(0, line)
|
| 1240 |
+
if final_lines:
|
| 1241 |
+
return "\n".join(final_lines)
|
| 1242 |
+
else:
|
| 1243 |
+
return final_response
|
| 1244 |
+
else:
|
| 1245 |
+
# Always return a string as a fallback
|
| 1246 |
+
return str(final_response)
|
| 1247 |
+
except Exception as e:
|
| 1248 |
+
st.error(f"β Error during agent execution: {str(e)}")
|
| 1249 |
+
st.error(f"Error type: {type(e).__name__}")
|
| 1250 |
+
import traceback
|
| 1251 |
+
|
| 1252 |
+
st.error(f"Full traceback: {traceback.format_exc()}")
|
| 1253 |
+
# Log to console
|
| 1254 |
+
print(f"β MCP Agent Execution Error: {str(e)}")
|
| 1255 |
+
print(f"Error type: {type(e).__name__}")
|
| 1256 |
+
print(f"Full traceback: {traceback.format_exc()}")
|
| 1257 |
+
|
| 1258 |
+
# Check if it's a TaskGroup error
|
| 1259 |
+
if "TaskGroup" in str(e):
|
| 1260 |
+
st.error(
|
| 1261 |
+
"β TaskGroup error - this might be due to MCP server connection issues"
|
| 1262 |
+
)
|
| 1263 |
+
st.info("π Trying to restart MCP agent...")
|
| 1264 |
+
# Clear the agent and try to reinitialize
|
| 1265 |
+
if "mcp_agent" in st.session_state:
|
| 1266 |
+
del st.session_state.mcp_agent
|
| 1267 |
+
return "Please try again - MCP agent will be reinitialized"
|
| 1268 |
+
else:
|
| 1269 |
+
# Try a different approach - use the agent's available methods
|
| 1270 |
+
st.info("π Trying alternative execution method...")
|
| 1271 |
+
try:
|
| 1272 |
+
# Try using the agent's available methods
|
| 1273 |
+
if hasattr(agent, "arun"):
|
| 1274 |
+
result = await agent.arun(full_query)
|
| 1275 |
+
elif hasattr(agent, "run"):
|
| 1276 |
+
result = await agent.run(
|
| 1277 |
+
full_query
|
| 1278 |
+
) # Always await async methods
|
| 1279 |
+
else:
|
| 1280 |
+
result = "Agent has no available execution methods"
|
| 1281 |
+
return result
|
| 1282 |
+
except Exception as e2:
|
| 1283 |
+
st.error(f"β Alternative execution also failed: {str(e2)}")
|
| 1284 |
+
st.error(f"Error type: {type(e2).__name__}")
|
| 1285 |
+
import traceback
|
| 1286 |
+
|
| 1287 |
+
st.error(f"Full traceback: {traceback.format_exc()}")
|
| 1288 |
+
# Log to console
|
| 1289 |
+
print(f"β MCP Alternative Execution Error: {str(e2)}")
|
| 1290 |
+
print(f"Error type: {type(e2).__name__}")
|
| 1291 |
+
print(f"Full traceback: {traceback.format_exc()}")
|
| 1292 |
+
return f"Error: {str(e2)}"
|
| 1293 |
+
|
| 1294 |
+
except Exception as e:
|
| 1295 |
+
st.error(f"β Error running MCP agent: {e}")
|
| 1296 |
+
return f"Error: {e}"
|
| 1297 |
+
|
| 1298 |
+
|
| 1299 |
@st.cache_data(ttl=1800) # Cache for 30 minutes
|
| 1300 |
def display_top_news(ticker: str):
|
| 1301 |
"""Display top news headlines for the given ticker with clickable links."""
|
|
|
|
| 1442 |
"Get comprehensive financial analysis and insights for your selected stocks."
|
| 1443 |
)
|
| 1444 |
|
| 1445 |
+
# Initialize MCP client and tools silently
|
| 1446 |
+
try:
|
| 1447 |
+
# Initialize MCP client with proper configuration
|
| 1448 |
+
if MultiServerMCPClient is None:
|
| 1449 |
+
st.error(
|
| 1450 |
+
"β MultiServerMCPClient not available. Please install langchain-mcp-adapters"
|
| 1451 |
+
)
|
| 1452 |
+
st.stop()
|
| 1453 |
|
| 1454 |
+
try:
|
| 1455 |
+
# Pass servers configuration as positional argument
|
| 1456 |
+
client = MultiServerMCPClient(
|
| 1457 |
+
{
|
| 1458 |
+
"news_server": {
|
| 1459 |
+
"url": "http://localhost:8001/mcp",
|
| 1460 |
+
"transport": "streamable_http",
|
| 1461 |
+
},
|
| 1462 |
+
"stock_server": {
|
| 1463 |
+
"url": "http://localhost:8002/mcp",
|
| 1464 |
+
"transport": "streamable_http",
|
| 1465 |
+
},
|
| 1466 |
+
}
|
| 1467 |
+
)
|
| 1468 |
+
except Exception as e:
|
| 1469 |
+
# Try with different transport type
|
| 1470 |
+
try:
|
| 1471 |
+
client = MultiServerMCPClient(
|
| 1472 |
+
{
|
| 1473 |
+
"news_server": {
|
| 1474 |
+
"url": "http://localhost:8001/mcp",
|
| 1475 |
+
"transport": "http",
|
| 1476 |
+
},
|
| 1477 |
+
"stock_server": {
|
| 1478 |
+
"url": "http://localhost:8002/mcp",
|
| 1479 |
+
"transport": "http",
|
| 1480 |
+
},
|
| 1481 |
+
}
|
| 1482 |
+
)
|
| 1483 |
+
except Exception as e2:
|
| 1484 |
+
st.error(f"β Failed to initialize MCP client: {str(e2)}")
|
| 1485 |
+
st.stop()
|
| 1486 |
+
|
| 1487 |
+
# Get tools from MCP servers
|
| 1488 |
+
tools = asyncio.run(client.get_tools())
|
| 1489 |
+
|
| 1490 |
+
# Create model with proper configuration
|
| 1491 |
+
model = ChatGroq(model=model_name, temperature=0.1, max_tokens=4096)
|
| 1492 |
+
|
| 1493 |
+
# Store tools and model in session state for later use
|
| 1494 |
+
st.session_state.mcp_tools = tools
|
| 1495 |
+
st.session_state.mcp_model = model
|
| 1496 |
+
st.session_state.mcp_client = client
|
| 1497 |
+
|
| 1498 |
+
except Exception as e:
|
| 1499 |
+
st.error(f"β Failed to initialize MCP client: {str(e)}")
|
| 1500 |
+
st.stop()
|
| 1501 |
|
| 1502 |
# Test server availability only once on startup
|
| 1503 |
if "servers_tested" not in st.session_state:
|
|
|
|
| 1637 |
else:
|
| 1638 |
st.markdown(
|
| 1639 |
f"""
|
| 1640 |
+
<div style="background-color: #f5f5f5; padding: 10px; border-radius: 10px; margin: 5px 0; border: 1px solid #e0e0e0;">
|
| 1641 |
<strong>Agent:</strong>
|
| 1642 |
</div>
|
| 1643 |
""",
|
| 1644 |
unsafe_allow_html=True,
|
| 1645 |
)
|
| 1646 |
+
# Render the content as markdown for proper formatting with controlled text size
|
| 1647 |
+
st.markdown(
|
| 1648 |
+
f"""
|
| 1649 |
+
<div style="font-size: 13px; line-height: 1.4; padding: 12px; border-radius: 8px; margin: 5px 0; border-left: 4px solid #007bff; max-height: 400px; overflow-y: auto;"
|
| 1650 |
+
""",
|
| 1651 |
+
unsafe_allow_html=True,
|
| 1652 |
+
)
|
| 1653 |
+
# Clean up the content to remove raw markdown syntax
|
| 1654 |
+
cleaned_content = (
|
| 1655 |
+
message["content"].replace("\\n", "\n").replace("\\'", "'")
|
| 1656 |
+
)
|
| 1657 |
+
st.markdown(cleaned_content)
|
| 1658 |
+
st.markdown("</div>", unsafe_allow_html=True)
|
| 1659 |
|
| 1660 |
# Chat input with proper loading state
|
| 1661 |
if prompt := st.chat_input(f"Ask about {selected_ticker}...", key="chat_input"):
|
|
|
|
|
|
|
|
|
|
| 1662 |
|
| 1663 |
# Add user message to chat history
|
| 1664 |
st.session_state.messages.append({"role": "user", "content": prompt})
|
| 1665 |
|
| 1666 |
# Display assistant response with spinner above input
|
| 1667 |
with st.spinner("π€ Analyzing your request..."):
|
| 1668 |
+
response = asyncio.run(run_agent_with_mcp(prompt, selected_ticker))
|
| 1669 |
+
|
| 1670 |
+
# Ensure only a string is appended to chat history
|
| 1671 |
+
if isinstance(response, dict) and "content" in response:
|
| 1672 |
+
clean_response = response["content"]
|
| 1673 |
+
elif (
|
| 1674 |
+
isinstance(response, list)
|
| 1675 |
+
and len(response) > 0
|
| 1676 |
+
and "content" in response[-1]
|
| 1677 |
+
):
|
| 1678 |
+
clean_response = response[-1]["content"]
|
| 1679 |
+
else:
|
| 1680 |
+
clean_response = str(response)
|
| 1681 |
+
|
| 1682 |
st.session_state.messages.append(
|
| 1683 |
+
{"role": "assistant", "content": clean_response}
|
| 1684 |
)
|
| 1685 |
|
| 1686 |
# Rerun to display the new message - the chart and news are cached in session state
|
RAILWAY_DEPLOYMENT.md
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Railway Deployment Guide
|
| 2 |
+
|
| 3 |
+
This guide explains how to deploy the Financial Agent on Railway with all services running on one server.
|
| 4 |
+
|
| 5 |
+
## Architecture
|
| 6 |
+
|
| 7 |
+
The deployment runs three services on a single Railway server:
|
| 8 |
+
|
| 9 |
+
1. **Streamlit App** (Main Port): The main web interface
|
| 10 |
+
2. **MCP Stock Server** (Port + 1): Provides stock data tools
|
| 11 |
+
3. **MCP News Server** (Port + 2): Provides news data tools
|
| 12 |
+
|
| 13 |
+
## Port Configuration
|
| 14 |
+
|
| 15 |
+
- **Main Port**: `$PORT` (set by Railway)
|
| 16 |
+
- **Stock Server**: `$PORT + 1`
|
| 17 |
+
- **News Server**: `$PORT + 2`
|
| 18 |
+
|
| 19 |
+
For example, if Railway assigns port 3000:
|
| 20 |
+
|
| 21 |
+
- Streamlit: `http://localhost:3000`
|
| 22 |
+
- Stock Server: `http://localhost:3001`
|
| 23 |
+
- News Server: `http://localhost:3002`
|
| 24 |
+
|
| 25 |
+
## Files Overview
|
| 26 |
+
|
| 27 |
+
### Core Application Files
|
| 28 |
+
|
| 29 |
+
- `Home.py` - Main Streamlit application (unchanged)
|
| 30 |
+
- `mcp_stock_server.py` - Stock data MCP server
|
| 31 |
+
- `mcp_news_server.py` - News data MCP server
|
| 32 |
+
|
| 33 |
+
### Deployment Files
|
| 34 |
+
|
| 35 |
+
- `start_services.py` - Main startup script that launches all services
|
| 36 |
+
- `update_mcp_urls.py` - Updates MCP server URLs in Home.py
|
| 37 |
+
- `railway.toml` - Railway configuration
|
| 38 |
+
- `Dockerfile` - Container configuration
|
| 39 |
+
|
| 40 |
+
## Deployment Process
|
| 41 |
+
|
| 42 |
+
1. **Railway Build**: Railway builds the Docker container
|
| 43 |
+
2. **Startup**: `start_services.py` is executed
|
| 44 |
+
3. **URL Update**: MCP server URLs are updated for the correct ports
|
| 45 |
+
4. **Service Launch**: All three services start in parallel
|
| 46 |
+
5. **Health Check**: Railway monitors the main Streamlit port
|
| 47 |
+
|
| 48 |
+
## Environment Variables
|
| 49 |
+
|
| 50 |
+
Set these in Railway dashboard:
|
| 51 |
+
|
| 52 |
+
```bash
|
| 53 |
+
GROQ_API_KEY=your_groq_api_key_here
|
| 54 |
+
MODEL=mistralai/mistral-small-3.2-24b-instruct:free
|
| 55 |
+
```
|
| 56 |
+
|
| 57 |
+
## Local Development
|
| 58 |
+
|
| 59 |
+
To test locally:
|
| 60 |
+
|
| 61 |
+
```bash
|
| 62 |
+
# Set environment variables
|
| 63 |
+
export PORT=8501
|
| 64 |
+
export GROQ_API_KEY=your_key_here
|
| 65 |
+
export MODEL=mistralai/mistral-small-3.2-24b-instruct:free
|
| 66 |
+
|
| 67 |
+
# Run the startup script
|
| 68 |
+
python start_services.py
|
| 69 |
+
```
|
| 70 |
+
|
| 71 |
+
## Troubleshooting
|
| 72 |
+
|
| 73 |
+
### Services Not Starting
|
| 74 |
+
|
| 75 |
+
1. Check Railway logs for error messages
|
| 76 |
+
2. Verify all required files are present
|
| 77 |
+
3. Ensure environment variables are set correctly
|
| 78 |
+
|
| 79 |
+
### MCP Connection Issues
|
| 80 |
+
|
| 81 |
+
1. Check if MCP servers are running on correct ports
|
| 82 |
+
2. Verify URLs in Home.py are updated correctly
|
| 83 |
+
3. Check network connectivity between services
|
| 84 |
+
|
| 85 |
+
### Memory Issues
|
| 86 |
+
|
| 87 |
+
- Railway provides limited memory
|
| 88 |
+
- Consider reducing model complexity if needed
|
| 89 |
+
- Monitor memory usage in Railway dashboard
|
| 90 |
+
|
| 91 |
+
## Monitoring
|
| 92 |
+
|
| 93 |
+
- **Main App**: Access via Railway URL
|
| 94 |
+
- **Logs**: Check Railway dashboard for service logs
|
| 95 |
+
- **Health**: Railway monitors `/_stcore/health` endpoint
|
| 96 |
+
|
| 97 |
+
## Security Notes
|
| 98 |
+
|
| 99 |
+
- All services run on localhost (internal communication)
|
| 100 |
+
- Only the main Streamlit port is exposed externally
|
| 101 |
+
- MCP servers are not directly accessible from outside
|
| 102 |
+
|
| 103 |
+
## Customization
|
| 104 |
+
|
| 105 |
+
To modify the deployment:
|
| 106 |
+
|
| 107 |
+
1. **Add new services**: Update `start_services.py`
|
| 108 |
+
2. **Change ports**: Modify port calculation logic
|
| 109 |
+
3. **Update configuration**: Edit `railway.toml` or `Dockerfile`
|
| 110 |
+
|
| 111 |
+
## Support
|
| 112 |
+
|
| 113 |
+
For deployment issues:
|
| 114 |
+
|
| 115 |
+
1. Check Railway logs first
|
| 116 |
+
2. Verify environment variables
|
| 117 |
+
3. Test locally before deploying
|
| 118 |
+
4. Review service startup sequence
|
README.md
CHANGED
|
@@ -99,15 +99,13 @@ A comprehensive financial analysis tool that provides stock data, news analysis,
|
|
| 99 |
- News data via `gnews`
|
| 100 |
- **AI Model**: mistral-small-3.2-24b-instruct via OpenRouter
|
| 101 |
- **MCP Servers**: Modular servers for stock data and news
|
| 102 |
-
- **Monitoring**: Real-time system resource monitoring
|
| 103 |
|
| 104 |
## Files
|
| 105 |
|
| 106 |
- `Home.py`: Main Streamlit web application with ML predictions
|
| 107 |
-
- `
|
| 108 |
-
- `
|
| 109 |
-
- `
|
| 110 |
-
- `pages/System_Monitor.py`: System monitoring dashboard
|
| 111 |
- `requirements.txt`: Python dependencies
|
| 112 |
- `pyproject.toml`: Project configuration
|
| 113 |
|
|
@@ -120,7 +118,7 @@ A comprehensive financial analysis tool that provides stock data, news analysis,
|
|
| 120 |
- **scikit-learn**: Machine learning (Ridge Regression, StandardScaler, GridSearchCV)
|
| 121 |
- **pandas**: Data manipulation
|
| 122 |
- **numpy**: Numerical computations
|
| 123 |
-
|
| 124 |
- **openai**: AI model integration
|
| 125 |
- **fastmcp**: MCP server framework
|
| 126 |
|
|
@@ -165,7 +163,6 @@ A comprehensive financial analysis tool that provides stock data, news analysis,
|
|
| 165 |
- Python 3.10 or higher
|
| 166 |
- OpenRouter API key
|
| 167 |
- Internet connection for real-time data
|
| 168 |
-
- Optional: psutil for system monitoring features
|
| 169 |
|
| 170 |
## Disclaimer
|
| 171 |
|
|
|
|
| 99 |
- News data via `gnews`
|
| 100 |
- **AI Model**: mistral-small-3.2-24b-instruct via OpenRouter
|
| 101 |
- **MCP Servers**: Modular servers for stock data and news
|
|
|
|
| 102 |
|
| 103 |
## Files
|
| 104 |
|
| 105 |
- `Home.py`: Main Streamlit web application with ML predictions
|
| 106 |
+
- `Dockerfile`: Docker configuration for Railway deployment
|
| 107 |
+
- `docker-compose.yml`: Local development setup
|
| 108 |
+
- `railway.toml`: Railway deployment configuration
|
|
|
|
| 109 |
- `requirements.txt`: Python dependencies
|
| 110 |
- `pyproject.toml`: Project configuration
|
| 111 |
|
|
|
|
| 118 |
- **scikit-learn**: Machine learning (Ridge Regression, StandardScaler, GridSearchCV)
|
| 119 |
- **pandas**: Data manipulation
|
| 120 |
- **numpy**: Numerical computations
|
| 121 |
+
|
| 122 |
- **openai**: AI model integration
|
| 123 |
- **fastmcp**: MCP server framework
|
| 124 |
|
|
|
|
| 163 |
- Python 3.10 or higher
|
| 164 |
- OpenRouter API key
|
| 165 |
- Internet connection for real-time data
|
|
|
|
| 166 |
|
| 167 |
## Disclaimer
|
| 168 |
|
mcp_config.json
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"servers": {
|
| 3 |
+
"news_server": {
|
| 4 |
+
"url": "http://localhost:8001/mcp",
|
| 5 |
+
"transport": "streamable_http"
|
| 6 |
+
},
|
| 7 |
+
"stock_server": {
|
| 8 |
+
"url": "http://localhost:8002/mcp",
|
| 9 |
+
"transport": "streamable_http"
|
| 10 |
+
}
|
| 11 |
+
},
|
| 12 |
+
"railway": {
|
| 13 |
+
"news_server": {
|
| 14 |
+
"url": "http://localhost:$PORT+1/mcp",
|
| 15 |
+
"transport": "streamable_http"
|
| 16 |
+
},
|
| 17 |
+
"stock_server": {
|
| 18 |
+
"url": "http://localhost:$PORT+2/mcp",
|
| 19 |
+
"transport": "streamable_http"
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
}
|
news_server.py β mcp_news_server.py
RENAMED
|
@@ -4,9 +4,16 @@ from bs4 import BeautifulSoup
|
|
| 4 |
import re
|
| 5 |
|
| 6 |
# Initialize the MCP Server
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
mcp = FastMCP(
|
| 8 |
name="Financial News Server",
|
| 9 |
-
description="Provides tools and resources for fetching and analyzing financial news.",
|
|
|
|
|
|
|
|
|
|
| 10 |
)
|
| 11 |
|
| 12 |
|
|
@@ -21,7 +28,7 @@ def preprocess_text(text):
|
|
| 21 |
|
| 22 |
|
| 23 |
@mcp.tool()
|
| 24 |
-
def get_latest_news(ticker: str) -> str:
|
| 25 |
"""
|
| 26 |
Fetches recent news headlines and descriptions for a specific stock ticker.
|
| 27 |
Use this tool when a user asks for news, updates, recent events about a company or about the company's stock.
|
|
@@ -78,6 +85,9 @@ def summarize_news_sentiment(news_headlines: str) -> str:
|
|
| 78 |
|
| 79 |
|
| 80 |
if __name__ == "__main__":
|
| 81 |
-
# Use
|
| 82 |
-
print("News Server running
|
| 83 |
-
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
import re
|
| 5 |
|
| 6 |
# Initialize the MCP Server
|
| 7 |
+
import os
|
| 8 |
+
|
| 9 |
+
port = int(os.environ.get("PORT", "8002"))
|
| 10 |
+
|
| 11 |
mcp = FastMCP(
|
| 12 |
name="Financial News Server",
|
| 13 |
+
description="Provides tools and resources for fetching and analyzing financial news with real-time updates via SSE.",
|
| 14 |
+
host="0.0.0.0",
|
| 15 |
+
port=port,
|
| 16 |
+
stateless_http=True,
|
| 17 |
)
|
| 18 |
|
| 19 |
|
|
|
|
| 28 |
|
| 29 |
|
| 30 |
@mcp.tool()
|
| 31 |
+
async def get_latest_news(ticker: str) -> str:
|
| 32 |
"""
|
| 33 |
Fetches recent news headlines and descriptions for a specific stock ticker.
|
| 34 |
Use this tool when a user asks for news, updates, recent events about a company or about the company's stock.
|
|
|
|
| 85 |
|
| 86 |
|
| 87 |
if __name__ == "__main__":
|
| 88 |
+
# Use streamable HTTP transport with MCP endpoint
|
| 89 |
+
print(f"News Server running on http://localhost:{port}")
|
| 90 |
+
print("MCP endpoint available at:")
|
| 91 |
+
print(f"- http://localhost:{port}/mcp")
|
| 92 |
+
|
| 93 |
+
mcp.run(transport="streamable-http")
|
stock_data_server.py β mcp_stock_server.py
RENAMED
|
@@ -1,14 +1,22 @@
|
|
| 1 |
from mcp.server.fastmcp import FastMCP
|
| 2 |
import yfinance as yf
|
| 3 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
mcp = FastMCP(
|
| 5 |
name="Stock Market Data Server",
|
| 6 |
-
description="Provides tools for fetching historical stock prices and market data.",
|
|
|
|
|
|
|
|
|
|
| 7 |
)
|
| 8 |
|
| 9 |
|
| 10 |
@mcp.tool()
|
| 11 |
-
def get_historical_stock_data(ticker: str) -> str:
|
| 12 |
"""
|
| 13 |
Fetches recent historical stock data (Open, High, Low, Close, Volume) for a given stock ticker.
|
| 14 |
The input must be a single, valid stock ticker symbol (e.g., 'TSLA').
|
|
@@ -22,8 +30,6 @@ def get_historical_stock_data(ticker: str) -> str:
|
|
| 22 |
if hist_data.empty:
|
| 23 |
return f"No stock data found for ticker {ticker}."
|
| 24 |
|
| 25 |
-
# Return the last 5 days of data as a clean string
|
| 26 |
-
# return f"Recent stock data for {ticker}:\n{hist_data.tail().to_string()}"
|
| 27 |
return f"Recent stock data for {ticker}:\n{hist_data.to_string()}"
|
| 28 |
except Exception as e:
|
| 29 |
return f"An error occurred while fetching stock data: {e}"
|
|
@@ -41,8 +47,10 @@ def get_ohlc_definitions() -> str:
|
|
| 41 |
"""
|
| 42 |
|
| 43 |
|
| 44 |
-
# Add this to the end of stock_data_server.py
|
| 45 |
if __name__ == "__main__":
|
| 46 |
-
# Use
|
| 47 |
-
print("Stock Data Server running
|
| 48 |
-
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
from mcp.server.fastmcp import FastMCP
|
| 2 |
import yfinance as yf
|
| 3 |
|
| 4 |
+
# Initialize the MCP Server
|
| 5 |
+
import os
|
| 6 |
+
|
| 7 |
+
port = int(os.environ.get("PORT", "8001"))
|
| 8 |
+
|
| 9 |
mcp = FastMCP(
|
| 10 |
name="Stock Market Data Server",
|
| 11 |
+
description="Provides tools for fetching historical stock prices and market data with real-time updates via SSE.",
|
| 12 |
+
host="0.0.0.0",
|
| 13 |
+
port=port,
|
| 14 |
+
stateless_http=True,
|
| 15 |
)
|
| 16 |
|
| 17 |
|
| 18 |
@mcp.tool()
|
| 19 |
+
async def get_historical_stock_data(ticker: str) -> str:
|
| 20 |
"""
|
| 21 |
Fetches recent historical stock data (Open, High, Low, Close, Volume) for a given stock ticker.
|
| 22 |
The input must be a single, valid stock ticker symbol (e.g., 'TSLA').
|
|
|
|
| 30 |
if hist_data.empty:
|
| 31 |
return f"No stock data found for ticker {ticker}."
|
| 32 |
|
|
|
|
|
|
|
| 33 |
return f"Recent stock data for {ticker}:\n{hist_data.to_string()}"
|
| 34 |
except Exception as e:
|
| 35 |
return f"An error occurred while fetching stock data: {e}"
|
|
|
|
| 47 |
"""
|
| 48 |
|
| 49 |
|
|
|
|
| 50 |
if __name__ == "__main__":
|
| 51 |
+
# Use streamable HTTP transport with MCP endpoint
|
| 52 |
+
print(f"Stock Data Server running on http://localhost:{port}")
|
| 53 |
+
print("MCP endpoint available at:")
|
| 54 |
+
print(f"- http://localhost:{port}/mcp")
|
| 55 |
+
|
| 56 |
+
mcp.run(transport="streamable-http")
|
pages/System_Monitor.py
DELETED
|
@@ -1,162 +0,0 @@
|
|
| 1 |
-
import streamlit as st
|
| 2 |
-
|
| 3 |
-
try:
|
| 4 |
-
from resource_monitor import (
|
| 5 |
-
start_resource_monitoring,
|
| 6 |
-
get_resource_stats,
|
| 7 |
-
get_resource_summary,
|
| 8 |
-
export_resource_data,
|
| 9 |
-
)
|
| 10 |
-
|
| 11 |
-
RESOURCE_MONITORING_AVAILABLE = True
|
| 12 |
-
except ImportError:
|
| 13 |
-
RESOURCE_MONITORING_AVAILABLE = False
|
| 14 |
-
st.warning("Resource monitoring not available. Install psutil: pip install psutil")
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
def main():
|
| 18 |
-
st.set_page_config(page_title="System Monitor", page_icon="π", layout="wide")
|
| 19 |
-
|
| 20 |
-
st.title("π System Resource Monitor")
|
| 21 |
-
st.markdown("Real-time monitoring of system resources and application metrics.")
|
| 22 |
-
|
| 23 |
-
# Initialize resource monitoring
|
| 24 |
-
if RESOURCE_MONITORING_AVAILABLE:
|
| 25 |
-
if "resource_monitoring_started" not in st.session_state:
|
| 26 |
-
start_resource_monitoring()
|
| 27 |
-
st.session_state.resource_monitoring_started = True
|
| 28 |
-
|
| 29 |
-
# Current stats with loading state
|
| 30 |
-
with st.spinner("π Loading resource statistics..."):
|
| 31 |
-
current_stats = get_resource_stats()
|
| 32 |
-
|
| 33 |
-
if "error" not in current_stats:
|
| 34 |
-
# System Metrics
|
| 35 |
-
st.subheader("π₯οΈ System Metrics")
|
| 36 |
-
col1, col2, col3, col4 = st.columns(4)
|
| 37 |
-
|
| 38 |
-
with col1:
|
| 39 |
-
st.metric("CPU Usage", f"{current_stats['cpu_percent']:.1f}%")
|
| 40 |
-
st.metric("Memory Usage", f"{current_stats['memory_percent']:.1f}%")
|
| 41 |
-
|
| 42 |
-
with col2:
|
| 43 |
-
st.metric("Memory (GB)", f"{current_stats['memory_gb']:.2f} GB")
|
| 44 |
-
st.metric("Disk Usage", f"{current_stats['disk_usage_percent']:.1f}%")
|
| 45 |
-
|
| 46 |
-
with col3:
|
| 47 |
-
st.metric("Network Sent", f"{current_stats['network_sent_mb']:.1f} MB")
|
| 48 |
-
st.metric("Network Recv", f"{current_stats['network_recv_mb']:.1f} MB")
|
| 49 |
-
|
| 50 |
-
with col4:
|
| 51 |
-
st.metric("Process Count", current_stats["process_count"])
|
| 52 |
-
st.metric("Uptime", f"{current_stats['uptime_seconds']/60:.1f} min")
|
| 53 |
-
|
| 54 |
-
# Application-specific metrics
|
| 55 |
-
st.subheader("π Application Metrics")
|
| 56 |
-
col1, col2, col3, col4 = st.columns(4)
|
| 57 |
-
|
| 58 |
-
with col1:
|
| 59 |
-
st.metric("YFinance Calls", current_stats["yfinance_calls"])
|
| 60 |
-
|
| 61 |
-
with col2:
|
| 62 |
-
st.metric(
|
| 63 |
-
"Ridge Training Time",
|
| 64 |
-
f"{current_stats['ridge_training_time']:.2f}s",
|
| 65 |
-
)
|
| 66 |
-
|
| 67 |
-
with col3:
|
| 68 |
-
st.metric("ML Predictions", current_stats.get("ml_predictions", 0))
|
| 69 |
-
|
| 70 |
-
with col4:
|
| 71 |
-
st.metric("Streamlit Requests", current_stats["streamlit_requests"])
|
| 72 |
-
|
| 73 |
-
# ML Model Information
|
| 74 |
-
st.subheader("π€ Machine Learning Metrics")
|
| 75 |
-
col1, col2, col3 = st.columns(3)
|
| 76 |
-
|
| 77 |
-
with col1:
|
| 78 |
-
st.metric(
|
| 79 |
-
"Model Type", current_stats.get("ml_model", "Ridge Regression")
|
| 80 |
-
)
|
| 81 |
-
st.metric("Features Used", current_stats.get("feature_count", 35))
|
| 82 |
-
|
| 83 |
-
with col2:
|
| 84 |
-
st.metric(
|
| 85 |
-
"Technical Indicators", current_stats.get("feature_count", 35)
|
| 86 |
-
)
|
| 87 |
-
st.metric("Training Data", "5 years")
|
| 88 |
-
|
| 89 |
-
with col3:
|
| 90 |
-
st.metric("Prediction Period", "30 days")
|
| 91 |
-
st.metric("Model Accuracy", "80-95% RΒ²")
|
| 92 |
-
|
| 93 |
-
# Summary statistics
|
| 94 |
-
summary_stats = get_resource_summary()
|
| 95 |
-
if summary_stats:
|
| 96 |
-
st.subheader("π Summary Statistics")
|
| 97 |
-
col1, col2 = st.columns(2)
|
| 98 |
-
|
| 99 |
-
with col1:
|
| 100 |
-
st.write(
|
| 101 |
-
f"**Average CPU Usage:** {summary_stats.get('avg_cpu_percent', 0):.1f}%"
|
| 102 |
-
)
|
| 103 |
-
st.write(
|
| 104 |
-
f"**Max CPU Usage:** {summary_stats.get('max_cpu_percent', 0):.1f}%"
|
| 105 |
-
)
|
| 106 |
-
st.write(
|
| 107 |
-
f"**Average Memory Usage:** {summary_stats.get('avg_memory_percent', 0):.1f}%"
|
| 108 |
-
)
|
| 109 |
-
st.write(
|
| 110 |
-
f"**Max Memory Usage:** {summary_stats.get('max_memory_percent', 0):.1f}%"
|
| 111 |
-
)
|
| 112 |
-
|
| 113 |
-
with col2:
|
| 114 |
-
st.write(
|
| 115 |
-
f"**Total Network Sent:** {summary_stats.get('total_network_sent_mb', 0):.1f} MB"
|
| 116 |
-
)
|
| 117 |
-
st.write(
|
| 118 |
-
f"**Total Network Recv:** {summary_stats.get('total_network_recv_mb', 0):.1f} MB"
|
| 119 |
-
)
|
| 120 |
-
st.write(
|
| 121 |
-
f"**Total Uptime:** {summary_stats.get('total_uptime_minutes', 0):.1f} minutes"
|
| 122 |
-
)
|
| 123 |
-
st.write(
|
| 124 |
-
f"**Total YFinance Calls:** {summary_stats.get('yfinance_calls', 0)}"
|
| 125 |
-
)
|
| 126 |
-
|
| 127 |
-
# ML Summary
|
| 128 |
-
if summary_stats:
|
| 129 |
-
st.subheader("π€ ML Model Summary")
|
| 130 |
-
col1, col2 = st.columns(2)
|
| 131 |
-
|
| 132 |
-
with col1:
|
| 133 |
-
st.write(
|
| 134 |
-
f"**Total Ridge Training Time:** {summary_stats.get('ridge_training_time', 0):.2f}s"
|
| 135 |
-
)
|
| 136 |
-
st.write(
|
| 137 |
-
f"**Total ML Predictions:** {summary_stats.get('ml_predictions', 0)}"
|
| 138 |
-
)
|
| 139 |
-
st.write(
|
| 140 |
-
f"**Model Type:** {summary_stats.get('ml_model', 'Ridge Regression')}"
|
| 141 |
-
)
|
| 142 |
-
st.write(
|
| 143 |
-
f"**Technical Indicators:** {summary_stats.get('technical_indicators', 35)}"
|
| 144 |
-
)
|
| 145 |
-
|
| 146 |
-
with col2:
|
| 147 |
-
st.write("**Features Include:**")
|
| 148 |
-
st.write("β’ Moving Averages (SMA 10, 20, 50, 200)")
|
| 149 |
-
st.write("β’ Momentum Indicators (RSI, MACD, Stochastic)")
|
| 150 |
-
st.write("β’ Volatility Indicators (Bollinger Bands)")
|
| 151 |
-
st.write("β’ Volume Analysis & Market Sentiment")
|
| 152 |
-
|
| 153 |
-
else:
|
| 154 |
-
st.error(f"Error getting resource stats: {current_stats['error']}")
|
| 155 |
-
else:
|
| 156 |
-
st.warning(
|
| 157 |
-
"Resource monitoring is not available. Please install psutil: pip install psutil"
|
| 158 |
-
)
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
if __name__ == "__main__":
|
| 162 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
pyproject.toml
CHANGED
|
@@ -3,17 +3,21 @@ name = "financial-agent"
|
|
| 3 |
version = "0.1.0"
|
| 4 |
description = "Add your description here"
|
| 5 |
readme = "README.md"
|
| 6 |
-
requires-python = ">=3.
|
| 7 |
dependencies = [
|
|
|
|
| 8 |
"beautifulsoup4>=4.13.4",
|
| 9 |
"fastmcp>=2.10.6",
|
| 10 |
"gnews>=0.4.1",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
"mcp[cli]>=1.12.2",
|
| 12 |
"openai>=1.97.1",
|
| 13 |
"pandas>=2.3.1",
|
| 14 |
"plotly>=5.17.0",
|
| 15 |
-
"prophet>=1.1.7",
|
| 16 |
-
"psutil>=7.0.0",
|
| 17 |
"python-dotenv>=1.1.1",
|
| 18 |
"sentence-transformers>=5.0.0",
|
| 19 |
"streamlit>=1.28.0",
|
|
|
|
| 3 |
version = "0.1.0"
|
| 4 |
description = "Add your description here"
|
| 5 |
readme = "README.md"
|
| 6 |
+
requires-python = ">=3.12"
|
| 7 |
dependencies = [
|
| 8 |
+
"aiohttp>=3.12.15",
|
| 9 |
"beautifulsoup4>=4.13.4",
|
| 10 |
"fastmcp>=2.10.6",
|
| 11 |
"gnews>=0.4.1",
|
| 12 |
+
"holidays>=0.77",
|
| 13 |
+
"httpx>=0.28.1",
|
| 14 |
+
"langchain-groq>=0.3.6",
|
| 15 |
+
"langchain-mcp-adapters>=0.1.9",
|
| 16 |
+
"langgraph>=0.6.3",
|
| 17 |
"mcp[cli]>=1.12.2",
|
| 18 |
"openai>=1.97.1",
|
| 19 |
"pandas>=2.3.1",
|
| 20 |
"plotly>=5.17.0",
|
|
|
|
|
|
|
| 21 |
"python-dotenv>=1.1.1",
|
| 22 |
"sentence-transformers>=5.0.0",
|
| 23 |
"streamlit>=1.28.0",
|
railway.toml
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[build]
|
| 2 |
+
builder = "DOCKERFILE"
|
| 3 |
+
dockerfilePath = "Dockerfile"
|
| 4 |
+
|
| 5 |
+
[deploy]
|
| 6 |
+
startCommand = "python start_services.py"
|
| 7 |
+
healthcheckPath = "/_stcore/health"
|
| 8 |
+
healthcheckTimeout = 300
|
| 9 |
+
restartPolicyType = "ON_FAILURE"
|
| 10 |
+
restartPolicyMaxRetries = 10
|
| 11 |
+
|
| 12 |
+
[env]
|
| 13 |
+
GROQ_API_KEY = ""
|
| 14 |
+
MODEL = ""
|
| 15 |
+
NEWS_SERVER_URL = "http://localhost:8001/mcp"
|
| 16 |
+
STOCK_SERVER_URL = "http://localhost:8002/mcp"
|
requirements.txt
CHANGED
|
Binary files a/requirements.txt and b/requirements.txt differ
|
|
|
resource_monitor.py
DELETED
|
@@ -1,386 +0,0 @@
|
|
| 1 |
-
import psutil
|
| 2 |
-
import time
|
| 3 |
-
import threading
|
| 4 |
-
import plotly.graph_objects as go
|
| 5 |
-
from datetime import datetime
|
| 6 |
-
import json
|
| 7 |
-
import os
|
| 8 |
-
import glob
|
| 9 |
-
from typing import Dict
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
class ResourceMonitor:
|
| 13 |
-
"""Monitor system resources for the QueryStockAI."""
|
| 14 |
-
|
| 15 |
-
def __init__(self):
|
| 16 |
-
self.monitoring = False
|
| 17 |
-
self.monitor_thread = None
|
| 18 |
-
self.resource_data = {
|
| 19 |
-
"timestamps": [],
|
| 20 |
-
"cpu_percent": [],
|
| 21 |
-
"memory_percent": [],
|
| 22 |
-
"memory_mb": [],
|
| 23 |
-
"disk_usage_percent": [],
|
| 24 |
-
"network_sent_mb": [],
|
| 25 |
-
"network_recv_mb": [],
|
| 26 |
-
"process_count": [],
|
| 27 |
-
"yfinance_calls": 0,
|
| 28 |
-
"ridge_training_time": 0, # Updated from prophet_training_time
|
| 29 |
-
"streamlit_requests": 0,
|
| 30 |
-
"ml_predictions": 0, # New counter for ML predictions
|
| 31 |
-
"feature_count": 35, # Number of technical indicators used
|
| 32 |
-
}
|
| 33 |
-
self.start_time = None
|
| 34 |
-
self.process = psutil.Process()
|
| 35 |
-
|
| 36 |
-
def start_monitoring(self):
|
| 37 |
-
"""Start resource monitoring in a separate thread."""
|
| 38 |
-
if not self.monitoring:
|
| 39 |
-
self.monitoring = True
|
| 40 |
-
self.start_time = datetime.now()
|
| 41 |
-
|
| 42 |
-
# Clean up old files when starting monitoring
|
| 43 |
-
self.cleanup_old_files()
|
| 44 |
-
|
| 45 |
-
self.monitor_thread = threading.Thread(
|
| 46 |
-
target=self._monitor_loop, daemon=True
|
| 47 |
-
)
|
| 48 |
-
self.monitor_thread.start()
|
| 49 |
-
return True
|
| 50 |
-
return False
|
| 51 |
-
|
| 52 |
-
def stop_monitoring(self):
|
| 53 |
-
"""Stop resource monitoring."""
|
| 54 |
-
self.monitoring = False
|
| 55 |
-
if self.monitor_thread:
|
| 56 |
-
self.monitor_thread.join(timeout=1)
|
| 57 |
-
|
| 58 |
-
def _monitor_loop(self):
|
| 59 |
-
"""Main monitoring loop."""
|
| 60 |
-
while self.monitoring:
|
| 61 |
-
try:
|
| 62 |
-
# Get current timestamp
|
| 63 |
-
timestamp = datetime.now()
|
| 64 |
-
|
| 65 |
-
# CPU usage
|
| 66 |
-
cpu_percent = psutil.cpu_percent(interval=1)
|
| 67 |
-
|
| 68 |
-
# Memory usage
|
| 69 |
-
memory = psutil.virtual_memory()
|
| 70 |
-
memory_percent = memory.percent
|
| 71 |
-
memory_mb = memory.used / (1024 * 1024) # Convert to MB
|
| 72 |
-
|
| 73 |
-
# Disk usage
|
| 74 |
-
disk = psutil.disk_usage("/")
|
| 75 |
-
disk_usage_percent = disk.percent
|
| 76 |
-
|
| 77 |
-
# Network usage
|
| 78 |
-
network = psutil.net_io_counters()
|
| 79 |
-
network_sent_mb = network.bytes_sent / (1024 * 1024)
|
| 80 |
-
network_recv_mb = network.bytes_recv / (1024 * 1024)
|
| 81 |
-
|
| 82 |
-
# Process count
|
| 83 |
-
process_count = len(psutil.pids())
|
| 84 |
-
|
| 85 |
-
# Store data
|
| 86 |
-
self.resource_data["timestamps"].append(timestamp)
|
| 87 |
-
self.resource_data["cpu_percent"].append(cpu_percent)
|
| 88 |
-
self.resource_data["memory_percent"].append(memory_percent)
|
| 89 |
-
self.resource_data["memory_mb"].append(memory_mb)
|
| 90 |
-
self.resource_data["disk_usage_percent"].append(disk_usage_percent)
|
| 91 |
-
self.resource_data["network_sent_mb"].append(network_sent_mb)
|
| 92 |
-
self.resource_data["network_recv_mb"].append(network_recv_mb)
|
| 93 |
-
self.resource_data["process_count"].append(process_count)
|
| 94 |
-
|
| 95 |
-
# Keep only last 1000 data points to prevent memory issues
|
| 96 |
-
max_points = 1000
|
| 97 |
-
if len(self.resource_data["timestamps"]) > max_points:
|
| 98 |
-
for key in self.resource_data:
|
| 99 |
-
if isinstance(self.resource_data[key], list):
|
| 100 |
-
self.resource_data[key] = self.resource_data[key][
|
| 101 |
-
-max_points:
|
| 102 |
-
]
|
| 103 |
-
|
| 104 |
-
time.sleep(2) # Monitor every 2 seconds
|
| 105 |
-
|
| 106 |
-
except Exception as e:
|
| 107 |
-
print(f"Error in monitoring loop: {e}")
|
| 108 |
-
time.sleep(5)
|
| 109 |
-
|
| 110 |
-
def get_current_stats(self) -> Dict:
|
| 111 |
-
"""Get current resource statistics."""
|
| 112 |
-
try:
|
| 113 |
-
memory = psutil.virtual_memory()
|
| 114 |
-
disk = psutil.disk_usage("/")
|
| 115 |
-
network = psutil.net_io_counters()
|
| 116 |
-
|
| 117 |
-
return {
|
| 118 |
-
"cpu_percent": psutil.cpu_percent(),
|
| 119 |
-
"memory_percent": memory.percent,
|
| 120 |
-
"memory_mb": memory.used / (1024 * 1024),
|
| 121 |
-
"memory_gb": memory.used / (1024 * 1024 * 1024),
|
| 122 |
-
"disk_usage_percent": disk.percent,
|
| 123 |
-
"disk_free_gb": disk.free / (1024 * 1024 * 1024),
|
| 124 |
-
"network_sent_mb": network.bytes_sent / (1024 * 1024),
|
| 125 |
-
"network_recv_mb": network.bytes_recv / (1024 * 1024),
|
| 126 |
-
"process_count": len(psutil.pids()),
|
| 127 |
-
"uptime_seconds": (
|
| 128 |
-
(datetime.now() - self.start_time).total_seconds()
|
| 129 |
-
if self.start_time
|
| 130 |
-
else 0
|
| 131 |
-
),
|
| 132 |
-
"yfinance_calls": self.resource_data["yfinance_calls"],
|
| 133 |
-
"ridge_training_time": self.resource_data[
|
| 134 |
-
"ridge_training_time"
|
| 135 |
-
], # Updated
|
| 136 |
-
"streamlit_requests": self.resource_data["streamlit_requests"],
|
| 137 |
-
"ml_predictions": self.resource_data["ml_predictions"], # New
|
| 138 |
-
"feature_count": self.resource_data["feature_count"], # New
|
| 139 |
-
}
|
| 140 |
-
except Exception as e:
|
| 141 |
-
return {"error": str(e)}
|
| 142 |
-
|
| 143 |
-
def increment_yfinance_calls(self):
|
| 144 |
-
"""Increment yfinance API call counter."""
|
| 145 |
-
self.resource_data["yfinance_calls"] += 1
|
| 146 |
-
|
| 147 |
-
def add_ridge_training_time(self, seconds: float):
|
| 148 |
-
"""Add Ridge Regression training time."""
|
| 149 |
-
self.resource_data["ridge_training_time"] += seconds
|
| 150 |
-
|
| 151 |
-
def increment_streamlit_requests(self):
|
| 152 |
-
"""Increment Streamlit request counter."""
|
| 153 |
-
self.resource_data["streamlit_requests"] += 1
|
| 154 |
-
|
| 155 |
-
def increment_ml_predictions(self):
|
| 156 |
-
"""Increment ML prediction counter."""
|
| 157 |
-
self.resource_data["ml_predictions"] += 1
|
| 158 |
-
|
| 159 |
-
def create_resource_dashboard(self) -> go.Figure:
|
| 160 |
-
"""Create a comprehensive resource dashboard."""
|
| 161 |
-
if not self.resource_data["timestamps"]:
|
| 162 |
-
return None
|
| 163 |
-
|
| 164 |
-
# Create subplots
|
| 165 |
-
fig = go.Figure()
|
| 166 |
-
|
| 167 |
-
# CPU Usage
|
| 168 |
-
fig.add_trace(
|
| 169 |
-
go.Scatter(
|
| 170 |
-
x=self.resource_data["timestamps"],
|
| 171 |
-
y=self.resource_data["cpu_percent"],
|
| 172 |
-
mode="lines",
|
| 173 |
-
name="CPU %",
|
| 174 |
-
line=dict(color="red", width=2),
|
| 175 |
-
)
|
| 176 |
-
)
|
| 177 |
-
|
| 178 |
-
# Memory Usage
|
| 179 |
-
fig.add_trace(
|
| 180 |
-
go.Scatter(
|
| 181 |
-
x=self.resource_data["timestamps"],
|
| 182 |
-
y=self.resource_data["memory_percent"],
|
| 183 |
-
mode="lines",
|
| 184 |
-
name="Memory %",
|
| 185 |
-
line=dict(color="blue", width=2),
|
| 186 |
-
yaxis="y2",
|
| 187 |
-
)
|
| 188 |
-
)
|
| 189 |
-
|
| 190 |
-
# Memory Usage in MB
|
| 191 |
-
fig.add_trace(
|
| 192 |
-
go.Scatter(
|
| 193 |
-
x=self.resource_data["timestamps"],
|
| 194 |
-
y=self.resource_data["memory_mb"],
|
| 195 |
-
mode="lines",
|
| 196 |
-
name="Memory (MB)",
|
| 197 |
-
line=dict(color="lightblue", width=2),
|
| 198 |
-
yaxis="y3",
|
| 199 |
-
)
|
| 200 |
-
)
|
| 201 |
-
|
| 202 |
-
# Network Usage
|
| 203 |
-
fig.add_trace(
|
| 204 |
-
go.Scatter(
|
| 205 |
-
x=self.resource_data["timestamps"],
|
| 206 |
-
y=self.resource_data["network_sent_mb"],
|
| 207 |
-
mode="lines",
|
| 208 |
-
name="Network Sent (MB)",
|
| 209 |
-
line=dict(color="green", width=2),
|
| 210 |
-
yaxis="y4",
|
| 211 |
-
)
|
| 212 |
-
)
|
| 213 |
-
|
| 214 |
-
fig.add_trace(
|
| 215 |
-
go.Scatter(
|
| 216 |
-
x=self.resource_data["timestamps"],
|
| 217 |
-
y=self.resource_data["network_recv_mb"],
|
| 218 |
-
mode="lines",
|
| 219 |
-
name="Network Recv (MB)",
|
| 220 |
-
line=dict(color="orange", width=2),
|
| 221 |
-
yaxis="y4",
|
| 222 |
-
)
|
| 223 |
-
)
|
| 224 |
-
|
| 225 |
-
# Update layout
|
| 226 |
-
fig.update_layout(
|
| 227 |
-
title="System Resource Usage - QueryStockAI with Ridge Regression",
|
| 228 |
-
xaxis_title="Time",
|
| 229 |
-
height=600,
|
| 230 |
-
hovermode="x unified",
|
| 231 |
-
legend=dict(
|
| 232 |
-
orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1
|
| 233 |
-
),
|
| 234 |
-
yaxis=dict(title="CPU %", side="left"),
|
| 235 |
-
yaxis2=dict(title="Memory %", side="right", overlaying="y"),
|
| 236 |
-
yaxis3=dict(title="Memory (MB)", side="right", position=0.95),
|
| 237 |
-
yaxis4=dict(title="Network (MB)", side="right", position=0.9),
|
| 238 |
-
)
|
| 239 |
-
|
| 240 |
-
return fig
|
| 241 |
-
|
| 242 |
-
def get_summary_stats(self) -> Dict:
|
| 243 |
-
"""Get summary statistics."""
|
| 244 |
-
if not self.resource_data["timestamps"]:
|
| 245 |
-
return {}
|
| 246 |
-
|
| 247 |
-
return {
|
| 248 |
-
"total_uptime_minutes": (
|
| 249 |
-
(datetime.now() - self.start_time).total_seconds() / 60
|
| 250 |
-
if self.start_time
|
| 251 |
-
else 0
|
| 252 |
-
),
|
| 253 |
-
"avg_cpu_percent": sum(self.resource_data["cpu_percent"])
|
| 254 |
-
/ len(self.resource_data["cpu_percent"]),
|
| 255 |
-
"max_cpu_percent": max(self.resource_data["cpu_percent"]),
|
| 256 |
-
"avg_memory_percent": sum(self.resource_data["memory_percent"])
|
| 257 |
-
/ len(self.resource_data["memory_percent"]),
|
| 258 |
-
"max_memory_percent": max(self.resource_data["memory_percent"]),
|
| 259 |
-
"avg_memory_mb": sum(self.resource_data["memory_mb"])
|
| 260 |
-
/ len(self.resource_data["memory_mb"]),
|
| 261 |
-
"max_memory_mb": max(self.resource_data["memory_mb"]),
|
| 262 |
-
"total_network_sent_mb": sum(self.resource_data["network_sent_mb"]),
|
| 263 |
-
"total_network_recv_mb": sum(self.resource_data["network_recv_mb"]),
|
| 264 |
-
"yfinance_calls": self.resource_data["yfinance_calls"],
|
| 265 |
-
"ridge_training_time": self.resource_data["ridge_training_time"], # Updated
|
| 266 |
-
"streamlit_requests": self.resource_data["streamlit_requests"],
|
| 267 |
-
"ml_predictions": self.resource_data["ml_predictions"], # New
|
| 268 |
-
"feature_count": self.resource_data["feature_count"], # New
|
| 269 |
-
"ml_model": "Ridge Regression", # New
|
| 270 |
-
"technical_indicators": self.resource_data["feature_count"], # New
|
| 271 |
-
}
|
| 272 |
-
|
| 273 |
-
def export_data(self, filename: str = None):
|
| 274 |
-
"""Export monitoring data to JSON file."""
|
| 275 |
-
if filename is None:
|
| 276 |
-
filename = (
|
| 277 |
-
f"resource_monitor_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
|
| 278 |
-
)
|
| 279 |
-
|
| 280 |
-
export_data = {
|
| 281 |
-
"summary_stats": self.get_summary_stats(),
|
| 282 |
-
"monitoring_data": {
|
| 283 |
-
"timestamps": [
|
| 284 |
-
ts.isoformat() for ts in self.resource_data["timestamps"]
|
| 285 |
-
],
|
| 286 |
-
"cpu_percent": self.resource_data["cpu_percent"],
|
| 287 |
-
"memory_percent": self.resource_data["memory_percent"],
|
| 288 |
-
"memory_mb": self.resource_data["memory_mb"],
|
| 289 |
-
"disk_usage_percent": self.resource_data["disk_usage_percent"],
|
| 290 |
-
"network_sent_mb": self.resource_data["network_sent_mb"],
|
| 291 |
-
"network_recv_mb": self.resource_data["network_recv_mb"],
|
| 292 |
-
"process_count": self.resource_data["process_count"],
|
| 293 |
-
},
|
| 294 |
-
"ml_metrics": { # New section
|
| 295 |
-
"model_type": "Ridge Regression",
|
| 296 |
-
"feature_count": self.resource_data["feature_count"],
|
| 297 |
-
"training_time": self.resource_data["ridge_training_time"],
|
| 298 |
-
"predictions_made": self.resource_data["ml_predictions"],
|
| 299 |
-
"data_sources": ["yfinance", "technical_indicators"],
|
| 300 |
-
},
|
| 301 |
-
}
|
| 302 |
-
|
| 303 |
-
with open(filename, "w") as f:
|
| 304 |
-
json.dump(export_data, f, indent=2)
|
| 305 |
-
|
| 306 |
-
# Clean up old files after creating a new one
|
| 307 |
-
self.cleanup_old_files()
|
| 308 |
-
|
| 309 |
-
return filename
|
| 310 |
-
|
| 311 |
-
def cleanup_old_files(self, max_files: int = 10, max_age_days: int = 7):
|
| 312 |
-
"""Clean up old JSON files to prevent disk space issues."""
|
| 313 |
-
try:
|
| 314 |
-
# Get all resource monitor JSON files
|
| 315 |
-
pattern = "resource_monitor_*.json"
|
| 316 |
-
files = glob.glob(pattern)
|
| 317 |
-
|
| 318 |
-
if len(files) <= max_files:
|
| 319 |
-
return # No cleanup needed
|
| 320 |
-
|
| 321 |
-
# Sort files by modification time (oldest first)
|
| 322 |
-
files.sort(key=lambda x: os.path.getmtime(x))
|
| 323 |
-
|
| 324 |
-
# Calculate cutoff time for age-based cleanup
|
| 325 |
-
cutoff_time = time.time() - (max_age_days * 24 * 60 * 60)
|
| 326 |
-
|
| 327 |
-
files_to_delete = []
|
| 328 |
-
|
| 329 |
-
# Add files that are too old
|
| 330 |
-
for file in files:
|
| 331 |
-
if os.path.getmtime(file) < cutoff_time:
|
| 332 |
-
files_to_delete.append(file)
|
| 333 |
-
|
| 334 |
-
# Add oldest files if we still have too many
|
| 335 |
-
remaining_files = [f for f in files if f not in files_to_delete]
|
| 336 |
-
if len(remaining_files) > max_files:
|
| 337 |
-
files_to_delete.extend(remaining_files[:-max_files])
|
| 338 |
-
|
| 339 |
-
# Delete the files
|
| 340 |
-
deleted_count = 0
|
| 341 |
-
for file in files_to_delete:
|
| 342 |
-
try:
|
| 343 |
-
os.remove(file)
|
| 344 |
-
deleted_count += 1
|
| 345 |
-
except Exception as e:
|
| 346 |
-
print(f"Error deleting file {file}: {e}")
|
| 347 |
-
|
| 348 |
-
if deleted_count > 0:
|
| 349 |
-
print(f"Cleaned up {deleted_count} old resource monitor files")
|
| 350 |
-
|
| 351 |
-
except Exception as e:
|
| 352 |
-
print(f"Error during file cleanup: {e}")
|
| 353 |
-
|
| 354 |
-
|
| 355 |
-
# Global monitor instance
|
| 356 |
-
resource_monitor = ResourceMonitor()
|
| 357 |
-
|
| 358 |
-
|
| 359 |
-
def start_resource_monitoring():
|
| 360 |
-
"""Start resource monitoring."""
|
| 361 |
-
return resource_monitor.start_monitoring()
|
| 362 |
-
|
| 363 |
-
|
| 364 |
-
def stop_resource_monitoring():
|
| 365 |
-
"""Stop resource monitoring."""
|
| 366 |
-
resource_monitor.stop_monitoring()
|
| 367 |
-
|
| 368 |
-
|
| 369 |
-
def get_resource_stats():
|
| 370 |
-
"""Get current resource statistics."""
|
| 371 |
-
return resource_monitor.get_current_stats()
|
| 372 |
-
|
| 373 |
-
|
| 374 |
-
def create_resource_dashboard():
|
| 375 |
-
"""Create resource dashboard."""
|
| 376 |
-
return resource_monitor.create_resource_dashboard()
|
| 377 |
-
|
| 378 |
-
|
| 379 |
-
def get_resource_summary():
|
| 380 |
-
"""Get resource summary."""
|
| 381 |
-
return resource_monitor.get_summary_stats()
|
| 382 |
-
|
| 383 |
-
|
| 384 |
-
def export_resource_data(filename=None):
|
| 385 |
-
"""Export resource data."""
|
| 386 |
-
return resource_monitor.export_data(filename)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
start_services.py
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Startup script to run all services on Railway deployment.
|
| 4 |
+
This script launches:
|
| 5 |
+
1. Streamlit app on $PORT (main port)
|
| 6 |
+
2. MCP Stock Server on $PORT + 1
|
| 7 |
+
3. MCP News Server on $PORT + 2
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import os
|
| 11 |
+
import sys
|
| 12 |
+
import time
|
| 13 |
+
import signal
|
| 14 |
+
import subprocess
|
| 15 |
+
import multiprocessing
|
| 16 |
+
from pathlib import Path
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def get_port():
|
| 20 |
+
"""Get the main port from Railway environment."""
|
| 21 |
+
port = os.environ.get("PORT", "8501")
|
| 22 |
+
return int(port)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def run_streamlit():
|
| 26 |
+
"""Run Streamlit app on main port."""
|
| 27 |
+
port = get_port()
|
| 28 |
+
cmd = [
|
| 29 |
+
sys.executable,
|
| 30 |
+
"-m",
|
| 31 |
+
"streamlit",
|
| 32 |
+
"run",
|
| 33 |
+
"Home.py",
|
| 34 |
+
"--server.port",
|
| 35 |
+
str(port),
|
| 36 |
+
"--server.address",
|
| 37 |
+
"0.0.0.0",
|
| 38 |
+
"--server.headless",
|
| 39 |
+
"true",
|
| 40 |
+
"--browser.gatherUsageStats",
|
| 41 |
+
"false",
|
| 42 |
+
]
|
| 43 |
+
print(f"π Starting Streamlit on port {port}")
|
| 44 |
+
subprocess.run(cmd, check=True)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def run_stock_server():
|
| 48 |
+
"""Run MCP Stock Server on port + 1."""
|
| 49 |
+
port = get_port() + 1
|
| 50 |
+
cmd = [sys.executable, "mcp_stock_server.py"]
|
| 51 |
+
print(f"π Starting MCP Stock Server on port {port}")
|
| 52 |
+
# Set the port environment variable for the stock server
|
| 53 |
+
env = os.environ.copy()
|
| 54 |
+
env["PORT"] = str(port)
|
| 55 |
+
subprocess.run(cmd, env=env, check=True)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def run_news_server():
|
| 59 |
+
"""Run MCP News Server on port + 2."""
|
| 60 |
+
port = get_port() + 2
|
| 61 |
+
cmd = [sys.executable, "mcp_news_server.py"]
|
| 62 |
+
print(f"π° Starting MCP News Server on port {port}")
|
| 63 |
+
# Set the port environment variable for the news server
|
| 64 |
+
env = os.environ.copy()
|
| 65 |
+
env["PORT"] = str(port)
|
| 66 |
+
subprocess.run(cmd, env=env, check=True)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def signal_handler(signum, frame):
|
| 70 |
+
"""Handle shutdown signals gracefully."""
|
| 71 |
+
print("\nπ Received shutdown signal. Stopping all services...")
|
| 72 |
+
sys.exit(0)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def main():
|
| 76 |
+
"""Main function to start all services."""
|
| 77 |
+
# Set up signal handlers
|
| 78 |
+
signal.signal(signal.SIGINT, signal_handler)
|
| 79 |
+
signal.signal(signal.SIGTERM, signal_handler)
|
| 80 |
+
|
| 81 |
+
print("π Starting Financial Agent Services...")
|
| 82 |
+
print(f"π Configuration:")
|
| 83 |
+
print(f" - Main Port: {get_port()}")
|
| 84 |
+
print(f" - Stock Server Port: {get_port() + 1}")
|
| 85 |
+
print(f" - News Server Port: {get_port() + 2}")
|
| 86 |
+
|
| 87 |
+
# Check if required files exist
|
| 88 |
+
required_files = ["Home.py", "mcp_stock_server.py", "mcp_news_server.py"]
|
| 89 |
+
for file in required_files:
|
| 90 |
+
if not Path(file).exists():
|
| 91 |
+
print(f"β Error: {file} not found!")
|
| 92 |
+
sys.exit(1)
|
| 93 |
+
|
| 94 |
+
# Update MCP server URLs in Home.py for Railway deployment
|
| 95 |
+
try:
|
| 96 |
+
print("π Updating MCP server URLs...")
|
| 97 |
+
subprocess.run([sys.executable, "update_mcp_urls.py"], check=True)
|
| 98 |
+
print("β
MCP server URLs updated successfully!")
|
| 99 |
+
except Exception as e:
|
| 100 |
+
print(f"β οΈ Warning: Could not update MCP URLs: {e}")
|
| 101 |
+
print("Continuing with default configuration...")
|
| 102 |
+
|
| 103 |
+
# Start all services in separate processes
|
| 104 |
+
processes = []
|
| 105 |
+
|
| 106 |
+
try:
|
| 107 |
+
# Start MCP servers first (they need to be ready before Streamlit)
|
| 108 |
+
print("π Starting MCP servers...")
|
| 109 |
+
|
| 110 |
+
# Start stock server
|
| 111 |
+
stock_process = multiprocessing.Process(
|
| 112 |
+
target=run_stock_server, name="stock-server"
|
| 113 |
+
)
|
| 114 |
+
stock_process.start()
|
| 115 |
+
processes.append(stock_process)
|
| 116 |
+
|
| 117 |
+
# Start news server
|
| 118 |
+
news_process = multiprocessing.Process(
|
| 119 |
+
target=run_news_server, name="news-server"
|
| 120 |
+
)
|
| 121 |
+
news_process.start()
|
| 122 |
+
processes.append(news_process)
|
| 123 |
+
|
| 124 |
+
# Wait a bit for MCP servers to start
|
| 125 |
+
print("β³ Waiting for MCP servers to initialize...")
|
| 126 |
+
time.sleep(5)
|
| 127 |
+
|
| 128 |
+
# Start Streamlit app
|
| 129 |
+
print("π Starting Streamlit app...")
|
| 130 |
+
streamlit_process = multiprocessing.Process(
|
| 131 |
+
target=run_streamlit, name="streamlit"
|
| 132 |
+
)
|
| 133 |
+
streamlit_process.start()
|
| 134 |
+
processes.append(streamlit_process)
|
| 135 |
+
|
| 136 |
+
print("β
All services started successfully!")
|
| 137 |
+
print(f"π Streamlit app available at: http://localhost:{get_port()}")
|
| 138 |
+
print(f"π Stock server available at: http://localhost:{get_port() + 1}")
|
| 139 |
+
print(f"π° News server available at: http://localhost:{get_port() + 2}")
|
| 140 |
+
|
| 141 |
+
# Optional: Run deployment test
|
| 142 |
+
if os.environ.get("RUN_DEPLOYMENT_TEST", "false").lower() == "true":
|
| 143 |
+
print("\nπ§ͺ Running deployment test...")
|
| 144 |
+
try:
|
| 145 |
+
subprocess.run([sys.executable, "test_deployment.py"], check=True)
|
| 146 |
+
print("β
Deployment test passed!")
|
| 147 |
+
except subprocess.CalledProcessError:
|
| 148 |
+
print("β οΈ Deployment test failed, but services may still be working...")
|
| 149 |
+
|
| 150 |
+
# Wait for all processes
|
| 151 |
+
for process in processes:
|
| 152 |
+
process.join()
|
| 153 |
+
|
| 154 |
+
except KeyboardInterrupt:
|
| 155 |
+
print("\nπ Shutting down services...")
|
| 156 |
+
except Exception as e:
|
| 157 |
+
print(f"β Error starting services: {e}")
|
| 158 |
+
finally:
|
| 159 |
+
# Terminate all processes
|
| 160 |
+
for process in processes:
|
| 161 |
+
if process.is_alive():
|
| 162 |
+
process.terminate()
|
| 163 |
+
process.join(timeout=5)
|
| 164 |
+
if process.is_alive():
|
| 165 |
+
process.kill()
|
| 166 |
+
print("β
All services stopped.")
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
if __name__ == "__main__":
|
| 170 |
+
main()
|
terminal_client.py
DELETED
|
@@ -1,454 +0,0 @@
|
|
| 1 |
-
import os
|
| 2 |
-
import asyncio
|
| 3 |
-
import json
|
| 4 |
-
from dotenv import load_dotenv
|
| 5 |
-
from openai import OpenAI
|
| 6 |
-
from mcp.client.session import ClientSession
|
| 7 |
-
from mcp.client.stdio import stdio_client
|
| 8 |
-
from mcp import StdioServerParameters, types
|
| 9 |
-
import yfinance as yf
|
| 10 |
-
|
| 11 |
-
# Load API key from.env file
|
| 12 |
-
load_dotenv()
|
| 13 |
-
|
| 14 |
-
# Check if API key exists
|
| 15 |
-
api_key = os.getenv("OPENROUTER_API_KEY")
|
| 16 |
-
model = os.getenv("MODEL")
|
| 17 |
-
if not api_key:
|
| 18 |
-
print("β Error: OPENROUTER_API_KEY not found in .env file")
|
| 19 |
-
exit(1)
|
| 20 |
-
if not model:
|
| 21 |
-
print("β Error: MODEL not found in .env file")
|
| 22 |
-
exit(1)
|
| 23 |
-
|
| 24 |
-
# Configure the client to connect to OpenRouter
|
| 25 |
-
client = OpenAI(
|
| 26 |
-
base_url="https://openrouter.ai/api/v1",
|
| 27 |
-
api_key=api_key,
|
| 28 |
-
)
|
| 29 |
-
|
| 30 |
-
# Global variable to store discovered tools
|
| 31 |
-
discovered_tools = []
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
def get_available_tickers():
|
| 35 |
-
"""Hardcoded tickers for testing"""
|
| 36 |
-
|
| 37 |
-
# Fallback to 10 most popular tickers if Lookup fails
|
| 38 |
-
try:
|
| 39 |
-
print("Using fallback to 10 most popular tickers...")
|
| 40 |
-
popular_tickers = {}
|
| 41 |
-
|
| 42 |
-
# 10 most popular tickers
|
| 43 |
-
popular_ticker_list = [
|
| 44 |
-
"AAPL",
|
| 45 |
-
"MSFT",
|
| 46 |
-
"GOOGL",
|
| 47 |
-
"AMZN",
|
| 48 |
-
"TSLA",
|
| 49 |
-
"META",
|
| 50 |
-
"NVDA",
|
| 51 |
-
"BRK-B",
|
| 52 |
-
"JNJ",
|
| 53 |
-
"JPM",
|
| 54 |
-
]
|
| 55 |
-
|
| 56 |
-
print(f"Loading {len(popular_ticker_list)} popular tickers...")
|
| 57 |
-
|
| 58 |
-
# Get company names for each ticker
|
| 59 |
-
for ticker in popular_ticker_list:
|
| 60 |
-
try:
|
| 61 |
-
ticker_obj = yf.Ticker(ticker)
|
| 62 |
-
info = ticker_obj.info
|
| 63 |
-
|
| 64 |
-
if info and (info.get("longName") or info.get("shortName")):
|
| 65 |
-
company_name = info.get("longName", info.get("shortName", ticker))
|
| 66 |
-
popular_tickers[ticker] = company_name
|
| 67 |
-
|
| 68 |
-
except Exception as e:
|
| 69 |
-
# Skip tickers that cause errors
|
| 70 |
-
continue
|
| 71 |
-
|
| 72 |
-
print(f"Successfully loaded {len(popular_tickers)} tickers")
|
| 73 |
-
return popular_tickers
|
| 74 |
-
|
| 75 |
-
except Exception as e:
|
| 76 |
-
print(f"Error fetching available tickers: {e}")
|
| 77 |
-
# Final fallback to basic tickers if there's an error
|
| 78 |
-
return {
|
| 79 |
-
"AAPL": "Apple Inc.",
|
| 80 |
-
"TSLA": "Tesla Inc.",
|
| 81 |
-
"MSFT": "Microsoft Corporation",
|
| 82 |
-
"GOOGL": "Alphabet Inc. (Google)",
|
| 83 |
-
}
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
def search_ticker(ticker_symbol):
|
| 87 |
-
"""Search for a ticker symbol and get its company name using yfinance."""
|
| 88 |
-
try:
|
| 89 |
-
ticker = yf.Ticker(ticker_symbol)
|
| 90 |
-
info = ticker.info
|
| 91 |
-
company_name = info.get("longName", info.get("shortName", ticker_symbol))
|
| 92 |
-
return company_name
|
| 93 |
-
except Exception as e:
|
| 94 |
-
return None
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
async def get_news_data(ticker: str) -> str:
|
| 98 |
-
"""Get news data by calling the news server via MCP."""
|
| 99 |
-
try:
|
| 100 |
-
# Set up MCP server parameters
|
| 101 |
-
server_params = StdioServerParameters(command="python", args=["news_server.py"])
|
| 102 |
-
|
| 103 |
-
# Connect to the MCP server
|
| 104 |
-
async with stdio_client(server_params) as (read, write):
|
| 105 |
-
async with ClientSession(read, write) as session:
|
| 106 |
-
# Initialize the session
|
| 107 |
-
await session.initialize()
|
| 108 |
-
|
| 109 |
-
# Call the get_latest_news tool
|
| 110 |
-
print(f"π Calling MCP tool: get_latest_news with ticker: {ticker}")
|
| 111 |
-
try:
|
| 112 |
-
result = await asyncio.wait_for(
|
| 113 |
-
session.call_tool("get_latest_news", {"ticker": ticker}),
|
| 114 |
-
timeout=30.0, # 30 second timeout
|
| 115 |
-
)
|
| 116 |
-
print(f"π MCP result type: {type(result)}")
|
| 117 |
-
print(f"π MCP result content: {result.content}")
|
| 118 |
-
except asyncio.TimeoutError:
|
| 119 |
-
print("β MCP call timed out")
|
| 120 |
-
return f"Timeout getting news for {ticker}"
|
| 121 |
-
except Exception as e:
|
| 122 |
-
print(f"β MCP call error: {e}")
|
| 123 |
-
return f"Error getting news for {ticker}: {e}"
|
| 124 |
-
|
| 125 |
-
# Parse the result properly
|
| 126 |
-
if result.content:
|
| 127 |
-
for content in result.content:
|
| 128 |
-
if isinstance(content, types.TextContent):
|
| 129 |
-
return content.text
|
| 130 |
-
|
| 131 |
-
return f"No news data returned for {ticker}"
|
| 132 |
-
|
| 133 |
-
except Exception as e:
|
| 134 |
-
return f"Error getting news for {ticker}: {e}"
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
async def get_stock_data(ticker: str) -> str:
|
| 138 |
-
"""Get stock data by calling the stock server via MCP."""
|
| 139 |
-
try:
|
| 140 |
-
# Set up MCP server parameters
|
| 141 |
-
server_params = StdioServerParameters(
|
| 142 |
-
command="python", args=["stock_data_server.py"]
|
| 143 |
-
)
|
| 144 |
-
|
| 145 |
-
# Connect to the MCP server
|
| 146 |
-
async with stdio_client(server_params) as (read, write):
|
| 147 |
-
async with ClientSession(read, write) as session:
|
| 148 |
-
# Initialize the session
|
| 149 |
-
await session.initialize()
|
| 150 |
-
|
| 151 |
-
# Call the get_historical_stock_data tool
|
| 152 |
-
print(
|
| 153 |
-
f"π Calling MCP tool: get_historical_stock_data with ticker: {ticker}"
|
| 154 |
-
)
|
| 155 |
-
try:
|
| 156 |
-
result = await asyncio.wait_for(
|
| 157 |
-
session.call_tool(
|
| 158 |
-
"get_historical_stock_data", {"ticker": ticker}
|
| 159 |
-
),
|
| 160 |
-
timeout=30.0, # 30 second timeout
|
| 161 |
-
)
|
| 162 |
-
print(f"π MCP result type: {type(result)}")
|
| 163 |
-
print(f"π MCP result content: {result.content}")
|
| 164 |
-
except asyncio.TimeoutError:
|
| 165 |
-
print("β MCP call timed out")
|
| 166 |
-
return f"Timeout getting stock data for {ticker}"
|
| 167 |
-
except Exception as e:
|
| 168 |
-
print(f"β MCP call error: {e}")
|
| 169 |
-
return f"Error getting stock data for {ticker}: {e}"
|
| 170 |
-
|
| 171 |
-
# Parse the result properly
|
| 172 |
-
if result.content:
|
| 173 |
-
for content in result.content:
|
| 174 |
-
if isinstance(content, types.TextContent):
|
| 175 |
-
return content.text
|
| 176 |
-
|
| 177 |
-
return f"No stock data returned for {ticker}"
|
| 178 |
-
|
| 179 |
-
except Exception as e:
|
| 180 |
-
return f"Error getting stock data for {ticker}: {e}"
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
def initialize_tools():
|
| 184 |
-
"""Initialize the available tools."""
|
| 185 |
-
global discovered_tools
|
| 186 |
-
|
| 187 |
-
discovered_tools = [
|
| 188 |
-
{
|
| 189 |
-
"type": "function",
|
| 190 |
-
"function": {
|
| 191 |
-
"name": "get_latest_news",
|
| 192 |
-
"description": "Fetches recent news headlines and descriptions for a specific stock ticker. Use this when user asks about news, updates, or recent events about a company.",
|
| 193 |
-
"parameters": {
|
| 194 |
-
"type": "object",
|
| 195 |
-
"properties": {
|
| 196 |
-
"ticker": {
|
| 197 |
-
"type": "string",
|
| 198 |
-
"description": "The stock ticker symbol (e.g., 'AAPL', 'GOOG', 'TSLA'). Must be a valid stock symbol.",
|
| 199 |
-
}
|
| 200 |
-
},
|
| 201 |
-
"required": ["ticker"],
|
| 202 |
-
},
|
| 203 |
-
},
|
| 204 |
-
},
|
| 205 |
-
{
|
| 206 |
-
"type": "function",
|
| 207 |
-
"function": {
|
| 208 |
-
"name": "get_historical_stock_data",
|
| 209 |
-
"description": "Fetches recent historical stock data (Open, High, Low, Close, Volume) for a given ticker. Use this when user asks about stock performance, price data, or market performance.",
|
| 210 |
-
"parameters": {
|
| 211 |
-
"type": "object",
|
| 212 |
-
"properties": {
|
| 213 |
-
"ticker": {
|
| 214 |
-
"type": "string",
|
| 215 |
-
"description": "The stock ticker symbol (e.g., 'AAPL', 'TSLA', 'MSFT'). Must be a valid stock symbol.",
|
| 216 |
-
}
|
| 217 |
-
},
|
| 218 |
-
"required": ["ticker"],
|
| 219 |
-
},
|
| 220 |
-
},
|
| 221 |
-
},
|
| 222 |
-
]
|
| 223 |
-
|
| 224 |
-
print(f"β
Initialized {len(discovered_tools)} tools")
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
async def execute_tool_call(tool_call):
    """Dispatch a model tool call to the matching MCP-backed helper.

    Args:
        tool_call: An OpenAI tool-call object exposing ``.function.name`` and
            JSON-encoded ``.function.arguments``.

    Returns:
        str: The tool output, or a human-readable error message. This function
        never raises — failures are returned as strings so they can be fed
        back to the model as tool results.
    """
    try:
        tool_name = tool_call.function.name
        arguments = json.loads(tool_call.function.arguments)
        ticker = arguments.get("ticker")

        if tool_name == "get_latest_news":
            return await get_news_data(ticker)
        elif tool_name == "get_historical_stock_data":
            return await get_stock_data(ticker)
        else:
            return f"Unknown tool: {tool_name}"
    except json.JSONDecodeError:
        # Plain string literal: there is nothing to interpolate here.
        return "Error: Invalid tool arguments format"
    except Exception as e:
        return f"Error executing tool {tool_call.function.name}: {e}"
# The master prompt that defines the agent's behavior: it instructs the model
# to call BOTH data tools (news + historical prices) on every query and only
# then synthesize a curated answer to the user's specific question.
system_prompt = """
You are a financial assistant that provides comprehensive analysis based on real-time data. You MUST use tools to get data and then curate the information to answer the user's specific question.

AVAILABLE TOOLS:
- get_latest_news: Get recent news for a ticker
- get_historical_stock_data: Get stock performance data for a ticker

CRITICAL INSTRUCTIONS:
1. You MUST call BOTH tools (get_latest_news AND get_historical_stock_data) for every query
2. After getting both news and stock data, analyze and synthesize the information
3. Answer the user's specific question based on the data you gathered
4. Provide insights, trends, and recommendations based on the combined data
5. Format your response clearly with sections for news, performance, and analysis

EXAMPLE WORKFLOW:
1. User asks: "Should I invest in AAPL?"
2. You call: get_latest_news with {"ticker": "AAPL"}
3. You call: get_historical_stock_data with {"ticker": "AAPL"}
4. You analyze both datasets and provide investment advice based on news sentiment and stock performance

You are FORBIDDEN from responding without calling both tools. Always call both tools first, then provide a curated analysis based on the user's question.
"""
# Some OpenAI-compatible providers reject tool-call IDs longer than 40 chars
# when they are echoed back in tool-result messages.
_MAX_TOOL_CALL_ID_LEN = 40


async def run_agent(user_query):
    """Run one agent turn: ask the model, execute its tool calls, print the answer.

    Args:
        user_query: The fully-formed user question (already includes the ticker).

    Side effects:
        Prints progress and the final model response to stdout. Errors are
        caught and reported rather than raised so the interactive loop keeps
        running.
    """
    print(f"\n🚀 User Query: {user_query}")

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_query},
    ]

    try:
        # First round: force the model to emit tool calls.
        print("🤖 Getting response from model...")
        response = client.chat.completions.create(
            model=model,
            messages=messages,
            tools=discovered_tools,
            tool_choice="required",
        )

        if not response.choices:
            print("❌ Error: No response from model")
            return

        response_message = response.choices[0].message
        print(f"📝 Response type: {type(response_message)}")
        print(
            f"🔧 Has tool calls: {hasattr(response_message, 'tool_calls') and response_message.tool_calls}"
        )

        # Clamp over-long provider-generated IDs before they are echoed back.
        if getattr(response_message, "tool_calls", None):
            for tool_call in response_message.tool_calls:
                if len(tool_call.id) > _MAX_TOOL_CALL_ID_LEN:
                    tool_call.id = tool_call.id[:_MAX_TOOL_CALL_ID_LEN]

        messages.append(response_message)

        if response_message.tool_calls:
            print("\n🛠️ Executing Tool Calls ---")
            for tool_call in response_message.tool_calls:
                print(f"📞 Calling: {tool_call.function.name}")
                print(f"📋 Arguments: {tool_call.function.arguments}")

                tool_result = await execute_tool_call(tool_call)
                # Only preview the first 200 chars to keep the console readable.
                print(
                    f"📊 Result: {tool_result[:200] if tool_result else 'No result'}..."
                )

                messages.append(
                    {
                        "role": "tool",
                        "tool_call_id": tool_call.id[:_MAX_TOOL_CALL_ID_LEN],
                        "content": tool_result if tool_result else "No data available",
                    }
                )

            # Second round: synthesize the final answer with the SAME configured
            # model. (This was previously hard-coded to "openai/gpt-4o-mini",
            # silently diverging from the model used for the first round.)
            print("🤖 Getting final response from model...")
            print(f"📝 Messages count: {len(messages)}")
            final_response = client.chat.completions.create(
                model=model,
                messages=messages,
            )

            print("\n🤖 Final Agent Response ---")
            if final_response.choices:
                final_content = final_response.choices[0].message.content
                print(
                    f"Final response length: {len(final_content) if final_content else 0}"
                )
                print(final_content if final_content else "Empty response")
            else:
                print("No response generated")
            print("----------------------------")
        else:
            # The model answered directly despite tool_choice="required".
            print("\n🤖 Agent Response ---")
            print(response_message.content)
            print("----------------------")

    except Exception as e:
        print(f"❌ Error: {e}")
        print("Please try again with a different question.")
async def main():
    """Interactive entry point: pick a ticker, ask a question, run the agent.

    Loops until the user quits, re-rendering the ticker menu each pass so that
    tickers added via search remain selectable.
    """
    try:
        initialize_tools()
        available_tickers = get_available_tickers()

        print("=== QueryStockAI ===")
        print("Select a stock ticker to analyze:")
        print("Type 'quit' or 'exit' to stop the program.")
        print("=" * 50)

        while True:
            print("\n📈 Available Stocks:")
            ticker_list = list(available_tickers.items())
            for idx, (ticker, name) in enumerate(ticker_list, 1):
                print(f"  {idx}. {ticker} ({name})")
            print("  s. Search for custom ticker")
            print("  q. Quit")

            selection = (
                input("\n🎤 Select a stock, search (s), or type 'quit': ")
                .strip()
                .lower()
            )

            if selection in ["quit", "exit", "q"]:
                print("👋 Goodbye!")
                break

            if selection == "s":
                # Custom ticker lookup path.
                custom_ticker = (
                    input("Enter ticker symbol (e.g., AAPL): ").strip().upper()
                )
                if not custom_ticker:
                    print("❌ Please enter a valid ticker symbol.")
                    continue
                company_name = search_ticker(custom_ticker)
                if not company_name:
                    print(f"❌ Could not find ticker: {custom_ticker}")
                    continue
                print(f"✅ Found: {custom_ticker} - {company_name}")
                # Remember it for the rest of this session.
                available_tickers[custom_ticker] = company_name
                selected_ticker = custom_ticker
            else:
                # Numeric menu selection path, with guard clauses for bad input.
                try:
                    choice = int(selection)
                except ValueError:
                    print(
                        "❌ Invalid selection. Please enter a number, 's' for search, or 'quit'."
                    )
                    continue
                if choice < 1 or choice > len(ticker_list):
                    print(
                        f"❌ Invalid selection. Please choose 1-{len(ticker_list)}, 's' for search, or 'quit'."
                    )
                    continue
                selected_ticker = ticker_list[choice - 1][0]

            print(f"\n📈 Selected: {selected_ticker}")

            # Both news and stock data are always fetched for the chosen ticker.
            print(f"\n🔄 Fetching comprehensive data for {selected_ticker}...")

            user_question = input(
                f"\n🎤 What would you like to know about {selected_ticker}? (e.g., 'How is it performing?', 'What's the latest news?', 'Should I invest?'): "
            ).strip()
            if not user_question:
                user_question = (
                    f"How is {selected_ticker} performing and what's the latest news?"
                )

            user_query = f"Based on the latest news and stock performance data for {selected_ticker}, {user_question}"
            await run_agent(user_query)

    except KeyboardInterrupt:
        print("\n\n👋 Goodbye!")
    except Exception as e:
        print(f"❌ Error: {e}")


if __name__ == "__main__":
    asyncio.run(main())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
test_deployment.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Test script to verify Railway deployment is working correctly.
|
| 4 |
+
This script checks if all services are running on the expected ports.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import os
|
| 8 |
+
import time
|
| 9 |
+
import requests
|
| 10 |
+
import subprocess
|
| 11 |
+
import sys
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def get_port():
    """Return the main service port from the Railway ``PORT`` env var.

    Falls back to 8501 (the Streamlit default) when the variable is unset or
    not a valid integer, instead of crashing the whole test run with a
    ``ValueError``.
    """
    try:
        return int(os.environ.get("PORT", "8501"))
    except ValueError:
        return 8501
def test_service(url, service_name, timeout=10):
    """Return True when *service_name* answers HTTP 200 at *url*.

    Connection problems and non-200 statuses are reported to stdout and
    counted as failures rather than raised.
    """
    try:
        resp = requests.get(url, timeout=timeout)
    except requests.exceptions.RequestException as exc:
        print(f"❌ {service_name} is not responding at {url}: {exc}")
        return False
    if resp.status_code == 200:
        print(f"✅ {service_name} is running at {url}")
        return True
    print(f"❌ {service_name} returned status {resp.status_code} at {url}")
    return False
def test_mcp_endpoint(url, service_name):
    """Return True when the ``/mcp`` endpoint under *url* answers HTTP 200."""
    try:
        resp = requests.get(f"{url}/mcp", timeout=10)
    except requests.exceptions.RequestException as exc:
        print(f"❌ {service_name} MCP endpoint is not accessible: {exc}")
        return False
    if resp.status_code == 200:
        print(f"✅ {service_name} MCP endpoint is accessible at {url}/mcp")
        return True
    print(
        f"❌ {service_name} MCP endpoint returned status {resp.status_code}"
    )
    return False
def main():
    """Probe each deployed service and its MCP endpoint, then summarize.

    Returns:
        int: 0 when every check passed, 1 otherwise (used as process exit code).
    """
    port = get_port()

    print("🧪 Testing Railway Deployment...")
    print("📋 Configuration:")
    print(f"   - Main Port: {port}")
    print(f"   - Stock Server Port: {port + 1}")
    print(f"   - News Server Port: {port + 2}")
    print()

    # Plain HTTP liveness checks for all three services.
    services = [
        (f"http://localhost:{port}", "Streamlit App"),
        (f"http://localhost:{port + 1}", "Stock Server"),
        (f"http://localhost:{port + 2}", "News Server"),
    ]
    results = [(name, test_service(url, name)) for url, name in services]

    print()
    print("🔌 Testing MCP Endpoints...")

    # Only the two MCP servers expose a /mcp endpoint.
    mcp_services = [
        (f"http://localhost:{port + 1}", "Stock Server"),
        (f"http://localhost:{port + 2}", "News Server"),
    ]
    mcp_results = [(name, test_mcp_endpoint(url, name)) for url, name in mcp_services]

    print()
    print("📊 Test Results Summary:")

    all_passed = True
    for name, ok in results + mcp_results:
        print(f"   {name}: {'✅ PASS' if ok else '❌ FAIL'}")
        if not ok:
            all_passed = False

    print()
    if all_passed:
        print("🎉 All services are running correctly!")
        print("🚀 Your Railway deployment is ready to use.")
    else:
        print("⚠️ Some services failed. Check the logs above for details.")
        print("🔧 Try restarting the deployment or check Railway logs.")

    return 0 if all_passed else 1
if __name__ == "__main__":
    # Give the freshly launched services a moment to bind their ports
    # before probing them.
    print("⏳ Waiting for services to start...")
    time.sleep(5)

    sys.exit(main())
update_mcp_urls.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Script to update MCP server URLs for Railway deployment.
|
| 4 |
+
This script modifies the MCP client configuration in Home.py to use the correct ports.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import os
|
| 8 |
+
import re
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def get_port():
    """Return the main service port from the Railway ``PORT`` env var.

    Falls back to 8501 (the Streamlit default) when the variable is unset or
    not a valid integer, so a malformed ``PORT`` cannot abort the URL-rewrite
    step with a ``ValueError``.
    """
    try:
        return int(os.environ.get("PORT", "8501"))
    except ValueError:
        return 8501
def update_home_py():
    """Rewrite the MultiServerMCPClient configuration in Home.py for Railway.

    The stock MCP server runs on ``PORT + 1`` and the news MCP server on
    ``PORT + 2``. The existing client-configuration block is located with a
    regex and replaced in place.

    Side effects:
        Overwrites ``Home.py`` in the current working directory when the
        configuration block is found; otherwise warns and leaves the file
        untouched.
    """
    port = get_port()

    # Ports for the two MCP servers, derived from the main port.
    stock_port = port + 1
    news_port = port + 2

    with open("Home.py", "r", encoding="utf-8") as f:
        content = f.read()

    # Matches the MultiServerMCPClient(...) block regardless of the URLs it
    # currently contains.
    pattern = r'client = MultiServerMCPClient\(\s*\{\s*"news_server":\s*\{\s*"url":\s*"[^"]*",\s*"transport":\s*"[^"]*",?\s*\},\s*"stock_server":\s*\{\s*"url":\s*"[^"]*",\s*"transport":\s*"[^"]*",?\s*\},\s*\}\s*\)'

    new_config = f"""client = MultiServerMCPClient(
    {{
        "news_server": {{
            "url": "http://localhost:{news_port}/mcp",
            "transport": "streamable_http",
        }},
        "stock_server": {{
            "url": "http://localhost:{stock_port}/mcp",
            "transport": "streamable_http",
        }},
    }}
)"""

    # subn lets us detect the silent-no-op case: previously the file was
    # rewritten unchanged and success was reported even when nothing matched.
    updated_content, n_subs = re.subn(pattern, new_config, content, flags=re.DOTALL)

    if n_subs == 0:
        print("⚠️ No MultiServerMCPClient configuration found in Home.py; nothing updated.")
        return

    with open("Home.py", "w", encoding="utf-8") as f:
        f.write(updated_content)

    print("✅ Updated Home.py with MCP server URLs:")
    print(f"   - Stock server: http://localhost:{stock_port}/mcp")
    print(f"   - News server: http://localhost:{news_port}/mcp")
if __name__ == "__main__":
    # Simple CLI entry point: rewrite Home.py once and exit.
    update_home_py()
uv.lock
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
|
|