|
|
|
|
|
""" |
|
|
Premium Trading Dashboard - Full Featured |
|
|
Beautiful Vercel-style dashboard with VM data integration |
|
|
""" |
|
|
|
|
|
import os |
|
|
import sys |
|
|
import pandas as pd |
|
|
import gradio as gr |
|
|
import plotly.graph_objects as go |
|
|
import plotly.express as px |
|
|
from datetime import datetime, timedelta, timezone |
|
|
import logging |
|
|
import requests |
|
|
import time |
|
|
from alpaca.trading.client import TradingClient |
|
|
from alpaca.trading.requests import GetOrdersRequest, GetPortfolioHistoryRequest |
|
|
from alpaca.trading.enums import OrderStatus |
|
|
from alpaca.data.timeframe import TimeFrame |
|
|
from alpaca.data.historical import StockHistoricalDataClient |
|
|
from textblob import TextBlob |
|
|
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer |
|
|
import yfinance as yf |
|
|
|
|
|
|
|
|
# Alpaca API credentials and VM data-server location; each can be overridden
# via environment variables.
# SECURITY NOTE(review): real-looking key/secret values are hard-coded here as
# fallbacks. These credentials should be rotated and the defaults removed from
# source control — anyone with this file can trade against the account.
API_KEY = os.getenv('ALPACA_API_KEY', 'PK2FD9B2S86LHR7ZBHG1')

SECRET_KEY = os.getenv('ALPACA_SECRET_KEY', 'QPmGPDgbPArvHv6cldBXc7uWddapYcIAnBhtkuBW')

# Base URL of the VM-hosted API that serves IPO/stats/log data.
VM_API_URL = os.getenv('VM_API_URL', 'http://34.56.193.18:8090')
|
|
|
|
|
|
|
|
# Root logging configuration: INFO level, timestamped records, stream handler
# so output shows up in the hosting platform's console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),
    ]
)

# Module-level logger used by every function in this dashboard.
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
# Startup banner: build stamp plus interpreter/cwd details for debugging deploys.
logger.info("🚀 Starting Premium Trading Dashboard... (Build: 2025-07-29 05:15 - Fixed directory structure)")

logger.info(f"Python version: {sys.version}")

logger.info(f"Working directory: {os.getcwd()}")
|
|
|
|
|
|
|
|
# Create the Alpaca trading client at import time. The dashboard is unusable
# without it, so any initialization failure is logged and re-raised (fatal).
logger.info("🔌 Initializing Alpaca trading client...")
try:
    trading_client = TradingClient(api_key=API_KEY, secret_key=SECRET_KEY)
    logger.info("✅ Alpaca trading client initialized successfully")
except Exception as e:
    logger.error(f"❌ Failed to initialize Alpaca trading client: {e}")
    raise
|
|
|
|
|
# Create the Alpaca historical market-data client at import time; failure is
# fatal for the same reason as the trading client above.
logger.info("📊 Initializing Alpaca data client...")
try:
    data_client = StockHistoricalDataClient(API_KEY, SECRET_KEY)
    logger.info("✅ Alpaca data client initialized successfully")
except Exception as e:
    logger.error(f"❌ Failed to initialize Alpaca data client: {e}")
    raise
|
|
|
|
|
|
|
|
# Sentiment engines are created once at import time; failures are fatal
# because the investment-performance table depends on them.
logger.info("🧠 Initializing sentiment analysis engines...")
try:
    vader = SentimentIntensityAnalyzer()
    logger.info("✅ VADER sentiment analyzer initialized")
except Exception as e:
    logger.error(f"❌ Failed to initialize VADER: {e}")
    raise

try:
    # TextBlob is already imported at the top of the file (the redundant local
    # re-import was removed). Instantiate once as a smoke test so a broken
    # corpora installation fails fast at startup rather than mid-request.
    test_blob = TextBlob("test")
    logger.info("✅ TextBlob sentiment analyzer initialized")
except Exception as e:
    logger.error(f"❌ Failed to initialize TextBlob: {e}")
    raise

# Default headers for outbound HTTP requests (news/Reddit scraping).
headers = {'User-Agent': 'TradingHistoryBacktester/1.0'}
logger.info("✅ HTTP headers configured")
|
|
|
|
|
|
|
|
# Vercel-inspired light-theme palette shared by every chart and HTML table.
COLORS = {
    'primary': '#0070f3',        # brand blue (portfolio equity line)
    'success': '#00d647',        # gains / INVESTED status
    'error': '#ff0080',          # losses / UNKNOWN status
    'warning': '#f5a623',        # eligible-but-not-invested
    'neutral': '#8b949e',        # wrong security type / flat P&L
    'background': '#fafafa',
    'surface': '#ffffff',
    'text': '#000000',
    'text_secondary': '#666666',
    'border': '#eaeaea'          # chart gridlines
}
|
|
|
|
|
def fetch_from_vm(endpoint, default_value=None):
    """GET ``/api/<endpoint>`` from the VM data server.

    Returns the decoded JSON payload on HTTP 200; otherwise logs the problem
    and returns *default_value*. Network errors are swallowed deliberately so
    a down VM never breaks the dashboard.
    """
    try:
        resp = requests.get(f"{VM_API_URL}/api/{endpoint}", timeout=10)
        if resp.status_code != 200:
            logger.warning(f"VM API {endpoint} returned {resp.status_code}")
            return default_value
        return resp.json()
    except Exception as e:
        logger.error(f"Error fetching from VM {endpoint}: {e}")
        return default_value
|
|
|
|
|
def get_account_info():
    """Get current account information from Alpaca.

    Returns:
        dict with float values for ``portfolio_value``, ``buying_power``,
        ``cash``, ``equity``, ``day_change``, ``day_change_percent`` and
        ``last_equity``; all zeros if the API call fails.
    """
    try:
        account = trading_client.get_account()
        # getattr with a default already covers the missing-attribute case
        # (the old hasattr/getattr combination was redundant); `or 0` also
        # guards against the API returning None, which previously made
        # float(None) raise and silently zeroed out the whole response.
        unrealized_pl = getattr(account, 'unrealized_pl', None) or 0
        unrealized_plpc = getattr(account, 'unrealized_plpc', None) or 0
        return {
            'portfolio_value': float(account.portfolio_value),
            'buying_power': float(account.buying_power),
            'cash': float(account.cash),
            'equity': float(account.equity),
            'day_change': float(unrealized_pl),
            'day_change_percent': float(unrealized_plpc) * 100,
            'last_equity': float(account.last_equity) if account.last_equity else 0
        }
    except Exception as e:
        logger.error(f"Error fetching account info: {e}")
        # Zeroed fallback keeps the UI rendering instead of crashing.
        return {
            'portfolio_value': 0, 'buying_power': 0, 'cash': 0, 'equity': 0,
            'day_change': 0, 'day_change_percent': 0, 'last_equity': 0
        }
|
|
|
|
|
def get_portfolio_history():
    """Return a DataFrame of daily portfolio equity for the last month.

    Columns: ``timestamp`` (tz-aware UTC datetimes) and ``equity``.
    Returns an empty DataFrame if the Alpaca call fails.
    """
    try:
        request = GetPortfolioHistoryRequest(
            period="1M",
            timeframe="1D",
            extended_hours=False
        )
        history = trading_client.get_portfolio_history(request)

        # Epoch seconds -> aware UTC datetimes, paired with the equity series.
        frame = pd.DataFrame({
            'timestamp': [datetime.fromtimestamp(ts, tz=timezone.utc) for ts in history.timestamp],
            'equity': history.equity
        })
        return frame.dropna()
    except Exception as e:
        logger.error(f"Error fetching portfolio history: {e}")
        return pd.DataFrame()
|
|
|
|
|
def get_current_positions():
    """Return a list of dicts describing every open Alpaca position.

    Each dict carries symbol, qty, market_value, cost_basis, unrealized_pl,
    unrealized_plpc (as a percentage) and current_price. [] on error.
    """
    try:
        return [
            {
                'symbol': p.symbol,
                'qty': float(p.qty),
                'market_value': float(p.market_value),
                'cost_basis': float(p.cost_basis),
                'unrealized_pl': float(p.unrealized_pl),
                'unrealized_plpc': float(p.unrealized_plpc) * 100,
                # current_price can be None outside market hours.
                'current_price': float(p.current_price) if p.current_price else 0
            }
            for p in trading_client.get_all_positions()
        ]
    except Exception as e:
        logger.error(f"Error fetching positions: {e}")
        return []
|
|
|
|
|
def create_portfolio_chart():
    """Build the 30-day portfolio equity line chart as a Plotly figure."""
    history = get_portfolio_history()

    fig = go.Figure()
    if history.empty:
        # Centered placeholder message when Alpaca returned no history.
        fig.add_annotation(
            text="No portfolio history available",
            x=0.5, y=0.5,
            xref="paper", yref="paper",
            showarrow=False,
            font=dict(size=16, color=COLORS['text_secondary'])
        )
    else:
        fig.add_trace(go.Scatter(
            x=history['timestamp'],
            y=history['equity'],
            mode='lines',
            name='Portfolio Value',
            line=dict(color=COLORS['primary'], width=3),
            fill='tonexty',
            fillcolor=f"rgba(0, 112, 243, 0.1)",
            hovertemplate='<b>%{y:$,.2f}</b><br>%{x}<extra></extra>'
        ))

        # Call out the most recent equity value directly on the chart.
        if len(history) > 0:
            latest = history['equity'].iloc[-1]
            fig.add_annotation(
                x=history['timestamp'].iloc[-1],
                y=latest,
                text=f"${latest:,.2f}",
                showarrow=True,
                arrowhead=2,
                arrowcolor=COLORS['primary'],
                bgcolor="white",
                bordercolor=COLORS['primary'],
                borderwidth=2,
                font=dict(size=12, color=COLORS['text'])
            )

    # Shared layout for both the populated and placeholder figures.
    fig.update_layout(
        title=dict(
            text="Portfolio Value (Last 30 Days)",
            font=dict(size=24, color=COLORS['text'], family="Inter"),
            x=0.02
        ),
        xaxis=dict(
            title="Date",
            showgrid=True,
            gridcolor=COLORS['border'],
            color=COLORS['text_secondary']
        ),
        yaxis=dict(
            title="Portfolio Value ($)",
            showgrid=True,
            gridcolor=COLORS['border'],
            color=COLORS['text_secondary'],
            tickformat='$,.0f'
        ),
        plot_bgcolor='white',
        paper_bgcolor='white',
        height=400,
        margin=dict(l=60, r=40, t=60, b=60),
        hovermode='x unified',
        showlegend=False
    )

    return fig
|
|
|
|
|
def create_ipo_discovery_chart():
    """Build the IPO investment-decision donut chart as a Plotly figure."""
    ipos = fetch_from_vm('ipos?limit=100', [])

    if not ipos:
        # Placeholder annotation when the VM has no IPO records.
        fig = go.Figure()
        fig.add_annotation(
            text="No IPO data available from VM",
            x=0.5, y=0.5,
            xref="paper", yref="paper",
            showarrow=False,
            font=dict(size=16, color=COLORS['text_secondary'])
        )
    else:
        # Tally IPOs by investment decision.
        status_counts = {}
        for record in ipos:
            decision = record.get('investment_status', 'UNKNOWN')
            status_counts[decision] = status_counts.get(decision, 0) + 1

        # Fixed decision -> color mapping; unknown labels fall back to neutral.
        color_map = {
            'INVESTED': COLORS['success'],
            'ELIGIBLE_NOT_INVESTED': COLORS['warning'],
            'WRONG_TYPE': COLORS['neutral'],
            'UNKNOWN': COLORS['error']
        }
        labels = list(status_counts.keys())
        slice_colors = [color_map.get(label, COLORS['neutral']) for label in labels]

        fig = go.Figure(data=[go.Pie(
            labels=labels,
            values=list(status_counts.values()),
            hole=0.4,
            marker=dict(colors=slice_colors),
            textinfo='label+percent',
            textposition='outside'
        )])

    fig.update_layout(
        title=dict(
            text="IPO Investment Decisions",
            font=dict(size=24, color=COLORS['text'], family="Inter"),
            x=0.5
        ),
        plot_bgcolor='white',
        paper_bgcolor='white',
        height=400,
        margin=dict(l=60, r=60, t=60, b=60),
        showlegend=True
    )

    return fig
|
|
|
|
|
def refresh_account_overview():
    """Return formatted strings for the account overview cards.

    Order: portfolio value, buying power, cash, day change, equity.
    """
    account = get_account_info()

    # Arrow emoji and sign reflect the direction of the day's change.
    change = account['day_change']
    change_pct = account['day_change_percent']
    if change > 0:
        day_change = f"↗️ +${change:,.2f} (+{change_pct:.2f}%)"
    elif change < 0:
        day_change = f"↘️ ${change:,.2f} ({change_pct:.2f}%)"
    else:
        day_change = f"➡️ ${change:,.2f} ({change_pct:.2f}%)"

    return (
        f"${account['portfolio_value']:,.2f}",
        f"${account['buying_power']:,.2f}",
        f"${account['cash']:,.2f}",
        day_change,
        f"${account['equity']:,.2f}",
    )
|
|
|
|
|
def refresh_positions_table():
    """Return a display-ready DataFrame of current positions.

    Empty frame (with headers) when there are no open positions.
    """
    positions = get_current_positions()
    if not positions:
        return pd.DataFrame(columns=['Symbol', 'Quantity', 'Market Value', 'Unrealized P&L', 'Unrealized %'])

    rows = []
    for pos in positions:
        # Green/red/white dot summarizes the sign of the unrealized P&L.
        if pos['unrealized_pl'] > 0:
            indicator = "🟢"
        elif pos['unrealized_pl'] < 0:
            indicator = "🔴"
        else:
            indicator = "⚪"
        rows.append({
            'Symbol': f"{indicator} {pos['symbol']}",
            'Quantity': f"{pos['qty']:.0f}",
            'Market Value': f"${pos['market_value']:,.2f}",
            'Unrealized P&L': f"${pos['unrealized_pl']:,.2f}",
            'Unrealized %': f"{pos['unrealized_plpc']:.2f}%"
        })

    return pd.DataFrame(rows)
|
|
|
|
|
def refresh_ipo_discoveries_table():
    """Refresh IPO discoveries table with investment decisions.

    Returns:
        Display-ready DataFrame with Status/Symbol/Security Type/Price/
        Detected At columns; empty (headers only) when the VM has no data.
    """
    ipos = fetch_from_vm('ipos?limit=100', [])

    if not ipos:
        return pd.DataFrame(columns=['Status', 'Symbol', 'Security Type', 'Price', 'Detected At'])

    # Investment-status code -> emoji-prefixed display label.
    status_labels = {
        'INVESTED': '🟢 INVESTED',
        'ELIGIBLE_NOT_INVESTED': '🟡 ELIGIBLE',
        'WRONG_TYPE': '⚪ WRONG TYPE',
        'UNKNOWN': '🔴 UNKNOWN'
    }

    df_data = []
    for ipo in ipos:
        status = ipo.get('investment_status', 'UNKNOWN')

        # BUG FIX: the previous expression rendered "$None" when the VM sent a
        # null trading_price (and "$0" when the key was absent). Treat missing,
        # None and the literal 'N/A' uniformly as 'N/A'.
        price = ipo.get('trading_price')
        price_display = 'N/A' if price in (None, 'N/A') else f"${price}"

        df_data.append({
            'Status': status_labels.get(status, '⚪ UNKNOWN'),
            'Symbol': ipo.get('symbol', 'N/A'),
            'Security Type': ipo.get('security_type', 'N/A'),
            'Price': price_display,
            'Detected At': ipo.get('detected_at', 'N/A')
        })

    return pd.DataFrame(df_data)
|
|
|
|
|
def get_order_history():
    """Fetch up to 500 closed orders from the past year.

    Returns the raw alpaca-py order objects, or [] when the request fails.
    """
    try:
        # Closed-status filter over a rolling one-year window.
        now = datetime.now(timezone.utc)
        request = GetOrdersRequest(
            status="closed",
            limit=500,
            after=now - timedelta(days=365),
            until=now
        )
        orders = trading_client.get_orders(request)
        logger.info(f"Successfully fetched {len(orders)} orders using closed status filter")
        return orders
    except Exception as e:
        logger.error(f"Error fetching order history: {e}")
        return []
|
|
|
|
|
def refresh_investment_performance_table():
    """Refresh investment performance table with P&L and sentiment analysis for all trading symbols"""
    logger.info("📊 Starting investment performance table refresh...")

    # --- Gather all three data sources: VM IPO records, Alpaca order history,
    # --- and current open positions.
    logger.info("🔌 Fetching IPO data from VM...")
    ipos = fetch_from_vm('ipos?limit=100', [])
    logger.info(f"📈 Retrieved {len(ipos)} IPO records from VM")

    logger.info("📋 Fetching order history from Alpaca...")
    orders = get_order_history()
    logger.info(f"📝 Retrieved {len(orders)} orders from Alpaca")

    logger.info("💼 Fetching current positions from Alpaca...")
    positions = get_current_positions()
    logger.info(f"🏦 Retrieved {len(positions)} current positions")

    columns = ['Symbol', 'Status', 'IPO Price', 'Buy Price', 'Sell Price', 'Investment', 'P&L ($)', 'P&L (%)', 'Sentiment', 'Predicted', 'Date']

    logger.info(f"Found {len(orders)} total orders for performance analysis")

    # No orders -> empty table with the expected headers.
    if not orders:
        return pd.DataFrame(columns=columns)

    # Unique set of symbols that have at least one order.
    symbols_traded = set()
    for order in orders:
        if hasattr(order, 'symbol') and order.symbol:
            symbols_traded.add(order.symbol)

    logger.info(f"Found {len(symbols_traded)} unique symbols traded: {list(symbols_traded)}")

    # Symbol -> IPO trading price (only positive, parseable prices are kept).
    ipo_price_lookup = {}
    for ipo in ipos:
        symbol = ipo.get('symbol', '')
        if symbol:
            try:
                price = float(ipo.get('trading_price', 0))
                if price > 0:
                    ipo_price_lookup[symbol] = price
            except (ValueError, TypeError):
                pass

    invested_data = []

    for symbol in sorted(symbols_traded):
        symbol_orders = [o for o in orders if o.symbol == symbol]

        if symbol_orders:
            buy_orders = [o for o in symbol_orders if o.side.value == 'buy']
            sell_orders = [o for o in symbol_orders if o.side.value == 'sell']

            if buy_orders:
                # Aggregate fills: total shares bought, total dollars spent,
                # and the volume-weighted average buy price.
                total_bought = sum(float(o.filled_qty or 0) for o in buy_orders)
                total_cost = sum(float(o.filled_qty or 0) * float(o.filled_avg_price or 0) for o in buy_orders)
                avg_buy_price = total_cost / total_bought if total_bought > 0 else 0

                total_sold = sum(float(o.filled_qty or 0) for o in sell_orders)
                current_qty = total_bought - total_sold

                ipo_price = ipo_price_lookup.get(symbol, 0)

                # Earliest buy fill anchors the "Date" column and the
                # pre-investment sentiment window.
                first_buy_order = min(buy_orders, key=lambda x: x.filled_at)
                first_buy_date = first_buy_order.filled_at.strftime('%Y-%m-%d')
                investment_time = first_buy_order.filled_at
                logger.info(f"Date for {symbol}: {first_buy_date} (from {first_buy_order.filled_at})")

                # Volume-weighted average sell price (0 when never sold).
                if sell_orders:
                    avg_sell_price = sum(float(o.filled_qty or 0) * float(o.filled_avg_price or 0) for o in sell_orders) / sum(float(o.filled_qty or 0) for o in sell_orders)
                else:
                    avg_sell_price = 0

                current_qty = total_bought - total_sold

                if current_qty > 0:
                    # Still holding: P&L comes from the live position price;
                    # without a matching position we can't price it, so P&L=0.
                    status = "🟦 HOLDING"
                    pos = next((p for p in positions if p['symbol'] == symbol), None)
                    if pos:
                        current_price = pos['current_price']
                        current_value = current_qty * current_price
                        investment = current_qty * avg_buy_price
                        pl_dollars = current_value - investment
                        pl_percent = (pl_dollars / investment * 100) if investment > 0 else 0
                    else:
                        investment = current_qty * avg_buy_price
                        pl_dollars = 0
                        pl_percent = 0
                else:
                    # Fully exited: realized P&L = sale proceeds - total cost.
                    status = "🟨 SOLD"
                    investment = total_cost
                    sold_value = sum(float(o.filled_qty or 0) * float(o.filled_avg_price or 0) for o in sell_orders)
                    pl_dollars = sold_value - investment
                    pl_percent = (pl_dollars / investment * 100) if investment > 0 else 0

                # HTML arrow/color markup keyed off the P&L sign; row_bg is
                # carried through in the hidden '_row_bg' column.
                if pl_dollars > 0:
                    pl_arrow = "<span style='color: #00d647; font-size: 1.4em;'>▲</span>"
                    pl_color = "#00d647"
                    row_bg = "rgba(0, 214, 71, 0.1)"
                elif pl_dollars < 0:
                    pl_arrow = "<span style='color: #ff0080; font-size: 1.4em;'>▼</span>"
                    pl_color = "#ff0080"
                    row_bg = "rgba(255, 0, 128, 0.1)"
                else:
                    pl_arrow = ""
                    pl_color = "#8b949e"
                    row_bg = "rgba(139, 148, 158, 0.05)"

                pl_dollar_str = f"{pl_arrow} <span style='color: {pl_color}; font-weight: 600;'>${abs(pl_dollars):.2f}</span>"
                pl_percent_str = f"{pl_arrow} <span style='color: {pl_color}; font-weight: 600;'>{abs(pl_percent):.2f}%</span>"

                # --- Sentiment analysis for the 12 hours before the first buy.
                # NOTE(review): get_pre_investment_news and
                # analyze_pre_investment_sentiment are not defined in this
                # chunk — presumably elsewhere in the file; verify they exist.
                logger.info(f"🧠 Starting sentiment analysis for {symbol}...")
                start_time = time.time()
                try:
                    logger.info(f"📰 Gathering pre-investment news for {symbol}...")
                    news_items = get_pre_investment_news(symbol, investment_time, hours_before=12)
                    logger.info(f"📑 Found {len(news_items)} total news items for {symbol}")

                    logger.info(f"🔍 Analyzing sentiment for {symbol}...")
                    avg_sentiment, predicted_change, prediction_label, source_breakdown = analyze_pre_investment_sentiment(news_items)

                    analysis_time = time.time() - start_time
                    logger.info(f"⚡ Sentiment analysis for {symbol} completed in {analysis_time:.1f}s")

                    # Colored label for the bullish/bearish/neutral verdict.
                    if prediction_label == "bullish":
                        sentiment_display = f"<span style='color: #00d647; font-weight: 600;'>🚀 {prediction_label.title()}</span>"
                    elif prediction_label == "bearish":
                        sentiment_display = f"<span style='color: #ff0080; font-weight: 600;'>📉 {prediction_label.title()}</span>"
                    else:
                        sentiment_display = f"<span style='color: #8b949e; font-weight: 600;'>😐 {prediction_label.title()}</span>"

                    # Colored predicted-change percentage.
                    if predicted_change > 0:
                        predicted_display = f"<span style='color: #00d647; font-weight: 600;'>+{predicted_change:.1f}%</span>"
                    elif predicted_change < 0:
                        predicted_display = f"<span style='color: #ff0080; font-weight: 600;'>{predicted_change:.1f}%</span>"
                    else:
                        predicted_display = f"<span style='color: #8b949e; font-weight: 600;'>{predicted_change:.1f}%</span>"

                    reddit_count = len(source_breakdown.get('Reddit', []))
                    news_count = len(source_breakdown.get('Google News', []))
                    logger.info(f"🎯 {symbol} RESULTS: {prediction_label.upper()} ({predicted_change:+.1f}%) | Reddit: {reddit_count} posts | News: {news_count} articles")

                    # Log one sample headline per source for spot-checking.
                    if reddit_count > 0:
                        sample_reddit = source_breakdown['Reddit'][0]['title'][:50]
                        logger.info(f"📱 Sample Reddit: {sample_reddit}...")
                    if news_count > 0:
                        sample_news = source_breakdown['Google News'][0]['title'][:50]
                        logger.info(f"📰 Sample News: {sample_news}...")

                except Exception as e:
                    # Sentiment failure is non-fatal: log details and fall back
                    # to placeholder cells so the row still renders.
                    analysis_time = time.time() - start_time
                    logger.error(f"❌ Sentiment analysis failed for {symbol} after {analysis_time:.1f}s: {str(e)}")
                    logger.error(f"🔍 Error type: {type(e).__name__}")
                    import traceback
                    logger.error(f"📋 Traceback: {traceback.format_exc()[:200]}...")
                    sentiment_display = "<span style='color: #8b949e;'>❓ Error</span>"
                    predicted_display = "<span style='color: #8b949e;'>N/A</span>"
                    pass

                # Columns prefixed with '_' are consumed by the HTML renderer
                # and sorter, never displayed.
                invested_data.append({
                    'Symbol': symbol,
                    'Status': status,
                    'IPO Price': f"${ipo_price:.2f}" if ipo_price > 0 else 'N/A',
                    'Buy Price': f"${avg_buy_price:.2f}",
                    'Sell Price': f"${avg_sell_price:.2f}" if avg_sell_price > 0 else 'N/A',
                    'Investment': f"${investment:.2f}",
                    'P&L ($)': pl_dollar_str,
                    'P&L (%)': pl_percent_str,
                    'Sentiment': sentiment_display,
                    'Predicted': predicted_display,
                    'Date': first_buy_date,
                    '_row_bg': row_bg,
                    '_sort_date': first_buy_order.filled_at
                })

    # Newest investments first.
    invested_data.sort(key=lambda x: x['_sort_date'], reverse=True)
    logger.info(f"📋 Processed {len(invested_data)} investments with sentiment analysis")

    df = pd.DataFrame(invested_data)
    logger.info(f"✅ Investment performance table refresh completed - {len(df)} rows")
    return df
|
|
|
|
|
def refresh_investment_performance_html():
    """Render the investment-performance DataFrame as a styled HTML table."""
    df = refresh_investment_performance_table()

    if df.empty:
        return "<div style='text-align: center; padding: 2rem; color: #666;'>No trading data available</div>"

    # Internal helper columns (prefixed with '_') are never rendered.
    visible_cols = [c for c in df.columns if not c.startswith('_')]

    parts = ['<table class="investment-table">', '<thead><tr>']
    parts.extend(f'<th>{c}</th>' for c in visible_cols)
    parts.append('</tr></thead>')

    parts.append('<tbody>')
    for _, row in df.iterrows():
        # Row tint follows the direction of the P&L arrow markup.
        pl_cell = str(row.get('P&L ($)', ''))
        if '▲' in pl_cell:
            css_class = "profit-row"
        elif '▼' in pl_cell:
            css_class = "loss-row"
        else:
            css_class = "neutral-row"

        parts.append(f'<tr class="{css_class}">')
        parts.extend(f'<td>{row[c]}</td>' for c in visible_cols)
        parts.append('</tr>')

    parts.append('</tbody></table>')
    return ''.join(parts)
|
|
|
|
|
def refresh_vm_stats():
    """Return the five VM statistic strings shown on the stats cards.

    Order: total IPOs, invested count, CS stocks, investment rate, last update.
    """
    stats = fetch_from_vm('stats', {})
    if not stats:
        return "0", "0", "0", "0%", "No data"

    total = str(stats.get('total_ipos_detected', 0))
    invested = str(stats.get('ipos_invested', 0))
    cs_stocks = str(stats.get('cs_stocks_detected', 0))
    rate = f"{stats.get('investment_rate', 0):.1f}%"
    updated = stats.get('last_updated', 'N/A')
    return total, invested, cs_stocks, rate, updated
|
|
|
|
|
def refresh_system_logs():
    """Return VM activity logs as one emoji-prefixed line per entry."""
    logs = fetch_from_vm('logs', [])
    if not logs:
        return "No logs available from VM"

    # Format: "<emoji> <timestamp> | <message>" per line.
    lines = [
        f"{entry.get('emoji', '⚪')} {entry.get('timestamp', 'N/A')} | {entry.get('message', '')}"
        for entry in logs
    ]
    return '\n'.join(lines)
|
|
|
|
|
def refresh_raw_logs():
    """Return the raw cron-log tail from the VM, prefixed with a summary header."""
    payload = fetch_from_vm('logs/raw?lines=1000', {})
    if not payload:
        return "No raw logs available from VM"

    banner = (
        "=== RAW CRON LOGS ===\n"
        f"Showing last {payload.get('showing_lines', 0)} of {payload.get('total_lines', 0)} total lines\n\n"
    )
    return banner + payload.get('content', 'No content')
|
|
|
|
|
def run_vm_command(command, current_output="", command_history=""):
    """Execute *command* on the VM and prepend the result to the terminal view.

    Args:
        command: shell command string to run remotely.
        current_output: existing terminal text (newest entry first).
        command_history: '|||'-delimited string of prior commands.

    Returns:
        (terminal_output, cleared_input, history) — the second element is
        always "" so the UI input box is cleared.
    """
    # BUG FIX: initialize up front so the except handler below can never raise
    # NameError when a failure happens before the history is rebuilt.
    new_history = command_history
    try:
        if not command.strip():
            return current_output, "", command_history

        # Record the command in history (deduplicated, capped at 50 entries).
        history_list = command_history.split("|||") if command_history else []
        if command not in history_list:
            history_list.append(command)
        history_list = history_list[-50:]
        new_history = "|||".join(history_list)

        response = requests.post(f"{VM_API_URL}/api/execute",
                                 json={"command": command},
                                 timeout=10)

        if response.status_code == 200:
            data = response.json()
            output = data.get('output', '')
            exit_code = data.get('exit_code', 0)

            colored_output = colorize_output(output)

            # Normalize CRs and build the "$ cmd / output" entry.
            clean_output = colored_output.strip().replace('\r', '')
            new_line = f"$ {command}\n{clean_output}"
            if exit_code != 0:
                new_line += f"\n[Exit code: {exit_code}]"
            new_line += "\n$ "

            # Newest command goes on top, like a reversed scrollback buffer.
            if current_output.strip():
                full_output = new_line + "\n" + current_output.rstrip()
            else:
                full_output = new_line

            return full_output, "", new_history
        else:
            error_line = f"\n$ {command}\nError: VM API returned {response.status_code}\n$ "
            return current_output + error_line, "", new_history

    except Exception as e:
        error_line = f"\n$ {command}\nError: {str(e)}\n$ "
        return current_output + error_line, "", new_history
|
|
|
|
|
def colorize_output(output):
    """Wrap notable substrings of terminal *output* in colored HTML spans.

    Highlights ls-style permission prefixes, severity keywords, common file
    extensions, and timestamps. Substitution order matters and is preserved.
    """
    import re

    text = output

    # ls-style permission strings at line start: directories blue, files teal.
    text = re.sub(r'^(d)([rwx-]{9})', r'<span style="color: #4A90E2;">\1\2</span>', text, flags=re.MULTILINE)
    text = re.sub(r'^(-)([rwx-]{9})', r'<span style="color: #50E3C2;">\1\2</span>', text, flags=re.MULTILINE)

    # Severity keywords, file names, and timestamps — applied in this order.
    single_group_rules = (
        (r'(ERROR|Error|error)', '#FF6B6B'),
        (r'(WARNING|Warning|warning)', '#FFD93D'),
        (r'(SUCCESS|Success|success)', '#6BCF7F'),
        (r'(\w+\.(py|log|csv|json|txt))', '#BD93F9'),
        (r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})', '#50FA7B'),
    )
    for pattern, color in single_group_rules:
        text = re.sub(pattern, r'<span style="color: ' + color + r';">\1</span>', text)

    return text
|
|
|
|
|
def debug_order_history():
    """Debug function to show raw order history data"""
    try:
        debug_info = f"=== ORDER HISTORY DEBUG ===\n"

        # Method 1: legacy 6-month query with no status filter (kept for
        # comparison; marked DEPRECATED in the output).
        try:
            end_date = datetime.now(timezone.utc)
            start_date = end_date - timedelta(days=180)
            old_request = GetOrdersRequest(limit=500, after=start_date, until=end_date)
            old_orders = trading_client.get_orders(old_request)
            debug_info += f"Method 1 (6 months, all statuses): {len(old_orders)} orders [DEPRECATED]\n"
        except Exception as e:
            debug_info += f"Method 1 failed: {str(e)}\n"

        # Method 1B: the production path (get_order_history — 1 year, closed).
        orders = get_order_history()
        debug_info += f"Method 1B (PRIMARY - 1 year, CLOSED): {len(orders)} orders [CURRENTLY USED]\n"

        # Method 2: same query as 1B but built inline, to cross-check.
        try:
            end_date = datetime.now(timezone.utc)
            start_date = end_date - timedelta(days=365)
            filled_request = GetOrdersRequest(
                status="closed",
                limit=500,
                after=start_date,
                until=end_date
            )
            filled_orders = trading_client.get_orders(filled_request)
            debug_info += f"Method 2 (1 year, CLOSED orders): {len(filled_orders)} orders\n"
        except Exception as e:
            debug_info += f"Method 2 failed: {str(e)}\n"

        # Method 3: most recent 100 orders, no date filter at all.
        try:
            recent_request = GetOrdersRequest(limit=100)
            recent_orders = trading_client.get_orders(recent_request)
            debug_info += f"Method 3 (recent 100, no date filter): {len(recent_orders)} orders\n"
        except Exception as e:
            debug_info += f"Method 3 failed: {str(e)}\n"

        debug_info += "\n"

        # Pick the first method that produced orders. The locals() checks
        # guard against names left unbound when a method's try block failed.
        all_orders = orders if orders else (filled_orders if 'filled_orders' in locals() else (recent_orders if 'recent_orders' in locals() else []))

        if all_orders:
            debug_info += f"Sample orders (showing first 10):\n"
            for i, order in enumerate(all_orders[:10]):
                debug_info += f"{i+1}. Symbol: {order.symbol}, Side: {order.side}, "
                debug_info += f"Qty: {order.filled_qty}, Price: {order.filled_avg_price}, "
                debug_info += f"Status: {order.status}, Time: {order.filled_at}, "
                debug_info += f"Created: {order.created_at}\n"
        else:
            # No orders anywhere — dump account details to help diagnose
            # credential / account-mismatch problems.
            debug_info += "❌ NO ORDERS FOUND WITH ANY METHOD!\n"
            debug_info += "\nLet's check account details:\n"

            try:
                account = trading_client.get_account()
                debug_info += f"Account ID: {account.account_number}\n"
                debug_info += f"Account Status: {account.status}\n"
                debug_info += f"Trading Blocked: {account.trading_blocked}\n"
                debug_info += f"Pattern Day Trader: {account.pattern_day_trader}\n"
                debug_info += f"Cash: ${float(account.cash):,.2f}\n"
                debug_info += f"Portfolio Value: ${float(account.portfolio_value):,.2f}\n"

                # Show a redacted key and infer paper vs live from its prefix.
                debug_info += f"\nAPI Keys being used:\n"
                debug_info += f"API Key: {API_KEY[:8]}...{API_KEY[-4:]}\n"
                if "PK" in API_KEY:
                    debug_info += "🟢 This appears to be PAPER TRADING (PK prefix)\n"
                elif "AK" in API_KEY:
                    debug_info += "🔴 This appears to be LIVE TRADING (AK prefix)\n"
                else:
                    debug_info += "❓ Unknown API key type\n"

            except Exception as e:
                debug_info += f"❌ Error getting account info: {str(e)}\n"

            debug_info += "\nPossible issues:\n"
            debug_info += "- No actual trading activity on this account\n"
            debug_info += "- Using paper trading account (no real orders)\n"
            debug_info += "- Orders are older than 1 year\n"
            debug_info += "- API key permissions issue\n"
            debug_info += "- Different Alpaca account than expected\n"

        return debug_info
    except Exception as e:
        return f"ERROR getting order history: {str(e)}"
|
|
|
|
|
def debug_current_positions():
    """Return a plain-text dump of every open position (debug tab)."""
    try:
        positions = get_current_positions()

        report = f"=== CURRENT POSITIONS DEBUG ===\n"
        report += f"Total positions: {len(positions)}\n\n"
        for pos in positions:
            report += (
                f"Symbol: {pos['symbol']}, Qty: {pos['qty']}, "
                f"Market Value: ${pos['market_value']:.2f}, "
                f"P&L: ${pos['unrealized_pl']:.2f}\n"
            )
        return report
    except Exception as e:
        return f"ERROR getting positions: {str(e)}"
|
|
|
|
|
def debug_ipo_data():
    """Return a plain-text summary of VM IPO records, listing INVESTED ones."""
    try:
        ipos = fetch_from_vm('ipos?limit=20', [])
        # Preserve VM ordering while filtering to INVESTED records.
        invested = [r for r in ipos if r.get('investment_status', 'UNKNOWN') == 'INVESTED']

        report = f"=== IPO DATA DEBUG ===\n"
        report += f"Total IPOs: {len(ipos)}\n\n"
        for record in invested:
            report += f"INVESTED: {record.get('symbol')} - Price: ${record.get('trading_price')}\n"
        report += f"\nTotal INVESTED IPOs: {len(invested)}\n"
        return report
    except Exception as e:
        return f"ERROR getting IPO data: {str(e)}"
|
|
|
|
|
def debug_account_info():
    """Return the account-info dict as plain "key: value" lines (debug tab)."""
    try:
        body = ''.join(f"{key}: {value}\n" for key, value in get_account_info().items())
        return f"=== ACCOUNT INFO DEBUG ===\n" + body
    except Exception as e:
        return f"ERROR getting account info: {str(e)}"
|
|
|
|
|
def calculate_sequential_reinvestment():
    """Calculate P&L% if reinvesting the same amount sequentially in each stock.

    Simulates starting with $1,000, applying each symbol's realized (sold)
    or unrealized (still held) return in chronological order of first
    purchase, compounding the balance from trade to trade.

    Returns:
        str: Human-readable report, or an error message string on failure.
    """
    try:
        orders = get_order_history()
        if not orders:
            return "No order data available for calculation"

        # Earliest filled-buy date per symbol — determines the trade sequence.
        symbols_by_date = {}
        for order in orders:
            if order.side.value == 'buy' and order.status.value == 'filled':
                symbol = order.symbol
                fill_date = order.filled_at
                if symbol not in symbols_by_date or fill_date < symbols_by_date[symbol]:
                    symbols_by_date[symbol] = fill_date

        sorted_symbols = sorted(symbols_by_date.items(), key=lambda x: x[1])

        # Fetch positions ONCE. The original called get_current_positions()
        # inside the loop, issuing one API round-trip per still-held symbol.
        positions_by_symbol = {p['symbol']: p for p in get_current_positions()}

        initial_investment = 1000
        current_value = initial_investment
        results = []

        for symbol, first_date in sorted_symbols:
            symbol_orders = [o for o in orders if o.symbol == symbol]
            buy_orders = [o for o in symbol_orders if o.side.value == 'buy']
            sell_orders = [o for o in symbol_orders if o.side.value == 'sell']

            if buy_orders:
                total_bought = sum(float(o.filled_qty or 0) for o in buy_orders)
                total_cost = sum(float(o.filled_qty or 0) * float(o.filled_avg_price or 0) for o in buy_orders)

                if sell_orders:
                    # Realized return: sale proceeds vs. total cost.
                    sold_value = sum(float(o.filled_qty or 0) * float(o.filled_avg_price or 0) for o in sell_orders)
                    pl_percent = ((sold_value - total_cost) / total_cost) if total_cost > 0 else 0
                else:
                    # Unrealized return from the live position, if still held.
                    pos = positions_by_symbol.get(symbol)
                    if pos:
                        current_symbol_value = total_bought * pos['current_price']
                        pl_percent = ((current_symbol_value - total_cost) / total_cost) if total_cost > 0 else 0
                    else:
                        pl_percent = 0

                # Compound the running balance by this trade's return.
                new_value = current_value * (1 + pl_percent)
                gain_loss = new_value - current_value
                results.append(f"{symbol}: {pl_percent*100:+.2f}% | ${current_value:.2f} → ${new_value:.2f} ({gain_loss:+.2f})")
                current_value = new_value

        final_return_pct = ((current_value - initial_investment) / initial_investment) * 100

        output = f"🧮 SEQUENTIAL REINVESTMENT ANALYSIS\n"
        output += f"Starting Investment: ${initial_investment:.2f}\n"
        output += f"Final Value: ${current_value:.2f}\n"
        output += f"Total Return: {final_return_pct:+.2f}%\n"
        output += f"Number of Trades: {len(sorted_symbols)}\n\n"
        output += "Trade Sequence:\n"
        output += "\n".join(results)

        return output

    except Exception as e:
        return f"ERROR calculating sequential reinvestment: {str(e)}"
|
|
|
|
|
def calculate_equal_weight_portfolio():
    """Calculate P&L% if investing equal amounts in all stocks simultaneously.

    Averages each symbol's realized (sold) or unrealized (held) return;
    under equal weighting this average IS the portfolio return.

    Returns:
        str: Human-readable report, or an error message string on failure.
    """
    try:
        orders = get_order_history()
        if not orders:
            return "No order data available for calculation"

        # Every symbol we ever bought.
        symbols = {o.symbol for o in orders if o.side.value == 'buy'}

        # Fetch positions ONCE; the original re-fetched them inside the loop
        # (one API call per still-held symbol).
        positions_by_symbol = {p['symbol']: p for p in get_current_positions()}

        total_pl = 0
        valid_symbols = 0
        results = []

        for symbol in sorted(symbols):
            symbol_orders = [o for o in orders if o.symbol == symbol]
            buy_orders = [o for o in symbol_orders if o.side.value == 'buy']
            sell_orders = [o for o in symbol_orders if o.side.value == 'sell']

            if buy_orders:
                total_bought = sum(float(o.filled_qty or 0) for o in buy_orders)
                total_cost = sum(float(o.filled_qty or 0) * float(o.filled_avg_price or 0) for o in buy_orders)

                if sell_orders:
                    # Realized return from sale proceeds.
                    sold_value = sum(float(o.filled_qty or 0) * float(o.filled_avg_price or 0) for o in sell_orders)
                    pl_percent = ((sold_value - total_cost) / total_cost) if total_cost > 0 else 0
                    status = "SOLD"
                else:
                    # Unrealized return from the live position.
                    pos = positions_by_symbol.get(symbol)
                    if pos:
                        current_value = total_bought * pos['current_price']
                        pl_percent = ((current_value - total_cost) / total_cost) if total_cost > 0 else 0
                        status = "HOLDING"
                    else:
                        pl_percent = 0
                        status = "UNKNOWN"

                total_pl += pl_percent
                valid_symbols += 1
                results.append(f"{symbol}: {pl_percent*100:+.2f}% ({status})")

        avg_return = (total_pl / valid_symbols) * 100 if valid_symbols > 0 else 0

        output = f"⚖️ EQUAL WEIGHT PORTFOLIO ANALYSIS\n"
        output += f"Total Symbols: {valid_symbols}\n"
        output += f"Average Return per Symbol: {avg_return:+.2f}%\n"
        output += f"Portfolio Return (equal weights): {avg_return:+.2f}%\n\n"
        output += "Individual Returns:\n"
        output += "\n".join(results)

        return output

    except Exception as e:
        return f"ERROR calculating equal weight portfolio: {str(e)}"
|
|
|
|
|
def calculate_best_worst_performers():
    """Find best and worst performing stocks.

    Ranks every symbol ever bought by its realized (sold) or unrealized
    (still held) return and reports the top/bottom five plus a win/loss
    summary.

    Returns:
        str: Human-readable report, or an error message string on failure.
    """
    try:
        orders = get_order_history()
        if not orders:
            return "No order data available for calculation"

        symbols = {o.symbol for o in orders if o.side.value == 'buy'}

        # Fetch positions ONCE; the original re-fetched them inside the loop.
        positions_by_symbol = {p['symbol']: p for p in get_current_positions()}

        performance = []

        for symbol in symbols:
            symbol_orders = [o for o in orders if o.symbol == symbol]
            buy_orders = [o for o in symbol_orders if o.side.value == 'buy']
            sell_orders = [o for o in symbol_orders if o.side.value == 'sell']

            if buy_orders:
                total_bought = sum(float(o.filled_qty or 0) for o in buy_orders)
                total_cost = sum(float(o.filled_qty or 0) * float(o.filled_avg_price or 0) for o in buy_orders)

                if sell_orders:
                    # Realized P&L from sale proceeds.
                    sold_value = sum(float(o.filled_qty or 0) * float(o.filled_avg_price or 0) for o in sell_orders)
                    pl_percent = ((sold_value - total_cost) / total_cost) if total_cost > 0 else 0
                    pl_dollars = sold_value - total_cost
                    status = "SOLD"
                else:
                    pos = positions_by_symbol.get(symbol)
                    if pos:
                        # Unrealized P&L from the live position.
                        current_value = total_bought * pos['current_price']
                        pl_percent = ((current_value - total_cost) / total_cost) if total_cost > 0 else 0
                        pl_dollars = current_value - total_cost
                        status = "HOLDING"
                    else:
                        pl_percent = 0
                        pl_dollars = 0
                        status = "UNKNOWN"

                performance.append({
                    'symbol': symbol,
                    'pl_percent': pl_percent,
                    'pl_dollars': pl_dollars,
                    'investment': total_cost,
                    'status': status
                })

        # Best first.
        performance.sort(key=lambda x: x['pl_percent'], reverse=True)

        output = f"🏆 BEST vs WORST PERFORMERS\n\n"

        if performance:
            output += "🥇 TOP 5 PERFORMERS:\n"
            for i, perf in enumerate(performance[:5]):
                output += f"{i+1}. {perf['symbol']}: {perf['pl_percent']*100:+.2f}% (${perf['pl_dollars']:+.2f}) - {perf['status']}\n"

            output += "\n🥉 BOTTOM 5 PERFORMERS:\n"
            bottom = performance[-5:]
            # BUG FIX: original computed rank = len(performance) - 4 + i,
            # which goes negative/wrong when fewer than 5 symbols exist.
            first_rank = len(performance) - len(bottom) + 1
            for i, perf in enumerate(bottom):
                rank = first_rank + i
                output += f"{rank}. {perf['symbol']}: {perf['pl_percent']*100:+.2f}% (${perf['pl_dollars']:+.2f}) - {perf['status']}\n"

            total_winners = len([p for p in performance if p['pl_percent'] > 0])
            total_losers = len([p for p in performance if p['pl_percent'] < 0])

            # Summary only when there is data (avoids division by zero).
            output += f"\n📊 SUMMARY:\n"
            output += f"Winners: {total_winners}/{len(performance)} ({total_winners/len(performance)*100:.1f}%)\n"
            output += f"Losers: {total_losers}/{len(performance)} ({total_losers/len(performance)*100:.1f}%)\n"

        return output

    except Exception as e:
        return f"ERROR calculating best/worst performers: {str(e)}"
|
|
|
|
|
def calculate_win_rate_metrics():
    """Calculate win rate and average returns across all traded symbols.

    Classifies each symbol as winner/loser/breakeven by realized or
    unrealized return and reports win rate, average win/loss, a
    reward-to-risk ratio, and aggregate dollar P&L.

    Returns:
        str: Human-readable report, or an error message string on failure.
    """
    try:
        orders = get_order_history()
        if not orders:
            return "No order data available for calculation"

        symbols = {o.symbol for o in orders if o.side.value == 'buy'}

        # Fetch positions ONCE; the original re-fetched them inside the loop.
        positions_by_symbol = {p['symbol']: p for p in get_current_positions()}

        performance = []
        total_investment = 0

        for symbol in symbols:
            symbol_orders = [o for o in orders if o.symbol == symbol]
            buy_orders = [o for o in symbol_orders if o.side.value == 'buy']
            sell_orders = [o for o in symbol_orders if o.side.value == 'sell']

            if buy_orders:
                total_bought = sum(float(o.filled_qty or 0) for o in buy_orders)
                total_cost = sum(float(o.filled_qty or 0) * float(o.filled_avg_price or 0) for o in buy_orders)
                total_investment += total_cost

                if sell_orders:
                    # Realized P&L.
                    sold_value = sum(float(o.filled_qty or 0) * float(o.filled_avg_price or 0) for o in sell_orders)
                    pl_percent = ((sold_value - total_cost) / total_cost) if total_cost > 0 else 0
                    pl_dollars = sold_value - total_cost
                else:
                    # Unrealized P&L from the live position, if any.
                    pos = positions_by_symbol.get(symbol)
                    if pos:
                        current_value = total_bought * pos['current_price']
                        pl_percent = ((current_value - total_cost) / total_cost) if total_cost > 0 else 0
                        pl_dollars = current_value - total_cost
                    else:
                        pl_percent = 0
                        pl_dollars = 0

                performance.append({
                    'symbol': symbol,
                    'pl_percent': pl_percent,
                    'pl_dollars': pl_dollars,
                    'investment': total_cost
                })

        if not performance:
            return "No performance data available"

        winners = [p for p in performance if p['pl_percent'] > 0]
        losers = [p for p in performance if p['pl_percent'] < 0]
        breakeven = [p for p in performance if p['pl_percent'] == 0]

        win_rate = len(winners) / len(performance) * 100
        avg_win = sum(p['pl_percent'] for p in winners) / len(winners) * 100 if winners else 0
        avg_loss = sum(p['pl_percent'] for p in losers) / len(losers) * 100 if losers else 0

        total_pl_dollars = sum(p['pl_dollars'] for p in performance)
        total_pl_percent = (total_pl_dollars / total_investment) * 100 if total_investment > 0 else 0

        # Reward-to-risk: infinite when there are no losing trades.
        risk_reward = abs(avg_win / avg_loss) if avg_loss != 0 else float('inf')

        output = f"🎯 WIN RATE & AVERAGE RETURNS\n\n"
        output += f"Total Trades: {len(performance)}\n"
        output += f"Win Rate: {win_rate:.1f}% ({len(winners)} winners)\n"
        output += f"Loss Rate: {len(losers)/len(performance)*100:.1f}% ({len(losers)} losers)\n"
        output += f"Breakeven: {len(breakeven)} trades\n\n"

        output += f"📈 AVERAGE PERFORMANCE:\n"
        output += f"Average Winner: +{avg_win:.2f}%\n"
        output += f"Average Loser: {avg_loss:.2f}%\n"
        output += f"Risk/Reward Ratio: {risk_reward:.2f}:1\n\n"

        output += f"💰 TOTAL PERFORMANCE:\n"
        output += f"Total Invested: ${total_investment:.2f}\n"
        output += f"Total P&L: ${total_pl_dollars:+.2f}\n"
        output += f"Total Return: {total_pl_percent:+.2f}%\n"

        return output

    except Exception as e:
        return f"ERROR calculating win rate metrics: {str(e)}"
|
|
|
|
|
def calculate_risk_metrics():
    """Calculate risk metrics and volatility across all traded symbols.

    Reports mean/median return, standard deviation, a Sharpe-like ratio
    (no risk-free rate), best/worst trade spread, and position-sizing
    concentration.

    Returns:
        str: Human-readable report, or an error message string on failure.
    """
    try:
        # Function-scope import kept local, but hoisted above first use.
        import statistics

        orders = get_order_history()
        if not orders:
            return "No order data available for calculation"

        symbols = {o.symbol for o in orders if o.side.value == 'buy'}

        # Fetch positions ONCE; the original re-fetched them inside the loop.
        positions_by_symbol = {p['symbol']: p for p in get_current_positions()}

        returns = []
        investments = []

        for symbol in symbols:
            symbol_orders = [o for o in orders if o.symbol == symbol]
            buy_orders = [o for o in symbol_orders if o.side.value == 'buy']
            sell_orders = [o for o in symbol_orders if o.side.value == 'sell']

            if buy_orders:
                total_bought = sum(float(o.filled_qty or 0) for o in buy_orders)
                total_cost = sum(float(o.filled_qty or 0) * float(o.filled_avg_price or 0) for o in buy_orders)
                investments.append(total_cost)

                if sell_orders:
                    # Realized return.
                    sold_value = sum(float(o.filled_qty or 0) * float(o.filled_avg_price or 0) for o in sell_orders)
                    pl_percent = ((sold_value - total_cost) / total_cost) if total_cost > 0 else 0
                else:
                    # Unrealized return from the live position, if any.
                    pos = positions_by_symbol.get(symbol)
                    if pos:
                        current_value = total_bought * pos['current_price']
                        pl_percent = ((current_value - total_cost) / total_cost) if total_cost > 0 else 0
                    else:
                        pl_percent = 0

                returns.append(pl_percent)

        if not returns:
            return "No return data available"

        avg_return = statistics.mean(returns) * 100
        median_return = statistics.median(returns) * 100
        # stdev requires at least two samples.
        volatility = statistics.stdev(returns) * 100 if len(returns) > 1 else 0

        # Sharpe-like ratio: mean over stdev, no risk-free rate subtracted.
        sharpe = avg_return / volatility if volatility > 0 else 0

        max_return = max(returns) * 100
        min_return = min(returns) * 100
        # NOTE: best-minus-worst spread, not a true peak-to-trough drawdown.
        max_drawdown = max_return - min_return

        total_investment = sum(investments)
        avg_position_size = statistics.mean(investments)
        largest_position = max(investments)
        concentration = (largest_position / total_investment) * 100 if total_investment > 0 else 0

        output = f"⚠️ RISK METRICS & VOLATILITY\n\n"
        output += f"📊 RETURN STATISTICS:\n"
        output += f"Average Return: {avg_return:+.2f}%\n"
        output += f"Median Return: {median_return:+.2f}%\n"
        output += f"Volatility (StdDev): {volatility:.2f}%\n"
        output += f"Sharpe-like Ratio: {sharpe:.2f}\n\n"

        output += f"📉 RISK MEASURES:\n"
        output += f"Best Trade: +{max_return:.2f}%\n"
        output += f"Worst Trade: {min_return:.2f}%\n"
        output += f"Max Range: {max_drawdown:.2f}%\n\n"

        output += f"🎯 POSITION SIZING:\n"
        output += f"Average Position: ${avg_position_size:.2f}\n"
        output += f"Largest Position: ${largest_position:.2f}\n"
        output += f"Concentration Risk: {concentration:.1f}% in largest\n"

        return output

    except Exception as e:
        return f"ERROR calculating risk metrics: {str(e)}"
|
|
|
|
|
def calculate_time_analysis():
    """Analyze performance by time periods (monthly buckets).

    Buckets each symbol's return into the month of its first filled buy,
    reports per-month averages, then compares the earlier half of months
    against the more recent half as a trend signal.

    Returns:
        str: Human-readable report, or an error message string on failure.
    """
    try:
        orders = get_order_history()
        if not orders:
            return "No order data available for calculation"

        # (Redundant local `from datetime import ...` removed — the module
        # already imports datetime/timezone at the top, and neither is used
        # directly here.)

        # Seed monthly buckets from filled buy orders.
        monthly_performance = {}
        for order in orders:
            if order.side.value == 'buy' and order.status.value == 'filled':
                month_key = order.filled_at.strftime('%Y-%m')
                if month_key not in monthly_performance:
                    monthly_performance[month_key] = {'symbols': set(), 'investment': 0, 'returns': []}
                monthly_performance[month_key]['symbols'].add(order.symbol)

        symbols = {o.symbol for o in orders if o.side.value == 'buy'}

        # Fetch positions ONCE; the original re-fetched them inside the loop.
        positions_by_symbol = {p['symbol']: p for p in get_current_positions()}

        for symbol in symbols:
            symbol_orders = [o for o in orders if o.symbol == symbol]
            buy_orders = [o for o in symbol_orders if o.side.value == 'buy']
            sell_orders = [o for o in symbol_orders if o.side.value == 'sell']

            if buy_orders:
                # Attribute the whole symbol to the month of its first buy.
                first_buy = min(buy_orders, key=lambda x: x.filled_at)
                month_key = first_buy.filled_at.strftime('%Y-%m')

                total_bought = sum(float(o.filled_qty or 0) for o in buy_orders)
                total_cost = sum(float(o.filled_qty or 0) * float(o.filled_avg_price or 0) for o in buy_orders)

                if sell_orders:
                    # Realized return.
                    sold_value = sum(float(o.filled_qty or 0) * float(o.filled_avg_price or 0) for o in sell_orders)
                    pl_percent = ((sold_value - total_cost) / total_cost) if total_cost > 0 else 0
                else:
                    # Unrealized return from the live position, if any.
                    pos = positions_by_symbol.get(symbol)
                    if pos:
                        current_value = total_bought * pos['current_price']
                        pl_percent = ((current_value - total_cost) / total_cost) if total_cost > 0 else 0
                    else:
                        pl_percent = 0

                if month_key in monthly_performance:
                    monthly_performance[month_key]['investment'] += total_cost
                    monthly_performance[month_key]['returns'].append(pl_percent)

        output = f"⏰ TIME-BASED PERFORMANCE ANALYSIS\n\n"

        for month in sorted(monthly_performance.keys()):
            data = monthly_performance[month]
            if data['returns']:
                avg_return = sum(data['returns']) / len(data['returns']) * 100
                total_investment = data['investment']
                num_trades = len(data['returns'])

                output += f"📅 {month}: {avg_return:+.2f}% avg return\n"
                output += f" • {num_trades} trades, ${total_investment:.2f} invested\n"

        # Split months into early/recent halves and compare averages.
        sorted_months = sorted(monthly_performance.keys())
        if len(sorted_months) >= 2:
            early_months = sorted_months[:len(sorted_months)//2]
            recent_months = sorted_months[len(sorted_months)//2:]

            early_returns = []
            recent_returns = []

            for month in early_months:
                early_returns.extend(monthly_performance[month]['returns'])

            for month in recent_months:
                recent_returns.extend(monthly_performance[month]['returns'])

            if early_returns and recent_returns:
                early_avg = sum(early_returns) / len(early_returns) * 100
                recent_avg = sum(recent_returns) / len(recent_returns) * 100

                output += f"\n📈 TREND ANALYSIS:\n"
                output += f"Early Period Avg: {early_avg:+.2f}% ({len(early_returns)} trades)\n"
                output += f"Recent Period Avg: {recent_avg:+.2f}% ({len(recent_returns)} trades)\n"
                output += f"Improvement: {recent_avg - early_avg:+.2f}% difference\n"

        return output

    except Exception as e:
        return f"ERROR calculating time analysis: {str(e)}"
|
|
|
|
|
|
|
|
def get_pre_investment_news(symbol, investment_time, hours_before=12):
    """Collect Reddit posts and Google News items from before an investment.

    The search window ends 30 minutes before the fill time and reaches back
    ``hours_before`` hours from it.
    """
    cutoff_time = investment_time - timedelta(minutes=30)
    search_start = investment_time - timedelta(hours=hours_before)

    logger.info(f"🔍 NEWS SEARCH for {symbol}:")
    logger.info(f" 📅 Time window: {search_start.strftime('%Y-%m-%d %H:%M')} → {cutoff_time.strftime('%Y-%m-%d %H:%M')}")
    logger.info(f" ⏰ Search duration: {hours_before} hours before investment")

    collected = []

    # Reddit sweep, timed for diagnostics.
    logger.info(f"🧵 Starting Reddit search for {symbol}...")
    started = time.time()
    reddit_posts = get_reddit_pre_investment(symbol, search_start, cutoff_time)
    reddit_time = time.time() - started
    logger.info(f"✅ Reddit search completed in {reddit_time:.1f}s - found {len(reddit_posts)} posts")
    collected.extend(reddit_posts)

    # Google News sweep, timed for diagnostics.
    logger.info(f"📰 Starting Google News search for {symbol}...")
    started = time.time()
    google_news = get_google_news_pre_investment(symbol, search_start, cutoff_time)
    news_time = time.time() - started
    logger.info(f"✅ Google News search completed in {news_time:.1f}s - found {len(google_news)} articles")
    collected.extend(google_news)

    logger.info(f"📊 TOTAL NEWS GATHERED for {symbol}: {len(collected)} items ({len(reddit_posts)} Reddit + {len(google_news)} News)")
    return collected
|
|
|
|
|
def get_reddit_pre_investment(symbol, start_time, cutoff_time):
    """Get Reddit posts mentioning *symbol* from before our investment.

    Searches several subreddits with multiple query variants and keeps only
    posts that mention the ticker AND were created inside the
    [start_time, cutoff_time] window.

    Args:
        symbol: Ticker symbol to search for.
        start_time: Earliest acceptable post time (datetime).
        cutoff_time: Latest acceptable post time (datetime, pre-investment).

    Returns:
        list[dict]: Matching posts (title, text excerpt, score, comments,
        subreddit, source, url, search_term).
    """
    reddit_posts = []

    subreddits = ['wallstreetbets', 'stocks', 'investing']
    search_terms = [symbol, f'{symbol} stock', f'{symbol} IPO', f'${symbol}']

    # Epoch-second bounds for the window; Reddit's created_utc is epoch UTC.
    window_start_ts = start_time.timestamp()
    window_end_ts = cutoff_time.timestamp()

    for subreddit in subreddits:
        for search_term in search_terms:
            try:
                url = f"https://www.reddit.com/r/{subreddit}/search.json"
                params = {
                    'q': search_term,
                    'restrict_sr': 'true',
                    'limit': 5,
                    't': 'all',
                    'sort': 'relevance'
                }

                # NOTE(review): `headers` is assumed to be a module-level
                # dict (e.g. carrying a User-Agent) defined elsewhere in
                # this file — confirm.
                response = requests.get(url, params=params, headers=headers, timeout=10)
                if response.status_code == 200:
                    data = response.json()
                    posts_found = len(data.get('data', {}).get('children', []))
                    logger.info(f"Reddit search: r/{subreddit} + '{search_term}' found {posts_found} posts")

                    for post in data.get('data', {}).get('children', []):
                        post_data = post.get('data', {})

                        if not post_data.get('title'):
                            continue

                        # BUG FIX: the original never applied start_time /
                        # cutoff_time, so posts published AFTER the
                        # investment leaked into the "pre-investment" set
                        # (lookahead bias in the backtest). Enforce the
                        # window when Reddit provides a creation time.
                        created_utc = post_data.get('created_utc')
                        if created_utc is not None and not (window_start_ts <= created_utc <= window_end_ts):
                            continue

                        # Skip duplicates already collected from another query.
                        title = post_data.get('title', '')
                        if any(existing['title'] == title for existing in reddit_posts):
                            continue

                        # Keep only posts that actually mention the ticker.
                        title_text = f"{title} {post_data.get('selftext', '')}".upper()
                        if symbol.upper() in title_text or f'${symbol.upper()}' in title_text:
                            reddit_post = {
                                'title': title,
                                'selftext': post_data.get('selftext', '')[:300],
                                'score': post_data.get('score', 0),
                                'num_comments': post_data.get('num_comments', 0),
                                'subreddit': subreddit,
                                'source': 'Reddit',
                                'url': f"https://reddit.com{post_data.get('permalink', '')}",
                                'search_term': search_term
                            }
                            reddit_posts.append(reddit_post)
                            logger.info(f"Added Reddit post: {title[:50]}... (score: {post_data.get('score', 0)})")

                # Throttle between unauthenticated API requests.
                time.sleep(0.5)

            except Exception as e:
                logger.warning(f"Reddit error for r/{subreddit} + '{search_term}': {e}")

    logger.info(f"Total Reddit posts found for {symbol}: {len(reddit_posts)}")
    return reddit_posts
|
|
|
|
|
def get_google_news_pre_investment(symbol, start_time, cutoff_time):
    """Get Google News headlines about *symbol* from before our investment.

    Queries the Google News RSS feed with several IPO/stock phrasings and
    returns up to 5 items per query.

    NOTE(review): the RSS endpoint exposes no date-range parameter, so
    start_time/cutoff_time are not enforced here — results may include
    post-investment articles. Confirm whether pubDate filtering should be
    added.

    Returns:
        list[dict]: Items with title, plain-text description, source, url.
    """
    # Imports hoisted out of the response-handling branch.
    import re
    from xml.etree import ElementTree as ET

    google_news = []

    try:
        search_queries = [
            f'{symbol} IPO',
            f'{symbol} stock',
            f'{symbol} public offering'
        ]

        for query in search_queries:
            url = "https://news.google.com/rss/search"
            params = {
                'q': query,
                'hl': 'en-US',
                'gl': 'US',
                'ceid': 'US:en'
            }

            # NOTE(review): `headers` is assumed to be a module-level dict
            # defined elsewhere in this file — confirm.
            response = requests.get(url, params=params, headers=headers, timeout=10)
            if response.status_code == 200:
                root = ET.fromstring(response.content)

                for item in root.findall('.//item')[:5]:
                    title_elem = item.find('title')
                    link_elem = item.find('link')
                    description_elem = item.find('description')

                    if title_elem is not None:
                        # BUG FIX: an empty <description/> element yields
                        # .text == None, which crashed re.sub in the
                        # original; coalesce to "" before stripping tags.
                        description = description_elem.text if description_elem is not None else ""
                        description = re.sub(r'<[^>]+>', '', description or "")

                        news_item = {
                            'title': title_elem.text,
                            'description': description,
                            'source': 'Google News',
                            'url': link_elem.text if link_elem is not None else ''
                        }
                        google_news.append(news_item)

            # Throttle between RSS queries.
            time.sleep(0.5)

    except Exception as e:
        logger.warning(f"Google News error: {e}")

    return google_news
|
|
|
|
|
def analyze_pre_investment_sentiment(news_items):
    """Score pre-investment news sentiment and derive a move prediction.

    Returns:
        tuple: (avg_sentiment, predicted_change_pct, label, source_breakdown)
        where label is one of "bullish"/"bearish"/"neutral".
    """
    if not news_items:
        return 0.0, 0.0, "neutral", {}

    sentiments = []
    source_breakdown = {'Reddit': [], 'Google News': []}

    for item in news_items:
        # Reddit posts carry selftext; news items carry a description.
        if item['source'] == 'Reddit':
            text = f"{item['title']} {item.get('selftext', '')}"
        else:
            text = f"{item['title']} {item.get('description', '')}"

        # Blend VADER compound (60%) with TextBlob polarity (40%).
        compound = vader.polarity_scores(text)['compound']
        polarity = TextBlob(text).sentiment.polarity
        combined_sentiment = (compound * 0.6) + (polarity * 0.4)

        # Reddit items weighted by engagement (score + comments), capped
        # at 2x; zero-engagement posts count at half weight.
        if item['source'] == 'Reddit':
            engagement = item.get('score', 0) + item.get('num_comments', 0)
            weight = min(engagement / 100.0, 2.0) if engagement > 0 else 0.5
        else:
            weight = 1.0

        weighted = combined_sentiment * weight
        sentiments.append(weighted)

        source_breakdown[item['source']].append({
            'sentiment': weighted,
            'title': item['title'][:80],
            'weight': weight
        })

    avg_sentiment = sum(sentiments) / len(sentiments)

    # Scale average sentiment into a rough % price-change estimate.
    predicted_change = avg_sentiment * 25.0

    reddit_scores = [entry['sentiment'] for entry in source_breakdown['Reddit']]
    news_scores = [entry['sentiment'] for entry in source_breakdown['Google News']]

    reddit_avg = sum(reddit_scores) / len(reddit_scores) if reddit_scores else 0
    news_avg = sum(news_scores) / len(news_scores) if news_scores else 0

    # Boost confidence 20% when both sources agree on direction.
    same_sign = (reddit_avg > 0 and news_avg > 0) or (reddit_avg < 0 and news_avg < 0)
    if same_sign:
        predicted_change *= 1.2

    if predicted_change >= 5.0:
        prediction_label = "bullish"
    elif predicted_change <= -5.0:
        prediction_label = "bearish"
    else:
        prediction_label = "neutral"

    return avg_sentiment, predicted_change, prediction_label, source_breakdown
|
|
|
|
|
def get_actual_performance(symbol, investment_time, investment_price):
    """Get actual stock performance after our investment.

    Pulls hourly bars from Yahoo Finance for the investment day and
    computes the first-hour and full-day moves from the day's open.

    Returns:
        tuple: (first_hour_change_pct, day_change_pct, first_price), each
        element None when no usable intraday data is available.
    """
    try:
        ticker = yf.Ticker(symbol)

        start_date = investment_time.date()
        end_date = start_date + timedelta(days=5)

        hist = ticker.history(start=start_date, end=end_date, interval='1h')

        if hist.empty:
            return None, None, None

        # Hourly bars for the investment day itself.
        day_data = hist[hist.index.date == start_date]

        if len(day_data) > 0:
            first_price = day_data.iloc[0]['Open']

            if len(day_data) >= 2:
                # High across the first two hourly bars vs. the open.
                first_hour_high = day_data.iloc[0:2]['High'].max()
                first_hour_change = ((first_hour_high - first_price) / first_price) * 100
            else:
                # Only one bar for the day — fall back to its close.
                first_day_close = day_data.iloc[-1]['Close']
                first_hour_change = ((first_day_close - first_price) / first_price) * 100

            end_of_day_close = day_data.iloc[-1]['Close']
            day_change = ((end_of_day_close - first_price) / first_price) * 100

            return first_hour_change, day_change, first_price

        # BUG FIX: the original fell through here and returned a bare None,
        # which broke 3-tuple unpacking at the call site.
        return None, None, None

    except Exception as e:
        logger.warning(f"Error getting {symbol} performance: {e}")

    return None, None, None
|
|
|
|
|
def run_trading_history_backtest():
    """Run backtest on all our actual investments.

    For every symbol we ever bought, gathers news from the 12 hours before
    the first fill, scores it with the sentiment model, and compares the
    predicted move against the actual first-hour move from Yahoo Finance.

    Returns:
        tuple[str, pandas.DataFrame]: (summary text, per-symbol results
        table); on error returns (error message, empty DataFrame).
    """
    logger.info("Starting trading history backtesting...")

    try:
        orders = get_order_history()

        if not orders:
            return "❌ No trading history found", pd.DataFrame()

        # Unique symbols with at least one buy order.
        symbols_traded = set()
        for order in orders:
            if hasattr(order, 'symbol') and order.symbol and order.side.value == 'buy':
                symbols_traded.add(order.symbol)

        logger.info(f"Found {len(symbols_traded)} unique symbols traded")

        results = []
        total_error = 0           # sum of |predicted - actual| across valid rows
        correct_directions = 0    # rows where predicted sign matched actual sign
        valid_results = 0         # rows with actual market data available

        summary_text = f"🎯 TRADING HISTORY BACKTESTING\n"
        summary_text += f"Testing sentiment analysis on {len(symbols_traded)} IPOs we actually invested in...\n"
        summary_text += f"Using news from 12 hours before our investment time\n\n"

        for symbol in sorted(symbols_traded):
            symbol_orders = [o for o in orders if o.symbol == symbol]
            buy_orders = [o for o in symbol_orders if o.side.value == 'buy']

            if buy_orders:
                # Investment time = fill time of the earliest buy.
                first_buy_order = min(buy_orders, key=lambda x: x.filled_at)
                investment_time = first_buy_order.filled_at

                total_bought = sum(float(o.filled_qty or 0) for o in buy_orders)
                total_cost = sum(float(o.filled_qty or 0) * float(o.filled_avg_price or 0) for o in buy_orders)
                avg_buy_price = total_cost / total_bought if total_bought > 0 else 0

                logger.info(f"Analyzing {symbol} (invested {investment_time.strftime('%Y-%m-%d %H:%M')})...")

                # Reddit + Google News from the 12h window before the fill.
                news_items = get_pre_investment_news(symbol, investment_time)

                # NOTE(review): when news_items is empty, source_breakdown is
                # {} and the source_breakdown['Reddit'] lookups below would
                # raise KeyError (caught by the outer except) — confirm.
                avg_sentiment, predicted_change, prediction_label, source_breakdown = analyze_pre_investment_sentiment(news_items)

                # Actual post-investment market move (None if no data).
                first_hour_change, day_change, actual_open = get_actual_performance(symbol, investment_time, avg_buy_price)

                if first_hour_change is not None:
                    error = abs(predicted_change - first_hour_change)
                    total_error += error
                    valid_results += 1

                    # Directional hit/miss based on sign agreement.
                    predicted_direction = "UP" if predicted_change > 0 else "DOWN" if predicted_change < 0 else "FLAT"
                    actual_direction = "UP" if first_hour_change > 0 else "DOWN" if first_hour_change < 0 else "FLAT"
                    direction_correct = predicted_direction == actual_direction

                    if direction_correct:
                        correct_directions += 1

                    reddit_items = source_breakdown['Reddit']
                    news_items_found = source_breakdown['Google News']

                    # Most-polarizing Reddit post (largest |sentiment|).
                    top_reddit_title = ""
                    if reddit_items:
                        top_reddit = max(reddit_items, key=lambda x: abs(x['sentiment']))
                        top_reddit_title = top_reddit['title']

                    # Most-polarizing news headline.
                    top_news_title = ""
                    if news_items_found:
                        top_news = max(news_items_found, key=lambda x: abs(x['sentiment']))
                        top_news_title = top_news['title']

                    result = {
                        'Symbol': symbol,
                        'Investment Date': investment_time.strftime('%Y-%m-%d'),
                        'Investment Price': f"${avg_buy_price:.2f}",
                        'Predicted Change': f"{predicted_change:+.1f}%",
                        'Actual 1H Change': f"{first_hour_change:+.1f}%",
                        'Error': f"{error:.1f}%",
                        'Direction': '✅ Correct' if direction_correct else '❌ Wrong',
                        'Sentiment': prediction_label.title(),
                        'News Sources': len(news_items),
                        'Reddit Posts': len(reddit_items),
                        'Top Reddit': top_reddit_title,
                        'Top News': top_news_title
                    }

                else:
                    # No market data — record the prediction side only.
                    result = {
                        'Symbol': symbol,
                        'Investment Date': investment_time.strftime('%Y-%m-%d'),
                        'Investment Price': f"${avg_buy_price:.2f}",
                        'Predicted Change': f"{predicted_change:+.1f}%",
                        'Actual 1H Change': 'N/A',
                        'Error': 'N/A',
                        'Direction': '❓ No Data',
                        'Sentiment': prediction_label.title(),
                        'News Sources': len(news_items),
                        'Reddit Posts': len(source_breakdown['Reddit']),
                        'Top Reddit': '',
                        'Top News': ''
                    }

                results.append(result)

        # Aggregate accuracy over the rows that had real market data.
        if valid_results > 0:
            avg_error = total_error / valid_results
            direction_accuracy = (correct_directions / valid_results) * 100

            summary_text += f"📈 BACKTESTING RESULTS SUMMARY:\n"
            summary_text += f" Total Investments Tested: {len(results)}\n"
            summary_text += f" Valid Results: {valid_results}\n"
            summary_text += f" Average Error: {avg_error:.1f}%\n"
            summary_text += f" Direction Accuracy: {direction_accuracy:.1f}% ({correct_directions}/{valid_results})\n\n"

            if direction_accuracy >= 60:
                summary_text += f" ✅ Strong predictive value!\n"
            elif direction_accuracy >= 40:
                summary_text += f" ⚡ Some predictive value\n"
            else:
                summary_text += f" ❌ Needs improvement\n"
        else:
            summary_text += f"❌ No valid results available for analysis\n"

        df = pd.DataFrame(results)

        return summary_text, df

    except Exception as e:
        error_msg = f"❌ Error running backtesting: {str(e)}"
        logger.error(error_msg)
        return error_msg, pd.DataFrame()
|
|
|
|
|
def clear_terminal():
    """Reset the VM terminal widget to its idle prompt string."""
    return "🖥️ VM Terminal Ready\n$ "
|
|
|
|
|
def run_quick_command(cmd):
    """Build a click handler that runs a fixed VM command.

    Returns a one-argument callable suitable for wiring to a Gradio
    button: it receives the current terminal output and forwards it,
    together with ``cmd``, to run_vm_command.
    """
    def _handler(current_output):
        # Bound at call time so each button keeps its own command.
        return run_vm_command(cmd, current_output)
    return _handler
|
|
|
|
|
|
|
|
custom_css = """ |
|
|
.gradio-container { |
|
|
font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif !important; |
|
|
background: #fafafa !important; |
|
|
} |
|
|
|
|
|
.main-header { |
|
|
background: linear-gradient(135deg, #0070f3 0%, #0051a5 100%); |
|
|
color: white; |
|
|
padding: 2rem; |
|
|
border-radius: 16px; |
|
|
margin-bottom: 2rem; |
|
|
box-shadow: 0 10px 40px rgba(0, 112, 243, 0.3); |
|
|
} |
|
|
|
|
|
.metric-card { |
|
|
background: white; |
|
|
border: 1px solid #eaeaea; |
|
|
border-radius: 12px; |
|
|
padding: 1.5rem; |
|
|
box-shadow: 0 4px 16px rgba(0, 0, 0, 0.04); |
|
|
transition: all 0.3s ease; |
|
|
} |
|
|
|
|
|
.metric-card:hover { |
|
|
box-shadow: 0 8px 32px rgba(0, 0, 0, 0.12); |
|
|
transform: translateY(-4px); |
|
|
} |
|
|
|
|
|
.gr-button { |
|
|
background: linear-gradient(135deg, #0070f3 0%, #0051a5 100%) !important; |
|
|
color: white !important; |
|
|
border: none !important; |
|
|
border-radius: 12px !important; |
|
|
font-weight: 600 !important; |
|
|
padding: 1rem 2rem !important; |
|
|
transition: all 0.3s ease !important; |
|
|
box-shadow: 0 4px 16px rgba(0, 112, 243, 0.3) !important; |
|
|
} |
|
|
|
|
|
.gr-button:hover { |
|
|
transform: translateY(-2px) !important; |
|
|
box-shadow: 0 8px 32px rgba(0, 112, 243, 0.4) !important; |
|
|
} |
|
|
|
|
|
.gr-textbox, .gr-dataframe { |
|
|
border: 1px solid #eaeaea !important; |
|
|
border-radius: 12px !important; |
|
|
background: white !important; |
|
|
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.04) !important; |
|
|
} |
|
|
|
|
|
.plotly-graph-div { |
|
|
border-radius: 16px !important; |
|
|
box-shadow: 0 4px 20px rgba(0, 0, 0, 0.08) !important; |
|
|
background: white !important; |
|
|
} |
|
|
|
|
|
.status-invested { color: #00d647 !important; font-weight: 600 !important; } |
|
|
.status-eligible { color: #f5a623 !important; font-weight: 600 !important; } |
|
|
.status-wrong { color: #8b949e !important; } |
|
|
.status-unknown { color: #ff0080 !important; } |
|
|
|
|
|
/* Investment Performance Table Styling */ |
|
|
.investment-table { |
|
|
width: 100%; |
|
|
border-collapse: separate; |
|
|
border-spacing: 0; |
|
|
background: white; |
|
|
border-radius: 16px; |
|
|
overflow: hidden; |
|
|
box-shadow: 0 4px 20px rgba(0, 0, 0, 0.08); |
|
|
} |
|
|
|
|
|
.investment-table th { |
|
|
background: linear-gradient(135deg, #0070f3 0%, #0051a5 100%); |
|
|
color: white; |
|
|
padding: 1rem; |
|
|
font-weight: 600; |
|
|
text-align: left; |
|
|
border: none; |
|
|
} |
|
|
|
|
|
.investment-table th:first-child { |
|
|
border-top-left-radius: 16px; |
|
|
} |
|
|
|
|
|
.investment-table th:last-child { |
|
|
border-top-right-radius: 16px; |
|
|
} |
|
|
|
|
|
.investment-table td { |
|
|
padding: 1rem; |
|
|
border-bottom: 1px solid #f5f5f5; |
|
|
font-weight: 500; |
|
|
} |
|
|
|
|
|
.profit-row { |
|
|
background: rgba(0, 214, 71, 0.1) !important; |
|
|
border-left: 4px solid #00d647; |
|
|
} |
|
|
|
|
|
.loss-row { |
|
|
background: rgba(255, 0, 128, 0.1) !important; |
|
|
border-left: 4px solid #ff0080; |
|
|
} |
|
|
|
|
|
.neutral-row { |
|
|
background: rgba(139, 148, 158, 0.05) !important; |
|
|
border-left: 4px solid #8b949e; |
|
|
} |
|
|
|
|
|
.investment-table tr:last-child td:first-child { |
|
|
border-bottom-left-radius: 16px; |
|
|
} |
|
|
|
|
|
.investment-table tr:last-child td:last-child { |
|
|
border-bottom-right-radius: 16px; |
|
|
} |
|
|
|
|
|
.profit-positive { color: #00d647 !important; font-weight: 600 !important; } |
|
|
.profit-negative { color: #ff0080 !important; font-weight: 600 !important; } |
|
|
.profit-neutral { color: #8b949e !important; } |
|
|
|
|
|
.terminal-container { |
|
|
background: #000000 !important; |
|
|
border: 1px solid #333 !important; |
|
|
border-radius: 8px !important; |
|
|
padding: 0 !important; |
|
|
margin: 1rem 0 !important; |
|
|
height: 500px !important; |
|
|
overflow-y: auto !important; |
|
|
display: flex !important; |
|
|
flex-direction: column !important; |
|
|
/* Hide scrollbars but keep functionality */ |
|
|
scrollbar-width: none !important; /* Firefox */ |
|
|
-ms-overflow-style: none !important; /* IE/Edge */ |
|
|
} |
|
|
|
|
|
.terminal-container::-webkit-scrollbar { |
|
|
display: none !important; /* Chrome/Safari/Webkit */ |
|
|
} |
|
|
|
|
|
.terminal-display { |
|
|
font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', 'Consolas', monospace !important; |
|
|
background: #000000 !important; |
|
|
color: #ffffff !important; |
|
|
padding: 1rem !important; |
|
|
font-size: 14px !important; |
|
|
line-height: 1.4 !important; |
|
|
white-space: pre-wrap !important; |
|
|
word-wrap: break-word !important; |
|
|
margin: 0 !important; |
|
|
flex-grow: 1 !important; |
|
|
overflow-anchor: none !important; |
|
|
/* Always stick to bottom */ |
|
|
display: flex !important; |
|
|
flex-direction: column !important; |
|
|
justify-content: flex-end !important; |
|
|
} |
|
|
|
|
|
.terminal-display::-webkit-scrollbar { |
|
|
display: none !important; /* Chrome/Safari/Webkit */ |
|
|
} |
|
|
|
|
|
.terminal-input input { |
|
|
font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', 'Consolas', monospace !important; |
|
|
background: #1a1a1a !important; |
|
|
color: #ffffff !important; |
|
|
border: 1px solid #333 !important; |
|
|
border-radius: 4px !important; |
|
|
font-size: 14px !important; |
|
|
} |
|
|
|
|
|
.terminal-input input:focus { |
|
|
border-color: #00ff00 !important; |
|
|
box-shadow: 0 0 5px rgba(0, 255, 0, 0.3) !important; |
|
|
} |
|
|
|
|
|
/* Force Gradio HTML to stick to bottom */ |
|
|
.gr-html { |
|
|
height: 500px !important; |
|
|
overflow-y: auto !important; |
|
|
scrollbar-width: none !important; |
|
|
-ms-overflow-style: none !important; |
|
|
display: flex !important; |
|
|
flex-direction: column !important; |
|
|
} |
|
|
|
|
|
.gr-html::-webkit-scrollbar { |
|
|
display: none !important; |
|
|
} |
|
|
|
|
|
/* Force content to bottom with CSS anchor */ |
|
|
.gr-html > div { |
|
|
display: flex !important; |
|
|
flex-direction: column !important; |
|
|
justify-content: flex-end !important; |
|
|
min-height: 100% !important; |
|
|
} |
|
|
""" |
|
|
|
|
|
def create_dashboard():
    """Build and return the complete Gradio Blocks dashboard.

    Constructs five tabs (Portfolio Overview, IPO Discoveries, Investment
    Performance, VM Terminal, System Logs), wires every button/submit event
    to its refresh/debug/statistics handler, and registers ``demo.load``
    callbacks so the main panels populate on page load.

    Returns:
        gr.Blocks: The fully wired dashboard, ready for ``launch()``.

    Raises:
        Exception: Re-raised after logging if any part of UI construction
            or handler wiring fails.
    """
    logger.info("🎨 Creating Gradio dashboard interface...")

    try:
        with gr.Blocks(
            title="🚀 Premium Trading Dashboard",
            theme=gr.themes.Soft(primary_hue="blue"),
            css=custom_css
        ) as demo:
            logger.info("🖼️ Dashboard blocks created successfully")

            # --- Header banner (styled by .main-header in custom_css) ---
            gr.HTML("""
            <div class="main-header">
                <h1 style="margin: 0; font-size: 3rem; font-weight: 800; text-shadow: 0 2px 4px rgba(0,0,0,0.1);">
                    🚀 Premium Trading Dashboard
                </h1>
                <p style="margin: 1rem 0 0 0; font-size: 1.3rem; opacity: 0.95;">
                    Real-time portfolio monitoring with IPO discovery analytics
                </p>
            </div>
            """)

            with gr.Tabs():

                # --- Tab 1: account metrics + portfolio history chart ---
                with gr.Tab("📊 Portfolio Overview"):
                    gr.Markdown("## 💼 Account Summary")
                    with gr.Row():
                        portfolio_value = gr.Textbox(label="💰 Portfolio Value", interactive=False, elem_classes=["metric-card"])
                        buying_power = gr.Textbox(label="💳 Buying Power", interactive=False, elem_classes=["metric-card"])
                        cash = gr.Textbox(label="💵 Cash", interactive=False, elem_classes=["metric-card"])
                        day_change = gr.Textbox(label="📈 Day Change", interactive=False, elem_classes=["metric-card"])
                        equity = gr.Textbox(label="🏦 Total Equity", interactive=False, elem_classes=["metric-card"])

                    gr.Markdown("## 📈 Portfolio Performance")
                    portfolio_chart = gr.Plot(label="Portfolio Value Over Time")

                    refresh_overview_btn = gr.Button("🔄 Refresh Portfolio Data", variant="primary", size="lg")

                # --- Tab 2: IPO discovery stats, decision chart, and table ---
                with gr.Tab("🔍 IPO Discoveries"):
                    gr.Markdown("## 📊 IPO Discovery Analytics")

                    with gr.Row():
                        total_ipos = gr.Textbox(label="🎯 Total IPOs Detected", interactive=False, elem_classes=["metric-card"])
                        ipos_invested = gr.Textbox(label="💰 IPOs Invested", interactive=False, elem_classes=["metric-card"])
                        cs_stocks = gr.Textbox(label="📈 CS Stocks Found", interactive=False, elem_classes=["metric-card"])
                        investment_rate = gr.Textbox(label="🎲 Investment Rate", interactive=False, elem_classes=["metric-card"])
                        last_updated = gr.Textbox(label="🕒 Last Updated", interactive=False, elem_classes=["metric-card"])

                    with gr.Row():
                        with gr.Column(scale=1):
                            ipo_chart = gr.Plot(label="Investment Decision Breakdown")

                        with gr.Column(scale=2):
                            gr.Markdown("## 🆕 Recent IPO Discoveries")
                            ipo_table = gr.Dataframe(
                                label="IPO Discoveries with Investment Decisions",
                                elem_classes=["gr-dataframe"]
                            )

                    refresh_ipo_btn = gr.Button("🔄 Refresh IPO Data", variant="primary", size="lg")

                # --- Tab 3: P&L table, statistics buttons, and debug tools ---
                with gr.Tab("💰 Investment Performance"):
                    gr.Markdown("## 🎯 IPO Investment Performance")
                    gr.Markdown("### Track profit/loss on your IPO investments with **real-time sentiment analysis**")
                    gr.Markdown("🧠 **NEW**: Each row automatically shows sentiment predictions from Reddit + Google News!")

                    # Rendered as raw HTML (not gr.Dataframe) so rows can carry
                    # the profit/loss CSS classes defined in custom_css.
                    investment_performance_table = gr.HTML(
                        label="IPO Investment P&L Analysis",
                        value="<div style='text-align: center; padding: 2rem; color: #666;'>Click Refresh to load investment performance data</div>"
                    )
                    refresh_investment_btn = gr.Button("🔄 Refresh Investment Performance", variant="primary", size="lg")

                    gr.Markdown("### 🧮 Trading Statistics & Analysis")
                    gr.Markdown("Calculate interesting metrics from your trading data")

                    with gr.Row():
                        calc_sequential_btn = gr.Button("📈 Sequential Reinvestment P&L%", variant="secondary", size="sm")
                        calc_equal_weight_btn = gr.Button("⚖️ Equal Weight Portfolio P&L%", variant="secondary", size="sm")
                        calc_best_worst_btn = gr.Button("🏆 Best vs Worst Performers", variant="secondary", size="sm")

                    with gr.Row():
                        calc_win_rate_btn = gr.Button("🎯 Win Rate & Avg Returns", variant="secondary", size="sm")
                        calc_risk_metrics_btn = gr.Button("⚠️ Risk Metrics & Volatility", variant="secondary", size="sm")
                        calc_time_analysis_btn = gr.Button("⏰ Time-based Performance", variant="secondary", size="sm")

                    # Shared output box: every statistics button writes here.
                    stats_output = gr.Textbox(
                        label="Statistical Analysis Results",
                        lines=8,
                        interactive=False,
                        elem_classes=["gr-textbox"]
                    )

                    gr.Markdown("### 🔧 Debug API Calls")
                    # Shared output box: every debug button writes here.
                    debug_output = gr.Textbox(
                        label="Debug Output",
                        lines=10,
                        interactive=False
                    )

                    with gr.Row():
                        debug_orders_btn = gr.Button("🔍 Debug Order History", variant="secondary")
                        debug_positions_btn = gr.Button("📊 Debug Current Positions", variant="secondary")
                        debug_ipos_btn = gr.Button("🎯 Debug IPO Data", variant="secondary")
                        debug_account_btn = gr.Button("💼 Debug Account Info", variant="secondary")

                # --- Tab 4: remote command execution against the trading VM ---
                with gr.Tab("💻 VM Terminal"):
                    gr.Markdown("## 🖥️ Remote VM Terminal")
                    gr.Markdown("### Execute commands directly on your trading VM")

                    # Session-scoped accumulated terminal transcript.
                    command_history = gr.State("")

                    with gr.Row():
                        with gr.Column(scale=4):
                            command_input = gr.Textbox(
                                label="Command (Press Enter to run)",
                                placeholder="Enter command to run on VM...",
                                interactive=True,
                                elem_classes=["terminal-input"]
                            )
                        with gr.Column(scale=1):
                            run_command_btn = gr.Button("▶️ Run", variant="primary", size="lg")
                            clear_terminal_btn = gr.Button("🗑️ Clear", variant="secondary", size="lg")

                    terminal_output = gr.HTML(
                        label="Terminal Output",
                        value='<div class="terminal-display" id="terminal-content">🖥️ VM Terminal Ready<br>$ </div>',
                        elem_classes=["terminal-container"]
                    )

                    # Quick-command buttons: each runs a canned shell command.
                    gr.Markdown("**📁 File & System Commands:**")
                    with gr.Row():
                        quick_ls = gr.Button("📁 ls -la", size="sm")
                        quick_pwd = gr.Button("📍 pwd", size="sm")
                        quick_ps = gr.Button("🔄 ps aux | grep python", size="sm")
                        quick_vm_status = gr.Button("🖥️ uptime && df -h", size="sm")
                        quick_who = gr.Button("👤 whoami", size="sm")

                    gr.Markdown("**📋 Log Files:**")
                    with gr.Row():
                        quick_script_log = gr.Button("📜 tail -50 script.log", size="sm")
                        quick_server_log = gr.Button("🖥️ tail -50 server.log", size="sm")
                        quick_cron_log = gr.Button("⏰ tail -50 /var/log/cron", size="sm")
                        quick_portfolio = gr.Button("💼 cat portfolio.txt", size="sm")
                        quick_tickers = gr.Button("🎯 head -20 new_tickers_log.csv", size="sm")

                    gr.Markdown("**🔍 Search & Analysis:**")
                    with gr.Row():
                        quick_errors = gr.Button("🚨 grep -i error script.log | tail -10", size="sm")
                        quick_trades = gr.Button("💰 grep -i 'buy\\|sell' script.log | tail -10", size="sm")
                        quick_ipos = gr.Button("🆕 grep -i 'new ticker' script.log | tail -10", size="sm")

                # --- Tab 5: parsed + raw log viewers ---
                with gr.Tab("📋 System Logs"):
                    gr.Markdown("## 🖥️ Trading Bot Activity")

                    with gr.Row():
                        with gr.Column():
                            gr.Markdown("### 🎯 Parsed Logs (Color Coded)")
                            system_logs = gr.Textbox(
                                label="Recent System Activity",
                                lines=20,
                                max_lines=20,
                                interactive=False,
                                elem_classes=["gr-textbox"]
                            )

                        with gr.Column():
                            gr.Markdown("### 📄 Raw Cron Logs")
                            raw_logs = gr.Textbox(
                                label="Raw Log Output",
                                lines=20,
                                max_lines=20,
                                interactive=False,
                                elem_classes=["gr-textbox"]
                            )

                    refresh_logs_btn = gr.Button("🔄 Refresh All Logs", variant="primary", size="lg")

            # --- Footer ---
            gr.HTML("""
            <div style="text-align: center; padding: 2rem; color: #666; border-top: 1px solid #eaeaea; margin-top: 3rem; background: white; border-radius: 16px;">
                <p style="font-size: 1.1rem;"><strong>🤖 Automated Trading Dashboard</strong></p>
                <p style="font-size: 0.95rem;">Real-time data from Alpaca Markets + VM Analytics | Built with ❤️</p>
            </div>
            """)

            logger.info("🔗 Setting up event handlers...")

            # Portfolio Overview: one button triggers two independent updates
            # (metric textboxes and the history chart).
            refresh_overview_btn.click(
                fn=refresh_account_overview,
                outputs=[portfolio_value, buying_power, cash, day_change, equity]
            )
            refresh_overview_btn.click(
                fn=create_portfolio_chart,
                outputs=[portfolio_chart]
            )

            # IPO Discoveries: stats, chart, and table refresh in parallel.
            refresh_ipo_btn.click(
                fn=refresh_vm_stats,
                outputs=[total_ipos, ipos_invested, cs_stocks, investment_rate, last_updated]
            )
            refresh_ipo_btn.click(
                fn=create_ipo_discovery_chart,
                outputs=[ipo_chart]
            )
            refresh_ipo_btn.click(
                fn=refresh_ipo_discoveries_table,
                outputs=[ipo_table]
            )

            # Investment Performance table refresh.
            refresh_investment_btn.click(
                fn=refresh_investment_performance_html,
                outputs=[investment_performance_table]
            )

            # Debug buttons: all write into the shared debug_output box.
            debug_orders_btn.click(
                fn=debug_order_history,
                outputs=[debug_output]
            )
            debug_positions_btn.click(
                fn=debug_current_positions,
                outputs=[debug_output]
            )
            debug_ipos_btn.click(
                fn=debug_ipo_data,
                outputs=[debug_output]
            )
            debug_account_btn.click(
                fn=debug_account_info,
                outputs=[debug_output]
            )

            # Statistics buttons: all write into the shared stats_output box.
            calc_sequential_btn.click(
                fn=calculate_sequential_reinvestment,
                outputs=[stats_output]
            )
            calc_equal_weight_btn.click(
                fn=calculate_equal_weight_portfolio,
                outputs=[stats_output]
            )
            calc_best_worst_btn.click(
                fn=calculate_best_worst_performers,
                outputs=[stats_output]
            )
            calc_win_rate_btn.click(
                fn=calculate_win_rate_metrics,
                outputs=[stats_output]
            )
            calc_risk_metrics_btn.click(
                fn=calculate_risk_metrics,
                outputs=[stats_output]
            )
            calc_time_analysis_btn.click(
                fn=calculate_time_analysis,
                outputs=[stats_output]
            )

            def run_and_clear(cmd, output, history):
                # Run `cmd` on the VM, then wrap the transcript in terminal
                # HTML plus an inline <script> that forces the view to scroll.
                # Returns (html, "", history) so the command box is cleared.
                new_output, _, new_history = run_vm_command(cmd, output, history)

                # Unique element id per output so the script can find the
                # freshly rendered <pre> (hash keeps it short and stable).
                unique_id = f"terminal-{hash(new_output) % 100000}"

                # NOTE: doubled braces {{ }} are f-string escapes for the
                # literal JS braces; only {unique_id}/{new_output} interpolate.
                html_output = f'''<div class="terminal-display">
<pre id="{unique_id}" style="margin: 0; font-family: inherit; color: inherit; background: inherit; white-space: pre-wrap; word-wrap: break-word;">{new_output}</pre>
</div>
<script>
// Force immediate scroll to top by targeting multiple elements
setTimeout(() => {{
    // Method 1: All possible scroll containers
    document.querySelectorAll('.gr-html, .terminal-container, .terminal-display').forEach(el => {{
        if (el && el.scrollTop !== undefined) el.scrollTop = 0;
    }});

    // Method 2: Force scroll on the new element's containers
    const newEl = document.getElementById('{unique_id}');
    if (newEl) {{
        let parent = newEl.parentElement;
        while (parent) {{
            if (parent.scrollTop !== undefined) parent.scrollTop = 0;
            parent = parent.parentElement;
        }}
    }}

    // Method 3: Nuclear option - scroll everything
    window.scrollTo(0, 0);
}}, 5);
</script>'''
                return html_output, "", new_history

            def clear_and_reset():
                # Restore the initial terminal HTML and empty the input box.
                return '<div class="terminal-display" id="terminal-content">🖥️ VM Terminal Ready<br>$ </div>', ""

            # Terminal wiring: button click and Enter-in-textbox share one handler.
            run_command_btn.click(
                fn=run_and_clear,
                inputs=[command_input, terminal_output, command_history],
                outputs=[terminal_output, command_input, command_history]
            )
            command_input.submit(
                fn=run_and_clear,
                inputs=[command_input, terminal_output, command_history],
                outputs=[terminal_output, command_input, command_history]
            )
            clear_terminal_btn.click(
                fn=clear_and_reset,
                outputs=[terminal_output, command_input]
            )

            # Quick-command buttons: each lambda fixes the command string and
            # reuses run_and_clear so output/history handling stays uniform.
            quick_ls.click(
                fn=lambda output, hist: run_and_clear("ls -la", output, hist),
                inputs=[terminal_output, command_history],
                outputs=[terminal_output, command_input, command_history]
            )
            quick_pwd.click(
                fn=lambda output, hist: run_and_clear("pwd", output, hist),
                inputs=[terminal_output, command_history],
                outputs=[terminal_output, command_input, command_history]
            )
            quick_ps.click(
                fn=lambda output, hist: run_and_clear("ps aux | grep python", output, hist),
                inputs=[terminal_output, command_history],
                outputs=[terminal_output, command_input, command_history]
            )
            quick_vm_status.click(
                fn=lambda output, hist: run_and_clear("uptime && df -h", output, hist),
                inputs=[terminal_output, command_history],
                outputs=[terminal_output, command_input, command_history]
            )
            quick_who.click(
                fn=lambda output, hist: run_and_clear("whoami", output, hist),
                inputs=[terminal_output, command_history],
                outputs=[terminal_output, command_input, command_history]
            )

            quick_script_log.click(
                fn=lambda output, hist: run_and_clear("tail -50 script.log", output, hist),
                inputs=[terminal_output, command_history],
                outputs=[terminal_output, command_input, command_history]
            )
            quick_server_log.click(
                fn=lambda output, hist: run_and_clear("tail -50 server.log", output, hist),
                inputs=[terminal_output, command_history],
                outputs=[terminal_output, command_input, command_history]
            )
            quick_cron_log.click(
                fn=lambda output, hist: run_and_clear("tail -50 /var/log/cron", output, hist),
                inputs=[terminal_output, command_history],
                outputs=[terminal_output, command_input, command_history]
            )
            quick_portfolio.click(
                fn=lambda output, hist: run_and_clear("cat portfolio.txt", output, hist),
                inputs=[terminal_output, command_history],
                outputs=[terminal_output, command_input, command_history]
            )
            quick_tickers.click(
                fn=lambda output, hist: run_and_clear("head -20 new_tickers_log.csv", output, hist),
                inputs=[terminal_output, command_history],
                outputs=[terminal_output, command_input, command_history]
            )

            quick_errors.click(
                fn=lambda output, hist: run_and_clear("grep -i error script.log | tail -10", output, hist),
                inputs=[terminal_output, command_history],
                outputs=[terminal_output, command_input, command_history]
            )
            quick_trades.click(
                fn=lambda output, hist: run_and_clear("grep -i 'buy\\|sell' script.log | tail -10", output, hist),
                inputs=[terminal_output, command_history],
                outputs=[terminal_output, command_input, command_history]
            )
            quick_ipos.click(
                fn=lambda output, hist: run_and_clear("grep -i 'new ticker' script.log | tail -10", output, hist),
                inputs=[terminal_output, command_history],
                outputs=[terminal_output, command_input, command_history]
            )

            # System Logs tab: one button refreshes both panes.
            refresh_logs_btn.click(
                fn=refresh_system_logs,
                outputs=[system_logs]
            )
            refresh_logs_btn.click(
                fn=refresh_raw_logs,
                outputs=[raw_logs]
            )

            # Populate the main panels on page load.
            demo.load(
                fn=refresh_account_overview,
                outputs=[portfolio_value, buying_power, cash, day_change, equity]
            )
            demo.load(fn=create_portfolio_chart, outputs=[portfolio_chart])
            demo.load(
                fn=refresh_vm_stats,
                outputs=[total_ipos, ipos_invested, cs_stocks, investment_rate, last_updated]
            )
            demo.load(fn=create_ipo_discovery_chart, outputs=[ipo_chart])
            demo.load(fn=refresh_ipo_discoveries_table, outputs=[ipo_table])
            demo.load(fn=refresh_investment_performance_html, outputs=[investment_performance_table])

            logger.info("✅ All event handlers configured successfully")

        return demo

    except Exception as e:
        logger.error(f"❌ Failed to create dashboard: {e}")
        raise
|
|
|
|
|
|
|
|
logger.info("🏗️ Building dashboard...") |
|
|
try: |
|
|
demo = create_dashboard() |
|
|
logger.info("✅ Dashboard created successfully!") |
|
|
except Exception as e: |
|
|
logger.error(f"❌ Dashboard creation failed: {e}") |
|
|
raise |
|
|
|
|
|
if __name__ == "__main__": |
|
|
logger.info("🚀 Launching dashboard server...") |
|
|
try: |
|
|
demo.launch() |
|
|
logger.info("✅ Dashboard launched successfully!") |
|
|
except Exception as e: |
|
|
logger.error(f"❌ Dashboard launch failed: {e}") |
|
|
raise |