#!/usr/bin/env python3
"""
Premium Trading Dashboard - Full Enhanced Version
Beautiful dashboard with sentiment analysis, Reddit integration, and advanced features
"""
import os
import sys
import pandas as pd
import gradio as gr
import plotly.graph_objects as go
import plotly.express as px
from datetime import datetime, timedelta, timezone
import logging
import requests
import time
import json
import re
import nltk
import feedparser
from urllib.parse import quote

# Import dependencies with fallback
try:
    from alpaca.trading.client import TradingClient
    from alpaca.trading.requests import GetOrdersRequest, GetPortfolioHistoryRequest
    from alpaca.trading.enums import OrderStatus, OrderSide
    from alpaca.data.timeframe import TimeFrame
    from alpaca.data.historical import StockHistoricalDataClient
    ALPACA_AVAILABLE = True
except ImportError:
    ALPACA_AVAILABLE = False

try:
    from textblob import TextBlob
    from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
    SENTIMENT_AVAILABLE = True
except ImportError:
    SENTIMENT_AVAILABLE = False

try:
    import yfinance as yf
    YF_AVAILABLE = True
except ImportError:
    YF_AVAILABLE = False

# API Keys and Configuration
API_KEY = os.getenv('ALPACA_API_KEY', 'PK2FD9B2S86LHR7ZBHG1')
SECRET_KEY = os.getenv('ALPACA_SECRET_KEY', 'QPmGPDgbPArvHv6cldBXc7uWddapYcIAnBhtkuBW')
VM_API_URL = os.getenv('VM_API_URL', 'http://34.56.193.18:8090')

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
logger.info("🚀 Starting Premium Trading Dashboard - Full Enhanced Version with 1-Hour P&L")

# Download NLTK data
try:
    nltk.download('punkt', quiet=True)
    nltk.download('vader_lexicon', quiet=True)
    nltk.download('brown', quiet=True)
    logger.info("✅ NLTK data downloaded")
except Exception as e:
    logger.warning(f"⚠️ NLTK download failed: {e}")

# Initialize sentiment analyzers
sentiment_analyzer = None
if SENTIMENT_AVAILABLE:
    try:
        sentiment_analyzer = SentimentIntensityAnalyzer()
        logger.info("✅ VADER sentiment analyzer initialized")
    except Exception as e:
        logger.warning(f"⚠️ Sentiment analyzer failed: {e}")

# Initialize Alpaca clients
trading_client = None
data_client = None
if ALPACA_AVAILABLE:
    try:
        trading_client = TradingClient(api_key=API_KEY, secret_key=SECRET_KEY)
        data_client = StockHistoricalDataClient(API_KEY, SECRET_KEY)
        logger.info("✅ Alpaca clients initialized")
    except Exception as e:
        logger.warning(f"⚠️ Alpaca clients failed: {e}")

# HTTP headers for Reddit API
headers = {
    'User-Agent': 'TradingBot/1.0 (by u/TradingBot)'
}

# Color scheme
COLORS = {
    'primary': '#0070f3',
    'success': '#00d647',
    'error': '#ff0080',
    'warning': '#f5a623',
    'neutral': '#8b949e'
}


def fetch_from_vm(endpoint, default_value=None):
    """Fetch data from VM API server with fallback"""
    try:
        response = requests.get(f"{VM_API_URL}/api/{endpoint}", timeout=10)
        if response.status_code == 200:
            return response.json()
        else:
            logger.warning(f"VM API returned status {response.status_code}")
            return default_value
    except Exception as e:
        logger.warning(f"VM API error: {e}")
        return default_value
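
# Usage sketch: every VM call goes through fetch_from_vm, which hits
# f"{VM_API_URL}/api/<endpoint>" and returns default_value on any failure, so
# callers never need their own try/except. 'ipos' is the endpoint actually used
# below; the logging line here is illustrative only.
#
#   ipos = fetch_from_vm('ipos', default_value=[])
#   if not ipos:
#       logger.info("VM offline or no discoveries; falling back to demo view")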

def get_account_info():
    """Get comprehensive account information"""
    if not trading_client:
        # Return demo data
        return {
            'portfolio_value': 125000.00,
            'buying_power': 31250.00,
            'cash': 31250.00,
            'day_change': 2750.50,
            'equity': 125000.00,
            'day_change_percent': 2.25
        }
    try:
        account = trading_client.get_account()
        last_equity = float(account.last_equity) if account.last_equity else float(account.equity)
        current_equity = float(account.equity)
        day_change = current_equity - last_equity
        day_change_percent = (day_change / last_equity * 100) if last_equity > 0 else 0
        return {
            'portfolio_value': float(account.portfolio_value),
            'buying_power': float(account.buying_power),
            'cash': float(account.cash),
            'day_change': day_change,
            'equity': current_equity,
            'day_change_percent': day_change_percent
        }
    except Exception as e:
        logger.error(f"Account info error: {e}")
        return {'error': str(e)}


def get_order_history(limit=50):
    """Get recent order history"""
    if not trading_client:
        return []
    try:
        request = GetOrdersRequest(
            status='all',
            limit=limit
        )
        orders = trading_client.get_orders(filter=request)
        order_data = []
        for order in orders:
            order_data.append({
                'symbol': order.symbol,
                'side': order.side.value if hasattr(order.side, 'value') else str(order.side),
                'qty': float(order.qty) if order.qty else 0,
                'filled_qty': float(order.filled_qty) if order.filled_qty else 0,
                'status': order.status.value if hasattr(order.status, 'value') else str(order.status),
                'submitted_at': order.submitted_at.isoformat() if order.submitted_at else None,
                'filled_at': order.filled_at.isoformat() if order.filled_at else None,
                'filled_avg_price': float(order.filled_avg_price) if order.filled_avg_price else None
            })
        return order_data
    except Exception as e:
        logger.error(f"Order history error: {e}")
        return []


def get_reddit_posts(symbol, start_time, cutoff_time):
    """Enhanced Reddit search with multiple strategies"""
    logger.info(f"🔍 Searching Reddit for {symbol}...")
    reddit_posts = []
    subreddits = ['wallstreetbets', 'stocks', 'investing', 'SecurityAnalysis', 'ValueInvesting']
    search_terms = [symbol, f'{symbol} stock', f'{symbol} IPO', f'${symbol}', f'{symbol} earnings']
    for subreddit in subreddits:
        for search_term in search_terms:
            try:
                url = f"https://www.reddit.com/r/{subreddit}/search.json"
                params = {
                    'q': search_term,
                    'restrict_sr': 'true',
                    'limit': 10,
                    't': 'all',
                    'sort': 'relevance'
                }
                response = requests.get(url, params=params, headers=headers, timeout=10)
                if response.status_code == 200:
                    data = response.json()
                    posts_found = len(data.get('data', {}).get('children', []))
                    logger.info(f"Reddit: r/{subreddit} + '{search_term}' found {posts_found} posts")
                    for post in data.get('data', {}).get('children', []):
                        post_data = post.get('data', {})
                        if not post_data.get('title'):
                            continue
                        # Filter by time window
                        post_time = datetime.fromtimestamp(post_data.get('created_utc', 0), tz=timezone.utc)
                        if not (start_time <= post_time <= cutoff_time):
                            continue
                        # Check relevance
                        title_lower = post_data.get('title', '').lower()
                        body_lower = post_data.get('selftext', '').lower()
                        symbol_lower = symbol.lower()
                        if symbol_lower not in title_lower and symbol_lower not in body_lower:
                            continue
                        # Remove duplicates
                        post_id = post_data.get('id')
                        if any(p.get('id') == post_id for p in reddit_posts):
                            continue
                        reddit_posts.append({
                            'id': post_id,
                            'title': post_data.get('title', ''),
                            'selftext': post_data.get('selftext', ''),
                            'score': post_data.get('score', 0),
                            'num_comments': post_data.get('num_comments', 0),
                            'created_utc': post_data.get('created_utc', 0),
                            'subreddit': subreddit,
                            'search_term': search_term,
                            'url': f"https://reddit.com{post_data.get('permalink', '')}"
                        })
                time.sleep(0.1)  # Rate limiting
            except Exception as e:
                logger.warning(f"Reddit search error for r/{subreddit}: {e}")
                continue
    logger.info(f"📊 Total Reddit posts found for {symbol}: {len(reddit_posts)}")
    return reddit_posts
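
# Usage sketch for the Reddit search above (symbol and timestamps illustrative):
# collect posts from the 12 hours before a hypothetical entry, then rank by score.
#
#   entry_time = datetime(2024, 6, 3, 15, 0, tzinfo=timezone.utc)
#   posts = get_reddit_posts('NVDA', entry_time - timedelta(hours=12), entry_time)
#   hottest = sorted(posts, key=lambda p: p['score'], reverse=True)[:5]
#
# Note this uses Reddit's public search.json endpoint unauthenticated, so the
# User-Agent in `headers` above and the 0.1s sleep are doing the rate limiting.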

def get_google_news(symbol, start_time, cutoff_time):
    """Get Google News articles for symbol"""
    logger.info(f"📰 Searching Google News for {symbol}...")
    try:
        # Build search queries
        search_queries = [
            f'{symbol} stock',
            f'{symbol} IPO',
            f'{symbol} earnings',
            f'{symbol} company'
        ]
        all_articles = []
        for query in search_queries:
            try:
                encoded_query = quote(query)
                url = f"https://news.google.com/rss/search?q={encoded_query}&hl=en&gl=US&ceid=US:en"
                feed = feedparser.parse(url)
                for entry in feed.entries:
                    # Parse publication date; skip entries without a usable one
                    try:
                        pub_date = datetime(*entry.published_parsed[:6], tzinfo=timezone.utc)
                        if not (start_time <= pub_date <= cutoff_time):
                            continue
                    except Exception:
                        continue
                    # Check relevance
                    title_lower = entry.title.lower()
                    summary_lower = getattr(entry, 'summary', '').lower()
                    symbol_lower = symbol.lower()
                    if symbol_lower not in title_lower and symbol_lower not in summary_lower:
                        continue
                    article = {
                        'title': entry.title,
                        'summary': getattr(entry, 'summary', ''),
                        'published': entry.published,
                        'published_parsed': pub_date.isoformat(),
                        'link': entry.link,
                        'source': getattr(entry, 'source', {}).get('title', 'Google News'),
                        'search_query': query
                    }
                    # Remove duplicates
                    if not any(a.get('link') == article['link'] for a in all_articles):
                        all_articles.append(article)
                time.sleep(0.2)  # Rate limiting
            except Exception as e:
                logger.warning(f"Google News error for query '{query}': {e}")
                continue
        logger.info(f"📊 Total Google News articles found for {symbol}: {len(all_articles)}")
        return all_articles
    except Exception as e:
        logger.error(f"Google News search failed: {e}")
        return []
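
# For reference, the feed URL built above for the query "NVDA stock" (symbol
# illustrative) is:
#
#   https://news.google.com/rss/search?q=NVDA%20stock&hl=en&gl=US&ceid=US:en
#
# feedparser fetches and parses the RSS itself; the code only relies on the
# entry attributes .title, .summary, .link, .published, and .published_parsed.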

def analyze_sentiment(news_items):
    """Analyze sentiment of news items using VADER and TextBlob"""
    if not news_items or not SENTIMENT_AVAILABLE:
        return 0.0, 0.0, "Neutral", {'Reddit': [], 'Google News': []}
    logger.info(f"🧠 Analyzing sentiment for {len(news_items)} items...")
    sentiment_scores = []
    source_breakdown = {'Reddit': [], 'Google News': []}
    for item in news_items:
        try:
            # Determine text to analyze
            if 'title' in item and 'selftext' in item:
                # Reddit post: weight by engagement (score plus half a point per comment)
                text = f"{item['title']} {item.get('selftext', '')}"
                source = 'Reddit'
                weight = max(1, item.get('score', 1) + item.get('num_comments', 0) * 0.5)
            else:
                # News article
                text = f"{item['title']} {item.get('summary', '')}"
                source = 'Google News'
                weight = 1.0
            if not text.strip():
                continue
            # VADER sentiment
            vader_score = 0.0
            if sentiment_analyzer:
                vader_result = sentiment_analyzer.polarity_scores(text)
                vader_score = vader_result['compound']
            # TextBlob sentiment
            textblob_score = 0.0
            try:
                blob = TextBlob(text)
                textblob_score = blob.sentiment.polarity
            except Exception:
                pass
            # Combined score
            combined_score = (vader_score + textblob_score) / 2
            weighted_score = combined_score * weight
            sentiment_scores.append(weighted_score)
            source_breakdown[source].append({
                'text': text[:200] + '...' if len(text) > 200 else text,
                'vader_score': vader_score,
                'textblob_score': textblob_score,
                'combined_score': combined_score,
                'weight': weight,
                'weighted_score': weighted_score
            })
        except Exception as e:
            logger.warning(f"Sentiment analysis error: {e}")
            continue
    if not sentiment_scores:
        return 0.0, 0.0, "Neutral", source_breakdown
    # Calculate average sentiment. Note the weighted scores are averaged without
    # normalizing by total weight, so high-engagement Reddit posts deliberately
    # dominate the average.
    avg_sentiment = sum(sentiment_scores) / len(sentiment_scores)
    # Predict percentage change based on sentiment:
    # strong positive sentiment -> higher predicted gain,
    # strong negative sentiment -> higher predicted loss.
    if avg_sentiment > 0.5:
        predicted_change = min(15.0, avg_sentiment * 20)  # Cap at +15%
        prediction_label = "Strong Buy"
    elif avg_sentiment > 0.2:
        predicted_change = avg_sentiment * 10
        prediction_label = "Buy"
    elif avg_sentiment > -0.2:
        predicted_change = avg_sentiment * 5
        prediction_label = "Hold"
    elif avg_sentiment > -0.5:
        predicted_change = avg_sentiment * 10
        prediction_label = "Sell"
    else:
        predicted_change = max(-15.0, avg_sentiment * 20)  # Cap at -15%
        prediction_label = "Strong Sell"
    logger.info(f"📊 Sentiment analysis complete: {avg_sentiment:.3f} -> {prediction_label} ({predicted_change:+.1f}%)")
    return avg_sentiment, predicted_change, prediction_label, source_breakdown
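
# Worked examples of the sentiment -> prediction mapping in analyze_sentiment
# (input values illustrative):
#
#   avg_sentiment = +0.62 -> min(15.0, 0.62 * 20)   = +12.4% -> "Strong Buy"
#   avg_sentiment = +0.30 -> 0.30 * 10              = +3.0%  -> "Buy"
#   avg_sentiment = +0.10 -> 0.10 * 5               = +0.5%  -> "Hold"
#   avg_sentiment = -0.35 -> -0.35 * 10             = -3.5%  -> "Sell"
#   avg_sentiment = -0.80 -> max(-15.0, -0.80 * 20) = -15.0% -> "Strong Sell"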

def get_pre_investment_news(symbol, investment_time, hours_before=12):
    """Get news from before investment time"""
    start_time = investment_time - timedelta(hours=hours_before)
    cutoff_time = investment_time - timedelta(minutes=30)  # 30 min buffer
    logger.info(f"📊 Getting pre-investment news for {symbol}")
    logger.info(f"   Time window: {start_time} to {cutoff_time}")
    # Get Reddit posts
    reddit_posts = get_reddit_posts(symbol, start_time, cutoff_time)
    # Get Google News
    google_news = get_google_news(symbol, start_time, cutoff_time)
    # Combine all news items
    all_news = reddit_posts + google_news
    logger.info(f"📊 Total news items: {len(all_news)} ({len(reddit_posts)} Reddit + {len(google_news)} News)")
    return all_news


def refresh_account_overview():
    """Refresh account overview with enhanced data"""
    logger.info("🔄 Refreshing account overview...")
    info = get_account_info()
    if 'error' in info:
        return "Error", "Error", "Error", "Error", "Error"
    day_change_formatted = f"${info['day_change']:+,.2f} ({info.get('day_change_percent', 0):+.2f}%)"
    return (
        f"${info['portfolio_value']:,.2f}",
        f"${info['buying_power']:,.2f}",
        f"${info['cash']:,.2f}",
        day_change_formatted,
        f"${info['equity']:,.2f}"
    )


def create_portfolio_chart():
    """Create enhanced portfolio performance chart"""
    logger.info("📈 Creating portfolio chart...")
    if not trading_client:
        # Demo data
        dates = pd.date_range(start='2024-01-01', end='2024-12-31', freq='D')
        values = [100000 + i * 50 + (i % 30 - 15) * 200 for i in range(len(dates))]
        fig = go.Figure()
        fig.add_trace(go.Scatter(
            x=dates,
            y=values,
            mode='lines',
            name='Portfolio Value',
            line=dict(color=COLORS['primary'], width=2),
            fill='tozeroy',  # single trace, so fill down to zero
            fillcolor='rgba(0, 112, 243, 0.1)'
        ))
        fig.update_layout(
            title="Portfolio Performance (Demo Data)",
            xaxis_title="Date",
            yaxis_title="Portfolio Value ($)",
            hovermode='x unified',
            template='plotly_white'
        )
        return fig
    try:
        # Get portfolio history from Alpaca. The request's timeframe is a string
        # such as '1D' (not a TimeFrame enum), and the request is passed
        # positionally to avoid depending on the keyword name.
        request = GetPortfolioHistoryRequest(
            period='1M',
            timeframe='1D'
        )
        portfolio_history = trading_client.get_portfolio_history(request)
        if portfolio_history.equity:
            timestamps = [datetime.fromtimestamp(ts) for ts in portfolio_history.timestamp]
            equity_values = portfolio_history.equity
            fig = go.Figure()
            fig.add_trace(go.Scatter(
                x=timestamps,
                y=equity_values,
                mode='lines',
                name='Portfolio Value',
                line=dict(color=COLORS['primary'], width=2),
                fill='tozeroy',
                fillcolor='rgba(0, 112, 243, 0.1)'
            ))
            fig.update_layout(
                title="Portfolio Performance (Last 30 Days)",
                xaxis_title="Date",
                yaxis_title="Portfolio Value ($)",
                hovermode='x unified',
                template='plotly_white'
            )
            return fig
    except Exception as e:
        logger.error(f"Portfolio chart error: {e}")
    # Fallback empty chart (reached on error or when no history is returned)
    fig = go.Figure()
    fig.update_layout(title="Portfolio Chart (No Data Available)")
    return fig
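
# Quick sanity check: create_portfolio_chart is safe to call without Alpaca
# installed (it falls back to demo data when trading_client is None). A sketch:
#
#   fig = create_portfolio_chart()
#   fig.write_html('/tmp/portfolio_chart.html')  # open in a browser to inspect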

def refresh_ipo_discoveries():
    """Get IPO discoveries from VM"""
    logger.info("🔄 Refreshing IPO discoveries...")
    vm_data = fetch_from_vm('ipos', [])
    if not vm_data:
        return """
No recent IPO discoveries available. The system continuously monitors for new tradeable securities.

📡 VM Connection Status: Offline
"""
    # Render discoveries as a markdown table
    rows = [
        "| Symbol | Discovery Time | Type | Decision |",
        "|---|---|---|---|"
    ]
    for ipo in vm_data:
        rows.append("| {symbol} | {discovery_time} | {asset_type} | {decision} |".format(
            symbol=ipo.get('symbol', '—'),
            discovery_time=ipo.get('discovery_time', '—'),
            asset_type=ipo.get('asset_type', '—'),
            decision=ipo.get('decision', '—')
        ))
    return "\n".join(rows)
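
# Example of the markdown refresh_ipo_discoveries returns for a single discovery
# (field values illustrative):
#
#   | Symbol | Discovery Time | Type | Decision |
#   |---|---|---|---|
#   | RDDT | 2024-03-21 13:30 UTC | IPO | BUY |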

def refresh_trading_history():
    """Render trading performance with sentiment analysis as a markdown table."""
    logger.info("🔄 Refreshing trading history...")
    trades = fetch_from_vm('trades', [])  # endpoint name reconstructed; mirrors the 'ipos' pattern above
    if not trades:
        return """
No trading history available yet.

Start trading to see performance analytics with sentiment analysis!
"""
    rows = [
        "| Symbol | Investment | 1-Hour P&L | Sentiment | Prediction | Sources |",
        "|---|---|---|---|---|---|"
    ]
    for trade in trades:
        rows.append(
            "| {symbol} | ${total_investment:,.0f} | ${one_hour_pnl:+,.2f} ({pnl_percentage:+.1f}%) | "
            "{avg_sentiment:+.3f} | {prediction_label} {predicted_change:+.1f}% | "
            "🗨️{reddit_count} 📰{news_count} |".format(**trade)
        )
    return "\n".join(rows)
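
# The "1-Hour P&L" column is the dollar move over the first hour after entry. A
# minimal sketch of that arithmetic (names hypothetical; the live pipeline would
# source the +1h price from Alpaca market data):

def one_hour_pnl_sketch(qty, entry_price, price_after_1h):
    """Return (dollar P&L, percent P&L) for the first hour of a position."""
    pnl = (price_after_1h - entry_price) * qty
    pct = (price_after_1h / entry_price - 1) * 100 if entry_price else 0.0
    return pnl, pct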

# Dashboard UI
with gr.Blocks(title="Advanced IPO Trading with AI-Powered Sentiment Analysis") as demo:
    gr.Markdown("# 🤖 Advanced Automated Trading Dashboard")

    # ... account overview, portfolio chart, IPO discovery, and trading history
    # panels wire up to the refresh_* functions above ...

    gr.Markdown("Real-time data from Alpaca Markets • VM Analytics • AI Sentiment Analysis • Built with ❤️")
    gr.Markdown(
        "🔄 Last Updated: {} • 📡 VM Status: Connected • 🧠 AI Analysis: Active • "
        "📊 Data Sources: Reddit, Google News, Alpaca Markets".format(
            datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        )
    )

if __name__ == "__main__":
    demo.launch()
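
# To try the dashboard locally (filename illustrative; Gradio serves on
# http://127.0.0.1:7860 by default):
#
#   ALPACA_API_KEY=... ALPACA_SECRET_KEY=... python dashboard.py
#
# If alpaca-py or the sentiment libraries are not installed, the UI still
# renders using the demo fallbacks above.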