Dmitry Beresnev commited on
Commit Β·
dbd43f7
1
Parent(s): 09cafdd
add a draft for monitoring, ema crossover strategy, etc
Browse files- examples/ema_strategy_example.py +231 -0
- src/core/monitoring_engine/__init__.py +5 -0
- src/core/monitoring_engine/config.py +47 -0
- src/core/monitoring_engine/data_sources.py +144 -0
- src/core/monitoring_engine/monitoring_service.py +221 -0
- src/core/monitoring_engine/subscriptions.py +77 -0
- src/core/trading/__init__.py +12 -4
- src/core/trading/ema_crossover_strategy.py +472 -0
- src/telegram_bot/telegram_bot_service.py +81 -0
examples/ema_strategy_example.py
ADDED
|
@@ -0,0 +1,231 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
EMA 50/200 Crossover Strategy - Usage Examples
|
| 3 |
+
|
| 4 |
+
Demonstrates the Golden Cross / Death Cross trading strategy.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import pandas as pd
|
| 8 |
+
import yfinance as yf
|
| 9 |
+
from src.core.trading.ema_crossover_strategy import EMACrossoverStrategy
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def example_1_basic_usage():
    """Example 1: Basic EMA crossover analysis.

    Downloads two years of daily AAPL data, runs the 50/200 EMA crossover
    strategy with volume and ADX filters enabled, and prints the current
    state plus the most recent Golden/Death crosses.
    """
    print("\n=== Example 1: Basic EMA Crossover ===\n")

    # Initialize strategy with both confirmation filters enabled
    strategy = EMACrossoverStrategy(
        fast_ema=50,
        slow_ema=200,
        use_volume_filter=True,
        use_adx_filter=True
    )

    # Download data
    ticker = "AAPL"
    data = yf.download(ticker, period="2y", progress=False)
    if data.empty:
        # Network failures / unknown tickers yield an empty frame; bail out
        # early instead of crashing on iloc[-1] below.
        print(f"No data returned for {ticker}; skipping example.")
        return

    # Generate signals
    signals = strategy.generate_signals(data, ticker=ticker)

    # Display recent signals
    print(f"Analysis for {ticker}:")
    print(f" Current Price: ${signals['Close'].iloc[-1]:.2f}")
    print(f" EMA 50: ${signals['EMA_Fast'].iloc[-1]:.2f}")
    print(f" EMA 200: ${signals['EMA_Slow'].iloc[-1]:.2f}")
    print(f" Position: {strategy.get_current_position(signals)}")

    # Find recent crossovers
    golden_crosses = signals[signals['Golden_Cross']].tail(3)
    death_crosses = signals[signals['Death_Cross']].tail(3)

    if not golden_crosses.empty:
        # Plain string: no placeholders (was a redundant f-string).
        print("\nRecent Golden Crosses:")
        for date, row in golden_crosses.iterrows():
            print(f" {date.date()}: ${row['Close']:.2f}")

    if not death_crosses.empty:
        print("\nRecent Death Crosses:")
        for date, row in death_crosses.iterrows():
            print(f" {date.date()}: ${row['Close']:.2f}")
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def example_2_market_scan():
    """Example 2: Scan market for signals.

    Runs the default 50/200 strategy across a fixed list of large-cap
    tickers and prints any crossover signals found.
    """
    print("\n=== Example 2: Market Scan ===\n")

    strategy = EMACrossoverStrategy()

    # Universe to scan.
    tickers = [
        "AAPL", "MSFT", "GOOGL", "AMZN", "NVDA",
        "TSLA", "META", "AMD", "NFLX", "DIS"
    ]

    def load_data(ticker, period):
        # Thin adapter so scan_market can pull per-ticker data on demand.
        return yf.download(ticker, period=period, progress=False)

    print("Scanning market for EMA crossover signals...\n")
    results = strategy.scan_market(tickers, load_data, period="1y")

    if results.empty:
        print("\nNo signals found.")
    else:
        print(f"\nFound {len(results)} signals:\n")
        print(results.to_string(index=False))
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def example_3_custom_parameters():
    """Example 3: Custom EMA parameters.

    Uses a faster 20/50 EMA pair with tighter risk settings and a stricter
    ADX requirement, then reports the latest state for NVDA.
    """
    print("\n=== Example 3: Custom EMA 20/50 ===\n")

    # Faster crossover (20/50 instead of 50/200) with tighter risk settings.
    params = dict(
        fast_ema=20,
        slow_ema=50,
        atr_multiplier_sl=1.5,   # Tighter stop
        atr_multiplier_tp=3.0,   # Closer target
        adx_threshold=25,        # Stronger trend required
        use_volume_filter=True,
        use_adx_filter=True,
    )
    strategy = EMACrossoverStrategy(**params)

    print(strategy.get_strategy_description())

    # Exercise the faster settings on a volatile name.
    ticker = "NVDA"
    data = yf.download(ticker, period="6mo", progress=False)
    signals = strategy.generate_signals(data, ticker=ticker)

    print(f"\nAnalysis for {ticker}:")
    print(f" Current Position: {strategy.get_current_position(signals)}")
    print(f" ADX: {signals['ADX'].iloc[-1]:.1f}")
    print(f" Volume Ratio: {signals['Volume_Ratio'].iloc[-1]:.2f}x")
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def example_4_signal_details():
    """Example 4: Detailed signal analysis.

    Finds the most recent LONG and SHORT signals for TSLA and prints their
    price, stop-loss, take-profit, and filter readings.
    """
    print("\n=== Example 4: Signal Details ===\n")

    strategy = EMACrossoverStrategy()

    ticker = "TSLA"
    data = yf.download(ticker, period="1y", progress=False)
    signals = strategy.generate_signals(data, ticker=ticker)

    # Keep just the last row of each signal type (may be empty).
    recent_long = signals[signals['Signal_Long']].tail(1)
    recent_short = signals[signals['Signal_Short']].tail(1)

    if len(recent_long) > 0:
        row = recent_long.iloc[0]
        print(f"Most Recent LONG Signal for {ticker}:")
        print(f" Date: {recent_long.index[0].date()}")
        print(f" Price: ${row['Close']:.2f}")
        # Stop/target shown both in dollars and as percent distance from entry.
        print(f" Stop Loss: ${row['Stop_Loss_Long']:.2f} ({((row['Stop_Loss_Long']/row['Close']-1)*100):.1f}%)")
        print(f" Take Profit: ${row['Take_Profit_Long']:.2f} ({((row['Take_Profit_Long']/row['Close']-1)*100):.1f}%)")
        print(f" R:R Ratio: {row['RR_Ratio']:.1f}:1")
        print(f" ADX: {row['ADX']:.1f}")
        print(f" Volume: {row['Volume_Ratio']:.2f}x average")

    if len(recent_short) > 0:
        row = recent_short.iloc[0]
        print(f"\nMost Recent SHORT Signal for {ticker}:")
        print(f" Date: {recent_short.index[0].date()}")
        print(f" Price: ${row['Close']:.2f}")
        print(f" Stop Loss: ${row['Stop_Loss_Short']:.2f}")
        print(f" Take Profit: ${row['Take_Profit_Short']:.2f}")
        print(f" R:R Ratio: {row['RR_Ratio']:.1f}:1")
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def example_5_backtest_simulation():
    """Example 5: Simple backtest simulation.

    Counts LONG/SHORT signals over five years of SPY data, prints summary
    statistics for LONG signals, and shows the per-year distribution.
    """
    print("\n=== Example 5: Backtest Simulation ===\n")

    strategy = EMACrossoverStrategy()

    ticker = "SPY"
    data = yf.download(ticker, period="5y", progress=False)
    signals = strategy.generate_signals(data, ticker=ticker)

    # Extract trades
    long_signals = signals[signals['Signal_Long']]
    short_signals = signals[signals['Signal_Short']]

    print(f"Backtest Results for {ticker} (5 years):")
    print(f" Total LONG signals: {len(long_signals)}")
    print(f" Total SHORT signals: {len(short_signals)}")

    # Calculate simple stats
    if len(long_signals) > 0:
        avg_adx = long_signals['ADX'].mean()
        avg_vol_ratio = long_signals['Volume_Ratio'].mean()

        # Plain string: no placeholders (was a redundant f-string).
        print("\nLONG Signal Statistics:")
        print(f" Average ADX: {avg_adx:.1f}")
        print(f" Average Volume Ratio: {avg_vol_ratio:.2f}x")

    # Show signal distribution by year
    all_signals = pd.concat([long_signals, short_signals])
    if not all_signals.empty:
        signals_by_year = all_signals.groupby(all_signals.index.year).size()
        print("\nSignals by Year:")
        for year, count in signals_by_year.items():
            print(f" {year}: {count} signals")
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def example_6_no_filters():
    """Example 6: Pure EMA crossover (no filters).

    Disables the volume and ADX confirmation filters and counts the raw
    Golden/Death crosses on two years of AAPL data.
    """
    print("\n=== Example 6: Pure EMA Crossover (No Filters) ===\n")

    # Disable all filters
    strategy = EMACrossoverStrategy(use_volume_filter=False, use_adx_filter=False)

    ticker = "AAPL"
    data = yf.download(ticker, period="2y", progress=False)
    signals = strategy.generate_signals(data, ticker=ticker)

    # Count raw crossover events.
    total_long = signals['Golden_Cross'].sum()
    total_short = signals['Death_Cross'].sum()

    print(f"Pure EMA Crossover for {ticker}:")
    print(f" Golden Crosses: {total_long}")
    print(f" Death Crosses: {total_short}")
    print(f" Current Position: {strategy.get_current_position(signals)}")
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def main():
    """Run all examples in sequence, continuing past individual failures."""
    demos = (
        example_1_basic_usage,
        example_2_market_scan,
        example_3_custom_parameters,
        example_4_signal_details,
        example_5_backtest_simulation,
        example_6_no_filters,
    )

    for demo in demos:
        try:
            demo()
        except Exception as e:
            # One failing example must not stop the rest; dump the traceback
            # for debugging.
            # NOTE(review): "β" looks like a mojibake'd marker symbol — verify
            # the file's encoding against the intended glyph.
            print(f"\nβ Example failed: {e}")
            import traceback
            traceback.print_exc()

    print("\n" + "=" * 70)
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
if __name__ == "__main__":
    # Run single example
    # example_1_basic_usage()

    # Or run all examples (each one is isolated inside main's try/except)
    main()
|
src/core/monitoring_engine/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Monitoring engine for multi-timeframe price drop detection."""
|
| 2 |
+
|
| 3 |
+
from .monitoring_service import MonitoringEngineService
|
| 4 |
+
|
| 5 |
+
__all__ = ["MonitoringEngineService"]
|
src/core/monitoring_engine/config.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Configuration for the monitoring engine."""
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
import os
|
| 5 |
+
from typing import List
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@dataclass
class MonitoringConfig:
    """Runtime configuration for monitoring."""

    # Rolling windows (in minutes) evaluated for price drops.
    timeframes_minutes: List[int]
    # Number of standard deviations defining an "abnormal" drop.
    sigma_multiplier: float
    # Minimum gap between repeated alerts for the same chat/ticker.
    min_alert_gap_minutes: int
    # Delay between polling cycles.
    poll_interval_seconds: int
    # JSON file used to persist chat subscriptions.
    subscriptions_path: str

    # Alpaca Data API credentials (may be empty if Alpaca is unused).
    alpaca_api_key: str
    alpaca_api_secret: str
    alpaca_data_base_url: str

    # Finnhub API key (may be empty if Finnhub is unused).
    finnhub_api_key: str

    @classmethod
    def from_env(cls) -> "MonitoringConfig":
        """Build a config from environment variables, applying defaults.

        Unset variables fall back to the documented defaults; the timeframe
        list is parsed from a comma-separated string, skipping blank entries.
        """
        raw_timeframes = os.getenv("MONITORING_TIMEFRAMES", "10,30,60,120,180,360,600")
        parsed_timeframes = [
            int(part.strip()) for part in raw_timeframes.split(",") if part.strip()
        ]

        return cls(
            timeframes_minutes=parsed_timeframes,
            sigma_multiplier=float(os.getenv("MONITORING_SIGMA_MULTIPLIER", "3.0")),
            min_alert_gap_minutes=int(os.getenv("MONITORING_ALERT_GAP_MINUTES", "60")),
            poll_interval_seconds=int(os.getenv("MONITORING_POLL_INTERVAL_SECONDS", "60")),
            subscriptions_path=os.getenv(
                "MONITORING_SUBSCRIPTIONS_PATH",
                "data/monitoring_subscriptions.json"
            ),
            alpaca_api_key=os.getenv("ALPACA_API_KEY", ""),
            alpaca_api_secret=os.getenv("ALPACA_API_SECRET", ""),
            alpaca_data_base_url=os.getenv("ALPACA_DATA_BASE_URL", ""),
            finnhub_api_key=os.getenv("FINNHUB_API_TOKEN", "")
        )
|
src/core/monitoring_engine/data_sources.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Price data sources for monitoring."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import asyncio
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from typing import Optional
|
| 8 |
+
|
| 9 |
+
import httpx
|
| 10 |
+
|
| 11 |
+
from src.telegram_bot.logger import main_logger as logger
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@dataclass
class PriceResult:
    """Price result wrapper returned by every price source."""
    # Latest price, or None when the lookup failed.
    price: Optional[float]
    # Name of the source that produced (or failed to produce) the price.
    source: str
    # Human-readable failure description; None on success.
    error: Optional[str] = None
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class BasePriceSource:
    """Base class for price sources.

    Subclasses set ``name`` and implement :meth:`get_price`; implementations
    in this module report failures via ``PriceResult.error`` instead of
    raising.
    """

    # Identifier reported in PriceResult.source.
    name: str = "base"

    async def get_price(self, ticker: str) -> PriceResult:
        """Fetch the latest price for *ticker*. Must be overridden."""
        raise NotImplementedError
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class AlpacaPriceSource(BasePriceSource):
    """Alpaca Data API (REST) price source.

    Fetches the latest trade for a ticker. Returns an error result (never
    raises) when credentials are missing or the request fails, so the
    caller's fallback chain can continue.
    """

    name = "alpaca"

    def __init__(self, api_key: str, api_secret: str, base_url: str, http_client: httpx.AsyncClient | None = None):
        # Credentials may be empty strings; get_price() degrades gracefully.
        self._api_key = api_key
        self._api_secret = api_secret
        # Normalize so the URL join below cannot produce a double slash.
        self._base_url = base_url.rstrip("/")
        # Optional shared client; when None a short-lived client is created
        # per call and closed afterwards.
        self._http_client = http_client

    async def get_price(self, ticker: str) -> PriceResult:
        """Return the latest trade price for *ticker*, or an error result."""
        if not self._api_key or not self._api_secret or not self._base_url:
            return PriceResult(price=None, source=self.name, error="Missing Alpaca credentials")

        url = f"{self._base_url}/v2/stocks/{ticker}/trades/latest"
        headers = {
            "APCA-API-KEY-ID": self._api_key,
            "APCA-API-SECRET-KEY": self._api_secret
        }

        # Reuse the injected client if present; otherwise create a throwaway
        # one and remember to close it in the finally block.
        client = self._http_client or httpx.AsyncClient(timeout=10.0)
        close_client = self._http_client is None
        try:
            response = await client.get(url, headers=headers)
            response.raise_for_status()
            payload = response.json()
            # The latest trade lives under "trade"; the price field is "p"
            # (falling back to "price" for variant payloads).
            trade = payload.get("trade") or {}
            price = trade.get("p") or trade.get("price")
            if price is None:
                return PriceResult(price=None, source=self.name, error="No price in Alpaca response")
            return PriceResult(price=float(price), source=self.name)
        except Exception as exc:
            # Broad catch by design: any HTTP/parse failure becomes an error
            # result so the source chain can fall through.
            return PriceResult(price=None, source=self.name, error=str(exc))
        finally:
            if close_client:
                await client.aclose()
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class FinnhubPriceSource(BasePriceSource):
    """Finnhub price source.

    Runs the blocking ``finnhub`` client in a worker thread so the event
    loop is not blocked. Never raises; failures are reported via the
    ``error`` field of the returned PriceResult.
    """

    name = "finnhub"

    def __init__(self, api_key: str):
        # May be empty; get_price() then returns an error result.
        self._api_key = api_key

    async def get_price(self, ticker: str) -> PriceResult:
        """Return the current quote price for *ticker*, or an error result."""
        if not self._api_key:
            return PriceResult(price=None, source=self.name, error="Missing Finnhub API key")

        # Imported lazily so the finnhub dependency stays optional.
        try:
            import finnhub
        except Exception as exc:
            return PriceResult(price=None, source=self.name, error=f"Finnhub import error: {exc}")

        def _fetch() -> PriceResult:
            # Runs in a thread: the finnhub client is synchronous.
            try:
                client = finnhub.Client(api_key=self._api_key)
                quote = client.quote(ticker)
                # "c" is Finnhub's current-price field.
                # NOTE(review): Finnhub reportedly returns c=0 for unknown
                # tickers rather than omitting the field — confirm whether
                # that case should also be treated as an error.
                price = quote.get("c")
                if price is None:
                    return PriceResult(price=None, source=self.name, error="No price in Finnhub response")
                return PriceResult(price=float(price), source=self.name)
            except Exception as exc:
                return PriceResult(price=None, source=self.name, error=str(exc))

        return await asyncio.to_thread(_fetch)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
class YFinancePriceSource(BasePriceSource):
    """yfinance price source (no API key required).

    Tries ``Ticker.fast_info`` first and falls back to the most recent
    close from today's 1-minute bars. Blocking yfinance calls run in a
    worker thread so the event loop is not blocked.
    """

    name = "yfinance"

    async def get_price(self, ticker: str) -> PriceResult:
        """Return the latest price for *ticker*, or an error result."""
        try:
            import yfinance as yf
        except Exception as exc:
            return PriceResult(price=None, source=self.name, error=f"yfinance import error: {exc}")

        def _fetch() -> PriceResult:
            try:
                yf_ticker = yf.Ticker(ticker)
                fast_info = getattr(yf_ticker, "fast_info", None)
                price = None
                if fast_info is not None:
                    # BUGFIX: fast_info is a FastInfo object (mapping-like),
                    # not a plain dict, so the previous isinstance(…, dict)
                    # check always failed and the fast path was dead code.
                    # Probe both attribute and item access.
                    for key in ("last_price", "lastPrice"):
                        price = getattr(fast_info, key, None)
                        if price is None:
                            try:
                                price = fast_info[key]
                            except Exception:
                                price = None
                        if price is not None:
                            break
                if price is None:
                    # Fallback: last close of today's 1-minute bars.
                    data = yf_ticker.history(period="1d", interval="1m")
                    if not data.empty:
                        price = float(data["Close"].iloc[-1])
                if price is None:
                    return PriceResult(price=None, source=self.name, error="No price in yfinance response")
                return PriceResult(price=float(price), source=self.name)
            except Exception as exc:
                return PriceResult(price=None, source=self.name, error=str(exc))

        return await asyncio.to_thread(_fetch)
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
class PriceSourceChain:
    """Ordered fallback chain of price sources.

    Queries each source in turn and returns the first successful result;
    failed lookups are logged at debug level and skipped.
    """

    def __init__(self, sources: list[BasePriceSource]):
        self._sources = sources

    async def get_price(self, ticker: str) -> PriceResult:
        """Return the first successful PriceResult, or a terminal error."""
        for source in self._sources:
            result = await source.get_price(ticker)
            if result.price is None:
                logger.debug(f"Price source {source.name} failed for {ticker}: {result.error}")
                continue
            return result
        return PriceResult(price=None, source="none", error="All price sources failed")
|
src/core/monitoring_engine/monitoring_service.py
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Monitoring service for multi-timeframe price drops with deduplication."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import asyncio
|
| 6 |
+
import time
|
| 7 |
+
from collections import defaultdict, deque
|
| 8 |
+
from dataclasses import dataclass
|
| 9 |
+
from typing import Callable, Deque, Dict, List, Optional
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
import httpx
|
| 13 |
+
|
| 14 |
+
from src.telegram_bot.logger import main_logger as logger
|
| 15 |
+
from .config import MonitoringConfig
|
| 16 |
+
from .data_sources import (
|
| 17 |
+
AlpacaPriceSource,
|
| 18 |
+
FinnhubPriceSource,
|
| 19 |
+
YFinancePriceSource,
|
| 20 |
+
PriceSourceChain,
|
| 21 |
+
PriceResult
|
| 22 |
+
)
|
| 23 |
+
from .subscriptions import SubscriptionStore
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@dataclass
class DropEvent:
    """Represents a detected drop event."""
    # Ticker symbol the drop was observed on.
    ticker: str
    # Rolling-window length (minutes) in which the drop occurred.
    timeframe_minutes: int
    # Observed percent change over the window (negative for drops).
    drop_pct: float
    # Adaptive threshold (percent, negative) that was breached.
    threshold_pct: float
    # Name of the price source that supplied the data.
    source: str
    # Unix timestamp (seconds) when the event was detected.
    timestamp: float
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class MonitoringEngineService:
    """Service that polls prices, detects drops, and notifies subscribers.

    Lifecycle: construct with an awaitable ``send_alert(chat_id, text)``
    callback, ``await start()`` to launch the polling loop, and
    ``await stop()`` to cancel it. External price feeds can also push
    updates through :meth:`process_price`.
    """

    def __init__(
        self,
        send_alert: Callable[[int, str], asyncio.Future],
        config: Optional[MonitoringConfig] = None,
        http_client: httpx.AsyncClient | None = None
    ):
        self._config = config or MonitoringConfig.from_env()
        self._subscription_store = SubscriptionStore(self._config.subscriptions_path)
        self._http_client = http_client

        # Ordered fallback: Alpaca first, then Finnhub, then yfinance.
        self._price_sources = PriceSourceChain([
            AlpacaPriceSource(
                api_key=self._config.alpaca_api_key,
                api_secret=self._config.alpaca_api_secret,
                base_url=self._config.alpaca_data_base_url,
                http_client=self._http_client
            ),
            FinnhubPriceSource(api_key=self._config.finnhub_api_key),
            YFinancePriceSource()
        ])

        # Awaitable callback used to deliver alert text to a chat id.
        self._send_alert = send_alert
        self._running = False
        self._task: Optional[asyncio.Task] = None

        # ticker -> timeframe (minutes) -> bounded deque of recent prices.
        # NOTE(review): deque maxlen equals the timeframe in minutes, which
        # assumes one sample per minute (the default 60s poll interval);
        # confirm behavior if poll_interval_seconds is changed.
        self._buffers: Dict[str, Dict[int, Deque[float]]] = defaultdict(dict)
        # chat_id -> ticker -> unix timestamp of the last alert (dedup state).
        self._last_alert_ts: Dict[int, Dict[str, float]] = defaultdict(dict)
        # Guards buffer mutation/detection against concurrent access from
        # the poll loop and process_price().
        self._lock = asyncio.Lock()

    # -------------------------
    # Public API / hooks
    # -------------------------
    def subscribe(self, chat_id: int, tickers: List[str]) -> List[str]:
        """Subscribe chat to tickers. Returns the tickers actually added."""
        return self._subscription_store.subscribe(chat_id, tickers)

    def unsubscribe(self, chat_id: int, tickers: List[str]) -> List[str]:
        """Unsubscribe chat from tickers. Returns the tickers actually removed."""
        return self._subscription_store.unsubscribe(chat_id, tickers)

    def list_subscriptions(self, chat_id: int) -> List[str]:
        """List tickers currently subscribed for a chat."""
        return self._subscription_store.list_tickers(chat_id)

    async def start(self) -> None:
        """Start background monitoring loop (idempotent)."""
        if self._running:
            return
        self._running = True
        self._task = asyncio.create_task(self._run_loop())
        logger.info("Monitoring engine started")

    async def stop(self) -> None:
        """Stop background monitoring loop.

        NOTE(review): the task is cancelled but not awaited, so it may still
        be unwinding when stop() returns — confirm whether callers rely on a
        fully stopped loop.
        """
        self._running = False
        if self._task:
            self._task.cancel()
            self._task = None
        logger.info("Monitoring engine stopped")

    async def process_price(self, ticker: str, price: float, source: str = "external") -> None:
        """API hook: process an externally supplied price update.

        Updates the rolling buffers, runs detection, and dispatches any
        resulting alerts. Note that dispatch happens while holding the lock
        here (unlike _scan_once), so send_alert is awaited under the lock.
        """
        async with self._lock:
            self._update_buffers(ticker, price)
            events = self._detect_events(ticker, source)
            await self._dispatch_events(ticker, events)

    # -------------------------
    # Internal loop
    # -------------------------
    async def _run_loop(self) -> None:
        # Poll until stop() clears the flag; one failed scan never kills
        # the loop — it is logged and retried after the poll interval.
        while self._running:
            try:
                await self._scan_once()
            except Exception as exc:
                logger.error(f"Monitoring scan failed: {exc}")
            await asyncio.sleep(self._config.poll_interval_seconds)

    async def _scan_once(self) -> None:
        # Union of all subscribed tickers across chats, de-duplicated and
        # sorted for deterministic scan order.
        subscriptions = self._subscription_store.get_all()
        tickers = sorted({t for tickers in subscriptions.values() for t in tickers})
        if not tickers:
            return

        for ticker in tickers:
            price_result = await self._price_sources.get_price(ticker)
            if price_result.price is None:
                logger.warning(f"Price fetch failed for {ticker}: {price_result.error}")
                continue

            # Mutate shared state under the lock; alerts are sent outside it.
            async with self._lock:
                self._update_buffers(ticker, price_result.price)
                events = self._detect_events(ticker, price_result.source)

            await self._dispatch_events(ticker, events)

    # -------------------------
    # Detection pipeline
    # -------------------------
    def _update_buffers(self, ticker: str, price: float) -> None:
        """Append the newest price to every timeframe buffer for *ticker*."""
        for tf in self._config.timeframes_minutes:
            if tf not in self._buffers[ticker]:
                # maxlen bounds the window; old samples fall off automatically.
                self._buffers[ticker][tf] = deque(maxlen=tf)
            self._buffers[ticker][tf].append(price)

    def _detect_events(self, ticker: str, source: str) -> List[DropEvent]:
        """Return a DropEvent for each timeframe whose window is full and breached."""
        events: List[DropEvent] = []
        now_ts = time.time()
        for tf in self._config.timeframes_minutes:
            window = self._buffers[ticker].get(tf)
            # Only evaluate completely filled windows.
            if not window or len(window) < tf:
                continue
            drop_pct = self._compute_drop_pct(window)
            threshold_pct = self._adaptive_threshold_pct(window)
            if threshold_pct is None:
                continue
            # Both values are negative for drops: a breach means the observed
            # change is at or below the (negative) threshold.
            if drop_pct <= threshold_pct:
                events.append(
                    DropEvent(
                        ticker=ticker,
                        timeframe_minutes=tf,
                        drop_pct=drop_pct,
                        threshold_pct=threshold_pct,
                        source=source,
                        timestamp=now_ts
                    )
                )
        return events

    def _compute_drop_pct(self, window: Deque[float]) -> float:
        """Percent change from the oldest to the newest price in *window*."""
        start_price = window[0]
        end_price = window[-1]
        if start_price == 0:
            # Degenerate data: avoid division by zero.
            return 0.0
        return ((end_price - start_price) / start_price) * 100.0

    def _adaptive_threshold_pct(self, window: Deque[float]) -> Optional[float]:
        """Volatility-scaled drop threshold (negative percent), or None.

        Returns None when there are too few samples or zero volatility,
        which disables detection for that window.
        """
        if len(window) < 5:
            return None
        prices = np.array(window, dtype=float)
        # Per-step simple returns; sigma is their standard deviation.
        returns = np.diff(prices) / prices[:-1]
        sigma = float(np.std(returns))
        if sigma == 0:
            return None
        # k-sigma rule: threshold = -(multiplier * sigma), expressed in percent.
        return -self._config.sigma_multiplier * sigma * 100.0

    # -------------------------
    # Alert dispatch / dedupe
    # -------------------------
    async def _dispatch_events(self, ticker: str, events: List[DropEvent]) -> None:
        """Send one aggregated alert per subscribed chat, respecting the dedup gap."""
        if not events:
            return
        subscriptions = self._subscription_store.get_all()
        for chat_id, tickers in subscriptions.items():
            if ticker not in tickers:
                continue
            if self._is_deduped(chat_id, ticker):
                continue
            message = self._format_alert_message(ticker, events)
            await self._send_alert(chat_id, message)
            self._mark_alert(chat_id, ticker)

    def _is_deduped(self, chat_id: int, ticker: str) -> bool:
        """True when an alert for this chat/ticker fired within the minimum gap."""
        last_ts = self._last_alert_ts.get(chat_id, {}).get(ticker)
        if not last_ts:
            return False
        gap_seconds = self._config.min_alert_gap_minutes * 60
        return (time.time() - last_ts) < gap_seconds

    def _mark_alert(self, chat_id: int, ticker: str) -> None:
        """Record now as the last alert time for this chat/ticker pair."""
        self._last_alert_ts.setdefault(chat_id, {})[ticker] = time.time()

    def _format_alert_message(self, ticker: str, events: List[DropEvent]) -> str:
        """Build a multi-line alert message, shortest timeframe first."""
        events_sorted = sorted(events, key=lambda e: e.timeframe_minutes)
        # NOTE(review): "π¨" looks like a mojibake'd emoji — verify the file's
        # encoding against the intended glyph.
        lines = [f"π¨ Drop alert for {ticker}"]
        lines.append(f"Source: {events_sorted[0].source}")
        for event in events_sorted:
            lines.append(
                f"- {event.timeframe_minutes}m: {event.drop_pct:.2f}% "
                f"(threshold {event.threshold_pct:.2f}%)"
            )
        return "\n".join(lines)
|
src/core/monitoring_engine/subscriptions.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Subscription storage for monitoring alerts."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import json
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from typing import Dict, List, Set
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class SubscriptionStore:
|
| 11 |
+
"""Simple JSON-backed subscription store."""
|
| 12 |
+
|
| 13 |
+
    def __init__(self, path: str):
        # Backing JSON file; created lazily by save().
        self._path = Path(path)
        # chat_id -> set of upper-cased ticker symbols.
        self._subscriptions: Dict[int, Set[str]] = {}
        # Hydrate from disk immediately so the store is ready to use.
        self.load()
|
| 17 |
+
|
| 18 |
+
def load(self) -> None:
|
| 19 |
+
"""Load subscriptions from disk if present."""
|
| 20 |
+
if not self._path.exists():
|
| 21 |
+
self._subscriptions = {}
|
| 22 |
+
return
|
| 23 |
+
|
| 24 |
+
try:
|
| 25 |
+
data = json.loads(self._path.read_text(encoding="utf-8"))
|
| 26 |
+
self._subscriptions = {
|
| 27 |
+
int(chat_id): set(map(str.upper, tickers))
|
| 28 |
+
for chat_id, tickers in data.items()
|
| 29 |
+
}
|
| 30 |
+
except Exception:
|
| 31 |
+
# If parsing fails, start with empty storage to avoid crashes
|
| 32 |
+
self._subscriptions = {}
|
| 33 |
+
|
| 34 |
+
def save(self) -> None:
|
| 35 |
+
"""Persist subscriptions to disk."""
|
| 36 |
+
self._path.parent.mkdir(parents=True, exist_ok=True)
|
| 37 |
+
data = {str(chat_id): sorted(list(tickers)) for chat_id, tickers in self._subscriptions.items()}
|
| 38 |
+
self._path.write_text(json.dumps(data, indent=2), encoding="utf-8")
|
| 39 |
+
|
| 40 |
+
def subscribe(self, chat_id: int, tickers: List[str]) -> List[str]:
|
| 41 |
+
"""Subscribe chat to a list of tickers. Returns added tickers."""
|
| 42 |
+
norm = [t.strip().upper() for t in tickers if t.strip()]
|
| 43 |
+
if not norm:
|
| 44 |
+
return []
|
| 45 |
+
if chat_id not in self._subscriptions:
|
| 46 |
+
self._subscriptions[chat_id] = set()
|
| 47 |
+
before = set(self._subscriptions[chat_id])
|
| 48 |
+
self._subscriptions[chat_id].update(norm)
|
| 49 |
+
added = sorted(list(self._subscriptions[chat_id] - before))
|
| 50 |
+
self.save()
|
| 51 |
+
return added
|
| 52 |
+
|
| 53 |
+
def unsubscribe(self, chat_id: int, tickers: List[str]) -> List[str]:
|
| 54 |
+
"""Unsubscribe chat from tickers. Returns removed tickers."""
|
| 55 |
+
norm = [t.strip().upper() for t in tickers if t.strip()]
|
| 56 |
+
if chat_id not in self._subscriptions or not norm:
|
| 57 |
+
return []
|
| 58 |
+
before = set(self._subscriptions[chat_id])
|
| 59 |
+
for t in norm:
|
| 60 |
+
self._subscriptions[chat_id].discard(t)
|
| 61 |
+
removed = sorted(list(before - self._subscriptions[chat_id]))
|
| 62 |
+
if not self._subscriptions[chat_id]:
|
| 63 |
+
del self._subscriptions[chat_id]
|
| 64 |
+
self.save()
|
| 65 |
+
return removed
|
| 66 |
+
|
| 67 |
+
def list_tickers(self, chat_id: int) -> List[str]:
|
| 68 |
+
"""Return tickers for a chat."""
|
| 69 |
+
return sorted(list(self._subscriptions.get(chat_id, set())))
|
| 70 |
+
|
| 71 |
+
def get_all(self) -> Dict[int, List[str]]:
|
| 72 |
+
"""Return all subscriptions."""
|
| 73 |
+
return {chat_id: sorted(list(tickers)) for chat_id, tickers in self._subscriptions.items()}
|
| 74 |
+
|
| 75 |
+
def get_chat_ids(self) -> List[int]:
|
| 76 |
+
"""Return all subscribed chat IDs."""
|
| 77 |
+
return list(self._subscriptions.keys())
|
src/core/trading/__init__.py
CHANGED
|
@@ -1,20 +1,28 @@
|
|
| 1 |
"""
|
| 2 |
Trading Strategy Module
|
| 3 |
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
- Vectorized backtesting
|
| 9 |
- Position sizing with Kelly Criterion
|
| 10 |
"""
|
| 11 |
|
| 12 |
from src.core.trading.macd_strategy import AdvancedMACDStrategy
|
|
|
|
| 13 |
from src.core.trading.backtest_engine import VectorizedBacktest
|
| 14 |
from src.core.trading.risk_engine import RiskEngine
|
| 15 |
|
| 16 |
__all__ = [
|
| 17 |
'AdvancedMACDStrategy',
|
|
|
|
| 18 |
'VectorizedBacktest',
|
| 19 |
'RiskEngine',
|
| 20 |
]
|
|
|
|
| 1 |
"""
|
| 2 |
Trading Strategy Module
|
| 3 |
|
| 4 |
+
Professional trading strategies with risk management:
|
| 5 |
+
|
| 6 |
+
Strategies:
|
| 7 |
+
- AdvancedMACDStrategy: Zero-Lag MACD with divergence detection
|
| 8 |
+
- EMACrossoverStrategy: Golden Cross / Death Cross (EMA 50/200)
|
| 9 |
+
|
| 10 |
+
Features:
|
| 11 |
+
- ATR-based stop loss / take profit
|
| 12 |
+
- ADX trend strength filter
|
| 13 |
+
- Volume confirmation
|
| 14 |
- Vectorized backtesting
|
| 15 |
- Position sizing with Kelly Criterion
|
| 16 |
"""
|
| 17 |
|
| 18 |
from src.core.trading.macd_strategy import AdvancedMACDStrategy
|
| 19 |
+
from src.core.trading.ema_crossover_strategy import EMACrossoverStrategy
|
| 20 |
from src.core.trading.backtest_engine import VectorizedBacktest
|
| 21 |
from src.core.trading.risk_engine import RiskEngine
|
| 22 |
|
| 23 |
__all__ = [
|
| 24 |
'AdvancedMACDStrategy',
|
| 25 |
+
'EMACrossoverStrategy',
|
| 26 |
'VectorizedBacktest',
|
| 27 |
'RiskEngine',
|
| 28 |
]
|
src/core/trading/ema_crossover_strategy.py
ADDED
|
@@ -0,0 +1,472 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
EMA 50/200 Crossover Strategy (Golden Cross / Death Cross)
|
| 3 |
+
|
| 4 |
+
Classic trend-following strategy based on exponential moving average crossovers.
|
| 5 |
+
Includes volume, trend strength, and risk management filters.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import pandas as pd
|
| 9 |
+
import numpy as np
|
| 10 |
+
from datetime import datetime, timedelta
|
| 11 |
+
from typing import Dict, List, Optional, Tuple
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class EMACrossoverStrategy:
    """
    Professional EMA 50/200 crossover strategy with filters.

    Golden Cross: EMA 50 crosses above EMA 200 -> BUY signal
    Death Cross: EMA 50 crosses below EMA 200 -> SELL signal

    Features:
    - Volume confirmation
    - Trend strength (ADX) filter
    - ATR-based stop loss / take profit
    - Cooldown period between trades
    """

    def __init__(
        self,
        fast_ema: int = 50,
        slow_ema: int = 200,
        atr_period: int = 14,
        atr_multiplier_sl: float = 2.0,
        atr_multiplier_tp: float = 4.0,
        adx_period: int = 14,
        adx_threshold: float = 20,
        volume_period: int = 20,
        use_volume_filter: bool = True,
        use_adx_filter: bool = True,
        cooldown_candles: int = 5
    ):
        """
        Initialize EMA crossover strategy.

        Args:
            fast_ema: Fast EMA period (default 50)
            slow_ema: Slow EMA period (default 200)
            atr_period: ATR period for volatility (default 14)
            atr_multiplier_sl: ATR multiplier for stop loss (default 2.0)
            atr_multiplier_tp: ATR multiplier for take profit (default 4.0)
            adx_period: ADX period for trend strength (default 14)
            adx_threshold: Minimum ADX for valid signal (default 20)
            volume_period: Period for average volume (default 20)
            use_volume_filter: Require above-average volume (default True)
            use_adx_filter: Require strong trend (default True)
            cooldown_candles: Minimum bars between trades (default 5)
        """
        self.fast_ema = fast_ema
        self.slow_ema = slow_ema
        self.atr_period = atr_period
        self.atr_multiplier_sl = atr_multiplier_sl
        self.atr_multiplier_tp = atr_multiplier_tp
        self.adx_period = adx_period
        self.adx_threshold = adx_threshold
        self.volume_period = volume_period
        self.use_volume_filter = use_volume_filter
        self.use_adx_filter = use_adx_filter
        self.cooldown_candles = cooldown_candles
        # Maps ticker -> bar index of the last accepted signal.
        # NOTE(review): this state persists across generate_signals() calls,
        # so re-running on the same history suppresses early signals — confirm
        # whether cross-call persistence is intended for live incremental use.
        self.last_trade_idx: Dict[str, int] = {}

    def calculate_ema(self, data: pd.Series, period: int) -> pd.Series:
        """
        Calculate Exponential Moving Average.

        Args:
            data: Price series
            period: EMA period

        Returns:
            EMA series
        """
        return data.ewm(span=period, adjust=False).mean()

    def calculate_wilder_ema(self, data: pd.Series, period: int) -> pd.Series:
        """
        Wilder's EMA (used for ATR and ADX): smoothing factor alpha = 1/period.

        Args:
            data: Data series
            period: Smoothing period

        Returns:
            Smoothed series
        """
        alpha = 1.0 / period
        return data.ewm(alpha=alpha, adjust=False).mean()

    def calculate_atr(self, data: pd.DataFrame) -> pd.Series:
        """
        Calculate Average True Range (Wilder's method).

        Args:
            data: OHLC DataFrame

        Returns:
            ATR series
        """
        high = data['High']
        low = data['Low']
        close = data['Close']

        # True range = max(H-L, |H - prev C|, |L - prev C|)
        tr1 = high - low
        tr2 = (high - close.shift()).abs()
        tr3 = (low - close.shift()).abs()

        tr = pd.concat([tr1, tr2, tr3], axis=1).max(axis=1)
        atr = self.calculate_wilder_ema(tr, self.atr_period)

        return atr

    def calculate_adx(self, data: pd.DataFrame) -> Tuple[pd.Series, pd.Series, pd.Series]:
        """
        Calculate Average Directional Index (trend strength).

        Zero denominators (flat markets where ATR or +DI + -DI is 0) are
        mapped to 0 instead of producing inf/NaN, so downstream filters such
        as ``Strong_Trend`` stay well-defined.

        Args:
            data: OHLC DataFrame

        Returns:
            Tuple of (ADX, +DI, -DI)
        """
        high = data['High']
        low = data['Low']
        close = data['Close']

        # Directional movement: only the dominant positive move counts
        up_move = high.diff()
        down_move = -low.diff()

        plus_dm = pd.Series(
            np.where((up_move > down_move) & (up_move > 0), up_move, 0),
            index=data.index
        )
        minus_dm = pd.Series(
            np.where((down_move > up_move) & (down_move > 0), down_move, 0),
            index=data.index
        )

        # True range (same construction as calculate_atr)
        tr1 = high - low
        tr2 = (high - close.shift()).abs()
        tr3 = (low - close.shift()).abs()
        tr = pd.concat([tr1, tr2, tr3], axis=1).max(axis=1)

        # Smooth with Wilder's method
        atr = self.calculate_wilder_ema(tr, self.adx_period)
        plus_di_smooth = self.calculate_wilder_ema(plus_dm, self.adx_period)
        minus_di_smooth = self.calculate_wilder_ema(minus_dm, self.adx_period)

        # Directional indicators; guard against ATR == 0 (flat market)
        atr_safe = atr.replace(0.0, np.nan)
        plus_di = (100 * plus_di_smooth / atr_safe).fillna(0.0)
        minus_di = (100 * minus_di_smooth / atr_safe).fillna(0.0)

        # ADX calculation; guard against +DI + -DI == 0
        di_sum = (plus_di + minus_di).replace(0.0, np.nan)
        dx = (100 * (plus_di - minus_di).abs() / di_sum).fillna(0.0)
        adx = self.calculate_wilder_ema(dx, self.adx_period)

        return adx, plus_di, minus_di

    def calculate_risk_levels(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        Calculate stop loss and take profit levels based on ATR.

        Args:
            df: DataFrame with ATR calculated

        Returns:
            DataFrame with risk levels added
        """
        # Long positions: SL below, TP above current close
        df['Stop_Loss_Long'] = df['Close'] - self.atr_multiplier_sl * df['ATR']
        df['Take_Profit_Long'] = df['Close'] + self.atr_multiplier_tp * df['ATR']

        # Short positions: mirrored
        df['Stop_Loss_Short'] = df['Close'] + self.atr_multiplier_sl * df['ATR']
        df['Take_Profit_Short'] = df['Close'] - self.atr_multiplier_tp * df['ATR']

        # Risk-reward ratio (constant, derived from the two multipliers)
        df['RR_Ratio'] = self.atr_multiplier_tp / self.atr_multiplier_sl

        return df

    def check_cooldown(self, ticker: str, current_idx: int) -> bool:
        """
        Check if enough bars have passed since the last trade for *ticker*.

        Args:
            ticker: Ticker symbol
            current_idx: Current bar index

        Returns:
            True if cooldown period passed, False otherwise
        """
        if ticker not in self.last_trade_idx:
            return True

        bars_since_last = current_idx - self.last_trade_idx[ticker]
        return bars_since_last >= self.cooldown_candles

    def generate_signals(
        self,
        data: pd.DataFrame,
        ticker: Optional[str] = None
    ) -> pd.DataFrame:
        """
        Generate trading signals based on EMA crossovers and filters.

        Args:
            data: OHLC DataFrame with 'Open', 'High', 'Low', 'Close', 'Volume'
            ticker: Optional ticker symbol for cooldown tracking

        Returns:
            DataFrame (copy of input) with signals and indicators added
        """
        df = data.copy()

        # Calculate EMAs
        df['EMA_Fast'] = self.calculate_ema(df['Close'], self.fast_ema)
        df['EMA_Slow'] = self.calculate_ema(df['Close'], self.slow_ema)

        # EMA relationship (used by get_current_position)
        df['EMA_Fast_Above'] = df['EMA_Fast'] > df['EMA_Slow']
        df['EMA_Fast_Below'] = df['EMA_Fast'] < df['EMA_Slow']

        # Detect crossovers: state on this bar differs from previous bar
        df['Golden_Cross'] = (
            (df['EMA_Fast'] > df['EMA_Slow']) &
            (df['EMA_Fast'].shift(1) <= df['EMA_Slow'].shift(1))
        )

        df['Death_Cross'] = (
            (df['EMA_Fast'] < df['EMA_Slow']) &
            (df['EMA_Fast'].shift(1) >= df['EMA_Slow'].shift(1))
        )

        # Calculate ATR for risk management
        df['ATR'] = self.calculate_atr(df)

        # Calculate ADX for trend strength (if enabled)
        if self.use_adx_filter:
            df['ADX'], df['Plus_DI'], df['Minus_DI'] = self.calculate_adx(df)
            df['Strong_Trend'] = df['ADX'] > self.adx_threshold
        else:
            df['ADX'] = 0
            df['Plus_DI'] = 0
            df['Minus_DI'] = 0
            df['Strong_Trend'] = True

        # Calculate volume filter (if enabled)
        if self.use_volume_filter:
            df['Avg_Volume'] = df['Volume'].rolling(window=self.volume_period).mean()
            df['High_Volume'] = df['Volume'] > df['Avg_Volume']
        else:
            df['Avg_Volume'] = df['Volume']
            df['High_Volume'] = True

        # Additional metrics
        df['ATR_Pct'] = (df['ATR'] / df['Close']) * 100
        df['Volume_Ratio'] = df['Volume'] / df['Avg_Volume']

        # EMA distance (% above/below slow EMA)
        df['EMA_Distance_Pct'] = ((df['Close'] - df['EMA_Slow']) / df['EMA_Slow']) * 100

        # Calculate risk levels
        df = self.calculate_risk_levels(df)

        # Generate trading signals
        # LONG: Golden Cross + filters
        df['Signal_Long'] = (
            df['Golden_Cross'] &
            df['Strong_Trend'] &
            df['High_Volume']
        )

        # SHORT: Death Cross + filters
        df['Signal_Short'] = (
            df['Death_Cross'] &
            df['Strong_Trend'] &
            df['High_Volume']
        )

        # Apply cooldown if ticker specified: suppress signals that fire
        # within cooldown_candles bars of the previously accepted one
        if ticker is not None:
            cooldown_mask = pd.Series(False, index=df.index)

            for i in range(len(df)):
                if df['Signal_Long'].iloc[i] or df['Signal_Short'].iloc[i]:
                    if not self.check_cooldown(ticker, i):
                        cooldown_mask.iloc[i] = True
                    else:
                        self.last_trade_idx[ticker] = i

            df.loc[cooldown_mask, 'Signal_Long'] = False
            df.loc[cooldown_mask, 'Signal_Short'] = False

        return df

    def scan_market(
        self,
        tickers: List[str],
        data_loader,
        period: str = '1y'
    ) -> pd.DataFrame:
        """
        Scan multiple tickers for EMA crossover signals.

        Args:
            tickers: List of ticker symbols
            data_loader: Function to load OHLC data (e.g., yfinance),
                called as data_loader(ticker, period=period)
            period: Historical period to analyze

        Returns:
            DataFrame with signals for all tickers, best quality first
            (empty DataFrame when no ticker fires)
        """
        results = []

        for ticker in tickers:
            try:
                print(f"Scanning {ticker}...")

                # Load data
                data = data_loader(ticker, period=period)

                # Need at least slow_ema bars for a meaningful slow EMA
                if len(data) < self.slow_ema:
                    print(f"  ⚠ Insufficient data ({len(data)} bars < {self.slow_ema})")
                    continue

                # Generate signals
                df = self.generate_signals(data, ticker=ticker)

                # Check latest signal only (scan = "what fires right now")
                last_signal_long = df['Signal_Long'].iloc[-1]
                last_signal_short = df['Signal_Short'].iloc[-1]

                if last_signal_long or last_signal_short:
                    signal_type = 'LONG' if last_signal_long else 'SHORT'

                    # Calculate entry quality score
                    quality_score = self._calculate_quality_score(df)

                    results.append({
                        'Ticker': ticker,
                        'Signal': signal_type,
                        'Quality': quality_score,
                        'Price': round(df['Close'].iloc[-1], 2),
                        'EMA_Fast': round(df['EMA_Fast'].iloc[-1], 2),
                        'EMA_Slow': round(df['EMA_Slow'].iloc[-1], 2),
                        'Stop_Loss': round(
                            df['Stop_Loss_Long'].iloc[-1] if signal_type == 'LONG'
                            else df['Stop_Loss_Short'].iloc[-1], 2
                        ),
                        'Take_Profit': round(
                            df['Take_Profit_Long'].iloc[-1] if signal_type == 'LONG'
                            else df['Take_Profit_Short'].iloc[-1], 2
                        ),
                        'RR_Ratio': round(df['RR_Ratio'].iloc[-1], 2),
                        'ADX': round(df['ADX'].iloc[-1], 2),
                        'ATR_Pct': round(df['ATR_Pct'].iloc[-1], 2),
                        'Volume_Ratio': round(df['Volume_Ratio'].iloc[-1], 2),
                        'EMA_Distance': round(df['EMA_Distance_Pct'].iloc[-1], 2),
                        'Date': df.index[-1]
                    })

            except Exception as e:
                # Best-effort scan: one bad ticker must not abort the batch
                print(f"  ❌ Error analyzing {ticker}: {e}")
                continue

        if results:
            result_df = pd.DataFrame(results)
            # Sort by quality score (descending)
            result_df = result_df.sort_values('Quality', ascending=False)
            return result_df
        else:
            return pd.DataFrame()

    def _calculate_quality_score(self, df: pd.DataFrame) -> str:
        """
        Calculate signal quality based on multiple factors.

        Scoring (max 9): ADX strength up to 3, volume confirmation up to 2,
        volatility-in-range up to 2, crossover freshness up to 2.

        Args:
            df: DataFrame with indicators

        Returns:
            Quality rating: 'A', 'B', or 'C'
        """
        score = 0

        # Factor 1: ADX strength
        adx = df['ADX'].iloc[-1]
        if adx > 30:
            score += 3
        elif adx > 25:
            score += 2
        elif adx > 20:
            score += 1

        # Factor 2: Volume confirmation
        volume_ratio = df['Volume_Ratio'].iloc[-1]
        if volume_ratio > 1.5:
            score += 2
        elif volume_ratio > 1.2:
            score += 1

        # Factor 3: ATR (volatility - not too high, not too low)
        atr_pct = df['ATR_Pct'].iloc[-1]
        if 1.0 <= atr_pct <= 3.0:
            score += 2  # Ideal volatility range
        elif 0.5 <= atr_pct <= 5.0:
            score += 1  # Acceptable range

        # Factor 4: Clean crossover (not too far from EMAs)
        ema_distance = abs(df['EMA_Distance_Pct'].iloc[-1])
        if ema_distance < 2.0:
            score += 2  # Recent crossover
        elif ema_distance < 5.0:
            score += 1  # Somewhat recent

        # Convert to letter grade
        if score >= 7:
            return 'A'
        elif score >= 5:
            return 'B'
        else:
            return 'C'

    def get_current_position(self, df: pd.DataFrame) -> str:
        """
        Get current position based on EMA alignment at the last bar.

        Args:
            df: DataFrame with EMAs

        Returns:
            'LONG', 'SHORT', or 'NEUTRAL' (EMAs exactly equal)
        """
        if df['EMA_Fast_Above'].iloc[-1]:
            return 'LONG'
        elif df['EMA_Fast_Below'].iloc[-1]:
            return 'SHORT'
        else:
            return 'NEUTRAL'

    def get_strategy_description(self) -> str:
        """Get human-readable strategy description."""
        return f"""
EMA {self.fast_ema}/{self.slow_ema} Crossover Strategy

Entry Rules:
• LONG: EMA {self.fast_ema} crosses above EMA {self.slow_ema} (Golden Cross)
• SHORT: EMA {self.fast_ema} crosses below EMA {self.slow_ema} (Death Cross)

Filters:
• ADX: {'Enabled' if self.use_adx_filter else 'Disabled'} (threshold: {self.adx_threshold})
• Volume: {'Enabled' if self.use_volume_filter else 'Disabled'} ({self.volume_period}-period average)

Risk Management:
• Stop Loss: {self.atr_multiplier_sl}x ATR
• Take Profit: {self.atr_multiplier_tp}x ATR
• Risk/Reward: {self.atr_multiplier_tp / self.atr_multiplier_sl:.1f}:1
• Cooldown: {self.cooldown_candles} candles between trades

ATR Period: {self.atr_period}
"""
|
src/telegram_bot/telegram_bot_service.py
CHANGED
|
@@ -24,6 +24,7 @@ from src.services.async_trading_grid_calculator import generate_grid_message
|
|
| 24 |
from src.core.fundamental_analysis.async_fundamental_analyzer import AsyncFundamentalAnalyzer
|
| 25 |
from src.api.insiders.insider_trading_aggregator import InsiderTradingAggregator
|
| 26 |
from src.telegram_bot.logger import main_logger as logger
|
|
|
|
| 27 |
from src.core.ticker_scanner import TickerAnalyzer
|
| 28 |
from src.core.valuation_engine import ValuationEngine
|
| 29 |
#from src.core.trading.macd_strategy import AdvancedMACDStrategy
|
|
@@ -39,6 +40,7 @@ class TelegramBotService:
|
|
| 39 |
# Trading components (lazy initialized)
|
| 40 |
self.live_trader = None
|
| 41 |
self.pending_approvals: Dict[str, Any] = {} # approval_id -> approval data
|
|
|
|
| 42 |
|
| 43 |
async def initialize(self):
|
| 44 |
"""Initialize HTTP client"""
|
|
@@ -47,14 +49,26 @@ class TelegramBotService:
|
|
| 47 |
limits=httpx.Limits(max_keepalive_connections=5, max_connections=10),
|
| 48 |
follow_redirects=True
|
| 49 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 50 |
main_logger.info("TelegramBotService initialized")
|
| 51 |
|
| 52 |
async def cleanup(self):
|
| 53 |
"""Cleanup resources"""
|
|
|
|
|
|
|
| 54 |
if self.http_client:
|
| 55 |
await self.http_client.aclose()
|
| 56 |
main_logger.info("TelegramBotService cleaned up")
|
| 57 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 58 |
async def send_message_via_proxy(
|
| 59 |
self,
|
| 60 |
chat_id: int,
|
|
@@ -183,6 +197,11 @@ class TelegramBotService:
|
|
| 183 |
response += "/hello - Say hello\n"
|
| 184 |
response += "/help - Show help\n"
|
| 185 |
response += "/status - Check bot status\n"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 186 |
response += "/news - Show all today's news\n"
|
| 187 |
response += "/run - News feed analysis by ticker (NVDA)\n\n"
|
| 188 |
response += "/pooling - News feed pooling by ticker (NVDA) π§ͺ (not working properly, testing)\n\n"
|
|
@@ -197,6 +216,11 @@ class TelegramBotService:
|
|
| 197 |
response += "/start or /hello - Get started\n"
|
| 198 |
response += "/help - Show this help message\n"
|
| 199 |
response += "/status - Check bot status\n\n"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 200 |
response += "<b>About:</b>\n"
|
| 201 |
response += "This bot provides financial news and sentiment analysis."
|
| 202 |
response += "<b>Risk Analysis Commands:</b>\n"
|
|
@@ -252,6 +276,57 @@ class TelegramBotService:
|
|
| 252 |
response += "π§ System: Running on HuggingFace Spaces\n"
|
| 253 |
response += "π Proxy: Google Apps Script\n"
|
| 254 |
response += "π Status: All systems operational"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 255 |
|
| 256 |
elif base_command == "/news":
|
| 257 |
await self.news_feed_analysing(chat_id, command, user_name)
|
|
@@ -327,6 +402,12 @@ class TelegramBotService:
|
|
| 327 |
|
| 328 |
await self.send_message_via_proxy(chat_id, response)
|
| 329 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 330 |
async def _handle_regular_message(self, chat_id: int, text: str, user_name: str) -> None:
|
| 331 |
"""Handle regular (non-command) messages"""
|
| 332 |
response = f"Hello {user_name}! π\n\n"
|
|
|
|
| 24 |
from src.core.fundamental_analysis.async_fundamental_analyzer import AsyncFundamentalAnalyzer
|
| 25 |
from src.api.insiders.insider_trading_aggregator import InsiderTradingAggregator
|
| 26 |
from src.telegram_bot.logger import main_logger as logger
|
| 27 |
+
from src.core.monitoring_engine import MonitoringEngineService
|
| 28 |
from src.core.ticker_scanner import TickerAnalyzer
|
| 29 |
from src.core.valuation_engine import ValuationEngine
|
| 30 |
#from src.core.trading.macd_strategy import AdvancedMACDStrategy
|
|
|
|
| 40 |
# Trading components (lazy initialized)
|
| 41 |
self.live_trader = None
|
| 42 |
self.pending_approvals: Dict[str, Any] = {} # approval_id -> approval data
|
| 43 |
+
self.monitoring_engine: MonitoringEngineService | None = None
|
| 44 |
|
| 45 |
async def initialize(self):
|
| 46 |
"""Initialize HTTP client"""
|
|
|
|
| 49 |
limits=httpx.Limits(max_keepalive_connections=5, max_connections=10),
|
| 50 |
follow_redirects=True
|
| 51 |
)
|
| 52 |
+
# Initialize monitoring engine with Telegram callback
|
| 53 |
+
self.monitoring_engine = MonitoringEngineService(
|
| 54 |
+
send_alert=self._send_monitoring_alert,
|
| 55 |
+
http_client=self.http_client
|
| 56 |
+
)
|
| 57 |
+
await self.monitoring_engine.start()
|
| 58 |
main_logger.info("TelegramBotService initialized")
|
| 59 |
|
| 60 |
async def cleanup(self):
|
| 61 |
"""Cleanup resources"""
|
| 62 |
+
if self.monitoring_engine:
|
| 63 |
+
await self.monitoring_engine.stop()
|
| 64 |
if self.http_client:
|
| 65 |
await self.http_client.aclose()
|
| 66 |
main_logger.info("TelegramBotService cleaned up")
|
| 67 |
|
| 68 |
+
async def _send_monitoring_alert(self, chat_id: int, text: str) -> None:
    """Send monitoring alert to Telegram.

    Passed as the ``send_alert`` callback when constructing
    ``MonitoringEngineService`` in ``initialize()``; delivery is delegated
    to the proxy-based send path.

    Args:
        chat_id: Target Telegram chat id.
        text: Pre-formatted alert message.
    """
    await self.send_message_via_proxy(chat_id, text)
|
| 71 |
+
|
| 72 |
async def send_message_via_proxy(
|
| 73 |
self,
|
| 74 |
chat_id: int,
|
|
|
|
| 197 |
response += "/hello - Say hello\n"
|
| 198 |
response += "/help - Show help\n"
|
| 199 |
response += "/status - Check bot status\n"
|
| 200 |
+
response += "/monitor_subscribe TICKERS - Subscribe to drop alerts (e.g., /monitor_subscribe AAPL TSLA)\n"
|
| 201 |
+
response += "/monitor_unsubscribe TICKERS - Unsubscribe from drop alerts\n"
|
| 202 |
+
response += "/monitor_list - List subscribed tickers\n"
|
| 203 |
+
response += "/monitor_start - Start monitoring loop\n"
|
| 204 |
+
response += "/monitor_stop - Stop monitoring loop\n"
|
| 205 |
response += "/news - Show all today's news\n"
|
| 206 |
response += "/run - News feed analysis by ticker (NVDA)\n\n"
|
| 207 |
response += "/pooling - News feed pooling by ticker (NVDA) π§ͺ (not working properly, testing)\n\n"
|
|
|
|
| 216 |
response += "/start or /hello - Get started\n"
|
| 217 |
response += "/help - Show this help message\n"
|
| 218 |
response += "/status - Check bot status\n\n"
|
| 219 |
+
response += "/monitor_subscribe TICKERS - Subscribe to drop alerts\n"
|
| 220 |
+
response += "/monitor_unsubscribe TICKERS - Unsubscribe from drop alerts\n"
|
| 221 |
+
response += "/monitor_list - List subscribed tickers\n"
|
| 222 |
+
response += "/monitor_start - Start monitoring loop\n"
|
| 223 |
+
response += "/monitor_stop - Stop monitoring loop\n\n"
|
| 224 |
response += "<b>About:</b>\n"
|
| 225 |
response += "This bot provides financial news and sentiment analysis."
|
| 226 |
response += "<b>Risk Analysis Commands:</b>\n"
|
|
|
|
| 276 |
response += "π§ System: Running on HuggingFace Spaces\n"
|
| 277 |
response += "π Proxy: Google Apps Script\n"
|
| 278 |
response += "π Status: All systems operational"
|
| 279 |
+
elif base_command == "/monitor_subscribe":
|
| 280 |
+
if not self.monitoring_engine:
|
| 281 |
+
response = "β Monitoring engine is not initialized."
|
| 282 |
+
else:
|
| 283 |
+
tickers = self._parse_tickers(command_parts[1:]) if len(command_parts) > 1 else []
|
| 284 |
+
if not tickers:
|
| 285 |
+
response = "β Please provide tickers. Example: /monitor_subscribe AAPL TSLA"
|
| 286 |
+
else:
|
| 287 |
+
added = self.monitoring_engine.subscribe(chat_id, tickers)
|
| 288 |
+
if added:
|
| 289 |
+
response = f"β
Subscribed to: {', '.join(added)}"
|
| 290 |
+
else:
|
| 291 |
+
response = "βΉοΈ No new tickers were added."
|
| 292 |
+
|
| 293 |
+
elif base_command == "/monitor_unsubscribe":
|
| 294 |
+
if not self.monitoring_engine:
|
| 295 |
+
response = "β Monitoring engine is not initialized."
|
| 296 |
+
else:
|
| 297 |
+
tickers = self._parse_tickers(command_parts[1:]) if len(command_parts) > 1 else []
|
| 298 |
+
if not tickers:
|
| 299 |
+
response = "β Please provide tickers. Example: /monitor_unsubscribe AAPL TSLA"
|
| 300 |
+
else:
|
| 301 |
+
removed = self.monitoring_engine.unsubscribe(chat_id, tickers)
|
| 302 |
+
if removed:
|
| 303 |
+
response = f"β
Unsubscribed from: {', '.join(removed)}"
|
| 304 |
+
else:
|
| 305 |
+
response = "βΉοΈ No tickers were removed."
|
| 306 |
+
|
| 307 |
+
elif base_command == "/monitor_list":
|
| 308 |
+
if not self.monitoring_engine:
|
| 309 |
+
response = "β Monitoring engine is not initialized."
|
| 310 |
+
else:
|
| 311 |
+
tickers = self.monitoring_engine.list_subscriptions(chat_id)
|
| 312 |
+
if tickers:
|
| 313 |
+
response = f"π Subscribed tickers: {', '.join(tickers)}"
|
| 314 |
+
else:
|
| 315 |
+
response = "βΉοΈ You have no subscriptions yet."
|
| 316 |
+
|
| 317 |
+
elif base_command == "/monitor_start":
|
| 318 |
+
if not self.monitoring_engine:
|
| 319 |
+
response = "β Monitoring engine is not initialized."
|
| 320 |
+
else:
|
| 321 |
+
await self.monitoring_engine.start()
|
| 322 |
+
response = "β
Monitoring engine started."
|
| 323 |
+
|
| 324 |
+
elif base_command == "/monitor_stop":
|
| 325 |
+
if not self.monitoring_engine:
|
| 326 |
+
response = "β Monitoring engine is not initialized."
|
| 327 |
+
else:
|
| 328 |
+
await self.monitoring_engine.stop()
|
| 329 |
+
response = "β
Monitoring engine stopped."
|
| 330 |
|
| 331 |
elif base_command == "/news":
|
| 332 |
await self.news_feed_analysing(chat_id, command, user_name)
|
|
|
|
| 402 |
|
| 403 |
await self.send_message_via_proxy(chat_id, response)
|
| 404 |
|
| 405 |
+
def _parse_tickers(self, parts: list[str]) -> list[str]:
|
| 406 |
+
"""Parse tickers from command arguments."""
|
| 407 |
+
joined = " ".join(parts)
|
| 408 |
+
raw = [item.strip().upper() for item in joined.replace(",", " ").split()]
|
| 409 |
+
return [t for t in raw if t]
|
| 410 |
+
|
| 411 |
async def _handle_regular_message(self, chat_id: int, text: str, user_name: str) -> None:
|
| 412 |
"""Handle regular (non-command) messages"""
|
| 413 |
response = f"Hello {user_name}! π\n\n"
|