Dmitry Beresnev committed on
Commit ·
d5036bb
1
Parent(s): 6da625b
add Market Risk Engine
Browse files- examples/risk_engine_example.py +154 -0
- requirements.txt +1 -0
- src/core/macroeconomics/markets/risk_engine/__init__.py +6 -0
- src/core/macroeconomics/markets/risk_engine/aggregator.py +349 -0
- src/core/macroeconomics/markets/risk_engine/engine.py +373 -0
- src/core/macroeconomics/markets/risk_engine/indicators/__init__.py +99 -0
- src/core/macroeconomics/markets/risk_engine/indicators/breadth.py +449 -0
- src/core/macroeconomics/markets/risk_engine/indicators/credit.py +414 -0
- src/core/macroeconomics/markets/risk_engine/indicators/fx_funding.py +415 -0
- src/core/macroeconomics/markets/risk_engine/indicators/liquidity.py +429 -0
- src/core/macroeconomics/markets/risk_engine/indicators/macro_rates.py +551 -0
- src/core/macroeconomics/markets/risk_engine/indicators/tail_risk.py +538 -0
- src/core/macroeconomics/markets/risk_engine/indicators/volatility.py +405 -0
examples/risk_engine_example.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Market Risk Engine - Usage Examples
|
| 3 |
+
|
| 4 |
+
Simple examples showing how to use the Market Risk Engine.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import asyncio
|
| 8 |
+
from src.core.macroeconomics.markets.risk_engine import (
|
| 9 |
+
MarketRiskEngine,
|
| 10 |
+
RiskRegime,
|
| 11 |
+
RiskEngineConfig,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
async def example_1_basic_usage():
    """Example 1: Basic risk assessment."""
    print("\n=== Example 1: Basic Risk Assessment ===\n")

    # Create an engine with default settings and compute the current
    # global risk score.
    engine = MarketRiskEngine()
    risk_score = await engine.assess_current_risk()

    # Report the headline numbers.
    print(f"Global Risk Score: {risk_score.risk_score:.1f}/100")
    print(f"Risk Regime: {risk_score.regime.value.upper()}")

    # Advisory message per regime; any regime other than RED/YELLOW
    # falls through to the low-risk message (matches the original
    # if/elif/else chain).
    advisories = {
        RiskRegime.RED: "⚠️ HIGH RISK: Consider defensive positioning",
        RiskRegime.YELLOW: "⚡ MODERATE RISK: Monitor closely",
    }
    print(advisories.get(
        risk_score.regime,
        "✅ LOW RISK: Favorable environment for risk assets",
    ))
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
async def example_2_category_breakdown():
    """Example 2: Detailed category breakdown."""
    print("\n=== Example 2: Category Breakdown ===\n")

    engine = MarketRiskEngine()
    risk_score = await engine.assess_current_risk()

    # One line per category: human-readable name plus its 0-100 score.
    print("Risk by Category:")
    for category, cat_score in risk_score.category_scores.items():
        label = category.value.replace('_', ' ').title()
        print(f" {label:20s} {cat_score.score:5.1f}/100")
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
async def example_3_custom_config():
    """Example 3: Custom configuration."""
    print("\n=== Example 3: Custom Configuration ===\n")

    # Shorten the history window to 1 year (252 trading days) instead of
    # the default 2 years, then run a normal assessment.
    custom_config = RiskEngineConfig(lookback_days=252)
    engine = MarketRiskEngine(config=custom_config)

    risk_score = await engine.assess_current_risk()
    print(f"Risk Score (1-year lookback): {risk_score.risk_score:.1f}/100")
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
async def example_4_full_report():
    """Example 4: Generate full report."""
    print("\n=== Example 4: Full Report ===\n")

    # The engine can render a comprehensive, ready-to-print report in a
    # single call; just forward it to stdout.
    report = await MarketRiskEngine().get_full_report()
    print(report)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
async def example_5_top_risks():
    """Example 5: Identify top risk categories."""
    print("\n=== Example 5: Top Risk Categories ===\n")

    engine = MarketRiskEngine()
    risk_score = await engine.assess_current_risk()

    # Ask the aggregator for the three most stressed categories.
    top_risks = engine.aggregator.get_top_risks(risk_score, n=3)

    print("Top 3 Risk Areas:")
    rank = 0
    for cat_score in top_risks:
        rank += 1
        label = cat_score.category.value.replace('_', ' ').title()
        print(f"\n{rank}. {label}")
        print(f" Score: {cat_score.score:.1f}/100")
        print(f" {cat_score.description}")
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
async def example_6_programmatic_usage():
    """Example 6: Programmatic decision making.

    Shows how to branch trading logic on the global risk score and how to
    inspect a single category (credit) safely.
    """
    print("\n=== Example 6: Programmatic Usage ===\n")

    engine = MarketRiskEngine()
    risk_score = await engine.assess_current_risk()

    # Make trading decisions based on risk
    if risk_score.risk_score > 70:
        print("🔴 Risk Score > 70: Reduce exposure")
        print(" Action: Move to defensive positions (TLT, GLD, cash)")

    elif risk_score.risk_score > 50:
        print("⚡ Risk Score 50-70: Cautious positioning")
        print(" Action: Reduce leverage, tighten stops")

    else:
        print("✅ Risk Score < 50: Favorable for risk assets")
        print(" Action: Normal positioning, consider opportunities")

    # Check specific categories.
    # FIX: the original used next(...) without a default, which raises
    # StopIteration if no "credit" category is present (e.g. when credit
    # data failed to load). Supply a default of None instead.
    credit_category = next(
        (c for c in risk_score.category_scores if c.value == "credit"),
        None,
    )
    credit_score = (
        risk_score.category_scores.get(credit_category)
        if credit_category is not None
        else None
    )

    if credit_score and credit_score.score > 65:
        print("\n⚠️ Credit stress detected:")
        print(" Consider: Reduce HY exposure, focus on quality")
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
async def main():
    """Run all examples."""
    # Run every example in order; a failure in one must not stop the rest.
    for example in (
        example_1_basic_usage,
        example_2_category_breakdown,
        example_3_custom_config,
        example_4_full_report,
        example_5_top_risks,
        example_6_programmatic_usage,
    ):
        try:
            await example()
        except Exception as e:
            # Report the failure with a full traceback and keep going.
            print(f"\n✗ Example failed: {e}")
            import traceback
            traceback.print_exc()

    print("\n" + "=" * 70 + "\n")
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
if __name__ == "__main__":
    # To run just one example, swap in e.g.:
    #     asyncio.run(example_1_basic_usage())
    # By default, run the whole suite.
    asyncio.run(main())
|
requirements.txt
CHANGED
|
@@ -6,6 +6,7 @@ pandas>=1.5.0,<3.0.0
|
|
| 6 |
openpyxl>=3.0.0
|
| 7 |
|
| 8 |
yfinance>=0.2.18
|
|
|
|
| 9 |
finnhub-python>=2.4.0
|
| 10 |
|
| 11 |
fastapi>=0.100.0
|
|
|
|
| 6 |
openpyxl>=3.0.0
|
| 7 |
|
| 8 |
yfinance>=0.2.18
|
| 9 |
+
pandas-datareader>=0.10.0
|
| 10 |
finnhub-python>=2.4.0
|
| 11 |
|
| 12 |
fastapi>=0.100.0
|
src/core/macroeconomics/markets/risk_engine/__init__.py
CHANGED
|
@@ -50,6 +50,9 @@ from src.core.macroeconomics.markets.risk_engine.data_sources import (
|
|
| 50 |
YAHOO_SYMBOLS,
|
| 51 |
)
|
| 52 |
|
|
|
|
|
|
|
|
|
|
| 53 |
__all__ = [
|
| 54 |
# Core Models
|
| 55 |
"RiskRegime",
|
|
@@ -65,6 +68,9 @@ __all__ = [
|
|
| 65 |
"YahooSource",
|
| 66 |
"FRED_SYMBOLS",
|
| 67 |
"YAHOO_SYMBOLS",
|
|
|
|
|
|
|
|
|
|
| 68 |
]
|
| 69 |
|
| 70 |
__version__ = "0.1.0"
|
|
|
|
| 50 |
YAHOO_SYMBOLS,
|
| 51 |
)
|
| 52 |
|
| 53 |
+
from src.core.macroeconomics.markets.risk_engine.aggregator import RiskAggregator
|
| 54 |
+
from src.core.macroeconomics.markets.risk_engine.engine import MarketRiskEngine
|
| 55 |
+
|
| 56 |
__all__ = [
|
| 57 |
# Core Models
|
| 58 |
"RiskRegime",
|
|
|
|
| 68 |
"YahooSource",
|
| 69 |
"FRED_SYMBOLS",
|
| 70 |
"YAHOO_SYMBOLS",
|
| 71 |
+
# Engine
|
| 72 |
+
"MarketRiskEngine",
|
| 73 |
+
"RiskAggregator",
|
| 74 |
]
|
| 75 |
|
| 76 |
__version__ = "0.1.0"
|
src/core/macroeconomics/markets/risk_engine/aggregator.py
ADDED
|
@@ -0,0 +1,349 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Aggregation and scoring logic for risk engine.
|
| 3 |
+
|
| 4 |
+
Combines individual indicators into category scores and global risk score.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from typing import Dict, List, Optional
|
| 8 |
+
from collections import defaultdict
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
from src.core.macroeconomics.markets.risk_engine.models import (
|
| 12 |
+
IndicatorSignal,
|
| 13 |
+
IndicatorCategory,
|
| 14 |
+
CategoryScore,
|
| 15 |
+
GlobalRiskScore,
|
| 16 |
+
RiskRegime,
|
| 17 |
+
)
|
| 18 |
+
from src.telegram_bot.logger import main_logger as logger
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class RiskAggregator:
    """
    Aggregates individual indicators into category and global scores.

    Architecture:
        1. Group indicators by category
        2. Calculate category-level stress scores
        3. Combine categories into global risk score
        4. Map to risk regime (GREEN/YELLOW/RED)
    """

    # Category weights for the global score, based on systemic importance.
    CATEGORY_WEIGHTS = {
        IndicatorCategory.VOLATILITY: 0.20,   # 20% - Market fear gauge
        IndicatorCategory.CREDIT: 0.18,       # 18% - Credit markets critical
        IndicatorCategory.LIQUIDITY: 0.15,    # 15% - Liquidity crises = systemic
        IndicatorCategory.MACRO_RATES: 0.15,  # 15% - Macro regime shifts
        IndicatorCategory.FX_FUNDING: 0.12,   # 12% - Dollar funding stress
        IndicatorCategory.BREADTH: 0.10,      # 10% - Market internals
        IndicatorCategory.TAIL_RISK: 0.10,    # 10% - Tail events
    }

    # Risk regime thresholds (on the 0-100 risk score).
    REGIME_THRESHOLDS = {
        "green_yellow": 40,  # Below 40 = GREEN (Risk-On)
        "yellow_red": 65,    # Above 65 = RED (Deleveraging Risk)
    }

    def __init__(self):
        """Initialize aggregator, normalizing weights if they don't sum to 1."""
        total_weight = sum(self.CATEGORY_WEIGHTS.values())
        if not np.isclose(total_weight, 1.0):
            logger.warning(
                f"Category weights sum to {total_weight}, not 1.0. "
                "Normalizing..."
            )
            # Rescale so weights sum to exactly 1. Assigning here creates an
            # instance attribute that shadows the class-level dict, so other
            # instances keep the original weights.
            factor = 1.0 / total_weight
            self.CATEGORY_WEIGHTS = {
                k: v * factor for k, v in self.CATEGORY_WEIGHTS.items()
            }

    def aggregate(
        self,
        signals: List[IndicatorSignal]
    ) -> GlobalRiskScore:
        """
        Aggregate indicators into global risk score.

        Args:
            signals: List of indicator signals

        Returns:
            GlobalRiskScore with regime and breakdown
        """
        if not signals:
            logger.warning("No signals provided for aggregation")
            return self._create_default_score()

        # Group signals by category, then score each category.
        category_signals = self._group_by_category(signals)

        category_scores = {}
        for category, cat_signals in category_signals.items():
            category_scores[category] = self._calculate_category_score(
                category,
                cat_signals
            )

        # Combine category scores into one 0-100 number and map to regime.
        global_score = self._calculate_global_score(category_scores)
        regime = self._determine_regime(global_score)

        return GlobalRiskScore(
            risk_score=global_score,
            regime=regime,
            category_scores=category_scores
        )

    def _group_by_category(
        self,
        signals: List[IndicatorSignal]
    ) -> Dict[IndicatorCategory, List[IndicatorSignal]]:
        """Group signals by category."""
        grouped = defaultdict(list)
        for signal in signals:
            grouped[signal.category].append(signal)
        return dict(grouped)

    def _calculate_category_score(
        self,
        category: IndicatorCategory,
        signals: List[IndicatorSignal]
    ) -> CategoryScore:
        """
        Calculate category-level score.

        Uses equal-weighted average of indicator stress levels.
        Tracks contributing indicators and their signals.

        Args:
            category: Indicator category
            signals: Signals in this category

        Returns:
            CategoryScore
        """
        if not signals:
            return CategoryScore(
                category=category,
                score=50.0,  # Neutral
                active_indicators=0,
                description="No data available"
            )

        # Equal-weighted average of stress levels; cast to a plain float so
        # downstream formatting/serialization never sees a numpy scalar.
        # (FIX: also removed an unused `max_signal` local from the original.)
        avg_stress = float(np.mean([s.stress_level for s in signals]))

        # Count active (non-neutral) indicators: anything outside the
        # 45-55 neutral band counts as "active".
        active_count = sum(1 for s in signals if s.stress_level > 55 or s.stress_level < 45)

        # Status label by average stress band.
        if avg_stress > 70:
            status = "🔴 SEVERE"
        elif avg_stress > 55:
            status = "⚠️ ELEVATED"
        elif avg_stress < 40:
            status = "✅ CALM"
        else:
            status = "⚡ WATCH"

        description = (
            f"{status}: {category.value.replace('_', ' ').title()} "
            f"({len(signals)} indicators, {active_count} active)"
        )

        return CategoryScore(
            category=category,
            score=avg_stress,
            active_indicators=len(signals),
            description=description
        )

    def _calculate_global_score(
        self,
        category_scores: Dict[IndicatorCategory, CategoryScore]
    ) -> float:
        """
        Calculate global risk score from category scores.

        Uses weighted average based on CATEGORY_WEIGHTS.
        Missing categories are treated as neutral (50.0).

        Args:
            category_scores: Category-level scores

        Returns:
            Global risk score (0-100)
        """
        weighted_sum = 0.0
        total_weight = 0.0

        for category, weight in self.CATEGORY_WEIGHTS.items():
            # Missing category contributes a neutral 50.0 (same weight, so
            # gaps in data pull the score toward neutral rather than zero).
            # Simplified from the original's duplicated if/else branches.
            if category in category_scores:
                score = category_scores[category].score
            else:
                score = 50.0
            weighted_sum += score * weight
            total_weight += weight

        if total_weight == 0:
            return 50.0  # Neutral

        global_score = weighted_sum / total_weight
        return float(np.clip(global_score, 0.0, 100.0))

    def _determine_regime(self, risk_score: float) -> RiskRegime:
        """
        Map risk score to regime.

        Thresholds:
            - [0, 40): GREEN (Risk-On)
            - [40, 65): YELLOW (Fragile)
            - [65, 100]: RED (Deleveraging Risk)

        Args:
            risk_score: Global risk score (0-100)

        Returns:
            RiskRegime
        """
        if risk_score < self.REGIME_THRESHOLDS["green_yellow"]:
            return RiskRegime.GREEN
        elif risk_score < self.REGIME_THRESHOLDS["yellow_red"]:
            return RiskRegime.YELLOW
        else:
            return RiskRegime.RED

    def _create_default_score(self) -> GlobalRiskScore:
        """Create default score when no data available."""
        # Every category gets a neutral 50.0 placeholder; the overall regime
        # defaults to YELLOW (caution) rather than GREEN when we know nothing.
        category_scores = {
            category: CategoryScore(
                category=category,
                score=50.0,
                active_indicators=0,
                description="No data"
            )
            for category in IndicatorCategory
        }

        return GlobalRiskScore(
            risk_score=50.0,
            regime=RiskRegime.YELLOW,
            category_scores=category_scores
        )

    def get_top_risks(
        self,
        global_score: GlobalRiskScore,
        n: int = 5
    ) -> List[CategoryScore]:
        """
        Get top N most stressed categories.

        Args:
            global_score: Global risk score
            n: Number of top risks to return

        Returns:
            List of category scores, sorted by stress level
        """
        sorted_categories = sorted(
            global_score.category_scores.values(),
            key=lambda c: c.score,
            reverse=True
        )
        return sorted_categories[:n]

    def get_regime_interpretation(self, regime: RiskRegime) -> str:
        """
        Get human-readable interpretation of regime.

        Args:
            regime: Risk regime

        Returns:
            Interpretation string
        """
        interpretations = {
            RiskRegime.GREEN: (
                "✅ RISK-ON: Markets healthy, favorable environment for risk assets. "
                "Low volatility, tight credit spreads, strong liquidity."
            ),
            RiskRegime.YELLOW: (
                "⚡ FRAGILE: Markets showing stress signals but not yet critical. "
                "Elevated volatility, widening spreads, or deteriorating breadth. "
                "Monitor closely for regime shift."
            ),
            RiskRegime.RED: (
                "🔴 DELEVERAGING RISK: Severe market stress detected. "
                "Multiple warning signals flashing. High probability of forced selling, "
                "liquidity issues, or systemic stress. Risk-off positioning recommended."
            )
        }
        return interpretations.get(regime, "Unknown regime")

    def format_summary(self, global_score: GlobalRiskScore) -> str:
        """
        Format global score as text summary.

        Args:
            global_score: Global risk score

        Returns:
            Formatted summary string
        """
        lines = []
        lines.append("=" * 50)
        lines.append("MARKET RISK ENGINE - GLOBAL ASSESSMENT")
        lines.append("=" * 50)
        lines.append("")

        # Global score with a regime emoji.
        regime_emoji = {
            RiskRegime.GREEN: "✅",
            RiskRegime.YELLOW: "⚡",
            RiskRegime.RED: "🔴"
        }
        lines.append(
            f"Global Risk Score: {global_score.risk_score:.1f}/100 "
            f"{regime_emoji[global_score.regime]} {global_score.regime.value.upper()}"
        )
        lines.append("")
        lines.append(self.get_regime_interpretation(global_score.regime))
        lines.append("")

        # Category breakdown
        lines.append("Category Breakdown:")
        lines.append("-" * 50)

        # Sort categories by score (descending)
        sorted_cats = sorted(
            global_score.category_scores.items(),
            key=lambda x: x[1].score,
            reverse=True
        )

        for category, score in sorted_cats:
            cat_name = category.value.replace('_', ' ').title()
            # FIX: .get() with a 0.0 default so a category that has a score
            # but no configured weight cannot raise KeyError here.
            weight = self.CATEGORY_WEIGHTS.get(category, 0.0) * 100
            lines.append(
                f" {cat_name:20s} {score.score:5.1f}/100 "
                f"(weight: {weight:4.1f}%) - {score.description}"
            )

        lines.append("=" * 50)

        return "\n".join(lines)
|
src/core/macroeconomics/markets/risk_engine/engine.py
ADDED
|
@@ -0,0 +1,373 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Main Market Risk Engine.
|
| 3 |
+
|
| 4 |
+
Orchestrates data loading, indicator calculation, and risk scoring.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import asyncio
|
| 8 |
+
from typing import Dict, List, Optional
|
| 9 |
+
from datetime import datetime, timedelta
|
| 10 |
+
|
| 11 |
+
from src.core.macroeconomics.markets.risk_engine.models import (
|
| 12 |
+
TimeSeriesData,
|
| 13 |
+
GlobalRiskScore,
|
| 14 |
+
IndicatorSignal,
|
| 15 |
+
RiskEngineConfig,
|
| 16 |
+
)
|
| 17 |
+
from src.core.macroeconomics.markets.risk_engine.data_sources import (
|
| 18 |
+
FREDSource,
|
| 19 |
+
YahooSource,
|
| 20 |
+
)
|
| 21 |
+
from src.core.macroeconomics.markets.risk_engine.indicators import *
|
| 22 |
+
from src.core.macroeconomics.markets.risk_engine.aggregator import RiskAggregator
|
| 23 |
+
from src.telegram_bot.logger import main_logger as logger
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class MarketRiskEngine:
|
| 27 |
+
"""
|
| 28 |
+
Professional market risk engine for turbulence detection.
|
| 29 |
+
|
| 30 |
+
Architecture:
|
| 31 |
+
1. Data Sources → Fetch macro/market data
|
| 32 |
+
2. Indicators → Calculate stress signals
|
| 33 |
+
3. Aggregation → Combine into category scores
|
| 34 |
+
4. Risk Score → Global regime (GREEN/YELLOW/RED)
|
| 35 |
+
|
| 36 |
+
Example:
|
| 37 |
+
engine = MarketRiskEngine()
|
| 38 |
+
risk_score = await engine.assess_current_risk()
|
| 39 |
+
print(f"Risk: {risk_score.risk_score}/100")
|
| 40 |
+
print(f"Regime: {risk_score.regime.value}")
|
| 41 |
+
"""
|
| 42 |
+
|
| 43 |
+
def __init__(self, config: Optional[RiskEngineConfig] = None):
|
| 44 |
+
"""
|
| 45 |
+
Initialize risk engine.
|
| 46 |
+
|
| 47 |
+
Args:
|
| 48 |
+
config: Engine configuration (optional)
|
| 49 |
+
"""
|
| 50 |
+
self.config = config or RiskEngineConfig()
|
| 51 |
+
|
| 52 |
+
# Initialize data sources
|
| 53 |
+
self.fred_source = FREDSource()
|
| 54 |
+
self.yahoo_source = YahooSource()
|
| 55 |
+
|
| 56 |
+
# Initialize aggregator
|
| 57 |
+
self.aggregator = RiskAggregator()
|
| 58 |
+
|
| 59 |
+
# Initialize all indicators
|
| 60 |
+
self.indicators = self._initialize_indicators()
|
| 61 |
+
|
| 62 |
+
logger.info(
|
| 63 |
+
f"MarketRiskEngine initialized with {len(self.indicators)} indicators"
|
| 64 |
+
)
|
| 65 |
+
|
| 66 |
+
def _initialize_indicators(self) -> List:
|
| 67 |
+
"""Initialize all indicator instances."""
|
| 68 |
+
return [
|
| 69 |
+
# Volatility (4)
|
| 70 |
+
VIXStressIndicator(),
|
| 71 |
+
VIXTermStructureIndicator(),
|
| 72 |
+
VVIXIndicator(),
|
| 73 |
+
RealizedVolatilityIndicator(),
|
| 74 |
+
|
| 75 |
+
# Credit (4)
|
| 76 |
+
CreditSpreadWideningIndicator(),
|
| 77 |
+
IGvsHYDivergenceIndicator(),
|
| 78 |
+
CreditETFPressureIndicator(),
|
| 79 |
+
CorporateBondStressIndicator(),
|
| 80 |
+
|
| 81 |
+
# Liquidity (4)
|
| 82 |
+
AmihudIlliquidityIndicator(),
|
| 83 |
+
BidAskSpreadIndicator(),
|
| 84 |
+
TreasuryLiquidityIndicator(),
|
| 85 |
+
MarketDepthIndicator(),
|
| 86 |
+
|
| 87 |
+
# Macro/Rates (5)
|
| 88 |
+
YieldCurveIndicator(),
|
| 89 |
+
NFCIIndicator(),
|
| 90 |
+
RepoStressIndicator(),
|
| 91 |
+
MOVEIndexIndicator(),
|
| 92 |
+
RealRatesIndicator(),
|
| 93 |
+
|
| 94 |
+
# FX/Funding (4)
|
| 95 |
+
DXYSpikeIndicator(),
|
| 96 |
+
JPYCarryUnwindIndicator(),
|
| 97 |
+
CrossCurrencyBasisIndicator(),
|
| 98 |
+
EmergingMarketStressIndicator(),
|
| 99 |
+
|
| 100 |
+
# Breadth (4)
|
| 101 |
+
SPYQQQDivergenceIndicator(),
|
| 102 |
+
SectorRotationIndicator(),
|
| 103 |
+
MarketConcentrationIndicator(),
|
| 104 |
+
SmallCapWeaknessIndicator(),
|
| 105 |
+
|
| 106 |
+
# Tail Risk (5)
|
| 107 |
+
CorrelationSpikeIndicator(),
|
| 108 |
+
VolatilityRegimeIndicator(),
|
| 109 |
+
DrawdownIndicator(),
|
| 110 |
+
SkewIndicator(),
|
| 111 |
+
TailEventIndicator(),
|
| 112 |
+
]
|
| 113 |
+
|
| 114 |
+
async def assess_current_risk(
|
| 115 |
+
self,
|
| 116 |
+
lookback_days: Optional[int] = None
|
| 117 |
+
) -> GlobalRiskScore:
|
| 118 |
+
"""
|
| 119 |
+
Assess current market risk.
|
| 120 |
+
|
| 121 |
+
Main entry point for risk assessment.
|
| 122 |
+
|
| 123 |
+
Args:
|
| 124 |
+
lookback_days: Days of historical data to fetch (default from config)
|
| 125 |
+
|
| 126 |
+
Returns:
|
| 127 |
+
GlobalRiskScore with regime and breakdown
|
| 128 |
+
"""
|
| 129 |
+
lookback = lookback_days or self.config.lookback_days
|
| 130 |
+
|
| 131 |
+
logger.info(f"Starting risk assessment (lookback: {lookback} days)")
|
| 132 |
+
|
| 133 |
+
# Step 1: Load data
|
| 134 |
+
data = await self._load_data(lookback)
|
| 135 |
+
|
| 136 |
+
if not data:
|
| 137 |
+
logger.error("Failed to load any data")
|
| 138 |
+
return self.aggregator._create_default_score()
|
| 139 |
+
|
| 140 |
+
logger.info(f"Loaded data for {len(data)} symbols")
|
| 141 |
+
|
| 142 |
+
# Step 2: Calculate indicators
|
| 143 |
+
signals = self._calculate_indicators(data)
|
| 144 |
+
|
| 145 |
+
if not signals:
|
| 146 |
+
logger.warning("No indicators calculated successfully")
|
| 147 |
+
return self.aggregator._create_default_score()
|
| 148 |
+
|
| 149 |
+
logger.info(
|
| 150 |
+
f"Calculated {len(signals)}/{len(self.indicators)} indicators successfully"
|
| 151 |
+
)
|
| 152 |
+
|
| 153 |
+
# Step 3: Aggregate into risk score
|
| 154 |
+
risk_score = self.aggregator.aggregate(signals)
|
| 155 |
+
|
| 156 |
+
logger.info(
|
| 157 |
+
f"Risk assessment complete: {risk_score.risk_score:.1f}/100 "
|
| 158 |
+
f"({risk_score.regime.value})"
|
| 159 |
+
)
|
| 160 |
+
|
| 161 |
+
return risk_score
|
| 162 |
+
|
| 163 |
+
async def _load_data(
|
| 164 |
+
self,
|
| 165 |
+
lookback_days: int
|
| 166 |
+
) -> Dict[str, TimeSeriesData]:
|
| 167 |
+
"""
|
| 168 |
+
Load data from all sources.
|
| 169 |
+
|
| 170 |
+
Args:
|
| 171 |
+
lookback_days: Days of historical data
|
| 172 |
+
|
| 173 |
+
Returns:
|
| 174 |
+
Dictionary of symbol -> TimeSeriesData
|
| 175 |
+
"""
|
| 176 |
+
# Calculate date range
|
| 177 |
+
end_date = datetime.now()
|
| 178 |
+
start_date = end_date - timedelta(days=lookback_days)
|
| 179 |
+
|
| 180 |
+
logger.info(
|
| 181 |
+
f"Loading data from {start_date.date()} to {end_date.date()}"
|
| 182 |
+
)
|
| 183 |
+
|
| 184 |
+
# Load from both sources in parallel
|
| 185 |
+
fred_task = self.fred_source.fetch_multiple(start_date, end_date)
|
| 186 |
+
yahoo_task = self.yahoo_source.fetch_multiple(start_date, end_date)
|
| 187 |
+
|
| 188 |
+
fred_data, yahoo_data = await asyncio.gather(
|
| 189 |
+
fred_task,
|
| 190 |
+
yahoo_task,
|
| 191 |
+
return_exceptions=True
|
| 192 |
+
)
|
| 193 |
+
|
| 194 |
+
# Combine data
|
| 195 |
+
combined = {}
|
| 196 |
+
|
| 197 |
+
if isinstance(fred_data, dict):
|
| 198 |
+
combined.update(fred_data)
|
| 199 |
+
logger.info(f"Loaded {len(fred_data)} FRED symbols")
|
| 200 |
+
else:
|
| 201 |
+
logger.error(f"FRED data loading failed: {fred_data}")
|
| 202 |
+
|
| 203 |
+
if isinstance(yahoo_data, dict):
|
| 204 |
+
combined.update(yahoo_data)
|
| 205 |
+
logger.info(f"Loaded {len(yahoo_data)} Yahoo symbols")
|
| 206 |
+
else:
|
| 207 |
+
logger.error(f"Yahoo data loading failed: {yahoo_data}")
|
| 208 |
+
|
| 209 |
+
return combined
|
| 210 |
+
|
| 211 |
+
def _calculate_indicators(
|
| 212 |
+
self,
|
| 213 |
+
data: Dict[str, TimeSeriesData]
|
| 214 |
+
) -> List[IndicatorSignal]:
|
| 215 |
+
"""
|
| 216 |
+
Calculate all indicators.
|
| 217 |
+
|
| 218 |
+
Args:
|
| 219 |
+
data: Market/macro data
|
| 220 |
+
|
| 221 |
+
Returns:
|
| 222 |
+
List of indicator signals
|
| 223 |
+
"""
|
| 224 |
+
signals = []
|
| 225 |
+
|
| 226 |
+
for indicator in self.indicators:
|
| 227 |
+
try:
|
| 228 |
+
signal = indicator.compute(data)
|
| 229 |
+
if signal is not None:
|
| 230 |
+
signals.append(signal)
|
| 231 |
+
else:
|
| 232 |
+
logger.debug(
|
| 233 |
+
f"{indicator.name}: No signal (likely missing data)"
|
| 234 |
+
)
|
| 235 |
+
except Exception as e:
|
| 236 |
+
logger.error(
|
| 237 |
+
f"{indicator.name}: Calculation failed: {e}",
|
| 238 |
+
exc_info=True
|
| 239 |
+
)
|
| 240 |
+
|
| 241 |
+
return signals
|
| 242 |
+
|
| 243 |
+
    def get_indicator_details(
        self,
        signals: List[IndicatorSignal]
    ) -> str:
        """
        Format detailed indicator breakdown.

        Groups signals by category, orders categories alphabetically by
        their enum value and signals within each category by descending
        stress level, and renders each signal with a visual stress bar.

        Args:
            signals: List of indicator signals

        Returns:
            Formatted string with all indicator details
        """
        lines = []
        lines.append("=" * 60)
        lines.append("INDICATOR DETAILS")
        lines.append("=" * 60)
        lines.append("")

        # Group by category
        # Local import keeps the dependency next to its single use site.
        from collections import defaultdict
        by_category = defaultdict(list)
        for signal in signals:
            by_category[signal.category].append(signal)

        # Sort categories alphabetically by enum value for stable output.
        sorted_cats = sorted(by_category.items(), key=lambda x: x[0].value)

        for category, cat_signals in sorted_cats:
            lines.append(f"\n### {category.value.replace('_', ' ').upper()} ###")
            lines.append("-" * 60)

            # Sort signals by stress level (descending) so the hottest
            # indicators appear first within each category.
            sorted_signals = sorted(
                cat_signals,
                key=lambda s: s.stress_level,
                reverse=True
            )

            for signal in sorted_signals:
                stress_bar = self._create_stress_bar(signal.stress_level)
                lines.append(f"\n{signal.indicator_name}")
                lines.append(f" Stress: {signal.stress_level:.1f}/100 {stress_bar}")
                lines.append(f" Value: {signal.description}")
                lines.append(f" {signal.interpretation}")

        lines.append("\n" + "=" * 60)

        return "\n".join(lines)
| 293 |
+
def _create_stress_bar(self, stress_level: float) -> str:
|
| 294 |
+
"""Create visual stress level bar."""
|
| 295 |
+
# 10 blocks, each = 10%
|
| 296 |
+
filled = int(stress_level / 10)
|
| 297 |
+
empty = 10 - filled
|
| 298 |
+
|
| 299 |
+
if stress_level >= 70:
|
| 300 |
+
bar_char = "█"
|
| 301 |
+
color = "🔴"
|
| 302 |
+
elif stress_level >= 55:
|
| 303 |
+
bar_char = "▓"
|
| 304 |
+
color = "⚠️"
|
| 305 |
+
else:
|
| 306 |
+
bar_char = "░"
|
| 307 |
+
color = "✅"
|
| 308 |
+
|
| 309 |
+
bar = bar_char * filled + "░" * empty
|
| 310 |
+
return f"{color} [{bar}]"
|
| 311 |
+
|
| 312 |
+
async def get_full_report(
|
| 313 |
+
self,
|
| 314 |
+
lookback_days: Optional[int] = None
|
| 315 |
+
) -> str:
|
| 316 |
+
"""
|
| 317 |
+
Generate complete risk report.
|
| 318 |
+
|
| 319 |
+
Args:
|
| 320 |
+
lookback_days: Days of historical data
|
| 321 |
+
|
| 322 |
+
Returns:
|
| 323 |
+
Full formatted report
|
| 324 |
+
"""
|
| 325 |
+
# Get risk score
|
| 326 |
+
risk_score = await self.assess_current_risk(lookback_days)
|
| 327 |
+
|
| 328 |
+
# Get data for indicator details
|
| 329 |
+
lookback = lookback_days or self.config.lookback_days
|
| 330 |
+
data = await self._load_data(lookback)
|
| 331 |
+
signals = self._calculate_indicators(data)
|
| 332 |
+
|
| 333 |
+
# Build report
|
| 334 |
+
report_parts = []
|
| 335 |
+
|
| 336 |
+
# 1. Global summary
|
| 337 |
+
report_parts.append(self.aggregator.format_summary(risk_score))
|
| 338 |
+
report_parts.append("\n")
|
| 339 |
+
|
| 340 |
+
# 2. Top risks
|
| 341 |
+
report_parts.append("=" * 60)
|
| 342 |
+
report_parts.append("TOP RISKS")
|
| 343 |
+
report_parts.append("=" * 60)
|
| 344 |
+
report_parts.append("")
|
| 345 |
+
|
| 346 |
+
top_risks = self.aggregator.get_top_risks(risk_score, n=3)
|
| 347 |
+
for i, cat_score in enumerate(top_risks, 1):
|
| 348 |
+
cat_name = cat_score.category.value.replace('_', ' ').title()
|
| 349 |
+
report_parts.append(
|
| 350 |
+
f"{i}. {cat_name}: {cat_score.score:.1f}/100 - {cat_score.description}"
|
| 351 |
+
)
|
| 352 |
+
|
| 353 |
+
report_parts.append("\n")
|
| 354 |
+
|
| 355 |
+
# 3. Indicator details
|
| 356 |
+
report_parts.append(self.get_indicator_details(signals))
|
| 357 |
+
|
| 358 |
+
# 4. Timestamp
|
| 359 |
+
report_parts.append("\n")
|
| 360 |
+
report_parts.append(f"Report generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
|
| 361 |
+
|
| 362 |
+
return "\n".join(report_parts)
|
| 363 |
+
|
| 364 |
+
def get_config_summary(self) -> str:
|
| 365 |
+
"""Get configuration summary."""
|
| 366 |
+
lines = []
|
| 367 |
+
lines.append("Risk Engine Configuration:")
|
| 368 |
+
lines.append(f" Lookback days: {self.config.lookback_days}")
|
| 369 |
+
lines.append(f" Total indicators: {len(self.indicators)}")
|
| 370 |
+
lines.append(f" Data sources: FRED, Yahoo Finance")
|
| 371 |
+
lines.append(f" Regime thresholds: GREEN<{self.aggregator.REGIME_THRESHOLDS['green_yellow']}, "
|
| 372 |
+
f"YELLOW<{self.aggregator.REGIME_THRESHOLDS['yellow_red']}, RED")
|
| 373 |
+
return "\n".join(lines)
|
src/core/macroeconomics/markets/risk_engine/indicators/__init__.py
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Market risk indicators.
|
| 3 |
+
|
| 4 |
+
Organized by category:
|
| 5 |
+
- Volatility: VIX, VVIX, term structure, realized vol
|
| 6 |
+
- Credit: Spreads, default risk
|
| 7 |
+
- Liquidity: Bid-ask, Amihud
|
| 8 |
+
- Macro/Rates: Yield curve, NFCI, repo
|
| 9 |
+
- FX/Funding: DXY, JPY carry, basis
|
| 10 |
+
- Breadth: Advance/decline, MA divergences
|
| 11 |
+
- Tail Risk: Correlations, regime shifts
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
from src.core.macroeconomics.markets.risk_engine.indicators.base import BaseIndicator
|
| 15 |
+
from src.core.macroeconomics.markets.risk_engine.indicators.volatility import (
|
| 16 |
+
VIXStressIndicator,
|
| 17 |
+
VIXTermStructureIndicator,
|
| 18 |
+
VVIXIndicator,
|
| 19 |
+
RealizedVolatilityIndicator,
|
| 20 |
+
)
|
| 21 |
+
from src.core.macroeconomics.markets.risk_engine.indicators.credit import (
|
| 22 |
+
CreditSpreadWideningIndicator,
|
| 23 |
+
IGvsHYDivergenceIndicator,
|
| 24 |
+
CreditETFPressureIndicator,
|
| 25 |
+
CorporateBondStressIndicator,
|
| 26 |
+
)
|
| 27 |
+
from src.core.macroeconomics.markets.risk_engine.indicators.liquidity import (
|
| 28 |
+
AmihudIlliquidityIndicator,
|
| 29 |
+
BidAskSpreadIndicator,
|
| 30 |
+
TreasuryLiquidityIndicator,
|
| 31 |
+
MarketDepthIndicator,
|
| 32 |
+
)
|
| 33 |
+
from src.core.macroeconomics.markets.risk_engine.indicators.macro_rates import (
|
| 34 |
+
YieldCurveIndicator,
|
| 35 |
+
NFCIIndicator,
|
| 36 |
+
RepoStressIndicator,
|
| 37 |
+
MOVEIndexIndicator,
|
| 38 |
+
RealRatesIndicator,
|
| 39 |
+
)
|
| 40 |
+
from src.core.macroeconomics.markets.risk_engine.indicators.fx_funding import (
|
| 41 |
+
DXYSpikeIndicator,
|
| 42 |
+
JPYCarryUnwindIndicator,
|
| 43 |
+
CrossCurrencyBasisIndicator,
|
| 44 |
+
EmergingMarketStressIndicator,
|
| 45 |
+
)
|
| 46 |
+
from src.core.macroeconomics.markets.risk_engine.indicators.breadth import (
|
| 47 |
+
SPYQQQDivergenceIndicator,
|
| 48 |
+
SectorRotationIndicator,
|
| 49 |
+
MarketConcentrationIndicator,
|
| 50 |
+
SmallCapWeaknessIndicator,
|
| 51 |
+
)
|
| 52 |
+
from src.core.macroeconomics.markets.risk_engine.indicators.tail_risk import (
|
| 53 |
+
CorrelationSpikeIndicator,
|
| 54 |
+
VolatilityRegimeIndicator,
|
| 55 |
+
DrawdownIndicator,
|
| 56 |
+
SkewIndicator,
|
| 57 |
+
TailEventIndicator,
|
| 58 |
+
)
|
| 59 |
+
|
| 60 |
+
__all__ = [
|
| 61 |
+
"BaseIndicator",
|
| 62 |
+
# Volatility
|
| 63 |
+
"VIXStressIndicator",
|
| 64 |
+
"VIXTermStructureIndicator",
|
| 65 |
+
"VVIXIndicator",
|
| 66 |
+
"RealizedVolatilityIndicator",
|
| 67 |
+
# Credit
|
| 68 |
+
"CreditSpreadWideningIndicator",
|
| 69 |
+
"IGvsHYDivergenceIndicator",
|
| 70 |
+
"CreditETFPressureIndicator",
|
| 71 |
+
"CorporateBondStressIndicator",
|
| 72 |
+
# Liquidity
|
| 73 |
+
"AmihudIlliquidityIndicator",
|
| 74 |
+
"BidAskSpreadIndicator",
|
| 75 |
+
"TreasuryLiquidityIndicator",
|
| 76 |
+
"MarketDepthIndicator",
|
| 77 |
+
# Macro/Rates
|
| 78 |
+
"YieldCurveIndicator",
|
| 79 |
+
"NFCIIndicator",
|
| 80 |
+
"RepoStressIndicator",
|
| 81 |
+
"MOVEIndexIndicator",
|
| 82 |
+
"RealRatesIndicator",
|
| 83 |
+
# FX/Funding
|
| 84 |
+
"DXYSpikeIndicator",
|
| 85 |
+
"JPYCarryUnwindIndicator",
|
| 86 |
+
"CrossCurrencyBasisIndicator",
|
| 87 |
+
"EmergingMarketStressIndicator",
|
| 88 |
+
# Breadth
|
| 89 |
+
"SPYQQQDivergenceIndicator",
|
| 90 |
+
"SectorRotationIndicator",
|
| 91 |
+
"MarketConcentrationIndicator",
|
| 92 |
+
"SmallCapWeaknessIndicator",
|
| 93 |
+
# Tail Risk
|
| 94 |
+
"CorrelationSpikeIndicator",
|
| 95 |
+
"VolatilityRegimeIndicator",
|
| 96 |
+
"DrawdownIndicator",
|
| 97 |
+
"SkewIndicator",
|
| 98 |
+
"TailEventIndicator",
|
| 99 |
+
]
|
src/core/macroeconomics/markets/risk_engine/indicators/breadth.py
ADDED
|
@@ -0,0 +1,449 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Market breadth indicators for risk detection.
|
| 3 |
+
|
| 4 |
+
Monitors advance/decline, participation, and market internals.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from typing import Dict, Optional
|
| 8 |
+
import pandas as pd
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
from src.core.macroeconomics.markets.risk_engine.models import (
|
| 12 |
+
IndicatorSignal,
|
| 13 |
+
IndicatorCategory,
|
| 14 |
+
TimeSeriesData
|
| 15 |
+
)
|
| 16 |
+
from src.core.macroeconomics.markets.risk_engine.indicators.base import BaseIndicator
|
| 17 |
+
from src.telegram_bot.logger import main_logger as logger
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class SPYQQQDivergenceIndicator(BaseIndicator):
    """
    SPY vs QQQ divergence detector.

    Tracks the 20-day performance spread between QQQ (large-cap tech)
    and SPY (broad market).  An unusually wide spread in either
    direction signals fragile breadth: narrow mega-cap leadership when
    QQQ leads, tech-driven weakness when it lags.

    Required data:
    - SPY: S&P 500 ETF
    - QQQ: Nasdaq-100 ETF
    """

    def __init__(self):
        super().__init__(
            name="SPY-QQQ Divergence",
            category=IndicatorCategory.BREADTH,
            description="Large-cap tech vs broad market",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute SPY-QQQ divergence."""
        if not self._validate_data(data, ["SPY", "QQQ"]):
            return None

        try:
            # Align the two series on common dates.
            frame = pd.DataFrame({
                'spy': data["SPY"].data,
                'qqq': data["QQQ"].data,
            }).dropna()

            if len(frame) < self.zscore_window:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # Point-in-time 20-day returns, in percent.
            spy_ret = (frame['spy'].iloc[-1] / frame['spy'].iloc[-21] - 1) * 100
            qqq_ret = (frame['qqq'].iloc[-1] / frame['qqq'].iloc[-21] - 1) * 100

            # Positive spread = QQQ outperforming = concentration.
            spread = qqq_ret - spy_ret

            # Historical spread series provides the z-score baseline.
            rolling_spread = (
                frame['qqq'].pct_change(20).dropna() * 100
                - frame['spy'].pct_change(20).dropna() * 100
            )
            if len(rolling_spread) < self.zscore_window:
                return None

            spread_zscore = self.calculate_zscore(rolling_spread)

            # Divergence in either direction is stressful, so use |z|:
            # extreme QQQ leadership = narrow breadth, extreme lag = tech rout.
            stress_level = self.normalize_to_level(abs(spread_zscore))

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=spread,
                zscore=spread_zscore,
                stress_level=stress_level,
                description=(
                    f"SPY 20d: {spy_ret:+.1f}%, "
                    f"QQQ 20d: {qqq_ret:+.1f}%, "
                    f"Spread: {spread:+.1f}% (z={spread_zscore:+.1f})"
                ),
                interpretation=self._interpret(stress_level, spread, spread_zscore)
            )
        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        spread: float,
        zscore: float
    ) -> str:
        """Generate interpretation."""
        if stress_level > 70:
            if zscore > 2:
                return f"🔴 NARROW: Extreme QQQ outperformance (narrow leadership, {spread:+.1f}%)"
            return f"🔴 ROTATION: Extreme QQQ underperformance (tech weakness, {spread:+.1f}%)"
        if stress_level > 55:
            if zscore > 0:
                return "⚠️ CONCENTRATION: QQQ leading (narrow breadth)"
            return "⚠️ DIVERGENCE: QQQ lagging (sector rotation)"
        if stress_level < 40:
            return "✅ HEALTHY: SPY and QQQ moving together"
        return "⚡ WATCH: Moderate SPY-QQQ divergence"
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class SectorRotationIndicator(BaseIndicator):
    """
    Sector rotation stress indicator.

    Monitors defensive vs cyclical sector performance; a flight to
    defensives is a risk-off signal.

    Sector ETF proxies:
    - XLU (Utilities) = defensive
    - XLF (Financials) = cyclical

    Signal construction:
    - XLU/XLF relative-strength ratio; rising ratio = defensive
      leadership = risk-off, so a positive ratio z-score maps to stress.

    Required data:
    - XLU: Utilities sector ETF (defensive)
    - XLF: Financials sector ETF (cyclical)
    """

    def __init__(self):
        super().__init__(
            name="Sector Rotation",
            category=IndicatorCategory.BREADTH,
            description="Defensive vs cyclical sector strength",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute sector rotation signal.

        Args:
            data: Market/macro series keyed by symbol.

        Returns:
            IndicatorSignal, or None when sector ETF data is missing
            or insufficient.
        """
        # Sector ETFs may not be present in every data-source configuration.
        has_xlu = "XLU" in data and len(data["XLU"].data) > 0
        has_xlf = "XLF" in data and len(data["XLF"].data) > 0

        if not (has_xlu and has_xlf):
            logger.warning(f"{self.name}: Sector ETF data not available")
            return None

        try:
            xlu = data["XLU"].data
            xlf = data["XLF"].data

            # Align dates
            combined = pd.DataFrame({
                'xlu': xlu,
                'xlf': xlf
            }).dropna()

            if len(combined) < self.zscore_window:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # Relative strength ratio (XLU/XLF).
            # Rising = defensives outperforming = risk-off.
            ratio = combined['xlu'] / combined['xlf']

            # Z-score of ratio; positive = defensive leadership = stress.
            # (Removed a previously computed-but-unused ratio acceleration.)
            ratio_zscore = self.calculate_zscore(ratio)
            stress_level = self.normalize_to_level(ratio_zscore)

            current_ratio = float(ratio.iloc[-1])
            # 20-day sector returns for context in the description.
            xlu_ret = (combined['xlu'].iloc[-1] / combined['xlu'].iloc[-21] - 1) * 100
            xlf_ret = (combined['xlf'].iloc[-1] / combined['xlf'].iloc[-21] - 1) * 100

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=current_ratio,
                zscore=ratio_zscore,
                stress_level=stress_level,
                description=(
                    f"XLU/XLF: {current_ratio:.3f} "
                    f"(z={ratio_zscore:+.1f}), "
                    f"XLU: {xlu_ret:+.1f}%, XLF: {xlf_ret:+.1f}%"
                ),
                interpretation=self._interpret(stress_level, ratio_zscore, xlu_ret, xlf_ret)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        zscore: float,
        xlu_ret: float,
        xlf_ret: float
    ) -> str:
        """Generate interpretation from the stress level."""
        if stress_level > 70:
            return "🔴 DEFENSIVE: Flight to defensive sectors (risk-off)"
        elif stress_level > 55:
            return "⚠️ ROTATION: Defensives outperforming (caution)"
        elif stress_level < 40:
            return "✅ CYCLICAL: Cyclicals leading (risk-on)"
        else:
            return "⚡ WATCH: Mixed sector performance"
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
class MarketConcentrationIndicator(BaseIndicator):
    """
    Market concentration indicator.

    Compares SPY (broad S&P 500) against QQQ as a mega-cap proxy.
    A very high rolling correlation means the index is effectively
    driven by its largest components — a fragile market structure.

    Signal construction:
    - 60-day rolling correlation of SPY/QQQ daily returns
    - High correlation z-score = concentrated market = stress
    - 20-day annualized volatilities are reported for context

    Required data:
    - SPY: S&P 500
    - QQQ: Nasdaq-100 (mega-cap proxy)
    """

    def __init__(self):
        super().__init__(
            name="Market Concentration",
            category=IndicatorCategory.BREADTH,
            description="Mega-cap concentration risk",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute market concentration."""
        if not self._validate_data(data, ["SPY", "QQQ"]):
            return None

        try:
            # Align the two price series on common dates.
            frame = pd.DataFrame({
                'spy': data["SPY"].data,
                'qqq': data["QQQ"].data,
            }).dropna()

            # Need the z-score window plus the 60-day correlation warm-up.
            if len(frame) < self.zscore_window + 60:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            daily_spy = frame['spy'].pct_change().dropna()
            daily_qqq = frame['qqq'].pct_change().dropna()

            # 60-day rolling correlation of daily returns.
            rolling_corr = daily_spy.rolling(60).corr(daily_qqq).dropna()
            if len(rolling_corr) < self.zscore_window:
                return None

            # High correlation = concentrated market = stress.
            corr_zscore = self.calculate_zscore(rolling_corr)
            stress_level = self.normalize_to_level(corr_zscore)
            latest_corr = float(rolling_corr.iloc[-1])

            # 20-day annualized volatilities (percent) for context.
            spy_vol = daily_spy.rolling(20).std().iloc[-1] * np.sqrt(252) * 100
            qqq_vol = daily_qqq.rolling(20).std().iloc[-1] * np.sqrt(252) * 100

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=latest_corr,
                zscore=corr_zscore,
                stress_level=stress_level,
                description=(
                    f"SPY-QQQ corr: {latest_corr:.2f} "
                    f"(z={corr_zscore:+.1f}), "
                    f"SPY vol: {spy_vol:.1f}%, QQQ vol: {qqq_vol:.1f}%"
                ),
                interpretation=self._interpret(stress_level, latest_corr, corr_zscore)
            )
        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        corr: float,
        zscore: float
    ) -> str:
        """Generate interpretation."""
        if stress_level > 70:
            return f"🔴 CONCENTRATED: Extreme correlation (market = mega-caps, {corr:.2f})"
        if stress_level > 55:
            return "⚠️ NARROWING: Market concentration increasing"
        if stress_level < 40:
            return "✅ BROAD: Healthy market breadth"
        return "⚡ WATCH: Moderate concentration"
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
class SmallCapWeaknessIndicator(BaseIndicator):
    """
    Small-cap weakness indicator.

    Tracks IWM (Russell 2000) relative to SPY.  Small-caps lagging the
    broad market indicates declining risk appetite.

    Signal construction:
    - IWM/SPY relative-strength ratio
    - 20-day momentum of the ratio; negative momentum (small-caps
      lagging) maps to stress via a sign flip on the z-score.

    Required data:
    - IWM: Russell 2000 small-cap ETF
    - SPY: S&P 500
    """

    def __init__(self):
        super().__init__(
            name="Small-Cap Weakness",
            category=IndicatorCategory.BREADTH,
            description="Small-cap underperformance",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute small-cap weakness."""
        # IWM may be absent from some data-source configurations.
        iwm_available = "IWM" in data and len(data["IWM"].data) > 0
        spy_available = "SPY" in data and len(data["SPY"].data) > 0
        if not (iwm_available and spy_available):
            logger.warning(f"{self.name}: IWM or SPY data not available")
            return None

        try:
            # Align the two price series on common dates.
            frame = pd.DataFrame({
                'iwm': data["IWM"].data,
                'spy': data["SPY"].data,
            }).dropna()

            if len(frame) < self.zscore_window:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # Relative strength ratio; falling = small-caps lagging = risk-off.
            rel_strength = frame['iwm'] / frame['spy']

            # Point-in-time 20-day momentum of the ratio (percent).
            rel_momentum = (rel_strength.iloc[-1] / rel_strength.iloc[-21] - 1) * 100

            # Historical momentum series for the z-score baseline.
            momentum_hist = rel_strength.pct_change(20).dropna() * 100
            if len(momentum_hist) < self.zscore_window:
                return None

            # Negative momentum = small-caps lagging = stress, so flip sign.
            stress_zscore = -self.calculate_zscore(momentum_hist)
            stress_level = self.normalize_to_level(stress_zscore)

            latest_ratio = float(rel_strength.iloc[-1])
            iwm_ret = (frame['iwm'].iloc[-1] / frame['iwm'].iloc[-21] - 1) * 100
            spy_ret = (frame['spy'].iloc[-1] / frame['spy'].iloc[-21] - 1) * 100

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=latest_ratio,
                zscore=stress_zscore,
                stress_level=stress_level,
                description=(
                    f"IWM/SPY: {latest_ratio:.3f} "
                    f"(momentum: {rel_momentum:+.1f}%), "
                    f"IWM: {iwm_ret:+.1f}%, SPY: {spy_ret:+.1f}%"
                ),
                interpretation=self._interpret(stress_level, rel_momentum, iwm_ret, spy_ret)
            )
        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        momentum: float,
        iwm_ret: float,
        spy_ret: float
    ) -> str:
        """Generate interpretation."""
        underperformance = spy_ret - iwm_ret

        if stress_level > 70:
            return f"🔴 WEAK: Small-caps severely lagging ({underperformance:+.1f}% vs SPY)"
        if stress_level > 55:
            return "⚠️ LAGGING: Small-caps underperforming (risk appetite declining)"
        if stress_level < 40:
            return "✅ STRONG: Small-caps leading (healthy risk appetite)"
        return "⚡ WATCH: Small-caps moderately weak"
|
src/core/macroeconomics/markets/risk_engine/indicators/credit.py
ADDED
|
@@ -0,0 +1,414 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Credit market indicators for risk detection.
|
| 3 |
+
|
| 4 |
+
Monitors corporate bond spreads, credit stress, and default risk.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from typing import Dict, Optional
|
| 8 |
+
import pandas as pd
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
from src.core.macroeconomics.markets.risk_engine.models import (
|
| 12 |
+
IndicatorSignal,
|
| 13 |
+
IndicatorCategory,
|
| 14 |
+
TimeSeriesData
|
| 15 |
+
)
|
| 16 |
+
from src.core.macroeconomics.markets.risk_engine.indicators.base import BaseIndicator
|
| 17 |
+
from src.telegram_bot.logger import main_logger as logger
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class CreditSpreadWideningIndicator(BaseIndicator):
    """Detect credit spread widening in US high yield.

    Credit spreads widen during stress as investors demand higher yields
    on corporate bonds relative to risk-free Treasuries.

    Signal construction:
    - Monitor the High Yield OAS series (BAMLH0A0HYM2)
    - Z-score the spread level against its rolling history
    - Amplify when spreads are both wide AND widening quickly

    Required data:
    - BAMLH0A0HYM2: ICE BofA US High Yield Option-Adjusted Spread
    """

    def __init__(self):
        super().__init__(
            name="HY Credit Spread",
            category=IndicatorCategory.CREDIT,
            description="High yield credit spread widening",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute credit spread signal.

        Args:
            data: Mapping of series name -> TimeSeriesData; requires
                BAMLH0A0HYM2.

        Returns:
            IndicatorSignal whose value is the latest HY spread in bps,
            or None when data is missing/insufficient or the calculation
            raises.
        """
        if not self._validate_data(data, ["BAMLH0A0HYM2"]):
            return None

        try:
            series = data["BAMLH0A0HYM2"].data

            if len(series) < self.zscore_window:
                logger.warning(
                    f"{self.name}: Insufficient data "
                    f"({len(series)} < {self.zscore_window})"
                )
                return None

            # Level component: how wide spreads are vs. their own history.
            level_z = self.calculate_zscore(series)

            # Momentum component: how fast spreads are moving (5-period).
            accel = self.calculate_acceleration(series, periods=5)

            # Wide AND widening -> amplify; otherwise keep the raw level z.
            # The multiplier is capped at 1.5x.
            widening = level_z > 0 and accel > 0
            boosted_z = level_z * (1 + min(accel / 50, 0.5)) if widening else level_z

            stress_level = self.normalize_to_level(boosted_z)
            latest = float(series.iloc[-1])

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=latest,
                zscore=level_z,
                stress_level=stress_level,
                description=(
                    f"HY spread: {latest:.0f} bps "
                    f"(z={level_z:+.1f}), "
                    f"acceleration: {accel:+.1f} bps"
                ),
                interpretation=self._interpret(stress_level, level_z, accel)
            )

        except Exception as e:
            # Any data/math failure degrades to "no signal" rather than crash.
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        zscore: float,
        acceleration: float
    ) -> str:
        """Generate interpretation."""
        if stress_level > 75:
            if acceleration > 0:
                return "🔴 SEVERE: Credit spreads blowing out (accelerating)"
            return "🔴 ELEVATED: Wide spreads (but stabilizing)"
        if stress_level > 60:
            return "⚠️ WARNING: Credit spreads widening"
        if stress_level < 40:
            return "✅ HEALTHY: Tight credit spreads"
        return "⚡ WATCH: Credit spreads moderately wide"
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
class IGvsHYDivergenceIndicator(BaseIndicator):
    """
    Detect Investment Grade vs High Yield divergence.

    During stress, HY spreads widen much faster than IG spreads.
    Divergence indicates flight-to-quality and credit tiering.

    Signal construction:
    - Calculate HY/IG spread ratio
    - Z-score of ratio
    - Rising ratio = HY stress relative to IG

    Required data:
    - BAMLH0A0HYM2: High Yield spread
    - BAMLC0A0CM: Investment Grade spread
    """

    def __init__(self):
        # 252 trading days ~ one year of history for the z-score baseline.
        super().__init__(
            name="IG vs HY Divergence",
            category=IndicatorCategory.CREDIT,
            description="High yield underperforming investment grade",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute IG/HY divergence signal.

        Args:
            data: Mapping of series name -> TimeSeriesData; requires
                BAMLH0A0HYM2 (HY spread) and BAMLC0A0CM (IG spread).

        Returns:
            IndicatorSignal whose value is the current HY/IG spread ratio,
            or None when data is missing/insufficient or the calculation
            raises.
        """
        required = ["BAMLH0A0HYM2", "BAMLC0A0CM"]
        if not self._validate_data(data, required):
            return None

        try:
            hy_spread = data["BAMLH0A0HYM2"].data
            ig_spread = data["BAMLC0A0CM"].data

            # Align the two series on common dates; drop days where either
            # is missing.
            combined = pd.DataFrame({
                'hy': hy_spread,
                'ig': ig_spread
            }).dropna()

            if len(combined) < self.zscore_window:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # HY/IG ratio: rises when HY widens faster than IG.
            ratio = combined['hy'] / combined['ig']

            # Z-score of the ratio drives the stress score.
            ratio_zscore = self.calculate_zscore(ratio)

            # Ratio acceleration (5-period).
            # NOTE(review): passed to _interpret below but not used there,
            # nor in the stress score — confirm whether that is intentional.
            ratio_accel = self.calculate_acceleration(ratio, periods=5)

            stress_level = self.normalize_to_level(ratio_zscore)

            # Current readings come from the raw (unaligned) input series.
            current_hy = float(hy_spread.iloc[-1])
            current_ig = float(ig_spread.iloc[-1])
            current_ratio = current_hy / current_ig

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=current_ratio,
                zscore=ratio_zscore,
                stress_level=stress_level,
                description=(
                    f"HY: {current_hy:.0f} bps, IG: {current_ig:.0f} bps, "
                    f"Ratio: {current_ratio:.2f} (z={ratio_zscore:+.1f})"
                ),
                interpretation=self._interpret(stress_level, ratio_zscore, ratio_accel)
            )

        except Exception as e:
            # Defensive: any computation/data error degrades to "no signal".
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        zscore: float,
        acceleration: float
    ) -> str:
        """Generate interpretation.

        Args:
            stress_level: Normalized stress score (compared against the
                40 / 60 / 75 thresholds below).
            zscore: Ratio z-score (currently unused in the message logic).
            acceleration: Ratio acceleration (currently unused in the
                message logic).

        Returns:
            Human-readable interpretation string.
        """
        if stress_level > 75:
            return "🔴 SEVERE: HY severely underperforming IG (flight-to-quality)"
        elif stress_level > 60:
            return "⚠️ WARNING: HY/IG divergence increasing"
        elif stress_level < 40:
            return "✅ STABLE: HY and IG spreads moving together"
        else:
            return "⚡ WATCH: HY starting to underperform IG"
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
class CreditETFPressureIndicator(BaseIndicator):
    """
    Detect credit ETF price pressure.

    Monitor HYG (High Yield ETF) and LQD (Investment Grade ETF) for selling.
    Falling credit ETF prices indicate institutional deleveraging.

    Signal construction:
    - Calculate HYG and LQD 20-day returns
    - Z-score of returns (negative returns = stress)
    - Compare vs SPY to detect credit-specific weakness

    Required data:
    - HYG: iShares High Yield Corporate Bond ETF
    - LQD: iShares Investment Grade Corporate Bond ETF
    - SPY: S&P 500 for comparison
    """

    def __init__(self):
        super().__init__(
            name="Credit ETF Pressure",
            category=IndicatorCategory.CREDIT,
            description="Credit ETF selling pressure",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute credit ETF pressure signal.

        Args:
            data: Mapping of ticker -> TimeSeriesData; requires HYG, LQD
                and SPY price series.

        Returns:
            IndicatorSignal whose value is the equal-weighted 20-day credit
            ETF return (percent), or None when data is missing/insufficient
            or the calculation raises.
        """
        required = ["HYG", "LQD", "SPY"]
        if not self._validate_data(data, required):
            return None

        try:
            hyg_data = data["HYG"].data
            lqd_data = data["LQD"].data
            spy_data = data["SPY"].data

            # Align all three series on common dates.
            combined = pd.DataFrame({
                'hyg': hyg_data,
                'lqd': lqd_data,
                'spy': spy_data
            }).dropna()

            if len(combined) < self.zscore_window:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # Latest 20-day (~1 trading month) returns, in percent.
            hyg_ret = (combined['hyg'].iloc[-1] / combined['hyg'].iloc[-21] - 1) * 100
            lqd_ret = (combined['lqd'].iloc[-1] / combined['lqd'].iloc[-21] - 1) * 100
            spy_ret = (combined['spy'].iloc[-1] / combined['spy'].iloc[-21] - 1) * 100

            # Rolling 20-day return history for the z-score baseline.
            hyg_rets = combined['hyg'].pct_change(20).dropna() * 100
            lqd_rets = combined['lqd'].pct_change(20).dropna() * 100

            # Equal-weight HYG and LQD into a single "credit" return.
            credit_ret = (hyg_ret + lqd_ret) / 2
            credit_rets = (hyg_rets + lqd_rets) / 2

            credit_zscore = self.calculate_zscore(credit_rets)

            # Flip sign: negative credit returns = positive stress.
            stress_zscore = -credit_zscore

            # Amplify only when stress is already positive and credit is
            # lagging equity by >2%.
            # FIX: previously the multiplier was applied unconditionally, so
            # a *negative* stress z-score was pushed further negative — i.e.
            # underperformance vs SPY made the reading look calmer. The
            # positivity guard matches CreditSpreadWideningIndicator.
            if stress_zscore > 0 and credit_ret < spy_ret - 2.0:
                underperformance = spy_ret - credit_ret
                # Cap the boost at 1.5x.
                stress_zscore *= (1 + min(underperformance / 10, 0.5))

            stress_level = self.normalize_to_level(stress_zscore)

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=credit_ret,
                zscore=stress_zscore,
                stress_level=stress_level,
                description=(
                    f"HYG 20d: {hyg_ret:+.1f}%, "
                    f"LQD 20d: {lqd_ret:+.1f}%, "
                    f"SPY 20d: {spy_ret:+.1f}%"
                ),
                interpretation=self._interpret(stress_level, credit_ret, spy_ret)
            )

        except Exception as e:
            # Any data/math failure degrades to "no signal" rather than crash.
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        credit_ret: float,
        spy_ret: float
    ) -> str:
        """Generate interpretation.

        Args:
            stress_level: Normalized stress score (compared against the
                40 / 60 / 75 thresholds below).
            credit_ret: Equal-weighted 20-day credit ETF return (percent).
            spy_ret: 20-day SPY return (percent).

        Returns:
            Human-readable interpretation string.
        """
        # Positive value = equity beating credit over the window.
        underperf = spy_ret - credit_ret

        if stress_level > 75:
            return f"🔴 SEVERE: Credit ETFs selling hard ({credit_ret:+.1f}%, {underperf:+.1f}% vs SPY)"
        elif stress_level > 60:
            return f"⚠️ WARNING: Credit ETF weakness ({credit_ret:+.1f}%)"
        elif stress_level < 40:
            return "✅ STABLE: Credit ETFs performing well"
        else:
            return "⚡ WATCH: Credit ETFs under mild pressure"
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
class CorporateBondStressIndicator(BaseIndicator):
    """Composite corporate bond stress indicator.

    Blends spread levels and credit ETF price action into a single
    corporate credit stress measure.

    Signal construction:
    - Equal-weighted average of three z-scores: HY spread level,
      IG spread level, and (sign-flipped) HYG 20-day return

    Required data:
    - BAMLH0A0HYM2: HY spread
    - BAMLC0A0CM: IG spread
    - HYG: High Yield ETF
    """

    def __init__(self):
        super().__init__(
            name="Corporate Bond Stress",
            category=IndicatorCategory.CREDIT,
            description="Composite corporate credit stress",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute composite credit stress."""
        if not self._validate_data(data, ["BAMLH0A0HYM2", "BAMLC0A0CM", "HYG"]):
            return None

        try:
            hy = data["BAMLH0A0HYM2"].data
            ig = data["BAMLC0A0CM"].data
            hyg = data["HYG"].data

            # Put all three series on a shared date index.
            frame = pd.DataFrame({
                'hy': hy,
                'ig': ig,
                'hyg': hyg
            }).dropna()

            if len(frame) < self.zscore_window:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # Spread-level components.
            z_hy = self.calculate_zscore(frame['hy'])
            z_ig = self.calculate_zscore(frame['ig'])

            # Price component: 20-day HYG returns, sign-flipped so that
            # falling prices register as positive stress.
            hyg_monthly = frame['hyg'].pct_change(20).dropna() * 100
            z_hyg = -self.calculate_zscore(hyg_monthly)

            # Equal-weighted composite of the three components.
            composite = (z_hy + z_ig + z_hyg) / 3
            stress_level = self.normalize_to_level(composite)

            # Display values: spreads from the raw (unaligned) series,
            # HYG return from the aligned frame.
            hy_now = float(hy.iloc[-1])
            ig_now = float(ig.iloc[-1])
            hyg_ret_now = (frame['hyg'].iloc[-1] / frame['hyg'].iloc[-21] - 1) * 100

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=composite,
                zscore=composite,
                stress_level=stress_level,
                description=(
                    f"HY: {hy_now:.0f}bps (z={z_hy:+.1f}), "
                    f"IG: {ig_now:.0f}bps (z={z_ig:+.1f}), "
                    f"HYG: {hyg_ret_now:+.1f}%"
                ),
                interpretation=self._interpret(stress_level, composite)
            )

        except Exception as e:
            # Degrade to "no signal" on any data/math failure.
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(self, stress_level: float, zscore: float) -> str:
        """Generate interpretation."""
        if stress_level > 75:
            return "🔴 SEVERE: Corporate credit under severe stress"
        if stress_level > 60:
            return "⚠️ WARNING: Elevated corporate credit stress"
        if stress_level < 40:
            return "✅ HEALTHY: Corporate credit markets healthy"
        return "⚡ WATCH: Corporate credit showing early stress signals"
|
src/core/macroeconomics/markets/risk_engine/indicators/fx_funding.py
ADDED
|
@@ -0,0 +1,415 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
FX and funding indicators for market risk detection.
|
| 3 |
+
|
| 4 |
+
Monitors dollar strength, carry trade unwinding, and cross-currency funding stress.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from typing import Dict, Optional
|
| 8 |
+
import pandas as pd
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
from src.core.macroeconomics.markets.risk_engine.models import (
|
| 12 |
+
IndicatorSignal,
|
| 13 |
+
IndicatorCategory,
|
| 14 |
+
TimeSeriesData
|
| 15 |
+
)
|
| 16 |
+
from src.core.macroeconomics.markets.risk_engine.indicators.base import BaseIndicator
|
| 17 |
+
from src.telegram_bot.logger import main_logger as logger
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class DXYSpikeIndicator(BaseIndicator):
    """
    Dollar Index (DXY) spike detector.

    Rapid USD strengthening = global funding stress.
    Flight to safety, EM stress, commodity pressure.

    Signal construction:
    - Calculate DXY level z-score
    - Calculate DXY velocity (rate of rise)
    - Fast rising DXY = stress

    Required data:
    - DXY: US Dollar Index (from Yahoo), or
    - DTWEXBGS: trade-weighted dollar index (FRED fallback)
    """

    def __init__(self):
        super().__init__(
            name="DXY Spike",
            category=IndicatorCategory.FX_FUNDING,
            description="Dollar strength (funding stress)",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute DXY spike signal.

        Args:
            data: Mapping of series name -> TimeSeriesData. Uses "DXY" when
                available, falling back to FRED's "DTWEXBGS".

        Returns:
            IndicatorSignal whose value is the latest dollar index level,
            or None when no dollar series is available, history is shorter
            than the z-score window, or the calculation raises.
        """
        # Prefer DXY; fall back to the FRED trade-weighted dollar index.
        if "DXY" in data and len(data["DXY"].data) > 0:
            dxy_data = data["DXY"].data
        elif "DTWEXBGS" in data and len(data["DTWEXBGS"].data) > 0:
            dxy_data = data["DTWEXBGS"].data
        else:
            logger.warning(f"{self.name}: No DXY data available")
            return None

        try:
            if len(dxy_data) < self.zscore_window:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # Level: how strong the dollar is vs. its own history.
            dxy_zscore = self.calculate_zscore(dxy_data)

            # Velocity: 20-day (~1 trading month) percentage change.
            # FIX: the unused 5-period acceleration local was removed.
            dxy_velocity = (dxy_data.iloc[-1] / dxy_data.iloc[-21] - 1) * 100

            # Stress logic: a rising dollar is stress. Combine level and
            # momentum; a very fast rise is stressful even from low levels.
            if dxy_zscore > 0 and dxy_velocity > 2:
                # Elevated AND rising: amplify, capped at 1.5x.
                amplified_zscore = dxy_zscore * (1 + min(dxy_velocity / 10, 0.5))
            elif dxy_velocity > 5:
                # Rapid rise even from low levels = stress.
                amplified_zscore = dxy_velocity / 5
            else:
                amplified_zscore = dxy_zscore

            stress_level = self.normalize_to_level(amplified_zscore)

            current_dxy = float(dxy_data.iloc[-1])

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=current_dxy,
                zscore=dxy_zscore,
                stress_level=stress_level,
                description=(
                    f"DXY: {current_dxy:.2f} "
                    f"(z={dxy_zscore:+.1f}), "
                    f"20d: {dxy_velocity:+.1f}%"
                ),
                interpretation=self._interpret(stress_level, dxy_zscore, dxy_velocity)
            )

        except Exception as e:
            # Degrade to "no signal" on any data/math failure.
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        zscore: float,
        velocity: float
    ) -> str:
        """Generate interpretation.

        Args:
            stress_level: Normalized stress score (compared against the
                40 / 60 / 75 thresholds below).
            zscore: DXY level z-score.
            velocity: 20-day DXY change in percent (unused in the message
                logic; kept for signature stability).

        Returns:
            Human-readable interpretation string.
        """
        if stress_level > 75:
            # FIX: was an f-string with no placeholders; plain literal now.
            return "🔴 SEVERE: Dollar surging (global funding stress)"
        elif stress_level > 60:
            return "⚠️ RISING: Dollar strengthening (EM pressure)"
        elif stress_level < 40:
            if zscore < -1:
                return "✅ WEAK: Dollar weakening (risk-on supportive)"
            else:
                return "✅ STABLE: Dollar at normal levels"
        else:
            return "⚡ WATCH: Dollar moderately strong"
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
class JPYCarryUnwindIndicator(BaseIndicator):
    """JPY carry trade unwind detector.

    USD/JPY falling = yen strengthening = carry unwind = risk-off.
    Rapid yen strength often coincides with equity selloffs
    (e.g. March 2020, when USD/JPY fell ~10% in days).

    Signal construction:
    - 20-day USD/JPY velocity, z-scored against its history
    - Sign flipped so yen strength reads as positive stress
    - Extra amplification on very rapid unwinds (>3% yen strength)

    Required data:
    - DEXJPUS: Yen per Dollar exchange rate (inverted to USD/JPY)
    """

    def __init__(self):
        super().__init__(
            name="JPY Carry Unwind",
            category=IndicatorCategory.FX_FUNDING,
            description="Yen strength (carry trade unwinding)",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute JPY carry unwind signal."""
        if not self._validate_data(data, ["DEXJPUS"]):
            return None

        try:
            yen_per_usd = data["DEXJPUS"].data  # JPY per USD

            if len(yen_per_usd) < self.zscore_window:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # Invert to USD/JPY so a fall means yen strength.
            usdjpy = 1 / yen_per_usd

            # Latest 20-day move; negative = yen strengthening = unwind.
            move_20d = (usdjpy.iloc[-1] / usdjpy.iloc[-21] - 1) * 100

            # Full history of 20-day moves for the z-score baseline.
            move_history = usdjpy.pct_change(20).dropna() * 100
            if len(move_history) < self.zscore_window:
                logger.warning(f"{self.name}: Insufficient velocity data")
                return None

            move_z = self.calculate_zscore(move_history)

            # Flip sign: yen strength (negative move) = positive stress.
            stress_z = -move_z

            # Amplify very rapid unwinds (>3% yen appreciation in a month).
            if move_20d < -3:
                stress_z *= (1 + abs(move_20d) / 10)

            stress_level = self.normalize_to_level(stress_z)
            latest_usdjpy = float(usdjpy.iloc[-1])

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=latest_usdjpy,
                zscore=stress_z,
                stress_level=stress_level,
                description=(
                    f"USD/JPY: {latest_usdjpy:.2f} "
                    f"(20d: {move_20d:+.1f}%), "
                    f"stress_z: {stress_z:+.1f}"
                ),
                interpretation=self._interpret(stress_level, move_20d, stress_z)
            )

        except Exception as e:
            # Degrade to "no signal" on any data/math failure.
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        velocity: float,
        zscore: float
    ) -> str:
        """Generate interpretation."""
        if stress_level > 75:
            return f"🔴 SEVERE: JPY surging (carry unwind panic, USD/JPY {velocity:+.1f}%)"
        if stress_level > 60:
            return "⚠️ UNWINDING: JPY strengthening (carry trades closing)"
        if stress_level < 40:
            if velocity > 2:
                return "✅ CARRY ON: JPY weakening (carry trades profitable)"
            return "✅ STABLE: JPY at normal levels"
        return "⚡ WATCH: JPY moderately strong"
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
class CrossCurrencyBasisIndicator(BaseIndicator):
    """Cross-currency basis (funding stress) proxy.

    True basis swaps are not available in free data, so EUR/USD realized
    volatility stands in: elevated FX volatility is used as a proxy for
    dollar funding stress (a widening basis).

    Required data:
    - DEXUSEU: Euro/Dollar exchange rate
    """

    def __init__(self):
        super().__init__(
            name="FX Funding Stress",
            category=IndicatorCategory.FX_FUNDING,
            description="Cross-currency funding stress (EUR/USD vol)",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute FX funding stress signal."""
        if not self._validate_data(data, ["DEXUSEU"]):
            return None

        try:
            fx = data["DEXUSEU"].data

            # Need the z-score window plus the 20-day vol lookback.
            if len(fx) < self.zscore_window + 20:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # Daily log returns.
            log_rets = np.log(fx / fx.shift(1)).dropna()

            # 20-day realized volatility, annualized, in percent.
            rv = (log_rets.rolling(20).std() * np.sqrt(252) * 100).dropna()

            if len(rv) < self.zscore_window:
                return None

            rv_z = self.calculate_zscore(rv)

            # Elevated FX volatility maps directly to funding stress.
            stress_level = self.normalize_to_level(rv_z)
            latest_vol = float(rv.iloc[-1])

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=latest_vol,
                zscore=rv_z,
                stress_level=stress_level,
                description=(
                    f"EUR/USD vol: {latest_vol:.1f}% "
                    f"(z={rv_z:+.1f})"
                ),
                interpretation=self._interpret(stress_level, rv_z)
            )

        except Exception as e:
            # Degrade to "no signal" on any data/math failure.
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(self, stress_level: float, zscore: float) -> str:
        """Generate interpretation."""
        if stress_level > 75:
            return "🔴 SEVERE: Extreme FX volatility (funding crisis)"
        if stress_level > 60:
            return "⚠️ ELEVATED: FX volatility rising (funding stress)"
        if stress_level < 40:
            return "✅ CALM: Low FX volatility (stable funding)"
        return "⚡ WATCH: FX volatility moderately elevated"
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
class EmergingMarketStressIndicator(BaseIndicator):
    """
    Emerging market stress indicator.

    Watches gold (safe haven) against dollar strength. The two normally
    move opposite ways; when gold AND the dollar rally together the market
    is in extreme stress/uncertainty.

    Signal construction:
    - Rolling correlation between 20-day GLD and DXY returns
    - Typically negative (dollar up, gold down)
    - Positive correlation = crisis mode

    Required data:
    - GLD: Gold ETF
    - DXY: Dollar Index
    """

    def __init__(self):
        """Configure the indicator's identity and its one-year z-score lookback."""
        super().__init__(
            name="EM Stress (Gold/Dollar)",
            category=IndicatorCategory.FX_FUNDING,
            description="Emerging market stress via gold-dollar",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute the EM stress signal.

        Returns None when either input series is missing, too short, or the
        calculation raises.
        """
        # Both legs are required; bail out early otherwise.
        gold_ok = "GLD" in data and len(data["GLD"].data) > 0
        dollar_ok = "DXY" in data and len(data["DXY"].data) > 0

        if not (gold_ok and dollar_ok):
            logger.warning(f"{self.name}: Missing GLD or DXY data")
            return None

        try:
            # Join on dates so both series cover the same sessions.
            aligned = pd.DataFrame({
                'gold': data["GLD"].data,
                'dollar': data["DXY"].data
            }).dropna()

            if len(aligned) < self.zscore_window:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # 20-day percentage returns for each leg.
            gold_ret = aligned['gold'].pct_change(20).dropna() * 100
            dollar_ret = aligned['dollar'].pct_change(20).dropna() * 100

            # Rolling 60-day correlation between the two return streams.
            rolling_corr = gold_ret.rolling(60).corr(dollar_ret).dropna()

            if len(rolling_corr) < self.zscore_window:
                return None

            # Positive correlation (both rising together) is the crisis
            # signature, so a high z-score maps to high stress.
            corr_z = self.calculate_zscore(rolling_corr)
            level = self.normalize_to_level(corr_z)

            latest_corr = float(rolling_corr.iloc[-1])
            latest_gold = float(gold_ret.iloc[-1])
            latest_dollar = float(dollar_ret.iloc[-1])

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=latest_corr,
                zscore=corr_z,
                stress_level=level,
                description=(
                    f"GLD-DXY corr: {latest_corr:+.2f} "
                    f"(z={corr_z:+.1f}), "
                    f"GLD: {latest_gold:+.1f}%, DXY: {latest_dollar:+.1f}%"
                ),
                interpretation=self._interpret(level, latest_corr, latest_gold, latest_dollar)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        corr: float,
        gld_ret: float,
        dxy_ret: float
    ) -> str:
        """Translate the stress reading into a human-readable verdict."""
        if stress_level > 70 and corr > 0.3:
            both_rising = gld_ret > 0 and dxy_ret > 0
            return (
                "🔴 CRISIS: Gold AND dollar rising (extreme safe haven demand)"
                if both_rising
                else "⚠️ UNUSUAL: Abnormal gold-dollar correlation"
            )
        if stress_level > 60:
            return "⚠️ STRESS: Gold-dollar correlation rising"
        if stress_level < 40:
            return "✅ NORMAL: Healthy gold-dollar dynamics"
        return "⚡ WATCH: Gold-dollar correlation shifting"
|
src/core/macroeconomics/markets/risk_engine/indicators/liquidity.py
ADDED
|
@@ -0,0 +1,429 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Liquidity indicators for market risk detection.
|
| 3 |
+
|
| 4 |
+
Monitors market liquidity, trading costs, and depth.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from typing import Dict, Optional
|
| 8 |
+
import pandas as pd
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
from src.core.macroeconomics.markets.risk_engine.models import (
|
| 12 |
+
IndicatorSignal,
|
| 13 |
+
IndicatorCategory,
|
| 14 |
+
TimeSeriesData
|
| 15 |
+
)
|
| 16 |
+
from src.core.macroeconomics.markets.risk_engine.indicators.base import BaseIndicator
|
| 17 |
+
from src.telegram_bot.logger import main_logger as logger
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class AmihudIlliquidityIndicator(BaseIndicator):
    """
    Amihud illiquidity measure.

    Captures price impact per dollar traded: larger values mean a thinner
    market where trades move prices more.

    Formula: Illiquidity = |Return| / Volume,
    averaged over a rolling window and then z-scored against history.

    Spikes during liquidity crises (2008, March 2020).

    Required data:
    - SPY: S&P 500 ETF (has volume data)
    """

    def __init__(self):
        """Configure the indicator's identity and its one-year z-score lookback."""
        super().__init__(
            name="Amihud Illiquidity",
            category=IndicatorCategory.LIQUIDITY,
            description="Price impact per dollar traded",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute the Amihud illiquidity signal.

        Returns None when SPY price/volume data is missing, too short, or
        the calculation raises.
        """
        if not self._validate_data(data, ["SPY"]):
            return None

        try:
            spy = data["SPY"]

            # Volume is mandatory for this measure.
            if spy.volume is None or len(spy.volume) == 0:
                logger.warning(f"{self.name}: No volume data available")
                return None

            # Align close prices with volume on shared dates.
            frame = pd.DataFrame({
                'price': spy.data,
                'volume': spy.volume
            }).dropna()

            if len(frame) < self.zscore_window + 20:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            daily_ret = frame['price'].pct_change().dropna()

            # Amihud ratio: |return| per million shares traded.
            # Volume is scaled to millions to keep magnitudes reasonable;
            # zero-volume days produce inf and are dropped.
            ratio = daily_ret.abs() / (frame['volume'].iloc[1:] / 1e6)
            ratio = ratio.replace([np.inf, -np.inf], np.nan).dropna()

            # Smooth with a 20-day mean before z-scoring.
            smoothed = ratio.rolling(20).mean().dropna()

            if len(smoothed) < self.zscore_window:
                logger.warning(f"{self.name}: Insufficient data for z-score")
                return None

            illiq_z = self.calculate_zscore(smoothed)
            illiq_accel = self.calculate_acceleration(smoothed, periods=5)
            level = self.normalize_to_level(illiq_z)
            latest = float(smoothed.iloc[-1])

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=latest * 1e6,  # Scale for display
                zscore=illiq_z,
                stress_level=level,
                description=(
                    f"Illiquidity: {latest*1e6:.2f} "
                    f"(z={illiq_z:+.1f}), "
                    f"acceleration: {illiq_accel*1e6:+.2f}"
                ),
                interpretation=self._interpret(level, illiq_z, illiq_accel)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        zscore: float,
        acceleration: float
    ) -> str:
        """Translate the stress reading into a human-readable verdict."""
        if stress_level > 75:
            # Direction of travel distinguishes an active squeeze from
            # a stabilizing (if still stressed) market.
            if acceleration > 0:
                return "🔴 SEVERE: Liquidity evaporating (illiquidity spiking)"
            return "🔴 STRESSED: Low liquidity (but stabilizing)"
        if stress_level > 60:
            return "⚠️ WARNING: Liquidity deteriorating"
        if stress_level < 40:
            return "✅ DEEP: High market liquidity"
        return "⚡ WATCH: Liquidity moderately thin"
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
class BidAskSpreadIndicator(BaseIndicator):
    """
    Bid-ask spread proxy using high-low range.

    True bid-ask spreads are not available in free data, so the daily
    high-low range stands in as a proxy for trading costs: wider ranges
    imply higher transaction costs and liquidity stress.

    Signal construction:
    - Compute (High - Low) / Close ratio
    - Smooth with a 20-day moving average
    - Z-score against history

    Required data:
    - SPY: S&P 500 ETF
    """

    def __init__(self):
        """Configure the indicator's identity and its one-year z-score lookback."""
        super().__init__(
            name="Bid-Ask Spread Proxy",
            category=IndicatorCategory.LIQUIDITY,
            description="High-low range as liquidity proxy",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute the bid-ask spread proxy.

        Returns None when SPY high/low data is missing, too short, or the
        calculation raises.
        """
        if not self._validate_data(data, ["SPY"]):
            return None

        try:
            spy = data["SPY"]

            # Highs and lows are both required for the range proxy
            # (any() short-circuits, so len() never sees None).
            if any(s is None or len(s) == 0 for s in (spy.high, spy.low)):
                logger.warning(f"{self.name}: No OHLC data available")
                return None

            # Align high, low and close on shared dates.
            frame = pd.DataFrame({
                'high': spy.high,
                'low': spy.low,
                'close': spy.data
            }).dropna()

            if len(frame) < self.zscore_window + 20:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # Daily range as a percentage of the close.
            range_pct = (frame['high'] - frame['low']) / frame['close'] * 100

            # 20-day mean smooths day-to-day noise.
            range_ma = range_pct.rolling(20).mean().dropna()

            if len(range_ma) < self.zscore_window:
                logger.warning(f"{self.name}: Insufficient data for z-score")
                return None

            range_z = self.calculate_zscore(range_ma)
            level = self.normalize_to_level(range_z)
            latest = float(range_ma.iloc[-1])

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=latest,
                zscore=range_z,
                stress_level=level,
                description=(
                    f"H-L spread: {latest:.2f}% "
                    f"(z={range_z:+.1f})"
                ),
                interpretation=self._interpret(level, range_z)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(self, stress_level: float, zscore: float) -> str:
        """Translate the stress reading into a human-readable verdict."""
        if stress_level > 75:
            return "🔴 SEVERE: Very wide spreads (high trading costs)"
        if stress_level > 60:
            return "⚠️ WARNING: Spreads widening"
        if stress_level < 40:
            return "✅ TIGHT: Low trading costs"
        return "⚡ WATCH: Spreads moderately wide"
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
class TreasuryLiquidityIndicator(BaseIndicator):
    """
    Treasury market liquidity stress.

    Watches Treasury ETF (TLT/IEF) price action: unusual volatility or
    selling in "safe" Treasuries points to liquidity problems. During
    extreme stress (March 2020) even Treasuries became illiquid.

    Signal construction:
    - Realized volatility of the available Treasury ETFs
    - Z-score against history
    - High vol in "safe" Treasuries = liquidity crisis

    Required data:
    - TLT: 20+ Year Treasury ETF
    - IEF: 7-10 Year Treasury ETF
    """

    def __init__(self):
        """Configure the indicator's identity and its one-year z-score lookback."""
        super().__init__(
            name="Treasury Liquidity",
            category=IndicatorCategory.LIQUIDITY,
            description="Treasury market liquidity stress",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute Treasury liquidity stress.

        Uses whichever of TLT/IEF is present; returns None when neither is
        available, the data is too short, or the calculation raises.
        """
        available = [
            etf for etf in ("TLT", "IEF")
            if etf in data and len(data[etf].data) > 0
        ]

        if not available:
            logger.warning(f"{self.name}: No Treasury ETF data available")
            return None

        try:
            series_list = [data[etf].data for etf in available]

            # Average across ETFs when more than one is available.
            if len(series_list) == 1:
                tsy_price = series_list[0]
            else:
                merged = pd.DataFrame(
                    {f'tsy{i}': s for i, s in enumerate(series_list)}
                ).dropna()
                tsy_price = merged.mean(axis=1)

            if len(tsy_price) < self.zscore_window + 20:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # Annualized 20-day realized volatility from log returns.
            log_ret = np.log(tsy_price / tsy_price.shift(1)).dropna()
            ann_vol = (log_ret.rolling(20).std() * np.sqrt(252) * 100).dropna()

            if len(ann_vol) < self.zscore_window:
                return None

            # High Treasury vol = liquidity stress.
            vol_z = self.calculate_zscore(ann_vol)
            level = self.normalize_to_level(vol_z)
            latest_vol = float(ann_vol.iloc[-1])

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=latest_vol,
                zscore=vol_z,
                stress_level=level,
                description=(
                    f"Treasury vol: {latest_vol:.1f}% "
                    f"(z={vol_z:+.1f}) "
                    f"[{', '.join(available)}]"
                ),
                interpretation=self._interpret(level, vol_z)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(self, stress_level: float, zscore: float) -> str:
        """Translate the stress reading into a human-readable verdict."""
        if stress_level > 75:
            return "🔴 CRISIS: Treasury market dysfunction (liquidity crisis)"
        if stress_level > 60:
            return "⚠️ WARNING: Elevated Treasury volatility"
        if stress_level < 40:
            return "✅ STABLE: Normal Treasury market function"
        return "⚡ WATCH: Treasury volatility increasing"
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
class MarketDepthIndicator(BaseIndicator):
    """
    Market depth indicator using volume analysis.

    Liquidity stress shows up at both volume extremes: spikes mean forced
    selling, collapses mean a market freeze. This indicator flags either
    tail.

    Signal construction:
    - Z-score of smoothed volume (both high and low extremes count)
    - |z| maps to the stress level

    Required data:
    - SPY: S&P 500 ETF with volume
    """

    def __init__(self):
        """Configure the indicator's identity and its one-year z-score lookback."""
        super().__init__(
            name="Market Depth",
            category=IndicatorCategory.LIQUIDITY,
            description="Trading volume extremes",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute the market depth signal.

        Returns None when SPY volume data is missing, too short, or the
        calculation raises.
        """
        if not self._validate_data(data, ["SPY"]):
            return None

        try:
            spy = data["SPY"]
            if spy.volume is None or len(spy.volume) == 0:
                logger.warning(f"{self.name}: No volume data available")
                return None

            volume = spy.volume

            if len(volume) < self.zscore_window + 20:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # 20-day mean volume smooths daily noise.
            smoothed = volume.rolling(20).mean().dropna()

            if len(smoothed) < self.zscore_window:
                return None

            vol_z = self.calculate_zscore(smoothed)

            # Either tail is stress — very high = forced selling,
            # very low = market freeze — so score on |z|.
            level = self.normalize_to_level(abs(vol_z))

            latest = float(volume.iloc[-1])
            average = float(smoothed.iloc[-1])

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=latest,
                zscore=vol_z,
                stress_level=level,
                description=(
                    f"Volume: {latest/1e6:.1f}M "
                    f"(20d avg: {average/1e6:.1f}M, "
                    f"z={vol_z:+.1f})"
                ),
                interpretation=self._interpret(level, vol_z, latest, average)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        zscore: float,
        current_vol: float,
        avg_vol: float
    ) -> str:
        """Translate the stress reading into a human-readable verdict."""
        if stress_level > 75:
            # Sign of z distinguishes a panic spike from a freeze.
            return (
                "🔴 EXTREME: Panic selling (volume spike)"
                if zscore > 2
                else "🔴 FROZEN: Market freeze (volume collapse)"
            )
        if stress_level > 60:
            return (
                "⚠️ ELEVATED: High volume (potential distribution)"
                if zscore > 0
                else "⚠️ THIN: Low volume (reduced liquidity)"
            )
        if stress_level < 40:
            return "✅ NORMAL: Healthy trading volume"
        return "⚡ WATCH: Volume at moderately unusual levels"
|
src/core/macroeconomics/markets/risk_engine/indicators/macro_rates.py
ADDED
|
@@ -0,0 +1,551 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Macro and rates indicators for market risk detection.
|
| 3 |
+
|
| 4 |
+
Monitors yield curve, financial conditions, repo market, and bond volatility.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from typing import Dict, Optional
|
| 8 |
+
import pandas as pd
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
from src.core.macroeconomics.markets.risk_engine.models import (
|
| 12 |
+
IndicatorSignal,
|
| 13 |
+
IndicatorCategory,
|
| 14 |
+
TimeSeriesData
|
| 15 |
+
)
|
| 16 |
+
from src.core.macroeconomics.markets.risk_engine.indicators.base import BaseIndicator
|
| 17 |
+
from src.telegram_bot.logger import main_logger as logger
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class YieldCurveIndicator(BaseIndicator):
    """
    Yield curve inversion and re-steepening detector.

    Monitors 10Y-2Y Treasury spread:
    - Inversion (spread < 0) = recession warning
    - Re-steepening after inversion = recession imminent
    - Extreme steepening = crisis/Fed intervention

    Signal construction:
    - Calculate 10Y-2Y spread
    - Z-score of spread
    - Detect inversion and re-steepening dynamics

    Note: FRED's DGS10/DGS2 series are quoted in percent, so the raw
    spread is in percentage points; it is converted to basis points
    (x100) for thresholds and display.

    Required data:
    - DGS10: 10-Year Treasury Constant Maturity Rate
    - DGS2: 2-Year Treasury Constant Maturity Rate
    """

    def __init__(self):
        """Configure the indicator's identity and its one-year z-score lookback."""
        super().__init__(
            name="Yield Curve",
            category=IndicatorCategory.MACRO_RATES,
            description="10Y-2Y spread (inversion & re-steepening)",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute the yield curve signal.

        Returns:
            IndicatorSignal whose ``value`` is the 10Y-2Y spread in
            percentage points (the description shows basis points), or
            None on missing/insufficient data or calculation failure.
        """
        required = ["DGS10", "DGS2"]
        if not self._validate_data(data, required):
            return None

        try:
            # Align the two rate series on shared dates.
            combined = pd.DataFrame({
                'dgs10': data["DGS10"].data,
                'dgs2': data["DGS2"].data
            }).dropna()

            if len(combined) < self.zscore_window:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # Spread in percentage points (FRED rates are in percent).
            spread = combined['dgs10'] - combined['dgs2']

            # Check for inversion.
            is_inverted = spread.iloc[-1] < 0

            spread_zscore = self.calculate_zscore(spread)

            # Rate of change over ~1 month, still in percentage points.
            spread_accel = self.calculate_acceleration(spread, periods=20)

            # BUG FIX: the spread was previously labelled "bps" without the
            # x100 conversion, and the re-steepening trigger compared
            # percentage points against 5 (meant as 5bps), so it could
            # effectively never fire. Convert to basis points once and use
            # that for both the threshold and display.
            accel_bps = spread_accel * 100

            # Risk logic:
            # 1. Inverted curve = warning
            # 2. Re-steepening after inversion = imminent risk
            # 3. Extreme steepening = crisis mode
            if is_inverted:
                # Inverted: moderate risk.
                base_stress = 65.0
                # If re-steepening (positive acceleration), increase risk.
                if accel_bps > 5:  # Steepening > 5bps
                    stress_level = min(base_stress + 20, 95.0)
                    signal_type = "re-steepening"
                else:
                    stress_level = base_stress
                    signal_type = "inverted"
            else:
                # Not inverted: use z-score.
                # Negative z-score (flattening) = rising risk;
                # positive z-score (steepening) = falling risk unless extreme.
                if spread_zscore < -1.5:
                    # Flattening toward inversion.
                    stress_level = self.normalize_to_level(-spread_zscore)
                    signal_type = "flattening"
                elif spread_zscore > 2.0:
                    # Extreme steepening (crisis intervention).
                    stress_level = self.normalize_to_level(spread_zscore * 0.5)
                    signal_type = "extreme_steep"
                else:
                    # Normal regime: flatter curve = higher stress.
                    stress_level = float(np.clip(50.0 - spread_zscore * 5, 0, 100))
                    signal_type = "normal"

            current_spread = float(spread.iloc[-1])
            spread_bps = current_spread * 100

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=current_spread,
                zscore=spread_zscore,
                stress_level=stress_level,
                description=(
                    f"10Y-2Y: {spread_bps:+.0f}bps "
                    f"(z={spread_zscore:+.1f}), "
                    f"accel: {accel_bps:+.1f}bps [{signal_type}]"
                ),
                interpretation=self._interpret(stress_level, signal_type, spread_bps, accel_bps)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        signal_type: str,
        spread: float,
        accel: float
    ) -> str:
        """Map the curve regime onto a human-readable verdict.

        Args:
            stress_level: normalized 0-100 stress score.
            signal_type: regime label computed in ``compute``.
            spread: current 10Y-2Y spread in basis points.
            accel: spread rate of change in basis points.
        """
        if signal_type == "re-steepening":
            return "🔴 CRITICAL: Curve re-steepening after inversion (recession signal)"
        if signal_type == "inverted":
            return f"⚠️ INVERTED: Yield curve inverted ({spread:+.0f}bps)"
        if signal_type == "extreme_steep":
            return "🔴 CRISIS: Extreme steepening (Fed intervention signal)"
        if signal_type == "flattening":
            return "⚠️ FLATTENING: Curve flattening toward inversion"
        if stress_level < 40:
            return "✅ NORMAL: Healthy yield curve"
        return "⚡ WATCH: Yield curve moderately flat"
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
class NFCIIndicator(BaseIndicator):
    """
    National Financial Conditions Index.

    Chicago Fed's comprehensive financial stress measure, blending
    credit, leverage, and risk indicators.

    NFCI = 0: Average conditions
    NFCI > 0: Tighter than average (stress)
    NFCI < 0: Looser than average (easy conditions)

    Signal construction:
    - Use the NFCI reading directly (the index is published already
      standardized, so it doubles as its own z-score)
    - Track its short-term acceleration
    - Positive NFCI = stress

    Required data:
    - NFCI: Chicago Fed National Financial Conditions Index
    """

    def __init__(self):
        super().__init__(
            name="NFCI",
            category=IndicatorCategory.MACRO_RATES,
            description="National Financial Conditions Index",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Build the NFCI stress signal; None if data is missing/too short."""
        if not self._validate_data(data, ["NFCI"]):
            return None

        try:
            series = data["NFCI"].data

            # Require a full z-score window of history.
            if len(series) < self.zscore_window:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # Published standardized (mean 0, std 1): the raw reading is
            # usable as a z-score without further transformation.
            latest = float(series.iloc[-1])

            # Weekly series -> 4 periods is roughly one month of momentum.
            accel = self.calculate_acceleration(series, periods=4)

            # Map the (roughly -1.5..+3) index onto the 0-100 stress scale.
            stress = self.normalize_to_level(latest, clip=2.0)

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=latest,
                zscore=latest,  # NFCI is already z-scored
                stress_level=stress,
                description=(
                    f"NFCI: {latest:+.2f} "
                    f"(accel: {accel:+.2f})"
                ),
                interpretation=self._interpret(stress, latest, accel)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        nfci: float,
        acceleration: float
    ) -> str:
        """Translate the stress level into a human-readable verdict."""
        if stress_level > 75 and acceleration > 0.1:
            return "🔴 SEVERE: Financial conditions tightening rapidly"
        if stress_level > 75:
            return "🔴 TIGHT: Severe financial stress (stabilizing)"
        if stress_level > 60:
            return "⚠️ ELEVATED: Financial conditions tightening"
        if stress_level < 25:
            return "⚠️ LOOSE: Extremely easy conditions (bubble risk)"
        if stress_level < 40:
            return "✅ EASY: Accommodative financial conditions"
        return "⚡ WATCH: Financial conditions moderately tight"
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
class RepoStressIndicator(BaseIndicator):
    """
    Repo market stress indicator.

    Watches the spread between SOFR and EFFR (Fed Funds). A wide
    positive spread means secured funding is expensive relative to
    unsecured — i.e. repo-market funding stress.

    Historical episodes: September 2019 (SOFR spike) and March 2020
    (severe repo dysfunction).

    Signal construction:
    - Spread = SOFR - EFFR on overlapping dates
    - Z-score of the spread over the lookback window
    - Positive spread = repo stress

    Required data:
    - SOFR: Secured Overnight Financing Rate
    - DFF: Federal Funds Effective Rate (EFFR)
    """

    def __init__(self):
        super().__init__(
            name="Repo Stress",
            category=IndicatorCategory.MACRO_RATES,
            description="SOFR-EFFR spread (repo market stress)",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Build the repo-stress signal; None if data is missing/too short."""
        if not self._validate_data(data, ["SOFR", "DFF"]):
            return None

        try:
            # Join the two rate series on their common dates.
            frame = pd.DataFrame({
                'sofr': data["SOFR"].data,
                'effr': data["DFF"].data
            }).dropna()

            if len(frame) < self.zscore_window:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # SOFR above Fed Funds => secured funding costs more than
            # unsecured => repo stress.
            spread = frame['sofr'] - frame['effr']

            zscore = self.calculate_zscore(spread)
            accel = self.calculate_acceleration(spread, periods=5)
            stress = self.normalize_to_level(zscore)
            latest = float(spread.iloc[-1])

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=latest,
                zscore=zscore,
                stress_level=stress,
                description=(
                    f"SOFR-EFFR: {latest:+.1f}bps "
                    f"(z={zscore:+.1f}), "
                    f"accel: {accel:+.2f}bps"
                ),
                interpretation=self._interpret(stress, zscore, latest)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        zscore: float,
        spread: float
    ) -> str:
        """Translate the stress level into a human-readable verdict."""
        if stress_level > 75:
            return f"🔴 SEVERE: Repo market dysfunction (spread: {spread:+.1f}bps)"
        if stress_level > 60:
            return "⚠️ ELEVATED: Repo market stress building"
        if stress_level < 40:
            return "✅ STABLE: Repo market functioning normally"
        return "⚡ WATCH: Repo market showing mild stress"
|
| 342 |
+
|
| 343 |
+
|
| 344 |
+
class MOVEIndexIndicator(BaseIndicator):
    """
    MOVE Index - bond market volatility.

    Merrill Lynch Option Volatility Estimate: the Treasury-market
    analogue of the VIX. A rising MOVE signals rates volatility and
    macro uncertainty, and often spikes around regime changes.

    Signal construction:
    - Z-score the MOVE level over the lookback window
    - Track its short-term acceleration
    - High MOVE = rates volatility = macro uncertainty

    Required data:
    - MOVE: Merrill Option Volatility Estimate Index
    """

    def __init__(self):
        super().__init__(
            name="MOVE Index",
            category=IndicatorCategory.MACRO_RATES,
            description="Bond market volatility (rates VIX)",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Build the MOVE stress signal; None if data is missing/too short."""
        if not self._validate_data(data, ["MOVE"]):
            return None

        try:
            series = data["MOVE"].data

            if len(series) < self.zscore_window:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            zscore = self.calculate_zscore(series)
            accel = self.calculate_acceleration(series, periods=5)
            stress = self.normalize_to_level(zscore)
            latest = float(series.iloc[-1])

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=latest,
                zscore=zscore,
                stress_level=stress,
                description=(
                    f"MOVE: {latest:.1f} "
                    f"(z={zscore:+.1f}), "
                    f"accel: {accel:+.1f}"
                ),
                interpretation=self._interpret(stress, zscore, accel)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        zscore: float,
        acceleration: float
    ) -> str:
        """Translate the stress level into a human-readable verdict."""
        if stress_level > 75:
            if acceleration > 0:
                return "🔴 EXTREME: Bond volatility surging (macro regime shift)"
            return "🔴 ELEVATED: High bond volatility (stabilizing)"
        if stress_level > 60:
            return "⚠️ RISING: Bond market volatility increasing"
        if stress_level < 40:
            return "✅ CALM: Low bond market volatility"
        return "⚡ WATCH: Bond volatility moderately elevated"
|
| 429 |
+
|
| 430 |
+
|
| 431 |
+
class RealRatesIndicator(BaseIndicator):
    """
    Real interest rates indicator.

    Monitors 10Y Treasury yield adjusted for inflation expectations.
    Negative real rates = loose policy, asset bubbles.
    Rising real rates = tightening, risk-off.

    Signal construction:
    - Real rate = DGS10 - T10YIE breakeven (preferred); when breakevens
      are unavailable, falls back to DGS10 minus its own 252-day moving
      average as a crude inflation proxy
    - Monitor real rate level and change
    - Rising real rates = headwind for risk assets

    Required data:
    - DGS10: 10-Year Treasury yield
    - T10YIE: 10-Year Breakeven Inflation Rate (optional; enables the
      preferred calculation)
    """

    def __init__(self):
        super().__init__(
            name="Real Rates",
            category=IndicatorCategory.MACRO_RATES,
            description="Real interest rates (nominal - inflation)",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute real rates signal.

        Returns None when required data is missing/too short or the
        calculation raises; errors are logged, never propagated.
        """
        # Only DGS10 is strictly required; T10YIE is used when present.
        if not self._validate_data(data, ["DGS10"]):
            return None

        try:
            dgs10 = data["DGS10"].data

            # Preferred path: market-implied inflation expectations.
            if "T10YIE" in data and len(data["T10YIE"].data) > 0:
                inflation = data["T10YIE"].data
                # Align the two series on their common dates.
                combined = pd.DataFrame({
                    'nominal': dgs10,
                    'inflation': inflation
                }).dropna()

                if len(combined) < self.zscore_window:
                    return None

                # Real rate = Nominal - Inflation
                real_rate = combined['nominal'] - combined['inflation']
            else:
                # Fallback: use simple moving average of 10Y as inflation proxy.
                # This is crude but gives us something.
                logger.warning(f"{self.name}: No breakeven inflation data, using approximation")
                # Need an extra year of history for the rolling mean to warm up.
                if len(dgs10) < self.zscore_window + 252:
                    return None

                inflation_proxy = dgs10.rolling(252).mean()  # 1-year average as "expected"
                real_rate = dgs10 - inflation_proxy
                real_rate = real_rate.dropna()

            if len(real_rate) < self.zscore_window:
                return None

            # Z-score of the real-rate *level* (reported in the signal).
            real_rate_zscore = self.calculate_zscore(real_rate)

            # Acceleration over ~1 trading month: how fast are real rates rising?
            real_rate_accel = self.calculate_acceleration(real_rate, periods=20)

            # Stress logic:
            # - Rapidly rising real rates (tightening) = stress
            # - Very negative real rates (bubble territory) = moderate stress
            # NOTE(review): unlike sibling indicators, normalize_to_level is
            # fed scaled accelerations here rather than z-scores — confirm
            # the x10 / x5 scaling factors are intentional.
            if real_rate_accel > 0.2:  # Rising
                # Tightening = risk-off stress
                stress_level = self.normalize_to_level(real_rate_accel * 10)
                signal_type = "tightening"
            elif real_rate.iloc[-1] < -1.0:  # Very negative
                # Bubble territory: fixed moderate stress score.
                stress_level = 55.0
                signal_type = "bubble"
            else:
                # Normal: falling real rates = lower stress
                stress_level = self.normalize_to_level(-real_rate_accel * 5)
                signal_type = "normal"

            current_real = float(real_rate.iloc[-1])

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=current_real,
                zscore=real_rate_zscore,
                stress_level=stress_level,
                description=(
                    f"Real rate: {current_real:+.1f}% "
                    f"(z={real_rate_zscore:+.1f}), "
                    f"accel: {real_rate_accel:+.2f}% [{signal_type}]"
                ),
                interpretation=self._interpret(stress_level, signal_type, current_real, real_rate_accel)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        signal_type: str,
        real_rate: float,
        accel: float
    ) -> str:
        """Generate interpretation.

        accel is currently unused; kept for interface symmetry with
        compute().
        """
        if signal_type == "tightening":
            return f"🔴 TIGHTENING: Real rates rising fast (headwind for risk assets)"
        elif signal_type == "bubble":
            return f"⚠️ BUBBLE: Deeply negative real rates ({real_rate:+.1f}%)"
        else:
            if stress_level < 40:
                return "✅ SUPPORTIVE: Falling/stable real rates"
            else:
                return "⚡ WATCH: Real rates moderately elevated"
|
src/core/macroeconomics/markets/risk_engine/indicators/tail_risk.py
ADDED
|
@@ -0,0 +1,538 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Tail risk indicators for market risk detection.
|
| 3 |
+
|
| 4 |
+
Monitors correlation spikes, regime shifts, and extreme events.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from typing import Dict, Optional
|
| 8 |
+
import pandas as pd
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
from src.core.macroeconomics.markets.risk_engine.models import (
|
| 12 |
+
IndicatorSignal,
|
| 13 |
+
IndicatorCategory,
|
| 14 |
+
TimeSeriesData
|
| 15 |
+
)
|
| 16 |
+
from src.core.macroeconomics.markets.risk_engine.indicators.base import BaseIndicator
|
| 17 |
+
from src.telegram_bot.logger import main_logger as logger
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class CorrelationSpikeIndicator(BaseIndicator):
    """
    Cross-asset correlation spike detector.

    During crises correlations spike and everything falls together:
    diversification breaks down, which signals systemic stress.

    Signal construction:
    - 60-day rolling pairwise correlations among SPY, TLT and GLD
      (any 2 of the 3 assets are enough)
    - Average *absolute* pairwise correlation per window
    - A spike toward +1 or -1 = stress

    Required data:
    - SPY: Equities
    - TLT: Bonds
    - GLD: Gold
    """

    def __init__(self):
        super().__init__(
            name="Correlation Spike",
            category=IndicatorCategory.TAIL_RISK,
            description="Cross-asset correlation breakdown",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Build the correlation-spike signal; None if data is insufficient."""
        # Keep whichever of the three assets actually have data.
        available = [
            t for t in ("SPY", "TLT", "GLD")
            if t in data and len(data[t].data) > 0
        ]

        if len(available) < 2:
            logger.warning(f"{self.name}: Insufficient asset data")
            return None

        try:
            prices = pd.DataFrame(
                {t: data[t].data for t in available}
            ).dropna()

            if len(prices) < self.zscore_window + 60:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            returns = prices.pct_change().dropna()

            # Rolling 60-day correlation matrix; summarize each window by
            # its mean absolute off-diagonal correlation.
            window = 60
            n_assets = len(available)
            pairs = [
                (j, k)
                for j in range(n_assets)
                for k in range(j + 1, n_assets)
            ]

            avg_corrs = []
            for end in range(window, len(returns)):
                corr_matrix = returns.iloc[end - window:end].corr()
                off_diag = [abs(corr_matrix.iloc[j, k]) for j, k in pairs]
                avg_corrs.append(np.mean(off_diag) if off_diag else 0.0)

            corr_series = pd.Series(avg_corrs, index=returns.index[window:])

            if len(corr_series) < self.zscore_window:
                logger.warning(f"{self.name}: Insufficient correlation data")
                return None

            # High average correlation = stress.
            zscore = self.calculate_zscore(corr_series)
            stress = self.normalize_to_level(zscore)
            latest = float(corr_series.iloc[-1])

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=latest,
                zscore=zscore,
                stress_level=stress,
                description=(
                    f"Avg correlation: {latest:.2f} "
                    f"(z={zscore:+.1f}) "
                    f"[{', '.join(available)}]"
                ),
                interpretation=self._interpret(stress, latest, zscore)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        corr: float,
        zscore: float
    ) -> str:
        """Translate the stress level into a human-readable verdict."""
        if stress_level > 75:
            return f"🔴 BREAKDOWN: Correlations spiking (diversification failing, {corr:.2f})"
        if stress_level > 60:
            return "⚠️ RISING: Correlations increasing (contagion risk)"
        if stress_level < 40:
            return "✅ DIVERSIFIED: Normal correlation structure"
        return "⚡ WATCH: Correlations moderately elevated"
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
class VolatilityRegimeIndicator(BaseIndicator):
    """
    Volatility regime shift detector.

    Detects transitions between low-vol and high-vol regimes; a regime
    change is a structural shift and therefore a risk signal.

    Signal construction:
    - 20-day realized volatility vs 252-day realized volatility
    - Ratio >> 1 means a shift to a higher-vol regime is in progress
    - Z-score of the ratio drives the stress level

    Required data:
    - SPY: S&P 500 for volatility calculation
    """

    def __init__(self):
        super().__init__(
            name="Volatility Regime",
            category=IndicatorCategory.TAIL_RISK,
            description="Volatility regime shift detection",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Build the volatility-regime signal; None if data is insufficient."""
        if not self._validate_data(data, ["SPY"]):
            return None

        try:
            spy = data["SPY"].data

            if len(spy) < self.zscore_window + 20:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # Daily log returns.
            log_rets = np.log(spy / spy.shift(1)).dropna()

            # Annualized realized vol (in %) over fast and slow windows.
            fast_vol = log_rets.rolling(20).std() * np.sqrt(252) * 100
            slow_vol = log_rets.rolling(252).std() * np.sqrt(252) * 100

            vols = pd.DataFrame({
                'short': fast_vol,
                'long': slow_vol
            }).dropna()

            if len(vols) < self.zscore_window:
                return None

            # Ratio > 1 = elevated regime; >> 1 = shift in progress.
            ratio = vols['short'] / vols['long']

            zscore = self.calculate_zscore(ratio)
            # Speed of the regime shift.
            accel = self.calculate_acceleration(ratio, periods=5)
            stress = self.normalize_to_level(zscore)

            latest_ratio = float(ratio.iloc[-1])
            latest_fast = float(vols['short'].iloc[-1])
            latest_slow = float(vols['long'].iloc[-1])

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=latest_ratio,
                zscore=zscore,
                stress_level=stress,
                description=(
                    f"Vol ratio: {latest_ratio:.2f} "
                    f"(20d: {latest_fast:.1f}%, 252d: {latest_slow:.1f}%), "
                    f"z={zscore:+.1f}"
                ),
                interpretation=self._interpret(stress, latest_ratio, zscore, accel)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        ratio: float,
        zscore: float,
        accel: float
    ) -> str:
        """Translate the stress level into a human-readable verdict."""
        if stress_level > 75:
            if accel > 0.05:
                return f"🔴 SHIFT: Regime shifting to high-vol (ratio: {ratio:.2f})"
            return f"🔴 HIGH-VOL: High volatility regime (ratio: {ratio:.2f})"
        if stress_level > 60:
            return "⚠️ ELEVATED: Volatility regime changing"
        if stress_level < 40:
            return "✅ STABLE: Low volatility regime"
        return "⚡ WATCH: Volatility moderately elevated"
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
class DrawdownIndicator(BaseIndicator):
    """
    Maximum drawdown monitor.

    Tracks the current drawdown from the running (all-time within the
    sample) peak. Large drawdowns = tail risk materialized.

    Signal construction:
    - Calculate running maximum
    - Current drawdown from peak (magnitude and duration)
    - Deeper drawdown = higher stress

    Required data:
    - SPY: S&P 500
    """

    def __init__(self):
        super().__init__(
            name="Market Drawdown",
            category=IndicatorCategory.TAIL_RISK,
            description="Drawdown from recent peak",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute drawdown signal.

        Returns None when SPY data is missing/too short or the
        calculation raises; errors are logged, never propagated.
        """
        if not self._validate_data(data, ["SPY"]):
            return None

        try:
            spy = data["SPY"].data

            if len(spy) < self.zscore_window:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # Running maximum (peak-so-far) of the price series.
            running_max = spy.expanding().max()

            # Drawdown in percent: 0 at a new peak, negative below it.
            drawdown = (spy / running_max - 1) * 100

            current_dd = float(drawdown.iloc[-1])

            # Days spent in the current drawdown.
            if current_dd < 0:
                # BUGFIX: the previous code used the series' *last* index
                # (always "today") as the peak date, so the reported
                # duration was always 0. The peak is the most recent date
                # on which the price touched its running maximum.
                peak_value = running_max.iloc[-1]
                peak_idx = spy[spy == peak_value].index[-1]
                # NOTE(review): assumes a DatetimeIndex (`.days` on the
                # difference) — confirm with the data loader.
                days_in_dd = (spy.index[-1] - peak_idx).days
            else:
                days_in_dd = 0

            # Z-score of drawdown magnitude. Deeper drawdown is more
            # negative, so flip the sign to express it as stress.
            dd_zscore = self.calculate_zscore(drawdown)
            stress_zscore = -dd_zscore

            stress_level = self.normalize_to_level(stress_zscore)

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=current_dd,
                zscore=stress_zscore,
                stress_level=stress_level,
                description=(
                    f"Drawdown: {current_dd:.1f}% "
                    f"(z={stress_zscore:+.1f}), "
                    f"Duration: {days_in_dd} days"
                ),
                interpretation=self._interpret(stress_level, current_dd, days_in_dd)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        drawdown: float,
        duration: int
    ) -> str:
        """Generate interpretation."""
        if stress_level > 80:
            return f"🔴 SEVERE: Deep drawdown ({drawdown:.1f}%, {duration}d)"
        elif stress_level > 65:
            return f"⚠️ ELEVATED: Significant drawdown ({drawdown:.1f}%)"
        elif stress_level < 35:
            if drawdown >= -2:
                return "✅ AT PEAK: Near all-time highs"
            else:
                return "✅ SHALLOW: Minor pullback"
        else:
            return f"⚡ WATCH: Moderate drawdown ({drawdown:.1f}%)"
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
class SkewIndicator(BaseIndicator):
    """
    Return skewness monitor.

    Measure return distribution skewness.
    Negative skew = left-tail risk (crash risk).

    Signal construction:
    - Calculate rolling 60-day return skewness
    - Negative skew = tail risk
    - Monitor for increasingly negative skew

    Required data:
    - SPY: S&P 500
    """

    def __init__(self):
        super().__init__(
            name="Return Skewness",
            category=IndicatorCategory.TAIL_RISK,
            description="Return distribution skew (crash risk)",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute skewness signal."""
        if not self._validate_data(data, ["SPY"]):
            return None

        try:
            prices = data["SPY"].data

            # Need a full z-score window of skew points on top of the
            # 60-day rolling window used to compute each point.
            if len(prices) < self.zscore_window + 60:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            daily_returns = prices.pct_change().dropna() * 100
            rolling_skew = daily_returns.rolling(60).skew().dropna()

            if len(rolling_skew) < self.zscore_window:
                return None

            # Negative skew means crash risk, so flip the sign: more
            # negative skew maps to a higher (positive) stress z-score.
            stress_zscore = -self.calculate_zscore(rolling_skew)
            stress_level = self.normalize_to_level(stress_zscore)
            latest_skew = float(rolling_skew.iloc[-1])

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=latest_skew,
                zscore=stress_zscore,
                stress_level=stress_level,
                description=(
                    f"60d skew: {latest_skew:+.2f} "
                    f"(stress_z={stress_zscore:+.1f})"
                ),
                interpretation=self._interpret(stress_level, latest_skew, stress_zscore)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        skew: float,
        zscore: float
    ) -> str:
        """Generate interpretation."""
        if stress_level > 70:
            if skew < -1:
                return f"🔴 SEVERE: Highly negative skew (crash risk, {skew:+.2f})"
            return "🔴 ELEVATED: Distribution tail risk increasing"
        if stress_level > 55:
            return f"⚠️ WARNING: Negative skew building ({skew:+.2f})"
        if stress_level < 40:
            if skew > 0:
                return f"✅ POSITIVE: Upside skew ({skew:+.2f})"
            return "✅ NORMAL: Moderate skew"
        return "⚡ WATCH: Skew moderately negative"
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
class TailEventIndicator(BaseIndicator):
    """
    Tail event detector.

    Monitor for extreme daily moves (>2 std dev).
    Frequency of tail events = market fragility.

    Signal construction:
    - Count tail events in rolling window
    - Compare to historical frequency
    - Rising frequency = fragility

    Required data:
    - SPY: S&P 500
    """

    def __init__(self):
        super().__init__(
            name="Tail Event Frequency",
            category=IndicatorCategory.TAIL_RISK,
            description="Extreme move frequency",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute tail event frequency."""
        if not self._validate_data(data, ["SPY"]):
            return None

        try:
            prices = data["SPY"].data

            # Need the 60-day counting window plus a full z-score window.
            if len(prices) < self.zscore_window + 60:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # Daily percentage returns.
            rets = prices.pct_change().dropna() * 100

            # A "tail event" is any day whose absolute move exceeds
            # twice the trailing 60-day standard deviation.
            sigma60 = rets.rolling(60).std()
            is_tail = (rets.abs() > 2 * sigma60).astype(int)

            # Rolling 60-day count of tail events.
            window_counts = is_tail.rolling(60).sum().dropna()

            if len(window_counts) < self.zscore_window:
                return None

            # How unusual is the current frequency vs history?
            freq_zscore = self.calculate_zscore(window_counts)
            stress_level = self.normalize_to_level(freq_zscore)

            latest_count = int(window_counts.iloc[-1])
            recent_count = is_tail.iloc[-20:].sum()

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=float(latest_count),
                zscore=freq_zscore,
                stress_level=stress_level,
                description=(
                    f"Tail events (60d): {latest_count} "
                    f"(z={freq_zscore:+.1f}), "
                    f"Recent 20d: {recent_count}"
                ),
                interpretation=self._interpret(stress_level, latest_count, recent_count, freq_zscore)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        count: int,
        recent: int,
        zscore: float
    ) -> str:
        """Generate interpretation."""
        if stress_level > 75:
            return f"🔴 FRAGILE: High tail event frequency ({count} in 60d, {recent} recent)"
        if stress_level > 60:
            return f"⚠️ ELEVATED: Increasing tail events ({count} in 60d)"
        if stress_level < 40:
            return "✅ STABLE: Low tail event frequency"
        return f"⚡ WATCH: Moderate tail events ({count} in 60d)"
|
src/core/macroeconomics/markets/risk_engine/indicators/volatility.py
ADDED
|
@@ -0,0 +1,405 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Volatility indicators for market risk detection.
|
| 3 |
+
|
| 4 |
+
Monitors fear gauges, volatility term structure, and volatility of volatility.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from typing import Dict, Optional
|
| 8 |
+
import pandas as pd
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
from src.core.macroeconomics.markets.risk_engine.models import (
|
| 12 |
+
IndicatorSignal,
|
| 13 |
+
IndicatorCategory,
|
| 14 |
+
TimeSeriesData
|
| 15 |
+
)
|
| 16 |
+
from src.core.macroeconomics.markets.risk_engine.indicators.base import BaseIndicator
|
| 17 |
+
from src.telegram_bot.logger import main_logger as logger
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class VIXStressIndicator(BaseIndicator):
    """
    Detect VIX rising without SPY falling (hidden stress).

    Classic stress signal: VIX spikes while equity markets are stable.
    Suggests fear building before visible market decline.

    Signal construction:
    - Calculate VIX z-score over 252-day window
    - Calculate SPY return over 20 days
    - If VIX elevated but SPY not falling → stress building

    Required data:
    - ^VIX: CBOE Volatility Index
    - SPY: S&P 500 ETF
    """

    def __init__(self):
        super().__init__(
            name="VIX Stress",
            category=IndicatorCategory.VOLATILITY,
            description="VIX rising without equity decline (hidden stress)",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute VIX stress signal.

        Returns None when required series are missing, too short, or the
        calculation raises.
        """
        required = ["^VIX", "SPY"]
        if not self._validate_data(data, required):
            return None

        try:
            vix_data = data["^VIX"].data
            spy_data = data["SPY"].data

            # Align dates (keep only days present in both series).
            combined = pd.DataFrame({
                'vix': vix_data,
                'spy': spy_data
            }).dropna()

            if len(combined) < self.zscore_window:
                logger.warning(
                    f"{self.name}: Insufficient data "
                    f"({len(combined)} < {self.zscore_window})"
                )
                return None

            # VIX z-score (how elevated is VIX?)
            vix_zscore = self.calculate_zscore(combined['vix'])

            # SPY 20-day return (is equity actually falling?)
            spy_return_20d = (combined['spy'].iloc[-1] / combined['spy'].iloc[-21] - 1) * 100

            # Stress signal: VIX elevated but SPY not falling
            # If VIX > +1 std and SPY > -5% = hidden stress
            if vix_zscore > 1.0 and spy_return_20d > -5.0:
                # Amplify signal based on disconnect (0-1 scale).
                # FIX: clamp to 1.0 — without the cap, a strong SPY rally
                # (>+5% in 20d) pushed the factor above 1 and amplified
                # stress by more than the intended 50% maximum.
                disconnect_factor = min((spy_return_20d + 5.0) / 10.0, 1.0)
                stress_level = self.normalize_to_level(vix_zscore) * (1 + disconnect_factor * 0.5)
                stress_level = min(stress_level, 100.0)
            else:
                # Normal regime: just use VIX z-score
                stress_level = self.normalize_to_level(vix_zscore)

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=float(vix_data.iloc[-1]),
                zscore=vix_zscore,
                stress_level=stress_level,
                description=(
                    f"VIX at {vix_data.iloc[-1]:.1f} "
                    f"(z={vix_zscore:+.1f}), "
                    f"SPY 20d: {spy_return_20d:+.1f}%"
                ),
                interpretation=self._interpret(stress_level, vix_zscore, spy_return_20d)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(self, stress_level: float, vix_zscore: float, spy_return: float) -> str:
        """Generate human-readable interpretation."""
        if stress_level > 75:
            # Disconnect case: fear rising while equities are still up.
            if spy_return > 0:
                return "⚠️ SEVERE: VIX spiking while equities rally (major disconnect)"
            else:
                return "🔴 EXTREME: VIX at extreme levels with equity weakness"
        elif stress_level > 60:
            return "⚠️ ELEVATED: Fear gauge rising, monitor for deterioration"
        elif stress_level < 40:
            return "✅ CALM: VIX at normal levels"
        else:
            return "⚡ WATCH: VIX moderately elevated"
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class VIXTermStructureIndicator(BaseIndicator):
    """
    Detect VIX term structure inversion (backwardation).

    Normal market: VIX term structure in contango (VIX < VXV)
    Stress market: Backwardation (VIX > VXV) suggests imminent fear

    Signal construction:
    - Calculate VIX/VXV ratio
    - Ratio > 1.0 = backwardation (stress)
    - Z-score of ratio over time

    Required data:
    - VIXCLS: VIX from FRED (1-month implied vol)
    - VXV or 3-month VIX proxy

    Note: If VXV unavailable, we use VIX acceleration as proxy
    """

    def __init__(self):
        # zscore_window=252 ≈ one trading year of history for normalization.
        super().__init__(
            name="VIX Term Structure",
            category=IndicatorCategory.VOLATILITY,
            description="VIX term structure inversion (backwardation)",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute term structure signal.

        Returns an IndicatorSignal whose value is the latest 5-period VIX
        acceleration (proxy for backwardation pressure), or None when data
        is missing/too short or the calculation raises.
        """
        # Try VIXCLS from FRED first
        if "VIXCLS" not in data or len(data["VIXCLS"].data) == 0:
            logger.warning(f"{self.name}: VIXCLS not available")
            return None

        try:
            vix_data = data["VIXCLS"].data

            if len(vix_data) < self.zscore_window:
                logger.warning(
                    f"{self.name}: Insufficient data "
                    f"({len(vix_data)} < {self.zscore_window})"
                )
                return None

            # Calculate VIX acceleration as term structure proxy
            # Rising VIX suggests near-term fear > long-term fear
            vix_acceleration = self.calculate_acceleration(vix_data, periods=5)

            # Z-score of acceleration.
            # Builds a historical acceleration series by recomputing on a
            # growing prefix for each day from zscore_window onward; the
            # index slice [zscore_window-1:] lines each value up with the
            # last date of its prefix.
            # NOTE(review): this is O(n) helper calls over growing slices
            # (O(n^2) overall); presumably calculate_acceleration only
            # uses the tail of its input, in which case a fixed-width
            # slice would be equivalent and much cheaper — verify against
            # BaseIndicator before changing.
            accel_series = pd.Series([
                self.calculate_acceleration(vix_data.iloc[:i], periods=5)
                for i in range(self.zscore_window, len(vix_data) + 1)
            ], index=vix_data.iloc[self.zscore_window-1:].index)

            # Require a minimal history of acceleration values to z-score.
            if len(accel_series) < 20:
                return None

            accel_zscore = self.calculate_zscore(accel_series)

            # Positive acceleration = backwardation signal
            stress_level = self.normalize_to_level(accel_zscore)

            # Current VIX level
            current_vix = float(vix_data.iloc[-1])

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=vix_acceleration,
                zscore=accel_zscore,
                stress_level=stress_level,
                description=(
                    f"VIX at {current_vix:.1f}, "
                    f"acceleration: {vix_acceleration:+.2f} "
                    f"(z={accel_zscore:+.1f})"
                ),
                interpretation=self._interpret(stress_level, accel_zscore, current_vix)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(self, stress_level: float, accel_zscore: float, vix: float) -> str:
        """Generate interpretation.

        Thresholds mirror the other volatility indicators in this module;
        the accel_zscore and vix arguments are currently unused here.
        """
        if stress_level > 75:
            return "🔴 BACKWARDATION: VIX surging (near-term fear spike)"
        elif stress_level > 60:
            return "⚠️ FLATTENING: Term structure compressing"
        elif stress_level < 40:
            return "✅ CONTANGO: Normal term structure (stable)"
        else:
            return "⚡ WATCH: Term structure shifting"
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
class VVIXIndicator(BaseIndicator):
    """
    Volatility of volatility indicator.

    VVIX measures expected volatility of VIX itself.
    Rising VVIX = uncertainty about uncertainty = regime shift risk

    Often spikes before major market dislocations.

    Signal construction:
    - Calculate VVIX z-score
    - Calculate VVIX/VIX ratio (normalized vol-of-vol)

    Required data:
    - ^VVIX: CBOE VVIX Index
    - ^VIX: VIX for normalization
    """

    def __init__(self):
        super().__init__(
            name="VVIX (Vol-of-Vol)",
            category=IndicatorCategory.VOLATILITY,
            description="Volatility of volatility (uncertainty squared)",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute VVIX signal."""
        if not self._validate_data(data, ["^VVIX", "^VIX"]):
            return None

        try:
            vvix_series = data["^VVIX"].data
            vix_series = data["^VIX"].data

            # Join on common dates so the ratio is computed per-day.
            aligned = pd.DataFrame({
                'vvix': vvix_series,
                'vix': vix_series
            }).dropna()

            if len(aligned) < self.zscore_window:
                logger.warning(
                    f"{self.name}: Insufficient data "
                    f"({len(aligned)} < {self.zscore_window})"
                )
                return None

            # Absolute vol-of-vol level.
            vvix_zscore = self.calculate_zscore(aligned['vvix'])

            # Vol-of-vol relative to vol itself (normalized).
            vvix_vix_ratio = aligned['vvix'] / aligned['vix']
            ratio_zscore = self.calculate_zscore(vvix_vix_ratio)

            # Blend the absolute and relative views with equal weight.
            combined_zscore = (vvix_zscore + ratio_zscore) / 2
            stress_level = self.normalize_to_level(combined_zscore)

            latest_vvix = float(vvix_series.iloc[-1])
            latest_vix = float(vix_series.iloc[-1])
            latest_ratio = latest_vvix / latest_vix

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=latest_vvix,
                zscore=combined_zscore,
                stress_level=stress_level,
                description=(
                    f"VVIX at {latest_vvix:.1f} "
                    f"(z={vvix_zscore:+.1f}), "
                    f"VVIX/VIX ratio: {latest_ratio:.2f} "
                    f"(z={ratio_zscore:+.1f})"
                ),
                interpretation=self._interpret(stress_level, vvix_zscore, ratio_zscore)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        vvix_zscore: float,
        ratio_zscore: float
    ) -> str:
        """Generate interpretation."""
        if stress_level > 75:
            return "🔴 EXTREME: Uncertainty about uncertainty (regime shift risk)"
        if stress_level > 60:
            # Ratio outpacing level = vol-of-vol rising faster than vol.
            if ratio_zscore > vvix_zscore:
                return "⚠️ ELEVATED: Vol-of-vol rising faster than vol (instability)"
            return "⚠️ ELEVATED: High volatility uncertainty"
        if stress_level < 40:
            return "✅ STABLE: Low volatility uncertainty"
        return "⚡ WATCH: Volatility uncertainty increasing"
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
class RealizedVolatilityIndicator(BaseIndicator):
    """
    Realized volatility spike detector.

    Measures actual market volatility (not implied like VIX).
    Compares current realized vol to historical levels.

    Signal construction:
    - Calculate 20-day realized volatility from SPY returns
    - Z-score vs historical realized vol

    Required data:
    - SPY: S&P 500 ETF for realized vol calculation
    """

    def __init__(self):
        super().__init__(
            name="Realized Volatility",
            category=IndicatorCategory.VOLATILITY,
            description="Actual market volatility (20-day realized)",
            zscore_window=252
        )

    def compute(self, data: Dict[str, TimeSeriesData]) -> Optional[IndicatorSignal]:
        """Compute realized volatility signal."""
        if not self._validate_data(data, ["SPY"]):
            return None

        try:
            prices = data["SPY"].data

            # Need the 20-day vol window plus a full z-score window.
            if len(prices) < self.zscore_window + 20:
                logger.warning(f"{self.name}: Insufficient data")
                return None

            # Log returns, then 20-day realized vol annualized to percent.
            log_rets = np.log(prices / prices.shift(1)).dropna()
            rvol = (log_rets.rolling(20).std() * np.sqrt(252) * 100).dropna()

            if len(rvol) < self.zscore_window:
                return None

            # How unusual is today's realized vol vs the past year?
            rvol_zscore = self.calculate_zscore(rvol)
            stress_level = self.normalize_to_level(rvol_zscore)

            latest_rvol = float(rvol.iloc[-1])

            # Is volatility itself speeding up or cooling off?
            rvol_accel = self.calculate_acceleration(rvol, periods=5)

            return IndicatorSignal(
                indicator_name=self.name,
                category=self.category,
                value=latest_rvol,
                zscore=rvol_zscore,
                stress_level=stress_level,
                description=(
                    f"Realized vol: {latest_rvol:.1f}% "
                    f"(z={rvol_zscore:+.1f}), "
                    f"acceleration: {rvol_accel:+.2f}"
                ),
                interpretation=self._interpret(stress_level, rvol_zscore, rvol_accel)
            )

        except Exception as e:
            logger.error(f"{self.name} calculation failed: {e}")
            return None

    def _interpret(
        self,
        stress_level: float,
        rvol_zscore: float,
        acceleration: float
    ) -> str:
        """Generate interpretation."""
        if stress_level > 75:
            if acceleration > 0:
                return "🔴 EXTREME: Volatility surging (accelerating)"
            return "🔴 ELEVATED: High volatility (but decelerating)"
        if stress_level > 60:
            return "⚠️ ELEVATED: Above-normal market volatility"
        if stress_level < 40:
            return "✅ CALM: Low realized volatility"
        return "⚡ WATCH: Volatility moderately elevated"
|