Dmitry Beresnev committed on
Commit
2865657
·
1 Parent(s): 2212d00

add valuation engine

Browse files
.gitignore CHANGED
@@ -33,4 +33,6 @@ src/core/trading/docs/
33
  trades.db
34
  *.db
35
  *.png
36
- *.jpg
 
 
 
33
  trades.db
34
  *.db
35
  *.png
36
+ *.jpg
37
+ #
38
+ *.md
examples/valuation_example.py ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Example usage of the Valuation Engine
3
+
4
+ This demonstrates how to use the comprehensive valuation engine to value stocks
5
+ using multiple models (DCF, DDM, P/E Multiple, etc.)
6
+ """
7
+
8
+ import asyncio
9
+ from src.core.valuation_engine import ValuationEngine, ValuationAssumptions, DCFAssumptions
10
+
11
+
12
async def basic_valuation_example():
    """Run a full multi-model valuation for a single ticker with default assumptions.

    Demonstrates the async context-manager API of ValuationEngine and how to
    read both the aggregated result and the per-model breakdown.
    """
    print("=" * 60)
    print("BASIC VALUATION EXAMPLE")
    print("=" * 60)

    ticker = "AAPL"

    async with ValuationEngine() as engine:
        # Run comprehensive valuation
        result = await engine.value_stock(ticker)

        # Print results
        print(f"\n{'='*60}")
        print(f"VALUATION RESULTS FOR {ticker}")
        print(f"{'='*60}")
        print(f"\nCurrent Price: ${result.current_price:.2f}")

        # NOTE(review): truthiness check — a fair value of exactly 0.0 would be
        # skipped; presumably the engine returns None when no value is available.
        if result.weighted_fair_value:
            print(f"Weighted Fair Value: ${result.weighted_fair_value:.2f}")
            print(f"Upside/Downside: {result.upside_downside:+.1f}%")

        print(f"\nFair Value Range:")
        print(f" Pessimistic (P10): ${result.percentile_10:.2f}")
        print(f" Base Case (P50): ${result.percentile_50:.2f}")
        print(f" Optimistic (P90): ${result.percentile_90:.2f}")

        print(f"\nModels Used: {result.valid_models_count}")
        print(f"Total Confidence: {result.total_confidence:.2f}")

        print(f"\n{'='*60}")
        print("INDIVIDUAL MODEL RESULTS")
        print(f"{'='*60}")

        # model_results maps each ValuationModel enum member to its ValuationResult
        for model, valuation_result in result.model_results.items():
            if valuation_result.is_valid:
                print(f"\n{model.value.upper()}:")
                print(f" Fair Value: ${valuation_result.fair_value_per_share:.2f}")
                print(f" Confidence: {valuation_result.confidence:.2f}")
                if valuation_result.assumptions:
                    print(f" Assumptions: {valuation_result.assumptions}")
            else:
                print(f"\n{model.value.upper()}: FAILED")
                print(f" Error: {valuation_result.error}")
56
+
57
+
58
async def custom_assumptions_example():
    """Run a valuation with user-supplied assumptions instead of engine defaults.

    Shows how ValuationAssumptions / DCFAssumptions override growth, terminal
    growth, projection horizon, and CAPM inputs (risk-free rate, market return).
    """
    print("\n\n" + "=" * 60)
    print("CUSTOM ASSUMPTIONS EXAMPLE")
    print("=" * 60)

    ticker = "MSFT"

    # Create custom assumptions
    assumptions = ValuationAssumptions(
        dcf=DCFAssumptions(
            growth_rate=0.08,  # 8% growth
            terminal_growth_rate=0.03,  # 3% terminal growth
            projection_years=7
        ),
        risk_free_rate=0.045,  # 4.5% risk-free rate
        market_return=0.11  # 11% expected market return
    )

    async with ValuationEngine() as engine:
        # The assumptions object is passed per-call, not per-engine
        result = await engine.value_stock(ticker, assumptions=assumptions)

        print(f"\nValuation for {ticker} with custom assumptions:")
        print(f" Current Price: ${result.current_price:.2f}")
        if result.weighted_fair_value:
            print(f" Fair Value: ${result.weighted_fair_value:.2f}")
            print(f" Upside/Downside: {result.upside_downside:+.1f}%")
85
+
86
+
87
async def multiple_stocks_example():
    """Value multiple stocks concurrently and print a summary table.

    Demonstrates fanning out engine.value_stock calls with asyncio.gather;
    results come back in the same order as the input tickers.
    """
    print("\n\n" + "=" * 60)
    print("MULTIPLE STOCKS EXAMPLE")
    print("=" * 60)

    tickers = ["AAPL", "MSFT", "JNJ"]

    async with ValuationEngine() as engine:
        # Run valuations in parallel
        tasks = [engine.value_stock(ticker) for ticker in tickers]
        results = await asyncio.gather(*tasks)

        # Print summary
        print(f"\n{'Ticker':<10} {'Current':<12} {'Fair Value':<12} {'Upside/Downside':<15} {'Models'}")
        print("-" * 65)

        for ticker, result in zip(tickers, results):
            # Use explicit None checks: 0.0 is a legitimate value (a fairly
            # valued stock has 0.0% upside) and must not be shown as "N/A",
            # which a plain truthiness test would do.
            upside_str = f"{result.upside_downside:+.1f}%" if result.upside_downside is not None else "N/A"
            fair_value_str = f"${result.weighted_fair_value:.2f}" if result.weighted_fair_value is not None else "N/A"

            print(f"{ticker:<10} ${result.current_price:<11.2f} {fair_value_str:<12} {upside_str:<15} {result.valid_models_count}")
109
+
110
+
111
async def quick_valuation_example():
    """Fetch only the headline weighted fair value for a ticker.

    Uses engine.get_quick_valuation, which returns just the weighted fair
    value (faster than a full value_stock call) or a falsy result when the
    value cannot be calculated.
    """
    print("\n\n" + "=" * 60)
    print("QUICK VALUATION EXAMPLE")
    print("=" * 60)

    ticker = "GOOGL"

    async with ValuationEngine() as engine:
        # Get just the weighted fair value (faster)
        fair_value = await engine.get_quick_valuation(ticker)

        print(f"\nQuick valuation for {ticker}:")
        if fair_value:
            print(f" Fair Value: ${fair_value:.2f}")
        else:
            print(f" Could not calculate fair value")
128
+
129
+
130
async def dividend_stock_example():
    """Value a dividend-paying stock and inspect the DDM model's contribution.

    For dividend payers the Dividend Discount Model should appear in
    result.model_results; this example drills into its fair value,
    confidence, and assumed dividend growth rate.
    """
    print("\n\n" + "=" * 60)
    print("DIVIDEND STOCK EXAMPLE")
    print("=" * 60)

    ticker = "JNJ"  # Johnson & Johnson - dividend aristocrat

    async with ValuationEngine() as engine:
        result = await engine.value_stock(ticker)

        print(f"\nValuation for dividend stock {ticker}:")
        print(f" Current Price: ${result.current_price:.2f}")

        if result.weighted_fair_value:
            print(f" Fair Value: ${result.weighted_fair_value:.2f}")
            print(f" Upside/Downside: {result.upside_downside:+.1f}%")

        # Check if DDM was used (local import avoids a module-level dependency
        # for an example-only lookup)
        from src.core.valuation_engine.core.models import ValuationModel
        if ValuationModel.DDM in result.model_results:
            ddm_result = result.model_results[ValuationModel.DDM]
            if ddm_result.is_valid:
                print(f"\n DDM Model:")
                print(f" Fair Value: ${ddm_result.fair_value_per_share:.2f}")
                print(f" Confidence: {ddm_result.confidence:.2f}")
                # assumptions presumably stores the rate as a fraction (0.05 = 5%) — scaled to %
                print(f" Dividend Growth: {ddm_result.assumptions.get('dividend_growth_rate', 0)*100:.1f}%")
157
+
158
+
159
async def main():
    """Run all example scenarios sequentially.

    Each example opens its own ValuationEngine context and performs its own
    data fetches, so they are awaited one after another rather than gathered.
    """
    print("\n" + "=" * 60)
    print("VALUATION ENGINE - EXAMPLE USAGE")
    print("=" * 60)

    # Run examples
    await basic_valuation_example()
    await custom_assumptions_example()
    await multiple_stocks_example()
    await quick_valuation_example()
    await dividend_stock_example()

    print("\n" + "=" * 60)
    print("EXAMPLES COMPLETE")
    print("=" * 60)
175
+
176
+
177
# Script entry point: drive the async examples through a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())
src/core/ticker_scanner/growth_speed_analyzer.py CHANGED
@@ -118,81 +118,27 @@ from sklearn.linear_model import LinearRegression
118
 
119
  from src.core.ticker_scanner.core_enums import GrowthCategory
120
  from src.core.ticker_scanner.growth_metrics import GrowthSpeedMetrics
 
121
  from src.telegram_bot.logger import main_logger as logger
122
 
123
 
124
- # Timeframe configuration with metric weights and thresholds for different analysis windows
125
- TIMEFRAME_CONFIG = {
126
- "1d": {
127
- "weights": {"cmgr": 0.25, "cagr": 0.0, "roc": 0.25, "momentum": 0.20, "r_squared": 0.15, "sharpe": 0.15},
128
- "indicator_bonuses": {"rsi": 0.05, "price_position": 0.03, "support_distance": 0.02},
129
- "thresholds": {"velocity_cap": 100, "roc_threshold": 2.0},
130
- "volatility_penalty": 3.0, # Volatility threshold (% daily). Above = penalize. Set to 0 to disable.
131
- "smoothness_bonus": 0.10, # Bonus (0-1) for consistent growth: reward r_squared * bonus if vol < threshold
132
- "description": "Intraday - High velocity, short-term momentum focus. CMGR emphasis for monthly growth context. Accepts up to 3% daily volatility"
133
- },
134
- "5d": {
135
- "weights": {"cmgr": 0.25, "cagr": 0.0, "roc": 0.30, "momentum": 0.25, "r_squared": 0.15, "sharpe": 0.05},
136
- "indicator_bonuses": {"rsi": 0.05, "price_position": 0.03, "support_distance": 0.02},
137
- "thresholds": {"velocity_cap": 90, "roc_threshold": 1.5},
138
- "volatility_penalty": 2.5, # Volatility threshold (% daily). Above = penalize. Set to 0 to disable.
139
- "smoothness_bonus": 0.15, # Bonus (0-1) for consistent growth: reward r_squared * bonus if vol < threshold
140
- "description": "Weekly - CMGR and momentum emphasis with CAGR context. Accepts up to 2.5% daily volatility"
141
- },
142
- "1mo": {
143
- "weights": {"cmgr": 0.35, "cagr": 0.0, "roc": 0.25, "momentum": 0.25, "r_squared": 0.10, "sharpe": 0.05},
144
- "indicator_bonuses": {"rsi": 0.05, "price_position": 0.03, "support_distance": 0.02},
145
- "thresholds": {"velocity_cap": 85, "roc_threshold": 1.2},
146
- "volatility_penalty": 2.0, # Volatility threshold (% daily). Above = penalize. Set to 0 to disable.
147
- "smoothness_bonus": 0.20, # Bonus (0-1) for consistent growth: reward r_squared * bonus if vol < threshold
148
- "description": "Monthly - CMGR primary metric with CAGR component. Accepts up to 2% daily volatility"
149
- },
150
- "3mo": {
151
- "weights": {"cmgr": 0.40, "cagr": 0.0, "roc": 0.20, "momentum": 0.25, "r_squared": 0.10, "sharpe": 0.05},
152
- "indicator_bonuses": {"rsi": 0.05, "price_position": 0.03, "support_distance": 0.02},
153
- "thresholds": {"velocity_cap": 80, "roc_threshold": 0.8},
154
- "volatility_penalty": 1.5, # Volatility threshold (% daily). Above = penalize. Set to 0 to disable.
155
- "smoothness_bonus": 0.20, # Bonus (0-1) for consistent growth: reward r_squared * bonus if vol < threshold
156
- "description": "Quarterly - CMGR and CAGR emphasis for sustainable growth. Accepts up to 1.5% daily volatility"
157
- },
158
- "6mo": {
159
- "weights": {"cagr": 0.35, "r_squared": 0.20, "acceleration": 0.20, "sharpe": 0.15, "momentum": 0.10},
160
- "indicator_bonuses": {"rsi": 0.03, "price_position": 0.02, "support_distance": 0.01},
161
- "thresholds": {"velocity_cap": 80, "roc_threshold": 0.5},
162
- "volatility_penalty": 1.0, # Volatility threshold (% daily). Above = penalize. Set to 0 to disable.
163
- "smoothness_bonus": 0.0, # No smoothness bonus for medium-term (already has high r_squared weight)
164
- "description": "Half-yearly - CAGR and stability focus. Accepts up to 1% daily volatility"
165
- },
166
- "1y": {
167
- "weights": {"cagr": 0.40, "r_squared": 0.25, "acceleration": 0.20, "sharpe": 0.15, "momentum": 0.00},
168
- "indicator_bonuses": {"rsi": 0.02, "price_position": 0.01, "support_distance": 0.00},
169
- "thresholds": {"velocity_cap": 80, "roc_threshold": 0.3},
170
- "volatility_penalty": 0, # No penalty for long-term (Sharpe ratio already accounts for vol)
171
- "smoothness_bonus": 0.0, # No smoothness bonus for long-term (already has high r_squared weight)
172
- "description": "Yearly - Long-term stability and consistency. Growth stability emphasis"
173
- },
174
- "2y": {
175
- "weights": {"cagr": 0.40, "r_squared": 0.25, "acceleration": 0.20, "sharpe": 0.15, "momentum": 0.00},
176
- "indicator_bonuses": {"rsi": 0.02, "price_position": 0.01, "support_distance": 0.00},
177
- "thresholds": {"velocity_cap": 80, "roc_threshold": 0.25},
178
- "volatility_penalty": 0, # No penalty for long-term (Sharpe ratio already accounts for vol)
179
- "smoothness_bonus": 0.0, # No smoothness bonus for long-term (already has high r_squared weight)
180
- "description": "2-year - Long-term consistency"
181
- },
182
- "5y": {
183
- "weights": {"cagr": 0.40, "r_squared": 0.25, "acceleration": 0.20, "sharpe": 0.15, "momentum": 0.00},
184
- "indicator_bonuses": {"rsi": 0.02, "price_position": 0.01, "support_distance": 0.00},
185
- "thresholds": {"velocity_cap": 80, "roc_threshold": 0.2},
186
- "volatility_penalty": 0, # No penalty for long-term (Sharpe ratio already accounts for vol)
187
- "smoothness_bonus": 0.0, # No smoothness bonus for long-term (already has high r_squared weight)
188
- "description": "5-year - Long-term consistency"
189
- },
190
- }
191
-
192
-
193
  class GrowthSpeedAnalyzer:
194
  """Advanced growth velocity and acceleration analysis with timeframe awareness"""
195
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
196
  @staticmethod
197
  def analyze(prices: np.ndarray, dates: pd.DatetimeIndex, timeframe: str = "1y") -> GrowthSpeedMetrics:
198
  """Comprehensive growth speed analysis with timeframe-aware metrics"""
@@ -203,8 +149,8 @@ class GrowthSpeedAnalyzer:
203
 
204
  # Allow fewer data points (e.g., for 1d or 5d analysis)
205
  # We need at least 2 points to calculate a slope/return
206
- if len(prices) < 2:
207
- raise ValueError(f"Insufficient data points: {len(prices)} < 2")
208
 
209
  # Ensure prices is 1D
210
  if prices.ndim > 1:
@@ -247,22 +193,9 @@ class GrowthSpeedAnalyzer:
247
  r_squared = 0
248
 
249
  # 2. Compound growth metrics
250
- # Use total_seconds to handle intraday data (like 1d/5m) accurately
251
- if len(dates) > 1:
252
- years = (dates[-1] - dates[0]).total_seconds() / (365.25 * 24 * 3600)
253
- else:
254
- years = 0
255
-
256
  total_return = ((prices[-1] / prices[0]) - 1) * 100
257
-
258
- # Avoid division by zero for very short timeframes
259
- if years > 0:
260
- try:
261
- cagr = (pow(prices[-1] / prices[0], 1 / years) - 1) * 100
262
- except:
263
- cagr = 0
264
- else:
265
- cagr = 0
266
 
267
  # Calculate rolling CAGR for recent growth context
268
  # For short timeframes (1d-3mo), use 30-day window; for others use 90-day window
@@ -274,33 +207,13 @@ class GrowthSpeedAnalyzer:
274
 
275
  # 3. Acceleration (second derivative)
276
  returns = np.diff(np.log(prices))
 
277
  if len(returns) > 1:
278
- # Calculate annualization factor dynamically based on actual bar frequency
279
- # Avoid hardcoding assumptions about candle intervals (5m, 1h, 1d, etc)
280
- if len(dates) > 1:
281
- # Calculate average seconds between bars
282
- total_seconds = (dates[-1] - dates[0]).total_seconds()
283
- avg_seconds_per_bar = total_seconds / (len(dates) - 1)
284
-
285
- # For stocks/crypto trading: assume 252 trading days/year, 24/7 for crypto
286
- # Use 365.25 * 24 * 3600 seconds per year for continuous markets
287
- seconds_per_year = 365.25 * 24 * 3600
288
- factor = seconds_per_year / avg_seconds_per_bar
289
-
290
- # Cap factor to prevent extreme values from micro-intervals
291
- # Maximum reasonable annualization is ~252000 (1 second bars)
292
- factor = min(factor, 252000)
293
- else:
294
- factor = 252
295
-
296
  acceleration = np.polyfit(range(len(returns)), returns, 1)[0] * factor
297
- else:
298
- acceleration = 0
299
 
300
  # 4. Recent momentum (last 6 months vs overall)
301
- # fix for safe indexing
302
- six_months_ago = max(0, n - 126)
303
-
304
  recent_return = (prices[-1] / prices[six_months_ago] - 1) * 100 if six_months_ago < n else 0
305
  recent_momentum = recent_return - (total_return / years * 0.5) if years > 0 else 0
306
 
@@ -308,14 +221,15 @@ class GrowthSpeedAnalyzer:
308
  daily_returns = np.diff(prices) / prices[:-1]
309
 
310
  if len(daily_returns) > 0:
311
- mean_return = np.mean(daily_returns) * 252 # Annualized
312
- std_return = np.std(daily_returns) * np.sqrt(252) # Annualized
313
 
314
  sharpe_ratio = mean_return / std_return if std_return > 0 else 0
315
 
316
  # Sortino ratio (downside deviation)
317
  downside_returns = daily_returns[daily_returns < 0]
318
- downside_std = np.std(downside_returns) * np.sqrt(252) if len(downside_returns) > 0 else std_return
 
319
  sortino_ratio = mean_return / downside_std if downside_std > 0 else 0
320
  else:
321
  sharpe_ratio = sortino_ratio = 0
@@ -584,12 +498,12 @@ class GrowthSpeedAnalyzer:
584
  is_short_term = timeframe in ["1d", "5d", "1mo", "3mo"]
585
 
586
  # Normalize core metrics (shared between short and long-term)
587
- cmgr_norm = GrowthSpeedAnalyzer._normalize_metric(cmgr, 25) # Cap CMGR at 25% monthly
588
- cagr_norm = GrowthSpeedAnalyzer._normalize_metric(cagr, 50)
589
  r_squared_norm = max(r_squared, 0) # Already bounded [0,1]
590
  accel_norm = GrowthSpeedAnalyzer._normalize_metric(acceleration, 1, center=True)
591
- sharpe_norm = GrowthSpeedAnalyzer._normalize_metric(sharpe, 3)
592
- momentum_norm = GrowthSpeedAnalyzer._normalize_metric(momentum, 100, center=True)
593
 
594
  if is_short_term:
595
  # For timeframes <= 3 months: use ROC and momentum emphasis
@@ -602,7 +516,10 @@ class GrowthSpeedAnalyzer:
602
  # Penalize downtrends: if price is below starting price, apply severe penalty
603
  if total_return < 0:
604
  # Downtrend detected. Apply downtrend penalty proportional to magnitude
605
- downtrend_penalty = min(abs(total_return) / 10, 50) # Max 50% penalty for -100% loss
 
 
 
606
  downtrend_factor = 1 - (downtrend_penalty / 100)
607
  else:
608
  downtrend_factor = 1.0
@@ -685,6 +602,41 @@ class GrowthSpeedAnalyzer:
685
 
686
  return round(score, 2)
687
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
688
  @staticmethod
689
  def _categorize_growth(velocity_score: float) -> GrowthCategory:
690
  """Classify growth speed with dynamic thresholds"""
 
118
 
119
  from src.core.ticker_scanner.core_enums import GrowthCategory
120
  from src.core.ticker_scanner.growth_metrics import GrowthSpeedMetrics
121
+ from src.core.ticker_scanner.timeframe_config import TIMEFRAME_CONFIG
122
  from src.telegram_bot.logger import main_logger as logger
123
 
124
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
125
class GrowthSpeedAnalyzer:
    """Advanced growth velocity and acceleration analysis with timeframe awareness"""

    # Constants for metric calculations
    SECONDS_PER_YEAR = 365.25 * 24 * 3600  # calendar seconds per year (continuous markets)
    TRADING_DAYS_PER_YEAR = 252  # conventional equity trading-day count, used for annualization
    DAYS_PER_MONTH = 30.4375  # average calendar days per month (365.25 / 12)
    SIX_MONTHS_TRADING_DAYS = 126  # ~half of TRADING_DAYS_PER_YEAR, lookback for recent momentum
    MAX_ANNUALIZATION_FACTOR = 252000  # cap on the annualization factor (~1-second bars)
    MIN_DATA_POINTS = 2  # minimum prices needed to compute a slope/return
    CMGR_NORMALIZATION_CAP = 25  # cap CMGR at 25% monthly when normalizing
    CAGR_NORMALIZATION_CAP = 50  # cap CAGR at 50% when normalizing
    SHARPE_NORMALIZATION_CAP = 3.0  # cap Sharpe ratio when normalizing
    MOMENTUM_NORMALIZATION_RANGE = 100  # symmetric range for centered momentum normalization
    DOWNTREND_PENALTY_MAX = 50  # max % penalty (reached at -100% total return)
    DOWNTREND_PENALTY_DIVISOR = 10  # penalty = |total_return| / divisor, capped at the max
141
+
142
  @staticmethod
143
  def analyze(prices: np.ndarray, dates: pd.DatetimeIndex, timeframe: str = "1y") -> GrowthSpeedMetrics:
144
  """Comprehensive growth speed analysis with timeframe-aware metrics"""
 
149
 
150
  # Allow fewer data points (e.g., for 1d or 5d analysis)
151
  # We need at least 2 points to calculate a slope/return
152
+ if len(prices) < GrowthSpeedAnalyzer.MIN_DATA_POINTS:
153
+ raise ValueError(f"Insufficient data points: {len(prices)} < {GrowthSpeedAnalyzer.MIN_DATA_POINTS}")
154
 
155
  # Ensure prices is 1D
156
  if prices.ndim > 1:
 
193
  r_squared = 0
194
 
195
  # 2. Compound growth metrics
196
+ years = GrowthSpeedAnalyzer._calculate_years_from_dates(dates)
 
 
 
 
 
197
  total_return = ((prices[-1] / prices[0]) - 1) * 100
198
+ cagr = GrowthSpeedAnalyzer._calculate_cagr(prices, years)
 
 
 
 
 
 
 
 
199
 
200
  # Calculate rolling CAGR for recent growth context
201
  # For short timeframes (1d-3mo), use 30-day window; for others use 90-day window
 
207
 
208
  # 3. Acceleration (second derivative)
209
  returns = np.diff(np.log(prices))
210
+ acceleration = 0
211
  if len(returns) > 1:
212
+ factor = GrowthSpeedAnalyzer._calculate_annualization_factor(dates)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
213
  acceleration = np.polyfit(range(len(returns)), returns, 1)[0] * factor
 
 
214
 
215
  # 4. Recent momentum (last 6 months vs overall)
216
+ six_months_ago = max(0, n - GrowthSpeedAnalyzer.SIX_MONTHS_TRADING_DAYS)
 
 
217
  recent_return = (prices[-1] / prices[six_months_ago] - 1) * 100 if six_months_ago < n else 0
218
  recent_momentum = recent_return - (total_return / years * 0.5) if years > 0 else 0
219
 
 
221
  daily_returns = np.diff(prices) / prices[:-1]
222
 
223
  if len(daily_returns) > 0:
224
+ mean_return = np.mean(daily_returns) * GrowthSpeedAnalyzer.TRADING_DAYS_PER_YEAR
225
+ std_return = np.std(daily_returns) * np.sqrt(GrowthSpeedAnalyzer.TRADING_DAYS_PER_YEAR)
226
 
227
  sharpe_ratio = mean_return / std_return if std_return > 0 else 0
228
 
229
  # Sortino ratio (downside deviation)
230
  downside_returns = daily_returns[daily_returns < 0]
231
+ downside_std_factor = np.sqrt(GrowthSpeedAnalyzer.TRADING_DAYS_PER_YEAR)
232
+ downside_std = np.std(downside_returns) * downside_std_factor if len(downside_returns) > 0 else std_return
233
  sortino_ratio = mean_return / downside_std if downside_std > 0 else 0
234
  else:
235
  sharpe_ratio = sortino_ratio = 0
 
498
  is_short_term = timeframe in ["1d", "5d", "1mo", "3mo"]
499
 
500
  # Normalize core metrics (shared between short and long-term)
501
+ cmgr_norm = GrowthSpeedAnalyzer._normalize_metric(cmgr, GrowthSpeedAnalyzer.CMGR_NORMALIZATION_CAP)
502
+ cagr_norm = GrowthSpeedAnalyzer._normalize_metric(cagr, GrowthSpeedAnalyzer.CAGR_NORMALIZATION_CAP)
503
  r_squared_norm = max(r_squared, 0) # Already bounded [0,1]
504
  accel_norm = GrowthSpeedAnalyzer._normalize_metric(acceleration, 1, center=True)
505
+ sharpe_norm = GrowthSpeedAnalyzer._normalize_metric(sharpe, GrowthSpeedAnalyzer.SHARPE_NORMALIZATION_CAP)
506
+ momentum_norm = GrowthSpeedAnalyzer._normalize_metric(momentum, GrowthSpeedAnalyzer.MOMENTUM_NORMALIZATION_RANGE, center=True)
507
 
508
  if is_short_term:
509
  # For timeframes <= 3 months: use ROC and momentum emphasis
 
516
  # Penalize downtrends: if price is below starting price, apply severe penalty
517
  if total_return < 0:
518
  # Downtrend detected. Apply downtrend penalty proportional to magnitude
519
+ downtrend_penalty = min(
520
+ abs(total_return) / GrowthSpeedAnalyzer.DOWNTREND_PENALTY_DIVISOR,
521
+ GrowthSpeedAnalyzer.DOWNTREND_PENALTY_MAX
522
+ )
523
  downtrend_factor = 1 - (downtrend_penalty / 100)
524
  else:
525
  downtrend_factor = 1.0
 
602
 
603
  return round(score, 2)
604
 
605
+ @staticmethod
606
+ def _calculate_years_from_dates(dates: pd.DatetimeIndex) -> float:
607
+ """Calculate elapsed years from date index."""
608
+ if len(dates) < 2:
609
+ return 0
610
+ return (dates[-1] - dates[0]).total_seconds() / GrowthSpeedAnalyzer.SECONDS_PER_YEAR
611
+
612
+ @staticmethod
613
+ def _calculate_annualization_factor(dates: pd.DatetimeIndex) -> float:
614
+ """Calculate dynamic annualization factor based on bar frequency."""
615
+ if len(dates) < 2:
616
+ return GrowthSpeedAnalyzer.TRADING_DAYS_PER_YEAR
617
+
618
+ total_seconds = (dates[-1] - dates[0]).total_seconds()
619
+ avg_seconds_per_bar = total_seconds / (len(dates) - 1)
620
+ factor = GrowthSpeedAnalyzer.SECONDS_PER_YEAR / avg_seconds_per_bar
621
+ return min(factor, GrowthSpeedAnalyzer.MAX_ANNUALIZATION_FACTOR)
622
+
623
+ @staticmethod
624
+ def _calculate_r_squared(prices: np.ndarray, predictions: np.ndarray) -> float:
625
+ """Calculate R-squared goodness of fit."""
626
+ ss_res = np.sum((prices - predictions) ** 2)
627
+ ss_tot = np.sum((prices - np.mean(prices)) ** 2)
628
+ return 1 - (ss_res / ss_tot) if ss_tot > 0 else 0
629
+
630
+ @staticmethod
631
+ def _calculate_cagr(prices: np.ndarray, years: float) -> float:
632
+ """Calculate Compound Annual Growth Rate (CAGR)."""
633
+ if years <= 0 or len(prices) < 2:
634
+ return 0
635
+ try:
636
+ return (pow(prices[-1] / prices[0], 1 / years) - 1) * 100
637
+ except (ValueError, ZeroDivisionError):
638
+ return 0
639
+
640
  @staticmethod
641
  def _categorize_growth(velocity_score: float) -> GrowthCategory:
642
  """Classify growth speed with dynamic thresholds"""
src/core/ticker_scanner/timeframe_config.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Timeframe-specific configuration for growth speed analysis.
3
+
4
+ Defines metric weights, thresholds, and parameters for different analysis timeframes.
5
+ Each timeframe has customized weights to emphasize different aspects of growth analysis.
6
+ """
7
+
8
# Timeframe configuration with metric weights and thresholds for different analysis windows.
# Schema of each timeframe entry:
#   weights            - relative weight of each growth metric in the velocity score
#   indicator_bonuses  - additive score bonuses from technical indicators
#   thresholds         - velocity_cap and roc_threshold applied during scoring
#   volatility_penalty - daily-volatility threshold (%); above it the score is penalized (0 disables)
#   smoothness_bonus   - bonus (0-1) rewarding consistent growth (r_squared * bonus) when vol < threshold
#   description        - human-readable summary of the timeframe's emphasis
TIMEFRAME_CONFIG = {
    "1d": {
        "weights": {"cmgr": 0.25, "cagr": 0.0, "roc": 0.25, "momentum": 0.20, "r_squared": 0.15, "sharpe": 0.15},
        "indicator_bonuses": {"rsi": 0.05, "price_position": 0.03, "support_distance": 0.02},
        "thresholds": {"velocity_cap": 100, "roc_threshold": 2.0},
        "volatility_penalty": 3.0,  # Volatility threshold (% daily). Above = penalize. Set to 0 to disable.
        "smoothness_bonus": 0.10,  # Bonus (0-1) for consistent growth: reward r_squared * bonus if vol < threshold
        "description": "Intraday - High velocity, short-term momentum focus. CMGR emphasis for monthly growth context. Accepts up to 3% daily volatility"
    },
    "5d": {
        "weights": {"cmgr": 0.25, "cagr": 0.0, "roc": 0.30, "momentum": 0.25, "r_squared": 0.15, "sharpe": 0.05},
        "indicator_bonuses": {"rsi": 0.05, "price_position": 0.03, "support_distance": 0.02},
        "thresholds": {"velocity_cap": 90, "roc_threshold": 1.5},
        "volatility_penalty": 2.5,  # Volatility threshold (% daily). Above = penalize. Set to 0 to disable.
        "smoothness_bonus": 0.15,  # Bonus (0-1) for consistent growth: reward r_squared * bonus if vol < threshold
        "description": "Weekly - CMGR and momentum emphasis with CAGR context. Accepts up to 2.5% daily volatility"
    },
    "1mo": {
        "weights": {"cmgr": 0.35, "cagr": 0.0, "roc": 0.25, "momentum": 0.25, "r_squared": 0.10, "sharpe": 0.05},
        "indicator_bonuses": {"rsi": 0.05, "price_position": 0.03, "support_distance": 0.02},
        "thresholds": {"velocity_cap": 85, "roc_threshold": 1.2},
        "volatility_penalty": 2.0,  # Volatility threshold (% daily). Above = penalize. Set to 0 to disable.
        "smoothness_bonus": 0.20,  # Bonus (0-1) for consistent growth: reward r_squared * bonus if vol < threshold
        "description": "Monthly - CMGR primary metric with CAGR component. Accepts up to 2% daily volatility"
    },
    "3mo": {
        "weights": {"cmgr": 0.40, "cagr": 0.0, "roc": 0.20, "momentum": 0.25, "r_squared": 0.10, "sharpe": 0.05},
        "indicator_bonuses": {"rsi": 0.05, "price_position": 0.03, "support_distance": 0.02},
        "thresholds": {"velocity_cap": 80, "roc_threshold": 0.8},
        "volatility_penalty": 1.5,  # Volatility threshold (% daily). Above = penalize. Set to 0 to disable.
        "smoothness_bonus": 0.20,  # Bonus (0-1) for consistent growth: reward r_squared * bonus if vol < threshold
        "description": "Quarterly - CMGR and CAGR emphasis for sustainable growth. Accepts up to 1.5% daily volatility"
    },
    "6mo": {
        "weights": {"cagr": 0.35, "r_squared": 0.20, "acceleration": 0.20, "sharpe": 0.15, "momentum": 0.10},
        "indicator_bonuses": {"rsi": 0.03, "price_position": 0.02, "support_distance": 0.01},
        "thresholds": {"velocity_cap": 80, "roc_threshold": 0.5},
        "volatility_penalty": 1.0,  # Volatility threshold (% daily). Above = penalize. Set to 0 to disable.
        "smoothness_bonus": 0.0,  # No smoothness bonus for medium-term (already has high r_squared weight)
        "description": "Half-yearly - CAGR and stability focus. Accepts up to 1% daily volatility"
    },
    "1y": {
        "weights": {"cagr": 0.40, "r_squared": 0.25, "acceleration": 0.20, "sharpe": 0.15, "momentum": 0.00},
        "indicator_bonuses": {"rsi": 0.02, "price_position": 0.01, "support_distance": 0.00},
        "thresholds": {"velocity_cap": 80, "roc_threshold": 0.3},
        "volatility_penalty": 0,  # No penalty for long-term (Sharpe ratio already accounts for vol)
        "smoothness_bonus": 0.0,  # No smoothness bonus for long-term (already has high r_squared weight)
        "description": "Yearly - Long-term stability and consistency. Growth stability emphasis"
    },
    "2y": {
        "weights": {"cagr": 0.40, "r_squared": 0.25, "acceleration": 0.20, "sharpe": 0.15, "momentum": 0.00},
        "indicator_bonuses": {"rsi": 0.02, "price_position": 0.01, "support_distance": 0.00},
        "thresholds": {"velocity_cap": 80, "roc_threshold": 0.25},
        "volatility_penalty": 0,  # No penalty for long-term (Sharpe ratio already accounts for vol)
        "smoothness_bonus": 0.0,  # No smoothness bonus for long-term (already has high r_squared weight)
        "description": "2-year - Long-term consistency"
    },
    "5y": {
        "weights": {"cagr": 0.40, "r_squared": 0.25, "acceleration": 0.20, "sharpe": 0.15, "momentum": 0.00},
        "indicator_bonuses": {"rsi": 0.02, "price_position": 0.01, "support_distance": 0.00},
        "thresholds": {"velocity_cap": 80, "roc_threshold": 0.2},
        "volatility_penalty": 0,  # No penalty for long-term (Sharpe ratio already accounts for vol)
        "smoothness_bonus": 0.0,  # No smoothness bonus for long-term (already has high r_squared weight)
        "description": "5-year - Long-term consistency"
    },
}
src/core/valuation_engine/__init__.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Comprehensive Valuation Engine
3
+
4
+ Provides multi-model stock valuation with:
5
+ - 11+ valuation models (DCF, DDM, RIM, Multiples, etc.)
6
+ - Multi-source data fetching (yfinance, FMP, SEC)
7
+ - Hybrid model selection (rule-based + ML)
8
+ - Risk analysis (WACC, sensitivity, Monte Carlo)
9
+ - Visual reporting
10
+ """
11
+
12
+ from src.core.valuation_engine.orchestrator import ValuationEngine
13
+ from src.core.valuation_engine.core.models import (
14
+ ValuationModel,
15
+ ValuationResult,
16
+ AggregatedValuation,
17
+ ScenarioResult,
18
+ )
19
+ from src.core.valuation_engine.core.assumptions import (
20
+ ValuationAssumptions,
21
+ DCFAssumptions,
22
+ MultiplesAssumptions,
23
+ )
24
+
25
+ __all__ = [
26
+ "ValuationEngine",
27
+ "ValuationModel",
28
+ "ValuationResult",
29
+ "AggregatedValuation",
30
+ "ScenarioResult",
31
+ "ValuationAssumptions",
32
+ "DCFAssumptions",
33
+ "MultiplesAssumptions",
34
+ ]
src/core/valuation_engine/aggregation/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ """Aggregation layer for combining valuation results"""
2
+
3
+ from src.core.valuation_engine.aggregation.ensemble import EnsembleAggregator
4
+ from src.core.valuation_engine.aggregation.percentile_calc import PercentileCalculator
5
+
6
+ __all__ = ["EnsembleAggregator", "PercentileCalculator"]
src/core/valuation_engine/aggregation/ensemble.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Ensemble weighting for combining multiple valuation models"""
2
+
3
+ from typing import Dict
4
+ from src.core.valuation_engine.core.models import ValuationModel, ValuationResult
5
+
6
+
7
class EnsembleAggregator:
    """
    Blends the outputs of several valuation models into a single fair-value
    estimate, weighting each model by its reported confidence score.
    """

    def __init__(self, min_models_required: int = 3):
        """
        Initialize ensemble aggregator.

        Args:
            min_models_required: Minimum number of valid models required
                before an aggregate value is produced.
        """
        self.min_models_required = min_models_required

    def aggregate(self, results: Dict[ValuationModel, ValuationResult]) -> Dict:
        """
        Combine model results into one confidence-weighted fair value.

        Strategy: keep only valid results, normalize their confidence scores
        so they sum to 1.0 (equal weights if all are zero), then take the
        weighted average. Fails gracefully when too few models are valid.

        Args:
            results: Dictionary mapping ValuationModel to ValuationResult

        Returns:
            Dictionary with:
            - weighted_fair_value: Confidence-weighted average (None on failure)
            - valid_models_count: Number of valid models
            - total_confidence: Sum of all confidence scores
            - model_weights: Normalized weights used
            - error: Explanation string, or None on success
        """
        # Keep only results flagged as valid
        valid = {m: r for m, r in results.items() if r.is_valid}

        # Too few usable models -> no aggregate value
        if len(valid) < self.min_models_required:
            return {
                'weighted_fair_value': None,
                'valid_models_count': len(valid),
                'total_confidence': 0.0,
                'model_weights': {},
                'error': f"Only {len(valid)} valid models, minimum required is {self.min_models_required}"
            }

        # Pull out parallel lists of models, fair values and confidences
        models = list(valid.keys())
        values = [valid[m].fair_value_per_share for m in models]
        scores = [valid[m].confidence for m in models]

        # Normalize confidences; fall back to equal weights when all are zero
        total_confidence = sum(scores)
        if total_confidence == 0:
            weights = [1.0 / len(scores)] * len(scores)
        else:
            weights = [score / total_confidence for score in scores]

        # Confidence-weighted average of the fair values
        blended_value = sum(value * weight for value, weight in zip(values, weights))

        return {
            'weighted_fair_value': blended_value,
            'valid_models_count': len(valid),
            'total_confidence': total_confidence,
            'model_weights': dict(zip(models, weights)),
            'error': None
        }

    def get_value_range(self, results: Dict[ValuationModel, ValuationResult]) -> tuple:
        """
        Get the spread of fair values across all valid models.

        Args:
            results: Dictionary mapping ValuationModel to ValuationResult

        Returns:
            Tuple of (min_value, max_value), or (None, None) when no model
            produced a valid result.
        """
        candidates = [r.fair_value_per_share for r in results.values() if r.is_valid]
        if not candidates:
            return (None, None)
        return (min(candidates), max(candidates))

    def get_confidence_weighted_median(self,
                                       results: Dict[ValuationModel, ValuationResult]) -> float:
        """
        Calculate the confidence-weighted median fair value.

        More robust to outlier models than the weighted mean: walks the
        value-sorted results accumulating confidence until half the total
        weight is reached.

        Args:
            results: Dictionary mapping ValuationModel to ValuationResult

        Returns:
            Weighted median fair value, or None when no valid results exist.
        """
        pairs = [
            (r.fair_value_per_share, r.confidence)
            for r in results.values()
            if r.is_valid
        ]
        if not pairs:
            return None

        # Order by fair value, then accumulate confidence mass
        pairs.sort(key=lambda pair: pair[0])
        half_weight = sum(conf for _, conf in pairs) / 2

        running = 0
        for value, conf in pairs:
            running += conf
            if running >= half_weight:
                return value

        return pairs[-1][0]  # Fallback to highest value
src/core/valuation_engine/aggregation/percentile_calc.py ADDED
@@ -0,0 +1,208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Percentile calculation for fair value ranges"""
2
+
3
+ from typing import Dict, List
4
+ import numpy as np
5
+
6
+ from src.core.valuation_engine.core.models import ValuationModel, ValuationResult
7
+
8
+
9
class PercentileCalculator:
    """
    Calculates percentiles (P10, P50, P90) from valuation model results.

    Provides pessimistic, base, and optimistic fair value estimates derived
    from the cross-model distribution of fair values.
    """

    def __init__(self, method: str = "linear"):
        """
        Initialize percentile calculator.

        Args:
            method: Numpy interpolation method ('linear', 'lower', 'higher',
                'midpoint', 'nearest') forwarded to ``np.percentile``.
        """
        self.method = method

    def _valid_values(self, results: "Dict[ValuationModel, ValuationResult]") -> List[float]:
        """Return fair values of all valid model results (shared helper)."""
        return [
            result.fair_value_per_share
            for result in results.values()
            if result.is_valid
        ]

    def calculate_percentiles(self,
                              results: "Dict[ValuationModel, ValuationResult]") -> Dict[str, float]:
        """
        Calculate P10, P50 (median), and P90 percentiles from all valid model results.

        Args:
            results: Dictionary mapping ValuationModel to ValuationResult

        Returns:
            Dictionary with percentile_10, percentile_50, percentile_90
            (all None when no model produced a valid result).
        """
        valid_values = self._valid_values(results)

        if not valid_values:
            return {
                'percentile_10': None,
                'percentile_50': None,
                'percentile_90': None
            }

        values_array = np.array(valid_values)

        return {
            'percentile_10': float(np.percentile(values_array, 10, method=self.method)),
            'percentile_50': float(np.percentile(values_array, 50, method=self.method)),
            'percentile_90': float(np.percentile(values_array, 90, method=self.method)),
        }

    def calculate_confidence_weighted_percentiles(self,
                                                  results: "Dict[ValuationModel, ValuationResult]") -> Dict[str, float]:
        """
        Calculate percentiles weighted by confidence scores.

        Models with higher confidence contribute more weight to the
        distribution than in the unweighted variant.

        Args:
            results: Dictionary mapping ValuationModel to ValuationResult

        Returns:
            Dictionary with weighted percentile_10/50/90 (None when empty).
        """
        valid_results = [
            (result.fair_value_per_share, result.confidence)
            for result in results.values()
            if result.is_valid
        ]

        if not valid_results:
            return {
                'percentile_10': None,
                'percentile_50': None,
                'percentile_90': None
            }

        # Sort by fair value so cumulative weights are monotonic.
        sorted_results = sorted(valid_results, key=lambda x: x[0])

        return {
            'percentile_10': self._weighted_percentile(sorted_results, 0.10),
            'percentile_50': self._weighted_percentile(sorted_results, 0.50),
            'percentile_90': self._weighted_percentile(sorted_results, 0.90),
        }

    def _weighted_percentile(self,
                             sorted_results: List[tuple],
                             percentile: float) -> float:
        """
        Calculate a weighted percentile from sorted (value, weight) pairs.

        Args:
            sorted_results: List of (value, weight) tuples sorted by value
            percentile: Target percentile (0.0 to 1.0)

        Returns:
            Weighted percentile value, or None for empty input.
        """
        if not sorted_results:
            return None

        total_weight = sum(weight for _, weight in sorted_results)
        target_weight = total_weight * percentile

        cumulative_weight = 0
        for i, (value, weight) in enumerate(sorted_results):
            cumulative_weight += weight

            if cumulative_weight >= target_weight:
                # Interpolate between previous and current value when the
                # target falls strictly inside this entry's weight span.
                # (The span width is simply this entry's weight; the original
                # recomputed it and carried an unused prev_weight variable.)
                if i > 0 and cumulative_weight > target_weight and weight > 0:
                    prev_value = sorted_results[i - 1][0]
                    target_offset = target_weight - (cumulative_weight - weight)
                    interpolation_factor = target_offset / weight
                    return prev_value + interpolation_factor * (value - prev_value)
                return value

        # Fallback: only reachable with degenerate weights (e.g. all zeros).
        return sorted_results[-1][0]

    def get_percentile_range(self,
                             results: "Dict[ValuationModel, ValuationResult]",
                             lower: float = 0.10,
                             upper: float = 0.90) -> tuple:
        """
        Get a percentile range (e.g. P10 to P90).

        Args:
            results: Dictionary mapping ValuationModel to ValuationResult
            lower: Lower percentile (default 0.10)
            upper: Upper percentile (default 0.90)

        Returns:
            Tuple of (lower_percentile_value, upper_percentile_value),
            or (None, None) when no valid results exist.
        """
        valid_values = self._valid_values(results)

        if not valid_values:
            return (None, None)

        values_array = np.array(valid_values)
        lower_value = np.percentile(values_array, lower * 100, method=self.method)
        upper_value = np.percentile(values_array, upper * 100, method=self.method)

        return (float(lower_value), float(upper_value))

    def calculate_all_percentiles(self,
                                  results: "Dict[ValuationModel, ValuationResult]") -> Dict[str, float]:
        """
        Calculate a full set of percentiles (P5, P10, P25, P50, P75, P90, P95).

        Useful for comprehensive distribution analysis.

        Args:
            results: Dictionary mapping ValuationModel to ValuationResult

        Returns:
            Dictionary keyed 'percentile_<p>' (all None when no valid results).
        """
        levels = [5, 10, 25, 50, 75, 90, 95]
        valid_values = self._valid_values(results)

        if not valid_values:
            return {f'percentile_{p}': None for p in levels}

        values_array = np.array(valid_values)
        return {
            f'percentile_{p}': float(np.percentile(values_array, p, method=self.method))
            for p in levels
        }
src/core/valuation_engine/core/__init__.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Core data models and entities for valuation engine"""
2
+
3
+ from src.core.valuation_engine.core.models import (
4
+ ValuationModel,
5
+ DataSource,
6
+ ValuationResult,
7
+ ScenarioResult,
8
+ AggregatedValuation,
9
+ SensitivityAnalysis,
10
+ MonteCarloResult,
11
+ )
12
+ from src.core.valuation_engine.core.assumptions import (
13
+ DCFAssumptions,
14
+ MultiplesAssumptions,
15
+ DDMAssumptions,
16
+ RIMAssumptions,
17
+ ScenarioAssumptions,
18
+ ValuationAssumptions,
19
+ )
20
+ from src.core.valuation_engine.core.stock_entity import StockEntity
21
+
22
+ __all__ = [
23
+ "ValuationModel",
24
+ "DataSource",
25
+ "ValuationResult",
26
+ "ScenarioResult",
27
+ "AggregatedValuation",
28
+ "SensitivityAnalysis",
29
+ "MonteCarloResult",
30
+ "DCFAssumptions",
31
+ "MultiplesAssumptions",
32
+ "DDMAssumptions",
33
+ "RIMAssumptions",
34
+ "ScenarioAssumptions",
35
+ "ValuationAssumptions",
36
+ "StockEntity",
37
+ ]
src/core/valuation_engine/core/assumptions.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Valuation assumptions and configuration dataclasses"""
2
+
3
+ from dataclasses import dataclass, field
4
+ from typing import List, Optional
5
+
6
+
7
@dataclass
class DCFAssumptions:
    """Assumptions for DCF models"""
    growth_rate: float = 0.05  # 5% growth during projection period
    terminal_growth_rate: float = 0.025  # 2.5% perpetual growth
    discount_rate: Optional[float] = None  # If None, calculate WACC
    projection_years: int = 5
    use_wacc: bool = True  # Use WACC for discount rate if not specified

    def validate(self) -> bool:
        """Validate that assumptions fall within reasonable bounds.

        Returns:
            True when every assumption is plausible, False otherwise.
        """
        if not -0.5 <= self.growth_rate <= 1.0:
            return False
        if not 0 <= self.terminal_growth_rate <= 0.05:
            return False
        # `is not None` rather than truthiness: an explicit 0.0 discount rate
        # must be range-checked, not silently skipped as "unset".
        if self.discount_rate is not None and not 0 <= self.discount_rate <= 0.5:
            return False
        if not 1 <= self.projection_years <= 20:
            return False
        return True
27
+
28
+
29
@dataclass
class MultiplesAssumptions:
    """Assumptions for multiples-based valuation (peer/industry comparables)."""
    # Explicit peer tickers; an empty list means no user-supplied peer group.
    peer_group: List[str] = field(default_factory=list)
    use_industry_median: bool = True  # Use median vs mean of peer multiples
    use_trailing: bool = True  # Use trailing vs forward multiples
    exclude_outliers: bool = True  # Exclude values beyond 3 std devs
    min_peer_count: int = 3  # Minimum peers required for a reliable multiple
37
+
38
+
39
@dataclass
class DDMAssumptions:
    """Assumptions for the Dividend Discount Model."""
    dividend_growth_rate: Optional[float] = None  # If None, calculate from history
    discount_rate: Optional[float] = None  # If None, use WACC or cost of equity
    use_cost_of_equity: bool = True  # Use cost of equity for discount rate
    projection_years: int = 5  # For multi-stage DDM
46
+
47
+
48
@dataclass
class RIMAssumptions:
    """Assumptions for the Residual Income Model."""
    cost_of_equity: Optional[float] = None  # If None, calculate using CAPM
    projection_years: int = 5  # Explicit residual-income projection horizon
    terminal_growth_rate: float = 0.025  # Perpetual growth beyond the horizon
    use_capm: bool = True  # Derive cost of equity via CAPM when not supplied
55
+
56
+
57
@dataclass
class ScenarioAssumptions:
    """Assumptions for scenario-based valuation.

    Margin adjustments are absolute deltas applied to the base margin;
    discount-rate adjustments are absolute deltas applied to the base rate.
    """
    # Bear scenario (pessimistic)
    bear_growth_rate: float = 0.02
    bear_margin_adjustment: float = -0.05  # -5% margin
    bear_discount_rate_adjustment: float = 0.02  # +2% discount rate

    # Base scenario (expected)
    base_growth_rate: float = 0.05
    base_margin_adjustment: float = 0.0
    base_discount_rate_adjustment: float = 0.0

    # Bull scenario (optimistic)
    bull_growth_rate: float = 0.10
    bull_margin_adjustment: float = 0.05  # +5% margin
    bull_discount_rate_adjustment: float = -0.01  # -1% discount rate
74
+
75
+
76
@dataclass
class ValuationAssumptions:
    """Global valuation assumptions consolidating all model assumptions."""
    # Model-specific assumptions
    dcf: DCFAssumptions = field(default_factory=DCFAssumptions)
    multiples: MultiplesAssumptions = field(default_factory=MultiplesAssumptions)
    ddm: DDMAssumptions = field(default_factory=DDMAssumptions)
    rim: RIMAssumptions = field(default_factory=RIMAssumptions)
    scenario: ScenarioAssumptions = field(default_factory=ScenarioAssumptions)

    # Global financial assumptions
    risk_free_rate: float = 0.04  # 4% risk-free rate (10-year Treasury)
    market_return: float = 0.10  # 10% expected market return
    tax_rate: float = 0.21  # 21% corporate tax rate

    @property
    def market_risk_premium(self) -> float:
        """Expected market return in excess of the risk-free rate."""
        return self.market_return - self.risk_free_rate

    def validate(self) -> bool:
        """Validate all assumptions; True only when every check passes."""
        if not self.dcf.validate():
            return False
        rates_in_bounds = (
            0 <= self.risk_free_rate <= 0.15
            and 0 <= self.market_return <= 0.30
            and 0 <= self.tax_rate <= 1.0
        )
        return rates_in_bounds
src/core/valuation_engine/core/models.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Core data models for valuation engine"""
2
+
3
+ from dataclasses import dataclass, field
4
+ from typing import Optional, Dict, List
5
+ from enum import Enum
6
+
7
+
8
class ValuationModel(Enum):
    """Supported valuation models.

    Values are stable string identifiers suitable for serialization.
    """
    DCF = "dcf"  # Discounted Cash Flow
    REVERSE_DCF = "reverse_dcf"
    DDM = "ddm"  # Dividend Discount Model
    RESIDUAL_INCOME = "residual_income"
    ASSET_BASED = "asset_based"
    PE_MULTIPLE = "pe_multiple"  # Price/Earnings multiple
    EV_EBITDA = "ev_ebitda"  # Enterprise Value / EBITDA multiple
    EV_SALES = "ev_sales"  # Enterprise Value / Sales multiple
    PEG = "peg"  # P/E-to-growth ratio
    GRAHAM = "graham"
    SCENARIO_BASED = "scenario_based"  # Bear/base/bull scenarios
    MARKET_IMPLIED = "market_implied"
22
+
23
+
24
class DataSource(Enum):
    """Origin of the financial data backing a valuation."""
    YFINANCE = "yfinance"  # Yahoo Finance via yfinance (primary source)
    FMP = "fmp"  # Financial Modeling Prep API (fallback)
    SEC = "sec"  # SEC EDGAR filings (fallback)
    CACHED = "cached"  # Previously fetched, locally cached data
30
+
31
+
32
@dataclass
class ValuationResult:
    """Outcome of running a single valuation model on one stock."""
    model: ValuationModel
    fair_value_per_share: Optional[float] = None
    intrinsic_value: Optional[float] = None
    confidence: float = 0.0  # 0-1 scale
    data_source: DataSource = DataSource.YFINANCE
    assumptions: Dict = field(default_factory=dict)
    metadata: Dict = field(default_factory=dict)
    error: Optional[str] = None

    @property
    def is_valid(self) -> bool:
        """True when the model produced a usable per-share fair value."""
        return self.error is None and self.fair_value_per_share is not None
48
+
49
+
50
@dataclass
class ScenarioResult:
    """Fair values for the bear/base/bull scenarios."""
    bear: Optional[float] = None
    base: Optional[float] = None
    bull: Optional[float] = None

    def to_dict(self) -> Dict[str, Optional[float]]:
        """Convert to a plain dictionary keyed by scenario name."""
        return dict(bear=self.bear, base=self.base, bull=self.bull)
64
+
65
+
66
@dataclass
class AggregatedValuation:
    """Final aggregated valuation result across all models."""
    ticker: str
    current_price: float

    # Individual model results (forward-referenced annotations so the class
    # does not require its siblings at definition time)
    model_results: "Dict[ValuationModel, ValuationResult]" = field(default_factory=dict)

    # Aggregated values
    weighted_fair_value: Optional[float] = None  # Single weighted average
    percentile_10: Optional[float] = None  # P10 (pessimistic)
    percentile_50: Optional[float] = None  # P50 (median)
    percentile_90: Optional[float] = None  # P90 (optimistic)

    # Scenario results
    scenario_results: "Optional[ScenarioResult]" = None

    # Metadata
    upside_downside: Optional[float] = None  # % from current price to weighted fair value
    valid_models_count: int = 0
    total_confidence: float = 0.0

    @property
    def fair_value_range(self) -> tuple:
        """Return (min, max) fair value across valid models, or (None, None)."""
        valid_values = [r.fair_value_per_share for r in self.model_results.values() if r.is_valid]
        if not valid_values:
            return (None, None)
        return (min(valid_values), max(valid_values))

    def calculate_upside_downside(self) -> None:
        """Populate ``upside_downside`` as a percentage of the current price.

        Uses explicit ``is not None`` checks so a legitimate 0.0 weighted fair
        value is reported as -100% instead of being silently skipped by the
        previous truthiness test.
        """
        if (self.weighted_fair_value is not None
                and self.current_price is not None
                and self.current_price > 0):
            self.upside_downside = ((self.weighted_fair_value - self.current_price)
                                    / self.current_price) * 100
101
+
102
+
103
@dataclass
class SensitivityAnalysis:
    """Sensitivity analysis results for a valuation."""
    base_case_value: float
    parameter_ranges: Dict[str, List[float]]  # param_name -> tested input values
    value_matrix: Dict[str, List[float]]  # param_name -> resulting fair values

    def get_parameter_impact(self, parameter: str) -> Optional[List[float]]:
        """Return the fair values produced while varying *parameter*, or None."""
        return self.value_matrix.get(parameter)
113
+
114
+
115
@dataclass
class MonteCarloResult:
    """Summary statistics of a Monte Carlo valuation simulation."""
    simulations: int
    mean_value: float
    median_value: float
    std_dev: float
    percentile_5: float
    percentile_25: float
    percentile_50: float
    percentile_75: float
    percentile_95: float
    all_values: List[float] = field(default_factory=list)

    @property
    def confidence_interval_90(self) -> tuple:
        """90% confidence interval as (P5, P95)."""
        return (self.percentile_5, self.percentile_95)

    @property
    def confidence_interval_50(self) -> tuple:
        """50% confidence interval as (P25, P75)."""
        return (self.percentile_25, self.percentile_75)
src/core/valuation_engine/core/stock_entity.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Stock entity consolidating data from multiple sources"""
2
+
3
+ from dataclasses import dataclass, field
4
+ from typing import Optional, Dict, Any
5
+ import pandas as pd
6
+
7
+ from src.core.fundamental_analysis.core_models import FinancialMetrics, TickerData
8
+ from src.core.valuation_engine.core.models import DataSource
9
+
10
+
11
@dataclass
class StockEntity:
    """
    Consolidated stock data from multiple sources.
    Integrates with existing FinancialMetrics but adds additional fields needed for valuation.
    """
    # Basic info
    ticker: str
    price: float
    shares_outstanding: Optional[float] = None
    market_cap: Optional[float] = None

    # Company info
    sector: Optional[str] = None
    industry: Optional[str] = None
    company_name: Optional[str] = None

    # Financial metrics (from existing calculator)
    financial_metrics: Optional[FinancialMetrics] = None

    # Cash flow data
    free_cash_flow: Optional[float] = None
    operating_cash_flow: Optional[float] = None
    capex: Optional[float] = None

    # Income statement
    revenue: Optional[float] = None
    ebitda: Optional[float] = None
    net_income: Optional[float] = None
    eps: Optional[float] = None
    operating_margin: Optional[float] = None

    # Balance sheet
    total_assets: Optional[float] = None
    total_liabilities: Optional[float] = None
    total_debt: Optional[float] = None
    total_equity: Optional[float] = None
    book_value: Optional[float] = None
    book_value_per_share: Optional[float] = None

    # Dividend data
    dividend_per_share: Optional[float] = None
    dividend_yield: Optional[float] = None
    dividend_growth_rate: Optional[float] = None

    # Growth metrics
    revenue_growth: Optional[float] = None
    earnings_growth: Optional[float] = None

    # Risk metrics
    beta: Optional[float] = None

    # Historical data (for growth calculations)
    historical_fcf: list = field(default_factory=list)
    historical_eps: list = field(default_factory=list)
    historical_revenue: list = field(default_factory=list)
    historical_dividends: list = field(default_factory=list)

    # Raw data objects
    ticker_data: Optional[TickerData] = None  # yfinance data
    fmp_data: Optional[Dict[str, Any]] = None  # FMP API data
    sec_data: Optional[Dict[str, Any]] = None  # SEC API data

    # Metadata
    data_source: DataSource = DataSource.YFINANCE
    data_completeness_score: float = 0.0
    last_updated: Optional[str] = None

    def calculate_completeness_score(self) -> float:
        """
        Calculate data completeness score (0-1), store and return it.
        Used for confidence weighting in model selection.
        """
        required_fields = [
            self.price, self.shares_outstanding, self.sector,
            self.free_cash_flow, self.revenue, self.net_income,
            self.total_assets, self.total_liabilities, self.beta
        ]
        optional_fields = [
            self.dividend_per_share, self.operating_margin,
            self.book_value, self.ebitda, self.eps
        ]

        # Loop variable renamed from `field`, which shadowed the
        # dataclasses.field import used at class scope.
        required_count = sum(1 for value in required_fields if value is not None)
        optional_count = sum(1 for value in optional_fields if value is not None)

        # Required fields are weighted more heavily
        required_weight = 0.7
        optional_weight = 0.3

        required_score = required_count / len(required_fields)
        optional_score = optional_count / len(optional_fields)

        self.data_completeness_score = (required_score * required_weight +
                                        optional_score * optional_weight)
        return self.data_completeness_score

    @property
    def has_fcf(self) -> bool:
        """Check if FCF data is available and positive"""
        return self.free_cash_flow is not None and self.free_cash_flow > 0

    @property
    def has_dividends(self) -> bool:
        """Check if company pays dividends"""
        return (self.dividend_per_share is not None and
                self.dividend_per_share > 0)

    @property
    def has_positive_earnings(self) -> bool:
        """Check if company has positive earnings"""
        return self.net_income is not None and self.net_income > 0

    @property
    def has_book_value(self) -> bool:
        """Check if book value data is available and positive"""
        return self.book_value is not None and self.book_value > 0

    @property
    def debt_to_equity(self) -> Optional[float]:
        """Calculate debt-to-equity ratio (None when inputs are missing/zero)"""
        if self.total_debt and self.total_equity and self.total_equity > 0:
            return self.total_debt / self.total_equity
        return None

    @property
    def is_financial_company(self) -> bool:
        """Check if company is in the financial sector (substring match)"""
        if not self.sector:
            return False
        # Renamed loop variable (was `sector`) to avoid shadowing the field name.
        financial_sectors = ["Financial", "Bank", "Insurance", "REIT"]
        return any(keyword.lower() in self.sector.lower() for keyword in financial_sectors)

    @property
    def is_tech_company(self) -> bool:
        """Check if company is in the technology sector (substring match)"""
        if not self.sector:
            return False
        return "tech" in self.sector.lower() or "software" in self.sector.lower()

    def get_market_cap_category(self) -> str:
        """Categorize market cap: 'small' (<$2B), 'mid' (<$10B), 'large', or 'unknown'"""
        if not self.market_cap:
            return "unknown"

        # Market cap in billions
        market_cap_b = self.market_cap / 1_000_000_000

        if market_cap_b < 2:
            return "small"
        elif market_cap_b < 10:
            return "mid"
        else:
            return "large"

    @classmethod
    def from_ticker_data(cls, ticker_data: TickerData, price: float) -> "StockEntity":
        """
        Create StockEntity from yfinance TickerData.
        This is a convenience method for the most common case.
        """
        info = ticker_data.info or {}

        return cls(
            ticker=ticker_data.ticker,
            price=price,
            shares_outstanding=info.get("sharesOutstanding"),
            market_cap=info.get("marketCap"),
            sector=info.get("sector"),
            industry=info.get("industry"),
            company_name=info.get("longName"),
            beta=info.get("beta"),
            dividend_per_share=info.get("dividendRate"),
            dividend_yield=info.get("dividendYield"),
            ticker_data=ticker_data,
            data_source=DataSource.YFINANCE
        )
src/core/valuation_engine/data/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ """Data fetching layer with multi-source support"""
2
+
3
+ from src.core.valuation_engine.data.multi_source_fetcher import MultiSourceDataFetcher
4
+
5
+ __all__ = ["MultiSourceDataFetcher"]
src/core/valuation_engine/data/multi_source_fetcher.py ADDED
@@ -0,0 +1,273 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Multi-source data fetcher with fallback logic.
3
+ Primary: yfinance → Fallback 1: FMP → Fallback 2: SEC
4
+ """
5
+
6
+ import asyncio
7
+ from typing import Optional
8
+ from datetime import datetime
9
+
10
+ from src.core.fundamental_analysis.async_data_fetcher import AsyncDataFetcher
11
+ from src.core.fundamental_analysis.data_extractor import DataExtractor
12
+ from src.core.fundamental_analysis.metrics_calculator import MetricsCalculator
13
+ from src.core.valuation_engine.core.stock_entity import StockEntity
14
+ from src.core.valuation_engine.core.models import DataSource
15
+
16
+
17
class MultiSourceDataFetcher:
    """
    Fetches stock data from multiple sources with intelligent fallback.
    Strategy: yfinance (primary) → FMP (fallback) → SEC (fallback)
    """

    def __init__(self,
                 fmp_api_key: Optional[str] = None,
                 sec_api_key: Optional[str] = None,
                 max_workers: int = 5):
        """
        Initialize multi-source fetcher.

        Args:
            fmp_api_key: Financial Modeling Prep API key (optional)
            sec_api_key: SEC API key (optional)
            max_workers: Max workers for async data fetcher
        """
        self.yf_fetcher = AsyncDataFetcher(max_workers=max_workers)
        self.fmp_api_key = fmp_api_key
        self.sec_api_key = sec_api_key

        # TODO: Initialize FMP and SEC clients when implemented
        # self.fmp_client = FMPClient(fmp_api_key) if fmp_api_key else None
        # self.sec_client = SECClient(sec_api_key) if sec_api_key else None

    async def __aenter__(self):
        """Async context manager entry (delegates to the yfinance fetcher)"""
        await self.yf_fetcher.__aenter__()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit (delegates to the yfinance fetcher)"""
        await self.yf_fetcher.__aexit__(exc_type, exc_val, exc_tb)

    async def fetch_comprehensive_data(self, ticker: str) -> StockEntity:
        """
        Fetch comprehensive stock data with fallback logic.

        Args:
            ticker: Stock ticker symbol

        Returns:
            StockEntity with consolidated data, or None when the primary
            source could not provide a price.
        """
        # Step 1: Try yfinance (primary source)
        stock_entity = await self._fetch_from_yfinance(ticker)

        # Step 2: If yfinance succeeded, enrich with additional calculations
        if stock_entity and stock_entity.ticker_data:
            stock_entity = await self._enrich_with_calculations(stock_entity)

        # Step 3: Check data completeness
        if stock_entity:
            stock_entity.calculate_completeness_score()

        # TODO: Step 4: If data incomplete, try FMP fallback
        # if stock_entity.data_completeness_score < 0.7 and self.fmp_api_key:
        #     stock_entity = await self._enrich_with_fmp(stock_entity)

        # TODO: Step 5: If still incomplete, try SEC fallback
        # if stock_entity.data_completeness_score < 0.6 and self.sec_api_key:
        #     stock_entity = await self._enrich_with_sec(stock_entity)

        # Step 6: Set timestamp
        if stock_entity:
            stock_entity.last_updated = datetime.now().isoformat()

        return stock_entity

    async def _fetch_from_yfinance(self, ticker: str) -> Optional[StockEntity]:
        """
        Fetch data from yfinance and populate a StockEntity.

        Args:
            ticker: Stock ticker symbol

        Returns:
            StockEntity or None if fetch failed (no info or no price)
        """
        try:
            # Fetch ticker data using existing AsyncDataFetcher
            ticker_data = await self.yf_fetcher.fetch_ticker_data(ticker)

            if not ticker_data or not ticker_data.info:
                return None

            # Get current price
            price = ticker_data.info.get("currentPrice") or ticker_data.info.get("regularMarketPrice")
            if not price:
                return None

            # Create StockEntity from ticker data
            stock_entity = StockEntity.from_ticker_data(ticker_data, price)

            # Extract additional data using DataExtractor
            extractor = DataExtractor(ticker_data)

            # Cash flow data
            stock_entity.operating_cash_flow = extractor.get_cashflow_item(
                "Operating Cash Flow",
                ["Total Cash From Operating Activities", "CashFlowFromContinuingOperatingActivities"]
            )
            stock_entity.capex = extractor.get_cashflow_item(
                "Capital Expenditure",
                ["Capital Expenditures", "CapitalExpenditures"]
            )

            # Calculate FCF (capex is reported as a negative number, so add)
            if stock_entity.operating_cash_flow and stock_entity.capex:
                stock_entity.free_cash_flow = stock_entity.operating_cash_flow + stock_entity.capex

            # Income statement
            stock_entity.revenue = extractor.get_financial_item(
                "Total Revenue",
                ["totalRevenue", "Revenue"]
            )
            stock_entity.ebitda = extractor.get_financial_item(
                "EBITDA",
                ["ebitda"]
            )
            stock_entity.net_income = extractor.get_financial_item(
                "Net Income",
                ["netIncome", "Net Income"]
            )

            # Balance sheet
            stock_entity.total_assets = extractor.get_balance_sheet_item(
                "Total Assets",
                ["totalAssets"]
            )
            stock_entity.total_liabilities = extractor.get_balance_sheet_item(
                "Total Liabilities Net Minority Interest",
                ["Total Liab", "totalLiabilities"]
            )
            stock_entity.total_debt = extractor.get_balance_sheet_item(
                "Total Debt",
                ["totalDebt", "Long Term Debt"]
            )
            stock_entity.total_equity = extractor.get_balance_sheet_item(
                "Total Equity Gross Minority Interest",
                ["Stockholders Equity", "totalStockholdersEquity"]
            )

            # Calculate book value
            if stock_entity.total_assets and stock_entity.total_liabilities:
                stock_entity.book_value = stock_entity.total_assets - stock_entity.total_liabilities

            if stock_entity.book_value and stock_entity.shares_outstanding:
                stock_entity.book_value_per_share = stock_entity.book_value / stock_entity.shares_outstanding

            # Get historical data for growth calculations
            stock_entity.historical_fcf = extractor.get_cashflow_historical(
                "Free Cash Flow", 5, ["FreeCashFlow"]
            )
            stock_entity.historical_revenue = extractor.get_historical_data(
                "Total Revenue", 5, ["totalRevenue", "Revenue"]
            )

            # EPS from info
            stock_entity.eps = ticker_data.info.get("trailingEps")

            # NOTE(review): net_income / revenue is the *net* margin, not the
            # operating margin — confirm intent or switch to operating income.
            if stock_entity.net_income and stock_entity.revenue and stock_entity.revenue > 0:
                stock_entity.operating_margin = stock_entity.net_income / stock_entity.revenue

            return stock_entity

        except Exception as e:
            print(f"Error fetching from yfinance for {ticker}: {e}")
            return None

    async def _enrich_with_calculations(self, stock_entity: StockEntity) -> StockEntity:
        """
        Enrich stock entity with calculated metrics using MetricsCalculator.

        Args:
            stock_entity: StockEntity to enrich

        Returns:
            Enriched StockEntity (returned unchanged on failure)
        """
        try:
            if not stock_entity.ticker_data:
                return stock_entity

            # Use existing MetricsCalculator to calculate financial metrics
            calculator = MetricsCalculator(DataExtractor(stock_entity.ticker_data))
            financial_metrics = await calculator.calculate_all_metrics()

            # Store financial metrics (financial_metrics.roe is available from
            # here for the Residual Income Model; no extra extraction needed).
            stock_entity.financial_metrics = financial_metrics

            # Calculate growth rates if historical data available
            stock_entity.revenue_growth = self._calculate_growth_rate(
                stock_entity.historical_revenue
            )

            # Calculate dividend growth rate if historical dividends available
            if len(stock_entity.historical_dividends) > 1:
                stock_entity.dividend_growth_rate = self._calculate_growth_rate(
                    stock_entity.historical_dividends
                )

            return stock_entity

        except Exception as e:
            print(f"Error enriching with calculations: {e}")
            return stock_entity

    def _calculate_growth_rate(self, historical_values: list) -> Optional[float]:
        """
        Calculate CAGR from historical values.

        Args:
            historical_values: List of historical values (most recent first)

        Returns:
            CAGR as decimal (e.g., 0.05 for 5%) or None
        """
        try:
            # Filter out None and non-positive values
            values = [v for v in historical_values if v is not None and v > 0]

            if len(values) < 2:
                return None

            # CAGR = (Ending Value / Beginning Value) ^ (1 / Number of Years) - 1
            beginning_value = values[-1]  # Oldest value
            ending_value = values[0]  # Most recent value
            num_years = len(values) - 1

            if beginning_value <= 0:
                return None

            cagr = (ending_value / beginning_value) ** (1 / num_years) - 1
            return cagr

        except Exception:
            return None

    # TODO: Implement FMP fallback
    # async def _enrich_with_fmp(self, stock_entity: StockEntity) -> StockEntity:
    #     """Enrich with FMP API data"""
    #     pass

    # TODO: Implement SEC fallback
    # async def _enrich_with_sec(self, stock_entity: StockEntity) -> StockEntity:
    #     """Enrich with SEC EDGAR data"""
    #     pass
src/core/valuation_engine/models/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ """Valuation models"""
2
+
3
+ from src.core.valuation_engine.models.base import BaseValuationModel
4
+
5
+ __all__ = ["BaseValuationModel"]
src/core/valuation_engine/models/base.py ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Abstract base class for all valuation models"""
2
+
3
+ from abc import ABC, abstractmethod
4
+ from typing import List
5
+ import asyncio
6
+
7
+ from src.core.valuation_engine.core.stock_entity import StockEntity
8
+ from src.core.valuation_engine.core.assumptions import ValuationAssumptions
9
+ from src.core.valuation_engine.core.models import ValuationResult, ValuationModel
10
+
11
+
12
class BaseValuationModel(ABC):
    """
    Abstract base class for all valuation models.
    All concrete valuation models must implement this interface.
    """

    def __init__(self):
        # Concrete subclasses must assign their ValuationModel enum member here.
        self.model_type: ValuationModel = None  # Must be set by subclasses

    @abstractmethod
    def is_applicable(self, stock: StockEntity) -> bool:
        """
        Check if this model can be applied to this stock.

        Args:
            stock: StockEntity with company data

        Returns:
            True if model can be applied, False otherwise
        """
        pass

    @abstractmethod
    async def calculate(self,
                        stock: StockEntity,
                        assumptions: ValuationAssumptions) -> ValuationResult:
        """
        Calculate fair value using this model.

        Args:
            stock: StockEntity with company data
            assumptions: ValuationAssumptions with model parameters

        Returns:
            ValuationResult with fair value and metadata
        """
        pass

    @abstractmethod
    def get_required_data(self) -> List[str]:
        """
        Return list of required data fields for this model.

        Returns:
            List of field names required (e.g., ['free_cash_flow', 'shares_outstanding'])
        """
        pass

    def validate_inputs(self, stock: StockEntity) -> bool:
        """
        Validate that required data is available.

        Args:
            stock: StockEntity to validate

        Returns:
            True if all required data is present, False otherwise
        """
        for field in self.get_required_data():
            if not hasattr(stock, field):
                return False
            value = getattr(stock, field)
            if value is None:
                return False
            # Zero is acceptable for some fields (e.g., dividends), but not for
            # fields used as divisors or price anchors.
            if isinstance(value, (int, float)) and value == 0:
                if field in ['shares_outstanding', 'price']:
                    return False
        return True

    async def calculate_safe(self,
                             stock: StockEntity,
                             assumptions: ValuationAssumptions) -> ValuationResult:
        """
        Safe wrapper around calculate that handles exceptions.

        Args:
            stock: StockEntity with company data
            assumptions: ValuationAssumptions

        Returns:
            ValuationResult (with error field populated if calculation failed)
        """
        try:
            # Check if model is applicable
            if not self.is_applicable(stock):
                return ValuationResult(
                    model=self.model_type,
                    error="Model not applicable to this stock",
                    confidence=0.0
                )

            # Validate inputs
            if not self.validate_inputs(stock):
                return ValuationResult(
                    model=self.model_type,
                    error=f"Missing required data: {self.get_required_data()}",
                    confidence=0.0
                )

            # Run calculation
            return await self.calculate(stock, assumptions)

        except ValueError as e:
            return ValuationResult(
                model=self.model_type,
                error=f"Invalid inputs: {str(e)}",
                confidence=0.0
            )
        except ZeroDivisionError:
            return ValuationResult(
                model=self.model_type,
                error="Division by zero in calculation",
                confidence=0.0
            )
        except Exception as e:
            # Deliberately broad: calculate_safe is the engine's failure boundary,
            # so any model bug becomes an errored result instead of a crash.
            return ValuationResult(
                model=self.model_type,
                error=f"Calculation failed: {str(e)}",
                confidence=0.0
            )

    def _run_in_executor(self, func, *args):
        """
        Run a CPU-bound function in an executor to avoid blocking the event loop.

        Args:
            func: Function to run
            *args: Arguments to pass to function

        Returns:
            Awaitable that resolves to the function result
        """
        # get_running_loop() is the correct API inside async code;
        # get_event_loop() is deprecated for this usage since Python 3.10.
        loop = asyncio.get_running_loop()
        return loop.run_in_executor(None, func, *args)

    def _calculate_confidence(self,
                              stock: StockEntity,
                              base_confidence: float = 0.7) -> float:
        """
        Calculate confidence score for this valuation.
        Can be overridden by subclasses.

        Args:
            stock: StockEntity
            base_confidence: Starting confidence (0-1)

        Returns:
            Adjusted confidence score (0-1)
        """
        # Scale by data completeness: a fully populated entity keeps 100% of
        # base confidence, an empty one keeps 70%.
        completeness_score = stock.calculate_completeness_score()
        adjusted_confidence = base_confidence * (0.7 + 0.3 * completeness_score)

        return max(0.0, min(1.0, adjusted_confidence))
src/core/valuation_engine/models/intrinsic/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ """Intrinsic valuation models"""
2
+
3
+ from src.core.valuation_engine.models.intrinsic.dcf import DCFModel
4
+ from src.core.valuation_engine.models.intrinsic.ddm import DDMModel
5
+
6
+ __all__ = ["DCFModel", "DDMModel"]
src/core/valuation_engine/models/intrinsic/dcf.py ADDED
@@ -0,0 +1,260 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Discounted Cash Flow (DCF) valuation model"""
2
+
3
+ from typing import List
4
+ import asyncio
5
+
6
+ from src.core.valuation_engine.models.base import BaseValuationModel
7
+ from src.core.valuation_engine.core.stock_entity import StockEntity
8
+ from src.core.valuation_engine.core.assumptions import ValuationAssumptions
9
+ from src.core.valuation_engine.core.models import ValuationResult, ValuationModel, DataSource
10
+
11
+
12
class DCFModel(BaseValuationModel):
    """
    Discounted Cash Flow (DCF) valuation model.

    Formula:
    1. Project future FCFs for N years
    2. Calculate terminal value
    3. Discount all cash flows to present value
    4. Divide by shares outstanding

    Best for: Mature companies with predictable FCF
    """

    def __init__(self):
        super().__init__()
        self.model_type = ValuationModel.DCF

    def is_applicable(self, stock: StockEntity) -> bool:
        """
        DCF is applicable if:
        - FCF is available and positive
        - Shares outstanding is available
        """
        # bool() guard: without it the expression returns None when
        # shares_outstanding is None, violating the declared return type.
        return bool(stock.has_fcf and stock.shares_outstanding and stock.shares_outstanding > 0)

    def get_required_data(self) -> List[str]:
        """Required data fields for DCF"""
        return ['free_cash_flow', 'shares_outstanding']

    async def calculate(self,
                        stock: StockEntity,
                        assumptions: ValuationAssumptions) -> ValuationResult:
        """
        Calculate fair value using DCF.

        Args:
            stock: StockEntity with company data
            assumptions: ValuationAssumptions

        Returns:
            ValuationResult with fair value
        """
        dcf_assumptions = assumptions.dcf

        # Get base FCF
        base_fcf = stock.free_cash_flow
        shares = stock.shares_outstanding

        # Determine discount rate (use WACC if specified, or from assumptions)
        discount_rate = dcf_assumptions.discount_rate
        if discount_rate is None:
            # TODO: Calculate WACC when risk module is implemented
            # For now, use default 10%
            discount_rate = 0.10

        # Get growth parameters
        growth_rate = dcf_assumptions.growth_rate
        terminal_growth_rate = dcf_assumptions.terminal_growth_rate
        projection_years = dcf_assumptions.projection_years

        # Adjust growth rate based on historical data if available
        if stock.historical_fcf and len([f for f in stock.historical_fcf if f]) >= 2:
            historical_growth = self._calculate_historical_growth(stock.historical_fcf)
            if historical_growth is not None:
                # Blend historical and assumed growth (70% historical, 30% assumption)
                growth_rate = 0.7 * historical_growth + 0.3 * growth_rate

        # Run CPU-bound projection off the event loop via the shared base helper
        result_data = await self._run_in_executor(
            self._calculate_dcf_sync,
            base_fcf,
            shares,
            growth_rate,
            discount_rate,
            terminal_growth_rate,
            projection_years
        )

        # Calculate confidence based on data quality
        confidence = self._calculate_confidence_dcf(stock, growth_rate)

        return ValuationResult(
            model=self.model_type,
            fair_value_per_share=result_data['fair_value_per_share'],
            intrinsic_value=result_data['intrinsic_value'],
            confidence=confidence,
            data_source=stock.data_source,
            assumptions={
                'growth_rate': growth_rate,
                'terminal_growth_rate': terminal_growth_rate,
                'discount_rate': discount_rate,
                'projection_years': projection_years,
                'base_fcf': base_fcf
            },
            metadata={
                'projected_fcfs': result_data['projected_fcfs'],
                'terminal_value': result_data['terminal_value'],
                'pv_fcfs': result_data['pv_fcfs'],
                'pv_terminal': result_data['pv_terminal']
            }
        )

    def _calculate_dcf_sync(self,
                            base_fcf: float,
                            shares_outstanding: float,
                            growth_rate: float,
                            discount_rate: float,
                            terminal_growth_rate: float,
                            projection_years: int) -> dict:
        """
        Synchronous DCF calculation (CPU-bound).
        Refactored from existing DCFCalculator.

        Args:
            base_fcf: Base free cash flow
            shares_outstanding: Number of shares
            growth_rate: Projected growth rate
            discount_rate: Discount rate (WACC)
            terminal_growth_rate: Perpetual growth rate
            projection_years: Number of projection years

        Returns:
            Dictionary with calculation results
        """
        # Step 1: Project future FCFs (compound base FCF by the growth rate)
        projected_fcfs = [
            base_fcf * ((1 + growth_rate) ** year)
            for year in range(1, projection_years + 1)
        ]

        # Step 2: Present values of projected FCFs
        pv_fcfs = [
            fcf / ((1 + discount_rate) ** year)
            for year, fcf in enumerate(projected_fcfs, start=1)
        ]

        # Step 3: Terminal value (Gordon growth perpetuity on the last FCF)
        last_fcf = projected_fcfs[-1]

        # Perpetuity formula requires discount rate > terminal growth rate
        if discount_rate > terminal_growth_rate:
            terminal_value = last_fcf * (1 + terminal_growth_rate) / (discount_rate - terminal_growth_rate)
        else:
            # Fallback: use simple multiple of last FCF if terminal growth is too high
            terminal_value = last_fcf * 10  # 10x last year FCF

        # Step 4: Discount terminal value to present
        pv_terminal = terminal_value / ((1 + discount_rate) ** projection_years)

        # Step 5: Sum all present values to get intrinsic value
        intrinsic_value = sum(pv_fcfs) + pv_terminal

        # Step 6: Fair value per share
        fair_value_per_share = intrinsic_value / shares_outstanding

        return {
            'fair_value_per_share': fair_value_per_share,
            'intrinsic_value': intrinsic_value,
            'projected_fcfs': projected_fcfs,
            'pv_fcfs': pv_fcfs,
            'terminal_value': terminal_value,
            'pv_terminal': pv_terminal
        }

    def _calculate_historical_growth(self, historical_fcf: List[float]):
        """
        Calculate historical FCF growth rate (CAGR).

        Args:
            historical_fcf: Historical FCF values, most recent first

        Returns:
            CAGR clamped to [-50%, +100%], or None if not computable
        """
        # CAGR is only defined over positive values
        valid_fcf = [fcf for fcf in historical_fcf if fcf is not None and fcf > 0]

        if len(valid_fcf) < 2:
            return None

        # CAGR = (Ending / Beginning) ^ (1 / years) - 1
        beginning = valid_fcf[-1]  # Oldest
        ending = valid_fcf[0]      # Most recent
        years = len(valid_fcf) - 1

        try:
            cagr = (ending / beginning) ** (1 / years) - 1
        except (ArithmeticError, ValueError):
            # e.g. OverflowError for extreme ratios; treat as unknown
            return None

        # Cap at reasonable range (-50% to +100%)
        return max(-0.5, min(1.0, cagr))

    def _calculate_confidence_dcf(self, stock: StockEntity, growth_rate: float) -> float:
        """
        Calculate confidence score for DCF valuation.

        Factors:
        - Data completeness
        - Historical FCF consistency
        - Growth rate reasonableness
        - Company size/maturity
        """
        base_confidence = 0.75  # DCF is generally reliable

        # Adjust for data completeness
        completeness_factor = stock.calculate_completeness_score()

        # Adjust for FCF history consistency
        history_factor = 1.0
        if stock.historical_fcf:
            valid_fcf = [f for f in stock.historical_fcf if f is not None and f > 0]
            if len(valid_fcf) >= 3:
                # Year-over-year growth rates (list is most recent first)
                growth_rates = [
                    (valid_fcf[i] - valid_fcf[i + 1]) / valid_fcf[i + 1]
                    for i in range(len(valid_fcf) - 1)
                ]

                if growth_rates:
                    # Coefficient of variation of growth rates
                    import statistics
                    std_dev = statistics.stdev(growth_rates) if len(growth_rates) > 1 else 0
                    mean_gr = statistics.mean(growth_rates)
                    cv = abs(std_dev / mean_gr) if mean_gr != 0 else 2.0

                    # Lower CV = more consistent = higher confidence
                    # CV < 0.5: very consistent (bonus); CV > 1.5: very volatile (penalty)
                    if cv < 0.5:
                        history_factor = 1.1
                    elif cv > 1.5:
                        history_factor = 0.8

        # Adjust for growth rate reasonableness
        growth_factor = 1.0
        if growth_rate < 0 or growth_rate > 0.3:
            # Very high or negative growth rates reduce confidence
            growth_factor = 0.9

        # Adjust for company size (larger companies = more predictable)
        size_factor = 1.0
        if stock.market_cap:
            market_cap_category = stock.get_market_cap_category()
            if market_cap_category == "large":
                size_factor = 1.05
            elif market_cap_category == "small":
                size_factor = 0.95

        # Combine all factors and clamp to [0, 1]
        confidence = base_confidence * completeness_factor * history_factor * growth_factor * size_factor
        return max(0.0, min(1.0, confidence))
src/core/valuation_engine/models/intrinsic/ddm.py ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Dividend Discount Model (DDM) - Gordon Growth Model"""
2
+
3
+ from typing import List
4
+ import asyncio
5
+
6
+ from src.core.valuation_engine.models.base import BaseValuationModel
7
+ from src.core.valuation_engine.core.stock_entity import StockEntity
8
+ from src.core.valuation_engine.core.assumptions import ValuationAssumptions
9
+ from src.core.valuation_engine.core.models import ValuationResult, ValuationModel
10
+
11
+
12
class DDMModel(BaseValuationModel):
    """
    Dividend Discount Model (Gordon Growth Model).

    Formula: Price = D₁ / (r - g)
    Where:
    - D₁ = Next year's dividend
    - r = Required rate of return (cost of equity)
    - g = Dividend growth rate

    Best for: Dividend-paying companies with stable dividends
    """

    def __init__(self):
        super().__init__()
        self.model_type = ValuationModel.DDM

    def is_applicable(self, stock: StockEntity) -> bool:
        """
        DDM is applicable if:
        - Company pays dividends
        - Dividend per share is positive
        """
        # bool() guard so a truthy/falsy property still yields a real bool
        return bool(stock.has_dividends)

    def get_required_data(self) -> List[str]:
        """Required data fields for DDM"""
        return ['dividend_per_share']

    async def calculate(self,
                        stock: StockEntity,
                        assumptions: ValuationAssumptions) -> ValuationResult:
        """
        Calculate fair value using DDM.

        Args:
            stock: StockEntity with company data
            assumptions: ValuationAssumptions

        Returns:
            ValuationResult with fair value
        """
        ddm_assumptions = assumptions.ddm

        # Get current dividend (D₀)
        current_dividend = stock.dividend_per_share

        # Determine dividend growth rate
        dividend_growth = ddm_assumptions.dividend_growth_rate
        if dividend_growth is None:
            # Calculate from historical data if available
            if stock.historical_dividends and len([d for d in stock.historical_dividends if d]) >= 2:
                dividend_growth = self._calculate_dividend_growth(stock.historical_dividends)
            else:
                # Default to 3% if no historical data
                dividend_growth = 0.03

        # Determine discount rate (cost of equity)
        discount_rate = ddm_assumptions.discount_rate
        if discount_rate is None:
            if ddm_assumptions.use_cost_of_equity:
                # Calculate cost of equity using CAPM
                discount_rate = self._calculate_cost_of_equity(stock, assumptions)
            else:
                # Default to 10% if can't calculate
                discount_rate = 0.10

        # Validate that r > g (required for Gordon Growth Model)
        if discount_rate <= dividend_growth:
            return ValuationResult(
                model=self.model_type,
                error=f"Discount rate ({discount_rate:.2%}) must be > growth rate ({dividend_growth:.2%})",
                confidence=0.0
            )

        # Trivial arithmetic — no executor round-trip needed
        fair_value = self._calculate_ddm_sync(
            current_dividend,
            dividend_growth,
            discount_rate
        )

        # Calculate confidence
        confidence = self._calculate_confidence_ddm(stock, dividend_growth)

        return ValuationResult(
            model=self.model_type,
            fair_value_per_share=fair_value,
            confidence=confidence,
            data_source=stock.data_source,
            assumptions={
                'current_dividend': current_dividend,
                'dividend_growth_rate': dividend_growth,
                'discount_rate': discount_rate
            },
            metadata={
                'next_year_dividend': current_dividend * (1 + dividend_growth)
            }
        )

    def _calculate_ddm_sync(self,
                            current_dividend: float,
                            growth_rate: float,
                            discount_rate: float) -> float:
        """
        Synchronous DDM calculation.

        Gordon Growth Model: P = D₁ / (r - g)
        Where D₁ = D₀ × (1 + g)

        Args:
            current_dividend: Current dividend per share (D₀)
            growth_rate: Dividend growth rate (g)
            discount_rate: Cost of equity (r)

        Returns:
            Fair value per share
        """
        # Next year's dividend (D₁), then apply Gordon Growth Model
        next_dividend = current_dividend * (1 + growth_rate)
        return next_dividend / (discount_rate - growth_rate)

    def _calculate_dividend_growth(self, historical_dividends: List[float]) -> float:
        """
        Calculate historical dividend growth rate (CAGR).

        Returns:
            CAGR clamped to [0%, 15%]; defaults to 3% when not computable.
        """
        # CAGR is only defined over positive values
        valid_dividends = [d for d in historical_dividends if d is not None and d > 0]

        if len(valid_dividends) < 2:
            return 0.03  # Default 3%

        beginning = valid_dividends[-1]  # Oldest
        ending = valid_dividends[0]      # Most recent
        years = len(valid_dividends) - 1

        try:
            cagr = (ending / beginning) ** (1 / years) - 1
        except (ArithmeticError, ValueError):
            # e.g. OverflowError for extreme ratios; fall back to default
            return 0.03

        # Cap at reasonable range (0% to 15% for dividend growth)
        return max(0.0, min(0.15, cagr))

    def _calculate_cost_of_equity(self,
                                  stock: StockEntity,
                                  assumptions: ValuationAssumptions) -> float:
        """
        Calculate cost of equity using CAPM.

        CAPM: Re = Rf + β(Rm - Rf)

        Args:
            stock: StockEntity
            assumptions: ValuationAssumptions

        Returns:
            Cost of equity, clamped to [4%, 20%]
        """
        risk_free_rate = assumptions.risk_free_rate
        market_return = assumptions.market_return
        beta = stock.beta if stock.beta else 1.0  # Default beta = 1 (market risk)

        # CAPM formula
        cost_of_equity = risk_free_rate + beta * (market_return - risk_free_rate)

        # Ensure reasonable range (4% to 20%)
        return max(0.04, min(0.20, cost_of_equity))

    def _calculate_confidence_ddm(self, stock: StockEntity, growth_rate: float) -> float:
        """
        Calculate confidence score for DDM valuation.

        Factors:
        - Dividend history consistency
        - Dividend payout ratio reasonableness
        - Company maturity
        - Growth rate reasonableness
        """
        base_confidence = 0.70  # DDM is reliable for dividend stocks

        # Adjust for data completeness
        completeness_factor = stock.calculate_completeness_score()

        # Adjust for dividend history
        history_factor = 1.0
        if stock.historical_dividends:
            valid_dividends = [d for d in stock.historical_dividends if d is not None and d > 0]
            if len(valid_dividends) >= 3:
                # Count dividend cuts (list is most recent first, so a cut is
                # a value smaller than its predecessor)
                dividend_cuts = sum(1 for i in range(len(valid_dividends) - 1)
                                    if valid_dividends[i] < valid_dividends[i + 1])
                if dividend_cuts == 0:
                    # No dividend cuts = dividend aristocrat = higher confidence
                    history_factor = 1.15
                elif dividend_cuts > 1:
                    # Multiple cuts = less reliable
                    history_factor = 0.85

        # Adjust for growth rate reasonableness
        growth_factor = 1.0
        if growth_rate > 0.10:
            # Very high dividend growth is unsustainable
            growth_factor = 0.90
        elif growth_rate < 0:
            # Negative growth (dividend cuts expected)
            growth_factor = 0.80

        # Adjust for company size (larger = more stable dividends)
        size_factor = 1.0
        if stock.market_cap:
            market_cap_category = stock.get_market_cap_category()
            if market_cap_category == "large":
                size_factor = 1.05
            elif market_cap_category == "small":
                size_factor = 0.95

        # Combine all factors and clamp to [0, 1]
        confidence = base_confidence * completeness_factor * history_factor * growth_factor * size_factor
        return max(0.0, min(1.0, confidence))
src/core/valuation_engine/models/relative/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ """Relative valuation models"""
2
+
3
+ from src.core.valuation_engine.models.relative.multiples import PEMultipleModel
4
+
5
+ __all__ = ["PEMultipleModel"]
src/core/valuation_engine/models/relative/multiples.py ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Price-to-Earnings (P/E) Multiple valuation model"""
2
+
3
+ from typing import List, Optional
4
+ import asyncio
5
+
6
+ from src.core.valuation_engine.models.base import BaseValuationModel
7
+ from src.core.valuation_engine.core.stock_entity import StockEntity
8
+ from src.core.valuation_engine.core.assumptions import ValuationAssumptions
9
+ from src.core.valuation_engine.core.models import ValuationResult, ValuationModel
10
+
11
+
12
class PEMultipleModel(BaseValuationModel):
    """
    Price-to-Earnings (P/E) Multiple valuation model.

    Strategy:
    1. Get company's current P/E ratio
    2. Compare to industry/sector median P/E
    3. Calculate fair value: Fair P/E × EPS

    Variations:
    - Use peer group average
    - Use historical average
    - Use forward P/E vs trailing P/E

    Best for: Quick relative valuation across peers
    """

    # Industry P/E benchmarks (approximate averages); class-level constant so
    # the table is built once, not on every valuation call.
    INDUSTRY_PE_BENCHMARKS = {
        'Technology': 28.0,
        'Healthcare': 25.0,
        'Financial': 12.0,
        'Financial Services': 12.0,
        'Consumer Cyclical': 20.0,
        'Consumer Defensive': 22.0,
        'Energy': 15.0,
        'Industrials': 20.0,
        'Basic Materials': 18.0,
        'Real Estate': 20.0,
        'Utilities': 18.0,
        'Communication Services': 22.0,
        'Communication': 22.0,
    }

    # Fallback when the sector is unknown (S&P 500 approximate average)
    DEFAULT_MARKET_PE = 20.0

    def __init__(self):
        super().__init__()
        self.model_type = ValuationModel.PE_MULTIPLE

    def is_applicable(self, stock: StockEntity) -> bool:
        """
        P/E Multiple is applicable if:
        - Company has positive earnings (EPS > 0)
        - P/E ratio is available
        """
        # bool() guard so a truthy/falsy property still yields a real bool
        return bool(stock.has_positive_earnings and
                    stock.eps is not None and
                    stock.eps > 0 and
                    stock.financial_metrics is not None and
                    stock.financial_metrics.pe_ratio is not None)

    def get_required_data(self) -> List[str]:
        """Required data fields for P/E Multiple"""
        return ['eps', 'price']

    async def calculate(self,
                        stock: StockEntity,
                        assumptions: ValuationAssumptions) -> ValuationResult:
        """
        Calculate fair value using P/E Multiple.

        Args:
            stock: StockEntity with company data
            assumptions: ValuationAssumptions

        Returns:
            ValuationResult with fair value
        """
        multiples_assumptions = assumptions.multiples

        # Get current P/E ratio and EPS
        current_pe = stock.financial_metrics.pe_ratio
        eps = stock.eps

        # Determine fair P/E (benchmark)
        fair_pe = await self._get_fair_pe(stock, multiples_assumptions)

        if fair_pe is None or fair_pe <= 0:
            return ValuationResult(
                model=self.model_type,
                error="Could not determine fair P/E ratio",
                confidence=0.0
            )

        # Trivial arithmetic — no executor round-trip needed
        fair_value = self._calculate_pe_valuation(eps, fair_pe)

        # Calculate confidence
        confidence = self._calculate_confidence_pe(stock, current_pe, fair_pe)

        return ValuationResult(
            model=self.model_type,
            fair_value_per_share=fair_value,
            confidence=confidence,
            data_source=stock.data_source,
            assumptions={
                'current_pe': current_pe,
                'fair_pe': fair_pe,
                'eps': eps
            },
            metadata={
                'pe_premium_discount': ((current_pe - fair_pe) / fair_pe * 100) if fair_pe else None,
                'sector': stock.sector
            }
        )

    async def _get_fair_pe(self,
                           stock: StockEntity,
                           multiples_assumptions) -> Optional[float]:
        """
        Determine fair P/E ratio.

        Strategy (in order of preference):
        1. Use peer group average if specified
        2. Use industry benchmark
        3. Use historical company average
        4. Use market average

        Args:
            stock: StockEntity
            multiples_assumptions: MultiplesAssumptions

        Returns:
            Fair P/E ratio or None
        """
        # TODO: Implement peer group comparison when data available
        # For now, use industry benchmarks based on sector; a missing or
        # unknown sector falls back to the market average.
        return self.INDUSTRY_PE_BENCHMARKS.get(stock.sector, self.DEFAULT_MARKET_PE)

    def _calculate_pe_valuation(self, eps: float, fair_pe: float) -> float:
        """
        Calculate fair value using P/E multiple.

        Formula: Fair Value = EPS × Fair P/E

        Args:
            eps: Earnings per share
            fair_pe: Fair P/E ratio

        Returns:
            Fair value per share
        """
        return eps * fair_pe

    def _calculate_confidence_pe(self,
                                 stock: StockEntity,
                                 current_pe: float,
                                 fair_pe: float) -> float:
        """
        Calculate confidence score for P/E valuation.

        Factors:
        - Data quality
        - How far current P/E is from fair P/E
        - Earnings quality
        - Company size
        """
        base_confidence = 0.65  # P/E is a quick relative measure, less precise than DCF

        # Adjust for data completeness
        completeness_factor = stock.calculate_completeness_score()

        # Adjust for P/E deviation from the benchmark
        deviation_factor = 1.0
        if current_pe and fair_pe:
            pe_ratio = current_pe / fair_pe
            # Large deviations may indicate special circumstances the simple
            # multiple does not capture
            if pe_ratio < 0.5 or pe_ratio > 2.0:
                deviation_factor = 0.85
            elif 0.7 < pe_ratio < 1.3:
                # Close to peer average = higher confidence
                deviation_factor = 1.1

        # Adjust for earnings quality
        quality_factor = 1.0
        if stock.financial_metrics:
            if stock.financial_metrics.quality_of_earnings:
                # Quality of earnings > 1.0 is good (cash earnings > reported earnings)
                qoe = stock.financial_metrics.quality_of_earnings
                if qoe > 1.0:
                    quality_factor = 1.05
                elif qoe < 0.7:
                    quality_factor = 0.90

        # Adjust for company size (larger = more comparable to benchmarks)
        size_factor = 1.0
        if stock.market_cap:
            market_cap_category = stock.get_market_cap_category()
            if market_cap_category == "large":
                size_factor = 1.05  # Large caps more comparable to industry averages
            elif market_cap_category == "small":
                size_factor = 0.95

        # Combine all factors and clamp to [0, 1]
        confidence = base_confidence * completeness_factor * deviation_factor * quality_factor * size_factor
        return max(0.0, min(1.0, confidence))
src/core/valuation_engine/orchestrator.py ADDED
@@ -0,0 +1,245 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Main ValuationEngine orchestrator"""
2
+
3
+ import asyncio
4
+ from typing import Optional, Dict
5
+ from datetime import datetime
6
+
7
+ from src.core.valuation_engine.data.multi_source_fetcher import MultiSourceDataFetcher
8
+ from src.core.valuation_engine.selector.model_selector import ModelSelector
9
+ from src.core.valuation_engine.aggregation.ensemble import EnsembleAggregator
10
+ from src.core.valuation_engine.aggregation.percentile_calc import PercentileCalculator
11
+ from src.core.valuation_engine.core.models import (
12
+ ValuationModel,
13
+ ValuationResult,
14
+ AggregatedValuation
15
+ )
16
+ from src.core.valuation_engine.core.assumptions import ValuationAssumptions
17
+ from src.core.valuation_engine.core.stock_entity import StockEntity
18
+
19
+ # Import implemented models
20
+ from src.core.valuation_engine.models.intrinsic.dcf import DCFModel
21
+ from src.core.valuation_engine.models.intrinsic.ddm import DDMModel
22
+ from src.core.valuation_engine.models.relative.multiples import PEMultipleModel
23
+
24
+
25
class ValuationEngine:
    """
    Main orchestrator for comprehensive stock valuation.

    Pipeline:
      1. Multi-source data fetching
      2. Rule-based model selection
      3. Parallel model execution
      4. Ensemble aggregation and percentile range
      5. Risk analysis (TODO)
      6. Report generation (TODO)
    """

    def __init__(self,
                 fmp_api_key: Optional[str] = None,
                 sec_api_key: Optional[str] = None,
                 max_workers: int = 5,
                 min_models_required: int = 2):
        """
        Initialize ValuationEngine.

        Args:
            fmp_api_key: Financial Modeling Prep API key (optional)
            sec_api_key: SEC API key (optional)
            max_workers: Max workers for async operations
            min_models_required: Minimum valid models required for aggregation
        """
        self.data_fetcher = MultiSourceDataFetcher(
            fmp_api_key=fmp_api_key,
            sec_api_key=sec_api_key,
            max_workers=max_workers,
        )
        self.model_selector = ModelSelector()
        self.ensemble_aggregator = EnsembleAggregator(min_models_required=min_models_required)
        self.percentile_calculator = PercentileCalculator()

        # Only models with a concrete implementation are registered; the
        # selector may still propose others, which are skipped at run time.
        self.model_registry = {
            ValuationModel.DCF: DCFModel(),
            ValuationModel.DDM: DDMModel(),
            ValuationModel.PE_MULTIPLE: PEMultipleModel(),
            # TODO: register remaining models as they're implemented
        }

    async def __aenter__(self):
        """Async context manager entry: opens the data-fetcher session."""
        await self.data_fetcher.__aenter__()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit: closes the data-fetcher session."""
        await self.data_fetcher.__aexit__(exc_type, exc_val, exc_tb)

    async def value_stock(self,
                          ticker: str,
                          assumptions: Optional[ValuationAssumptions] = None) -> AggregatedValuation:
        """
        Perform comprehensive valuation of a stock.

        Workflow: fetch data -> select models -> run models in parallel ->
        aggregate -> compute percentiles -> package into AggregatedValuation.

        Args:
            ticker: Stock ticker symbol
            assumptions: Custom valuation assumptions (optional)

        Returns:
            AggregatedValuation with all results
        """
        if assumptions is None:
            assumptions = ValuationAssumptions()

        # Step 1: pull fundamentals/prices from every configured source.
        print(f"[ValuationEngine] Fetching data for {ticker}...")
        stock = await self.data_fetcher.fetch_comprehensive_data(ticker)

        if not stock:
            # No data at all: return an empty shell so callers always get a
            # uniform result object instead of an exception.
            return AggregatedValuation(
                ticker=ticker,
                current_price=0.0,
                model_results={},
                weighted_fair_value=None,
            )

        print(f"[ValuationEngine] Data completeness: {stock.data_completeness_score:.2f}")

        # Step 2: decide which valuation models make sense for this company.
        print(f"[ValuationEngine] Selecting applicable models...")
        selected = self.model_selector.select_applicable_models(stock)
        print(f"[ValuationEngine] Selected {len(selected)} models:")
        for chosen, score in selected.items():
            print(f"  - {chosen.value}: confidence {score:.2f}")

        # Step 3: execute every implemented model concurrently.
        print(f"[ValuationEngine] Running models in parallel...")
        results = await self._run_models_parallel(stock, selected, assumptions)

        valid_count = sum(1 for outcome in results.values() if outcome.is_valid)
        print(f"[ValuationEngine] {valid_count}/{len(results)} models succeeded")

        # Step 4: confidence-weighted ensemble average.
        print(f"[ValuationEngine] Aggregating results...")
        aggregation = self.ensemble_aggregator.aggregate(results)

        # Step 5: fair-value spread (pessimistic / base / optimistic).
        percentiles = self.percentile_calculator.calculate_percentiles(results)

        # Step 6: package everything into a single result object.
        valuation = AggregatedValuation(
            ticker=ticker,
            current_price=stock.price,
            model_results=results,
            weighted_fair_value=aggregation['weighted_fair_value'],
            percentile_10=percentiles['percentile_10'],
            percentile_50=percentiles['percentile_50'],
            percentile_90=percentiles['percentile_90'],
            valid_models_count=aggregation['valid_models_count'],
            total_confidence=aggregation['total_confidence'],
        )
        valuation.calculate_upside_downside()

        print(f"[ValuationEngine] Valuation complete!")
        print(f"  Current Price: ${stock.price:.2f}")
        if valuation.weighted_fair_value:
            print(f"  Fair Value: ${valuation.weighted_fair_value:.2f}")
            print(f"  Upside/Downside: {valuation.upside_downside:.1f}%")
            print(f"  Range (P10-P90): ${percentiles['percentile_10']:.2f} - ${percentiles['percentile_90']:.2f}")

        return valuation

    async def _run_models_parallel(self,
                                   stock_entity: StockEntity,
                                   selected_models: Dict[ValuationModel, float],
                                   assumptions: ValuationAssumptions) -> Dict[ValuationModel, ValuationResult]:
        """
        Run multiple valuation models in parallel.

        Args:
            stock_entity: StockEntity with company data
            selected_models: Dictionary of selected models with confidence scores
            assumptions: ValuationAssumptions

        Returns:
            Dictionary mapping ValuationModel to ValuationResult
        """
        # Pair each runnable model with its coroutine; unimplemented models
        # (selected by rules but missing from the registry) are skipped.
        scheduled = []
        for model_type in selected_models:
            implementation = self.model_registry.get(model_type)
            if implementation is None:
                print(f"[WARNING] Model {model_type.value} not yet implemented, skipping...")
                continue
            scheduled.append((model_type, implementation.calculate_safe(stock_entity, assumptions)))

        # return_exceptions=True so one crashing model never sinks the rest.
        outcomes = await asyncio.gather(*(coro for _, coro in scheduled),
                                        return_exceptions=True)

        model_results = {}
        for (model_type, _), outcome in zip(scheduled, outcomes):
            if isinstance(outcome, Exception):
                # Wrap the failure in an invalid result so the aggregator can
                # report it uniformly.
                model_results[model_type] = ValuationResult(
                    model=model_type,
                    error=f"Model execution failed: {str(outcome)}",
                    confidence=0.0,
                )
            else:
                model_results[model_type] = outcome

        return model_results

    async def get_quick_valuation(self, ticker: str) -> Optional[float]:
        """
        Get quick fair value estimate (weighted average only).

        Convenience wrapper around value_stock() when only a single number
        is needed.

        Args:
            ticker: Stock ticker symbol

        Returns:
            Weighted fair value or None
        """
        full_result = await self.value_stock(ticker)
        return full_result.weighted_fair_value

    # TODO: Implement risk analysis methods
    # async def calculate_wacc(self, stock_entity: StockEntity) -> float:
    #     """Calculate WACC for discount rate"""

    # async def run_monte_carlo(self, stock_entity: StockEntity, iterations: int = 10000):
    #     """Run Monte Carlo simulation"""

    # TODO: Implement reporting methods
    # async def generate_report(self, ticker: str, output_format: str = "markdown") -> str:
    #     """Generate comprehensive valuation report"""
src/core/valuation_engine/selector/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ """Model selection logic"""
2
+
3
+ from src.core.valuation_engine.selector.model_selector import ModelSelector
4
+
5
+ __all__ = ["ModelSelector"]
src/core/valuation_engine/selector/model_selector.py ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Rule-based model selector with confidence scoring"""
2
+
3
+ from typing import Dict, List
4
+ from src.core.valuation_engine.core.models import ValuationModel
5
+ from src.core.valuation_engine.core.stock_entity import StockEntity
6
+ from src.core.valuation_engine.selector.selector_config import (
7
+ MODEL_SELECTION_RULES,
8
+ SECTOR_MODEL_PREFERENCES
9
+ )
10
+
11
+
12
class ModelSelector:
    """
    Rule-based model selector that determines which valuation models
    are applicable to a given stock and assigns confidence scores.

    Selection is table-driven: MODEL_SELECTION_RULES defines hard
    requirements plus a base confidence per model, and
    SECTOR_MODEL_PREFERENCES ranks models within each sector.
    """

    def select_applicable_models(self, stock: "StockEntity") -> "Dict[ValuationModel, float]":
        """
        Select applicable models and assign base confidence scores.

        Args:
            stock: StockEntity with company data

        Returns:
            Dictionary mapping ValuationModel to confidence score (0-1)
        """
        applicable_models: "Dict[ValuationModel, float]" = {}

        for model, rules in MODEL_SELECTION_RULES.items():
            # Hard gate: every "requires" expression must hold.
            if not self._check_requirements(stock, rules["requires"]):
                continue

            confidence = self._calculate_model_confidence(stock, model, rules)

            # Drop models whose adjusted confidence falls below the floor.
            if confidence >= rules["min_confidence"]:
                applicable_models[model] = confidence

        return applicable_models

    def _check_requirements(self, stock: "StockEntity", requirements: List[str]) -> bool:
        """
        Check if stock meets all requirements for a model.

        Args:
            stock: StockEntity
            requirements: List of requirement strings (e.g., "free_cash_flow > 0")

        Returns:
            True if all requirements met (vacuously True for an empty list)
        """
        return all(self._evaluate_requirement(stock, req) for req in requirements)

    def _evaluate_requirement(self, stock: "StockEntity", requirement: str) -> bool:
        """
        Evaluate a single requirement string of the form "field operator value".

        Malformed expressions, unknown fields, None values, unknown operators
        and incomparable field/threshold type combinations all evaluate to
        False ("requirement not met") rather than raising.

        Args:
            stock: StockEntity
            requirement: Requirement string (e.g., "eps > 0")

        Returns:
            True if requirement is met
        """
        parts = requirement.split()
        if len(parts) != 3:
            return False

        field_name, op, threshold_str = parts

        value = getattr(stock, field_name, None)
        # Missing or unpopulated field -> requirement not met.
        if value is None:
            return False

        # Numeric thresholds compare as floats; non-numeric thresholds stay
        # strings (only meaningful for ==/!= comparisons).
        try:
            threshold = float(threshold_str)
        except ValueError:
            threshold = threshold_str

        try:
            if op == ">":
                return value > threshold
            if op == ">=":
                return value >= threshold
            if op == "<":
                return value < threshold
            if op == "<=":
                return value <= threshold
            if op == "==":
                return value == threshold
            if op == "!=":
                return value != threshold
        except TypeError:
            # FIX: comparing incomparable types (e.g. a string attribute
            # against a numeric threshold) previously raised TypeError and
            # aborted the whole selection pass; treat it as not met instead.
            return False

        # Unknown operator.
        return False

    def _calculate_model_confidence(self,
                                    stock: "StockEntity",
                                    model: "ValuationModel",
                                    rules: dict) -> float:
        """
        Calculate confidence score for a model.

        Combines the rule's base confidence with multiplicative factors for
        sector preference, market-cap preference, data completeness and
        model-specific stock characteristics, then clamps to [0, 1].

        Args:
            stock: StockEntity
            model: ValuationModel
            rules: Model rules dictionary

        Returns:
            Confidence score (0-1)
        """
        base_confidence = rules["base_confidence"]

        # Factor 1: sector preference (bonus/penalty, plus ranking bonus).
        sector_factor = 1.0
        if stock.sector:
            preferred_sectors = rules.get("preferred_sectors", [])

            if preferred_sectors:  # If specific sectors are preferred
                if stock.sector in preferred_sectors:
                    sector_factor = 1.10  # 10% bonus for preferred sector
                else:
                    sector_factor = 0.95  # 5% penalty if not preferred

            # Models ranked in the sector's top 3 get an extra 5% bonus.
            sector_prefs = SECTOR_MODEL_PREFERENCES.get(stock.sector, [])
            if model in sector_prefs:
                rank = sector_prefs.index(model) + 1
                if rank <= 3:
                    sector_factor *= 1.05

        # Factor 2: market-cap preference.
        market_cap_factor = 1.0
        if stock.market_cap:
            market_cap_category = stock.get_market_cap_category()
            preferred_caps = rules.get("preferred_market_caps", [])

            if preferred_caps and market_cap_category in preferred_caps:
                market_cap_factor = 1.05  # 5% bonus

        # Factor 3: data completeness (maps completeness 0..1 to 0.8..1.0).
        completeness_score = stock.calculate_completeness_score()
        data_factor = 0.8 + 0.2 * completeness_score

        # Factor 4: model-specific adjustments.
        model_specific_factor = self._get_model_specific_factor(stock, model)

        confidence = (base_confidence *
                      sector_factor *
                      market_cap_factor *
                      data_factor *
                      model_specific_factor)

        # Ensure confidence is in [0, 1] range.
        return max(0.0, min(1.0, confidence))

    def _get_model_specific_factor(self, stock: "StockEntity", model: "ValuationModel") -> float:
        """
        Get model-specific confidence adjustments based on stock characteristics.

        Args:
            stock: StockEntity
            model: ValuationModel

        Returns:
            Adjustment factor (typically 0.8 to 1.2)
        """
        factor = 1.0

        if model == ValuationModel.DCF:
            # DCF works better with a consistent positive FCF history.
            if stock.historical_fcf:
                valid_fcf = [f for f in stock.historical_fcf if f is not None and f > 0]
                if len(valid_fcf) >= 4:
                    factor *= 1.10  # Good FCF history

        elif model == ValuationModel.DDM:
            # DDM works better for dividend aristocrats.
            if stock.historical_dividends:
                valid_dividends = [d for d in stock.historical_dividends if d is not None and d > 0]
                if len(valid_dividends) >= 3:
                    # NOTE(review): this assumes historical_dividends is ordered
                    # most-recent-first; if the list is chronological (oldest
                    # first), "<=" rewards dividend CUTS, not growth — confirm
                    # the ordering contract on StockEntity.
                    no_cuts = all(valid_dividends[i] <= valid_dividends[i - 1]
                                  for i in range(1, len(valid_dividends)))
                    if no_cuts:
                        factor *= 1.15  # Dividend aristocrat

        elif model == ValuationModel.PE_MULTIPLE:
            # P/E works better for stable, high-quality earnings.
            if stock.financial_metrics and stock.financial_metrics.quality_of_earnings:
                if stock.financial_metrics.quality_of_earnings > 1.0:
                    factor *= 1.05  # High quality earnings

        elif model == ValuationModel.ASSET_BASED:
            # Asset-based works better for asset-heavy industries.
            if stock.is_financial_company:
                factor *= 1.15  # Banks, REITs, etc.

        elif model == ValuationModel.PEG:
            # PEG works better for growth companies.
            if stock.revenue_growth and stock.revenue_growth > 0.10:
                factor *= 1.10  # High growth

        elif model == ValuationModel.RESIDUAL_INCOME:
            # RIM works better for financial companies.
            if stock.is_financial_company:
                factor *= 1.12

        return factor

    def get_recommended_models(self,
                               stock: "StockEntity",
                               max_models: int = 5) -> "List[ValuationModel]":
        """
        Get list of recommended models sorted by confidence.

        Args:
            stock: StockEntity
            max_models: Maximum number of models to return

        Returns:
            List of ValuationModels sorted by confidence (descending)
        """
        applicable_models = self.select_applicable_models(stock)

        ranked = sorted(
            applicable_models.items(),
            key=lambda item: item[1],
            reverse=True,
        )

        return [model for model, _ in ranked[:max_models]]
src/core/valuation_engine/selector/selector_config.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Configuration for model selection rules"""
2
+
3
+ from src.core.valuation_engine.core.models import ValuationModel
4
+
5
+
6
# Model selection rules.
#
# Schema (consumed by ModelSelector):
#   "requires"              - list of "field operator value" expressions
#                             evaluated against StockEntity attributes;
#                             ALL must hold for the model to be considered.
#   "preferred_sectors"     - sectors granting a confidence bonus
#                             (empty list = applicable to all sectors).
#   "preferred_market_caps" - market-cap categories ("large"/"mid"/"small")
#                             granting a small confidence bonus.
#   "base_confidence"       - starting confidence before adjustment factors.
#   "min_confidence"        - floor; models scoring below it are discarded.
MODEL_SELECTION_RULES = {
    ValuationModel.DCF: {
        "requires": ["free_cash_flow > 0", "shares_outstanding > 0"],
        "preferred_sectors": ["Technology", "Healthcare", "Consumer Cyclical", "Industrials"],
        "preferred_market_caps": ["large", "mid"],
        "base_confidence": 0.80,
        "min_confidence": 0.30,
    },
    ValuationModel.DDM: {
        # Dividend discount model: only meaningful for dividend payers.
        "requires": ["dividend_per_share > 0"],
        "preferred_sectors": ["Utilities", "Financial Services", "Consumer Defensive", "Real Estate"],
        "preferred_market_caps": ["large", "mid"],
        "base_confidence": 0.75,
        "min_confidence": 0.40,
    },
    ValuationModel.PE_MULTIPLE: {
        "requires": ["eps > 0", "price > 0"],
        "preferred_sectors": [],  # Applicable to all sectors
        "preferred_market_caps": ["large", "mid", "small"],
        "base_confidence": 0.70,
        "min_confidence": 0.30,
    },
    ValuationModel.PEG: {
        # P/E-to-growth: requires positive growth to be defined.
        "requires": ["eps > 0", "earnings_growth > 0"],
        "preferred_sectors": ["Technology", "Healthcare", "Consumer Cyclical"],
        "preferred_market_caps": ["mid", "small"],
        "base_confidence": 0.70,
        "min_confidence": 0.35,
    },
    ValuationModel.EV_EBITDA: {
        "requires": ["ebitda > 0"],
        "preferred_sectors": ["Industrials", "Basic Materials", "Energy", "Technology"],
        "preferred_market_caps": ["large", "mid"],
        "base_confidence": 0.72,
        "min_confidence": 0.30,
    },
    ValuationModel.EV_SALES: {
        "requires": ["revenue > 0"],
        "preferred_sectors": ["Technology", "Healthcare"],  # Often for growth companies
        "preferred_market_caps": ["mid", "small"],
        "base_confidence": 0.60,
        "min_confidence": 0.25,
    },
    ValuationModel.RESIDUAL_INCOME: {
        "requires": ["book_value > 0", "roe > 0"],
        "preferred_sectors": ["Financial Services", "Financial", "Real Estate"],
        "preferred_market_caps": ["large", "mid"],
        "base_confidence": 0.75,
        "min_confidence": 0.35,
    },
    ValuationModel.ASSET_BASED: {
        # ">= 0" allows debt-free companies (zero liabilities).
        "requires": ["total_assets > 0", "total_liabilities >= 0"],
        "preferred_sectors": ["Financial Services", "Real Estate", "Industrials"],
        "preferred_market_caps": ["large", "mid", "small"],
        "base_confidence": 0.68,
        "min_confidence": 0.30,
    },
    ValuationModel.GRAHAM: {
        "requires": ["eps > 0", "book_value_per_share > 0"],
        "preferred_sectors": ["Consumer Defensive", "Utilities", "Industrials"],
        "preferred_market_caps": ["large", "mid"],
        "base_confidence": 0.65,
        "min_confidence": 0.30,
    },
    ValuationModel.REVERSE_DCF: {
        "requires": ["price > 0", "free_cash_flow > 0"],
        "preferred_sectors": [],  # Applicable to all
        "preferred_market_caps": ["large", "mid", "small"],
        "base_confidence": 0.68,
        "min_confidence": 0.25,
    },
    ValuationModel.SCENARIO_BASED: {
        "requires": ["free_cash_flow > 0"],
        "preferred_sectors": [],  # Applicable to all
        "preferred_market_caps": ["large", "mid", "small"],
        "base_confidence": 0.72,
        "min_confidence": 0.30,
    },
    ValuationModel.MARKET_IMPLIED: {
        "requires": ["price > 0"],
        "preferred_sectors": [],  # Applicable to all
        "preferred_market_caps": ["large", "mid", "small"],
        "base_confidence": 0.60,
        "min_confidence": 0.25,
    },
}
93
+
94
+
95
# Sector-specific model preferences (models ordered by preference).
#
# ModelSelector grants an extra confidence bonus to models ranked in the
# top 3 for the stock's sector. "Financial Services"/"Financial" and
# "Communication Services"/"Communication" both appear with identical
# rankings — presumably because data sources name these sectors
# differently; verify against the fetchers' sector taxonomy.
SECTOR_MODEL_PREFERENCES = {
    "Technology": [
        ValuationModel.DCF,
        ValuationModel.PEG,
        ValuationModel.EV_SALES,
        ValuationModel.PE_MULTIPLE,
        ValuationModel.SCENARIO_BASED,
    ],
    "Healthcare": [
        ValuationModel.DCF,
        ValuationModel.PEG,
        ValuationModel.PE_MULTIPLE,
        ValuationModel.EV_SALES,
    ],
    "Financial Services": [
        ValuationModel.RESIDUAL_INCOME,
        ValuationModel.PE_MULTIPLE,
        ValuationModel.ASSET_BASED,
        ValuationModel.DDM,
    ],
    "Financial": [
        ValuationModel.RESIDUAL_INCOME,
        ValuationModel.PE_MULTIPLE,
        ValuationModel.ASSET_BASED,
        ValuationModel.DDM,
    ],
    "Consumer Defensive": [
        ValuationModel.DDM,
        ValuationModel.GRAHAM,
        ValuationModel.PE_MULTIPLE,
        ValuationModel.DCF,
    ],
    "Utilities": [
        ValuationModel.DDM,
        ValuationModel.PE_MULTIPLE,
        ValuationModel.DCF,
        ValuationModel.GRAHAM,
    ],
    "Real Estate": [
        ValuationModel.DDM,
        ValuationModel.ASSET_BASED,
        ValuationModel.RESIDUAL_INCOME,
        ValuationModel.PE_MULTIPLE,
    ],
    "Industrials": [
        ValuationModel.DCF,
        ValuationModel.PE_MULTIPLE,
        ValuationModel.EV_EBITDA,
        ValuationModel.ASSET_BASED,
    ],
    "Energy": [
        ValuationModel.DCF,
        ValuationModel.EV_EBITDA,
        ValuationModel.PE_MULTIPLE,
    ],
    "Basic Materials": [
        ValuationModel.DCF,
        ValuationModel.EV_EBITDA,
        ValuationModel.PE_MULTIPLE,
    ],
    "Consumer Cyclical": [
        ValuationModel.DCF,
        ValuationModel.PEG,
        ValuationModel.PE_MULTIPLE,
    ],
    "Communication Services": [
        ValuationModel.DCF,
        ValuationModel.PE_MULTIPLE,
        ValuationModel.EV_SALES,
    ],
    "Communication": [
        ValuationModel.DCF,
        ValuationModel.PE_MULTIPLE,
        ValuationModel.EV_SALES,
    ],
}