Riy777 commited on
Commit
c9b58fa
·
verified ·
1 Parent(s): 79f30b9

Upload governance_engine_fixed_v1_3_1.py

Browse files
Files changed (1) hide show
  1. governance_engine_fixed_v1_3_1.py +975 -0
governance_engine_fixed_v1_3_1.py ADDED
@@ -0,0 +1,975 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ============================================================
2
+ # 🏛️ governance_engine.py (V1.3.1 - TrendErr Fix)
3
+ # ============================================================
4
+ # Description:
5
+ # Evaluates trade quality using 156 INDICATORS.
6
+ # Fixes: Solved "The truth value of a Series is ambiguous" error.
7
+ # Update: Enhanced error logging to show real causes.
8
+ # ============================================================
9
+
10
+ import numpy as np
11
+ import pandas as pd
12
+ try:
13
+ import pandas_ta as ta
14
+ except Exception as _e:
15
+ ta = None
16
+ from typing import Dict, Any, List
17
+
18
+ class GovernanceEngine:
19
+ def __init__(self):
20
+ # ⚖️ Strategic Weights
21
+ self.WEIGHTS = {
22
+ "order_book": 0.25, # 25%
23
+ "market_structure": 0.20, # 20%
24
+ "trend": 0.15, # 15%
25
+ "momentum": 0.15, # 15%
26
+ "volume": 0.10, # 10%
27
+ "volatility": 0.05, # 5%
28
+ "cycle_math": 0.10 # 10%
29
+ }
30
+ print("🏛️ [Governance Engine V1.3] Stability Patch Applied. Ready.")
31
+
32
+
33
    async def evaluate_trade(
        self,
        symbol: str,
        ohlcv_data: Dict[str, Any],
        order_book: Dict[str, Any],
        verbose: bool = True,
        include_details: bool = False,
        use_multi_timeframes: bool = False
    ) -> Dict[str, Any]:
        """
        Main Execution Entry.

        Combines seven analysis domains (trend, momentum, volatility,
        volume, cycle/math, market structure, order book) into a single
        0-100 governance score plus a letter grade.

        Args:
            symbol: Ticker label, used only for log output.
            ohlcv_data: Mapping of timeframe key ('15m', '1h', '4h', '1d')
                to raw candle data accepted by self._prepare_dataframe.
                NOTE(review): the exact per-timeframe element format is
                defined by _prepare_dataframe (not visible here) — confirm
                against the caller.
            order_book: Dict with 'bids'/'asks' ladders, consumed by
                _calc_orderbook_domain.
            verbose: Print a per-domain audit trail to stdout.
            include_details: Attach per-domain detail tags (and the list of
                timeframes used) to the result.
            use_multi_timeframes: Aggregate each domain across all available
                timeframes instead of 15m only.

        Returns:
            Result dict with 'governance_score', 'grade', 'components' and
            'status'; on any failure, the rejection dict produced by
            self._create_rejection.

        Backwards compatible:
        - Requires '15m' data (same as before)
        - Output schema unchanged unless include_details=True
        - Multi-timeframe aggregation is opt-in (use_multi_timeframes=True)
        """
        try:
            # pandas_ta failed to import at module load -> nothing can be scored.
            if ta is None:
                return self._create_rejection('Missing dependency: pandas_ta')

            # 1) Data Prep
            if not isinstance(ohlcv_data, dict) or '15m' not in ohlcv_data:
                return self._create_rejection("No 15m Data")

            def _get_df(tf: str) -> Any:
                # Fetch and normalize one timeframe; None when absent or too
                # short (the indicator stack needs at least 60 candles).
                if tf not in ohlcv_data:
                    return None
                df_tf = self._prepare_dataframe(ohlcv_data[tf])
                if len(df_tf) < 60:
                    return None
                return df_tf

            df15 = _get_df('15m')
            if df15 is None:
                return self._create_rejection("Insufficient Data Length (<60)")

            # optional timeframes (only used when enabled)
            df_map: Dict[str, pd.DataFrame] = {'15m': df15}
            if use_multi_timeframes:
                for tf in ('1h', '4h', '1d'):
                    d = _get_df(tf)
                    if d is not None:
                        df_map[tf] = d

            if verbose:
                print(f"\n📝 [Gov Audit] Opening Session for {symbol}...")
                print("-" * 80)

            # 2) Calculate Domains (single TF by default for compatibility)
            details_pack = {}  # only filled when include_details=True

            if not use_multi_timeframes:
                s_trend = self._calc_trend_domain(df15, verbose, include_details, details_pack)
                s_mom = self._calc_momentum_domain(df15, verbose, include_details, details_pack)
                s_vol = self._calc_volatility_domain(df15, verbose, include_details, details_pack)
                s_volu = self._calc_volume_domain(df15, verbose, include_details, details_pack)
                s_cycle = self._calc_cycle_math_domain(df15, verbose, include_details, details_pack)
                s_struct = self._calc_structure_domain(df15, verbose, include_details, details_pack)
            else:
                # Weighted by timeframe importance; only timeframes available are used
                tfw = {'15m': 0.50, '1h': 0.30, '4h': 0.20, '1d': 0.10}

                def _agg(fn, name: str) -> float:
                    # Weighted average of one domain across available
                    # timeframes. Dividing by total_w re-normalizes, so a
                    # missing timeframe does not bias the aggregate.
                    total_w = 0.0
                    acc = 0.0
                    per_tf = {}
                    for tf, df_tf in df_map.items():
                        w = tfw.get(tf, 0.1)
                        s = fn(df_tf, False, include_details, details_pack)  # per-tf verbose off to avoid noise
                        per_tf[tf] = float(s)
                        acc += w * float(s)
                        total_w += w
                    if include_details:
                        details_pack[f"{name}_per_tf"] = per_tf
                    return (acc / total_w) if total_w > 0 else 0.0

                s_trend = _agg(self._calc_trend_domain, "trend")
                s_mom = _agg(self._calc_momentum_domain, "momentum")
                s_vol = _agg(self._calc_volatility_domain, "volatility")
                s_volu = _agg(self._calc_volume_domain, "volume")
                s_cycle = _agg(self._calc_cycle_math_domain, "cycle_math")
                s_struct = _agg(self._calc_structure_domain, "structure")

                if verbose:
                    print(f" 🧩 Multi-TF used: {', '.join(df_map.keys())}")

            # Order book has no timeframe — always evaluated exactly once.
            s_ob = self._calc_orderbook_domain(order_book, verbose, include_details, details_pack)

            if verbose:
                print("-" * 80)

            # 3) Weighted Aggregation (domain scores are in [-1, +1])
            raw_weighted_score = (
                (s_trend * self.WEIGHTS['trend']) +
                (s_mom * self.WEIGHTS['momentum']) +
                (s_vol * self.WEIGHTS['volatility']) +
                (s_volu * self.WEIGHTS['volume']) +
                (s_cycle * self.WEIGHTS['cycle_math']) +
                (s_struct * self.WEIGHTS['market_structure']) +
                (s_ob * self.WEIGHTS['order_book'])
            )

            # 4) Final Scoring & Grading
            # Map [-1, +1] -> [0, 100], clamped to guard against drift.
            final_score = max(0.0, min(100.0, ((raw_weighted_score + 1) / 2) * 100))
            grade = self._get_grade(final_score)

            result = {
                "governance_score": round(final_score, 2),
                "grade": grade,
                "components": {
                    "trend": round(float(s_trend), 3),
                    "momentum": round(float(s_mom), 3),
                    "volatility": round(float(s_vol), 3),
                    "volume": round(float(s_volu), 3),
                    "cycle_math": round(float(s_cycle), 3),
                    "structure": round(float(s_struct), 3),
                    "order_book": round(float(s_ob), 3),
                },
                "status": "APPROVED" if grade != "REJECT" else "REJECTED",
            }

            if include_details:
                result["details"] = details_pack
                result["timeframes_used"] = list(df_map.keys()) if use_multi_timeframes else ["15m"]

            return result

        except Exception as e:
            # Boundary catch: any unexpected failure becomes a rejection
            # rather than propagating into the caller's trading loop.
            if verbose:
                print(f"❌ [Governance Critical Error] {e}")
            return self._create_rejection(f"Exception: {str(e)}")
165
+
166
+
167
+ # ==============================================================================
168
+ # 📈 DOMAIN 1: TREND (Fixed)
169
+ # ==============================================================================
170
    def _calc_trend_domain(self, df: pd.DataFrame, verbose: bool, include_details: bool = False, details_pack: Any = None) -> float:
        """Score trend-following indicators and return a value in [-1, +1].

        Twenty checks accumulate into ``points``, which self._normalize maps
        against max_possible=22.0. The whole battery runs inside one
        try/except: the first indicator that raises aborts the remaining
        checks and records a truncated "TrendErr:" tag, so statement order
        here is part of the behavior.
        NOTE(review): column index/order assumptions on pandas_ta returns
        (adx, ichimoku, vortex, aroon, macd, donchian, kc) depend on the
        installed pandas_ta version — verify against its docs.
        """
        points = 0.0
        details = []
        try:
            c = df['close']

            # 1. EMA 9 > 21
            ema9 = ta.ema(c, 9); ema21 = ta.ema(c, 21)
            if self._valid(ema9) and self._valid(ema21) and ema9.iloc[-1] > ema21.iloc[-1]:
                points += 1; details.append("EMA9>21")

            # 2. EMA 21 > 50
            ema50 = ta.ema(c, 50)
            if self._valid(ema21) and self._valid(ema50) and ema21.iloc[-1] > ema50.iloc[-1]:
                points += 1; details.append("EMA21>50")

            # 3. Price > EMA 200 (double weight: long-term regime filter)
            ema200 = ta.ema(c, 200)
            if self._valid(ema200):
                if c.iloc[-1] > ema200.iloc[-1]: points += 2; details.append("Price>EMA200")
                else: points -= 2; details.append("Price<EMA200")

            # 4. Supertrend
            st = ta.supertrend(df['high'], df['low'], c, length=10, multiplier=3)
            if self._valid(st):
                # Supertrend returns [trend, direction, long, short], usually col 0 is trend line
                st_line = st.iloc[:, 0]
                if c.iloc[-1] > st_line.iloc[-1]: points += 1; details.append("ST:Bull")
                else: points -= 1

            # 5. Parabolic SAR
            psar = ta.psar(df['high'], df['low'], c)
            if self._valid(psar):
                # Handle both single series or dataframe return
                val = psar.iloc[-1]
                if isinstance(val, pd.Series): val = val.dropna().iloc[0] if not val.dropna().empty else 0

                # val == 0 is the "no usable value" sentinel set just above.
                if val != 0:
                    if val < c.iloc[-1]: points += 1; details.append("PSAR:Bull")
                    else: points -= 1

            # 6. ADX — columns assumed [ADX, DMP, DMN]; >25 means a trend exists
            adx = ta.adx(df['high'], df['low'], c, length=14)
            if self._valid(adx):
                val = adx[adx.columns[0]].iloc[-1]
                dmp = adx[adx.columns[1]].iloc[-1]
                dmn = adx[adx.columns[2]].iloc[-1]
                if val > 25:
                    if dmp > dmn: points += 1.5; details.append("ADX:StrongBull")
                    else: points -= 1.5; details.append("ADX:StrongBear")
                else: details.append("ADX:Weak")

            # 7. Ichimoku — price above both cloud spans
            ichi = ta.ichimoku(df['high'], df['low'], c)
            # Ichimoku returns a tuple of (DataFrame, DataFrame)
            if ichi is not None and isinstance(ichi, tuple) and self._valid(ichi[0]):
                span_a = ichi[0][ichi[0].columns[0]].iloc[-1]
                span_b = ichi[0][ichi[0].columns[1]].iloc[-1]
                if c.iloc[-1] > span_a and c.iloc[-1] > span_b: points += 1; details.append("Ichi:AboveCloud")

            # 8. Vortex — VI+ above VI-
            vortex = ta.vortex(df['high'], df['low'], c)
            if self._valid(vortex):
                if vortex[vortex.columns[0]].iloc[-1] > vortex[vortex.columns[1]].iloc[-1]:
                    points += 1; details.append("Vortex:Bull")

            # 9. Aroon — strong up- or down-phase
            aroon = ta.aroon(df['high'], df['low'])
            if self._valid(aroon):
                if aroon[aroon.columns[0]].iloc[-1] > 70: points += 1; details.append("Aroon:Up")
                elif aroon[aroon.columns[1]].iloc[-1] > 70: points -= 1; details.append("Aroon:Down")

            # 10. Slope of close over 14 bars
            slope = ta.slope(c, length=14)
            if self._valid(slope) and slope.iloc[-1] > 0: points += 1; details.append("Slope:Pos")

            # 11. KAMA
            kama = ta.kama(c, length=10)
            if self._valid(kama) and c.iloc[-1] > kama.iloc[-1]: points += 1; details.append("KAMA:Bull")

            # 12. TRIX — _safe_last guards NaN/short series
            trix = ta.trix(c, length=30)
            trix_val = self._safe_last(trix, col='trix')
            if np.isfinite(trix_val) and trix_val > 0: points += 1; details.append("TRIX:Bull")

            # 13. DPO
            dpo = ta.dpo(c, length=20)
            if self._valid(dpo) and dpo.iloc[-1] > 0: points += 1; details.append("DPO:Bull")

            # 14. SMA Cluster
            sma20 = ta.sma(c, 20); sma50 = ta.sma(c, 50)
            if self._valid(sma20) and self._valid(sma50) and sma20.iloc[-1] > sma50.iloc[-1]:
                points += 1; details.append("SMA20>50")

            # 15. ZigZag (proxy: current high above the high 4 bars back)
            if df['high'].iloc[-1] > df['high'].iloc[-5]: points += 1; details.append("ZigZag:Up")

            # 16. MACD Slope — MACD line rising bar-over-bar
            macd = ta.macd(c)
            if self._valid(macd):
                ml = macd[macd.columns[0]]
                if ml.iloc[-1] > ml.iloc[-2]: points += 1; details.append("MACD_Slope:Up")

            # 17. Coppock
            coppock = ta.coppock(c)
            if self._valid(coppock) and coppock.iloc[-1] > 0: points += 0.5; details.append("Coppock:Bull")

            # 18. HMA
            hma = ta.hma(c, length=9)
            if self._valid(hma) and c.iloc[-1] > hma.iloc[-1]: points += 1; details.append("HMA:Bull")

            # 19. Donchian — close above the channel midline (column 1)
            dc = ta.donchian(df['high'], df['low'])
            if self._valid(dc) and c.iloc[-1] > dc[dc.columns[1]].iloc[-1]:
                points += 1; details.append("Donchian:Upper")

            # 20. Keltner — close above the lower band (column 0)
            kc = ta.kc(df['high'], df['low'], c)
            if self._valid(kc) and c.iloc[-1] > kc[kc.columns[0]].iloc[-1]:
                points += 0.5; details.append("Keltner:Safe")

        except Exception as e: details.append(f"TrendErr:{str(e)[:15]}")

        norm_score = self._normalize(points, max_possible=22.0)
        if include_details and details_pack is not None:
            details_pack['trend'] = details
        if verbose: print(f" 📈 [TREND] Score: {norm_score:.2f} | {', '.join(details)}")
        return norm_score
298
+
299
+ # ==============================================================================
300
+ # 🚀 DOMAIN 2: MOMENTUM (Fixed)
301
+ # ==============================================================================
302
    def _calc_momentum_domain(self, df: pd.DataFrame, verbose: bool, include_details: bool = False, details_pack: Any = None) -> float:
        """Score oscillator/momentum indicators and return a value in [-1, +1].

        Accumulates ``points`` over ~20 checks and normalizes against
        max_possible=20.0. All checks run in one try/except, so the first
        raising indicator aborts the rest and logs a "MomErr:" tag —
        statement order is part of the behavior.
        NOTE(review): column-order assumptions on pandas_ta returns (macd,
        stoch, ppo, tsi, fisher, kdj, stochrsi, bbands, kc) depend on the
        installed pandas_ta version — verify against its docs.
        """
        points = 0.0
        details = []
        try:
            c = df['close']

            # 1. RSI — bullish zone 50-70; >70 overbought penalty; <30 oversold bounce
            rsi = ta.rsi(c, length=14)
            if self._valid(rsi):
                val = rsi.iloc[-1]
                if 50 < val < 70: points += 2; details.append(f"RSI:{val:.0f}")
                elif val > 70: points -= 1; details.append("RSI:OB")
                elif val < 30: points += 1; details.append("RSI:OS")

            # 2. MACD — line above signal (cols [line, hist, signal] assumed)
            macd = ta.macd(c)
            if self._valid(macd):
                if macd[macd.columns[0]].iloc[-1] > macd[macd.columns[2]].iloc[-1]:
                    points += 1.5; details.append("MACD:X_Bull")
                if macd[macd.columns[1]].iloc[-1] > 0:
                    points += 1; details.append("MACD_Hist:Pos")

            # 4. Stochastic — %K above %D inside the 20-80 band
            stoch = ta.stoch(df['high'], df['low'], c)
            if self._valid(stoch):
                k = stoch[stoch.columns[0]].iloc[-1]
                d = stoch[stoch.columns[1]].iloc[-1]
                if 20 < k < 80 and k > d: points += 1; details.append("Stoch:Bull")

            # 5. AO — positive and rising
            ao = ta.ao(df['high'], df['low'])
            if self._valid(ao) and ao.iloc[-1] > 0 and ao.iloc[-1] > ao.iloc[-2]:
                points += 1; details.append("AO:Rising")

            # 6. CCI
            cci = ta.cci(df['high'], df['low'], c)
            if self._valid(cci):
                val = cci.iloc[-1]
                if val > 100: points += 1; details.append("CCI:>100")
                elif val < -100: points -= 1

            # 7. Williams %R — oversold
            willr = ta.willr(df['high'], df['low'], c)
            if self._valid(willr) and willr.iloc[-1] < -80:
                points += 1; details.append("WillR:OS")

            # 8. ROC
            roc = ta.roc(c, length=10)
            if self._valid(roc) and roc.iloc[-1] > 0:
                points += 1; details.append(f"ROC:{roc.iloc[-1]:.2f}")

            # 9. MOM
            mom = ta.mom(c, length=10)
            if self._valid(mom) and mom.iloc[-1] > 0:
                points += 1; details.append("MOM:Pos")

            # 10. PPO
            ppo = ta.ppo(c)
            if self._valid(ppo) and ppo[ppo.columns[0]].iloc[-1] > 0:
                points += 1; details.append("PPO:Pos")

            # 11. TSI — line above signal
            tsi = ta.tsi(c)
            if self._valid(tsi) and tsi[tsi.columns[0]].iloc[-1] > tsi[tsi.columns[1]].iloc[-1]:
                points += 1; details.append("TSI:Bull")

            # 12. Fisher transform — line above signal
            fish = ta.fisher(df['high'], df['low'])
            if self._valid(fish) and fish[fish.columns[0]].iloc[-1] > fish[fish.columns[1]].iloc[-1]:
                points += 1; details.append("Fisher:Bull")

            # 13. CMO
            cmo = ta.cmo(c, length=14)
            if self._valid(cmo) and cmo.iloc[-1] > 0:
                points += 1; details.append("CMO:Pos")

            # 14. Squeeze — BB lower band inside KC lower band
            bb = ta.bbands(c, length=20)
            kc = ta.kc(df['high'], df['low'], c)
            if self._valid(bb) and self._valid(kc):
                if bb[bb.columns[0]].iloc[-1] < kc[kc.columns[0]].iloc[-1]:
                    points += 1; details.append("SQZ:Active")

            # 15. Ultimate Oscillator
            uo = ta.uo(df['high'], df['low'], c)
            if self._valid(uo) and uo.iloc[-1] > 50:
                points += 0.5; details.append("UO:>50")

            # 16. KDJ (kdj returns df)
            kdj = ta.kdj(df['high'], df['low'], c)
            if self._valid(kdj) and kdj[kdj.columns[0]].iloc[-1] > kdj[kdj.columns[1]].iloc[-1]:
                points += 0.5; details.append("KDJ:Bull")

            # 17. StochRSI — oversold
            stochrsi = ta.stochrsi(c)
            if self._valid(stochrsi) and stochrsi[stochrsi.columns[0]].iloc[-1] < 20:
                points += 1; details.append("StochRSI:OS")

            # 18. Elder Ray — bull power positive and rising
            ema13 = ta.ema(c, 13)
            if self._valid(ema13):
                bull_power = df['high'] - ema13
                if bull_power.iloc[-1] > 0 and bull_power.iloc[-1] > bull_power.iloc[-2]:
                    points += 1; details.append("BullPower:Rising")

            # 19. Streak — two consecutive up closes
            if c.iloc[-1] > c.iloc[-2] and c.iloc[-2] > c.iloc[-3]:
                points += 0.5; details.append("Streak:Up")

            # 20. Bias — modestly extended above EMA20 (0-5%) counts as healthy
            ema20 = ta.ema(c, 20)
            if self._valid(ema20):
                bias = (c.iloc[-1] - ema20.iloc[-1]) / ema20.iloc[-1]
                if 0 < bias < 0.05: points += 1; details.append("Bias:Healthy")

        except Exception as e: details.append(f"MomErr:{str(e)[:10]}")

        norm_score = self._normalize(points, max_possible=20.0)
        if include_details and details_pack is not None:
            details_pack['momentum'] = details
        if verbose: print(f" 🚀 [MOMENTUM] Score: {norm_score:.2f} | {', '.join(details)}")
        return norm_score
424
+
425
+ # ==============================================================================
426
+ # 🌊 DOMAIN 3: VOLATILITY (Fixed)
427
+ # ==============================================================================
428
+ def _calc_volatility_domain(self, df: pd.DataFrame, verbose: bool, include_details: bool = False, details_pack: Any = None) -> float:
429
+ points = 0.0
430
+ details = []
431
+ try:
432
+ # 1. Bollinger Bands (Bandwidth + %B)
433
+ bb = ta.bbands(df['close'], length=20)
434
+ if self._valid(bb):
435
+ # pandas_ta names usually: BBL_, BBM_, BBU_, BBB_ (bandwidth), BBP_ (%B)
436
+ bw_col = self._find_col(bb, ["bbb_", "bandwidth", "bbw"])
437
+ pb_col = self._find_col(bb, ["bbp_", "%b", "percentb", "pb"])
438
+ width = self._safe_last(bb, col=bw_col) if bw_col else np.nan
439
+ pct_b = self._safe_last(bb, col=pb_col) if pb_col else np.nan
440
+
441
+ # Bandwidth: smaller -> squeeze, larger -> expansion
442
+ # Typical BBB values ~ 0.02 - 0.25 in many markets (depends on volatility)
443
+ if np.isfinite(width):
444
+ if width < 0.05:
445
+ points -= 1; details.append("BBW:Squeeze")
446
+ elif width > 0.18:
447
+ points += 1; details.append("BBW:Expand")
448
+
449
+ # %B: location within bands (0..1 typically)
450
+ if np.isfinite(pct_b):
451
+ if pct_b > 0.90:
452
+ points += 0.5; details.append("BB%B:High")
453
+ elif pct_b < 0.10:
454
+ points -= 0.5; details.append("BB%B:Low")
455
+
456
+ # 3. ATR
457
+ atr = ta.atr(df['high'], df['low'], df['close'], length=14)
458
+ if self._valid(atr) and atr.iloc[-1] > atr.iloc[-5]:
459
+ points += 1; details.append("ATR:Rising")
460
+
461
+ # 4. KC Break
462
+ kc = ta.kc(df['high'], df['low'], df['close'])
463
+ if self._valid(kc):
464
+ kcu_col = self._find_col(kc, ['kcu_', 'upper']) or kc.columns[-1]
465
+ if df['close'].iloc[-1] > kc[kcu_col].iloc[-1]:
466
+ points += 2; details.append("KC:Breakout")
467
+
468
+ # 5. Donchian
469
+ dc = ta.donchian(df['high'], df['low'])
470
+ if self._valid(dc):
471
+ dcu_col = self._find_col(dc, ['dcu_', 'upper']) or dc.columns[-1]
472
+ if df['high'].iloc[-1] >= dc[dcu_col].iloc[-2]:
473
+ points += 1; details.append("DC:High")
474
+
475
+ # 6. Mass Index
476
+ mass = ta.massi(df['high'], df['low'])
477
+ if self._valid(mass) and mass.iloc[-1] > 25:
478
+ points -= 1; details.append("Mass:Risk")
479
+
480
+ # 7. Chaikin Vol
481
+ c_vol = ta.stdev(df['close'], 20)
482
+ if self._valid(c_vol) and c_vol.iloc[-1] > c_vol.iloc[-10]:
483
+ points += 1; details.append("Vol:Exp")
484
+
485
+ # 8. Ulcer
486
+ ui = ta.ui(df['close'])
487
+ if self._valid(ui):
488
+ val = ui.iloc[-1]
489
+ if val < 2: points += 1; details.append("UI:Safe")
490
+ else: points -= 1
491
+
492
+ # 9. NATR
493
+ natr = ta.natr(df['high'], df['low'], df['close'])
494
+ if self._valid(natr) and natr.iloc[-1] > 1.0:
495
+ points += 1; details.append(f"NATR:{natr.iloc[-1]:.1f}")
496
+
497
+ # 10. Gap
498
+ if self._valid(atr):
499
+ gap = abs(df['open'].iloc[-1] - df['close'].iloc[-2])
500
+ if gap > atr.iloc[-1] * 0.5: points += 1; details.append("Gap")
501
+
502
+ # 11. Vol Ratio
503
+ if self._valid(atr):
504
+ vr = atr.iloc[-1] / atr.iloc[-20]
505
+ if vr > 1.2: points += 1; details.append("VolRatio:High")
506
+
507
+ # 12. RVI (Proxy)
508
+ if self._valid(c_vol):
509
+ std_rsi = ta.rsi(c_vol, length=14)
510
+ if self._valid(std_rsi) and std_rsi.iloc[-1] > 50: points += 0.5
511
+
512
+ # 13. StdDev Channel
513
+ mean = df['close'].rolling(20).mean()
514
+ std = df['close'].rolling(20).std()
515
+ z = (df['close'].iloc[-1] - mean.iloc[-1]) / std.iloc[-1]
516
+ if abs(z) < 2: points += 0.5
517
+
518
+ # 14. ATS
519
+ if self._valid(atr):
520
+ ats = df['close'].iloc[-1] - (atr.iloc[-1] * 2)
521
+ if df['close'].iloc[-1] > ats: points += 1
522
+
523
+ # 15. Chop
524
+ chop = ta.chop(df['high'], df['low'], df['close'])
525
+ if self._valid(chop):
526
+ val = chop.iloc[-1]
527
+ if val < 38.2: points += 1; details.append("Chop:Trend")
528
+ elif val > 61.8: points -= 1; details.append("Chop:Range")
529
+
530
+ # 16. KC Width
531
+ if self._valid(kc):
532
+ kw = kc[kc.columns[0]].iloc[-1] - kc[kc.columns[2]].iloc[-1]
533
+ if kw > kw * 1.1: points += 0.5
534
+
535
+ # 17. Accel
536
+ if df['close'].diff().iloc[-1] > df['close'].diff().iloc[-2]: points += 0.5
537
+
538
+ # 18. Efficiency
539
+ denom = (df['high'].rolling(10).max() - df['low'].rolling(10).min()).iloc[-1]
540
+ if denom > 0:
541
+ eff = abs(df['close'].iloc[-1] - df['close'].iloc[-10]) / denom
542
+ if eff > 0.5: points += 1; details.append("Eff:High")
543
+
544
+ # 19. Gator
545
+ if ta.ema(df['close'], 5).iloc[-1] > ta.ema(df['close'], 13).iloc[-1]: points += 0.5
546
+
547
+ # 20. Range
548
+ if self._valid(atr):
549
+ rng = df['high'].iloc[-1] - df['low'].iloc[-1]
550
+ if rng > atr.iloc[-1]: points += 1
551
+
552
+ except Exception as e: details.append(f"VolErr:{str(e)[:10]}")
553
+ norm_score = self._normalize(points, max_possible=18.0)
554
+ if include_details and details_pack is not None:
555
+ details_pack['volatility'] = details
556
+ if verbose: print(f" 🌊 [VOLATILITY] Score: {norm_score:.2f} | {', '.join(details)}")
557
+ return norm_score
558
+
559
+ # ==============================================================================
560
+ # ⛽ DOMAIN 4: VOLUME (Fixed)
561
+ # ==============================================================================
562
+ def _calc_volume_domain(self, df: pd.DataFrame, verbose: bool, include_details: bool = False, details_pack: Any = None) -> float:
563
+ points = 0.0
564
+ details = []
565
+ try:
566
+ c = df['close']; v = df['volume']
567
+ # 1. OBV
568
+ obv = ta.obv(c, v)
569
+ if self._valid(obv) and obv.iloc[-1] > obv.iloc[-5]:
570
+ points += 1.5; details.append("OBV:Up")
571
+
572
+ # 2. CMF
573
+ cmf = ta.cmf(df['high'], df['low'], c, v, length=20)
574
+ if self._valid(cmf):
575
+ val = cmf.iloc[-1]
576
+ if val > 0.05: points += 2; details.append(f"CMF:{val:.2f}")
577
+ elif val < -0.05: points -= 2
578
+
579
+ # 3. MFI
580
+ mfi = ta.mfi(df['high'], df['low'], c, v, length=14)
581
+ if self._valid(mfi):
582
+ val = mfi.iloc[-1]
583
+ if 50 < val < 80: points += 1; details.append(f"MFI:{val:.0f}")
584
+
585
+ # 4. Vol > Avg
586
+ vol_ma = v.rolling(20).mean().iloc[-1]
587
+ if v.iloc[-1] > vol_ma: points += 1
588
+
589
+ # 5. Vol Spike
590
+ if v.iloc[-1] > vol_ma * 1.5: points += 1; details.append("Vol:Spike")
591
+
592
+ # 6. EOM
593
+ eom = ta.eom(df['high'], df['low'], c, v)
594
+ if self._valid(eom) and eom.iloc[-1] > 0: points += 1; details.append("EOM:Pos")
595
+
596
+ # 7. VWAP
597
+ vwap = ta.vwap(df['high'], df['low'], c, v)
598
+ if self._valid(vwap) and c.iloc[-1] > vwap.iloc[-1]: points += 1; details.append("Price>VWAP")
599
+
600
+ # 8. NVI
601
+ nvi = ta.nvi(c, v)
602
+ if self._valid(nvi) and nvi.iloc[-1] > nvi.iloc[-5]: points += 1; details.append("NVI:Smart")
603
+
604
+ # 9. PVI
605
+ pvi = ta.pvi(c, v)
606
+ if self._valid(pvi) and pvi.iloc[-1] > pvi.iloc[-5]: points += 0.5
607
+
608
+ # 10. ADL
609
+ adl = ta.ad(df['high'], df['low'], c, v)
610
+ if self._valid(adl) and adl.iloc[-1] > adl.iloc[-2]: points += 1; details.append("ADL:Up")
611
+
612
+ # 11. PVT
613
+ pvt = ta.pvt(c, v)
614
+ if self._valid(pvt) and pvt.iloc[-1] > pvt.iloc[-2]: points += 1
615
+
616
+ # 12. Vol Osc
617
+ if v.rolling(5).mean().iloc[-1] > v.rolling(10).mean().iloc[-1]: points += 1
618
+
619
+ # 13. KVO
620
+ kvo = ta.kvo(df['high'], df['low'], c, v)
621
+ if self._valid(kvo) and kvo[kvo.columns[0]].iloc[-1] > 0: points += 1; details.append("KVO:Bull")
622
+
623
+ # 14. Force
624
+ fi = (c.diff() * v).rolling(13).mean()
625
+ if fi.iloc[-1] > 0: points += 1
626
+
627
+ # 15. MFI (Bill Williams)
628
+ if v.iloc[-1] > 0:
629
+ my_mfi = (df['high'] - df['low']) / v
630
+ if my_mfi.iloc[-1] > my_mfi.iloc[-2] and v.iloc[-1] > v.iloc[-2]: points += 1
631
+
632
+ # 16. Buying Climax
633
+ if v.iloc[-1] > vol_ma * 3 and c.iloc[-1] > df['high'].iloc[-2]: points -= 1
634
+
635
+ # 17. RVOL
636
+ if vol_ma > 0:
637
+ rvol = v.iloc[-1] / vol_ma
638
+ if rvol > 1.2: points += 1; details.append(f"RVOL:{rvol:.1f}")
639
+
640
+ # 18. Delta
641
+ delta = (c.iloc[-1] - df['open'].iloc[-1]) * v.iloc[-1]
642
+ if delta > 0: points += 1
643
+
644
+ # 20. Low Vol Gap
645
+ if self._valid(ta.atr(df['high'], df['low'], c)):
646
+ if v.iloc[-1] < vol_ma * 0.5 and abs(c.diff().iloc[-1]) > ta.atr(df['high'], df['low'], c).iloc[-1]:
647
+ points -= 1
648
+
649
+ except Exception as e: details.append(f"VolErr:{str(e)[:10]}")
650
+ norm_score = self._normalize(points, max_possible=18.0)
651
+ if include_details and details_pack is not None:
652
+ details_pack['volume'] = details
653
+ if verbose: print(f" ⛽ [VOLUME] Score: {norm_score:.2f} | {', '.join(details)}")
654
+ return norm_score
655
+
656
+ # ==============================================================================
657
+ # 🔢 DOMAIN 5: CYCLE & MATH (Fixed)
658
+ # ==============================================================================
659
    def _calc_cycle_math_domain(self, df: pd.DataFrame, verbose: bool, include_details: bool = False, details_pack: Any = None) -> float:
        """Score statistical/cycle features and return a value in [-1, +1].

        Mixes classic floor-trader levels (pivot, R1, fib retracement) with
        rolling statistics (z-score, entropy, kurtosis, skew, linreg) and
        normalizes against max_possible=12.0. All checks share one
        try/except, so the first raising statement aborts the rest and logs
        a "MathErr:" tag — statement order matters. Check numbering is
        sparse (11-12, 14-19 were removed upstream); #20 relies on ``std``
        computed in #9, so it only runs if #9 succeeded.
        """
        points = 0.0
        details = []
        try:
            c = df['close']; h = df['high']; l = df['low']

            # 1. Pivot — classic floor pivot from the previous candle
            pp = (h.iloc[-2] + l.iloc[-2] + c.iloc[-2]) / 3
            if c.iloc[-1] > pp: points += 1; details.append("AbovePP")

            # 2. R1 — first resistance derived from the pivot
            r1 = (2 * pp) - l.iloc[-2]
            if c.iloc[-1] > r1: points += 1; details.append("AboveR1")

            # 3. Fib 618 — close above the 61.8% level of the 100-bar range
            range_h = h.rolling(100).max().iloc[-1]
            range_l = l.rolling(100).min().iloc[-1]
            fib_618 = range_l + (range_h - range_l) * 0.618
            if c.iloc[-1] > fib_618: points += 1; details.append("AboveFib")

            # 4. Z-Score — deep oversold gets double weight; normal range a nudge
            zscore = ta.zscore(c, length=30)
            if self._valid(zscore):
                z = zscore.iloc[-1]
                if z < -2: points += 2; details.append("Z:OS")
                elif -1 < z < 1: points += 0.5; details.append("Z:Norm")

            # 5. Entropy — low entropy = orderly price action
            entropy = ta.entropy(c, length=10)
            if self._valid(entropy) and entropy.iloc[-1] < 0.5:
                points += 1; details.append(f"Ent:{entropy.iloc[-1]:.2f}")

            # 6. Kurtosis — fat tails are a (small) penalty
            kurt = c.rolling(30).kurt().iloc[-1]
            if kurt > 3: points -= 0.5

            # 7. Skew
            skew = c.rolling(30).skew().iloc[-1]
            if skew > 0: points += 0.5; details.append("PosSkew")

            # 8. Variance — intentionally neutral (kept so the indicator is
            # exercised and raises inside this try like the others).
            var = ta.variance(c, length=20)
            if self._valid(var): points += 0

            # 9. StdDev — close more than one sigma above the 20-bar mean
            std = c.rolling(20).std().iloc[-1]
            if c.iloc[-1] > (c.rolling(20).mean().iloc[-1] + std): points += 0.5

            # 10. LinReg
            linreg = ta.linreg(c, length=20)
            if self._valid(linreg) and c.iloc[-1] > linreg.iloc[-1]:
                points += 1; details.append("AboveLinReg")

            # 13. CG (center of gravity; the actual gate is just a rising close)
            cg = ta.cg(c, length=10)
            if self._valid(cg) and c.diff().iloc[-1] > 0: points += 0.5

            # 20. Mean Rev — uses ``std`` from #9: penalize >2-sigma stretch
            # from the 50-bar mean, reward staying inside it
            dist_mean = abs(c.iloc[-1] - c.rolling(50).mean().iloc[-1])
            if dist_mean > std * 2: points -= 1
            else: points += 0.5

        except Exception as e: details.append(f"MathErr:{str(e)[:10]}")
        norm_score = self._normalize(points, max_possible=12.0)
        if include_details and details_pack is not None:
            details_pack['cycle_math'] = details
        if verbose: print(f" 🔢 [MATH] Score: {norm_score:.2f} | {', '.join(details)}")
        return norm_score
727
+
728
+ # ==============================================================================
729
+ # 🧱 DOMAIN 6: STRUCTURE (Fixed)
730
+ # ==============================================================================
731
+ def _calc_structure_domain(self, df: pd.DataFrame, verbose: bool, include_details: bool = False, details_pack: Any = None) -> float:
732
+ points = 0.0
733
+ details = []
734
+ try:
735
+ closes = df['close'].values; opens = df['open'].values
736
+ highs = df['high'].values; lows = df['low'].values
737
+
738
+ # 1. HH
739
+ if highs[-1] > highs[-2] and highs[-2] > highs[-3]:
740
+ points += 2; details.append("HH")
741
+
742
+ # 2. HL
743
+ if lows[-1] > lows[-2] and lows[-2] > lows[-3]:
744
+ points += 2; details.append("HL")
745
+
746
+ # 3. Engulfing
747
+ if closes[-1] > opens[-1]:
748
+ if closes[-1] > highs[-2] and opens[-1] < lows[-2]:
749
+ points += 2; details.append("Engulfing")
750
+
751
+ # 4. Hammer
752
+ body = abs(closes[-1] - opens[-1])
753
+ lower_wick = min(closes[-1], opens[-1]) - lows[-1]
754
+ if lower_wick > body * 2:
755
+ points += 2; details.append("Hammer")
756
+
757
+ # 5. BOS
758
+ recent_high = np.max(highs[-11:-1])
759
+ if closes[-1] > recent_high: points += 2; details.append("BOS")
760
+
761
+ # 6. FVG
762
+ if len(closes) > 3 and lows[-1] > highs[-3] * 1.001:
763
+ points += 1; details.append("FVG")
764
+
765
+ # 7. Order Block
766
+ if closes[-2] < opens[-2] and closes[-1] > opens[-1]:
767
+ if (closes[-1] - opens[-1]) > (opens[-2] - closes[-2]) * 2:
768
+ points += 1.5; details.append("OB")
769
+
770
+ # 8. SFP
771
+ if lows[-1] < lows[-2] and closes[-1] > lows[-2]:
772
+ points += 2.5; details.append("SFP")
773
+
774
+ # 9. Inside Bar
775
+ if highs[-1] < highs[-2] and lows[-1] > lows[-2]:
776
+ points -= 0.5; details.append("IB")
777
+
778
+ # 10. Morning Star
779
+ if closes[-3] < opens[-3] and abs(closes[-2]-opens[-2]) < body*0.5 and closes[-1] > opens[-1]:
780
+ points += 2; details.append("MorningStar")
781
+
782
+ # 14. Golden Cross Struct
783
+ m50 = np.mean(closes[-50:]); m200 = np.mean(closes[-200:]) if len(closes)>200 else m50
784
+ if m50 > m200: points += 1
785
+
786
+ # 16. Impulse
787
+ avg_body = np.mean([abs(c-o) for c,o in zip(closes[-10:], opens[-10:])])
788
+ if body > avg_body * 2: points += 1; details.append("Impulse")
789
+
790
+ except Exception as e: details.append(f"PAErr:{str(e)[:10]}")
791
+ norm_score = self._normalize(points, max_possible=18.0)
792
+ if include_details and details_pack is not None:
793
+ details_pack['structure'] = details
794
+ if verbose: print(f" 🧱 [STRUCTURE] Score: {norm_score:.2f} | {', '.join(details)}")
795
+ return norm_score
796
+
797
+ # ==============================================================================
798
+ # 📖 DOMAIN 7: ORDER BOOK (Already Safe, but kept consistent)
799
+ # ==============================================================================
800
+ def _calc_orderbook_domain(self, ob: Dict[str, Any], verbose: bool, include_details: bool = False, details_pack: Any = None) -> float:
801
+ points = 0.0
802
+ details = []
803
+ if not ob or 'bids' not in ob or 'asks' not in ob: return 0.0
804
+
805
+ try:
806
+ bids = np.array(ob['bids'], dtype=float)
807
+ asks = np.array(ob['asks'], dtype=float)
808
+ if len(bids) < 20 or len(asks) < 20: return 0.0
809
+
810
+ bid_vol = np.sum(bids[:20, 1])
811
+ ask_vol = np.sum(asks[:20, 1])
812
+ imbal = (bid_vol - ask_vol) / (bid_vol + ask_vol)
813
+ points += imbal * 5; details.append(f"Imbal:{imbal:.2f}")
814
+
815
+ avg_size = np.mean(bids[:50, 1])
816
+ if np.max(bids[:20, 1]) > avg_size * 5: points += 3; details.append("BidWall")
817
+ if np.max(asks[:20, 1]) > avg_size * 5: points -= 3; details.append("AskWall")
818
+
819
+ spread = (asks[0,0] - bids[0,0]) / bids[0,0] * 100
820
+ if spread < 0.05: points += 1; details.append("TightSpread")
821
+ elif spread > 0.2: points -= 1; details.append("WideSpread")
822
+
823
+ if bid_vol > ask_vol * 1.5: points += 2; details.append("Depth:Bull")
824
+ if bids[0,1] > bids[1,1] and bids[1,1] > bids[2,1]: points += 1; details.append("Slope:Up")
825
+ # Slippage / depth-to-move (normalized; avoids hard-coded thresholds)
826
+ mid = (asks[0, 0] + bids[0, 0]) / 2.0
827
+ target_p = mid * 1.005 # ~0.5% up move
828
+ vol_needed = 0.0
829
+ for p, s in asks:
830
+ if p > target_p:
831
+ break
832
+ vol_needed += float(s)
833
+
834
+ # Normalize by visible depth (top 20)
835
+ visible_ask = float(np.sum(asks[:20, 1])) if len(asks) >= 20 else float(np.sum(asks[:, 1]))
836
+ ratio = (vol_needed / visible_ask) if visible_ask > 0 else 0.0
837
+
838
+ # Higher ratio => more depth needed to move price => thicker book (safer entry)
839
+ if ratio > 0.65:
840
+ points += 1; details.append(f"ThickBook:{ratio:.2f}")
841
+ elif ratio < 0.30:
842
+ points -= 1; details.append(f"ThinBook:{ratio:.2f}")
843
+ else:
844
+ details.append(f"BookOK:{ratio:.2f}")
845
+
846
+ # Best-level dominance (simple slope proxy)
847
+ if bids[0, 1] > asks[0, 1] * 2:
848
+ points += 1; details.append("TopBid>TopAsk*2")
849
+
850
+ top_bid_notional = float(bids[0, 0] * bids[0, 1])
851
+ # Dynamic whale detection vs median level notional (top 20)
852
+ level_notionals = (bids[:20, 0] * bids[:20, 1]).astype(float)
853
+ med_notional = float(np.median(level_notionals)) if len(level_notionals) else 0.0
854
+ if med_notional > 0 and (top_bid_notional / med_notional) >= 8.0:
855
+ points += 1; details.append(f"WhaleBid:{top_bid_notional/med_notional:.1f}x")
856
+
857
+ except Exception as e: details.append("OBErr")
858
+
859
+ norm_score = self._normalize(points, max_possible=15.0)
860
+ if include_details and details_pack is not None:
861
+ details_pack['order_book'] = details
862
+ if verbose: print(f" 📖 [ORDERBOOK] Score: {norm_score:.2f} | {', '.join(details)}")
863
+ return norm_score
864
+
865
+ # ==============================================================================
866
+ # 🔧 Utilities
867
+ # ==============================================================================
868
+ def _valid(self, item, col: Any = None) -> bool:
869
+ """Return True if item has a finite last value (Series) or at least one finite last-row value (DataFrame).
870
+ If col is provided and item is a DataFrame, checks that column's last value.
871
+ """
872
+ if item is None:
873
+ return False
874
+
875
+ # pandas_ta sometimes returns tuples (e.g., ichimoku)
876
+ if isinstance(item, tuple):
877
+ # consider valid if any element is valid
878
+ return any(self._valid(x, col=col) for x in item)
879
+
880
+ try:
881
+ if isinstance(item, pd.Series):
882
+ if item.empty:
883
+ return False
884
+ v = item.iloc[-1]
885
+ return pd.notna(v) and np.isfinite(v)
886
+
887
+ if isinstance(item, pd.DataFrame):
888
+ if item.empty:
889
+ return False
890
+ if col is not None:
891
+ c = self._find_col(item, [col]) or (col if col in item.columns else None)
892
+ if c is None:
893
+ return False
894
+ v = item[c].iloc[-1]
895
+ return pd.notna(v) and np.isfinite(v)
896
+ # any finite in last row
897
+ last = item.iloc[-1]
898
+ if isinstance(last, pd.Series):
899
+ vals = last.values.astype(float, copy=False)
900
+ return np.isfinite(vals).any()
901
+ return False
902
+
903
+ # scalars
904
+ if isinstance(item, (int, float, np.number)):
905
+ return np.isfinite(item)
906
+ return True
907
+
908
+ except Exception:
909
+ return False
910
+
911
+ def _find_col(self, df: pd.DataFrame, contains_any: List[str]) -> Any:
912
+ """Find first column whose name contains any of the provided substrings (case-insensitive)."""
913
+ if df is None or getattr(df, "empty", True):
914
+ return None
915
+ cols = list(df.columns)
916
+ lowered = [str(c).lower() for c in cols]
917
+ needles = [s.lower() for s in contains_any]
918
+ for n in needles:
919
+ for c, lc in zip(cols, lowered):
920
+ if n in lc:
921
+ return c
922
+ return None
923
+
924
+ def _safe_last(self, item, default=np.nan, col: Any = None) -> float:
925
+ """Safely get last finite value from Series/DataFrame (optionally from matched column)."""
926
+ if not self._valid(item, col=col):
927
+ return float(default)
928
+ try:
929
+ if isinstance(item, pd.Series):
930
+ return float(item.iloc[-1])
931
+ if isinstance(item, pd.DataFrame):
932
+ if col is None:
933
+ # pick first finite value in last row
934
+ last = item.iloc[-1]
935
+ for v in last.values:
936
+ if pd.notna(v) and np.isfinite(v):
937
+ return float(v)
938
+ return float(default)
939
+ c = self._find_col(item, [col]) or (col if col in item.columns else None)
940
+ if c is None:
941
+ return float(default)
942
+ return float(item[c].iloc[-1])
943
+ if isinstance(item, (int, float, np.number)):
944
+ return float(item)
945
+ return float(default)
946
+ except Exception:
947
+ return float(default)
948
+
949
+ def _normalize(self, value: float, max_possible: float) -> float:
950
+ if max_possible == 0: return 0.0
951
+ return max(-1.0, min(1.0, value / max_possible))
952
+
953
+ def _prepare_dataframe(self, ohlcv: List) -> pd.DataFrame:
954
+ df = pd.DataFrame(ohlcv, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
955
+ df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms')
956
+ df.set_index('timestamp', inplace=True)
957
+ cols = ['open', 'high', 'low', 'close', 'volume']
958
+ df[cols] = df[cols].astype(float)
959
+ return df
960
+
961
+ def _get_grade(self, score: float) -> str:
962
+ if score >= 85: return "ULTRA"
963
+ if score >= 70: return "STRONG"
964
+ if score >= 50: return "NORMAL"
965
+ if score >= 35: return "WEAK"
966
+ return "REJECT"
967
+
968
+ def _create_rejection(self, reason: str):
969
+ return {
970
+ "governance_score": 0.0,
971
+ "grade": "REJECT",
972
+ "status": "REJECTED",
973
+ "reason": reason,
974
+ "components": {}
975
+ }