Riy777 commited on
Commit
9b16c7d
·
verified ·
1 Parent(s): 3192b4e

Update ml_engine/processor.py

Browse files
Files changed (1) hide show
  1. ml_engine/processor.py +692 -361
ml_engine/processor.py CHANGED
@@ -1,393 +1,724 @@
1
  # ============================================================
2
- # 🧠 ml_engine/processor.py (V36.0 - GEM-Architect: The Cybernetic Processor)
3
  # ============================================================
4
 
5
  import asyncio
6
- import traceback
 
 
 
7
  import logging
 
8
  import os
 
 
9
  import sys
10
- import numpy as np
11
- from typing import Dict, Any, List, Optional
12
-
13
- # --- استيراد المحركات (كما هي) ---
14
- try: from .titan_engine import TitanEngine
15
- except ImportError: TitanEngine = None
16
- try: from .patterns import ChartPatternAnalyzer
17
- except ImportError: ChartPatternAnalyzer = None
18
- try: from .monte_carlo import MonteCarloEngine
19
- except ImportError: MonteCarloEngine = None
20
- try: from .oracle_engine import OracleEngine
21
- except ImportError: OracleEngine = None
22
- try: from .sniper_engine import SniperEngine
23
- except ImportError: SniperEngine = None
24
- try: from .hybrid_guardian import HybridDeepSteward
25
- except ImportError: HybridDeepSteward = None
26
- try: from .guardian_hydra import GuardianHydra
27
- except ImportError: GuardianHydra = None
28
 
29
- # ============================================================
30
- # 📂 مسارات النماذج
31
- # ============================================================
32
- BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
33
- MODELS_L2_DIR = os.path.join(BASE_DIR, "ml_models", "layer2")
34
- MODELS_PATTERN_DIR = os.path.join(BASE_DIR, "ml_models", "xgboost_pattern2")
35
- MODELS_UNIFIED_DIR = os.path.join(BASE_DIR, "ml_models", "Unified_Models_V1")
36
- MODELS_SNIPER_DIR = os.path.join(BASE_DIR, "ml_models", "guard_v2")
37
- MODELS_HYDRA_DIR = os.path.join(BASE_DIR, "ml_models", "guard_v1")
38
- MODEL_V2_PATH = os.path.join(BASE_DIR, "ml_models", "DeepSteward_V2_Production.json")
39
- MODEL_V3_PATH = os.path.join(BASE_DIR, "ml_models", "DeepSteward_V3_Production.json")
40
- MODEL_V3_FEAT = os.path.join(BASE_DIR, "ml_models", "DeepSteward_V3_Features.json")
 
41
 
42
  # ============================================================
43
- # 🎛️ SYSTEM LIMITS & THRESHOLDS (Dynamic DNA Container)
44
  # ============================================================
45
- class SystemLimits:
46
- """
47
- GEM-Architect: The Dynamic Constitution.
48
- ูŠุชู… ุชุญุฏูŠุซ ู‡ุฐู‡ ุงู„ู‚ูŠู… ุขู„ูŠุงู‹ ุจูˆุงุณุทุฉ AdaptiveHub ุจู†ุงุกู‹ ุนู„ู‰ ุญุงู„ุฉ ุงู„ุณูˆู‚ (Bull/Bear/etc).
49
- """
50
-
51
- # --- Layer 1 (Data Manager Control) ---
52
- L1_MIN_AFFINITY_SCORE = 15.0 # ุณูŠุชู… ุงู„ูƒุชุงุจุฉ ุนู„ูŠู‡ุง ู…ู† ุงู„ู€ DNA
53
-
54
- # --- Layer 2 Weights (Dynamic) ---
55
- # ู‡ุฐู‡ ุงู„ุฃูˆุฒุงู† ุชุชุบูŠุฑ ุญุณุจ ุฃุฏุงุก ุงู„ู†ู…ุงุฐุฌ (Tactical Loop) ูˆุญุงู„ุฉ ุงู„ุณูˆู‚
56
- L2_WEIGHT_TITAN = 0.40
57
- L2_WEIGHT_PATTERNS = 0.30
58
- L2_WEIGHT_MC = 0.10
59
- # (Hydra/Sniper ู‚ุฏ ูŠุณุงู‡ู…ูˆู† ููŠ L2 ุฃูˆ L4 ุญุณุจ ุงู„ุชุตู…ูŠู…)
60
-
61
- # ุฅุนุฏุงุฏุงุช ุงู„ุฃู†ู…ุงุท (ุชุชุบูŠุฑ ุญุณุจ ุงู„ุงุณุชุฑุงุชูŠุฌูŠุฉ)
62
- PATTERN_TF_WEIGHTS = {'15m': 0.40, '1h': 0.30, '5m': 0.20, '4h': 0.10, '1d': 0.00}
63
- PATTERN_THRESH_BULLISH = 0.60
64
- PATTERN_THRESH_BEARISH = 0.40
65
-
66
- # --- Layer 3 (Oracle) ---
67
- L3_CONFIDENCE_THRESHOLD = 0.65
68
- L3_WHALE_IMPACT_MAX = 0.10
69
- L3_NEWS_IMPACT_MAX = 0.05
70
- L3_MC_ADVANCED_MAX = 0.10
71
-
72
- # --- Layer 4 (Sniper & Execution) ---
73
- L4_ENTRY_THRESHOLD = 0.40
74
- # ุฃูˆุฒุงู† ุฏุงุฎู„ูŠุฉ ู„ู€ Sniper (ML vs OrderBook)
75
- L4_WEIGHT_ML = 0.60
76
- L4_WEIGHT_OB = 0.40
77
- # ู†ุณุจุฉ ุงู„ุฌุฏุงุฑ ุงู„ู…ุณู…ูˆุญ ุจู‡ุง (ุชุชุบูŠุฑ ุฌุฐุฑูŠุงู‹ ุจูŠู† Bull ูˆ Bear)
78
- L4_OB_WALL_RATIO = 0.40
79
-
80
- # --- Layer 0: Hydra & Guardian Thresholds ---
81
- HYDRA_CRASH_THRESH = 0.60
82
- HYDRA_GIVEBACK_THRESH = 0.70
83
- HYDRA_STAGNATION_THRESH = 0.50
84
-
85
- # Legacy Guard Thresholds
86
- LEGACY_V2_PANIC_THRESH = 0.95
87
- LEGACY_V3_HARD_THRESH = 0.95
88
- LEGACY_V3_SOFT_THRESH = 0.85
89
- LEGACY_V3_ULTRA_THRESH = 0.98
90
-
91
- @classmethod
92
- def to_dict(cls) -> Dict[str, Any]:
93
- return {k: v for k, v in cls.__dict__.items() if not k.startswith('__') and not callable(v)}
94
-
95
- @classmethod
96
- def update_from_dict(cls, config: Dict[str, Any]):
97
- """ุชุญุฏูŠุซ ุงู„ู‚ูŠู… ู…ู† AdaptiveHub"""
98
- if not config: return
99
- for k, v in config.items():
100
- if hasattr(cls, k):
101
- setattr(cls, k, v)
102
- # print(f"๐Ÿ”„ [SystemLimits] Updated. TitanW={cls.L2_WEIGHT_TITAN:.2f}, WallRatio={cls.L4_OB_WALL_RATIO}")
 
 
 
 
 
 
 
 
103
 
104
  # ============================================================
105
- # 🧠 MLProcessor Class
106
  # ============================================================
107
- class MLProcessor:
108
- def __init__(self, data_manager=None):
109
- self.data_manager = data_manager
110
- self.initialized = False
111
-
112
- self.titan = TitanEngine(model_dir=MODELS_L2_DIR) if TitanEngine else None
113
- self.pattern_engine = ChartPatternAnalyzer(models_dir=MODELS_PATTERN_DIR) if ChartPatternAnalyzer else None
114
- self.mc_analyzer = MonteCarloEngine() if MonteCarloEngine else None
115
- self.oracle = OracleEngine(model_dir=MODELS_UNIFIED_DIR) if OracleEngine else None
116
- self.sniper = SniperEngine(models_dir=MODELS_SNIPER_DIR) if SniperEngine else None
117
 
118
- self.guardian_hydra = None
119
- if GuardianHydra:
120
- self.guardian_hydra = GuardianHydra(model_dir=MODELS_HYDRA_DIR)
121
-
122
- self.guardian_legacy = None
123
- if HybridDeepSteward:
124
- self.guardian_legacy = HybridDeepSteward(
125
- v2_model_path=MODEL_V2_PATH,
126
- v3_model_path=MODEL_V3_PATH,
127
- v3_features_map_path=MODEL_V3_FEAT
128
- )
129
-
130
- print(f"🧠 [MLProcessor V36.0] Cybernetic Control Active.")
131
-
132
- async def initialize(self):
133
- if self.initialized: return
134
- print("⚙️ [Processor] Initializing Neural Grid...")
135
- try:
136
- tasks = []
137
- if self.titan: tasks.append(self.titan.initialize())
138
-
139
- if self.pattern_engine:
140
- # ุงู„ุชูƒูˆูŠู† ุงู„ุฃูˆู„ูŠ (ุณูŠุชู… ุชุญุฏูŠุซู‡ ู„ุงุญู‚ุงู‹ ุฏูŠู†ุงู…ูŠูƒูŠุงู‹)
141
- self.pattern_engine.configure_thresholds(
142
- weights=SystemLimits.PATTERN_TF_WEIGHTS,
143
- bull_thresh=SystemLimits.PATTERN_THRESH_BULLISH,
144
- bear_thresh=SystemLimits.PATTERN_THRESH_BEARISH
145
- )
146
- tasks.append(self.pattern_engine.initialize())
147
-
148
- if self.oracle:
149
- if hasattr(self.oracle, 'set_threshold'):
150
- self.oracle.set_threshold(SystemLimits.L3_CONFIDENCE_THRESHOLD)
151
- tasks.append(self.oracle.initialize())
152
 
153
- if self.sniper:
154
- # ุงู„ุชูƒูˆูŠู† ุงู„ุฃูˆู„ูŠ
155
- if hasattr(self.sniper, 'configure_settings'):
156
- self.sniper.configure_settings(
157
- threshold=SystemLimits.L4_ENTRY_THRESHOLD,
158
- wall_ratio=SystemLimits.L4_OB_WALL_RATIO,
159
- w_ml=SystemLimits.L4_WEIGHT_ML,
160
- w_ob=SystemLimits.L4_WEIGHT_OB
161
- )
162
- tasks.append(self.sniper.initialize())
163
 
164
- if tasks: await asyncio.gather(*tasks)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
165
 
166
- if self.guardian_hydra:
167
- self.guardian_hydra.initialize()
168
- print(" 🛡️ [Guard 1] Hydra X-Ray: Active")
169
-
170
- if self.guardian_legacy:
171
- if asyncio.iscoroutinefunction(self.guardian_legacy.initialize):
172
- await self.guardian_legacy.initialize()
173
- else:
174
- self.guardian_legacy.initialize()
175
-
176
- # ุชุทุจูŠู‚ ุงู„ุนุชุจุงุช ุงู„ู…ุจุฏุฆูŠุฉ
177
- self.guardian_legacy.configure_thresholds(
178
- v2_panic=SystemLimits.LEGACY_V2_PANIC_THRESH,
179
- v3_hard=SystemLimits.LEGACY_V3_HARD_THRESH,
180
- v3_soft=SystemLimits.LEGACY_V3_SOFT_THRESH,
181
- v3_ultra=SystemLimits.LEGACY_V3_ULTRA_THRESH
182
- )
183
- print(f" 🛡️ [Guard 2] Legacy Steward: Active")
184
-
185
- self.initialized = True
186
- print("✅ [Processor] All Systems Operational.")
187
-
188
- except Exception as e:
189
- print(f"โŒ [Processor FATAL] Init failed: {e}")
190
- traceback.print_exc()
191
-
192
- async def process_compound_signal(self, raw_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
193
- """
194
- L2 Processing:
195
- ู‡ู†ุง ูŠุชู… ุฏู…ุฌ ุงู„ุฏุฑุฌุงุช ุจู†ุงุกู‹ ุนู„ู‰ ุงู„ุฃูˆุฒุงู† ุงู„ุฏูŠู†ุงู…ูŠูƒูŠุฉ ุงู„ุญุงู„ูŠุฉ (SystemLimits).
196
- """
197
- if not self.initialized: await self.initialize()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
198
 
199
- symbol = raw_data.get('symbol')
200
- ohlcv_data = raw_data.get('ohlcv')
201
- current_price = raw_data.get('current_price', 0.0)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
202
 
203
- if not symbol or not ohlcv_data: return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
204
 
205
- try:
206
- # 1. Titan Engine
207
- score_titan = 0.5
208
- titan_res = {}
209
- if self.titan:
210
- titan_res = await asyncio.to_thread(self.titan.predict, ohlcv_data)
211
- score_titan = titan_res.get('score', 0.5)
212
-
213
- # 2. Pattern Engine (Inject Dynamic Config First)
214
- score_patterns = 0.5
215
- pattern_res = {}
216
- pattern_name = "Neutral"
217
- if self.pattern_engine:
218
- # ุชุญุฏูŠุซ ุงู„ุชูƒูˆูŠู† ู‚ุจู„ ุงู„ุชุญู„ูŠู„ ู„ุถู…ุงู† ุงุณุชุฎุฏุงู… ุฃุญุฏุซ ุฃูˆุฒุงู† ุงู„ุงุณุชุฑุงุชูŠุฌูŠุฉ
219
- self.pattern_engine.configure_thresholds(
220
- weights=SystemLimits.PATTERN_TF_WEIGHTS,
221
- bull_thresh=SystemLimits.PATTERN_THRESH_BULLISH,
222
- bear_thresh=SystemLimits.PATTERN_THRESH_BEARISH
223
- )
224
- pattern_res = await self.pattern_engine.detect_chart_patterns(ohlcv_data)
225
- score_patterns = pattern_res.get('pattern_confidence', 0.5)
226
- pattern_name = pattern_res.get('pattern_detected', 'Neutral')
227
-
228
- # 3. Monte Carlo (Light)
229
- mc_score = 0.5
230
- if self.mc_analyzer and '1h' in ohlcv_data:
231
- closes = [c[4] for c in ohlcv_data['1h']]
232
- raw_mc = self.mc_analyzer.run_light_check(closes)
233
- mc_score = 0.5 + (raw_mc * 5.0)
234
- mc_score = max(0.0, min(1.0, mc_score))
235
-
236
- # 4. Hybrid Calculation (Using Dynamic Weights from SystemLimits)
237
- # ู‡ุฐู‡ ุงู„ุฃูˆุฒุงู† ูŠุชู… ุชุญุฏูŠุซู‡ุง ุจูˆุงุณุทุฉ AdaptiveHub
238
- w_titan = SystemLimits.L2_WEIGHT_TITAN
239
- w_patt = SystemLimits.L2_WEIGHT_PATTERNS
240
- w_mc = SystemLimits.L2_WEIGHT_MC
241
-
242
- # ุชุทุจูŠุน ุงู„ุฃูˆุฒุงู† (ู„ุถู…ุงู† ุฃู† ู…ุฌู…ูˆุนู‡ุง 1.0 ุชู‚ุฑูŠุจุงู‹)
243
- total_w = w_titan + w_patt + w_mc
244
- if total_w <= 0: total_w = 1.0
245
-
246
- hybrid_score = ((score_titan * w_titan) + (score_patterns * w_patt) + (mc_score * w_mc)) / total_w
247
-
248
- return {
249
- 'symbol': symbol,
250
- 'current_price': current_price,
251
- 'enhanced_final_score': hybrid_score,
252
- 'titan_score': score_titan,
253
- 'patterns_score': score_patterns,
254
- 'mc_score': mc_score,
255
- 'components': {
256
- 'titan_score': score_titan,
257
- 'patterns_score': score_patterns,
258
- 'mc_score': mc_score
259
- },
260
- 'pattern_name': pattern_name,
261
- 'ohlcv': ohlcv_data,
262
- 'titan_details': titan_res,
263
- 'pattern_details': pattern_res.get('details', {})
264
- }
265
- except Exception as e:
266
- print(f"โŒ [Processor] Error processing {symbol}: {e}")
267
- return None
268
-
269
- async def consult_oracle(self, symbol_data: Dict[str, Any]) -> Dict[str, Any]:
270
- """
271
- L3 Processing:
272
- Oracle ูŠุณุชุฎุฏู… ุงู„ุนุชุจุฉ ุงู„ุญุงู„ูŠุฉ ู…ู† SystemLimits.
273
- """
274
- if not self.initialized: await self.initialize()
275
 
276
- if self.oracle:
277
- # ุชุญุฏูŠุซ ุงู„ุนุชุจุฉ ุฏูŠู†ุงู…ูŠูƒูŠุงู‹
278
- if hasattr(self.oracle, 'set_threshold'):
279
- self.oracle.set_threshold(SystemLimits.L3_CONFIDENCE_THRESHOLD)
280
-
281
- decision = await self.oracle.predict(symbol_data)
282
- conf = decision.get('confidence', 0.0)
283
-
284
- # ุทุจู‚ุฉ ุฃู…ุงู† ุฅุถุงููŠุฉ (Redundant Safety)
285
- if decision.get('action') in ['WATCH', 'BUY'] and conf < SystemLimits.L3_CONFIDENCE_THRESHOLD:
286
- decision['action'] = 'WAIT'
287
- decision['reason'] = f"Processor Veto: Conf {conf:.2f} < Limit {SystemLimits.L3_CONFIDENCE_THRESHOLD}"
 
288
 
289
- return decision
290
- return {'action': 'WAIT', 'reason': 'Oracle Engine Missing'}
291
-
292
- async def check_sniper_entry(self, ohlcv_1m_data: List, order_book_data: Dict[str, Any]) -> Dict[str, Any]:
293
- """
294
- L4 Processing:
295
- Sniper ูŠุณุชู„ู… ุฅุนุฏุงุฏุงุช ุฏูุชุฑ ุงู„ุทู„ุจุงุช (DNA) ู‚ุจู„ ุงุชุฎุงุฐ ุงู„ู‚ุฑุงุฑ.
296
- """
297
- if not self.initialized: await self.initialize()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
298
 
299
- if self.sniper:
300
- # โœ… ุงู„ุญู‚ู† ุงู„ุฏูŠู†ุงู…ูŠูƒูŠ ู„ุฅุนุฏุงุฏุงุช Sniper ู‚ุจู„ ุงู„ุชู†ููŠุฐ
301
- if hasattr(self.sniper, 'configure_settings'):
302
- self.sniper.configure_settings(
303
- threshold=SystemLimits.L4_ENTRY_THRESHOLD,
304
- wall_ratio=SystemLimits.L4_OB_WALL_RATIO,
305
- w_ml=SystemLimits.L4_WEIGHT_ML,
306
- w_ob=SystemLimits.L4_WEIGHT_OB
307
- )
308
-
309
- return await self.sniper.check_entry_signal_async(ohlcv_1m_data, order_book_data)
310
-
311
- return {'signal': 'WAIT', 'reason': 'Sniper Engine Missing'}
312
-
313
- def consult_dual_guardians(self, symbol, ohlcv_1m, ohlcv_5m, ohlcv_15m, trade_context, order_book_snapshot=None):
314
- """
315
- L0 Guardians:
316
- ุงู„ุญุฑุงุณ ูŠุณุชุฎุฏู…ูˆู† ุงู„ุนุชุจุงุช ุงู„ุฏูŠู†ุงู…ูŠูƒูŠุฉ (Panic Thresholds) ุงู„ุชูŠ ู‚ุฏ ุชุฎุชู„ู ุจูŠู† Bull ูˆ Bear.
317
- """
318
- response = {'action': 'HOLD', 'detailed_log': '', 'probs': {}}
319
 
320
- # 1. Hydra
321
- hydra_result = {'action': 'HOLD', 'reason': 'Disabled', 'probs': {}}
322
- if self.guardian_hydra and self.guardian_hydra.initialized:
323
- hydra_result = self.guardian_hydra.analyze_position(symbol, ohlcv_1m, ohlcv_5m, ohlcv_15m, trade_context)
324
- h_probs = hydra_result.get('probs', {})
325
- p_crash = h_probs.get('crash', 0.0)
326
- p_giveback = h_probs.get('giveback', 0.0)
327
-
328
- # ุงุณุชุฎุฏุงู… ุงู„ุนุชุจุงุช ุงู„ุญูŠุฉ
329
- if hydra_result['action'] == 'HOLD':
330
- if p_crash >= SystemLimits.HYDRA_CRASH_THRESH:
331
- hydra_result['action'] = 'EXIT_HARD'
332
- hydra_result['reason'] = f"Hydra Crash Risk {p_crash:.2f}"
333
- elif p_giveback >= SystemLimits.HYDRA_GIVEBACK_THRESH:
334
- hydra_result['action'] = 'EXIT_SOFT'
335
- hydra_result['reason'] = f"Hydra Giveback Risk {p_giveback:.2f}"
336
 
337
- # 2. Legacy (Volume-Aware Veto)
338
- legacy_result = {'action': 'HOLD', 'reason': 'Disabled', 'scores': {}}
339
- if self.guardian_legacy and self.guardian_legacy.initialized:
340
- # ุชุญุฏูŠุซ ุงู„ุนุชุจุงุช ู‚ุจู„ ุงู„ุชุญู„ูŠู„
341
- self.guardian_legacy.configure_thresholds(
342
- v2_panic=SystemLimits.LEGACY_V2_PANIC_THRESH,
343
- v3_hard=SystemLimits.LEGACY_V3_HARD_THRESH,
344
- v3_soft=SystemLimits.LEGACY_V3_SOFT_THRESH,
345
- v3_ultra=SystemLimits.LEGACY_V3_ULTRA_THRESH
346
- )
347
 
348
- entry_price = float(trade_context.get('entry_price', 0.0))
349
- vol_30m = trade_context.get('volume_30m_usd', 0.0)
 
350
 
351
- legacy_result = self.guardian_legacy.analyze_position(
352
- ohlcv_1m, ohlcv_5m, ohlcv_15m, entry_price,
353
- order_book=order_book_snapshot,
354
- volume_30m_usd=vol_30m
355
- )
356
-
357
- # 3. Final Arbitration & Display
358
- h_probs = hydra_result.get('probs', {})
359
- l_scores = legacy_result.get('scores', {})
360
-
361
- h_c = h_probs.get('crash', 0.0)
362
- h_g = h_probs.get('giveback', 0.0)
363
- h_s = h_probs.get('stagnation', 0.0)
364
- l_v2 = l_scores.get('v2', 0.0)
365
- l_v3 = l_scores.get('v3', 0.0)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
366
 
367
- stamp_str = f"๐Ÿฒ[C:{h_c:.0%}|G:{h_g:.0%}|S:{h_s:.0%}] ๐Ÿ•ธ๏ธ[V2:{l_v2:.0%}|V3:{l_v3:.0%}]"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
368
 
369
- final_action = 'HOLD'
370
- final_reason = f"Safe. {stamp_str}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
371
 
372
- if hydra_result['action'] in ['EXIT_HARD', 'EXIT_SOFT', 'TIGHTEN_SL', 'TRAIL_SL']:
373
- final_action = hydra_result['action']
374
- final_reason = f"๐Ÿฒ HYDRA: {hydra_result['reason']} | {stamp_str}"
375
- elif legacy_result['action'] in ['EXIT_HARD', 'EXIT_SOFT']:
376
- final_action = legacy_result['action']
377
- final_reason = f"🕸️ LEGACY: {legacy_result['reason']} | {stamp_str}"
378
-
379
- return {
380
- 'action': final_action,
381
- 'reason': final_reason,
382
- 'detailed_log': f"{final_action} | {stamp_str}",
383
- 'probs': h_probs,
384
- 'scores': l_scores
385
- }
 
 
386
 
387
- async def run_advanced_monte_carlo(self, symbol, timeframe='1h'):
388
- if self.mc_analyzer and self.data_manager:
389
- try:
390
- ohlcv = await self.data_manager.get_latest_ohlcv(symbol, timeframe, limit=300)
391
- if ohlcv: return self.mc_analyzer.run_advanced_simulation([c[4] for c in ohlcv])
392
- except Exception: pass
393
- return 0.0
 
1
  # ============================================================
2
+ # 🧪 backtest_engine.py (V145.0 - GEM-Architect: Patterns Integrated)
3
  # ============================================================
4
 
5
  import asyncio
6
+ import pandas as pd
7
+ import numpy as np
8
+ import pandas_ta as ta
9
+ import time
10
  import logging
11
+ import itertools
12
  import os
13
+ import glob
14
+ import gc
15
  import sys
16
+ import traceback
17
+ from numpy.lib.stride_tricks import sliding_window_view
18
+ from datetime import datetime, timezone
19
+ from typing import Dict, Any, List
20
+ from scipy.special import expit
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
+ try:
23
+ from ml_engine.processor import MLProcessor, SystemLimits
24
+ from ml_engine.data_manager import DataManager
25
+ from learning_hub.adaptive_hub import StrategyDNA, AdaptiveHub
26
+ from r2 import R2Service
27
+ import ccxt.async_support as ccxt
28
+ import xgboost as xgb
29
+ import lightgbm as lgb
30
+ except ImportError:
31
+ pass
32
+
33
+ logging.getLogger('ml_engine').setLevel(logging.WARNING)
34
+ CACHE_DIR = "backtest_real_scores"
35
 
36
  # ============================================================
37
+ # 🛡️ GLOBAL HELPERS
38
  # ============================================================
39
+ def sanitize_features(df):
40
+ if df is None or df.empty: return df
41
+ return df.replace([np.inf, -np.inf], np.nan).ffill().bfill().fillna(0.0)
42
+
43
+ def _z_roll(x, w=500):
44
+ if not isinstance(x, pd.Series): x = pd.Series(x)
45
+ r = x.rolling(w).mean()
46
+ s = x.rolling(w).std().replace(0, np.nan)
47
+ return ((x - r) / s).fillna(0)
48
+
49
+ def _revive_score_distribution(scores):
50
+ scores = np.array(scores, dtype=np.float32).flatten()
51
+ if len(scores) < 10: return scores
52
+ s_min, s_max = np.min(scores), np.max(scores)
53
+ std = np.std(scores)
54
+ if std < 0.001: return scores
55
+ if s_max < 0.8 or s_min > 0.2:
56
+ return (scores - s_min) / (s_max - s_min + 1e-9)
57
+ return scores
58
+
59
+ def safe_ta(ind_output, index, fill_method='smart'):
60
+ if ind_output is None:
61
+ return pd.Series(0.0, index=index, dtype='float64')
62
+ if not isinstance(ind_output, pd.Series):
63
+ s = pd.Series(ind_output, index=index)
64
+ else:
65
+ s = ind_output
66
+ s = s.bfill().ffill()
67
+ return s.fillna(0.0).astype('float64')
68
+
69
+ def _zv(x):
70
+ with np.errstate(divide='ignore', invalid='ignore'):
71
+ x = np.asarray(x, dtype="float32")
72
+ m = np.nanmean(x, axis=0)
73
+ s = np.nanstd(x, axis=0) + 1e-9
74
+ return np.nan_to_num((x - m) / s, nan=0.0)
75
+
76
+ def _transform_window_for_pattern(df_window):
77
+ try:
78
+ c = df_window['close'].values.astype('float32')
79
+ o = df_window['open'].values.astype('float32')
80
+ h = df_window['high'].values.astype('float32')
81
+ l = df_window['low'].values.astype('float32')
82
+ v = df_window['volume'].values.astype('float32')
83
+ base = np.stack([o, h, l, c, v], axis=1)
84
+ base_z = _zv(base)
85
+ lr = np.zeros_like(c); lr[1:] = np.diff(np.log1p(c))
86
+ rng = (h - l) / (c + 1e-9)
87
+ extra = np.stack([lr, rng], axis=1)
88
+ extra_z = _zv(extra)
89
+ def _ema(arr, n): return pd.Series(arr).ewm(span=n, adjust=False).mean().values
90
+ ema9 = _ema(c, 9); ema21 = _ema(c, 21); ema50 = _ema(c, 50); ema200 = _ema(c, 200)
91
+ slope21 = np.gradient(ema21); slope50 = np.gradient(ema50)
92
+ delta = np.diff(c, prepend=c[0])
93
+ up, down = delta.copy(), delta.copy()
94
+ up[up < 0] = 0; down[down > 0] = 0
95
+ roll_up = pd.Series(up).ewm(alpha=1/14, adjust=False).mean().values
96
+ roll_down = pd.Series(down).abs().ewm(alpha=1/14, adjust=False).mean().values
97
+ rs = roll_up / (roll_down + 1e-9)
98
+ rsi = 100.0 - (100.0 / (1.0 + rs))
99
+ indicators = np.stack([ema9, ema21, ema50, ema200, slope21, slope50, rsi], axis=1)
100
+ X_seq = np.concatenate([base_z, extra_z, _zv(indicators)], axis=1)
101
+ X_flat = X_seq.flatten()
102
+ X_stat = np.array([0.5, 0.0, 0.5], dtype="float32")
103
+ return np.concatenate([X_flat, X_stat])
104
+ except: return None
105
 
106
  # ============================================================
107
+ # 🧪 THE BACKTESTER CLASS
108
  # ============================================================
109
+ class HeavyDutyBacktester:
110
+ def __init__(self, data_manager, processor):
111
+ self.dm = data_manager
112
+ self.proc = processor
113
+ self.GRID_DENSITY = 5
114
+ self.INITIAL_CAPITAL = 10.0
115
+ self.TRADING_FEES = 0.001
116
+ self.MAX_SLOTS = 4
 
 
117
 
118
+ # ==============================================================================
119
+ # ๐ŸŽ›๏ธ CONTROL PANEL: FULL GRANULARITY (Including Patterns)
120
+ # ==============================================================================
121
+ self.GRID_RANGES = {
122
+ # --- Entry Models ---
123
+ 'TITAN': [0.10, 0.50],
124
+ 'ORACLE': np.linspace(0.4, 0.7, 3),
125
+ 'SNIPER': np.linspace(0.1, 0.7, 3),
126
+ 'PATTERN': [0.10, 0.50], # ✅ ADDED: Pattern Threshold
127
+ 'L1_SCORE': [10.0],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
 
129
+ # --- Hydra Heads (Exit) ---
130
+ 'HYDRA_CRASH': [0.50, 0.80],
131
+ 'HYDRA_GIVEBACK': [0.20, 0.80],
 
 
 
 
 
 
 
132
 
133
+ # --- Legacy Heads (Exit) ---
134
+ 'LEGACY_V2': [0.50, 0.95],
135
+ 'LEGACY_V3': [0.50, 0.95]
136
+ }
137
+ # ==============================================================================
138
+
139
+ self.TARGET_COINS = [
140
+ 'SOL/USDT', 'XRP/USDT', 'DOGE/USDT', 'ADA/USDT', 'AVAX/USDT', 'LINK/USDT',
141
+ 'TON/USDT', 'INJ/USDT', 'APT/USDT', 'OP/USDT', 'ARB/USDT', 'SUI/USDT',
142
+ 'SEI/USDT', 'TIA/USDT', 'MATIC/USDT', 'NEAR/USDT', 'RUNE/USDT', 'PYTH/USDT',
143
+ 'WIF/USDT', 'PEPE/USDT', 'SHIB/USDT', 'TRX/USDT', 'DOT/USDT', 'UNI/USDT',
144
+ 'ONDO/USDT', 'ENA/USDT', 'HBAR/USDT', 'XLM/USDT', 'TAO/USDT', 'ZK/USDT',
145
+ 'ZRO/USDT', 'KCS/USDT', 'ICP/USDT', 'SAND/USDT', 'AXS/USDT', 'APE/USDT',
146
+ 'GMT/USDT', 'CHZ/USDT', 'CFX/USDT', 'LDO/USDT', 'FET/USDT', 'JTO/USDT',
147
+ 'STRK/USDT', 'BLUR/USDT', 'ALT/USDT', 'JUP/USDT', 'PENDLE/USDT', 'ETHFI/USDT',
148
+ 'MEME/USDT', 'ATOM/USDT'
149
+ ]
150
+
151
+ self.force_start_date = None
152
+ self.force_end_date = None
153
+
154
+ if not os.path.exists(CACHE_DIR): os.makedirs(CACHE_DIR)
155
+ print(f"🧪 [Backtest V145.0] Full Stack (Titan + Patterns + Oracle + Sniper + Hydra + Legacy).")
156
 
157
+ def set_date_range(self, start_str, end_str):
158
+ self.force_start_date = start_str
159
+ self.force_end_date = end_str
160
+
161
+ async def _fetch_all_data_fast(self, sym, start_ms, end_ms):
162
+ print(f" ⚡ [Network] Downloading {sym}...", flush=True)
163
+ limit = 1000
164
+ tasks = []
165
+ current = start_ms
166
+ duration_per_batch = limit * 60 * 1000
167
+ while current < end_ms:
168
+ tasks.append(current)
169
+ current += duration_per_batch
170
+ all_candles = []
171
+ sem = asyncio.Semaphore(20)
172
+
173
+ async def _fetch_batch(timestamp):
174
+ async with sem:
175
+ for _ in range(3):
176
+ try:
177
+ return await self.dm.exchange.fetch_ohlcv(sym, '1m', since=timestamp, limit=limit)
178
+ except: await asyncio.sleep(0.5)
179
+ return []
180
+
181
+ chunk_size = 50
182
+ for i in range(0, len(tasks), chunk_size):
183
+ chunk_tasks = tasks[i:i + chunk_size]
184
+ futures = [_fetch_batch(ts) for ts in chunk_tasks]
185
+ results = await asyncio.gather(*futures)
186
+ for res in results:
187
+ if res: all_candles.extend(res)
188
+
189
+ if not all_candles: return None
190
+ df = pd.DataFrame(all_candles, columns=['timestamp', 'o', 'h', 'l', 'c', 'v'])
191
+ df.drop_duplicates('timestamp', inplace=True)
192
+ df = df[(df['timestamp'] >= start_ms) & (df['timestamp'] <= end_ms)]
193
+ df.sort_values('timestamp', inplace=True)
194
+ print(f" ✅ Downloaded {len(df)} candles.", flush=True)
195
+ return df.values.tolist()
196
+
197
+ # ==============================================================
198
+ # ๐ŸŽ๏ธ VECTORIZED INDICATORS
199
+ # ==============================================================
200
+ def _calculate_indicators_vectorized(self, df, timeframe='1m'):
201
+ cols = ['close', 'high', 'low', 'volume', 'open']
202
+ for c in cols: df[c] = df[c].astype(np.float64)
203
+ idx = df.index
204
 
205
+ df['RSI'] = safe_ta(ta.rsi(df['close'], length=14), idx, 50)
206
+ macd = ta.macd(df['close'])
207
+ if macd is not None:
208
+ df['MACD'] = safe_ta(macd.iloc[:, 0], idx, 0)
209
+ df['MACD_h'] = safe_ta(macd.iloc[:, 1], idx, 0)
210
+ else: df['MACD'] = 0.0; df['MACD_h'] = 0.0
211
+ df['CCI'] = safe_ta(ta.cci(df['high'], df['low'], df['close'], length=20), idx, 0)
212
+ adx = ta.adx(df['high'], df['low'], df['close'], length=14)
213
+ if adx is not None: df['ADX'] = safe_ta(adx.iloc[:, 0], idx, 0)
214
+ else: df['ADX'] = 0.0
215
+ if timeframe == '1d': df['Trend_Strong'] = np.where(df['ADX'] > 25, 1.0, 0.0)
216
+ for p in [9, 21, 50, 200]:
217
+ ema = safe_ta(ta.ema(df['close'], length=p), idx, 0)
218
+ df[f'EMA_{p}_dist'] = ((df['close'] / ema.replace(0, np.nan)) - 1).fillna(0)
219
+ df[f'ema{p}'] = ema
220
+ df['ema20'] = safe_ta(ta.ema(df['close'], length=20), idx, df['close'])
221
+ bb = ta.bbands(df['close'], length=20, std=2.0)
222
+ if bb is not None:
223
+ w = ((bb.iloc[:, 2] - bb.iloc[:, 0]) / bb.iloc[:, 1].replace(0, np.nan)).fillna(0)
224
+ p = ((df['close'] - bb.iloc[:, 0]) / (bb.iloc[:, 2] - bb.iloc[:, 0]).replace(0, np.nan)).fillna(0)
225
+ df['BB_w'] = w; df['BB_p'] = p; df['bb_width'] = w
226
+ else: df['BB_w'] = 0; df['BB_p'] = 0; df['bb_width'] = 0
227
+ df['MFI'] = safe_ta(ta.mfi(df['high'], df['low'], df['close'], df['volume'], length=14), idx, 50)
228
+ vwap = ta.vwap(df['high'], df['low'], df['close'], df['volume'])
229
+ if vwap is not None:
230
+ df['VWAP_dist'] = ((df['close'] / vwap.replace(0, np.nan)) - 1).fillna(0)
231
+ df['vwap'] = vwap
232
+ else: df['VWAP_dist'] = 0.0; df['vwap'] = df['close']
233
+ df['atr'] = safe_ta(ta.atr(df['high'], df['low'], df['close'], length=14), idx, 0)
234
+ df['atr_pct'] = (df['atr'] / df['close'].replace(0, np.nan)).fillna(0)
235
+ df['ATR_pct'] = df['atr_pct']
236
+
237
+ if timeframe == '1m':
238
+ df['return_1m'] = df['close'].pct_change().fillna(0)
239
+ df['return_3m'] = df['close'].pct_change(3).fillna(0)
240
+ df['return_5m'] = df['close'].pct_change(5).fillna(0)
241
+ df['return_15m'] = df['close'].pct_change(15).fillna(0)
242
+ df['rsi_14'] = df['RSI']
243
+ e9 = df['ema9'].replace(0, np.nan)
244
+ df['ema_9_slope'] = ((df['ema9'] - df['ema9'].shift(1)) / e9.shift(1)).fillna(0)
245
+ df['ema_21_dist'] = df['EMA_21_dist']
246
+ atr_100 = safe_ta(ta.atr(df['high'], df['low'], df['close'], length=100), idx, 0)
247
+ df['atr_z'] = _z_roll(atr_100)
248
+ df['vol_zscore_50'] = _z_roll(df['volume'], 50)
249
+ rng = (df['high'] - df['low']).replace(0, 1e-9)
250
+ df['candle_range'] = _z_roll(rng, 500)
251
+ df['close_pos_in_range'] = ((df['close'] - df['low']) / rng).fillna(0.5)
252
+ df['dollar_vol'] = df['close'] * df['volume']
253
+ amihud_raw = (df['return_1m'].abs() / df['dollar_vol'].replace(0, np.nan)).fillna(0)
254
+ df['amihud'] = _z_roll(amihud_raw)
255
+ dp = df['close'].diff()
256
+ roll_cov = dp.rolling(64).cov(dp.shift(1))
257
+ roll_spread_raw = (2 * np.sqrt(np.maximum(0, -roll_cov))).fillna(0)
258
+ df['roll_spread'] = _z_roll(roll_spread_raw)
259
+ sign = np.sign(df['close'].diff()).fillna(0)
260
+ signed_vol = sign * df['volume']
261
+ ofi_raw = signed_vol.rolling(30).sum().fillna(0)
262
+ df['ofi'] = _z_roll(ofi_raw)
263
+ buy_vol = (sign > 0) * df['volume']
264
+ sell_vol = (sign < 0) * df['volume']
265
+ imb = (buy_vol.rolling(60).sum() - sell_vol.rolling(60).sum()).abs()
266
+ tot = df['volume'].rolling(60).sum().replace(0, np.nan)
267
+ df['vpin'] = (imb / tot).fillna(0)
268
+ vwap_win = 20
269
+ v_short = (df['dollar_vol'].rolling(vwap_win).sum() / df['volume'].rolling(vwap_win).sum().replace(0, np.nan)).fillna(df['close'])
270
+ df['vwap_dev'] = _z_roll(df['close'] - v_short)
271
+ rv_gk = ((np.log(df['high'] / df['low'])**2) / 2) - ((2 * np.log(2) - 1) * (np.log(df['close'] / df['open'])**2))
272
+ df['rv_gk'] = _z_roll(rv_gk)
273
+ df['L_score'] = (df['vol_zscore_50'] - df['amihud'] - df['roll_spread'] - df['rv_gk'].abs() - df['vwap_dev'].abs() + df['ofi']).fillna(0)
274
+
275
+ df['slope'] = safe_ta(ta.slope(df['close'], length=7), idx, 0)
276
+ vol_mean = df['volume'].rolling(20).mean()
277
+ vol_std = df['volume'].rolling(20).std().replace(0, np.nan)
278
+ df['vol_z'] = ((df['volume'] - vol_mean) / vol_std).fillna(0)
279
+ df['rel_vol'] = df['volume'] / (df['volume'].rolling(50).mean() + 1e-9)
280
+ df['log_ret'] = np.log(df['close'] / df['close'].shift(1).replace(0, np.nan)).fillna(0)
281
+ roll_max = df['high'].rolling(50).max(); roll_min = df['low'].rolling(50).min()
282
+ diff = (roll_max - roll_min).replace(0, 1e-9)
283
+ df['fib_pos'] = ((df['close'] - roll_min) / diff).fillna(0.5)
284
+ e20_s = df['ema20'].shift(5).replace(0, np.nan)
285
+ df['trend_slope'] = ((df['ema20'] - df['ema20'].shift(5)) / e20_s).fillna(0)
286
+ df['volatility'] = (df['atr'] / df['close'].replace(0, np.nan)).fillna(0)
287
+ fib618 = roll_max - (diff * 0.382)
288
+ df['dist_fib618'] = ((df['close'] - fib618) / df['close'].replace(0, np.nan)).fillna(0)
289
+ df['dist_ema50'] = ((df['close'] - df['ema50']) / df['ema50'].replace(0, np.nan)).fillna(0)
290
+ e200 = safe_ta(ta.ema(df['close'], length=200), idx, df['close'])
291
+ df['ema200'] = e200
292
+ df['dist_ema200'] = ((df['close'] - e200) / e200.replace(0, np.nan)).fillna(0)
293
+ if timeframe == '1m':
294
+ for lag in [1, 2, 3, 5, 10, 20]:
295
+ df[f'log_ret_lag_{lag}'] = df['log_ret'].shift(lag).fillna(0)
296
+ df[f'rsi_lag_{lag}'] = (df['RSI'].shift(lag) / 100.0).fillna(0.5)
297
+ df[f'fib_pos_lag_{lag}'] = df['fib_pos'].shift(lag).fillna(0.5)
298
+ df[f'volatility_lag_{lag}'] = df['volatility'].shift(lag).fillna(0)
299
+ df.fillna(0, inplace=True)
300
+ return df
301
+
302
+ # ==============================================================
303
+ # 🧠 CPU PROCESSING
304
+ # ==============================================================
305
+ async def _process_data_in_memory(self, sym, candles, start_ms, end_ms):
306
+ safe_sym = sym.replace('/', '_')
307
+ period_suffix = f"{start_ms}_{end_ms}"
308
+ scores_file = f"{CACHE_DIR}/{safe_sym}_{period_suffix}_scores.pkl"
309
+ if os.path.exists(scores_file):
310
+ print(f" 📂 [{sym}] Data Exists -> Skipping.")
311
+ return
312
+
313
+ print(f" ⚙️ [CPU] Analyzing {sym}...", flush=True)
314
+ t0 = time.time()
315
+
316
+ df_1m = pd.DataFrame(candles, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
317
+ df_1m['datetime'] = pd.to_datetime(df_1m['timestamp'], unit='ms')
318
+ df_1m.set_index('datetime', inplace=True)
319
+ df_1m = df_1m.sort_index()
320
+
321
+ frames = {}
322
+ frames['1m'] = self._calculate_indicators_vectorized(df_1m.copy(), timeframe='1m')
323
+ frames['1m']['timestamp'] = frames['1m'].index.floor('1min').astype(np.int64) // 10**6
324
+ fast_1m = {col: frames['1m'][col].values for col in frames['1m'].columns}
325
 
326
+ agg_dict = {'open': 'first', 'high': 'max', 'low': 'min', 'close': 'last', 'volume': 'sum'}
327
+ numpy_htf = {}
328
+ for tf_str, tf_code in [('5m', '5T'), ('15m', '15T'), ('1h', '1h'), ('4h', '4h'), ('1d', '1D')]:
329
+ resampled = df_1m.resample(tf_code).agg(agg_dict).dropna()
330
+ resampled = self._calculate_indicators_vectorized(resampled, timeframe=tf_str)
331
+ resampled['timestamp'] = resampled.index.astype(np.int64) // 10**6
332
+ frames[tf_str] = resampled
333
+ numpy_htf[tf_str] = {col: resampled[col].values for col in resampled.columns}
334
+
335
+ arr_ts_1m = fast_1m['timestamp']
336
+ map_5m = np.clip(np.searchsorted(numpy_htf['5m']['timestamp'], arr_ts_1m), 0, len(numpy_htf['5m']['timestamp']) - 1)
337
+ map_15m = np.clip(np.searchsorted(numpy_htf['15m']['timestamp'], arr_ts_1m), 0, len(numpy_htf['15m']['timestamp']) - 1)
338
+ map_1h = np.clip(np.searchsorted(numpy_htf['1h']['timestamp'], arr_ts_1m), 0, len(numpy_htf['1h']['timestamp']) - 1)
339
+ map_4h = np.clip(np.searchsorted(numpy_htf['4h']['timestamp'], arr_ts_1m), 0, len(numpy_htf['4h']['timestamp']) - 1)
340
+ map_1d = np.clip(np.searchsorted(numpy_htf['1d']['timestamp'], arr_ts_1m), 0, len(numpy_htf['1d']['timestamp']) - 1) if '1d' in numpy_htf else np.zeros(len(arr_ts_1m), dtype=int)
341
+
342
+ hydra_models = getattr(self.proc.guardian_hydra, 'models', {}) if self.proc.guardian_hydra else {}
343
+ hydra_cols = getattr(self.proc.guardian_hydra, 'feature_cols', []) if self.proc.guardian_hydra else []
344
+ legacy_v2 = getattr(self.proc.guardian_legacy, 'model_v2', None)
345
+ legacy_v3 = getattr(self.proc.guardian_legacy, 'model_v3', None)
346
+ v3_feat_names = getattr(self.proc.guardian_legacy, 'v3_feature_names', [])
347
+ oracle_dir = getattr(self.proc.oracle, 'model_direction', None)
348
+ oracle_cols = getattr(self.proc.oracle, 'feature_cols', [])
349
+ sniper_models = getattr(self.proc.sniper, 'models', [])
350
+ sniper_cols = getattr(self.proc.sniper, 'feature_names', [])
351
+ titan_model = getattr(self.proc.titan, 'model', None)
352
+ pattern_models = getattr(self.proc.pattern_engine, 'models', {})
353
 
354
+ # A. TITAN
355
+ global_titan_scores = np.full(len(arr_ts_1m), 0.5, dtype=np.float32)
356
+ if titan_model:
357
+ titan_cols = [
358
+ '5m_open', '5m_high', '5m_low', '5m_close', '5m_volume', '5m_RSI', '5m_MACD', '5m_MACD_h',
359
+ '5m_CCI', '5m_ADX', '5m_EMA_9_dist', '5m_EMA_21_dist', '5m_EMA_50_dist', '5m_EMA_200_dist',
360
+ '5m_BB_w', '5m_BB_p', '5m_MFI', '5m_VWAP_dist', '15m_timestamp', '15m_RSI', '15m_MACD',
361
+ '15m_MACD_h', '15m_CCI', '15m_ADX', '15m_EMA_9_dist', '15m_EMA_21_dist', '15m_EMA_50_dist',
362
+ '15m_EMA_200_dist', '15m_BB_w', '15m_BB_p', '15m_MFI', '15m_VWAP_dist', '1h_timestamp',
363
+ '1h_RSI', '1h_MACD_h', '1h_EMA_50_dist', '1h_EMA_200_dist', '1h_ATR_pct', '4h_timestamp',
364
+ '4h_RSI', '4h_MACD_h', '4h_EMA_50_dist', '4h_EMA_200_dist', '4h_ATR_pct', '1d_timestamp',
365
+ '1d_RSI', '1d_EMA_200_dist', '1d_Trend_Strong'
366
+ ]
367
+ print(" ๐Ÿš€ Running Global Titan...", flush=True)
368
+ try:
369
+ t_vecs = []
370
+ for col in titan_cols:
371
+ parts = col.split('_', 1); tf = parts[0]; feat = parts[1]
372
+ target_arr = numpy_htf.get(tf, None)
373
+ target_map = locals().get(f"map_{tf}", None)
374
+ if target_arr and feat in target_arr: t_vecs.append(target_arr[feat][target_map])
375
+ elif target_arr and feat == 'timestamp': t_vecs.append(target_arr['timestamp'][target_map])
376
+ elif target_arr and feat in ['open','high','low','close','volume']: t_vecs.append(target_arr[feat][target_map])
377
+ else: t_vecs.append(np.zeros(len(arr_ts_1m)))
378
+ X_TITAN = np.column_stack(t_vecs)
379
+ global_titan_scores = _revive_score_distribution(titan_model.predict(xgb.DMatrix(X_TITAN, feature_names=titan_cols)))
380
+ except: pass
381
+
382
+ # B. SNIPER
383
+ global_sniper_scores = np.full(len(arr_ts_1m), 0.5, dtype=np.float32)
384
+ if sniper_models:
385
+ print(" ๐Ÿš€ Running Global Sniper...", flush=True)
386
+ try:
387
+ s_vecs = []
388
+ for col in sniper_cols:
389
+ if col in fast_1m: s_vecs.append(fast_1m[col])
390
+ elif col == 'atr' and 'atr_z' in fast_1m: s_vecs.append(fast_1m['atr_z'])
391
+ else: s_vecs.append(np.zeros(len(arr_ts_1m)))
392
+ X_SNIPER = np.column_stack(s_vecs)
393
+ preds = [m.predict(X_SNIPER) for m in sniper_models]
394
+ global_sniper_scores = _revive_score_distribution(np.mean(preds, axis=0))
395
+ except: pass
396
+
397
+ # C. ORACLE
398
+ global_oracle_scores = np.full(len(arr_ts_1m), 0.5, dtype=np.float32)
399
+ if oracle_dir:
400
+ print(" ๐Ÿš€ Running Global Oracle...", flush=True)
401
+ try:
402
+ o_vecs = []
403
+ for col in oracle_cols:
404
+ if col.startswith('1h_'): o_vecs.append(numpy_htf['1h'].get(col[3:], np.zeros(len(arr_ts_1m)))[map_1h])
405
+ elif col.startswith('15m_'): o_vecs.append(numpy_htf['15m'].get(col[4:], np.zeros(len(arr_ts_1m)))[map_15m])
406
+ elif col.startswith('4h_'): o_vecs.append(numpy_htf['4h'].get(col[3:], np.zeros(len(arr_ts_1m)))[map_4h])
407
+ elif col == 'sim_titan_score': o_vecs.append(global_titan_scores)
408
+ elif col == 'sim_mc_score': o_vecs.append(np.full(len(arr_ts_1m), 0.5))
409
+ elif col == 'sim_pattern_score': o_vecs.append(np.full(len(arr_ts_1m), 0.5))
410
+ else: o_vecs.append(np.zeros(len(arr_ts_1m)))
411
+ X_ORACLE = np.column_stack(o_vecs)
412
+ preds_o = oracle_dir.predict(X_ORACLE)
413
+ preds_o = preds_o if isinstance(preds_o, np.ndarray) and len(preds_o.shape)==1 else preds_o[:, 0]
414
+ global_oracle_scores = _revive_score_distribution(preds_o)
415
+ except: pass
416
+
417
+ # D. LEGACY V2 & V3
418
+ global_v2_scores = np.zeros(len(arr_ts_1m), dtype=np.float32)
419
+ global_v3_scores = np.zeros(len(arr_ts_1m), dtype=np.float32)
 
 
 
 
420
 
421
+ if legacy_v2:
422
+ try:
423
+ l_log = fast_1m['log_ret']; l_rsi = fast_1m['RSI'] / 100.0; l_fib = fast_1m['fib_pos']; l_vol = fast_1m['volatility']
424
+ l5_log = numpy_htf['5m']['log_ret'][map_5m]; l5_rsi = numpy_htf['5m']['RSI'][map_5m] / 100.0; l5_fib = numpy_htf['5m']['fib_pos'][map_5m]; l5_trd = numpy_htf['5m']['trend_slope'][map_5m]
425
+ l15_log = numpy_htf['15m']['log_ret'][map_15m]; l15_rsi = numpy_htf['15m']['RSI'][map_15m] / 100.0; l15_fib618 = numpy_htf['15m']['dist_fib618'][map_15m]; l15_trd = numpy_htf['15m']['trend_slope'][map_15m]
426
+ lags = []
427
+ for lag in [1, 2, 3, 5, 10, 20]:
428
+ lags.extend([fast_1m[f'log_ret_lag_{lag}'], fast_1m[f'rsi_lag_{lag}'], fast_1m[f'fib_pos_lag_{lag}'], fast_1m[f'volatility_lag_{lag}']])
429
+ X_V2 = np.column_stack([l_log, l_rsi, l_fib, l_vol, l5_log, l5_rsi, l5_fib, l5_trd, l15_log, l15_rsi, l15_fib618, l15_trd, *lags])
430
+ preds = legacy_v2.predict(xgb.DMatrix(X_V2))
431
+ global_v2_scores = preds[:, 2] if len(preds.shape) > 1 else preds
432
+ global_v2_scores = global_v2_scores.flatten()
433
+ except: pass
434
 
435
+ if legacy_v3 and v3_feat_names:
436
+ try:
437
+ v3_dict = {}
438
+ v3_dict['rsi'] = fast_1m['RSI']; v3_dict['dist_ema50'] = fast_1m['dist_ema50']
439
+ v3_dict['dist_ema200'] = fast_1m['dist_ema200']; v3_dict['log_ret'] = fast_1m['log_ret']
440
+ v3_dict['rsi_5m'] = numpy_htf['5m']['RSI'][map_5m]; v3_dict['dist_ema50_5m'] = numpy_htf['5m']['dist_ema50'][map_5m]
441
+ v3_dict['dist_ema200_5m'] = numpy_htf['5m']['dist_ema200'][map_5m]; v3_dict['log_ret_5m'] = numpy_htf['5m']['log_ret'][map_5m]
442
+ v3_dict['rsi_15m'] = numpy_htf['15m']['RSI'][map_15m]; v3_dict['dist_ema50_15m'] = numpy_htf['15m']['dist_ema50'][map_15m]
443
+ v3_dict['dist_ema200_15m'] = numpy_htf['15m']['dist_ema200'][map_15m]; v3_dict['log_ret_15m'] = numpy_htf['15m']['log_ret'][map_15m]
444
+ df_v3_data = {k: v3_dict.get(k, np.zeros(len(arr_ts_1m))) for k in v3_feat_names}
445
+ df_v3 = pd.DataFrame(df_v3_data)
446
+ preds_v3 = legacy_v3.predict(xgb.DMatrix(df_v3))
447
+ global_v3_scores = preds_v3.flatten()
448
+ except: pass
449
+
450
+ # Filter
451
+ is_candidate = (numpy_htf['1h']['RSI'][map_1h] <= 70) & (global_titan_scores > 0.4) & (global_oracle_scores > 0.4)
452
+ candidate_indices = np.where(is_candidate)[0]
453
+ start_ts_val = frames['1m'].index[0] + pd.Timedelta(minutes=500)
454
+ start_idx_offset = np.searchsorted(arr_ts_1m, int(start_ts_val.timestamp()*1000))
455
+ candidate_indices = candidate_indices[candidate_indices >= start_idx_offset]
456
+ candidate_indices = candidate_indices[candidate_indices < (len(arr_ts_1m) - 245)]
457
+ print(f" ๐ŸŽฏ Candidates: {len(candidate_indices)}. Running Vectorized Hydra...", flush=True)
458
+
459
+ ai_results = []
460
+ if hydra_models and len(candidate_indices) > 0:
461
+ h_static = np.column_stack([
462
+ fast_1m['RSI'], numpy_htf['5m']['RSI'][map_5m], numpy_htf['15m']['RSI'][map_15m],
463
+ fast_1m['bb_width'], fast_1m['rel_vol'], fast_1m['atr'], fast_1m['close']
464
+ ])
465
+ chunk_size = 5000
466
+ for i in range(0, len(candidate_indices), chunk_size):
467
+ chunk_idxs = candidate_indices[i:i+chunk_size]
468
+ for idx in chunk_idxs:
469
+ # โœ… PATTERNS ON DEMAND
470
+ s_pattern = 0.5
471
+ if pattern_models and '15m' in pattern_models:
472
+ try:
473
+ idx_15m_entry = map_15m[idx]
474
+ if idx_15m_entry > 200:
475
+ p_win = pd.DataFrame({
476
+ 'open': frames['15m']['open'].values[idx_15m_entry-200:idx_15m_entry],
477
+ 'high': frames['15m']['high'].values[idx_15m_entry-200:idx_15m_entry],
478
+ 'low': frames['15m']['low'].values[idx_15m_entry-200:idx_15m_entry],
479
+ 'close': frames['15m']['close'].values[idx_15m_entry-200:idx_15m_entry],
480
+ 'volume': frames['15m']['volume'].values[idx_15m_entry-200:idx_15m_entry]
481
+ })
482
+ vec = _transform_window_for_pattern(p_win)
483
+ if vec is not None:
484
+ s_pattern = pattern_models['15m'].predict(xgb.DMatrix(vec.reshape(1,-1)))[0]
485
+ except: pass
486
+
487
+ sl_st = h_static[idx:idx+240]
488
+ sl_close = sl_st[:, 6]; sl_atr = sl_st[:, 5]
489
+ entry = float(fast_1m['close'][idx])
490
+ dist = np.maximum(1.5 * sl_atr, entry * 0.015)
491
+ pnl = sl_close - entry
492
+ norm_pnl = pnl / dist
493
+ max_pnl_r = (np.maximum.accumulate(sl_close) - entry) / dist
494
+ atr_pct = sl_atr / sl_close
495
+ zeros = np.zeros(240); time_vec = np.arange(1, 241); s_oracle = float(global_oracle_scores[idx])
496
+
497
+ X_H = np.column_stack([
498
+ sl_st[:,0], sl_st[:,1], sl_st[:,2], sl_st[:,3], sl_st[:,4],
499
+ zeros, atr_pct, norm_pnl, max_pnl_r, zeros, zeros, time_vec, zeros,
500
+ np.full(240, s_oracle), np.full(240, 0.7), np.full(240, 3.0)
501
+ ])
502
+ max_hydra = 0.0; max_giveback = 0.0; hydra_time = 0
503
+ try:
504
+ probs = hydra_models['crash'].predict_proba(X_H)[:, 1]
505
+ max_hydra = float(np.max(probs))
506
+ if max_hydra > 0.6:
507
+ t = np.argmax(probs)
508
+ hydra_time = int(fast_1m['timestamp'][idx + t])
509
+ except: pass
510
+ try:
511
+ probs_g = hydra_models['giveback'].predict_proba(X_H)[:, 1]
512
+ max_giveback = float(np.max(probs_g))
513
+ except: pass
514
+
515
+ max_v2 = float(np.max(global_v2_scores[idx:idx+240]))
516
+ max_v3 = float(np.max(global_v3_scores[idx:idx+240]))
517
+ v2_time = 0
518
+ if max_v2 > 0.8:
519
+ t2 = np.argmax(global_v2_scores[idx:idx+240])
520
+ v2_time = int(fast_1m['timestamp'][idx + t2])
521
+
522
+ ai_results.append({
523
+ 'timestamp': int(fast_1m['timestamp'][idx]),
524
+ 'symbol': sym, 'close': entry,
525
+ 'real_titan': float(global_titan_scores[idx]),
526
+ 'oracle_conf': s_oracle,
527
+ 'sniper_score': float(global_sniper_scores[idx]),
528
+ 'pattern_score': float(s_pattern), # โœ… SAVED PATTERN
529
+ 'risk_hydra_crash': max_hydra,
530
+ 'risk_hydra_giveback': max_giveback,
531
+ 'time_hydra_crash': hydra_time,
532
+ 'risk_legacy_v2': max_v2,
533
+ 'risk_legacy_v3': max_v3,
534
+ 'time_legacy_panic': v2_time,
535
+ 'signal_type': 'BREAKOUT', 'l1_score': 50.0
536
+ })
537
+
538
+ dt = time.time() - t0
539
+ if ai_results:
540
+ pd.DataFrame(ai_results).to_pickle(scores_file)
541
+ print(f" โœ… [{sym}] Completed in {dt:.2f} seconds. ({len(ai_results)} signals)", flush=True)
542
+ gc.collect()
543
+
544
+ async def generate_truth_data(self):
545
+ if self.force_start_date:
546
+ dt_s = datetime.strptime(self.force_start_date, "%Y-%m-%d").replace(tzinfo=timezone.utc)
547
+ dt_e = datetime.strptime(self.force_end_date, "%Y-%m-%d").replace(tzinfo=timezone.utc)
548
+ ms_s = int(dt_s.timestamp()*1000); ms_e = int(dt_e.timestamp()*1000)
549
+ print(f"\n๐Ÿšœ [Phase 1] Processing Era: {self.force_start_date} -> {self.force_end_date}")
550
+ for sym in self.TARGET_COINS:
551
+ c = await self._fetch_all_data_fast(sym, ms_s, ms_e)
552
+ if c: await self._process_data_in_memory(sym, c, ms_s, ms_e)
553
+
554
+ @staticmethod
555
+ def _worker_optimize(combinations_batch, scores_files, initial_capital, fees_pct, max_slots):
556
+ print(f" โณ [System] Loading {len(scores_files)} datasets...", flush=True)
557
+ data = []
558
+ for f in scores_files:
559
+ try: data.append(pd.read_pickle(f))
560
+ except: pass
561
+ if not data: return []
562
+ df = pd.concat(data).sort_values('timestamp').reset_index(drop=True)
563
 
564
+ ts = df['timestamp'].values
565
+ close = df['close'].values.astype(float)
566
+ sym = df['symbol'].values
567
+ u_syms = np.unique(sym); sym_map = {s: i for i, s in enumerate(u_syms)}; sym_id = np.array([sym_map[s] for s in sym])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
568
 
569
+ oracle = df['oracle_conf'].values.astype(float)
570
+ sniper = df['sniper_score'].values.astype(float)
571
+ hydra = df['risk_hydra_crash'].values.astype(float)
572
+ if 'risk_hydra_giveback' in df.columns: hydra_give = df['risk_hydra_giveback'].values.astype(float)
573
+ else: hydra_give = np.zeros(len(df))
574
+ titan = df['real_titan'].values.astype(float)
575
+
576
+ # โœ… Load Pattern Score
577
+ if 'pattern_score' in df.columns: pattern = df['pattern_score'].values.astype(float)
578
+ else: pattern = np.full(len(df), 0.5)
579
+
580
+ l1 = df['l1_score'].values.astype(float)
581
+ legacy_v2 = df['risk_legacy_v2'].values.astype(float) if 'risk_legacy_v2' in df else np.zeros(len(df))
582
+ legacy_v3 = df['risk_legacy_v3'].values.astype(float) if 'risk_legacy_v3' in df else np.zeros(len(df))
583
+ h_times = df['time_hydra_crash'].values.astype(np.int64)
 
584
 
585
+ N = len(ts)
586
+ print(f" ๐Ÿš€ [System] Testing {len(combinations_batch)} configs on {N} candles...", flush=True)
587
+ res = []
588
+ for cfg in combinations_batch:
589
+ pos = {}; log = []
590
+ bal = float(initial_capital); alloc = 0.0
 
 
 
 
591
 
592
+ titan_thresh = cfg['TITAN']
593
+ # โœ… Add Pattern Check
594
+ pattern_thresh = cfg.get('PATTERN', 0.10)
595
 
596
+ mask = (l1 >= cfg['L1_SCORE']) & \
597
+ (oracle >= cfg['ORACLE']) & \
598
+ (sniper >= cfg['SNIPER']) & \
599
+ (titan >= titan_thresh) & \
600
+ (pattern >= pattern_thresh) # โœ… MASKED
601
+
602
+ for i in range(N):
603
+ s = sym_id[i]; p = float(close[i]); curr_t = ts[i]
604
+
605
+ if s in pos:
606
+ entry_p, h_risk_val, h_give_val, size_val, h_time_val = pos[s]
607
+ crash_hydra = bool(h_risk_val > cfg['HYDRA_CRASH'])
608
+ giveback_hydra = bool(h_give_val > cfg['HYDRA_GIVEBACK'])
609
+ time_match = bool(h_time_val > 0 and curr_t >= h_time_val)
610
+ panic_legacy_v2 = bool(legacy_v2[i] > cfg['LEGACY_V2'])
611
+ panic_legacy_v3 = bool(legacy_v3[i] > cfg['LEGACY_V3'])
612
+ pnl = (p - entry_p) / entry_p
613
+ should_exit = (crash_hydra and time_match) or giveback_hydra or panic_legacy_v2 or panic_legacy_v3 or (pnl > 0.04) or (pnl < -0.02)
614
+ if should_exit:
615
+ realized = pnl - (fees_pct * 2)
616
+ bal += size_val * (1.0 + realized)
617
+ alloc -= size_val
618
+ del pos[s]
619
+ log.append({'pnl': realized})
620
+
621
+ if len(pos) < max_slots and bool(mask[i]):
622
+ if s not in pos and bal >= 5.0:
623
+ size = min(10.0, bal * 0.98)
624
+ pos[s] = (p, hydra[i], hydra_give[i], size, h_times[i])
625
+ bal -= size; alloc += size
626
+
627
+ final_bal = bal + alloc
628
+ profit = final_bal - initial_capital
629
+ tot = len(log)
630
+ winning = [x for x in log if x['pnl'] > 0]
631
+ losing = [x for x in log if x['pnl'] <= 0]
632
+ win_count = len(winning); loss_count = len(losing)
633
+ win_rate = (win_count/tot*100) if tot > 0 else 0.0
634
+ avg_win = np.mean([x['pnl'] for x in winning]) if winning else 0.0
635
+ avg_loss = np.mean([x['pnl'] for x in losing]) if losing else 0.0
636
+ gross_p = sum([x['pnl'] for x in winning])
637
+ gross_l = abs(sum([x['pnl'] for x in losing]))
638
+ profit_factor = (gross_p / gross_l) if gross_l > 0 else 99.9
639
+ max_win_s = 0; max_loss_s = 0; curr_w = 0; curr_l = 0
640
+ for t in log:
641
+ if t['pnl'] > 0:
642
+ curr_w += 1; curr_l = 0
643
+ if curr_w > max_win_s: max_win_s = curr_w
644
+ else:
645
+ curr_l += 1; curr_w = 0
646
+ if curr_l > max_loss_s: max_loss_s = curr_l
647
+
648
+ res.append({
649
+ 'config': cfg, 'final_balance': final_bal, 'net_profit': profit,
650
+ 'total_trades': tot, 'win_rate': win_rate, 'max_drawdown': 0,
651
+ 'win_count': win_count, 'loss_count': loss_count,
652
+ 'avg_win': avg_win, 'avg_loss': avg_loss,
653
+ 'max_win_streak': max_win_s, 'max_loss_streak': max_loss_s,
654
+ 'profit_factor': profit_factor,
655
+ 'consensus_agreement_rate': 0.0, 'high_consensus_win_rate': 0.0, 'high_consensus_avg_pnl': 0.0
656
+ })
657
+ return res
658
+
659
+ async def run_optimization(self, target_regime="RANGE"):
660
+ await self.generate_truth_data()
661
 
662
+ keys = list(self.GRID_RANGES.keys())
663
+ values = list(self.GRID_RANGES.values())
664
+ combos = [dict(zip(keys, c)) for c in itertools.product(*values)]
665
+
666
+ files = glob.glob(os.path.join(CACHE_DIR, "*.pkl"))
667
+ results_list = self._worker_optimize(combos, files, self.INITIAL_CAPITAL, self.TRADING_FEES, self.MAX_SLOTS)
668
+ if not results_list: return None, {'net_profit': 0.0, 'win_rate': 0.0}
669
+
670
+ results_list.sort(key=lambda x: x['net_profit'], reverse=True)
671
+ best = results_list[0]
672
+
673
+ diag = []
674
+ if best['total_trades'] > 2000 and best['net_profit'] < 10: diag.append("โš ๏ธ Overtrading")
675
+ if best['win_rate'] > 55 and best['net_profit'] < 0: diag.append("โš ๏ธ Fee Burn")
676
+ if abs(best['avg_loss']) > best['avg_win']: diag.append("โš ๏ธ Risk/Reward Inversion")
677
+ if best['max_loss_streak'] > 10: diag.append("โš ๏ธ Consecutive Loss Risk")
678
+ if not diag: diag.append("โœ… System Healthy")
679
 
680
+ print("\n" + "="*60)
681
+ print(f"๐Ÿ† CHAMPION REPORT [{target_regime}]:")
682
+ print(f" ๐Ÿ’ฐ Final Balance: ${best['final_balance']:,.2f}")
683
+ print(f" ๐Ÿš€ Net PnL: ${best['net_profit']:,.2f}")
684
+ print("-" * 60)
685
+ print(f" ๐Ÿ“Š Total Trades: {best['total_trades']}")
686
+ print(f" ๐Ÿ“ˆ Win Rate: {best['win_rate']:.1f}%")
687
+ print(f" โœ… Winning Trades: {best['win_count']} (Avg: {best['avg_win']*100:.2f}%)")
688
+ print(f" โŒ Losing Trades: {best['loss_count']} (Avg: {best['avg_loss']*100:.2f}%)")
689
+ print(f" ๐ŸŒŠ Max Streaks: Win {best['max_win_streak']} | Loss {best['max_loss_streak']}")
690
+ print(f" โš–๏ธ Profit Factor: {best['profit_factor']:.2f}")
691
+ print("-" * 60)
692
+ print(f" ๐Ÿฉบ DIAGNOSIS: {' '.join(diag)}")
693
+ print(f" โš™๏ธ Oracle={best['config']['ORACLE']:.2f} | Sniper={best['config']['SNIPER']:.2f} | Pattern={best['config'].get('PATTERN',0):.2f}")
694
+ print("="*60)
695
+ return best['config'], best
696
+
697
async def run_strategic_optimization_task():
    """Run the full strategic backtest: optimize each market-regime era and persist the DNA."""
    print("\n๐Ÿงช [STRATEGIC BACKTEST] Configurable Grid Mode...")
    storage = R2Service()
    data_manager = DataManager(None, None, storage)
    processor = MLProcessor(data_manager)
    try:
        await data_manager.initialize()
        await processor.initialize()
        # Hydra is chatty during bulk scoring; mute it when the engine loaded.
        if processor.guardian_hydra:
            processor.guardian_hydra.set_silent_mode(True)
        hub = AdaptiveHub(storage)
        await hub.initialize()
        backtester = HeavyDutyBacktester(data_manager, processor)

        # One historical era per market regime.
        scenarios = [
            {"regime": "BULL", "start": "2024-01-01", "end": "2024-03-30"},
            {"regime": "BEAR", "start": "2023-08-01", "end": "2023-09-15"},
            {"regime": "DEAD", "start": "2023-06-01", "end": "2023-08-01"},
            {"regime": "RANGE", "start": "2024-07-01", "end": "2024-09-30"}
        ]

        for scenario in scenarios:
            backtester.set_date_range(scenario["start"], scenario["end"])
            best_cfg, best_stats = await backtester.run_optimization(scenario["regime"])
            if best_cfg:
                hub.submit_challenger(scenario["regime"], best_cfg, best_stats)
        await hub._save_state_to_r2()
        print("โœ… [System] DNA Updated.")
    finally:
        print("๐Ÿ”Œ [System] Closing connections...")
        await data_manager.close()
 
723
# Script entry point: run the full strategic grid optimization across all regimes.
if __name__ == "__main__":
    asyncio.run(run_strategic_optimization_task())