Riy777 commited on
Commit
a3a222f
·
verified ·
1 Parent(s): 67750ee

Update ml_engine/processor.py

Browse files
Files changed (1) hide show
  1. ml_engine/processor.py +656 -359
ml_engine/processor.py CHANGED
@@ -1,393 +1,690 @@
1
  # ============================================================
2
- # 🧠 ml_engine/processor.py (V36.0 - GEM-Architect: The Cybernetic Processor)
3
  # ============================================================
4
 
5
  import asyncio
6
- import traceback
 
 
 
7
  import logging
 
8
  import os
 
9
  import sys
10
- import numpy as np
11
- from typing import Dict, Any, List, Optional
12
-
13
- # --- استيراد المحركات (كما هي) ---
14
- try: from .titan_engine import TitanEngine
15
- except ImportError: TitanEngine = None
16
- try: from .patterns import ChartPatternAnalyzer
17
- except ImportError: ChartPatternAnalyzer = None
18
- try: from .monte_carlo import MonteCarloEngine
19
- except ImportError: MonteCarloEngine = None
20
- try: from .oracle_engine import OracleEngine
21
- except ImportError: OracleEngine = None
22
- try: from .sniper_engine import SniperEngine
23
- except ImportError: SniperEngine = None
24
- try: from .hybrid_guardian import HybridDeepSteward
25
- except ImportError: HybridDeepSteward = None
26
- try: from .guardian_hydra import GuardianHydra
27
- except ImportError: GuardianHydra = None
28
 
29
- # ============================================================
30
- # 📂 مسارات النماذج
31
- # ============================================================
32
- BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
33
- MODELS_L2_DIR = os.path.join(BASE_DIR, "ml_models", "layer2")
34
- MODELS_PATTERN_DIR = os.path.join(BASE_DIR, "ml_models", "xgboost_pattern2")
35
- MODELS_UNIFIED_DIR = os.path.join(BASE_DIR, "ml_models", "Unified_Models_V1")
36
- MODELS_SNIPER_DIR = os.path.join(BASE_DIR, "ml_models", "guard_v2")
37
- MODELS_HYDRA_DIR = os.path.join(BASE_DIR, "ml_models", "guard_v1")
38
- MODEL_V2_PATH = os.path.join(BASE_DIR, "ml_models", "DeepSteward_V2_Production.json")
39
- MODEL_V3_PATH = os.path.join(BASE_DIR, "ml_models", "DeepSteward_V3_Production.json")
40
- MODEL_V3_FEAT = os.path.join(BASE_DIR, "ml_models", "DeepSteward_V3_Features.json")
41
 
42
- # ============================================================
43
- # 🎛️ SYSTEM LIMITS & THRESHOLDS (Dynamic DNA Container)
44
- # ============================================================
45
- class SystemLimits:
46
- """
47
- GEM-Architect: The Dynamic Constitution.
48
- يتم تحديث هذه القيم آلياً بواسطة AdaptiveHub بناءً على حالة السوق (Bull/Bear/etc).
49
- """
50
-
51
- # --- Layer 1 (Data Manager Control) ---
52
- L1_MIN_AFFINITY_SCORE = 15.0 # سيتم الكتابة عليها من الـ DNA
53
-
54
- # --- Layer 2 Weights (Dynamic) ---
55
- # هذه الأوزان تتغير حسب أداء النماذج (Tactical Loop) وحالة السوق
56
- L2_WEIGHT_TITAN = 0.40
57
- L2_WEIGHT_PATTERNS = 0.30
58
- L2_WEIGHT_MC = 0.10
59
- # (Hydra/Sniper قد يساهمون في L2 أو L4 حسب التصميم)
60
-
61
- # إعدادات الأنماط (تتغير حسب الاستراتيجية)
62
- PATTERN_TF_WEIGHTS = {'15m': 0.40, '1h': 0.30, '5m': 0.20, '4h': 0.10, '1d': 0.00}
63
- PATTERN_THRESH_BULLISH = 0.60
64
- PATTERN_THRESH_BEARISH = 0.40
65
-
66
- # --- Layer 3 (Oracle) ---
67
- L3_CONFIDENCE_THRESHOLD = 0.65
68
- L3_WHALE_IMPACT_MAX = 0.10
69
- L3_NEWS_IMPACT_MAX = 0.05
70
- L3_MC_ADVANCED_MAX = 0.10
71
-
72
- # --- Layer 4 (Sniper & Execution) ---
73
- L4_ENTRY_THRESHOLD = 0.40
74
- # أوزان داخلية لـ Sniper (ML vs OrderBook)
75
- L4_WEIGHT_ML = 0.60
76
- L4_WEIGHT_OB = 0.40
77
- # نسبة الجدار المسموح بها (تتغير جذرياً بين Bull و Bear)
78
- L4_OB_WALL_RATIO = 0.40
79
-
80
- # --- Layer 0: Hydra & Guardian Thresholds ---
81
- HYDRA_CRASH_THRESH = 0.60
82
- HYDRA_GIVEBACK_THRESH = 0.70
83
- HYDRA_STAGNATION_THRESH = 0.50
84
-
85
- # Legacy Guard Thresholds
86
- LEGACY_V2_PANIC_THRESH = 0.95
87
- LEGACY_V3_HARD_THRESH = 0.95
88
- LEGACY_V3_SOFT_THRESH = 0.85
89
- LEGACY_V3_ULTRA_THRESH = 0.98
90
-
91
- @classmethod
92
- def to_dict(cls) -> Dict[str, Any]:
93
- return {k: v for k, v in cls.__dict__.items() if not k.startswith('__') and not callable(v)}
94
-
95
- @classmethod
96
- def update_from_dict(cls, config: Dict[str, Any]):
97
- """تحديث القيم من AdaptiveHub"""
98
- if not config: return
99
- for k, v in config.items():
100
- if hasattr(cls, k):
101
- setattr(cls, k, v)
102
- # print(f"🔄 [SystemLimits] Updated. TitanW={cls.L2_WEIGHT_TITAN:.2f}, WallRatio={cls.L4_OB_WALL_RATIO}")
103
 
104
- # ============================================================
105
- # 🧠 MLProcessor Class
106
- # ============================================================
107
- class MLProcessor:
108
- def __init__(self, data_manager=None):
109
- self.data_manager = data_manager
110
- self.initialized = False
111
 
112
- self.titan = TitanEngine(model_dir=MODELS_L2_DIR) if TitanEngine else None
113
- self.pattern_engine = ChartPatternAnalyzer(models_dir=MODELS_PATTERN_DIR) if ChartPatternAnalyzer else None
114
- self.mc_analyzer = MonteCarloEngine() if MonteCarloEngine else None
115
- self.oracle = OracleEngine(model_dir=MODELS_UNIFIED_DIR) if OracleEngine else None
116
- self.sniper = SniperEngine(models_dir=MODELS_SNIPER_DIR) if SniperEngine else None
117
 
118
- self.guardian_hydra = None
119
- if GuardianHydra:
120
- self.guardian_hydra = GuardianHydra(model_dir=MODELS_HYDRA_DIR)
121
-
122
- self.guardian_legacy = None
123
- if HybridDeepSteward:
124
- self.guardian_legacy = HybridDeepSteward(
125
- v2_model_path=MODEL_V2_PATH,
126
- v3_model_path=MODEL_V3_PATH,
127
- v3_features_map_path=MODEL_V3_FEAT
128
- )
129
-
130
- print(f"🧠 [MLProcessor V36.0] Cybernetic Control Active.")
131
-
132
- async def initialize(self):
133
- if self.initialized: return
134
- print("⚙️ [Processor] Initializing Neural Grid...")
135
- try:
136
- tasks = []
137
- if self.titan: tasks.append(self.titan.initialize())
138
-
139
- if self.pattern_engine:
140
- # التكوين الأولي (سيتم تحديثه لاحقاً ديناميكياً)
141
- self.pattern_engine.configure_thresholds(
142
- weights=SystemLimits.PATTERN_TF_WEIGHTS,
143
- bull_thresh=SystemLimits.PATTERN_THRESH_BULLISH,
144
- bear_thresh=SystemLimits.PATTERN_THRESH_BEARISH
145
- )
146
- tasks.append(self.pattern_engine.initialize())
147
-
148
- if self.oracle:
149
- if hasattr(self.oracle, 'set_threshold'):
150
- self.oracle.set_threshold(SystemLimits.L3_CONFIDENCE_THRESHOLD)
151
- tasks.append(self.oracle.initialize())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
152
 
153
- if self.sniper:
154
- # التكوين الأولي
155
- if hasattr(self.sniper, 'configure_settings'):
156
- self.sniper.configure_settings(
157
- threshold=SystemLimits.L4_ENTRY_THRESHOLD,
158
- wall_ratio=SystemLimits.L4_OB_WALL_RATIO,
159
- w_ml=SystemLimits.L4_WEIGHT_ML,
160
- w_ob=SystemLimits.L4_WEIGHT_OB
161
- )
162
- tasks.append(self.sniper.initialize())
163
 
164
- if tasks: await asyncio.gather(*tasks)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
165
 
166
- if self.guardian_hydra:
167
- self.guardian_hydra.initialize()
168
- print(" 🛡️ [Guard 1] Hydra X-Ray: Active")
 
 
 
 
 
 
 
 
 
 
 
 
 
169
 
170
- if self.guardian_legacy:
171
- if asyncio.iscoroutinefunction(self.guardian_legacy.initialize):
172
- await self.guardian_legacy.initialize()
173
- else:
174
- self.guardian_legacy.initialize()
 
 
 
 
 
 
 
 
 
 
175
 
176
- # تطبيق العتبات المبدئية
177
- self.guardian_legacy.configure_thresholds(
178
- v2_panic=SystemLimits.LEGACY_V2_PANIC_THRESH,
179
- v3_hard=SystemLimits.LEGACY_V3_HARD_THRESH,
180
- v3_soft=SystemLimits.LEGACY_V3_SOFT_THRESH,
181
- v3_ultra=SystemLimits.LEGACY_V3_ULTRA_THRESH
182
- )
183
- print(f" 🛡️ [Guard 2] Legacy Steward: Active")
184
-
185
- self.initialized = True
186
- print("✅ [Processor] All Systems Operational.")
187
-
188
- except Exception as e:
189
- print(f"❌ [Processor FATAL] Init failed: {e}")
190
- traceback.print_exc()
191
-
192
- async def process_compound_signal(self, raw_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
193
- """
194
- L2 Processing:
195
- هنا يتم دمج الدرجات بناءً على الأوزان الديناميكية الحالية (SystemLimits).
196
- """
197
- if not self.initialized: await self.initialize()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
198
 
199
- symbol = raw_data.get('symbol')
200
- ohlcv_data = raw_data.get('ohlcv')
201
- current_price = raw_data.get('current_price', 0.0)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
202
 
203
- if not symbol or not ohlcv_data: return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
204
 
205
- try:
206
- # 1. Titan Engine
207
- score_titan = 0.5
208
- titan_res = {}
209
- if self.titan:
210
- titan_res = await asyncio.to_thread(self.titan.predict, ohlcv_data)
211
- score_titan = titan_res.get('score', 0.5)
212
-
213
- # 2. Pattern Engine (Inject Dynamic Config First)
214
- score_patterns = 0.5
215
- pattern_res = {}
216
- pattern_name = "Neutral"
217
- if self.pattern_engine:
218
- # تحديث التكوين قبل التحليل لضمان استخدام أحدث أوزان الاستراتيجية
219
- self.pattern_engine.configure_thresholds(
220
- weights=SystemLimits.PATTERN_TF_WEIGHTS,
221
- bull_thresh=SystemLimits.PATTERN_THRESH_BULLISH,
222
- bear_thresh=SystemLimits.PATTERN_THRESH_BEARISH
223
- )
224
- pattern_res = await self.pattern_engine.detect_chart_patterns(ohlcv_data)
225
- score_patterns = pattern_res.get('pattern_confidence', 0.5)
226
- pattern_name = pattern_res.get('pattern_detected', 'Neutral')
227
-
228
- # 3. Monte Carlo (Light)
229
- mc_score = 0.5
230
- if self.mc_analyzer and '1h' in ohlcv_data:
231
- closes = [c[4] for c in ohlcv_data['1h']]
232
- raw_mc = self.mc_analyzer.run_light_check(closes)
233
- mc_score = 0.5 + (raw_mc * 5.0)
234
- mc_score = max(0.0, min(1.0, mc_score))
235
-
236
- # 4. Hybrid Calculation (Using Dynamic Weights from SystemLimits)
237
- # هذه الأوزان يتم تحديثها بواسطة AdaptiveHub
238
- w_titan = SystemLimits.L2_WEIGHT_TITAN
239
- w_patt = SystemLimits.L2_WEIGHT_PATTERNS
240
- w_mc = SystemLimits.L2_WEIGHT_MC
241
 
242
- # تطبيع الأوزان (لضمان أن مجموعها 1.0 تقريباً)
243
- total_w = w_titan + w_patt + w_mc
244
- if total_w <= 0: total_w = 1.0
 
 
 
245
 
246
- hybrid_score = ((score_titan * w_titan) + (score_patterns * w_patt) + (mc_score * w_mc)) / total_w
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
247
 
248
- return {
249
- 'symbol': symbol,
250
- 'current_price': current_price,
251
- 'enhanced_final_score': hybrid_score,
252
- 'titan_score': score_titan,
253
- 'patterns_score': score_patterns,
254
- 'mc_score': mc_score,
255
- 'components': {
256
- 'titan_score': score_titan,
257
- 'patterns_score': score_patterns,
258
- 'mc_score': mc_score
259
- },
260
- 'pattern_name': pattern_name,
261
- 'ohlcv': ohlcv_data,
262
- 'titan_details': titan_res,
263
- 'pattern_details': pattern_res.get('details', {})
264
- }
265
- except Exception as e:
266
- print(f"❌ [Processor] Error processing {symbol}: {e}")
267
- return None
268
-
269
- async def consult_oracle(self, symbol_data: Dict[str, Any]) -> Dict[str, Any]:
270
- """
271
- L3 Processing:
272
- Oracle يستخدم العتبة الحالية من SystemLimits.
273
- """
274
- if not self.initialized: await self.initialize()
275
-
276
- if self.oracle:
277
- # تحديث العتبة ديناميكياً
278
- if hasattr(self.oracle, 'set_threshold'):
279
- self.oracle.set_threshold(SystemLimits.L3_CONFIDENCE_THRESHOLD)
280
-
281
- decision = await self.oracle.predict(symbol_data)
282
- conf = decision.get('confidence', 0.0)
283
 
284
- # طبقة أمان إضافية (Redundant Safety)
285
- if decision.get('action') in ['WATCH', 'BUY'] and conf < SystemLimits.L3_CONFIDENCE_THRESHOLD:
286
- decision['action'] = 'WAIT'
287
- decision['reason'] = f"Processor Veto: Conf {conf:.2f} < Limit {SystemLimits.L3_CONFIDENCE_THRESHOLD}"
 
 
 
 
 
 
 
 
 
288
 
289
- return decision
290
- return {'action': 'WAIT', 'reason': 'Oracle Engine Missing'}
291
-
292
- async def check_sniper_entry(self, ohlcv_1m_data: List, order_book_data: Dict[str, Any]) -> Dict[str, Any]:
293
- """
294
- L4 Processing:
295
- Sniper يستلم إعدادات دفتر الطلبات (DNA) قبل اتخاذ القرار.
296
- """
297
- if not self.initialized: await self.initialize()
298
-
299
- if self.sniper:
300
- # ✅ الحقن الديناميكي لإعدادات Sniper قبل التنفيذ
301
- if hasattr(self.sniper, 'configure_settings'):
302
- self.sniper.configure_settings(
303
- threshold=SystemLimits.L4_ENTRY_THRESHOLD,
304
- wall_ratio=SystemLimits.L4_OB_WALL_RATIO,
305
- w_ml=SystemLimits.L4_WEIGHT_ML,
306
- w_ob=SystemLimits.L4_WEIGHT_OB
307
- )
308
 
309
- return await self.sniper.check_entry_signal_async(ohlcv_1m_data, order_book_data)
 
 
310
 
311
- return {'signal': 'WAIT', 'reason': 'Sniper Engine Missing'}
312
-
313
- def consult_dual_guardians(self, symbol, ohlcv_1m, ohlcv_5m, ohlcv_15m, trade_context, order_book_snapshot=None):
314
- """
315
- L0 Guardians:
316
- الحراس يستخدمون العتبات الديناميكية (Panic Thresholds) التي قد تختلف بين Bull و Bear.
317
- """
318
- response = {'action': 'HOLD', 'detailed_log': '', 'probs': {}}
319
-
320
- # 1. Hydra
321
- hydra_result = {'action': 'HOLD', 'reason': 'Disabled', 'probs': {}}
322
- if self.guardian_hydra and self.guardian_hydra.initialized:
323
- hydra_result = self.guardian_hydra.analyze_position(symbol, ohlcv_1m, ohlcv_5m, ohlcv_15m, trade_context)
324
- h_probs = hydra_result.get('probs', {})
325
- p_crash = h_probs.get('crash', 0.0)
326
- p_giveback = h_probs.get('giveback', 0.0)
327
 
328
- # استخدام العتبات الحية
329
- if hydra_result['action'] == 'HOLD':
330
- if p_crash >= SystemLimits.HYDRA_CRASH_THRESH:
331
- hydra_result['action'] = 'EXIT_HARD'
332
- hydra_result['reason'] = f"Hydra Crash Risk {p_crash:.2f}"
333
- elif p_giveback >= SystemLimits.HYDRA_GIVEBACK_THRESH:
334
- hydra_result['action'] = 'EXIT_SOFT'
335
- hydra_result['reason'] = f"Hydra Giveback Risk {p_giveback:.2f}"
336
 
337
- # 2. Legacy (Volume-Aware Veto)
338
- legacy_result = {'action': 'HOLD', 'reason': 'Disabled', 'scores': {}}
339
- if self.guardian_legacy and self.guardian_legacy.initialized:
340
- # تحديث العتبات قبل التحليل
341
- self.guardian_legacy.configure_thresholds(
342
- v2_panic=SystemLimits.LEGACY_V2_PANIC_THRESH,
343
- v3_hard=SystemLimits.LEGACY_V3_HARD_THRESH,
344
- v3_soft=SystemLimits.LEGACY_V3_SOFT_THRESH,
345
- v3_ultra=SystemLimits.LEGACY_V3_ULTRA_THRESH
346
- )
347
-
348
- entry_price = float(trade_context.get('entry_price', 0.0))
349
- vol_30m = trade_context.get('volume_30m_usd', 0.0)
 
 
 
 
 
 
 
 
 
 
350
 
351
- legacy_result = self.guardian_legacy.analyze_position(
352
- ohlcv_1m, ohlcv_5m, ohlcv_15m, entry_price,
353
- order_book=order_book_snapshot,
354
- volume_30m_usd=vol_30m
355
- )
356
-
357
- # 3. Final Arbitration & Display
358
- h_probs = hydra_result.get('probs', {})
359
- l_scores = legacy_result.get('scores', {})
360
 
361
- h_c = h_probs.get('crash', 0.0)
362
- h_g = h_probs.get('giveback', 0.0)
363
- h_s = h_probs.get('stagnation', 0.0)
364
- l_v2 = l_scores.get('v2', 0.0)
365
- l_v3 = l_scores.get('v3', 0.0)
366
 
367
- stamp_str = f"🐲[C:{h_c:.0%}|G:{h_g:.0%}|S:{h_s:.0%}] 🕸️[V2:{l_v2:.0%}|V3:{l_v3:.0%}]"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
368
 
369
- final_action = 'HOLD'
370
- final_reason = f"Safe. {stamp_str}"
 
371
 
372
- if hydra_result['action'] in ['EXIT_HARD', 'EXIT_SOFT', 'TIGHTEN_SL', 'TRAIL_SL']:
373
- final_action = hydra_result['action']
374
- final_reason = f"🐲 HYDRA: {hydra_result['reason']} | {stamp_str}"
375
- elif legacy_result['action'] in ['EXIT_HARD', 'EXIT_SOFT']:
376
- final_action = legacy_result['action']
377
- final_reason = f"🕸️ LEGACY: {legacy_result['reason']} | {stamp_str}"
378
-
379
- return {
380
- 'action': final_action,
381
- 'reason': final_reason,
382
- 'detailed_log': f"{final_action} | {stamp_str}",
383
- 'probs': h_probs,
384
- 'scores': l_scores
385
- }
386
-
387
- async def run_advanced_monte_carlo(self, symbol, timeframe='1h'):
388
- if self.mc_analyzer and self.data_manager:
389
- try:
390
- ohlcv = await self.data_manager.get_latest_ohlcv(symbol, timeframe, limit=300)
391
- if ohlcv: return self.mc_analyzer.run_advanced_simulation([c[4] for c in ohlcv])
392
- except Exception: pass
393
- return 0.0
 
 
 
 
1
  # ============================================================
2
+ # 🧪 backtest_engine.py (V115.0 - GEM-Architect: Full Grid Density)
3
  # ============================================================
4
 
5
  import asyncio
6
+ import pandas as pd
7
+ import numpy as np
8
+ import pandas_ta as ta
9
+ import time
10
  import logging
11
+ import itertools
12
  import os
13
+ import gc
14
  import sys
15
+ import traceback
16
+ from datetime import datetime, timezone
17
+ from typing import Dict, Any, List
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
+ try:
20
+ from ml_engine.processor import MLProcessor, SystemLimits
21
+ from ml_engine.data_manager import DataManager
22
+ from learning_hub.adaptive_hub import StrategyDNA, AdaptiveHub
23
+ from r2 import R2Service
24
+ import ccxt.async_support as ccxt
25
+ import xgboost as xgb
26
+ except ImportError:
27
+ pass
 
 
 
28
 
29
+ logging.getLogger('ml_engine').setLevel(logging.WARNING)
30
+ CACHE_DIR = "backtest_real_scores"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
 
32
+ class HeavyDutyBacktester:
33
+ def __init__(self, data_manager, processor):
34
+ self.dm = data_manager
35
+ self.proc = processor
 
 
 
36
 
37
+ # 🎛️ GRID DENSITY CONTROL
38
+ # 3 = Low (243 Scenarios) - Fast
39
+ # 4 = Med (1024 Scenarios) - Balanced
40
+ # 5 = High (3125 Scenarios) - Deep Search
41
+ self.GRID_DENSITY = 3
42
 
43
+ self.INITIAL_CAPITAL = 10.0
44
+ self.TRADING_FEES = 0.001
45
+ self.MAX_SLOTS = 4
46
+
47
+ self.TARGET_COINS = [
48
+ 'SOL/USDT', 'XRP/USDT', 'DOGE/USDT'
49
+ ]
50
+
51
+ self.force_start_date = None
52
+ self.force_end_date = None
53
+
54
+ if not os.path.exists(CACHE_DIR): os.makedirs(CACHE_DIR)
55
+ print(f"🧪 [Backtest V115.0] Grid Density: {self.GRID_DENSITY} | Full Stack Optimization.")
56
+
57
+ def set_date_range(self, start_str, end_str):
58
+ self.force_start_date = start_str
59
+ self.force_end_date = end_str
60
+
61
+ # ==============================================================
62
+ # FAST DATA DOWNLOADER
63
+ # ==============================================================
64
+ async def _fetch_all_data_fast(self, sym, start_ms, end_ms):
65
+ print(f" ⚡ [Network] Downloading {sym}...", flush=True)
66
+ limit = 1000
67
+ duration_per_batch = limit * 60 * 1000
68
+ tasks = []
69
+ current = start_ms
70
+ while current < end_ms:
71
+ tasks.append(current)
72
+ current += duration_per_batch
73
+ all_candles = []
74
+ sem = asyncio.Semaphore(10)
75
+
76
+ async def _fetch_batch(timestamp):
77
+ async with sem:
78
+ for _ in range(3):
79
+ try:
80
+ return await self.dm.exchange.fetch_ohlcv(sym, '1m', since=timestamp, limit=limit)
81
+ except: await asyncio.sleep(1)
82
+ return []
83
+
84
+ chunk_size = 20
85
+ for i in range(0, len(tasks), chunk_size):
86
+ chunk_tasks = tasks[i:i + chunk_size]
87
+ futures = [_fetch_batch(ts) for ts in chunk_tasks]
88
+ results = await asyncio.gather(*futures)
89
+ for res in results:
90
+ if res: all_candles.extend(res)
91
+
92
+ if not all_candles: return None
93
+ filtered = [c for c in all_candles if c[0] >= start_ms and c[0] <= end_ms]
94
+ seen = set(); unique_candles = []
95
+ for c in filtered:
96
+ if c[0] not in seen:
97
+ unique_candles.append(c)
98
+ seen.add(c[0])
99
+ unique_candles.sort(key=lambda x: x[0])
100
+ print(f" ✅ Downloaded {len(unique_candles)} candles.", flush=True)
101
+ return unique_candles
102
+
103
+ # ==============================================================
104
+ # 🏎️ VECTORIZED INDICATORS
105
+ # ==============================================================
106
+ def _calculate_indicators_vectorized(self, df, timeframe='1m'):
107
+ df['close'] = df['close'].astype(float)
108
+ df['high'] = df['high'].astype(float)
109
+ df['low'] = df['low'].astype(float)
110
+ df['volume'] = df['volume'].astype(float)
111
+ df['open'] = df['open'].astype(float)
112
+
113
+ df['rsi'] = ta.rsi(df['close'], length=14)
114
+ df['ema20'] = ta.ema(df['close'], length=20)
115
+ df['ema50'] = ta.ema(df['close'], length=50)
116
+ df['atr'] = ta.atr(df['high'], df['low'], df['close'], length=14)
117
+
118
+ if timeframe == '1m':
119
+ sma20 = df['close'].rolling(20).mean()
120
+ std20 = df['close'].rolling(20).std()
121
+ df['bb_width'] = ((sma20 + 2*std20) - (sma20 - 2*std20)) / sma20
122
+ df['vol_ma50'] = df['volume'].rolling(50).mean()
123
+ df['rel_vol'] = df['volume'] / (df['vol_ma50'] + 1e-9)
124
+
125
+ df['slope'] = ta.slope(df['close'], length=7)
126
+ vol_mean = df['volume'].rolling(20).mean()
127
+ vol_std = df['volume'].rolling(20).std()
128
+ df['vol_z'] = (df['volume'] - vol_mean) / (vol_std + 1e-9)
129
+ df['atr_pct'] = df['atr'] / df['close']
130
+
131
+ if timeframe == '1m':
132
+ df['ret'] = df['close'].pct_change()
133
+ df['dollar_vol'] = df['close'] * df['volume']
134
+ df['amihud'] = (df['ret'].abs() / df['dollar_vol'].replace(0, np.nan)).fillna(0)
135
+ dp = df['close'].diff()
136
+ roll_cov = dp.rolling(64).cov(dp.shift(1))
137
+ df['roll_spread'] = (2 * np.sqrt(np.maximum(0, -roll_cov))).fillna(0)
138
+ sign = np.sign(df['close'].diff()).fillna(0)
139
+ df['signed_vol'] = sign * df['volume']
140
+ df['ofi'] = df['signed_vol'].rolling(30).sum().fillna(0)
141
+ buy_vol = (sign > 0) * df['volume']
142
+ sell_vol = (sign < 0) * df['volume']
143
+ imb = (buy_vol.rolling(60).sum() - sell_vol.rolling(60).sum()).abs()
144
+ tot = df['volume'].rolling(60).sum()
145
+ df['vpin'] = (imb / tot.replace(0, np.nan)).fillna(0)
146
+ vwap = (df['close'] * df['volume']).rolling(20).sum() / df['volume'].rolling(20).sum()
147
+ df['vwap_dev'] = (df['close'] - vwap).fillna(0)
148
+ df['rv_gk'] = (np.log(df['high'] / df['low'])**2) / 2 - (2 * np.log(2) - 1) * (np.log(df['close'] / df['open'])**2)
149
+ df['return_1m'] = df['ret']
150
+ df['return_5m'] = df['close'].pct_change(5)
151
+ df['return_15m'] = df['close'].pct_change(15)
152
+ r = df['volume'].rolling(500).mean()
153
+ s = df['volume'].rolling(500).std()
154
+ df['vol_zscore_50'] = ((df['volume'] - r) / s).fillna(0)
155
+
156
+ df['log_ret'] = np.log(df['close'] / df['close'].shift(1))
157
+ roll_max = df['high'].rolling(50).max()
158
+ roll_min = df['low'].rolling(50).min()
159
+ diff = (roll_max - roll_min).replace(0, 1e-9)
160
+ df['fib_pos'] = (df['close'] - roll_min) / diff
161
+ df['trend_slope'] = (df['ema20'] - df['ema20'].shift(5)) / df['ema20'].shift(5)
162
+ df['volatility'] = df['atr'] / df['close']
163
+ fib618 = roll_max - (diff * 0.382)
164
+ df['dist_fib618'] = (df['close'] - fib618) / df['close']
165
+ df['dist_ema50'] = (df['close'] - df['ema50']) / df['close']
166
+ df['ema200'] = ta.ema(df['close'], length=200)
167
+ df['dist_ema200'] = (df['close'] - df['ema200']) / df['close']
168
+
169
+ # Lags for V2
170
+ if timeframe == '1m':
171
+ for lag in [1, 2, 3, 5, 10, 20]:
172
+ df[f'log_ret_lag_{lag}'] = df['log_ret'].shift(lag).fillna(0)
173
+ df[f'rsi_lag_{lag}'] = (df['rsi'].shift(lag).fillna(50) / 100.0)
174
+ df[f'fib_pos_lag_{lag}'] = df['fib_pos'].shift(lag).fillna(0.5)
175
+ df[f'volatility_lag_{lag}'] = df['volatility'].shift(lag).fillna(0)
176
+
177
+ df.fillna(0, inplace=True)
178
+ return df
179
+
180
+ # ==============================================================
181
+ # 🧠 CPU PROCESSING (PRE-INFERENCE OPTIMIZED)
182
+ # ==============================================================
183
+ async def _process_data_in_memory(self, sym, candles, start_ms, end_ms):
184
+ safe_sym = sym.replace('/', '_')
185
+ period_suffix = f"{start_ms}_{end_ms}"
186
+ scores_file = f"{CACHE_DIR}/{safe_sym}_{period_suffix}_scores.pkl"
187
+
188
+ if os.path.exists(scores_file):
189
+ print(f" 📂 [{sym}] Data Exists -> Skipping.")
190
+ return
191
+
192
+ print(f" ⚙️ [CPU] Analyzing {sym} (Global Pre-Inference)...", flush=True)
193
+ t0 = time.time()
194
+
195
+ df_1m = pd.DataFrame(candles, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
196
+ df_1m['datetime'] = pd.to_datetime(df_1m['timestamp'], unit='ms')
197
+ df_1m.set_index('datetime', inplace=True)
198
+ df_1m = df_1m.sort_index()
199
+
200
+ frames = {}
201
+ agg_dict = {'open': 'first', 'high': 'max', 'low': 'min', 'close': 'last', 'volume': 'sum'}
202
+
203
+ # 1. Calc 1m
204
+ frames['1m'] = self._calculate_indicators_vectorized(df_1m.copy(), timeframe='1m')
205
+ frames['1m']['timestamp'] = frames['1m'].index.floor('1min').astype(np.int64) // 10**6
206
+ fast_1m = {col: frames['1m'][col].values for col in frames['1m'].columns}
207
+
208
+ # 2. Calc HTF
209
+ numpy_htf = {}
210
+ for tf_str, tf_code in [('5m', '5T'), ('15m', '15T'), ('1h', '1h'), ('4h', '4h'), ('1d', '1D')]:
211
+ resampled = df_1m.resample(tf_code).agg(agg_dict).dropna()
212
+ resampled = self._calculate_indicators_vectorized(resampled, timeframe=tf_str)
213
+ resampled['timestamp'] = resampled.index.astype(np.int64) // 10**6
214
+ frames[tf_str] = resampled
215
+ numpy_htf[tf_str] = {col: resampled[col].values for col in resampled.columns}
216
+
217
+ # 3. Global Index Maps
218
+ map_1m_to_1h = np.searchsorted(numpy_htf['1h']['timestamp'], fast_1m['timestamp'])
219
+ map_1m_to_5m = np.searchsorted(numpy_htf['5m']['timestamp'], fast_1m['timestamp'])
220
+ map_1m_to_15m = np.searchsorted(numpy_htf['15m']['timestamp'], fast_1m['timestamp'])
221
+
222
+ # Clip
223
+ max_idx_1h = len(numpy_htf['1h']['timestamp']) - 1
224
+ max_idx_5m = len(numpy_htf['5m']['timestamp']) - 1
225
+ max_idx_15m = len(numpy_htf['15m']['timestamp']) - 1
226
+
227
+ map_1m_to_1h = np.clip(map_1m_to_1h, 0, max_idx_1h)
228
+ map_1m_to_5m = np.clip(map_1m_to_5m, 0, max_idx_5m)
229
+ map_1m_to_15m = np.clip(map_1m_to_15m, 0, max_idx_15m)
230
+
231
+ # 4. Load Models
232
+ hydra_models = getattr(self.proc.guardian_hydra, 'models', {}) if self.proc.guardian_hydra else {}
233
+ hydra_cols = getattr(self.proc.guardian_hydra, 'feature_cols', []) if self.proc.guardian_hydra else []
234
+ legacy_v2 = getattr(self.proc.guardian_legacy, 'model_v2', None)
235
+
236
+ # 5. 🔥 PRE-CALCULATE LEGACY V2 (GLOBAL) 🔥
237
+ global_v2_probs = np.zeros(len(fast_1m['close']))
238
+
239
+ if legacy_v2:
240
+ print(f" 🚀 Pre-calculating Legacy V2 for entire history...", flush=True)
241
+ try:
242
+ # 1m Feats
243
+ l_log = fast_1m['log_ret']
244
+ l_rsi = fast_1m['rsi'] / 100.0
245
+ l_fib = fast_1m['fib_pos']
246
+ l_vol = fast_1m['volatility']
247
+
248
+ # HTF Feats Mapped to 1m
249
+ l5_log = numpy_htf['5m']['log_ret'][map_1m_to_5m]
250
+ l5_rsi = numpy_htf['5m']['rsi'][map_1m_to_5m] / 100.0
251
+ l5_fib = numpy_htf['5m']['fib_pos'][map_1m_to_5m]
252
+ l5_trd = numpy_htf['5m']['trend_slope'][map_1m_to_5m]
253
+
254
+ l15_log = numpy_htf['15m']['log_ret'][map_1m_to_15m]
255
+ l15_rsi = numpy_htf['15m']['rsi'][map_1m_to_15m] / 100.0
256
+ l15_fib618 = numpy_htf['15m']['dist_fib618'][map_1m_to_15m]
257
+ l15_trd = numpy_htf['15m']['trend_slope'][map_1m_to_15m]
258
+
259
+ # Lags
260
+ lag_cols = []
261
+ for lag in [1, 2, 3, 5, 10, 20]:
262
+ lag_cols.append(fast_1m[f'log_ret_lag_{lag}'])
263
+ lag_cols.append(fast_1m[f'rsi_lag_{lag}'])
264
+ lag_cols.append(fast_1m[f'fib_pos_lag_{lag}'])
265
+ lag_cols.append(fast_1m[f'volatility_lag_{lag}'])
266
+
267
+ # Huge Matrix
268
+ X_GLOBAL_V2 = np.column_stack([
269
+ l_log, l_rsi, l_fib, l_vol,
270
+ l5_log, l5_rsi, l5_fib, l5_trd,
271
+ l15_log, l15_rsi, l15_fib618, l15_trd,
272
+ *lag_cols
273
+ ])
274
+
275
+ # Predict All in One Go
276
+ dm_glob = xgb.DMatrix(X_GLOBAL_V2)
277
+ preds_glob = legacy_v2.predict(dm_glob)
278
+ global_v2_probs = preds_glob[:, 2] if len(preds_glob.shape) > 1 else preds_glob
279
+
280
+ except Exception as e: print(f"V2 Error: {e}")
281
+
282
+ # 6. 🔥 PRE-ASSEMBLE HYDRA STATIC (GLOBAL) 🔥
283
+ global_hydra_static = None
284
+ if hydra_models:
285
+ print(f" 🚀 Pre-assembling Hydra features...", flush=True)
286
+ try:
287
+ # Map columns that don't depend on PnL
288
+ h_rsi_1m = fast_1m['rsi']
289
+ h_rsi_5m = numpy_htf['5m']['rsi'][map_1m_to_5m]
290
+ h_rsi_15m = numpy_htf['15m']['rsi'][map_1m_to_15m]
291
+ h_bb = fast_1m['bb_width']
292
+ h_vol = fast_1m['rel_vol']
293
+ h_atr = fast_1m['atr']
294
+ h_close = fast_1m['close']
295
+
296
+ global_hydra_static = np.column_stack([h_rsi_1m, h_rsi_5m, h_rsi_15m, h_bb, h_vol, h_atr, h_close])
297
+ except: pass
298
+
299
+ # 7. Candidate Filtering
300
+ df_1h = frames['1h'].reindex(frames['5m'].index, method='ffill')
301
+ df_5m = frames['5m'].copy()
302
+ is_valid = (df_1h['rsi'] <= 70)
303
+ valid_indices = df_5m[is_valid].index
304
+ start_dt = df_1m.index[0] + pd.Timedelta(minutes=500)
305
+ final_valid_indices = [t for t in valid_indices if t >= start_dt]
306
+
307
+ total_signals = len(final_valid_indices)
308
+ print(f" 🎯 Candidates: {total_signals}. Running Models...", flush=True)
309
+
310
+ oracle_dir_model = getattr(self.proc.oracle, 'model_direction', None)
311
+ oracle_cols = getattr(self.proc.oracle, 'feature_cols', [])
312
+ sniper_models = getattr(self.proc.sniper, 'models', [])
313
+ sniper_cols = getattr(self.proc.sniper, 'feature_names', [])
314
+
315
+ ai_results = []
316
+
317
+ # Pre-allocate Hydra time vector (0 to 240)
318
+ time_vec = np.arange(1, 241)
319
+
320
+ # --- MAIN LOOP (Optimized Lookups) ---
321
+ for i, current_time in enumerate(final_valid_indices):
322
+ ts_val = int(current_time.timestamp() * 1000)
323
+ idx_1m = np.searchsorted(fast_1m['timestamp'], ts_val)
324
 
325
+ if idx_1m < 500 or idx_1m >= len(fast_1m['close']) - 245: continue
 
 
 
 
 
 
 
 
 
326
 
327
+ idx_1h = map_1m_to_1h[idx_1m]
328
+ idx_15m = map_1m_to_15m[idx_1m]
329
+ idx_4h = np.searchsorted(numpy_htf['4h']['timestamp'], ts_val)
330
+ if idx_4h >= len(numpy_htf['4h']['close']): idx_4h = len(numpy_htf['4h']['close']) - 1
331
+
332
+ # === Oracle (Single Call) ===
333
+ oracle_conf = 0.5
334
+ if oracle_dir_model:
335
+ o_vec = []
336
+ for col in oracle_cols:
337
+ val = 0.0
338
+ if col.startswith('1h_'): val = numpy_htf['1h'].get(col[3:], [0])[idx_1h]
339
+ elif col.startswith('15m_'): val = numpy_htf['15m'].get(col[4:], [0])[idx_15m]
340
+ elif col.startswith('4h_'): val = numpy_htf['4h'].get(col[3:], [0])[idx_4h]
341
+ elif col == 'sim_titan_score': val = 0.6
342
+ elif col == 'sim_mc_score': val = 0.5
343
+ elif col == 'sim_pattern_score': val = 0.5
344
+ o_vec.append(val)
345
+ try:
346
+ o_pred = oracle_dir_model.predict(np.array(o_vec).reshape(1, -1))[0]
347
+ oracle_conf = float(o_pred[0]) if isinstance(o_pred, (list, np.ndarray)) else float(o_pred)
348
+ if oracle_conf < 0.5: oracle_conf = 1 - oracle_conf
349
+ except: pass
350
 
351
+ # === Sniper (Single Call) ===
352
+ sniper_score = 0.5
353
+ if sniper_models:
354
+ s_vec = []
355
+ for col in sniper_cols:
356
+ if col in fast_1m: s_vec.append(fast_1m[col][idx_1m])
357
+ elif col == 'L_score': s_vec.append(fast_1m.get('vol_zscore_50', [0])[idx_1m])
358
+ else: s_vec.append(0.0)
359
+ try:
360
+ s_preds = [m.predict(np.array(s_vec).reshape(1, -1))[0] for m in sniper_models]
361
+ sniper_score = np.mean(s_preds)
362
+ except: pass
363
+
364
+ # === RISK SIMULATION (ULTRA FAST) ===
365
+ start_idx = idx_1m + 1
366
+ end_idx = start_idx + 240
367
 
368
+ # 1. LEGACY V2 (Instant Lookup)
369
+ max_legacy_v2 = 0.0; legacy_panic_time = 0
370
+ if legacy_v2:
371
+ # Just slice the pre-calculated array!
372
+ probs_slice = global_v2_probs[start_idx:end_idx]
373
+ max_legacy_v2 = np.max(probs_slice)
374
+ panic_indices = np.where(probs_slice > 0.8)[0]
375
+ if len(panic_indices) > 0:
376
+ legacy_panic_time = int(fast_1m['timestamp'][start_idx + panic_indices[0]])
377
+
378
+ # 2. HYDRA (Semi-Vectorized)
379
+ max_hydra_crash = 0.0; hydra_crash_time = 0
380
+ if hydra_models and global_hydra_static is not None:
381
+ # Slice Static Feats
382
+ sl_static = global_hydra_static[start_idx:end_idx]
383
 
384
+ entry_price = fast_1m['close'][idx_1m]
385
+ sl_close = sl_static[:, 6]
386
+ sl_atr = sl_static[:, 5]
387
+
388
+ # Calc Dynamic Feats
389
+ sl_dist = 1.5 * sl_atr
390
+ sl_dist = np.where(sl_dist > 0, sl_dist, entry_price * 0.015)
391
+
392
+ sl_pnl = sl_close - entry_price
393
+ sl_norm_pnl = sl_pnl / sl_dist
394
+
395
+ sl_cum_max = np.maximum.accumulate(sl_close)
396
+ sl_cum_max = np.maximum(sl_cum_max, entry_price)
397
+ sl_max_pnl_r = (sl_cum_max - entry_price) / sl_dist
398
+
399
+ sl_atr_pct = sl_atr / sl_close
400
+
401
+ zeros = np.zeros(240)
402
+ oracle_arr = np.full(240, oracle_conf)
403
+ l2_arr = np.full(240, 0.7)
404
+ target_arr = np.full(240, 3.0)
405
+
406
+ X_hydra = np.column_stack([
407
+ sl_static[:, 0], sl_static[:, 1], sl_static[:, 2], # RSIs
408
+ sl_static[:, 3], sl_static[:, 4], # BB, Vol
409
+ zeros, # dist_ema
410
+ sl_atr_pct, sl_norm_pnl, sl_max_pnl_r,
411
+ zeros, zeros, # dists
412
+ time_vec, # time
413
+ zeros, oracle_arr, l2_arr, target_arr
414
+ ])
415
+
416
+ try:
417
+ probs_crash = hydra_models['crash'].predict_proba(X_hydra)[:, 1]
418
+ max_hydra_crash = np.max(probs_crash)
419
+ crash_indices = np.where(probs_crash > 0.6)[0]
420
+ if len(crash_indices) > 0:
421
+ hydra_crash_time = int(fast_1m['timestamp'][start_idx + crash_indices[0]])
422
+ except: pass
423
+
424
+ ai_results.append({
425
+ 'timestamp': ts_val, 'symbol': sym, 'close': entry_price,
426
+ 'real_titan': 0.6, # Placeholder for real Titan score if available
427
+ 'oracle_conf': oracle_conf,
428
+ 'sniper_score': sniper_score,
429
+ 'risk_hydra_crash': max_hydra_crash,
430
+ 'time_hydra_crash': hydra_crash_time,
431
+ 'risk_legacy_v2': max_legacy_v2,
432
+ 'time_legacy_panic': legacy_panic_time,
433
+ 'signal_type': 'BREAKOUT',
434
+ 'l1_score': 50.0
435
+ })
436
 
437
+ dt = time.time() - t0
438
+ if ai_results:
439
+ pd.DataFrame(ai_results).to_pickle(scores_file)
440
+ print(f" ✅ [{sym}] Completed {len(ai_results)} signals in {dt:.2f} seconds.", flush=True)
441
+ else:
442
+ print(f" ⚠️ [{sym}] No valid signals. Time: {dt:.2f}s", flush=True)
443
+
444
+ del frames, fast_1m, numpy_htf, global_v2_probs, global_hydra_static
445
+ gc.collect()
446
+
447
+ # ==============================================================
448
+ # PHASE 1 & 2 (Enhanced with Consensus Analytics)
449
+ # ==============================================================
450
+ async def generate_truth_data(self):
451
+ if self.force_start_date and self.force_end_date:
452
+ dt_start = datetime.strptime(self.force_start_date, "%Y-%m-%d").replace(tzinfo=timezone.utc)
453
+ dt_end = datetime.strptime(self.force_end_date, "%Y-%m-%d").replace(tzinfo=timezone.utc)
454
+ start_time_ms = int(dt_start.timestamp() * 1000)
455
+ end_time_ms = int(dt_end.timestamp() * 1000)
456
+ print(f"\n🚜 [Phase 1] Processing Era: {self.force_start_date} -> {self.force_end_date}")
457
+ else: return
458
 
459
+ for sym in self.TARGET_COINS:
460
+ try:
461
+ candles = await self._fetch_all_data_fast(sym, start_time_ms, end_time_ms)
462
+ if candles: await self._process_data_in_memory(sym, candles, start_time_ms, end_time_ms)
463
+ except Exception as e: print(f" ❌ SKIP {sym}: {e}", flush=True)
464
+ gc.collect()
465
+
466
+ @staticmethod
467
+ def _worker_optimize(combinations_batch, scores_files, initial_capital, fees_pct, max_slots):
468
+ results = []
469
+ all_data = []
470
+ for fp in scores_files:
471
+ try:
472
+ df = pd.read_pickle(fp)
473
+ if not df.empty: all_data.append(df)
474
+ except: pass
475
+ if not all_data: return []
476
+ global_df = pd.concat(all_data)
477
+ global_df.sort_values('timestamp', inplace=True)
478
+ grouped_by_time = global_df.groupby('timestamp')
479
 
480
+ for config in combinations_batch:
481
+ wallet = { "balance": initial_capital, "allocated": 0.0, "positions": {}, "trades_history": [] }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
482
 
483
+ # Param Extraction
484
+ oracle_thresh = config.get('oracle_thresh', 0.6)
485
+ sniper_thresh = config.get('sniper_thresh', 0.4)
486
+ hydra_thresh = config['hydra_thresh']
487
+ # Titan & Pattern weights are in config but not used for hard filtering here,
488
+ # they are optimized for the DNA output.
489
 
490
+ peak_balance = initial_capital; max_drawdown = 0.0
491
+
492
+ for ts, group in grouped_by_time:
493
+ active = list(wallet["positions"].keys())
494
+ current_prices = {row['symbol']: row['close'] for _, row in group.iterrows()}
495
+ for sym in active:
496
+ if sym in current_prices:
497
+ curr = current_prices[sym]
498
+ pos = wallet["positions"][sym]
499
+ h_risk = pos.get('risk_hydra_crash', 0)
500
+ h_time = pos.get('time_hydra_crash', 0)
501
+ is_crash = (h_risk > hydra_thresh) and (h_time > 0) and (ts >= h_time)
502
+ pnl = (curr - pos['entry']) / pos['entry']
503
+ if is_crash or pnl > 0.04 or pnl < -0.02:
504
+ wallet['balance'] += pos['size'] * (1 + pnl - (fees_pct*2))
505
+ wallet['allocated'] -= pos['size']
506
+ # Add consensus data to history
507
+ wallet['trades_history'].append({
508
+ 'pnl': pnl,
509
+ 'consensus_score': pos['consensus_score']
510
+ })
511
+ del wallet['positions'][sym]
512
+
513
+ total_eq = wallet['balance'] + wallet['allocated']
514
+ if total_eq > peak_balance: peak_balance = total_eq
515
+ dd = (peak_balance - total_eq) / peak_balance
516
+ if dd > max_drawdown: max_drawdown = dd
517
+
518
+ if len(wallet['positions']) < max_slots:
519
+ for _, row in group.iterrows():
520
+ if row['symbol'] in wallet['positions']: continue
521
+
522
+ # Hard Filters
523
+ if row['oracle_conf'] < oracle_thresh: continue
524
+ if row['sniper_score'] < sniper_thresh: continue
525
+
526
+ # Consensus Calculation (Normalized)
527
+ # Titan (default 0.6) + Oracle + Sniper
528
+ cons_score = (row['real_titan'] + row['oracle_conf'] + row['sniper_score']) / 3.0
529
+
530
+ size = 10.0
531
+ if wallet['balance'] >= size:
532
+ wallet['positions'][row['symbol']] = {
533
+ 'entry': row['close'], 'size': size,
534
+ 'risk_hydra_crash': row['risk_hydra_crash'],
535
+ 'time_hydra_crash': row['time_hydra_crash'],
536
+ 'consensus_score': cons_score
537
+ }
538
+ wallet['balance'] -= size
539
+ wallet['allocated'] += size
540
 
541
+ final_bal = wallet['balance'] + wallet['allocated']
542
+ net_profit = final_bal - initial_capital
543
+ trades = wallet['trades_history']
544
+ total_t = len(trades)
545
+ win_count = len([t for t in trades if t['pnl'] > 0])
546
+ loss_count = len([t for t in trades if t['pnl'] <= 0])
547
+ win_rate = (win_count / total_t * 100) if total_t > 0 else 0
548
+ max_win = max([t['pnl'] for t in trades]) if trades else 0
549
+ max_loss = min([t['pnl'] for t in trades]) if trades else 0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
550
 
551
+ # 1. Fix: Calculate Streaks
552
+ max_win_streak = 0; max_loss_streak = 0; curr_w = 0; curr_l = 0
553
+ for t in trades:
554
+ if t['pnl'] > 0:
555
+ curr_w += 1; curr_l = 0
556
+ if curr_w > max_win_streak: max_win_streak = curr_w
557
+ else:
558
+ curr_l += 1; curr_w = 0
559
+ if curr_l > max_loss_streak: max_loss_streak = curr_l
560
+
561
+ # 2. Fix: Consensus Analytics
562
+ high_cons_trades = [t for t in trades if t['consensus_score'] > 0.65]
563
+ low_cons_trades = [t for t in trades if t['consensus_score'] <= 0.65]
564
 
565
+ hc_count = len(high_cons_trades)
566
+ hc_wins = len([t for t in high_cons_trades if t['pnl'] > 0])
567
+ hc_win_rate = (hc_wins/hc_count*100) if hc_count > 0 else 0
568
+ hc_avg_pnl = (sum([t['pnl'] for t in high_cons_trades]) / hc_count * 100) if hc_count > 0 else 0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
569
 
570
+ lc_count = len(low_cons_trades)
571
+ lc_wins = len([t for t in low_cons_trades if t['pnl'] > 0])
572
+ lc_win_rate = (lc_wins/lc_count*100) if lc_count > 0 else 0
573
 
574
+ agreement_rate = (hc_count / total_t * 100) if total_t > 0 else 0.0
575
+
576
+ results.append({
577
+ 'config': config, 'final_balance': final_bal, 'net_profit': net_profit,
578
+ 'total_trades': total_t, 'win_count': win_count, 'loss_count': loss_count,
579
+ 'win_rate': win_rate, 'max_single_win': max_win, 'max_single_loss': max_loss,
580
+ 'max_drawdown': max_drawdown * 100,
581
+ # New Fields
582
+ 'max_win_streak': max_win_streak,
583
+ 'max_loss_streak': max_loss_streak,
584
+ 'consensus_agreement_rate': agreement_rate,
585
+ 'high_consensus_win_rate': hc_win_rate,
586
+ 'low_consensus_win_rate': lc_win_rate,
587
+ 'high_consensus_avg_pnl': hc_avg_pnl
588
+ })
 
589
 
590
+ return results
591
+
592
+ async def run_optimization(self, target_regime="RANGE"):
593
+ await self.generate_truth_data()
 
 
 
 
594
 
595
+ # 🔥 Dynamic Ranges based on GRID_DENSITY
596
+ density = self.GRID_DENSITY
597
+
598
+ oracle_range = np.linspace(0.5, 0.8, density).tolist()
599
+ sniper_range = np.linspace(0.4, 0.7, density).tolist()
600
+ hydra_range = np.linspace(0.75, 0.95, density).tolist()
601
+
602
+ # New Params (Titan & Pattern)
603
+ titan_range = np.linspace(0.4, 0.7, density).tolist()
604
+ pattern_range = np.linspace(0.2, 0.5, density).tolist()
605
+
606
+ combinations = []
607
+ # Full Stack Loop
608
+ for o, s, h, wt, wp in itertools.product(oracle_range, sniper_range, hydra_range, titan_range, pattern_range):
609
+ combinations.append({
610
+ 'w_titan': wt,
611
+ 'w_struct': wp,
612
+ 'thresh': 0.5,
613
+ 'oracle_thresh': o,
614
+ 'sniper_thresh': s,
615
+ 'hydra_thresh': h,
616
+ 'legacy_thresh': 0.95
617
+ })
618
 
619
+ current_period_files = [os.path.join(CACHE_DIR, f) for f in os.listdir(CACHE_DIR) if f.endswith('_scores.pkl')]
620
+ if not current_period_files: return None, None
 
 
 
 
 
 
 
621
 
622
+ print(f"\n🧩 [Phase 2] Optimizing {len(combinations)} Configs (Full Stack | Density {density}) for {target_regime}...")
623
+ best_res = self._worker_optimize(combinations, current_period_files, self.INITIAL_CAPITAL, self.TRADING_FEES, self.MAX_SLOTS)
624
+ if not best_res: return None, None
625
+ best = sorted(best_res, key=lambda x: x['final_balance'], reverse=True)[0]
 
626
 
627
+ print("\n" + "="*60)
628
+ print(f"🏆 CHAMPION REPORT [{target_regime}]:")
629
+ print(f" 📅 Period: {self.force_start_date} -> {self.force_end_date}")
630
+ print(f" 💰 Final Balance: ${best['final_balance']:,.2f}")
631
+ print(f" 🚀 Net PnL: ${best['net_profit']:,.2f}")
632
+ print("-" * 60)
633
+ print(f" 📊 Total Trades: {best['total_trades']}")
634
+ print(f" ✅ Winning Trades: {best['win_count']}")
635
+ print(f" ❌ Losing Trades: {best['loss_count']}")
636
+ print(f" 📈 Win Rate: {best['win_rate']:.1f}%")
637
+ print("-" * 60)
638
+ print(f" 🧠 CONSENSUS ANALYTICS:")
639
+ print(f" 🤝 Model Agreement Rate: {best['consensus_agreement_rate']:.1f}% (of all trades)")
640
+ print(f" 🌟 High-Consensus Win Rate: {best['high_consensus_win_rate']:.1f}%")
641
+ print(f" 💎 High-Consensus Avg PnL: {best['high_consensus_avg_pnl']:.2f}%")
642
+ print("-" * 60)
643
+ print(f" 🟢 Max Single Win: ${best['max_single_win']:.2f}")
644
+ print(f" 🔴 Max Single Loss: ${best['max_single_loss']:.2f}")
645
+ print(f" 🔥 Max Win Streak: {best['max_win_streak']} trades")
646
+ print(f" 🧊 Max Loss Streak: {best['max_loss_streak']} trades")
647
+ print(f" 📉 Max Drawdown: {best['max_drawdown']:.1f}%")
648
+ print("-" * 60)
649
+ print(f" ⚙️ Oracle={best['config']['oracle_thresh']:.2f} | Sniper={best['config']['sniper_thresh']:.2f} | Hydra={best['config']['hydra_thresh']:.2f}")
650
+ print(f" ⚖️ Weights: Titan={best['config']['w_titan']:.2f} | Patterns={best['config']['w_struct']:.2f}")
651
+ print("="*60)
652
+ return best['config'], best
653
+
654
async def run_strategic_optimization_task():
    """Entry point: run the full-stack strategic backtest across all regime eras.

    Builds the service stack (storage, data manager, ML processor), optimizes
    each market-regime scenario in turn, submits every winning configuration
    to the adaptive hub as a challenger, and persists the hub state to R2.
    """
    print("\n🧪 [STRATEGIC BACKTEST] Full Stack Mode...")
    storage = R2Service()
    data_mgr = DataManager(None, None, storage)
    ml_proc = MLProcessor(data_mgr)
    await data_mgr.initialize()
    await ml_proc.initialize()
    # Keep Hydra quiet during bulk simulation runs.
    if ml_proc.guardian_hydra:
        ml_proc.guardian_hydra.set_silent_mode(True)

    try:
        adaptive_hub = AdaptiveHub(storage)
        await adaptive_hub.initialize()
        backtester = HeavyDutyBacktester(data_mgr, ml_proc)

        # ADJUST DENSITY HERE IF NEEDED
        # backtester.GRID_DENSITY = 3 (Default)

        scenarios = [
            {"regime": "BULL", "start": "2024-01-01", "end": "2024-03-30"},
            {"regime": "BEAR", "start": "2023-08-01", "end": "2023-09-15"},
            {"regime": "DEAD", "start": "2023-06-01", "end": "2023-08-01"},
            {"regime": "RANGE", "start": "2024-07-01", "end": "2024-09-30"},
        ]

        for scenario in scenarios:
            regime = scenario["regime"]
            backtester.set_date_range(scenario["start"], scenario["end"])
            winner_cfg, winner_stats = await backtester.run_optimization(target_regime=regime)
            if winner_cfg:
                adaptive_hub.submit_challenger(regime, winner_cfg, winner_stats)

        await adaptive_hub._save_state_to_r2()
        print("✅ [System] ALL Strategic DNA Updated & Saved.")

    finally:
        # Always release data-manager resources, even when a scenario fails.
        await data_mgr.close()


if __name__ == "__main__":
    asyncio.run(run_strategic_optimization_task())