Riy777 committed on
Commit
15e1ac6
·
verified ·
1 Parent(s): 3be23d3

Update backtest_engine.py

Browse files
Files changed (1) hide show
  1. backtest_engine.py +185 -126
backtest_engine.py CHANGED
@@ -1,5 +1,5 @@
1
  # ============================================================
2
- # 🧪 backtest_engine.py (V96.0 - GEM-Architect: Vectorized Logic Mirror)
3
  # ============================================================
4
 
5
  import asyncio
@@ -31,12 +31,13 @@ class HeavyDutyBacktester:
31
  def __init__(self, data_manager, processor):
32
  self.dm = data_manager
33
  self.proc = processor
34
- self.GRID_DENSITY = 10
 
35
  self.INITIAL_CAPITAL = 10.0
36
  self.TRADING_FEES = 0.001
37
  self.MAX_SLOTS = 4
38
 
39
- # القائمة الكاملة (50 عملة)
40
  self.TARGET_COINS = [
41
  'SOL/USDT', 'XRP/USDT', 'DOGE/USDT', 'ADA/USDT', 'AVAX/USDT', 'LINK/USDT',
42
  'TON/USDT', 'INJ/USDT', 'APT/USDT', 'OP/USDT', 'ARB/USDT', 'SUI/USDT',
@@ -53,7 +54,7 @@ class HeavyDutyBacktester:
53
  self.force_end_date = None
54
 
55
  if not os.path.exists(CACHE_DIR): os.makedirs(CACHE_DIR)
56
- print(f"🧪 [Backtest V96.0] Vectorized Logic Mirror (Exact L1 Simulation).")
57
 
58
  def set_date_range(self, start_str, end_str):
59
  self.force_start_date = start_str
@@ -108,40 +109,29 @@ class HeavyDutyBacktester:
108
  return unique_candles
109
 
110
  # ==============================================================
111
- # 🏎️ VECTORIZED INDICATOR CALCULATION
112
  # ==============================================================
113
  def _calculate_indicators_vectorized(self, df):
114
- """
115
- حساب المؤشرات الفنية لكامل البيانات دفعة واحدة باستخدام Pandas Vectorization.
116
- هذا يطابق منطق DataManager._calc_indicators بالضبط ولكن أسرع بـ 1000 مرة.
117
- """
118
- # RSI
119
  delta = df['close'].diff()
120
  gain = (delta.where(delta > 0, 0)).rolling(window=14).mean()
121
  loss = (-delta.where(delta < 0, 0)).rolling(window=14).mean()
122
  rs = gain / loss
123
  df['rsi'] = 100 - (100 / (1 + rs))
124
-
125
- # EMA
126
  df['ema20'] = df['close'].ewm(span=20, adjust=False).mean()
127
  df['ema50'] = df['close'].ewm(span=50, adjust=False).mean()
128
 
129
- # ATR (Simplified Vectorized)
130
  high_low = df['high'] - df['low']
131
  high_close = (df['high'] - df['close'].shift()).abs()
132
  low_close = (df['low'] - df['close'].shift()).abs()
133
  ranges = pd.concat([high_low, high_close, low_close], axis=1)
134
  true_range = ranges.max(axis=1)
135
  df['atr'] = true_range.rolling(14).mean()
136
-
137
- # Volume MA
138
  df['vol_ma20'] = df['volume'].rolling(window=20).mean()
139
-
140
  df.fillna(0, inplace=True)
141
  return df
142
 
143
  # ==============================================================
144
- # 🧠 CPU PROCESSING (VECTORIZED LOGIC MIRROR)
145
  # ==============================================================
146
  async def _process_data_in_memory(self, sym, candles, start_ms, end_ms):
147
  safe_sym = sym.replace('/', '_')
@@ -152,10 +142,9 @@ class HeavyDutyBacktester:
152
  print(f" 📂 [{sym}] Data Exists -> Skipping.")
153
  return
154
 
155
- print(f" ⚙️ [CPU] Analyzing {sym}...", flush=True)
156
  t0 = time.time()
157
 
158
- # 1. Prepare Pandas
159
  df_1m = pd.DataFrame(candles, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
160
  cols = ['open', 'high', 'low', 'close', 'volume']
161
  df_1m[cols] = df_1m[cols].astype('float32')
@@ -163,116 +152,65 @@ class HeavyDutyBacktester:
163
  df_1m.set_index('datetime', inplace=True)
164
  df_1m = df_1m.sort_index()
165
 
166
- # 2. Resample & Calculate Indicators (ONCE)
167
  frames = {}
168
  numpy_frames = {}
169
  time_indices = {}
170
  agg_dict = {'open': 'first', 'high': 'max', 'low': 'min', 'close': 'last', 'volume': 'sum'}
171
 
172
- # 1m Setup
173
  frames['1m'] = df_1m.copy()
174
  frames['1m']['timestamp'] = frames['1m'].index.floor('1min').astype(np.int64) // 10**6
175
  col_order = ['timestamp', 'open', 'high', 'low', 'close', 'volume']
176
  numpy_frames['1m'] = frames['1m'][col_order].values
177
  time_indices['1m'] = frames['1m'].index
178
 
179
- # Resample Others
180
  for tf_str, tf_code in [('5m', '5T'), ('15m', '15T'), ('1h', '1h'), ('4h', '4h'), ('1d', '1D')]:
181
  resampled = df_1m.resample(tf_code).agg(agg_dict).dropna()
182
-
183
- # 🔥🔥 Calculate Indicators HERE (Vectorized) 🔥🔥
184
  if tf_str in ['15m', '1h']:
185
  resampled = self._calculate_indicators_vectorized(resampled)
186
-
187
  resampled['timestamp'] = resampled.index.astype(np.int64) // 10**6
188
  frames[tf_str] = resampled
189
- numpy_frames[tf_str] = resampled[col_order].values # Raw data for passing
190
  time_indices[tf_str] = resampled.index
191
 
192
  ai_results = []
193
  valid_idx_5m = time_indices['5m']
194
 
195
- # 3. 🔥 VECTORIZED LOGIC MATCHING (The Speed Force) 🔥
196
- # بدلاً من حلقة تكرار عمياء، نجد "أماكن الاهتمام" فوراً باستخدام المنطق البولياني
197
- # هذا يطابق شروط DataManager._apply_logic_tree حرفياً
198
-
199
- # نحتاج لمطابقة وقت الـ 5m مع الـ 1h و 15m
200
- # سنقوم بعمل reindex للـ 1h و 15m ليتطابق مع الـ 5m (Forward Fill)
201
- # هذا يسمح لنا بمقارنة الأعمدة مباشرة
202
-
203
  df_5m_aligned = frames['5m'].copy()
204
-
205
- # دمج بيانات الـ 1h مع الـ 5m (Matching times)
206
  df_1h_aligned = frames['1h'].reindex(frames['5m'].index, method='ffill')
207
  df_15m_aligned = frames['15m'].reindex(frames['5m'].index, method='ffill')
208
 
209
- # --- تطبيق شروط L1 (Breakout & Reversal) ---
210
-
211
- # الشروط المشتركة (Common Filters from V15.2)
212
- # 1. 4H Change calculation (approx from 1H data)
213
- # shift(4) في فريم الساعة يقابل shift(48) في فريم 5 دقائق (تقريباً)
214
- # للأمان نستخدم بيانات الساعة المحاذية
215
  change_4h = ((df_1h_aligned['close'] - df_1h_aligned['close'].shift(4)) / df_1h_aligned['close'].shift(4)) * 100
216
-
217
- # فلتر: ممنوع أكثر من 8% صعود في 4 ساعات
218
  cond_not_pump = change_4h <= 8.0
219
- # فلتر: RSI 1H ممنوع فوق 70
220
  cond_rsi_1h_safe = df_1h_aligned['rsi'] <= 70
221
- # فلتر: الامتداد (Deviation)
222
  deviation = (df_1h_aligned['close'] - df_1h_aligned['ema20']) / df_1h_aligned['atr']
223
  cond_deviation_safe = deviation <= 1.8
224
-
225
  filters_pass = cond_not_pump & cond_rsi_1h_safe & cond_deviation_safe
226
 
227
- # --- Breakout Logic ---
228
- # 1. Bullish Structure (1H)
229
  bullish_1h = (df_1h_aligned['ema20'] > df_1h_aligned['ema50']) | (df_1h_aligned['close'] > df_1h_aligned['ema20'])
230
- # 2. RSI 1H Room (45-68)
231
  rsi_1h_ok = (df_1h_aligned['rsi'] >= 45) & (df_1h_aligned['rsi'] <= 68)
232
- # 3. 15M Close > EMA20
233
  close_above_ema_15m = df_15m_aligned['close'] >= df_15m_aligned['ema20']
234
- # 4. Volume 15M Spike
235
  vol_spike_15m = df_15m_aligned['volume'] >= (1.5 * df_15m_aligned['vol_ma20'])
236
-
237
  is_breakout = filters_pass & bullish_1h & rsi_1h_ok & close_above_ema_15m & vol_spike_15m
238
 
239
- # --- Reversal Logic ---
240
- # 1. RSI 1H Oversold (20-40)
241
  rsi_oversold = (df_1h_aligned['rsi'] >= 20) & (df_1h_aligned['rsi'] <= 40)
242
- # 2. Drop in price (change_4h <= -2)
243
  price_drop = change_4h <= -2.0
244
- # 3. Hammer/Rejection on 15M (Vectorized Approximation)
245
- # Hammer: Lower wick > 1.5 * Body
246
  body = (df_15m_aligned['close'] - df_15m_aligned['open']).abs()
247
  lower_wick = df_15m_aligned[['open', 'close']].min(axis=1) - df_15m_aligned['low']
248
  is_hammer = lower_wick > (body * 1.5)
249
  is_green = df_15m_aligned['close'] > df_15m_aligned['open']
250
-
251
  is_reversal = filters_pass & rsi_oversold & price_drop & (is_hammer | is_green)
252
 
253
- # --- Combined Mask ---
254
- # هذه هي اللحظات التي تستحق التحليل فقط!
255
  valid_mask = is_breakout | is_reversal
256
  valid_indices = df_5m_aligned[valid_mask].index
257
 
258
- # --------------------------------------------------------
259
-
260
- # 4. Loop ONLY on Valid Indices (The massive speedup)
261
- # بدلاً من 129,000 لفة، سنجد ربما 2,000 - 5,000 لفة فقط.
262
-
263
  start_dt = df_1m.index[0] + pd.Timedelta(minutes=500)
264
  final_valid_indices = [t for t in valid_indices if t >= start_dt]
265
 
266
  total_hits = len(final_valid_indices)
267
- print(f" 🎯 Found {total_hits} potential setups. Running Titan...", flush=True)
268
 
269
  for i, current_time in enumerate(final_valid_indices):
270
- # قص البيانات (Slicing) لتمريرها للنماذج
271
- # نستخدم searchsorted للسرعة القصوى
272
-
273
- # نحتاج تحويل timestamp الـ index إلى مكان في الـ numpy arrays
274
- # ملاحظة: time_indices['1m'] مرتب، لذا searchsorted يعمل
275
-
276
  idx_1m = time_indices['1m'].searchsorted(current_time, side='right') - 1
277
  idx_5m = time_indices['5m'].searchsorted(current_time, side='right') - 1
278
  idx_15m = time_indices['15m'].searchsorted(current_time, side='right') - 1
@@ -282,13 +220,6 @@ class HeavyDutyBacktester:
282
 
283
  if idx_1m < 500 or idx_4h < 100: continue
284
 
285
- # استخراج نوع الإشارة (لأننا دمجناهم في valid_mask)
286
- # نعيد التحقق السريع لنعرف النوع
287
- # ملاحظة: الوصول هنا سريع جداً لأننا نعرف التوقيت
288
- # أو يمكننا الاعتماد على أن DataManager سيعيد النوع الصحيح
289
-
290
- # نجهز الـ Packet ونرسلها لـ DataManager للتأكيد النهائي واستخراج السكور
291
- # هذا يضمن التطابق 100%
292
  ohlcv_1h = numpy_frames['1h'][idx_1h-60+1 : idx_1h+1].tolist()
293
  ohlcv_15m = numpy_frames['15m'][idx_15m-60+1 : idx_15m+1].tolist()
294
 
@@ -319,16 +250,73 @@ class HeavyDutyBacktester:
319
  if proc_res: real_titan = proc_res.get('titan_score', 0.5)
320
  except: pass
321
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
322
  ts_aligned = int(current_time.timestamp() // 60) * 60 * 1000
323
  ai_results.append({
324
  'timestamp': ts_aligned, 'symbol': sym, 'close': current_price,
325
- 'real_titan': real_titan, 'signal_type': signal_type, 'l1_score': l1_score
 
 
 
 
326
  })
327
 
328
  dt = time.time() - t0
329
  if ai_results:
330
  pd.DataFrame(ai_results).to_pickle(scores_file)
331
- print(f" 💾 [{sym}] Saved {len(ai_results)} verified signals. (Compute: {dt:.1f}s)", flush=True)
332
  else:
333
  print(f" ⚠️ [{sym}] No signals.", flush=True)
334
 
@@ -344,7 +332,7 @@ class HeavyDutyBacktester:
344
  dt_end = datetime.strptime(self.force_end_date, "%Y-%m-%d").replace(tzinfo=timezone.utc)
345
  start_time_ms = int(dt_start.timestamp() * 1000)
346
  end_time_ms = int(dt_end.timestamp() * 1000)
347
- print(f"\n🚜 [Phase 1] Era: {self.force_start_date} -> {self.force_end_date}")
348
  else:
349
  return
350
 
@@ -361,7 +349,7 @@ class HeavyDutyBacktester:
361
  gc.collect()
362
 
363
  # ==============================================================
364
- # PHASE 2: Portfolio Digital Twin Engine
365
  # ==============================================================
366
  @staticmethod
367
  def _worker_optimize(combinations_batch, scores_files, initial_capital, fees_pct, max_slots):
@@ -382,24 +370,44 @@ class HeavyDutyBacktester:
382
 
383
  for config in combinations_batch:
384
  wallet = { "balance": initial_capital, "allocated": 0.0, "positions": {}, "trades_history": [] }
 
 
385
  w_titan = config['w_titan']; w_struct = config['w_struct']; entry_thresh = config['thresh']
 
 
 
386
  peak_balance = initial_capital; max_drawdown = 0.0
387
 
388
  for ts, group in grouped_by_time:
389
  active_symbols = list(wallet["positions"].keys())
390
  current_prices = {row['symbol']: row['close'] for _, row in group.iterrows()}
391
 
392
- # Exit Logic
393
  for sym in active_symbols:
394
  if sym in current_prices:
395
  curr_p = current_prices[sym]
396
  pos = wallet["positions"][sym]
397
  entry_p = pos['entry_price']
 
 
 
 
 
 
 
 
 
 
 
 
 
398
  pct_change = (curr_p - entry_p) / entry_p
399
- if pct_change >= 0.03 or pct_change <= -0.02:
 
400
  gross_pnl = pos['size_usd'] * pct_change
401
  fees = pos['size_usd'] * fees_pct * 2
402
  net_pnl = gross_pnl - fees
 
403
  wallet["allocated"] -= pos['size_usd']
404
  wallet["balance"] += (pos['size_usd'] + net_pnl)
405
  del wallet["positions"][sym]
@@ -410,19 +418,20 @@ class HeavyDutyBacktester:
410
  dd = (peak_balance - current_total_equity) / peak_balance
411
  if dd > max_drawdown: max_drawdown = dd
412
 
413
- # Entry Logic
414
  effective_max_slots = max_slots
415
- if current_total_equity < MIN_CAPITAL_FOR_SPLIT:
416
- effective_max_slots = min(max_slots, 2)
417
 
418
  if len(wallet["positions"]) < effective_max_slots:
419
  free_capital = wallet["balance"]
420
  slots_left = effective_max_slots - len(wallet["positions"])
 
421
  if slots_left > 0 and free_capital >= MIN_TRADE_SIZE:
422
  candidates = []
423
  for _, row in group.iterrows():
424
  sym = row['symbol']
425
  if sym in wallet["positions"]: continue
 
426
  sig_type = row['signal_type']
427
  l1_raw_score = row['l1_score']
428
  real_titan = row['real_titan']
@@ -432,8 +441,17 @@ class HeavyDutyBacktester:
432
  score = 0.0
433
  if (w_titan + w_struct) > 0:
434
  score = ((real_titan * w_titan) + (norm_struct * w_struct)) / (w_titan + w_struct)
 
435
  if score >= entry_thresh:
436
- candidates.append({ 'symbol': sym, 'score': score, 'price': row['close'] })
 
 
 
 
 
 
 
 
437
  candidates.sort(key=lambda x: x['score'], reverse=True)
438
  for cand in candidates[:slots_left]:
439
  if current_total_equity >= MIN_CAPITAL_FOR_SPLIT:
@@ -441,44 +459,57 @@ class HeavyDutyBacktester:
441
  trade_size = min(target_size, wallet["balance"])
442
  else:
443
  trade_size = wallet["balance"] * 0.98
 
444
  if trade_size < MIN_TRADE_SIZE: continue
445
- wallet["positions"][cand['symbol']] = {'entry_price': cand['price'], 'size_usd': trade_size}
 
 
 
 
 
 
 
 
 
 
 
 
 
446
  wallet["allocated"] += trade_size
447
  wallet["balance"] -= trade_size
448
  if wallet["balance"] < MIN_TRADE_SIZE: break
449
 
450
- trades = wallet["trades_history"]
451
- if trades:
452
- final_equity = wallet["balance"] + wallet["allocated"]
453
- net_profit = final_equity - initial_capital
454
- pnls = [t['pnl'] for t in trades]
455
- win_count = len([p for p in pnls if p > 0]); loss_count = len([p for p in pnls if p <= 0])
456
- win_rate = (win_count / len(trades)) * 100
457
- max_single_win = max(pnls) if pnls else 0.0; max_single_loss = min(pnls) if pnls else 0.0
458
- current_win_streak = 0; max_win_streak = 0
459
- current_loss_streak = 0; max_loss_streak = 0
 
 
 
 
 
 
460
  for p in pnls:
461
  if p > 0:
462
- current_win_streak += 1; current_loss_streak = 0
463
- if current_win_streak > max_win_streak: max_win_streak = current_win_streak
464
  else:
465
- current_loss_streak += 1; current_win_streak = 0
466
- if current_loss_streak > max_loss_streak: max_loss_streak = current_loss_streak
467
- results.append({
468
- 'config': config, 'final_balance': final_equity,
469
- 'net_profit': net_profit, 'total_trades': len(trades),
470
- 'win_count': win_count, 'loss_count': loss_count, 'win_rate': win_rate,
471
- 'max_single_win': max_single_win, 'max_single_loss': max_single_loss,
472
- 'max_win_streak': max_win_streak, 'max_loss_streak': max_loss_streak,
473
- 'max_drawdown': max_drawdown * 100
474
- })
475
- else:
476
- results.append({
477
- 'config': config, 'final_balance': initial_capital, 'net_profit': 0.0,
478
- 'total_trades': 0, 'win_count': 0, 'loss_count': 0, 'win_rate': 0.0,
479
- 'max_single_win': 0.0, 'max_single_loss': 0.0, 'max_win_streak': 0,
480
- 'max_loss_streak': 0, 'max_drawdown': 0.0
481
- })
482
  return results
483
 
484
  async def run_optimization(self, target_regime="RANGE"):
@@ -498,37 +529,65 @@ class HeavyDutyBacktester:
498
  return None, None
499
 
500
  print(f"\n🧩 [Phase 2] Optimizing for {target_regime}...")
 
501
  w_titan_range = np.linspace(0.4, 0.9, num=self.GRID_DENSITY)
502
  w_struct_range = np.linspace(0.1, 0.6, num=self.GRID_DENSITY)
503
  thresh_range = np.linspace(0.20, 0.60, num=self.GRID_DENSITY)
504
 
 
 
 
 
 
505
  combinations = []
506
- for wt, ws, th in itertools.product(w_titan_range, w_struct_range, thresh_range):
507
  if 0.9 <= (wt + ws) <= 1.1:
508
- combinations.append({'w_titan': round(wt, 2), 'w_struct': round(ws, 2), 'thresh': round(th, 2)})
 
 
 
 
 
 
509
 
510
  final_results = []
511
- batch_size = 100
 
 
512
 
513
  for i in range(0, len(combinations), batch_size):
514
  batch = combinations[i:i+batch_size]
515
  res = self._worker_optimize(batch, current_period_files, self.INITIAL_CAPITAL, self.TRADING_FEES, self.MAX_SLOTS)
516
  final_results.extend(res)
517
- if i % 1000 == 0: print(f" ...Analyzed {i}/{len(combinations)} configs", flush=True)
518
 
519
  if not final_results: return None, None
520
  best = sorted(final_results, key=lambda x: x['final_balance'], reverse=True)[0]
521
 
522
  print("\n" + "="*60)
523
  print(f"🏆 CHAMPION REPORT [{target_regime}]:")
 
524
  print(f" 💰 Final Balance: ${best['final_balance']:,.2f}")
 
 
 
 
 
525
  print(f" 📈 Win Rate: {best['win_rate']:.1f}%")
526
- print(f" ⚙️ Config: Titan={best['config']['w_titan']} | Struct={best['config']['w_struct']} | Thresh={best['config']['thresh']}")
 
 
 
 
 
 
 
 
527
  print("="*60)
528
  return best['config'], best
529
 
530
  async def run_strategic_optimization_task():
531
- print("\n🧪 [STRATEGIC BACKTEST] Vectorized Logic Mirror Initiated...")
532
  r2 = R2Service()
533
  dm = DataManager(None, None, r2)
534
  proc = MLProcessor(dm)
 
1
  # ============================================================
2
+ # 🧪 backtest_engine.py (V101.0 - GEM-Architect: Smart Adaptive Grid)
3
  # ============================================================
4
 
5
  import asyncio
 
31
  def __init__(self, data_manager, processor):
32
  self.dm = data_manager
33
  self.proc = processor
34
+ # كثافة الشبكة للدخول (Titan/Structure)
35
+ self.GRID_DENSITY = 6
36
  self.INITIAL_CAPITAL = 10.0
37
  self.TRADING_FEES = 0.001
38
  self.MAX_SLOTS = 4
39
 
40
+ # القائمة الكاملة
41
  self.TARGET_COINS = [
42
  'SOL/USDT', 'XRP/USDT', 'DOGE/USDT', 'ADA/USDT', 'AVAX/USDT', 'LINK/USDT',
43
  'TON/USDT', 'INJ/USDT', 'APT/USDT', 'OP/USDT', 'ARB/USDT', 'SUI/USDT',
 
54
  self.force_end_date = None
55
 
56
  if not os.path.exists(CACHE_DIR): os.makedirs(CACHE_DIR)
57
+ print(f"🧪 [Backtest V101.0] Smart Adaptive Grid (Full Dynamic Optimization).")
58
 
59
  def set_date_range(self, start_str, end_str):
60
  self.force_start_date = start_str
 
109
  return unique_candles
110
 
111
  # ==============================================================
112
+ # 🏎️ VECTORIZED INDICATORS
113
  # ==============================================================
114
  def _calculate_indicators_vectorized(self, df):
 
 
 
 
 
115
  delta = df['close'].diff()
116
  gain = (delta.where(delta > 0, 0)).rolling(window=14).mean()
117
  loss = (-delta.where(delta < 0, 0)).rolling(window=14).mean()
118
  rs = gain / loss
119
  df['rsi'] = 100 - (100 / (1 + rs))
 
 
120
  df['ema20'] = df['close'].ewm(span=20, adjust=False).mean()
121
  df['ema50'] = df['close'].ewm(span=50, adjust=False).mean()
122
 
 
123
  high_low = df['high'] - df['low']
124
  high_close = (df['high'] - df['close'].shift()).abs()
125
  low_close = (df['low'] - df['close'].shift()).abs()
126
  ranges = pd.concat([high_low, high_close, low_close], axis=1)
127
  true_range = ranges.max(axis=1)
128
  df['atr'] = true_range.rolling(14).mean()
 
 
129
  df['vol_ma20'] = df['volume'].rolling(window=20).mean()
 
130
  df.fillna(0, inplace=True)
131
  return df
132
 
133
  # ==============================================================
134
+ # 🧠 CPU PROCESSING (WITH TWIN-GUARDIAN PROFILING)
135
  # ==============================================================
136
  async def _process_data_in_memory(self, sym, candles, start_ms, end_ms):
137
  safe_sym = sym.replace('/', '_')
 
142
  print(f" 📂 [{sym}] Data Exists -> Skipping.")
143
  return
144
 
145
+ print(f" ⚙️ [CPU] Analyzing {sym} (Profiling Hydra & Legacy)...", flush=True)
146
  t0 = time.time()
147
 
 
148
  df_1m = pd.DataFrame(candles, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
149
  cols = ['open', 'high', 'low', 'close', 'volume']
150
  df_1m[cols] = df_1m[cols].astype('float32')
 
152
  df_1m.set_index('datetime', inplace=True)
153
  df_1m = df_1m.sort_index()
154
 
 
155
  frames = {}
156
  numpy_frames = {}
157
  time_indices = {}
158
  agg_dict = {'open': 'first', 'high': 'max', 'low': 'min', 'close': 'last', 'volume': 'sum'}
159
 
 
160
  frames['1m'] = df_1m.copy()
161
  frames['1m']['timestamp'] = frames['1m'].index.floor('1min').astype(np.int64) // 10**6
162
  col_order = ['timestamp', 'open', 'high', 'low', 'close', 'volume']
163
  numpy_frames['1m'] = frames['1m'][col_order].values
164
  time_indices['1m'] = frames['1m'].index
165
 
 
166
  for tf_str, tf_code in [('5m', '5T'), ('15m', '15T'), ('1h', '1h'), ('4h', '4h'), ('1d', '1D')]:
167
  resampled = df_1m.resample(tf_code).agg(agg_dict).dropna()
 
 
168
  if tf_str in ['15m', '1h']:
169
  resampled = self._calculate_indicators_vectorized(resampled)
 
170
  resampled['timestamp'] = resampled.index.astype(np.int64) // 10**6
171
  frames[tf_str] = resampled
172
+ numpy_frames[tf_str] = resampled[col_order].values
173
  time_indices[tf_str] = resampled.index
174
 
175
  ai_results = []
176
  valid_idx_5m = time_indices['5m']
177
 
178
+ # --- L1 Logic Vectorized ---
 
 
 
 
 
 
 
179
  df_5m_aligned = frames['5m'].copy()
 
 
180
  df_1h_aligned = frames['1h'].reindex(frames['5m'].index, method='ffill')
181
  df_15m_aligned = frames['15m'].reindex(frames['5m'].index, method='ffill')
182
 
 
 
 
 
 
 
183
  change_4h = ((df_1h_aligned['close'] - df_1h_aligned['close'].shift(4)) / df_1h_aligned['close'].shift(4)) * 100
 
 
184
  cond_not_pump = change_4h <= 8.0
 
185
  cond_rsi_1h_safe = df_1h_aligned['rsi'] <= 70
 
186
  deviation = (df_1h_aligned['close'] - df_1h_aligned['ema20']) / df_1h_aligned['atr']
187
  cond_deviation_safe = deviation <= 1.8
 
188
  filters_pass = cond_not_pump & cond_rsi_1h_safe & cond_deviation_safe
189
 
 
 
190
  bullish_1h = (df_1h_aligned['ema20'] > df_1h_aligned['ema50']) | (df_1h_aligned['close'] > df_1h_aligned['ema20'])
 
191
  rsi_1h_ok = (df_1h_aligned['rsi'] >= 45) & (df_1h_aligned['rsi'] <= 68)
 
192
  close_above_ema_15m = df_15m_aligned['close'] >= df_15m_aligned['ema20']
 
193
  vol_spike_15m = df_15m_aligned['volume'] >= (1.5 * df_15m_aligned['vol_ma20'])
 
194
  is_breakout = filters_pass & bullish_1h & rsi_1h_ok & close_above_ema_15m & vol_spike_15m
195
 
 
 
196
  rsi_oversold = (df_1h_aligned['rsi'] >= 20) & (df_1h_aligned['rsi'] <= 40)
 
197
  price_drop = change_4h <= -2.0
 
 
198
  body = (df_15m_aligned['close'] - df_15m_aligned['open']).abs()
199
  lower_wick = df_15m_aligned[['open', 'close']].min(axis=1) - df_15m_aligned['low']
200
  is_hammer = lower_wick > (body * 1.5)
201
  is_green = df_15m_aligned['close'] > df_15m_aligned['open']
 
202
  is_reversal = filters_pass & rsi_oversold & price_drop & (is_hammer | is_green)
203
 
 
 
204
  valid_mask = is_breakout | is_reversal
205
  valid_indices = df_5m_aligned[valid_mask].index
206
 
 
 
 
 
 
207
  start_dt = df_1m.index[0] + pd.Timedelta(minutes=500)
208
  final_valid_indices = [t for t in valid_indices if t >= start_dt]
209
 
210
  total_hits = len(final_valid_indices)
211
+ print(f" 🎯 Found {total_hits} signals. Profiling Guardians...", flush=True)
212
 
213
  for i, current_time in enumerate(final_valid_indices):
 
 
 
 
 
 
214
  idx_1m = time_indices['1m'].searchsorted(current_time, side='right') - 1
215
  idx_5m = time_indices['5m'].searchsorted(current_time, side='right') - 1
216
  idx_15m = time_indices['15m'].searchsorted(current_time, side='right') - 1
 
220
 
221
  if idx_1m < 500 or idx_4h < 100: continue
222
 
 
 
 
 
 
 
 
223
  ohlcv_1h = numpy_frames['1h'][idx_1h-60+1 : idx_1h+1].tolist()
224
  ohlcv_15m = numpy_frames['15m'][idx_15m-60+1 : idx_15m+1].tolist()
225
 
 
250
  if proc_res: real_titan = proc_res.get('titan_score', 0.5)
251
  except: pass
252
 
253
+ # 🔥 RISK PROFILING (Hydra + Legacy)
254
+ max_hydra_crash = 0.0
255
+ max_hydra_giveback = 0.0
256
+ max_legacy_v2 = 0.0
257
+ max_legacy_v3 = 0.0
258
+
259
+ hydra_crash_time = 0
260
+ legacy_panic_time = 0
261
+
262
+ trade_ctx = {
263
+ 'entry_price': current_price, 'entry_time': str(current_time),
264
+ 'volume_30m_usd': 1000000
265
+ }
266
+
267
+ future_limit = 240
268
+ end_idx_1m = min(idx_1m + future_limit, len(numpy_frames['1m']) - 1)
269
+ check_step = 10
270
+ current_idx_1m = idx_1m
271
+
272
+ while current_idx_1m < end_idx_1m:
273
+ current_idx_1m += check_step
274
+
275
+ future_1m_data = numpy_frames['1m'][current_idx_1m-1000+1 : current_idx_1m+1].tolist()
276
+ future_5m_data = numpy_frames['5m'][idx_5m-300+1 : idx_5m+1].tolist()
277
+ current_ts = int(numpy_frames['1m'][current_idx_1m][0])
278
+
279
+ # 🐉 A. Check Hydra
280
+ if self.proc.guardian_hydra:
281
+ hydra_res = self.proc.guardian_hydra.analyze_position(sym, future_1m_data, future_5m_data, ohlcv_15m, trade_ctx)
282
+ probs = hydra_res.get('probs', {})
283
+ p_crash = probs.get('crash', 0.0)
284
+
285
+ if p_crash > max_hydra_crash:
286
+ max_hydra_crash = p_crash
287
+ if p_crash > 0.6 and hydra_crash_time == 0: hydra_crash_time = current_ts
288
+
289
+ if probs.get('giveback', 0) > max_hydra_giveback: max_hydra_giveback = probs.get('giveback', 0)
290
+
291
+ # 🕸️ B. Check Legacy
292
+ if self.proc.guardian_legacy:
293
+ legacy_res = self.proc.guardian_legacy.analyze_position(
294
+ future_1m_data, future_5m_data, ohlcv_15m, current_price, volume_30m_usd=1000000
295
+ )
296
+ scores = legacy_res.get('scores', {})
297
+ s_v2 = scores.get('v2', 0.0)
298
+ s_v3 = scores.get('v3', 0.0)
299
+
300
+ if s_v2 > max_legacy_v2:
301
+ max_legacy_v2 = s_v2
302
+ if s_v2 > 0.8 and legacy_panic_time == 0: legacy_panic_time = current_ts
303
+
304
+ if s_v3 > max_legacy_v3: max_legacy_v3 = s_v3
305
+
306
  ts_aligned = int(current_time.timestamp() // 60) * 60 * 1000
307
  ai_results.append({
308
  'timestamp': ts_aligned, 'symbol': sym, 'close': current_price,
309
+ 'real_titan': real_titan, 'signal_type': signal_type, 'l1_score': l1_score,
310
+ 'risk_hydra_crash': max_hydra_crash,
311
+ 'time_hydra_crash': hydra_crash_time,
312
+ 'risk_legacy_v2': max_legacy_v2,
313
+ 'time_legacy_panic': legacy_panic_time
314
  })
315
 
316
  dt = time.time() - t0
317
  if ai_results:
318
  pd.DataFrame(ai_results).to_pickle(scores_file)
319
+ print(f" 💾 [{sym}] Saved {len(ai_results)} profiled signals. ({dt:.1f}s)", flush=True)
320
  else:
321
  print(f" ⚠️ [{sym}] No signals.", flush=True)
322
 
 
332
  dt_end = datetime.strptime(self.force_end_date, "%Y-%m-%d").replace(tzinfo=timezone.utc)
333
  start_time_ms = int(dt_start.timestamp() * 1000)
334
  end_time_ms = int(dt_end.timestamp() * 1000)
335
+ print(f"\n🚜 [Phase 1] Processing Era: {self.force_start_date} -> {self.force_end_date}")
336
  else:
337
  return
338
 
 
349
  gc.collect()
350
 
351
  # ==============================================================
352
+ # PHASE 2: Portfolio Digital Twin (✅ SMART DYNAMIC GRID)
353
  # ==============================================================
354
  @staticmethod
355
  def _worker_optimize(combinations_batch, scores_files, initial_capital, fees_pct, max_slots):
 
370
 
371
  for config in combinations_batch:
372
  wallet = { "balance": initial_capital, "allocated": 0.0, "positions": {}, "trades_history": [] }
373
+
374
+ # Configs
375
  w_titan = config['w_titan']; w_struct = config['w_struct']; entry_thresh = config['thresh']
376
+ hydra_thresh = config['hydra_thresh']
377
+ legacy_thresh = config['legacy_thresh']
378
+
379
  peak_balance = initial_capital; max_drawdown = 0.0
380
 
381
  for ts, group in grouped_by_time:
382
  active_symbols = list(wallet["positions"].keys())
383
  current_prices = {row['symbol']: row['close'] for _, row in group.iterrows()}
384
 
385
+ # --- 1. Exit Logic ---
386
  for sym in active_symbols:
387
  if sym in current_prices:
388
  curr_p = current_prices[sym]
389
  pos = wallet["positions"][sym]
390
  entry_p = pos['entry_price']
391
+
392
+ hydra_score = pos.get('risk_hydra_crash', 0.0)
393
+ hydra_time = pos.get('time_hydra_crash', 0)
394
+ exit_hydra = (hydra_score > hydra_thresh) and (hydra_time > 0) and (ts >= hydra_time)
395
+
396
+ legacy_score = pos.get('risk_legacy_v2', 0.0)
397
+ legacy_time = pos.get('time_legacy_panic', 0)
398
+ exit_legacy = (legacy_score > legacy_thresh) and (legacy_time > 0) and (ts >= legacy_time)
399
+
400
+ tp_target = pos.get('tp_target', 0.03)
401
+ sl_target = pos.get('sl_target', -0.02)
402
+ if curr_p > entry_p * 1.025: sl_target = 0.001
403
+
404
  pct_change = (curr_p - entry_p) / entry_p
405
+
406
+ if exit_hydra or exit_legacy or pct_change >= tp_target or pct_change <= sl_target:
407
  gross_pnl = pos['size_usd'] * pct_change
408
  fees = pos['size_usd'] * fees_pct * 2
409
  net_pnl = gross_pnl - fees
410
+
411
  wallet["allocated"] -= pos['size_usd']
412
  wallet["balance"] += (pos['size_usd'] + net_pnl)
413
  del wallet["positions"][sym]
 
418
  dd = (peak_balance - current_total_equity) / peak_balance
419
  if dd > max_drawdown: max_drawdown = dd
420
 
421
+ # --- 2. Entry Logic ---
422
  effective_max_slots = max_slots
423
+ if current_total_equity < MIN_CAPITAL_FOR_SPLIT: effective_max_slots = min(max_slots, 2)
 
424
 
425
  if len(wallet["positions"]) < effective_max_slots:
426
  free_capital = wallet["balance"]
427
  slots_left = effective_max_slots - len(wallet["positions"])
428
+
429
  if slots_left > 0 and free_capital >= MIN_TRADE_SIZE:
430
  candidates = []
431
  for _, row in group.iterrows():
432
  sym = row['symbol']
433
  if sym in wallet["positions"]: continue
434
+
435
  sig_type = row['signal_type']
436
  l1_raw_score = row['l1_score']
437
  real_titan = row['real_titan']
 
441
  score = 0.0
442
  if (w_titan + w_struct) > 0:
443
  score = ((real_titan * w_titan) + (norm_struct * w_struct)) / (w_titan + w_struct)
444
+
445
  if score >= entry_thresh:
446
+ candidates.append({
447
+ 'symbol': sym, 'score': score, 'price': row['close'],
448
+ 'titan': real_titan,
449
+ 'risk_hydra_crash': row.get('risk_hydra_crash', 0),
450
+ 'time_hydra_crash': row.get('time_hydra_crash', 0),
451
+ 'risk_legacy_v2': row.get('risk_legacy_v2', 0),
452
+ 'time_legacy_panic': row.get('time_legacy_panic', 0)
453
+ })
454
+
455
  candidates.sort(key=lambda x: x['score'], reverse=True)
456
  for cand in candidates[:slots_left]:
457
  if current_total_equity >= MIN_CAPITAL_FOR_SPLIT:
 
459
  trade_size = min(target_size, wallet["balance"])
460
  else:
461
  trade_size = wallet["balance"] * 0.98
462
+
463
  if trade_size < MIN_TRADE_SIZE: continue
464
+
465
+ tp_base = 0.03; sl_base = -0.02
466
+ if cand['titan'] > 0.8: tp_base = 0.06; sl_base = -0.025
467
+ elif cand['titan'] < 0.6: tp_base = 0.02; sl_base = -0.015
468
+
469
+ wallet["positions"][cand['symbol']] = {
470
+ 'entry_price': cand['price'],
471
+ 'size_usd': trade_size,
472
+ 'tp_target': tp_base, 'sl_target': sl_base,
473
+ 'risk_hydra_crash': cand['risk_hydra_crash'],
474
+ 'time_hydra_crash': cand['time_hydra_crash'],
475
+ 'risk_legacy_v2': cand['risk_legacy_v2'],
476
+ 'time_legacy_panic': cand['time_legacy_panic']
477
+ }
478
  wallet["allocated"] += trade_size
479
  wallet["balance"] -= trade_size
480
  if wallet["balance"] < MIN_TRADE_SIZE: break
481
 
482
+ final_equity = wallet["balance"] + wallet["allocated"]
483
+ net_profit = final_equity - initial_capital
484
+
485
+ total_trades = len(wallet["trades_history"])
486
+ win_count = 0; loss_count = 0; win_rate = 0.0
487
+ max_single_win = 0.0; max_single_loss = 0.0
488
+ max_win_streak = 0; max_loss_streak = 0
489
+
490
+ if total_trades > 0:
491
+ pnls = [t['pnl'] for t in wallet["trades_history"]]
492
+ win_count = len([p for p in pnls if p > 0])
493
+ loss_count = len([p for p in pnls if p <= 0])
494
+ win_rate = (win_count / total_trades) * 100
495
+ max_single_win = max(pnls) if pnls else 0.0
496
+ max_single_loss = min(pnls) if pnls else 0.0
497
+ curr_win = 0; curr_loss = 0
498
  for p in pnls:
499
  if p > 0:
500
+ curr_win += 1; curr_loss = 0
501
+ if curr_win > max_win_streak: max_win_streak = curr_win
502
  else:
503
+ curr_loss += 1; curr_win = 0
504
+ if curr_loss > max_loss_streak: max_loss_streak = curr_loss
505
+
506
+ results.append({
507
+ 'config': config, 'final_balance': final_equity, 'net_profit': net_profit,
508
+ 'total_trades': total_trades, 'win_count': win_count, 'loss_count': loss_count,
509
+ 'win_rate': win_rate, 'max_single_win': max_single_win, 'max_single_loss': max_single_loss,
510
+ 'max_win_streak': max_win_streak, 'max_loss_streak': max_loss_streak, 'max_drawdown': max_drawdown * 100
511
+ })
512
+
 
 
 
 
 
 
 
513
  return results
514
 
515
  async def run_optimization(self, target_regime="RANGE"):
 
529
  return None, None
530
 
531
  print(f"\n🧩 [Phase 2] Optimizing for {target_regime}...")
532
+
533
  w_titan_range = np.linspace(0.4, 0.9, num=self.GRID_DENSITY)
534
  w_struct_range = np.linspace(0.1, 0.6, num=self.GRID_DENSITY)
535
  thresh_range = np.linspace(0.20, 0.60, num=self.GRID_DENSITY)
536
 
537
+ # ✅ Smart Dynamic Grid for Guardians (Controlled Density)
538
+ GUARD_DENSITY = 4
539
+ hydra_range = np.linspace(0.70, 0.95, num=GUARD_DENSITY)
540
+ legacy_range = np.linspace(0.85, 0.98, num=GUARD_DENSITY)
541
+
542
  combinations = []
543
+ for wt, ws, th, hydra, legacy in itertools.product(w_titan_range, w_struct_range, thresh_range, hydra_range, legacy_range):
544
  if 0.9 <= (wt + ws) <= 1.1:
545
+ combinations.append({
546
+ 'w_titan': round(wt, 2),
547
+ 'w_struct': round(ws, 2),
548
+ 'thresh': round(th, 2),
549
+ 'hydra_thresh': round(hydra, 2),
550
+ 'legacy_thresh': round(legacy, 2)
551
+ })
552
 
553
  final_results = []
554
+ batch_size = 200
555
+
556
+ print(f" 🔥 Testing {len(combinations)} Strategy Combinations...")
557
 
558
  for i in range(0, len(combinations), batch_size):
559
  batch = combinations[i:i+batch_size]
560
  res = self._worker_optimize(batch, current_period_files, self.INITIAL_CAPITAL, self.TRADING_FEES, self.MAX_SLOTS)
561
  final_results.extend(res)
562
+ if i % 2000 == 0: print(f" ...Analyzed {i}/{len(combinations)} configs", flush=True)
563
 
564
  if not final_results: return None, None
565
  best = sorted(final_results, key=lambda x: x['final_balance'], reverse=True)[0]
566
 
567
  print("\n" + "="*60)
568
  print(f"🏆 CHAMPION REPORT [{target_regime}]:")
569
+ print(f" 📅 Period: {self.force_start_date} -> {self.force_end_date}")
570
  print(f" 💰 Final Balance: ${best['final_balance']:,.2f}")
571
+ print(f" 🚀 Net PnL: ${best['net_profit']:,.2f}")
572
+ print("-" * 60)
573
+ print(f" 📊 Total Trades: {best['total_trades']}")
574
+ print(f" ✅ Winning Trades: {best['win_count']}")
575
+ print(f" ❌ Losing Trades: {best['loss_count']}")
576
  print(f" 📈 Win Rate: {best['win_rate']:.1f}%")
577
+ print("-" * 60)
578
+ print(f" 🟢 Max Single Win: ${best['max_single_win']:.2f}")
579
+ print(f" 🔴 Max Single Loss: ${best['max_single_loss']:.2f}")
580
+ print(f" 🔥 Max Win Streak: {best['max_win_streak']} trades")
581
+ print(f" 🧊 Max Loss Streak: {best['max_loss_streak']} trades")
582
+ print(f" 📉 Max Drawdown: {best['max_drawdown']:.1f}%")
583
+ print("-" * 60)
584
+ print(f" ⚙️ Entry: Titan={best['config']['w_titan']} | Struct={best['config']['w_struct']} | Thresh={best['config']['thresh']}")
585
+ print(f" 🛡️ Guard: Hydra={best['config']['hydra_thresh']} | Legacy={best['config']['legacy_thresh']}")
586
  print("="*60)
587
  return best['config'], best
588
 
589
  async def run_strategic_optimization_task():
590
+ print("\n🧪 [STRATEGIC BACKTEST] Smart Adaptive Grid Initiated...")
591
  r2 = R2Service()
592
  dm = DataManager(None, None, r2)
593
  proc = MLProcessor(dm)