Riy777 committed on
Commit
445dda3
·
verified ·
1 Parent(s): 61417d7

Update backtest_engine.py

Browse files
Files changed (1) hide show
  1. backtest_engine.py +64 -59
backtest_engine.py CHANGED
@@ -1,5 +1,5 @@
1
  # ============================================================
2
- # 🧪 backtest_engine.py (V110.1 - GEM-Architect: Fix idx_5m NameError)
3
  # ============================================================
4
 
5
  import asyncio
@@ -55,7 +55,7 @@ class HeavyDutyBacktester:
55
  self.force_end_date = None
56
 
57
  if not os.path.exists(CACHE_DIR): os.makedirs(CACHE_DIR)
58
- print(f"🧪 [Backtest V110.1] Full Stack Simulation (Fixed idx_5m).")
59
 
60
  def set_date_range(self, start_str, end_str):
61
  self.force_start_date = start_str
@@ -104,20 +104,23 @@ class HeavyDutyBacktester:
104
  return unique_candles
105
 
106
  # ==============================================================
107
- # 🏎️ VECTORIZED INDICATORS
108
  # ==============================================================
109
  def _calculate_indicators_vectorized(self, df, timeframe='1m'):
 
110
  df['close'] = df['close'].astype(float)
111
  df['high'] = df['high'].astype(float)
112
  df['low'] = df['low'].astype(float)
113
  df['volume'] = df['volume'].astype(float)
114
  df['open'] = df['open'].astype(float)
115
 
 
116
  df['rsi'] = ta.rsi(df['close'], length=14)
117
  df['ema20'] = ta.ema(df['close'], length=20)
118
  df['ema50'] = ta.ema(df['close'], length=50)
119
  df['atr'] = ta.atr(df['high'], df['low'], df['close'], length=14)
120
 
 
121
  if timeframe == '1m':
122
  sma20 = df['close'].rolling(20).mean()
123
  std20 = df['close'].rolling(20).std()
@@ -125,12 +128,14 @@ class HeavyDutyBacktester:
125
  df['vol_ma50'] = df['volume'].rolling(50).mean()
126
  df['rel_vol'] = df['volume'] / (df['vol_ma50'] + 1e-9)
127
 
 
128
  df['slope'] = ta.slope(df['close'], length=7)
129
  vol_mean = df['volume'].rolling(20).mean()
130
  vol_std = df['volume'].rolling(20).std()
131
  df['vol_z'] = (df['volume'] - vol_mean) / (vol_std + 1e-9)
132
  df['atr_pct'] = df['atr'] / df['close']
133
 
 
134
  if timeframe == '1m':
135
  df['ret'] = df['close'].pct_change()
136
  df['dollar_vol'] = df['close'] * df['volume']
@@ -163,19 +168,16 @@ class HeavyDutyBacktester:
163
  s = df['volume'].rolling(500).std()
164
  df['vol_zscore_50'] = ((df['volume'] - r) / s).fillna(0)
165
 
 
166
  df['log_ret'] = np.log(df['close'] / df['close'].shift(1))
167
-
168
  roll_max = df['high'].rolling(50).max()
169
  roll_min = df['low'].rolling(50).min()
170
  diff = (roll_max - roll_min).replace(0, 1e-9)
171
  df['fib_pos'] = (df['close'] - roll_min) / diff
172
-
173
  df['trend_slope'] = (df['ema20'] - df['ema20'].shift(5)) / df['ema20'].shift(5)
174
  df['volatility'] = df['atr'] / df['close']
175
-
176
  fib618 = roll_max - (diff * 0.382)
177
  df['dist_fib618'] = (df['close'] - fib618) / df['close']
178
-
179
  df['dist_ema50'] = (df['close'] - df['ema50']) / df['close']
180
  df['ema200'] = ta.ema(df['close'], length=200)
181
  df['dist_ema200'] = (df['close'] - df['ema200']) / df['close']
@@ -184,7 +186,7 @@ class HeavyDutyBacktester:
184
  return df
185
 
186
  # ==============================================================
187
- # 🧠 CPU PROCESSING (Full Stack + Logs)
188
  # ==============================================================
189
  async def _process_data_in_memory(self, sym, candles, start_ms, end_ms):
190
  safe_sym = sym.replace('/', '_')
@@ -195,7 +197,7 @@ class HeavyDutyBacktester:
195
  print(f" 📂 [{sym}] Data Exists -> Skipping.")
196
  return
197
 
198
- print(f" ⚙️ [CPU] Analyzing {sym} (Full Stack Injection)...", flush=True)
199
  t0 = time.time()
200
 
201
  df_1m = pd.DataFrame(candles, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
@@ -253,8 +255,6 @@ class HeavyDutyBacktester:
253
 
254
  # --- 4. Main Simulation Loop ---
255
  for i, current_time in enumerate(final_valid_indices):
256
-
257
- # Progress Stamp
258
  if i > 0 and i % 1000 == 0:
259
  percent = (i / total_signals) * 100
260
  print(f" ⏳ [{sym}] Processing... {i}/{total_signals} ({percent:.1f}%)", flush=True)
@@ -263,11 +263,8 @@ class HeavyDutyBacktester:
263
 
264
  # Sync Indices
265
  idx_1m = np.searchsorted(fast_1m['timestamp'], ts_val)
266
-
267
- # ✅ FIX: Define idx_5m explicitly here for Legacy V2/V3 usage
268
- idx_5m = np.searchsorted(numpy_htf['5m']['timestamp'], ts_val)
269
-
270
  idx_1h = np.searchsorted(numpy_htf['1h']['timestamp'], ts_val)
 
271
  idx_15m = np.searchsorted(numpy_htf['15m']['timestamp'], ts_val)
272
  idx_4h = np.searchsorted(numpy_htf['4h']['timestamp'], ts_val)
273
 
@@ -299,7 +296,6 @@ class HeavyDutyBacktester:
299
  elif col == 'sim_mc_score': val = 0.5
300
  elif col == 'sim_pattern_score': val = 0.5
301
  o_vec.append(val)
302
-
303
  try:
304
  o_pred = oracle_dir_model.predict(np.array(o_vec).reshape(1, -1))[0]
305
  oracle_conf = float(o_pred[0]) if isinstance(o_pred, (list, np.ndarray)) else float(o_pred)
@@ -318,13 +314,12 @@ class HeavyDutyBacktester:
318
  s_vec.append(l_val)
319
  else:
320
  s_vec.append(0.0)
321
-
322
  try:
323
  s_preds = [m.predict(np.array(s_vec).reshape(1, -1))[0] for m in sniper_models]
324
  sniper_score = np.mean(s_preds)
325
  except: pass
326
 
327
- # === RISK SIMULATION (Hydra/Legacy - Minute by Minute) ===
328
  entry_price = fast_1m['close'][idx_1m]
329
  highest_price = entry_price
330
 
@@ -338,7 +333,7 @@ class HeavyDutyBacktester:
338
  curr_ts = int(fast_1m['timestamp'][c_idx])
339
  if curr_price > highest_price: highest_price = curr_price
340
 
341
- # A. Hydra Injection
342
  if hydra_models:
343
  atr_val = fast_1m['atr'][c_idx]
344
  sl_dist = 1.5 * atr_val if atr_val > 0 else entry_price * 0.015
@@ -366,7 +361,7 @@ class HeavyDutyBacktester:
366
  if pg > max_hydra_giveback: max_hydra_giveback = pg
367
  except: pass
368
 
369
- # B. Legacy (Full Reconstruction)
370
  if legacy_v2 or legacy_v3:
371
  c_5m_idx = idx_5m + (c_idx - idx_1m) // 5
372
  if c_5m_idx >= len(numpy_htf['5m']['rsi']): c_5m_idx = len(numpy_htf['5m']['rsi']) - 1
@@ -440,24 +435,30 @@ class HeavyDutyBacktester:
440
  del frames, fast_1m, numpy_htf
441
  gc.collect()
442
 
 
 
 
443
  async def generate_truth_data(self):
444
  if self.force_start_date and self.force_end_date:
445
  dt_start = datetime.strptime(self.force_start_date, "%Y-%m-%d").replace(tzinfo=timezone.utc)
446
  dt_end = datetime.strptime(self.force_end_date, "%Y-%m-%d").replace(tzinfo=timezone.utc)
447
  start_time_ms = int(dt_start.timestamp() * 1000)
448
  end_time_ms = int(dt_end.timestamp() * 1000)
449
- print(f"\n🚜 [Phase 1] Processing Era: {self.force_start_date} -> {self.force_end_date}")
450
- else: return
451
-
452
- for sym in self.TARGET_COINS:
453
- try:
454
- candles = await self._fetch_all_data_fast(sym, start_time_ms, end_time_ms)
455
- if candles:
456
- await self._process_data_in_memory(sym, candles, start_time_ms, end_time_ms)
457
- except Exception as e:
458
- print(f" ❌ SKIP {sym}: {e}", flush=True)
459
- gc.collect()
460
 
 
 
 
461
  @staticmethod
462
  def _worker_optimize(combinations_batch, scores_files, initial_capital, fees_pct, max_slots):
463
  results = []
@@ -482,6 +483,7 @@ class HeavyDutyBacktester:
482
  max_drawdown = 0.0
483
 
484
  for ts, group in grouped_by_time:
 
485
  active = list(wallet["positions"].keys())
486
  current_prices = {row['symbol']: row['close'] for _, row in group.iterrows()}
487
 
@@ -496,18 +498,18 @@ class HeavyDutyBacktester:
496
 
497
  pnl = (curr - pos['entry']) / pos['entry']
498
  if is_crash or pnl > 0.04 or pnl < -0.02:
499
- gross = pos['size'] * pnl
500
- net = gross - (pos['size'] * fees_pct * 2)
501
- wallet['balance'] += (pos['size'] + net)
502
  wallet['allocated'] -= pos['size']
503
  del wallet['positions'][sym]
504
- wallet['trades_history'].append({'pnl': net, 'pct': pnl})
505
 
 
506
  total_eq = wallet['balance'] + wallet['allocated']
507
  if total_eq > peak_balance: peak_balance = total_eq
508
  dd = (peak_balance - total_eq) / peak_balance
509
  if dd > max_drawdown: max_drawdown = dd
510
 
 
511
  if len(wallet['positions']) < max_slots:
512
  for _, row in group.iterrows():
513
  if row['symbol'] in wallet['positions']: continue
@@ -524,41 +526,31 @@ class HeavyDutyBacktester:
524
  wallet['balance'] -= size
525
  wallet['allocated'] += size
526
 
 
527
  final_bal = wallet['balance'] + wallet['allocated']
528
  net_profit = final_bal - initial_capital
529
-
530
  trades = wallet['trades_history']
531
  total_t = len(trades)
532
  win_count = len([t for t in trades if t['pnl'] > 0])
533
  loss_count = len([t for t in trades if t['pnl'] <= 0])
534
  win_rate = (win_count / total_t * 100) if total_t > 0 else 0
535
-
536
  max_win = max([t['pnl'] for t in trades]) if trades else 0
537
  max_loss = min([t['pnl'] for t in trades]) if trades else 0
538
 
539
- max_win_streak = 0; max_loss_streak = 0; curr_w = 0; curr_l = 0
540
- for t in trades:
541
- if t['pnl'] > 0:
542
- curr_w += 1; curr_l = 0
543
- if curr_w > max_win_streak: max_win_streak = curr_w
544
- else:
545
- curr_l += 1; curr_w = 0
546
- if curr_l > max_loss_streak: max_loss_streak = curr_l
547
-
548
  results.append({
549
  'config': config, 'final_balance': final_bal, 'net_profit': net_profit,
550
  'total_trades': total_t, 'win_count': win_count, 'loss_count': loss_count,
551
  'win_rate': win_rate, 'max_single_win': max_win, 'max_single_loss': max_loss,
552
- 'max_win_streak': max_win_streak, 'max_loss_streak': max_loss_streak,
553
  'max_drawdown': max_drawdown * 100
554
  })
555
 
556
  return results
557
 
558
  async def run_optimization(self, target_regime="RANGE"):
 
 
559
  await self.generate_truth_data()
560
 
561
- w_titan_range = [0.5]
562
  oracle_range = [0.5, 0.6, 0.7]
563
  sniper_range = [0.4, 0.5, 0.6]
564
  hydra_range = [0.75, 0.85, 0.95]
@@ -572,11 +564,9 @@ class HeavyDutyBacktester:
572
  })
573
 
574
  current_period_files = [os.path.join(CACHE_DIR, f) for f in os.listdir(CACHE_DIR) if f.endswith('_scores.pkl')]
575
- if not current_period_files:
576
- print("❌ No data found.")
577
- return None, None
578
 
579
- print(f"\n🧩 [Phase 2] Optimizing {len(combinations)} Configs (Full Stack)...")
580
  best_res = self._worker_optimize(combinations, current_period_files, self.INITIAL_CAPITAL, self.TRADING_FEES, self.MAX_SLOTS)
581
 
582
  if not best_res: return None, None
@@ -615,14 +605,29 @@ async def run_strategic_optimization_task():
615
  try:
616
  hub = AdaptiveHub(r2); await hub.initialize()
617
  optimizer = HeavyDutyBacktester(dm, proc)
618
- # Set Date Range
619
- optimizer.set_date_range("2024-03-01", "2024-03-05")
620
 
621
- best_cfg, best_stats = await optimizer.run_optimization(target_regime="RANGE")
622
- if best_cfg:
623
- hub.submit_challenger("RANGE", best_cfg, best_stats)
624
- await hub._save_state_to_r2()
625
- print(" [System] DNA Updated.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
626
  finally:
627
  await dm.close()
628
 
 
1
  # ============================================================
2
+ # 🧪 backtest_engine.py (V111.0 - GEM-Architect: Full Regime Loop Restored)
3
  # ============================================================
4
 
5
  import asyncio
 
55
  self.force_end_date = None
56
 
57
  if not os.path.exists(CACHE_DIR): os.makedirs(CACHE_DIR)
58
+ print(f"🧪 [Backtest V111.0] Full Stack + Multi-Regime Strategic Loop.")
59
 
60
  def set_date_range(self, start_str, end_str):
61
  self.force_start_date = start_str
 
104
  return unique_candles
105
 
106
  # ==============================================================
107
+ # 🏎️ VECTORIZED INDICATORS (ALL LAYERS)
108
  # ==============================================================
109
  def _calculate_indicators_vectorized(self, df, timeframe='1m'):
110
+ # 1. Basic Setup
111
  df['close'] = df['close'].astype(float)
112
  df['high'] = df['high'].astype(float)
113
  df['low'] = df['low'].astype(float)
114
  df['volume'] = df['volume'].astype(float)
115
  df['open'] = df['open'].astype(float)
116
 
117
+ # 2. Standard Indicators
118
  df['rsi'] = ta.rsi(df['close'], length=14)
119
  df['ema20'] = ta.ema(df['close'], length=20)
120
  df['ema50'] = ta.ema(df['close'], length=50)
121
  df['atr'] = ta.atr(df['high'], df['low'], df['close'], length=14)
122
 
123
+ # 3. Hydra
124
  if timeframe == '1m':
125
  sma20 = df['close'].rolling(20).mean()
126
  std20 = df['close'].rolling(20).std()
 
128
  df['vol_ma50'] = df['volume'].rolling(50).mean()
129
  df['rel_vol'] = df['volume'] / (df['vol_ma50'] + 1e-9)
130
 
131
+ # 4. Oracle
132
  df['slope'] = ta.slope(df['close'], length=7)
133
  vol_mean = df['volume'].rolling(20).mean()
134
  vol_std = df['volume'].rolling(20).std()
135
  df['vol_z'] = (df['volume'] - vol_mean) / (vol_std + 1e-9)
136
  df['atr_pct'] = df['atr'] / df['close']
137
 
138
+ # 5. Sniper (1m Only)
139
  if timeframe == '1m':
140
  df['ret'] = df['close'].pct_change()
141
  df['dollar_vol'] = df['close'] * df['volume']
 
168
  s = df['volume'].rolling(500).std()
169
  df['vol_zscore_50'] = ((df['volume'] - r) / s).fillna(0)
170
 
171
+ # 6. Legacy
172
  df['log_ret'] = np.log(df['close'] / df['close'].shift(1))
 
173
  roll_max = df['high'].rolling(50).max()
174
  roll_min = df['low'].rolling(50).min()
175
  diff = (roll_max - roll_min).replace(0, 1e-9)
176
  df['fib_pos'] = (df['close'] - roll_min) / diff
 
177
  df['trend_slope'] = (df['ema20'] - df['ema20'].shift(5)) / df['ema20'].shift(5)
178
  df['volatility'] = df['atr'] / df['close']
 
179
  fib618 = roll_max - (diff * 0.382)
180
  df['dist_fib618'] = (df['close'] - fib618) / df['close']
 
181
  df['dist_ema50'] = (df['close'] - df['ema50']) / df['close']
182
  df['ema200'] = ta.ema(df['close'], length=200)
183
  df['dist_ema200'] = (df['close'] - df['ema200']) / df['close']
 
186
  return df
187
 
188
  # ==============================================================
189
+ # 🧠 CPU PROCESSING (Full Stack Injection + Logs)
190
  # ==============================================================
191
  async def _process_data_in_memory(self, sym, candles, start_ms, end_ms):
192
  safe_sym = sym.replace('/', '_')
 
197
  print(f" 📂 [{sym}] Data Exists -> Skipping.")
198
  return
199
 
200
+ print(f" ⚙️ [CPU] Analyzing {sym} (Full Stack: Titan+Oracle+Sniper+Hydra)...", flush=True)
201
  t0 = time.time()
202
 
203
  df_1m = pd.DataFrame(candles, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
 
255
 
256
  # --- 4. Main Simulation Loop ---
257
  for i, current_time in enumerate(final_valid_indices):
 
 
258
  if i > 0 and i % 1000 == 0:
259
  percent = (i / total_signals) * 100
260
  print(f" ⏳ [{sym}] Processing... {i}/{total_signals} ({percent:.1f}%)", flush=True)
 
263
 
264
  # Sync Indices
265
  idx_1m = np.searchsorted(fast_1m['timestamp'], ts_val)
 
 
 
 
266
  idx_1h = np.searchsorted(numpy_htf['1h']['timestamp'], ts_val)
267
+ idx_5m = np.searchsorted(numpy_htf['5m']['timestamp'], ts_val) # ✅ FIXED
268
  idx_15m = np.searchsorted(numpy_htf['15m']['timestamp'], ts_val)
269
  idx_4h = np.searchsorted(numpy_htf['4h']['timestamp'], ts_val)
270
 
 
296
  elif col == 'sim_mc_score': val = 0.5
297
  elif col == 'sim_pattern_score': val = 0.5
298
  o_vec.append(val)
 
299
  try:
300
  o_pred = oracle_dir_model.predict(np.array(o_vec).reshape(1, -1))[0]
301
  oracle_conf = float(o_pred[0]) if isinstance(o_pred, (list, np.ndarray)) else float(o_pred)
 
314
  s_vec.append(l_val)
315
  else:
316
  s_vec.append(0.0)
 
317
  try:
318
  s_preds = [m.predict(np.array(s_vec).reshape(1, -1))[0] for m in sniper_models]
319
  sniper_score = np.mean(s_preds)
320
  except: pass
321
 
322
+ # === RISK SIMULATION ===
323
  entry_price = fast_1m['close'][idx_1m]
324
  highest_price = entry_price
325
 
 
333
  curr_ts = int(fast_1m['timestamp'][c_idx])
334
  if curr_price > highest_price: highest_price = curr_price
335
 
336
+ # A. Hydra
337
  if hydra_models:
338
  atr_val = fast_1m['atr'][c_idx]
339
  sl_dist = 1.5 * atr_val if atr_val > 0 else entry_price * 0.015
 
361
  if pg > max_hydra_giveback: max_hydra_giveback = pg
362
  except: pass
363
 
364
+ # B. Legacy (Full Logic)
365
  if legacy_v2 or legacy_v3:
366
  c_5m_idx = idx_5m + (c_idx - idx_1m) // 5
367
  if c_5m_idx >= len(numpy_htf['5m']['rsi']): c_5m_idx = len(numpy_htf['5m']['rsi']) - 1
 
435
  del frames, fast_1m, numpy_htf
436
  gc.collect()
437
 
438
+ # ==============================================================
439
+ # PHASE 1: Main Loop
440
+ # ==============================================================
441
  async def generate_truth_data(self):
442
  if self.force_start_date and self.force_end_date:
443
  dt_start = datetime.strptime(self.force_start_date, "%Y-%m-%d").replace(tzinfo=timezone.utc)
444
  dt_end = datetime.strptime(self.force_end_date, "%Y-%m-%d").replace(tzinfo=timezone.utc)
445
  start_time_ms = int(dt_start.timestamp() * 1000)
446
  end_time_ms = int(dt_end.timestamp() * 1000)
447
+ print(f"\n🚜 [Phase 1] Processing Forced Era: {self.force_start_date} -> {self.force_end_date}")
448
+ for sym in self.TARGET_COINS:
449
+ try:
450
+ candles = await self._fetch_all_data_fast(sym, start_time_ms, end_time_ms)
451
+ if candles: await self._process_data_in_memory(sym, candles, start_time_ms, end_time_ms)
452
+ except Exception as e: print(f" ❌ SKIP {sym}: {e}", flush=True)
453
+ gc.collect()
454
+ else:
455
+ # If no forced date, we might rely on the Scenario Loop in run_strategic_optimization_task calling this.
456
+ # But the Scenario Loop sets force_start_date.
457
+ pass
458
 
459
+ # ==============================================================
460
+ # PHASE 2: Optimization (Grid Search)
461
+ # ==============================================================
462
  @staticmethod
463
  def _worker_optimize(combinations_batch, scores_files, initial_capital, fees_pct, max_slots):
464
  results = []
 
483
  max_drawdown = 0.0
484
 
485
  for ts, group in grouped_by_time:
486
+ # EXIT
487
  active = list(wallet["positions"].keys())
488
  current_prices = {row['symbol']: row['close'] for _, row in group.iterrows()}
489
 
 
498
 
499
  pnl = (curr - pos['entry']) / pos['entry']
500
  if is_crash or pnl > 0.04 or pnl < -0.02:
501
+ wallet['balance'] += pos['size'] * (1 + pnl - (fees_pct*2))
 
 
502
  wallet['allocated'] -= pos['size']
503
  del wallet['positions'][sym]
504
+ wallet['trades_history'].append({'pnl': pnl})
505
 
506
+ # Stats Update
507
  total_eq = wallet['balance'] + wallet['allocated']
508
  if total_eq > peak_balance: peak_balance = total_eq
509
  dd = (peak_balance - total_eq) / peak_balance
510
  if dd > max_drawdown: max_drawdown = dd
511
 
512
+ # ENTRY
513
  if len(wallet['positions']) < max_slots:
514
  for _, row in group.iterrows():
515
  if row['symbol'] in wallet['positions']: continue
 
526
  wallet['balance'] -= size
527
  wallet['allocated'] += size
528
 
529
+ # Stats
530
  final_bal = wallet['balance'] + wallet['allocated']
531
  net_profit = final_bal - initial_capital
 
532
  trades = wallet['trades_history']
533
  total_t = len(trades)
534
  win_count = len([t for t in trades if t['pnl'] > 0])
535
  loss_count = len([t for t in trades if t['pnl'] <= 0])
536
  win_rate = (win_count / total_t * 100) if total_t > 0 else 0
 
537
  max_win = max([t['pnl'] for t in trades]) if trades else 0
538
  max_loss = min([t['pnl'] for t in trades]) if trades else 0
539
 
 
 
 
 
 
 
 
 
 
540
  results.append({
541
  'config': config, 'final_balance': final_bal, 'net_profit': net_profit,
542
  'total_trades': total_t, 'win_count': win_count, 'loss_count': loss_count,
543
  'win_rate': win_rate, 'max_single_win': max_win, 'max_single_loss': max_loss,
 
544
  'max_drawdown': max_drawdown * 100
545
  })
546
 
547
  return results
548
 
549
  async def run_optimization(self, target_regime="RANGE"):
550
+ # Note: generate_truth_data is called by the Strategy Loop wrapper now
551
+ # so we process data for the specific era set in set_date_range
552
  await self.generate_truth_data()
553
 
 
554
  oracle_range = [0.5, 0.6, 0.7]
555
  sniper_range = [0.4, 0.5, 0.6]
556
  hydra_range = [0.75, 0.85, 0.95]
 
564
  })
565
 
566
  current_period_files = [os.path.join(CACHE_DIR, f) for f in os.listdir(CACHE_DIR) if f.endswith('_scores.pkl')]
567
+ if not current_period_files: return None, None
 
 
568
 
569
+ print(f"\n🧩 [Phase 2] Optimizing {len(combinations)} Configs (Full Stack) for {target_regime}...")
570
  best_res = self._worker_optimize(combinations, current_period_files, self.INITIAL_CAPITAL, self.TRADING_FEES, self.MAX_SLOTS)
571
 
572
  if not best_res: return None, None
 
605
  try:
606
  hub = AdaptiveHub(r2); await hub.initialize()
607
  optimizer = HeavyDutyBacktester(dm, proc)
 
 
608
 
609
+ # RESTORED: The Multi-Regime Strategic Loop
610
+ scenarios = [
611
+ {"regime": "BULL", "start": "2024-01-01", "end": "2024-03-30"},
612
+ {"regime": "BEAR", "start": "2023-08-01", "end": "2023-09-15"},
613
+ {"regime": "DEAD", "start": "2023-06-01", "end": "2023-08-01"},
614
+ {"regime": "RANGE", "start": "2024-07-01", "end": "2024-09-30"}
615
+ ]
616
+
617
+ for scen in scenarios:
618
+ target = scen["regime"]
619
+ optimizer.set_date_range(scen["start"], scen["end"])
620
+
621
+ # Run opt
622
+ best_cfg, best_stats = await optimizer.run_optimization(target_regime=target)
623
+
624
+ # Save
625
+ if best_cfg:
626
+ hub.submit_challenger(target, best_cfg, best_stats)
627
+
628
+ await hub._save_state_to_r2()
629
+ print("✅ [System] ALL Strategic DNA Updated & Saved.")
630
+
631
  finally:
632
  await dm.close()
633