Riy777 committed on
Commit
a0f2e90
·
verified ·
1 Parent(s): e378be0

Update backtest_engine.py

Browse files
Files changed (1) hide show
  1. backtest_engine.py +32 -55
backtest_engine.py CHANGED
@@ -1,10 +1,9 @@
1
  # ============================================================
2
- # 🧪 backtest_engine.py (V51.0 - GEM-Architect: The Grid Master)
3
  # ============================================================
4
  # التحديثات:
5
- # 1. إضافة متغير GRID_DENSITY للتحكم بعدد التوليفات بسهولة.
6
- # 2. استخدام التوازي الكامل (Multiprocessing) بدون خسارة الدقة.
7
- # 3. دمج الفلتر الأولي (Scanner) كجزء من المعادلة.
8
  # ============================================================
9
 
10
  import asyncio
@@ -29,9 +28,8 @@ CACHE_DIR = "backtest_cache_grid"
29
  class MassiveOptimizer:
30
  def __init__(self, data_manager):
31
  self.dm = data_manager
32
- # 🎛️ هذا هو "الزر" الذي طلبته
33
- # 3 = سريع (تجربة) | 5 = متوسط (~3000) | 10 = دقيق (~100k) | 15 = جنوني
34
- self.GRID_DENSITY = 10
35
 
36
  self.TARGET_COINS = [
37
  'BTC/USDT', 'ETH/USDT', 'SOL/USDT', 'BNB/USDT', 'XRP/USDT',
@@ -40,36 +38,38 @@ class MassiveOptimizer:
40
  ]
41
 
42
  if not os.path.exists(CACHE_DIR): os.makedirs(CACHE_DIR)
43
- print(f"🧪 [Backtest Engine V51.0] Grid Density set to: {self.GRID_DENSITY}")
44
 
45
  async def fetch_deep_history(self):
46
  """تحميل البيانات وتجهيزها للمعالجة السريعة"""
47
  print(f"\n⏳ [Data] Pre-fetching history for Grid Search...")
48
  end_time_ms = int(time.time() * 1000)
49
- start_time_ms = end_time_ms - (14 * 24 * 60 * 60 * 1000) # 14 يوم كافية للتحسين التكتيكي
50
 
51
  for sym in self.TARGET_COINS:
52
  safe_sym = sym.replace('/', '_')
53
  file_path = f"{CACHE_DIR}/{safe_sym}.pkl"
54
 
55
- # إذا الملف موجود وحديث، لا نحمله مرة أخرى
56
  if os.path.exists(file_path): continue
57
 
58
  print(f" ⬇️ Downloading {sym}...", end="", flush=True)
59
  try:
60
- # سحب شمعات 15 دقيقة (أسرع وأدق للفلتر الجديد)
61
  candles = await self.dm.exchange.fetch_ohlcv(sym, '15m', since=start_time_ms, limit=1000)
62
  if candles:
63
  df = pd.DataFrame(candles, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
64
  df = df.drop_duplicates(subset=['timestamp']).sort_values('timestamp')
65
  for col in ['open', 'high', 'low', 'close', 'volume']: df[col] = df[col].astype(float)
66
 
67
- # حساب المؤشرات مسبقاً (Vectorized Pre-calculation) لتسريع الباكتست مليون مرة
68
- # 1. Titan Proxy (Trend)
69
  df['ema50'] = df['close'].ewm(span=50).mean()
70
 
71
- # 2. Scanner Proxies (Indicators)
72
- df['rsi'] = 100 - (100 / (1 + df['close'].diff().clip(lower=0).rolling(14).mean() / df['close'].diff().clip(upper=0).abs().rolling(14).mean()))
 
 
 
 
 
73
 
74
  # BB
75
  df['ma20'] = df['close'].rolling(20).mean()
@@ -90,7 +90,6 @@ class MassiveOptimizer:
90
  def _worker_evaluate_batch(combinations_batch, market_data_files):
91
  """
92
  يقوم هذا العامل بتقييم مجموعة من التوليفات (Batch) دفعة واحدة.
93
- يعمل في Process منفصل تماماً = سرعة قصوى.
94
  """
95
  results = []
96
 
@@ -110,37 +109,33 @@ class MassiveOptimizer:
110
 
111
  for df in dfs:
112
  # ---------------------------------------------------
113
- # ⚡ Vectorized Signal Logic (The Core Strategy)
114
  # ---------------------------------------------------
115
- # 1. Titan Score (Simulated): Trend Alignment
116
- # إذا السعر فوق المتوسط = 1.0، وإلا 0.2
117
  titan_score = np.where(df['close'] > df['ema50'], 0.9, 0.3)
118
 
119
- # 2. Scanner Score (Simulated): RSI & BB
120
- # RSI منخفض (فرصة) + اختراق BB
121
- rsi_cond = np.where(df['rsi'] < 60, 1.0, 0.4) # نحب الـ RSI المنخفض للشراء
122
- bb_cond = np.where(df['close'] > df['bb_upper'], 1.0, 0.0) # اختراق
123
 
124
- # دمج المؤشرات للكاشف
125
  scanner_score = (rsi_cond * 0.7) + (bb_cond * 0.3)
126
 
127
  # 3. Final Weighted Score
128
  final_score = (titan_score * w_titan) + (scanner_score * w_scanner)
129
- # Normalize (تقريباً)
130
  final_score = final_score / (w_titan + w_scanner)
131
 
132
- # 4. Generate Entries
133
  signals = (final_score > entry_thresh)
134
 
135
  # 5. Fast Loop for PnL
136
- # (Looping over numpy array is fast enough here)
137
  prices = df['close'].values
138
- sigs = signals.values
 
 
139
 
140
  in_pos = False
141
  entry_p = 0.0
142
 
143
- # محاكاة سريعة
144
  for i in range(len(prices)-1):
145
  if not in_pos and sigs[i]:
146
  in_pos = True
@@ -149,44 +144,36 @@ class MassiveOptimizer:
149
  curr = prices[i]
150
  pnl = (curr - entry_p) / entry_p
151
 
152
- # TP/SL ثابت للسرعة (يمكن جعله متغير أيضاً)
153
- if pnl > 0.03 or pnl < -0.015: # TP 3%, SL 1.5%
154
  total_pnl += pnl
155
  total_trades += 1
156
  in_pos = False
157
 
158
- if total_trades > 5: # تصفية النتائج الضعيفة
159
  results.append({
160
  'config': config,
161
  'pnl': total_pnl,
162
  'trades': total_trades,
163
- 'score': total_pnl * np.log(total_trades) # معادلة تفضيل الربح مع عدد الصفقات المعقول
164
  })
165
 
166
  return results
167
 
168
  # ==============================================================
169
- # 🚀 The Grid Generator (10k -> 100k Scaler)
170
  # ==============================================================
171
  async def run_optimization(self):
172
- # 1. التأكد من البيانات
173
  market_files = [os.path.join(CACHE_DIR, f) for f in os.listdir(CACHE_DIR) if f.endswith('.pkl')]
174
  if not market_files:
175
  await self.fetch_deep_history()
176
  market_files = [os.path.join(CACHE_DIR, f) for f in os.listdir(CACHE_DIR) if f.endswith('.pkl')]
177
 
178
- # 2. توليد الشبكة (The Grid)
179
  print(f"🧩 [Optimizer] Generating Grid with Density={self.GRID_DENSITY}...")
180
 
181
- # استخدام linspace لتوليد أرقام دقيقة بناءً على الكثافة
182
- # كلما زاد self.GRID_DENSITY، زادت دقة الخطوات
183
  w_titan_range = np.linspace(0.2, 0.9, num=self.GRID_DENSITY)
184
  w_scanner_range = np.linspace(0.1, 0.8, num=self.GRID_DENSITY)
185
  thresh_range = np.linspace(0.50, 0.80, num=self.GRID_DENSITY)
186
 
187
- # يمكن إضافة المزيد من المتغيرات هنا لزيادة العدد لـ 100,000+
188
- # مثلاً: scanner_rsi_limit = np.linspace(30, 70, num=5)
189
-
190
  combinations = []
191
  for wt, ws, th in itertools.product(w_titan_range, w_scanner_range, thresh_range):
192
  combinations.append({
@@ -198,12 +185,9 @@ class MassiveOptimizer:
198
  print(f" 📊 Total Unique Combinations: {len(combinations):,}")
199
  print(f" 🚀 Est. Processing Time: {len(combinations)/2000:.1f} minutes (on parallel cores)")
200
 
201
- # 3. التشغيل المتوازي (Multiprocessing)
202
  start_time = time.time()
203
  final_results = []
204
 
205
- # تقسيم التوليفات إلى دفعات (Batches)
206
- # كل نواة (Core) ستأخذ دفعة
207
  batch_size = max(100, len(combinations) // (os.cpu_count() * 4))
208
  batches = [combinations[i:i + batch_size] for i in range(0, len(combinations), batch_size)]
209
 
@@ -222,12 +206,10 @@ class MassiveOptimizer:
222
  elapsed = time.time() - start_time
223
  print(f"✅ Optimization Finished in {elapsed:.2f}s")
224
 
225
- # 4. اختيار الفائز
226
  if not final_results:
227
- print("⚠️ No profitable strategies found.")
228
  return None
229
 
230
- # الترتيب حسب معادلة (الربح × ثبات الصفقات)
231
  best_result = sorted(final_results, key=lambda x: x['score'], reverse=True)[0]
232
 
233
  print("\n" + "="*60)
@@ -240,7 +222,7 @@ class MassiveOptimizer:
240
  return best_result['config']
241
 
242
  async def run_strategic_optimization_task():
243
- print("\n🧪 [STRATEGIC BACKTEST V51.0] Starting Massive Grid Search...")
244
  from r2 import R2Service
245
  r2 = R2Service()
246
  dm = DataManager(None, None, r2)
@@ -253,19 +235,14 @@ async def run_strategic_optimization_task():
253
  hub = AdaptiveHub(r2)
254
  await hub.initialize()
255
 
256
- # تطبيق النتائج (Hot Reload)
257
- regime = "RANGE" # أو اكتشافه
258
  if regime in hub.strategies:
259
  print(f"💉 Injecting new DNA into {regime} Strategy...")
260
  st = hub.strategies[regime]
261
 
262
- # تحديث الأوزان
263
  st.model_weights['titan'] = best_config['w_titan']
264
-
265
- # نفترض أننا نستخدم مفتاح 'patterns' لتخزين وزن الـ Scanner الجديد مؤقتاً
266
- # أو نضيف حقلاً جديداً إذا عدلت الكلاس
267
  st.model_weights['patterns'] = best_config['w_scanner']
268
-
269
  st.filters['l1_min_score'] = best_config['thresh'] * 100
270
 
271
  await hub._save_state_to_r2()
 
1
  # ============================================================
2
+ # 🧪 backtest_engine.py (V51.1 - GEM-Architect: Bug Fix)
3
  # ============================================================
4
  # التحديثات:
5
+ # 1. إصلاح خطأ 'numpy.ndarray object has no attribute values'.
6
+ # 2. تحسين التعامل مع المصفوفات لضمان استقرار الباكتست.
 
7
  # ============================================================
8
 
9
  import asyncio
 
28
  class MassiveOptimizer:
29
  def __init__(self, data_manager):
30
  self.dm = data_manager
31
+ # 3 = سريع (تجربة) | 5 = متوسط (~3000) | 10 = دقيق (~1000)
32
+ self.GRID_DENSITY = 10
 
33
 
34
  self.TARGET_COINS = [
35
  'BTC/USDT', 'ETH/USDT', 'SOL/USDT', 'BNB/USDT', 'XRP/USDT',
 
38
  ]
39
 
40
  if not os.path.exists(CACHE_DIR): os.makedirs(CACHE_DIR)
41
+ print(f"🧪 [Backtest Engine V51.1] Grid Density set to: {self.GRID_DENSITY}")
42
 
43
  async def fetch_deep_history(self):
44
  """تحميل البيانات وتجهيزها للمعالجة السريعة"""
45
  print(f"\n⏳ [Data] Pre-fetching history for Grid Search...")
46
  end_time_ms = int(time.time() * 1000)
47
+ start_time_ms = end_time_ms - (14 * 24 * 60 * 60 * 1000)
48
 
49
  for sym in self.TARGET_COINS:
50
  safe_sym = sym.replace('/', '_')
51
  file_path = f"{CACHE_DIR}/{safe_sym}.pkl"
52
 
 
53
  if os.path.exists(file_path): continue
54
 
55
  print(f" ⬇️ Downloading {sym}...", end="", flush=True)
56
  try:
 
57
  candles = await self.dm.exchange.fetch_ohlcv(sym, '15m', since=start_time_ms, limit=1000)
58
  if candles:
59
  df = pd.DataFrame(candles, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
60
  df = df.drop_duplicates(subset=['timestamp']).sort_values('timestamp')
61
  for col in ['open', 'high', 'low', 'close', 'volume']: df[col] = df[col].astype(float)
62
 
63
+ # حساب المؤشرات مسبقاً (Vectorized)
 
64
  df['ema50'] = df['close'].ewm(span=50).mean()
65
 
66
+ # Scanner Proxies
67
+ # RSI Manual Calculation for speed
68
+ delta = df['close'].diff()
69
+ gain = (delta.where(delta > 0, 0)).rolling(window=14).mean()
70
+ loss = (-delta.where(delta < 0, 0)).rolling(window=14).mean()
71
+ rs = gain / loss
72
+ df['rsi'] = 100 - (100 / (1 + rs))
73
 
74
  # BB
75
  df['ma20'] = df['close'].rolling(20).mean()
 
90
  def _worker_evaluate_batch(combinations_batch, market_data_files):
91
  """
92
  يقوم هذا العامل بتقييم مجموعة من التوليفات (Batch) دفعة واحدة.
 
93
  """
94
  results = []
95
 
 
109
 
110
  for df in dfs:
111
  # ---------------------------------------------------
112
+ # ⚡ Vectorized Signal Logic
113
  # ---------------------------------------------------
114
+ # 1. Titan Score (Simulated)
 
115
  titan_score = np.where(df['close'] > df['ema50'], 0.9, 0.3)
116
 
117
+ # 2. Scanner Score (Simulated)
118
+ rsi_cond = np.where(df['rsi'] < 60, 1.0, 0.4)
119
+ bb_cond = np.where(df['close'] > df['bb_upper'], 1.0, 0.0)
 
120
 
 
121
  scanner_score = (rsi_cond * 0.7) + (bb_cond * 0.3)
122
 
123
  # 3. Final Weighted Score
124
  final_score = (titan_score * w_titan) + (scanner_score * w_scanner)
 
125
  final_score = final_score / (w_titan + w_scanner)
126
 
127
+ # 4. Generate Entries (Boolean Numpy Array)
128
  signals = (final_score > entry_thresh)
129
 
130
  # 5. Fast Loop for PnL
 
131
  prices = df['close'].values
132
+
133
+ # 🔥 FIX: signals هو أصلاً numpy array، لا نحتاج .values
134
+ sigs = signals
135
 
136
  in_pos = False
137
  entry_p = 0.0
138
 
 
139
  for i in range(len(prices)-1):
140
  if not in_pos and sigs[i]:
141
  in_pos = True
 
144
  curr = prices[i]
145
  pnl = (curr - entry_p) / entry_p
146
 
147
+ if pnl > 0.03 or pnl < -0.015:
 
148
  total_pnl += pnl
149
  total_trades += 1
150
  in_pos = False
151
 
152
+ if total_trades > 5:
153
  results.append({
154
  'config': config,
155
  'pnl': total_pnl,
156
  'trades': total_trades,
157
+ 'score': total_pnl * np.log(total_trades)
158
  })
159
 
160
  return results
161
 
162
  # ==============================================================
163
+ # 🚀 The Grid Generator
164
  # ==============================================================
165
  async def run_optimization(self):
 
166
  market_files = [os.path.join(CACHE_DIR, f) for f in os.listdir(CACHE_DIR) if f.endswith('.pkl')]
167
  if not market_files:
168
  await self.fetch_deep_history()
169
  market_files = [os.path.join(CACHE_DIR, f) for f in os.listdir(CACHE_DIR) if f.endswith('.pkl')]
170
 
 
171
  print(f"🧩 [Optimizer] Generating Grid with Density={self.GRID_DENSITY}...")
172
 
 
 
173
  w_titan_range = np.linspace(0.2, 0.9, num=self.GRID_DENSITY)
174
  w_scanner_range = np.linspace(0.1, 0.8, num=self.GRID_DENSITY)
175
  thresh_range = np.linspace(0.50, 0.80, num=self.GRID_DENSITY)
176
 
 
 
 
177
  combinations = []
178
  for wt, ws, th in itertools.product(w_titan_range, w_scanner_range, thresh_range):
179
  combinations.append({
 
185
  print(f" 📊 Total Unique Combinations: {len(combinations):,}")
186
  print(f" 🚀 Est. Processing Time: {len(combinations)/2000:.1f} minutes (on parallel cores)")
187
 
 
188
  start_time = time.time()
189
  final_results = []
190
 
 
 
191
  batch_size = max(100, len(combinations) // (os.cpu_count() * 4))
192
  batches = [combinations[i:i + batch_size] for i in range(0, len(combinations), batch_size)]
193
 
 
206
  elapsed = time.time() - start_time
207
  print(f"✅ Optimization Finished in {elapsed:.2f}s")
208
 
 
209
  if not final_results:
210
+ print("⚠️ No profitable strategies found (Check Data or lowered thresholds).")
211
  return None
212
 
 
213
  best_result = sorted(final_results, key=lambda x: x['score'], reverse=True)[0]
214
 
215
  print("\n" + "="*60)
 
222
  return best_result['config']
223
 
224
  async def run_strategic_optimization_task():
225
+ print("\n🧪 [STRATEGIC BACKTEST V51.1] Starting Massive Grid Search...")
226
  from r2 import R2Service
227
  r2 = R2Service()
228
  dm = DataManager(None, None, r2)
 
235
  hub = AdaptiveHub(r2)
236
  await hub.initialize()
237
 
238
+ regime = "RANGE"
 
239
  if regime in hub.strategies:
240
  print(f"💉 Injecting new DNA into {regime} Strategy...")
241
  st = hub.strategies[regime]
242
 
 
243
  st.model_weights['titan'] = best_config['w_titan']
244
+ # نستخدم 'patterns' لحفظ وزن الـ Scanner مؤقتاً أو كما اتفقنا سابقاً
 
 
245
  st.model_weights['patterns'] = best_config['w_scanner']
 
246
  st.filters['l1_min_score'] = best_config['thresh'] * 100
247
 
248
  await hub._save_state_to_r2()