Spaces:
Paused
Paused
Update ml_engine/patterns.py
Browse files- ml_engine/patterns.py +234 -277
ml_engine/patterns.py
CHANGED
|
@@ -1,294 +1,251 @@
|
|
| 1 |
# ml_engine/patterns.py
|
|
|
|
|
|
|
| 2 |
import pandas as pd
|
| 3 |
import numpy as np
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
|
| 5 |
class ChartPatternAnalyzer:
|
| 6 |
-
def __init__(self):
|
| 7 |
-
self.pattern_cache = {}
|
| 8 |
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 18 |
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
|
| 34 |
-
|
|
|
|
|
|
|
| 35 |
|
| 36 |
-
except Exception as e:
|
| 37 |
-
print(f"❌ خطأ في اكتشاف الأنماط: {e}")
|
| 38 |
-
return patterns
|
| 39 |
-
|
| 40 |
-
def _create_dataframe(self, candles):
|
| 41 |
-
"""إنشاء DataFrame من بيانات الشموع"""
|
| 42 |
try:
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 46 |
except Exception as e:
|
| 47 |
-
print(f"❌
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 57 |
'details': {}
|
| 58 |
}
|
| 59 |
|
| 60 |
-
|
| 61 |
-
if
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
highs = dataframe['high'].values
|
| 66 |
-
lows = dataframe['low'].values
|
| 67 |
-
current_price = closes[-1]
|
| 68 |
-
|
| 69 |
-
patterns_detected = []
|
| 70 |
-
|
| 71 |
-
double_pattern = self._detect_double_pattern(highs, lows, closes)
|
| 72 |
-
if double_pattern['detected']:
|
| 73 |
-
patterns_detected.append(double_pattern)
|
| 74 |
-
|
| 75 |
-
breakout_pattern = self._detect_breakout_pattern(highs, lows, closes)
|
| 76 |
-
if breakout_pattern['detected']:
|
| 77 |
-
patterns_detected.append(breakout_pattern)
|
| 78 |
-
|
| 79 |
-
trend_pattern = self._detect_trend_pattern(dataframe)
|
| 80 |
-
if trend_pattern['detected']:
|
| 81 |
-
patterns_detected.append(trend_pattern)
|
| 82 |
-
|
| 83 |
-
support_resistance_pattern = self._detect_support_resistance(highs, lows, closes)
|
| 84 |
-
if support_resistance_pattern['detected']:
|
| 85 |
-
patterns_detected.append(support_resistance_pattern)
|
| 86 |
-
|
| 87 |
-
if patterns_detected:
|
| 88 |
-
best_pattern = max(patterns_detected, key=lambda x: x['confidence'])
|
| 89 |
-
pattern_info.update({
|
| 90 |
-
'pattern': best_pattern['pattern'],
|
| 91 |
-
'confidence': best_pattern['confidence'],
|
| 92 |
-
'direction': best_pattern.get('direction', 'neutral'),
|
| 93 |
-
'details': best_pattern.get('details', {})
|
| 94 |
-
})
|
| 95 |
-
|
| 96 |
-
return pattern_info
|
| 97 |
-
|
| 98 |
-
except Exception as e:
|
| 99 |
-
print(f"❌ خطأ في تحليل الأنماط للإطار {timeframe}: {e}")
|
| 100 |
-
return pattern_info
|
| 101 |
-
|
| 102 |
-
def _detect_double_pattern(self, highs, lows, closes):
|
| 103 |
-
"""كشف نمط القمة المزدوجة أو القاع المزدوج"""
|
| 104 |
-
try:
|
| 105 |
-
if len(highs) < 15:
|
| 106 |
-
return {'detected': False}
|
| 107 |
-
|
| 108 |
-
recent_highs = highs[-15:]
|
| 109 |
-
recent_lows = lows[-15:]
|
| 110 |
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
return {'detected': False}
|
| 167 |
-
|
| 168 |
-
def _detect_breakout_pattern(self, highs, lows, closes):
|
| 169 |
-
"""كشف نمط الاختراق"""
|
| 170 |
-
try:
|
| 171 |
-
if len(highs) < 25:
|
| 172 |
-
return {'detected': False}
|
| 173 |
-
|
| 174 |
-
current_price = closes[-1]
|
| 175 |
-
|
| 176 |
-
resistance = np.max(highs[-25:-5])
|
| 177 |
-
support = np.min(lows[-25:-5])
|
| 178 |
-
|
| 179 |
-
if current_price > resistance * 1.01:
|
| 180 |
-
return {
|
| 181 |
-
'detected': True,
|
| 182 |
-
'pattern': 'Breakout Up',
|
| 183 |
-
'confidence': 0.8,
|
| 184 |
-
'direction': 'up',
|
| 185 |
-
'details': {
|
| 186 |
-
'breakout_level': resistance,
|
| 187 |
-
'target_level': resistance * 1.05
|
| 188 |
-
}
|
| 189 |
-
}
|
| 190 |
-
elif current_price < support * 0.99:
|
| 191 |
-
return {
|
| 192 |
-
'detected': True,
|
| 193 |
-
'pattern': 'Breakout Down',
|
| 194 |
-
'confidence': 0.8,
|
| 195 |
-
'direction': 'down',
|
| 196 |
-
'details': {
|
| 197 |
-
'breakdown_level': support,
|
| 198 |
-
'target_level': support * 0.95
|
| 199 |
-
}
|
| 200 |
-
}
|
| 201 |
-
|
| 202 |
-
return {'detected': False}
|
| 203 |
-
|
| 204 |
-
except Exception as e:
|
| 205 |
-
return {'detected': False}
|
| 206 |
-
|
| 207 |
-
def _detect_trend_pattern(self, dataframe):
|
| 208 |
-
"""كشف نمط الاتجاه"""
|
| 209 |
-
try:
|
| 210 |
-
if dataframe is None or dataframe.empty or len(dataframe) < 20:
|
| 211 |
-
return {'detected': False}
|
| 212 |
-
|
| 213 |
-
closes = dataframe['close'].values
|
| 214 |
-
|
| 215 |
-
ma_short = np.mean(closes[-5:])
|
| 216 |
-
ma_medium = np.mean(closes[-13:])
|
| 217 |
-
ma_long = np.mean(closes[-21:])
|
| 218 |
-
|
| 219 |
-
if ma_short > ma_medium > ma_long and closes[-1] > ma_short:
|
| 220 |
-
trend_strength = (ma_short - ma_long) / ma_long
|
| 221 |
-
confidence = min(0.3 + trend_strength * 10, 0.8)
|
| 222 |
-
return {
|
| 223 |
-
'detected': True,
|
| 224 |
-
'pattern': 'Uptrend',
|
| 225 |
-
'confidence': confidence,
|
| 226 |
-
'direction': 'up',
|
| 227 |
-
'details': {
|
| 228 |
-
'trend_strength': trend_strength,
|
| 229 |
-
'support_level': ma_medium
|
| 230 |
-
}
|
| 231 |
-
}
|
| 232 |
-
elif ma_short < ma_medium < ma_long and closes[-1] < ma_short:
|
| 233 |
-
trend_strength = (ma_long - ma_short) / ma_long
|
| 234 |
-
confidence = min(0.3 + trend_strength * 10, 0.8)
|
| 235 |
-
return {
|
| 236 |
-
'detected': True,
|
| 237 |
-
'pattern': 'Downtrend',
|
| 238 |
-
'confidence': confidence,
|
| 239 |
-
'direction': 'down',
|
| 240 |
-
'details': {
|
| 241 |
-
'trend_strength': trend_strength,
|
| 242 |
-
'resistance_level': ma_medium
|
| 243 |
-
}
|
| 244 |
-
}
|
| 245 |
-
|
| 246 |
-
return {'detected': False}
|
| 247 |
-
|
| 248 |
-
except Exception as e:
|
| 249 |
-
return {'detected': False}
|
| 250 |
-
|
| 251 |
-
def _detect_support_resistance(self, highs, lows, closes):
|
| 252 |
-
"""كشف مستويات الدعم والمقاومة"""
|
| 253 |
-
try:
|
| 254 |
-
if len(highs) < 20:
|
| 255 |
-
return {'detected': False}
|
| 256 |
-
|
| 257 |
-
current_price = closes[-1]
|
| 258 |
-
|
| 259 |
-
resistance_level = np.max(highs[-20:])
|
| 260 |
-
support_level = np.min(lows[-20:])
|
| 261 |
-
|
| 262 |
-
position = (current_price - support_level) / (resistance_level - support_level)
|
| 263 |
-
|
| 264 |
-
if position < 0.2:
|
| 265 |
-
return {
|
| 266 |
-
'detected': True,
|
| 267 |
-
'pattern': 'Near Support',
|
| 268 |
-
'confidence': 0.6,
|
| 269 |
-
'direction': 'up',
|
| 270 |
-
'details': {
|
| 271 |
-
'support_level': support_level,
|
| 272 |
-
'resistance_level': resistance_level,
|
| 273 |
-
'position': position
|
| 274 |
-
}
|
| 275 |
-
}
|
| 276 |
-
elif position > 0.8:
|
| 277 |
-
return {
|
| 278 |
-
'detected': True,
|
| 279 |
-
'pattern': 'Near Resistance',
|
| 280 |
-
'confidence': 0.6,
|
| 281 |
-
'direction': 'down',
|
| 282 |
-
'details': {
|
| 283 |
-
'support_level': support_level,
|
| 284 |
-
'resistance_level': resistance_level,
|
| 285 |
-
'position': position
|
| 286 |
-
}
|
| 287 |
-
}
|
| 288 |
-
|
| 289 |
-
return {'detected': False}
|
| 290 |
|
| 291 |
-
|
| 292 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 293 |
|
| 294 |
-
print("✅ ML Module:
|
|
|
|
# ml_engine/patterns.py
# (V8 - Final: uses the indicator-based ML model (58%))

import pandas as pd
import numpy as np
import joblib
import asyncio
import io

# pandas-ta must be installed in the runtime environment (Hugging Face);
# without it the feature-extraction step of this engine cannot run.
try:
    import pandas_ta as ta
except ImportError:
    print("❌❌ [PatternEngineV8] مكتبة pandas_ta غير موجودة! هذا المحرك سيفشل.")
    ta = None  # sentinel checked in _extract_features before use

class ChartPatternAnalyzer:
    """Indicator-based ML chart-pattern engine (V8).

    Loads a pre-trained classifier and feature scaler from R2 object
    storage, computes 30 technical indicators over the most recent
    candles of each timeframe, and classifies the window as a
    Bearish / Neutral / Bullish pattern.
    """

    def __init__(self, r2_service=None,
                 model_key="lgbm_pattern_model_combined.pkl",
                 scaler_key="scaler_combined.pkl",
                 window_size=60):
        """Set up the engine; the model/scaler are loaded later by `initialize()`.

        Args:
            r2_service: object exposing ``s3_client`` and ``BUCKET_NAME``,
                used to download the pickled model and scaler from R2.
            model_key: R2 object key of the pickled classifier.
            scaler_key: R2 object key of the pickled feature scaler.
            window_size: minimum number of candles required per timeframe.
        """
        self.window_size = window_size
        self.model = None    # populated by initialize()
        self.scaler = None   # populated by initialize()
        # Class order mirrors the training labels (-1, 0, 1) as reported in
        # evaluation_results.txt — NOTE(review): confirm against the model's
        # actual classes_ attribute once loaded.
        self.class_names = ["Bearish Pattern", "Neutral / No Pattern", "Bullish Pattern"]
        self.r2_service = r2_service
        self.model_key = model_key
        self.scaler_key = scaler_key

        # Exact feature recipe taken from X_test_combined.csv (30 indicators).
        self.feature_names = [
            'RSI_14', 'MACD_12_26_9', 'MACDh_12_26_9', 'MACDs_12_26_9', 'SMA_20',
            'EMA_20', 'BBL_5_2.0_2.0', 'BBM_5_2.0_2.0', 'BBU_5_2.0_2.0', 'BBB_5_2.0_2.0',
            'BBP_5_2.0_2.0', 'STOCHk_14_3_3', 'STOCHd_14_3_3', 'STOCHh_14_3_3',
            'ADX_14', 'ADXR_14_2', 'DMP_14', 'DMN_14', 'VWAP_D', 'MIDPOINT_14',
            'TEMA_20', 'OBV', 'AD', 'ATRr_14', 'DPO_20', 'KVO_34_55_13',
            'KVOs_34_55_13', 'CMO_14', 'ROC_10', 'WILLR_14'
        ]
        # Raw OHLCV columns are never fed to the model as features.
        self.base_cols = ['timestamp', 'open', 'high', 'low', 'close', 'volume']
        # Indicator-only feature list, order preserved.
        self.indicator_features = [col for col in self.feature_names
                                   if col not in self.base_cols]

        if not self.r2_service:
            print("⚠️ [PatternEngineV8] R2Service غير متوفر. يجب التحميل يدوياً.")

    async def initialize(self):
        """Download and unpickle the classifier and scaler from R2.

        Must be called (e.g. from app.py / data_manager) before
        `detect_chart_patterns` can produce predictions.

        Returns:
            True on success (or if already loaded), False otherwise.
        """
        if self.model and self.scaler:
            return True  # already loaded

        if not self.r2_service:
            print("❌ [PatternEngineV8] لا يمكن التهيئة بدون R2 Service.")
            return False

        try:
            # 1. Download and deserialize the classifier.
            print(f" > [PatternEngineV8] تحميل {self.model_key} من R2...")
            model_obj = self.r2_service.s3_client.get_object(
                Bucket=self.r2_service.BUCKET_NAME, Key=self.model_key)
            # Wrap the streamed body so joblib can seek/read it.
            self.model = joblib.load(io.BytesIO(model_obj['Body'].read()))

            # 2. Download and deserialize the feature scaler.
            print(f" > [PatternEngineV8] تحميل {self.scaler_key} من R2...")
            scaler_obj = self.r2_service.s3_client.get_object(
                Bucket=self.r2_service.BUCKET_NAME, Key=self.scaler_key)
            self.scaler = joblib.load(io.BytesIO(scaler_obj['Body'].read()))

            print("✅ [PatternEngineV8] تم تحميل النموذج (58%) والمقياس بنجاح.")
            # Sanity check: report how many features the scaler expects.
            if hasattr(self.scaler, 'feature_names_in_'):
                print(f" > يتوقع المقياس {len(self.scaler.feature_names_in_)} خاصية.")

            return True

        except Exception as e:
            print(f"❌❌ [PatternEngineV8] فشل فادح في تحميل النماذج من R2: {e}")
            self.model = None
            self.scaler = None
            return False

    def _extract_features(self, df_window: pd.DataFrame) -> pd.DataFrame:
        """Compute the 30-indicator feature row for the latest candle.

        Applies the V8 pandas-ta strategy to ``df_window`` (mutated in
        place by pandas-ta), forward-fills indicator warm-up NaNs, and
        returns a single-row DataFrame with exactly the expected columns.

        Raises:
            ImportError: if pandas-ta is not installed.
        """
        if not ta:
            raise ImportError("مكتبة pandas-ta غير مثبتة.")

        # Custom strategy computing every indicator the model was trained on.
        custom_strategy = ta.Strategy(
            name="V8_Features",
            description="Calculates the 30 indicators from X_test_combined.csv",
            ta=[
                {"kind": "rsi", "length": 14},
                {"kind": "macd", "fast": 12, "slow": 26, "signal": 9},
                {"kind": "sma", "length": 20},
                {"kind": "ema", "length": 20},
                {"kind": "bbands", "length": 5, "std": 2.0},
                {"kind": "stoch", "k": 14, "d": 3, "smooth_k": 3},
                {"kind": "adx", "length": 14, "adxr": 2},   # ADX, ADXR, DMP, DMN
                {"kind": "vwap"},                            # VWAP_D (needs DatetimeIndex)
                {"kind": "midpoint", "length": 14},
                {"kind": "tema", "length": 20},
                {"kind": "obv"},
                {"kind": "ad"},
                {"kind": "atr", "percent": True, "length": 14},  # ATRr_14
                {"kind": "dpo", "length": 20},
                {"kind": "kvo", "fast": 34, "slow": 55, "signal": 13},
                {"kind": "cmo", "length": 14},
                {"kind": "roc", "length": 10},
                {"kind": "willr", "length": 14},
            ]
        )

        # Apply the strategy; pandas-ta appends indicator columns to df_window.
        df_window.ta.strategy(custom_strategy)

        # pandas-ta emits bbands columns like 'BBL_5_2.0' while the training
        # data used 'BBL_5_2.0_2.0' — rename only those (other indicator
        # names already match the training columns exactly).
        rename_map = {
            'BBL_5_2.0': 'BBL_5_2.0_2.0', 'BBM_5_2.0': 'BBM_5_2.0_2.0',
            'BBU_5_2.0': 'BBU_5_2.0_2.0', 'BBB_5_2.0': 'BBB_5_2.0_2.0',
            'BBP_5_2.0': 'BBP_5_2.0_2.0',
        }
        df_window.rename(columns=rename_map, inplace=True)

        # Forward-fill indicator warm-up NaNs over the whole window, keep the
        # most recent row, and zero anything still missing. (The previous
        # fillna(method='ffill', inplace=True) on a one-row slice was both a
        # no-op and deprecated in modern pandas.)
        last_features = df_window.ffill().iloc[-1:].fillna(0)

        # Return exactly the 30 expected features, in training order;
        # indicators that failed to compute are filled with 0.
        return last_features.reindex(columns=self.indicator_features, fill_value=0)

    async def detect_chart_patterns(self, ohlcv_data: dict) -> dict:
        """Classify each timeframe's candles with the 58% ML model.

        Args:
            ohlcv_data: mapping of timeframe name -> list of candles, each
                candle ``[timestamp_ms, open, high, low, close, volume]``.

        Returns:
            dict with keys ``pattern_detected``, ``pattern_confidence``,
            ``predicted_direction``, ``timeframe`` and ``details`` for the
            highest-confidence non-neutral pattern across all timeframes,
            or the neutral defaults if nothing qualifies.
        """
        best_match = {
            'pattern_detected': 'no_clear_pattern',
            'pattern_confidence': 0,
            'predicted_direction': 'neutral',
            'timeframe': None,
            'details': {}
        }

        if not self.model or not self.scaler:
            # Warn only once per instance to avoid log spam on every call.
            if not hasattr(self, '_init_warned'):
                print("⚠️ [PatternEngineV8] النموذج/المقياس غير محمل. يجب استدعاء .initialize() أولاً.")
                self._init_warned = True
            return best_match

        all_results = []

        for timeframe, candles in ohlcv_data.items():
            # Indicators need a long warm-up; require at least 200 candles.
            if len(candles) >= max(self.window_size, 200):
                try:
                    # Use the last 200 candles for the computation window.
                    window_candles = candles[-200:]
                    df_window = pd.DataFrame(
                        window_candles,
                        columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])

                    # VWAP requires a DatetimeIndex.
                    df_window['timestamp'] = pd.to_datetime(df_window['timestamp'], unit='ms')
                    df_window.set_index('timestamp', inplace=True)

                    # 1. Extract the V8 indicator features.
                    features_df = self._extract_features(df_window)
                    if features_df is None or features_df.empty:
                        continue

                    # 2. Scale the features in the exact column order the
                    #    scaler was fitted with (fall back to our own list
                    #    if the scaler does not record feature names).
                    expected_cols = getattr(self.scaler, 'feature_names_in_',
                                            self.indicator_features)
                    features_scaled = self.scaler.transform(features_df[list(expected_cols)])

                    # 3. Predict class probabilities and keep the best class.
                    #    Assumed class order: [Bearish, Neutral, Bullish] —
                    #    NOTE(review): verify against model.classes_.
                    probabilities = self.model.predict_proba(features_scaled)[0]
                    best_class_index = np.argmax(probabilities)
                    confidence = probabilities[best_class_index]
                    pattern_name = self.class_names[best_class_index]

                    # Keep only confident, non-neutral signals.
                    if pattern_name != "Neutral / No Pattern" and confidence > 0.5:
                        all_results.append({
                            'pattern': pattern_name,
                            'confidence': float(confidence),
                            'timeframe': timeframe
                        })

                except Exception as e:
                    # Best-effort per timeframe: log and continue with the rest.
                    print(f"❌ [PatternEngineV8] فشل التنبؤ لـ {timeframe}: {e}")

        # 4. Pick the single best pattern across *all* timeframes.
        if all_results:
            best_result = max(all_results, key=lambda x: x['confidence'])

            direction = 'neutral'
            if "Bullish" in best_result['pattern']:
                direction = 'up'
            elif "Bearish" in best_result['pattern']:
                direction = 'down'

            best_match['pattern_detected'] = best_result['pattern']
            best_match['pattern_confidence'] = best_result['confidence']
            best_match['timeframe'] = best_result['timeframe']
            best_match['predicted_direction'] = direction
            best_match['details'] = {'ml_confidence': best_result['confidence']}

        return best_match

# Import-time banner confirming the V8 engine module loaded.
print("✅ ML Module: Pattern Engine V8 (Indicator-Based ML) loaded")
|