Riy777 committed on
Commit
3e1d7b3
·
1 Parent(s): ca1e880

Update LLM.py

Browse files
Files changed (1) hide show
  1. LLM.py +112 -103
LLM.py CHANGED
@@ -1,9 +1,12 @@
1
- # LLM.py (V18.2 - Async Fix)
2
  import os, traceback, json, time, re
3
  import httpx
4
  from datetime import datetime
5
  from typing import List, Dict, Any, Optional
6
 
 
 
 
7
  try:
8
  from r2 import R2Service
9
  from learning_hub.hub_manager import LearningHubManager # (استيراد العقل)
@@ -18,13 +21,19 @@ try:
18
  except ImportError:
19
  NewsFetcher = None
20
 
21
- # إعدادات النموذج
22
- LLM_API_URL = os.getenv("LLM_API_URL", "https://api.groq.com/openai/v1/chat/completions")
23
- LLM_API_KEY = os.getenv("LLM_API_KEY")
24
- LLM_MODEL = os.getenv("LLM_MODEL", "llama3-70b-8192")
 
 
 
 
 
 
 
25
 
26
  # إعدادات العميل
27
- # (زيادة المهلة إلى 5 دقائق للتحليلات المعقدة)
28
  CLIENT_TIMEOUT = 300.0
29
 
30
  class LLMService:
@@ -32,58 +41,68 @@ class LLMService:
32
  if not LLM_API_KEY:
33
  raise ValueError("❌ [LLMService] متغير بيئة LLM_API_KEY غير موجود!")
34
 
35
- self.headers = {
36
- "Authorization": f"Bearer {LLM_API_KEY}",
37
- "Content-Type": "application/json"
38
- }
39
- self.http_client = httpx.AsyncClient(timeout=CLIENT_TIMEOUT)
 
 
 
 
 
 
 
40
 
41
  # --- (الربط بالخدمات الأخرى) ---
42
  self.r2_service: Optional[R2Service] = None
43
  self.learning_hub: Optional[LearningHubManager] = None
44
-
45
- # (V8.1) إضافة NewsFetcher (للاستخدام في إعادة التحليل)
46
  self.news_fetcher: Optional[NewsFetcher] = None
47
-
48
- print(f"✅ [LLMService] مهيأ. النموذج: {LLM_MODEL}")
49
 
50
- async def _call_llm(self, prompt: str, temperature: float = 0.2, max_tokens: int = 1500) -> Optional[str]:
51
  """
52
- (محدث) إجراء استدعاء API للنموذج الضخم (باللغة الإنجليزية).
 
53
  """
 
 
 
 
 
 
 
54
  payload = {
55
  "model": LLM_MODEL,
56
  "messages": [
57
- {"role": "system", "content": "You are an expert AI trading analyst. You MUST respond ONLY with the requested JSON object, with no introductory text, markdown formatting (like ```json), or explanations."},
58
- {"role": "user", "content": prompt}
59
  ],
60
- "temperature": temperature,
61
- "max_tokens": max_tokens,
62
- "top_p": 1,
63
- "stream": False,
 
 
64
  "response_format": {"type": "json_object"}
65
  }
66
 
67
  try:
68
- response = await self.http_client.post(LLM_API_URL, headers=self.headers, json=payload)
69
- response.raise_for_status() # Check for HTTP 4xx/5xx errors
70
-
71
- data = response.json()
72
 
73
- if "choices" in data and len(data["choices"]) > 0:
74
- content = data["choices"][0].get("message", {}).get("content")
75
  if content:
76
  return content.strip()
77
 
78
- print(f"❌ [LLMService] استجابة API غير متوقعة: {data}")
79
  return None
80
 
81
- except httpx.HTTPStatusError as http_err:
82
- print(f"❌ [LLMService] خطأ HTTP: {http_err} - {http_err.response.text}")
83
- except httpx.RequestError as req_err:
84
- print(f"❌ [LLMService] خطأ في الطلب: {req_err}")
85
  except json.JSONDecodeError:
86
- print(f"❌ [LLMService] فشل في تحليل استجابة JSON: {response.text}")
87
  except Exception as e:
88
  print(f"❌ [LLMService] خطأ غير متوقع في _call_llm: {e}")
89
  traceback.print_exc()
@@ -165,10 +184,8 @@ class LLMService:
165
  symbol = candidate_data.get('symbol', 'UNKNOWN')
166
  try:
167
  # 1. (العقل) جلب القواعد (Deltas) من محور التعلم
168
- # (سيتم جلب القواعد العامة + قواعد الاستراتيجية بناءً على المرشح)
169
  learning_context_prompt = "Playbook: No learning context available."
170
  if self.learning_hub:
171
- # (استعلام عام لجلب أفضل القواعد الشاملة)
172
  learning_context_prompt = await self.learning_hub.get_active_context_for_llm(
173
  domain="general",
174
  query=f"{symbol} strategy decision"
@@ -177,12 +194,11 @@ class LLMService:
177
  # 2. إنشاء الـ Prompt (باللغة الإنجليزية)
178
  prompt = self._create_trading_prompt(candidate_data, learning_context_prompt)
179
 
180
- # (اختياري: حفظ الـ Prompt للتدقيق)
181
  if self.r2_service:
182
  await self.r2_service.save_llm_prompts_async(symbol, "trading_decision", prompt, candidate_data)
183
 
184
  # 3. استدعاء النموذج الضخم (LLM)
185
- response_text = await self._call_llm(prompt, temperature=0.1, max_tokens=1000)
186
 
187
  # 4. تحليل الاستجابة (باستخدام المحلل الذكي)
188
  decision_json = self._parse_llm_response_enhanced(
@@ -208,7 +224,6 @@ class LLMService:
208
  # 1. (العقل) جلب القواعد (Deltas) من محور التعلم
209
  learning_context_prompt = "Playbook: No learning context available."
210
  if self.learning_hub:
211
- # (استعلام محدد لإعادة التحليل)
212
  learning_context_prompt = await self.learning_hub.get_active_context_for_llm(
213
  domain="strategy",
214
  query=f"{symbol} re-analysis {trade_data.get('strategy', 'GENERIC')}"
@@ -218,28 +233,22 @@ class LLMService:
218
  latest_news_text = "News data unavailable for re-analysis."
219
  latest_news_score = 0.0
220
  if self.news_fetcher:
221
- # (هذا يجلب أحدث الأخبار في الوقت الفعلي)
222
  latest_news_text = await self.news_fetcher.get_news_for_symbol(symbol)
223
  if self.news_fetcher.vader_analyzer and latest_news_text:
224
  vader_scores = self.news_fetcher.vader_analyzer.polarity_scores(latest_news_text)
225
  latest_news_score = vader_scores.get('compound', 0.0)
226
 
227
- # (إضافة الأخبار المحدثة إلى البيانات الحالية)
228
  current_data['latest_news_text'] = latest_news_text
229
  current_data['latest_news_score'] = latest_news_score
230
 
231
  # 3. إنشاء الـ Prompt (باللغة الإنجليزية)
232
- # 🔴 --- START OF CHANGE (V18.2 - Async Fix) --- 🔴
233
- # (يجب استخدام await لأن الدالة أصبحت async)
234
  prompt = await self._create_reanalysis_prompt(trade_data, current_data, learning_context_prompt)
235
- # 🔴 --- END OF CHANGE --- 🔴
236
 
237
- # (اختياري: حفظ الـ Prompt للتدقيق)
238
  if self.r2_service:
239
  await self.r2_service.save_llm_prompts_async(symbol, "trade_reanalysis", prompt, current_data)
240
 
241
  # 4. استدعاء النموذج الضخم (LLM)
242
- response_text = await self._call_llm(prompt, temperature=0.0, max_tokens=1000) # (درجة حرارة 0.0 للحسم)
243
 
244
  # 5. تحليل الاستجابة (باستخدام المحلل الذكي)
245
  decision_json = self._parse_llm_response_enhanced(
@@ -262,8 +271,9 @@ class LLMService:
262
  candidate_data: Dict[str, Any],
263
  learning_context: str) -> str:
264
  """
265
- (معدل V18.1)
266
  إنشاء الـ Prompt (باللغة الإنجليزية) لاتخاذ قرار التداول الأولي (Explorer).
 
267
  """
268
 
269
  symbol = candidate_data.get('symbol', 'N/A')
@@ -275,13 +285,11 @@ class LLMService:
275
  mc_data = candidate_data.get('monte_carlo_distribution', {})
276
 
277
  # --- 2. استخراج بيانات المشاعر والأخبار (الطبقة 1) ---
278
- # (V8.2) جلب بيانات VADER الإحصائية (المتعلمة)
279
  news_text = candidate_data.get('news_text', 'No news text provided.')
280
- news_score_raw = candidate_data.get('news_score_raw', 0.0) # (درجة VADER الخام)
281
- statistical_news_pnl = candidate_data.get('statistical_news_pnl', 0.0) # (الدرجة الإحصائية المتعلمة)
282
 
283
  # --- 3. استخراج بيانات الحيتان (الطبقة 1) ---
284
- # (ملاحظة: هذا هو القسم الذي سنقوم بتحديثه)
285
  whale_data = candidate_data.get('whale_data', {})
286
  whale_summary = whale_data.get('llm_friendly_summary', {})
287
  exchange_flows = whale_data.get('exchange_flows', {})
@@ -289,18 +297,14 @@ class LLMService:
289
  whale_signal = whale_summary.get('recommended_action', 'HOLD')
290
  whale_confidence = whale_summary.get('confidence', 0.3)
291
  whale_reason = whale_summary.get('whale_activity_summary', 'No whale data.')
292
-
293
- # (البيانات قصيرة المدى - نفترض أنها من أفضل نافذة متعلمة، e.g., 1h)
294
  net_flow_usd = exchange_flows.get('net_flow_usd', 0.0)
295
 
296
- # 🔴 --- START OF CHANGE (V18.1) --- 🔴
297
  # (البيانات طويلة المدى - من تحليل 24 ساعة الجديد)
298
  accumulation_data_24h = whale_data.get('accumulation_analysis_24h', {})
299
  net_flow_24h_usd = accumulation_data_24h.get('net_flow_usd', 0.0)
300
  total_inflow_24h_usd = accumulation_data_24h.get('to_exchanges_usd', 0.0)
301
  total_outflow_24h_usd = accumulation_data_24h.get('from_exchanges_usd', 0.0)
302
  relative_net_flow_24h_percent = accumulation_data_24h.get('relative_net_flow_percent', 0.0)
303
- # 🔴 --- END OF CHANGE --- 🔴
304
 
305
  # --- 4. استخراج بيانات السوق (الطبقة 0) ---
306
  market_context = candidate_data.get('sentiment_data', {})
@@ -309,20 +313,17 @@ class LLMService:
309
 
310
  # --- 5. بناء أقسام الـ Prompt (الإنجليزية) ---
311
 
312
- # (التعلم)
313
  playbook_prompt = f"""
314
  --- START OF LEARNING PLAYBOOK ---
315
  {learning_context}
316
  --- END OF PLAYBOOK ---
317
  """
318
- # (ملخص ML)
319
  ml_summary_prompt = f"""
320
  1. **ML Analysis (Score: {l1_score:.3f}):**
321
  * Reasons: {', '.join(l1_reasons)}
322
  * Chart Pattern: {pattern_data.get('pattern_detected', 'None')} (Conf: {pattern_data.get('pattern_confidence', 0):.2f})
323
  * Monte Carlo (1h): {mc_data.get('probability_of_gain', 0):.1f}% chance of profit (Expected: {mc_data.get('expected_return_pct', 0):.2f}%)
324
  """
325
- # (ملخص الأخبار)
326
  news_prompt = f"""
327
  2. **News & Sentiment Analysis:**
328
  * Market Trend: {market_trend} (BTC: {btc_sentiment})
@@ -330,7 +331,6 @@ class LLMService:
330
  * Statistical PnL (Learned): {statistical_news_pnl:+.2f}%
331
  * News Text: {news_text[:300]}...
332
  """
333
- # (ملخص الحيتان - محدث)
334
  whale_activity_prompt = f"""
335
  3. **Whale Activity (Real-time Flow - Optimized Window):**
336
  * Signal: {whale_signal} (Confidence: {whale_confidence:.2f})
@@ -344,43 +344,50 @@ class LLMService:
344
  * Relative 24h Net Flow (vs Daily Volume): {relative_net_flow_24h_percent:+.2f}%
345
  """
346
 
347
- prompt_sections = [
348
- f"SYSTEM: You are an expert AI trading analyst (Explorer Brain). Analyze the provided data for {symbol} and decide if it's a high-potential candidate to 'WATCH'.",
349
- playbook_prompt,
350
- "--- START OF CANDIDATE DATA ---",
351
- ml_summary_prompt,
352
- news_prompt,
353
- whale_activity_prompt,
354
- "--- END OF CANDIDATE DATA ---",
355
- """
 
 
 
 
 
356
  TASK:
357
- 1. Synthesize all data points (ML, News, Whale Flow, 24h Accumulation).
358
- 2. Consult the "Playbook" for learned rules.
359
- 3. Decide the final action: 'WATCH' (if high confidence) or 'NO_DECISION' (if low confidence or conflicting signals).
360
- 4. If 'WATCH', define the 'strategy_to_watch' (e.g., 'Breakout', 'MeanReversion', 'WhaleAccumulation') and an appropriate 'exit_profile' (e.g., 'Aggressive', 'Standard', 'Patient').
 
 
 
361
 
362
  OUTPUT (JSON Object ONLY):
363
- {
364
  "action": "WATCH" or "NO_DECISION",
365
  "strategy_to_watch": "STRATEGY_NAME",
366
  "confidence_level": 0.0_to_1.0,
367
  "reasoning": "Brief justification (max 40 words) synthesizing all data points.",
368
  "exit_profile": "Aggressive" or "Standard" or "Patient"
369
- }
370
  """
371
- ]
372
 
373
- return "\n".join(prompt_sections)
 
 
374
 
375
- # 🔴 --- START OF CHANGE (V18.2 - Async Fix) --- 🔴
376
- # (يجب أن تكون الدالة async)
377
  async def _create_reanalysis_prompt(self,
378
  trade_data: Dict[str, Any],
379
  current_data: Dict[str, Any],
380
  learning_context: str) -> str:
381
- # 🔴 --- END OF CHANGE --- 🔴
382
  """
383
- (معدل V8.1)
384
  إنشاء الـ Prompt (باللغة الإنجليزية) لإعادة تحليل صفقة مفتوحة (Reflector Brain).
385
  """
386
 
@@ -407,10 +414,7 @@ OUTPUT (JSON Object ONLY):
407
  # --- 4. (العقل) بيانات التعلم الإحصائي ---
408
  statistical_feedback = ""
409
  if self.learning_hub:
410
- # 🔴 --- START OF CHANGE (V18.2 - Async Fix) --- 🔴
411
- # (هذا هو السطر الذي تسبب بالخطأ - يجب استخدام await)
412
  statistical_feedback = await self.learning_hub.get_statistical_feedback_for_llm(original_strategy)
413
- # 🔴 --- END OF CHANGE --- 🔴
414
 
415
  # --- 5. بناء أقسام الـ Prompt (الإنجليزية) ---
416
 
@@ -438,32 +442,37 @@ OUTPUT (JSON Object ONLY):
438
  * Latest News (VADER: {latest_news_score:.3f}): {latest_news_text[:300]}...
439
  """
440
 
441
- prompt_sections = [
442
- f"SYSTEM: You are an expert AI trading analyst (Reflector Brain). An open trade for {symbol} has triggered a mandatory re-analysis. Analyze the new data and decide the next action.",
443
- playbook_prompt,
444
- "--- START OF TRADE DATA ---",
445
- trade_status_prompt,
446
- current_analysis_prompt,
447
- "--- END OF TRADE DATA ---",
448
- """
 
 
 
 
 
449
  TASK:
450
- 1. Analyze the "Open Trade Status" against the "Current Real-time Analysis".
451
- 2. Consult the "Playbook" for learned rules and statistical feedback.
452
- 3. Decide the best course of action:
453
- * 'HOLD': Maintain the current strategy (if analysis is still valid or neutral).
454
- * 'UPDATE_TRADE': Adjust StopLoss/TakeProfit (if risk/reward changed).
455
- * 'CLOSE_TRADE': Immediately close the trade (if analysis is invalidated or high risk).
456
- 4. You MUST provide new SL/TP targets if action is 'UPDATE_TRADE'.
457
 
458
  OUTPUT (JSON Object ONLY):
459
- {
460
  "action": "HOLD" or "UPDATE_TRADE" or "CLOSE_TRADE",
461
  "strategy": "MAINTAIN_CURRENT" or "ADAPTIVE_EXIT" or "IMMEDIATE_EXIT",
462
  "reasoning": "Brief justification (max 40 words) for the decision.",
463
  "new_stop_loss": (float or null, required if action is 'UPDATE_TRADE'),
464
  "new_take_profit": (float or null, required if action is 'UPDATE_TRADE')
465
- }
466
  """
467
- ]
468
 
469
- return "\n".join(prompt_sections)
 
1
+ # LLM.py (V19.2 - NVIDIA Engine + System Trigger Fix)
2
  import os, traceback, json, time, re
3
  import httpx
4
  from datetime import datetime
5
  from typing import List, Dict, Any, Optional
6
 
7
+ # (استخدام مكتبة OpenAI الرسمية بدلاً من httpx)
8
+ from openai import AsyncOpenAI, RateLimitError, APIError
9
+
10
  try:
11
  from r2 import R2Service
12
  from learning_hub.hub_manager import LearningHubManager # (استيراد العقل)
 
21
  except ImportError:
22
  NewsFetcher = None
23
 
24
+ # (تحديث الإعدادات الافتراضية لتطابق NVIDIA)
25
+ LLM_API_URL = os.getenv("LLM_API_URL", "https://integrate.api.nvidia.com/v1")
26
+ LLM_API_KEY = os.getenv("LLM_API_KEY") # (هذا هو المفتاح الذي سيتم استخدامه)
27
+ LLM_MODEL = os.getenv("LLM_MODEL", "nvidia/llama-3.1-nemotron-ultra-253b-v1")
28
+
29
+ # (البارامترات المحددة من طرفك)
30
+ LLM_TEMPERATURE = 0.2
31
+ LLM_TOP_P = 0.7
32
+ LLM_MAX_TOKENS = 16384
33
+ LLM_FREQUENCY_PENALTY = 0.8
34
+ LLM_PRESENCE_PENALTY = 0.5
35
 
36
  # إعدادات العميل
 
37
  CLIENT_TIMEOUT = 300.0
38
 
39
  class LLMService:
 
41
  if not LLM_API_KEY:
42
  raise ValueError("❌ [LLMService] متغير بيئة LLM_API_KEY غير موجود!")
43
 
44
+ try:
45
+ self.client = AsyncOpenAI(
46
+ base_url=LLM_API_URL,
47
+ api_key=LLM_API_KEY,
48
+ timeout=CLIENT_TIMEOUT
49
+ )
50
+ print(f"✅ [LLMService V19.2] مهيأ. النموذج: {LLM_MODEL}")
51
+ print(f" -> Endpoint: {LLM_API_URL}")
52
+ except Exception as e:
53
+ print(f"❌ [LLMService V19.2] فشل تهيئة AsyncOpenAI: {e}")
54
+ traceback.print_exc()
55
+ raise
56
 
57
  # --- (الربط بالخدمات الأخرى) ---
58
  self.r2_service: Optional[R2Service] = None
59
  self.learning_hub: Optional[LearningHubManager] = None
 
 
60
  self.news_fetcher: Optional[NewsFetcher] = None
 
 
61
 
62
+ async def _call_llm(self, prompt: str) -> Optional[str]:
63
  """
64
+ (محدث V19.2)
65
+ إجراء استدعاء API للنموذج الضخم (يستخدم الآن "detailed thinking on" كـ system prompt).
66
  """
67
+
68
+ # 🔴 --- START OF CHANGE (V19.2) --- 🔴
69
+ # (استخدام "detailed thinking on" كـ system prompt كما طلبت)
70
+ system_prompt = "detailed thinking on"
71
+ # (تم نقل جميع التعليمات الأخرى إلى الـ User Prompt)
72
+ # 🔴 --- END OF CHANGE --- 🔴
73
+
74
  payload = {
75
  "model": LLM_MODEL,
76
  "messages": [
77
+ {"role": "system", "content": system_prompt},
78
+ {"role": "user", "content": prompt} # (prompt يحتوي الآن على تعليمات JSON)
79
  ],
80
+ "temperature": LLM_TEMPERATURE,
81
+ "top_p": LLM_TOP_P,
82
+ "max_tokens": LLM_MAX_TOKENS,
83
+ "frequency_penalty": LLM_FREQUENCY_PENALTY,
84
+ "presence_penalty": LLM_PRESENCE_PENALTY,
85
+ "stream": False, # (يجب أن تكون False للحصول على JSON)
86
  "response_format": {"type": "json_object"}
87
  }
88
 
89
  try:
90
+ response = await self.client.chat.completions.create(**payload)
 
 
 
91
 
92
+ if response.choices and len(response.choices) > 0:
93
+ content = response.choices[0].message.content
94
  if content:
95
  return content.strip()
96
 
97
+ print(f"❌ [LLMService] استجابة API غير متوقعة: {response.model_dump_json()}")
98
  return None
99
 
100
+ except RateLimitError as e:
101
+ print(f"❌ [LLMService] خطأ Rate Limit من NVIDIA API: {e}")
102
+ except APIError as e:
103
+ print(f"❌ [LLMService] خطأ API من NVIDIA API: {e}")
104
  except json.JSONDecodeError:
105
+ print(f"❌ [LLMService] فشل في تحليل استجابة JSON.")
106
  except Exception as e:
107
  print(f"❌ [LLMService] خطأ غير متوقع في _call_llm: {e}")
108
  traceback.print_exc()
 
184
  symbol = candidate_data.get('symbol', 'UNKNOWN')
185
  try:
186
  # 1. (العقل) جلب القواعد (Deltas) من محور التعلم
 
187
  learning_context_prompt = "Playbook: No learning context available."
188
  if self.learning_hub:
 
189
  learning_context_prompt = await self.learning_hub.get_active_context_for_llm(
190
  domain="general",
191
  query=f"{symbol} strategy decision"
 
194
  # 2. إنشاء الـ Prompt (باللغة الإنجليزية)
195
  prompt = self._create_trading_prompt(candidate_data, learning_context_prompt)
196
 
 
197
  if self.r2_service:
198
  await self.r2_service.save_llm_prompts_async(symbol, "trading_decision", prompt, candidate_data)
199
 
200
  # 3. استدعاء النموذج الضخم (LLM)
201
+ response_text = await self._call_llm(prompt)
202
 
203
  # 4. تحليل الاستجابة (باستخدام المحلل الذكي)
204
  decision_json = self._parse_llm_response_enhanced(
 
224
  # 1. (العقل) جلب القواعد (Deltas) من محور التعلم
225
  learning_context_prompt = "Playbook: No learning context available."
226
  if self.learning_hub:
 
227
  learning_context_prompt = await self.learning_hub.get_active_context_for_llm(
228
  domain="strategy",
229
  query=f"{symbol} re-analysis {trade_data.get('strategy', 'GENERIC')}"
 
233
  latest_news_text = "News data unavailable for re-analysis."
234
  latest_news_score = 0.0
235
  if self.news_fetcher:
 
236
  latest_news_text = await self.news_fetcher.get_news_for_symbol(symbol)
237
  if self.news_fetcher.vader_analyzer and latest_news_text:
238
  vader_scores = self.news_fetcher.vader_analyzer.polarity_scores(latest_news_text)
239
  latest_news_score = vader_scores.get('compound', 0.0)
240
 
 
241
  current_data['latest_news_text'] = latest_news_text
242
  current_data['latest_news_score'] = latest_news_score
243
 
244
  # 3. إنشاء الـ Prompt (باللغة الإنجليزية)
 
 
245
  prompt = await self._create_reanalysis_prompt(trade_data, current_data, learning_context_prompt)
 
246
 
 
247
  if self.r2_service:
248
  await self.r2_service.save_llm_prompts_async(symbol, "trade_reanalysis", prompt, current_data)
249
 
250
  # 4. استدعاء النموذج الضخم (LLM)
251
+ response_text = await self._call_llm(prompt)
252
 
253
  # 5. تحليل الاستجابة (باستخدام المحلل الذكي)
254
  decision_json = self._parse_llm_response_enhanced(
 
271
  candidate_data: Dict[str, Any],
272
  learning_context: str) -> str:
273
  """
274
+ (معدل V19.2)
275
  إنشاء الـ Prompt (باللغة الإنجليزية) لاتخاذ قرار التداول الأولي (Explorer).
276
+ (تم نقل جميع التعليمات إلى هنا لتناسب system prompt الجديد)
277
  """
278
 
279
  symbol = candidate_data.get('symbol', 'N/A')
 
285
  mc_data = candidate_data.get('monte_carlo_distribution', {})
286
 
287
  # --- 2. استخراج بيانات المشاعر والأخبار (الطبقة 1) ---
 
288
  news_text = candidate_data.get('news_text', 'No news text provided.')
289
+ news_score_raw = candidate_data.get('news_score_raw', 0.0)
290
+ statistical_news_pnl = candidate_data.get('statistical_news_pnl', 0.0)
291
 
292
  # --- 3. استخراج بيانات الحيتان (الطبقة 1) ---
 
293
  whale_data = candidate_data.get('whale_data', {})
294
  whale_summary = whale_data.get('llm_friendly_summary', {})
295
  exchange_flows = whale_data.get('exchange_flows', {})
 
297
  whale_signal = whale_summary.get('recommended_action', 'HOLD')
298
  whale_confidence = whale_summary.get('confidence', 0.3)
299
  whale_reason = whale_summary.get('whale_activity_summary', 'No whale data.')
 
 
300
  net_flow_usd = exchange_flows.get('net_flow_usd', 0.0)
301
 
 
302
  # (البيانات طويلة المدى - من تحليل 24 ساعة الجديد)
303
  accumulation_data_24h = whale_data.get('accumulation_analysis_24h', {})
304
  net_flow_24h_usd = accumulation_data_24h.get('net_flow_usd', 0.0)
305
  total_inflow_24h_usd = accumulation_data_24h.get('to_exchanges_usd', 0.0)
306
  total_outflow_24h_usd = accumulation_data_24h.get('from_exchanges_usd', 0.0)
307
  relative_net_flow_24h_percent = accumulation_data_24h.get('relative_net_flow_percent', 0.0)
 
308
 
309
  # --- 4. استخراج بيانات السوق (الطبقة 0) ---
310
  market_context = candidate_data.get('sentiment_data', {})
 
313
 
314
  # --- 5. بناء أقسام الـ Prompt (الإنجليزية) ---
315
 
 
316
  playbook_prompt = f"""
317
  --- START OF LEARNING PLAYBOOK ---
318
  {learning_context}
319
  --- END OF PLAYBOOK ---
320
  """
 
321
  ml_summary_prompt = f"""
322
  1. **ML Analysis (Score: {l1_score:.3f}):**
323
  * Reasons: {', '.join(l1_reasons)}
324
  * Chart Pattern: {pattern_data.get('pattern_detected', 'None')} (Conf: {pattern_data.get('pattern_confidence', 0):.2f})
325
  * Monte Carlo (1h): {mc_data.get('probability_of_gain', 0):.1f}% chance of profit (Expected: {mc_data.get('expected_return_pct', 0):.2f}%)
326
  """
 
327
  news_prompt = f"""
328
  2. **News & Sentiment Analysis:**
329
  * Market Trend: {market_trend} (BTC: {btc_sentiment})
 
331
  * Statistical PnL (Learned): {statistical_news_pnl:+.2f}%
332
  * News Text: {news_text[:300]}...
333
  """
 
334
  whale_activity_prompt = f"""
335
  3. **Whale Activity (Real-time Flow - Optimized Window):**
336
  * Signal: {whale_signal} (Confidence: {whale_confidence:.2f})
 
344
  * Relative 24h Net Flow (vs Daily Volume): {relative_net_flow_24h_percent:+.2f}%
345
  """
346
 
347
+ # 🔴 --- START OF CHANGE (V19.2) --- 🔴
348
+ # (تم دمج جميع التعليمات هنا في رسالة الـ user)
349
+ task_prompt = f"""
350
+ CONTEXT:
351
+ You are an expert AI trading analyst (Explorer Brain).
352
+ Analyze the provided data for {symbol} and decide if it's a high-potential candidate to 'WATCH'.
353
+ {playbook_prompt}
354
+
355
+ --- START OF CANDIDATE DATA ---
356
+ {ml_summary_prompt}
357
+ {news_prompt}
358
+ {whale_activity_prompt}
359
+ --- END OF CANDIDATE DATA ---
360
+
361
  TASK:
362
+ 1. **Internal Thinking (Private):** Perform a step-by-step analysis (as triggered by the system prompt).
363
+ * Synthesize all data points (ML, News, Whale Flow, 24h Accumulation).
364
+ * Are the signals aligned? (e.g., ML breakout + Whale Accumulation = Strong).
365
+ * Are there conflicts? (e.g., Good ML Score but high 24h Deposits = Risky).
366
+ * Consult the "Playbook" for learned rules.
367
+ 2. **Final Decision:** Based on your internal thinking, decide the final action.
368
+ 3. **Output Constraint:** Provide your final answer ONLY in the requested JSON object format, with no introductory text, markdown formatting, or explanations.
369
 
370
  OUTPUT (JSON Object ONLY):
371
+ {{
372
  "action": "WATCH" or "NO_DECISION",
373
  "strategy_to_watch": "STRATEGY_NAME",
374
  "confidence_level": 0.0_to_1.0,
375
  "reasoning": "Brief justification (max 40 words) synthesizing all data points.",
376
  "exit_profile": "Aggressive" or "Standard" or "Patient"
377
+ }}
378
  """
379
+ # 🔴 --- END OF CHANGE --- 🔴
380
 
381
+ # (نرسل فقط task_prompt لأنه يحتوي الآن على كل شيء)
382
+ return task_prompt
383
+
384
 
 
 
385
  async def _create_reanalysis_prompt(self,
386
  trade_data: Dict[str, Any],
387
  current_data: Dict[str, Any],
388
  learning_context: str) -> str:
 
389
  """
390
+ (معدل V19.2)
391
  إنشاء الـ Prompt (باللغة الإنجليزية) لإعادة تحليل صفقة مفتوحة (Reflector Brain).
392
  """
393
 
 
414
  # --- 4. (العقل) بيانات التعلم الإحصائي ---
415
  statistical_feedback = ""
416
  if self.learning_hub:
 
 
417
  statistical_feedback = await self.learning_hub.get_statistical_feedback_for_llm(original_strategy)
 
418
 
419
  # --- 5. بناء أقسام الـ Prompt (الإنجليزية) ---
420
 
 
442
  * Latest News (VADER: {latest_news_score:.3f}): {latest_news_text[:300]}...
443
  """
444
 
445
+ # 🔴 --- START OF CHANGE (V19.2) --- 🔴
446
+ # (دمج جميع التعليمات في رسالة الـ user)
447
+ task_prompt = f"""
448
+ CONTEXT:
449
+ You are an expert AI trading analyst (Reflector Brain).
450
+ An open trade for {symbol} has triggered a mandatory re-analysis. Analyze the new data and decide the next action.
451
+ {playbook_prompt}
452
+
453
+ --- START OF TRADE DATA ---
454
+ {trade_status_prompt}
455
+ {current_analysis_prompt}
456
+ --- END OF TRADE DATA ---
457
+
458
  TASK:
459
+ 1. **Internal Thinking (Private):** Perform a step-by-step analysis (as triggered by the system prompt).
460
+ * Compare the "Open Trade Status" with the "Current Real-time Analysis".
461
+ * Has the situation improved or deteriorated? (e.g., PnL is good, but new Monte Carlo is negative).
462
+ * Are there new critical news?
463
+ * Consult the "Playbook" for learned rules and statistical feedback.
464
+ 2. **Final Decision:** Based on your internal thinking, decide the best course of action (HOLD, UPDATE_TRADE, CLOSE_TRADE).
465
+ 3. **Output Constraint:** Provide your final answer ONLY in the requested JSON object format, with no introductory text, markdown formatting, or explanations.
466
 
467
  OUTPUT (JSON Object ONLY):
468
+ {{
469
  "action": "HOLD" or "UPDATE_TRADE" or "CLOSE_TRADE",
470
  "strategy": "MAINTAIN_CURRENT" or "ADAPTIVE_EXIT" or "IMMEDIATE_EXIT",
471
  "reasoning": "Brief justification (max 40 words) for the decision.",
472
  "new_stop_loss": (float or null, required if action is 'UPDATE_TRADE'),
473
  "new_take_profit": (float or null, required if action is 'UPDATE_TRADE')
474
+ }}
475
  """
476
+ # 🔴 --- END OF CHANGE --- 🔴
477
 
478
+ return task_prompt