Badumetsibb committed on
Commit
d7f7d8e
·
verified ·
1 Parent(s): 9489b4c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +203 -194
app.py CHANGED
@@ -7,7 +7,6 @@ import torch.nn.functional as F
7
  import requests
8
  import os
9
  import time
10
- import threading
11
  import plotly.graph_objects as go
12
  from plotly.subplots import make_subplots
13
  from sklearn.preprocessing import StandardScaler
@@ -16,7 +15,6 @@ from sklearn.preprocessing import StandardScaler
16
  API_KEY = os.getenv("TWELVEDATA_KEY")
17
  NTFY_TOPIC = os.getenv("NTFY_TOPIC")
18
 
19
- # The Constellation Basket
20
  TARGET_PAIR = "EUR/USD"
21
  SYMBOLS = ["EUR/USD", "GBP/USD", "USD/JPY", "XAU/USD"]
22
  TIMEFRAME = "15min"
@@ -24,13 +22,14 @@ LOOKBACK = 30
24
 
25
  # Global State
26
  GLOBAL_STATE = {
27
- "base_model": None, # The Transformer (The Teacher)
28
- "shadow_model": None, # The Online Learner (The Student)
29
  "last_trade": None,
30
- "last_run_time": 0
 
31
  }
32
 
33
- # --- 2. THE TEACHER: CONSTELLATION TRANSFORMER ---
34
  class PositionalEncoding(nn.Module):
35
  def __init__(self, d_model, max_len=5000):
36
  super(PositionalEncoding, self).__init__()
@@ -62,42 +61,40 @@ class ConstellationTransformer(nn.Module):
62
  mu = self.z_mu(context)
63
  return pi, sigma, mu
64
 
65
- # --- 3. THE STUDENT: META SHADOW LEARNER (ONLINE) ---
66
- class MetaShadowLearner(nn.Module):
67
- def __init__(self, input_size=4, hidden_size=32):
68
- super(MetaShadowLearner, self).__init__()
69
- # Simple MLP that learns FAST
 
 
 
 
 
 
 
70
  self.net = nn.Sequential(
71
- nn.Linear(input_size, hidden_size),
72
- nn.Tanh(), # Tanh for non-linearity
73
- nn.Linear(hidden_size, hidden_size),
74
  nn.ReLU(),
75
- nn.Linear(hidden_size, 1) # Output: The Correction Factor
76
  )
77
- # High learning rate because we learn sample-by-sample
78
  self.optimizer = torch.optim.Adam(self.parameters(), lr=0.01)
79
  self.loss_fn = nn.MSELoss()
80
 
81
  def forward(self, x):
82
  return self.net(x)
83
 
84
- def learn_on_the_spot(self, inputs, target_error):
85
- """
86
- Takes one sample, calculates loss, updates weights instantly.
87
- No batches. Pure online learning.
88
- """
89
  self.train()
90
  self.optimizer.zero_grad()
91
-
92
- predicted_error = self.forward(inputs)
93
-
94
- # We want the shadow model to predict the error of the base model
95
- loss = self.loss_fn(predicted_error, target_error)
96
-
97
  loss.backward()
98
  self.optimizer.step()
99
-
100
- return predicted_error.item()
101
 
102
  # --- 4. DATA PIPELINE ---
103
  def get_constellation_data():
@@ -114,244 +111,256 @@ def get_constellation_data():
114
  df = df[['close']].astype(float)
115
  df.rename(columns={'close': sym}, inplace=True)
116
  dfs.append(df)
117
- time.sleep(0.1)
118
  except: pass
119
 
120
  if not dfs: return None, "❌ Failed to fetch data."
121
  master_df = pd.concat(dfs, axis=1).ffill().dropna()
122
  return master_df, "✅ Constellation Aligned"
123
 
124
- def prepare_tensors(master_df):
125
- # Returns for ALL Pairs
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
126
  feature_cols = []
127
  for sym in SYMBOLS:
128
  col_name = f"{sym}_ret"
129
- master_df[col_name] = master_df[sym].pct_change().fillna(0)
130
  feature_cols.append(col_name)
 
131
 
132
- # Scale
133
  scaler = StandardScaler()
134
- data_scaled = scaler.fit_transform(master_df[feature_cols].values)
135
 
136
- # Windows
137
  X_data = []
138
  for i in range(LOOKBACK, len(data_scaled)):
139
  X_data.append(data_scaled[i-LOOKBACK:i])
140
 
141
  X_tensor = torch.FloatTensor(np.array(X_data))
142
 
143
- # Metadata
144
  target_idx = 0
145
  ret_mean = scaler.mean_[target_idx]
146
  ret_scale = scaler.scale_[target_idx]
147
- ref_prices = master_df[TARGET_PAIR].values[LOOKBACK:]
148
 
149
- return X_tensor, master_df.index[LOOKBACK:], ref_prices, ret_mean, ret_scale, data_scaled
150
 
151
  # --- 5. CORE LOGIC ---
152
  def send_ntfy(message):
153
  if not NTFY_TOPIC: return
154
  try:
155
- requests.post(f"https://ntfy.sh/{NTFY_TOPIC}", data=message.encode('utf-8'), headers={"Title": "Shadow FX V5", "Priority": "high"})
156
  except: pass
157
 
 
 
 
 
 
 
158
  def run_analysis():
159
  log_buffer = []
160
 
161
- # 1. Initialize Models
162
  if GLOBAL_STATE["base_model"] is None:
163
- GLOBAL_STATE["base_model"] = ConstellationTransformer(input_dim=4, d_model=64)
164
  log_buffer.append("🧠 Base Transformer Initialized")
165
-
166
- # Always re-init shadow model for fresh "session" learning or keep it if you want long term
167
- GLOBAL_STATE["shadow_model"] = MetaShadowLearner(input_size=4)
168
 
 
 
 
 
 
 
169
  base_model = GLOBAL_STATE["base_model"]
170
  shadow_model = GLOBAL_STATE["shadow_model"]
171
 
172
- # 2. Get Data
173
  master_df, msg = get_constellation_data()
174
  if master_df is None: return None, msg, msg
 
 
175
 
176
- X_tensor, dates, ref_prices, ret_mean, ret_std, raw_scaled_features = prepare_tensors(master_df)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
177
 
178
- # 3. THE LIVE LEARNING LOOP
179
  final_preds = []
180
  base_preds = []
181
- shadow_corrections = []
182
-
183
- log_buffer.append("🧬 Shadow Model learning on the spot...")
184
 
185
  base_model.eval()
186
 
187
- # We iterate up to len-1 because we need the 'next' value to calculate error
188
- loop_limit = len(X_tensor) - 1
189
 
190
- for i in range(loop_limit):
191
 
192
- # A. Base Model Prediction
193
  with torch.no_grad():
194
- curr_input = X_tensor[i].unsqueeze(0)
195
- pi, sigma, mu = base_model(curr_input)
196
  max_idx = torch.argmax(pi, dim=1)
197
- base_pred_ret = mu[0, max_idx].item()
198
  base_sigma = sigma[0, max_idx].item()
199
 
200
- # B. Construct Shadow Inputs
201
- last_features = raw_scaled_features[LOOKBACK + i - 1]
202
- shadow_input = torch.tensor([[base_pred_ret, base_sigma, last_features[0], last_features[3]]], dtype=torch.float32)
 
 
203
 
204
- # C. Shadow Prediction
 
205
  with torch.no_grad():
206
- correction = shadow_model(shadow_input).item()
207
-
208
- final_ret_pred = base_pred_ret + correction
209
 
210
- # D. The Truth Revealed (Next Candle)
211
- actual_ret_raw = (ref_prices[i+1] - ref_prices[i]) / ref_prices[i]
212
- actual_ret_scaled = (actual_ret_raw - ret_mean) / ret_std
213
 
214
- # E. Base Model Error
215
- base_error = actual_ret_scaled - base_pred_ret
 
 
216
 
217
- # F. TEACH THE SHADOW
218
- target_tensor = torch.tensor([[base_error]], dtype=torch.float32)
219
- shadow_model.learn_on_the_spot(shadow_input, target_tensor)
220
 
221
- base_preds.append(base_pred_ret)
222
- shadow_corrections.append(correction)
223
- final_preds.append(final_ret_pred)
224
 
225
- # --- FIXING THE ARRAY ALIGNMENT ---
226
- # The loop runs 'loop_limit' times.
227
- # If len(X) is 100, loop_limit is 99.
228
- # We generated 99 predictions.
229
- # Prediction i=0 corresponds to the move from Price[0] to Price[1].
230
- # So we align with dates[1:] and ref_prices[1:].
231
-
232
- # Slice the data to match the prediction count
233
  plot_dates = dates[1:1+len(final_preds)]
234
  plot_actual = ref_prices[1:1+len(final_preds)]
235
 
236
- # Reconstruction
237
- pred_prices = []
238
- curr_price = ref_prices[0] # Start from known point
239
 
240
- for k, ret in enumerate(final_preds):
241
- real_ret = (ret * ret_std) + ret_mean
242
- # To avoid compounding error drift in visualization, we use the ACTUAL previous price
243
- # to calculate the NEXT predicted price.
244
- prev_actual_price = ref_prices[k]
245
- next_price = prev_actual_price * (1 + real_ret)
246
- pred_prices.append(next_price)
247
-
248
- # Create DataFrame (Safe Mode)
249
- try:
250
- df = pd.DataFrame({
251
- 'Close': plot_actual,
252
- 'Final_Pred': pred_prices,
253
- 'Base_Pred': [ref_prices[k] * (1 + (bp * ret_std) + ret_mean) for k, bp in enumerate(base_preds)],
254
- 'Correction': shadow_corrections
255
- }, index=plot_dates)
256
- except ValueError as e:
257
- return None, f"❌ Data Size Mismatch: {e}", "\n".join(log_buffer)
258
-
259
- # Calculate Z-Score
260
- df['Gap'] = df['Final_Pred'] - df['Close']
261
- df['Z_Score'] = (df['Gap'] - df['Gap'].rolling(50).mean()) / (df['Gap'].rolling(50).std() + 1e-9)
262
-
263
- if len(df) < 2: return None, "Not enough data yet.", "Waiting..."
264
-
265
- last_z = df['Z_Score'].iloc[-1]
266
- last_price = df['Close'].iloc[-1]
267
-
268
- # Signals
269
- status = "NEUTRAL"
270
- color = "gray"
271
- if last_z > 2.0:
272
- status = "BUY SIGNAL"
273
- color = "#00ff00"
274
- if GLOBAL_STATE["last_trade"] != "BUY":
275
- send_ntfy(f"🚀 BUY EURUSD | Smart-Z: {last_z:.2f}")
276
- GLOBAL_STATE["last_trade"] = "BUY"
277
- elif last_z < -2.0:
278
- status = "SELL SIGNAL"
279
- color = "#ff0000"
280
- if GLOBAL_STATE["last_trade"] != "SELL":
281
- send_ntfy(f"🔻 SELL EURUSD | Smart-Z: {last_z:.2f}")
282
- GLOBAL_STATE["last_trade"] = "SELL"
283
-
284
- GLOBAL_STATE["last_run_time"] = time.time()
285
-
286
- # --- PLOTTING ---
287
- fig = make_subplots(rows=3, cols=1, shared_xaxes=True,
288
- vertical_spacing=0.05,
289
- row_heights=[0.5, 0.25, 0.25],
290
- subplot_titles=("Price vs Self-Learning Prediction", "Shadow Correction (The 'Boost')", "Smart Divergence"))
291
 
292
- # 1. Price
293
- fig.add_trace(go.Scatter(x=df.index, y=df['Close'], mode='lines', name='Actual Price', line=dict(color='gray', width=1)), row=1, col=1)
294
- fig.add_trace(go.Scatter(x=df.index, y=df['Base_Pred'], mode='lines', name='Base Model', line=dict(color='cyan', width=1, dash='dot')), row=1, col=1)
295
- fig.add_trace(go.Scatter(x=df.index, y=df['Final_Pred'], mode='lines', name='Adapted Model', line=dict(color='#ffff00', width=2)), row=1, col=1)
 
 
296
 
297
- # 2. Correction
298
- fig.add_trace(go.Bar(x=df.index, y=df['Correction'], name='AI Correction', marker_color='purple'), row=2, col=1)
 
299
 
300
- # 3. Z-Score
301
- fig.add_trace(go.Bar(x=df.index, y=df['Z_Score'], name='Smart Z-Score', marker_color=df['Z_Score'].apply(lambda x: 'green' if x>0 else 'red')), row=3, col=1)
302
- fig.add_hline(y=2.0, line_dash="dot", line_color="green", row=3, col=1)
303
- fig.add_hline(y=-2.0, line_dash="dot", line_color="red", row=3, col=1)
 
 
 
 
 
 
 
 
 
304
 
305
- fig.update_layout(template="plotly_dark", height=900, title=f"V5 ShadowNet: {status}")
306
-
307
- info_html = f"""
308
- <div style="text-align: center; padding: 10px; background-color: {color}; color: white; border-radius: 5px;">
309
- <h2>{status}</h2>
310
- <p>Smart-Z: {last_z:.3f}</p>
311
- <small>The yellow line learns from the cyan line's mistakes in real-time.</small>
312
- </div>
313
- """
 
 
 
 
 
 
 
 
 
 
314
 
315
- return fig, info_html, "\n".join(log_buffer)
316
-
317
- def hard_reset():
318
- GLOBAL_STATE["base_model"] = None
319
- GLOBAL_STATE["shadow_model"] = None
320
- return None, "Memory Wiped", "Reset Complete"
321
 
322
- # --- 6. BACKGROUND WORKER (Auto-Loop) ---
323
- def background_loop():
324
- while True:
325
- try:
326
- print("⏰ Background Scan Running...")
327
- run_analysis()
328
- print("✅ Scan Complete. Sleeping...")
329
- except Exception as e:
330
- print(f"❌ Background Error: {e}")
331
- time.sleep(900) # Run every 15 minutes (900 seconds)
332
-
333
- # Start Background Thread
334
- t = threading.Thread(target=background_loop, daemon=True)
335
- t.start()
336
-
337
- # --- 7. UI ---
338
- with gr.Blocks(title="ShadowFX V5") as app:
339
- gr.Markdown("# 🧬 V5 Meta-Shadow (Self-Adaptive)")
340
- gr.Markdown("This model has **no fixed weights**. It watches the base model, calculates its error on every candle, and updates its own brain instantly to correct the next prediction.")
341
-
342
  with gr.Row():
343
- refresh_btn = gr.Button(" Run Analysis Now", variant="primary")
344
- reset_btn = gr.Button("🧠 Wipe Memory", variant="stop")
345
-
346
- status_box = gr.HTML()
347
- plot = gr.Plot()
348
- logs = gr.Textbox(label="System Logs")
349
-
350
- refresh_btn.click(fn=run_analysis, outputs=[plot, status_box, logs])
351
- reset_btn.click(fn=hard_reset, outputs=[plot, status_box, logs])
352
 
353
- # Load on start
354
- app.load(fn=run_analysis, outputs=[plot, status_box, logs])
 
355
 
356
  if __name__ == "__main__":
357
  app.launch(ssr_mode=False)
 
7
  import requests
8
  import os
9
  import time
 
10
  import plotly.graph_objects as go
11
  from plotly.subplots import make_subplots
12
  from sklearn.preprocessing import StandardScaler
 
15
  API_KEY = os.getenv("TWELVEDATA_KEY")
16
  NTFY_TOPIC = os.getenv("NTFY_TOPIC")
17
 
 
18
  TARGET_PAIR = "EUR/USD"
19
  SYMBOLS = ["EUR/USD", "GBP/USD", "USD/JPY", "XAU/USD"]
20
  TIMEFRAME = "15min"
 
22
 
23
  # Global State
24
  GLOBAL_STATE = {
25
+ "base_model": None, # The Transformer (Teacher)
26
+ "shadow_model": None, # The Online Learner (Student)
27
  "last_trade": None,
28
+ "scaler": None,
29
+ "is_base_trained": False
30
  }
31
 
32
+ # --- 2. THE TEACHER: CONSTELLATION TRANSFORMER (Your Original Model) ---
33
  class PositionalEncoding(nn.Module):
34
  def __init__(self, d_model, max_len=5000):
35
  super(PositionalEncoding, self).__init__()
 
61
  mu = self.z_mu(context)
62
  return pi, sigma, mu
63
 
64
def mdn_loss(pi, sigma, mu, y):
    """Mean negative log-likelihood of a mixture-density-network output.

    pi, sigma, mu: (batch, K) mixture weights, std-devs and means.
    y: targets of shape (batch,) or (batch, 1); a 1-D target is lifted to
    2-D so it broadcasts against all K components.
    Returns a scalar tensor: the batch-mean NLL.
    """
    target = y.unsqueeze(1) if y.dim() == 1 else y
    components = torch.distributions.Normal(loc=mu, scale=sigma)
    # log p(y) = logsumexp_k( log pi_k + log N(y | mu_k, sigma_k) );
    # the 1e-8 floor guards log(0) when a mixture weight collapses.
    weighted_log_prob = torch.log(pi + 1e-8) + components.log_prob(target)
    nll = -torch.logsumexp(weighted_log_prob, dim=1)
    return nll.mean()
70
+
71
+ # --- 3. THE STUDENT: ONLINE META LEARNER (New) ---
72
class OnlineMetaLearner(nn.Module):
    """Small MLP trained sample-by-sample to correct the base model.

    Input features are [base_prediction, base_sigma, volatility]; the single
    output is an additive correction applied to the base return forecast.
    """

    def __init__(self, input_dim=3, hidden_dim=32):
        super(OnlineMetaLearner, self).__init__()
        layers = [
            nn.Linear(input_dim, hidden_dim),
            nn.Tanh(),  # bounded activation keeps corrections tame early on
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),  # scalar correction term
        ]
        self.net = nn.Sequential(*layers)
        # Aggressive learning rate: every sample is seen exactly once.
        self.optimizer = torch.optim.Adam(self.parameters(), lr=0.01)
        self.loss_fn = nn.MSELoss()

    def forward(self, x):
        """Return the predicted correction for feature batch x."""
        return self.net(x)

    def learn_step(self, x, target_error):
        """Single online update toward target_error.

        Runs one forward pass, backprops the MSE against target_error, steps
        the optimizer, and returns the (pre-update) predicted correction as a
        Python float.
        """
        self.train()
        self.optimizer.zero_grad()
        prediction = self.forward(x)
        self.loss_fn(prediction, target_error).backward()
        self.optimizer.step()
        return prediction.item()
 
98
 
99
  # --- 4. DATA PIPELINE ---
100
  def get_constellation_data():
 
111
  df = df[['close']].astype(float)
112
  df.rename(columns={'close': sym}, inplace=True)
113
  dfs.append(df)
114
+ time.sleep(0.1)
115
  except: pass
116
 
117
  if not dfs: return None, "❌ Failed to fetch data."
118
  master_df = pd.concat(dfs, axis=1).ffill().dropna()
119
  return master_df, "✅ Constellation Aligned"
120
 
121
def get_events_data():
    """Fetch this week's EUR/USD economic-calendar events from ForexFactory.

    Returns a DataFrame indexed by naive-UTC DateTime with a single
    Impact_Score column (Low=1, Medium=2, High=3, unknown=0), or an empty
    DataFrame on any failure — callers treat that as "no events".
    """
    try:
        url = "https://nfs.faireconomy.media/ff_calendar_thisweek.json"
        r = requests.get(url, headers={"User-Agent": "V23/1.0"}, timeout=5)
        data = r.json()
        parsed = []
        impact_map = {'Low': 1, 'Medium': 2, 'High': 3}
        for i in data:
            if i.get('country') in ['EUR', 'USD']:
                # Calendar timestamps arrive tz-aware; strip the tz so a later
                # merge_asof against the (naive) price index does not raise.
                dt = pd.to_datetime(i.get('date'), utc=True).tz_localize(None)
                imp = impact_map.get(i.get('impact'), 0)
                parsed.append({'DateTime': dt, 'Impact_Score': imp})
        df = pd.DataFrame(parsed)
        if not df.empty:
            # merge_asof requires a sorted index
            df = df.sort_values('DateTime').set_index('DateTime')
        return df
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still
        # propagate; any network or schema failure degrades to "no events".
        return pd.DataFrame()
137
+
138
def prepare_tensors(master_df, event_df):
    """Build scaled, windowed model inputs from prices plus calendar events.

    master_df: price DataFrame with one close column per symbol in SYMBOLS.
    event_df: output of get_events_data(); may be empty.
    Returns (X_tensor, index, ref_prices, ret_mean, ret_scale, data_scaled):
    X_tensor has shape (num_windows, LOOKBACK, num_features); ref_prices are
    the TARGET_PAIR closes aligned to the windows; ret_mean/ret_scale are the
    scaler statistics of the target-return feature (column 0), used to map
    scaled predictions back to raw returns.
    """
    if not event_df.empty:
        # Backward as-of join: each candle picks up the most recent event
        # within 4 hours; candles with no nearby event get 0 via fillna.
        merged = pd.merge_asof(master_df, event_df, left_index=True, right_index=True, direction='backward', tolerance=pd.Timedelta('4 hours')).fillna(0)
    else:
        merged = master_df.copy(); merged['Impact_Score'] = 0
    # NOTE(review): 'Surprise' is a constant placeholder feature — presumably
    # reserved for actual-vs-forecast event data; confirm before relying on it.
    merged['Surprise'] = 0.0

    # Per-symbol simple returns are the core features.
    feature_cols = []
    for sym in SYMBOLS:
        col_name = f"{sym}_ret"
        merged[col_name] = merged[sym].pct_change().fillna(0)
        feature_cols.append(col_name)
    feature_cols.extend(['Surprise', 'Impact_Score'])

    # Standardize all features jointly, fit over the full history.
    scaler = StandardScaler()
    data_scaled = scaler.fit_transform(merged[feature_cols].values)

    # Sliding windows: window ending at row i covers rows [i-LOOKBACK, i).
    X_data = []
    for i in range(LOOKBACK, len(data_scaled)):
        X_data.append(data_scaled[i-LOOKBACK:i])

    X_tensor = torch.FloatTensor(np.array(X_data))

    # Column 0 is the TARGET_PAIR return (SYMBOLS[0] == TARGET_PAIR); keep its
    # scaler stats so downstream code can un-scale predictions.
    target_idx = 0
    ret_mean = scaler.mean_[target_idx]
    ret_scale = scaler.scale_[target_idx]
    ref_prices = merged[TARGET_PAIR].values[LOOKBACK:]

    return X_tensor, merged.index[LOOKBACK:], ref_prices, ret_mean, ret_scale, data_scaled
167
 
168
  # --- 5. CORE LOGIC ---
169
  def send_ntfy(message):
170
  if not NTFY_TOPIC: return
171
  try:
172
+ requests.post(f"https://ntfy.sh/{NTFY_TOPIC}", data=message.encode('utf-8'), headers={"Title": "Hybrid V4", "Priority": "high"})
173
  except: pass
174
 
175
def hard_reset():
    """Drop both models and clear the trained flag so the next
    run_analysis() rebuilds and retrains everything from scratch.

    Returns (plot, status_html, log_text) in the shape the UI expects.
    """
    for key in ("base_model", "shadow_model"):
        GLOBAL_STATE[key] = None
    GLOBAL_STATE["is_base_trained"] = False
    return None, "<div>♻️ Memory Wiped.</div>", "Reset."
180
+
181
def run_analysis():
    """Full hybrid pipeline: fetch data, (lazily) train the base Transformer,
    run the online shadow-learning loop, and build the dashboard figure.

    Returns a 3-tuple (plotly Figure | None, status HTML str, log text str)
    matching the Gradio outputs [plot, status, logs].
    """
    log_buffer = []

    # 1. Initialize Base Model (Teacher)
    # input_dim=6: four per-symbol returns + Surprise + Impact_Score features.
    if GLOBAL_STATE["base_model"] is None:
        GLOBAL_STATE["base_model"] = ConstellationTransformer(input_dim=6, d_model=64, num_layers=2)
        log_buffer.append("🧠 Base Transformer Initialized")

    # 2. Initialize Shadow Model (Student) - Always fresh or persistent?
    # Let's keep it persistent so it gets smarter over time, but reset if hard_reset called
    if GLOBAL_STATE["shadow_model"] is None:
        GLOBAL_STATE["shadow_model"] = OnlineMetaLearner(input_dim=3)
        log_buffer.append("👻 Shadow Learner Initialized")

    base_model = GLOBAL_STATE["base_model"]
    shadow_model = GLOBAL_STATE["shadow_model"]

    # 3. Data
    master_df, msg = get_constellation_data()
    if master_df is None: return None, msg, msg
    event_df = get_events_data()
    X_tensor, dates, ref_prices, ret_mean, ret_std, raw_features = prepare_tensors(master_df, event_df)

    # 4. Train Base Model (The "Pre-Knowledge")
    # We KEEP this to prevent the "Drunk" zig-zags. The Base Model must be smart first.
    # Only runs once per process (or after hard_reset) — guarded by the flag.
    if not GLOBAL_STATE["is_base_trained"]:
        log_buffer.append("⚙️ Training Base Transformer (50 Epochs)...")
        optimizer = torch.optim.Adam(base_model.parameters(), lr=0.005)
        base_model.train()

        # Targets are next-candle returns of the target pair, re-scaled with
        # the same stats the features were standardized with.
        train_X = X_tensor[:-1]
        actual_returns = np.diff(ref_prices) / ref_prices[:-1]
        actual_returns_scaled = (actual_returns - ret_mean) / ret_std
        train_y = torch.FloatTensor(actual_returns_scaled).unsqueeze(1)
        train_X = train_X[:len(train_y)]

        dataset = torch.utils.data.TensorDataset(train_X, train_y)
        loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)

        for epoch in range(50):
            for batch_X, batch_y in loader:
                optimizer.zero_grad()
                pi, sigma, mu = base_model(batch_X)
                loss = mdn_loss(pi, sigma, mu, batch_y)
                loss.backward()
                optimizer.step()

        GLOBAL_STATE["is_base_trained"] = True
        log_buffer.append("✅ Base Calibration Complete.")

    # 5. HYBRID INFERENCE LOOP
    # We now run through the data again.
    # Base Model predicts -> Shadow Model corrects -> Weights update -> Next candle

    log_buffer.append("🧬 Running Online Adaptation Loop...")

    final_preds = []
    base_preds = []
    corrections = []

    base_model.eval()

    # We loop through history to let the Shadow Model "learn" the Base Model's weaknesses
    # (-1 because step i needs the i+1 candle to reveal the true return).
    loop_len = len(X_tensor) - 1

    for i in range(loop_len):

        # A. Base Model Prediction (Frozen weights here)
        # Takes the mean/sigma of the most-probable mixture component only.
        with torch.no_grad():
            inp = X_tensor[i].unsqueeze(0)
            pi, sigma, mu = base_model(inp)
            max_idx = torch.argmax(pi, dim=1)
            base_ret = mu[0, max_idx].item()
            base_sigma = sigma[0, max_idx].item()

        # B. Prepare Shadow Input
        # [Base_Prediction, Base_Confidence, Volatility]
        # Volatility is approx from raw features (feature 0 is EURUSD ret)
        vol = abs(raw_features[LOOKBACK+i][0])
        shadow_in = torch.tensor([[base_ret, base_sigma, vol]], dtype=torch.float32)

        # C. Shadow Prediction (Correction)
        # Note: We call forward(), not learn_step() yet because we don't know the future
        with torch.no_grad():
            correction = shadow_model(shadow_in).item()

        final_ret = base_ret + correction

        # D. Get Real Next Value
        real_ret_raw = (ref_prices[i+1] - ref_prices[i]) / ref_prices[i]
        real_ret_scaled = (real_ret_raw - ret_mean) / ret_std

        # E. Calculate Error for Shadow
        # The target for the Shadow is: "What should I have added to Base to make it perfect?"
        target_correction = real_ret_scaled - base_ret
        target_tensor = torch.tensor([[target_correction]], dtype=torch.float32)

        # F. LEARN ON THE SPOT
        shadow_model.learn_step(shadow_in, target_tensor)

        base_preds.append(base_ret)
        corrections.append(correction)
        final_preds.append(final_ret)

    # 6. Reconstruction & Plotting
    # Prediction i describes the move from candle i to i+1, so plots align
    # against dates[1:] / ref_prices[1:].
    plot_dates = dates[1:1+len(final_preds)]
    plot_actual = ref_prices[1:1+len(final_preds)]

    # Reconstruct prices from returns
    pred_prices_base = []
    pred_prices_final = []

    for k in range(len(final_preds)):
        prev_p = ref_prices[k] # Use actual previous to prevent drift

        # Base
        b_ret = (base_preds[k] * ret_std) + ret_mean
        pred_prices_base.append(prev_p * (1 + b_ret))

        # Final
        f_ret = (final_preds[k] * ret_std) + ret_mean
        pred_prices_final.append(prev_p * (1 + f_ret))

    df = pd.DataFrame({
        'Close': plot_actual,
        'Base': pred_prices_base,
        'Final': pred_prices_final,
        'Correction': corrections
    }, index=plot_dates)

    # Z-Score of the prediction/price gap over a rolling 50-candle window;
    # the 1e-9 avoids division by zero on flat stretches.
    df['Gap'] = df['Final'] - df['Close']
    df['Z'] = (df['Gap'] - df['Gap'].rolling(50).mean()) / (df['Gap'].rolling(50).std() + 1e-9)

    if len(df) > 0:
        last_z = df['Z'].iloc[-1]
        last_p = df['Close'].iloc[-1]

        # |Z| > 2 is treated as a tradeable divergence signal.
        status = "NEUTRAL"
        color = "gray"
        if last_z > 2.0: status, color = "BUY SIGNAL", "green"
        if last_z < -2.0: status, color = "SELL SIGNAL", "red"

        # Check notification — only push when the signal changes, so repeated
        # scans in the same regime don't spam the ntfy topic.
        if "SIGNAL" in status and GLOBAL_STATE["last_trade"] != status:
            send_ntfy(f"{status} EURUSD | Z: {last_z:.2f}")
            GLOBAL_STATE["last_trade"] = status

        fig = make_subplots(rows=3, cols=1, shared_xaxes=True, row_heights=[0.5, 0.25, 0.25],
            subplot_titles=("Hybrid Price Model", "AI Correction (Shadow)", "Divergence"))

        # 1. Price
        fig.add_trace(go.Scatter(x=df.index, y=df['Close'], name='Price', line=dict(color='gray')), row=1, col=1)
        fig.add_trace(go.Scatter(x=df.index, y=df['Base'], name='Base Transformer', line=dict(color='cyan', dash='dot')), row=1, col=1)
        fig.add_trace(go.Scatter(x=df.index, y=df['Final'], name='Adapted (Shadow)', line=dict(color='yellow', width=2)), row=1, col=1)

        # 2. Correction
        fig.add_trace(go.Bar(x=df.index, y=df['Correction'], name='Learned Correction', marker_color='purple'), row=2, col=1)

        # 3. Z
        fig.add_trace(go.Bar(x=df.index, y=df['Z'], name='Z-Score', marker_color=df['Z'].apply(lambda x: 'green' if x>0 else 'red')), row=3, col=1)
        fig.add_hline(y=2, line_dash="dot", row=3, col=1); fig.add_hline(y=-2, line_dash="dot", row=3, col=1)

        fig.update_layout(template="plotly_dark", height=800, title=f"Hybrid V4: {status}")

        info = f"<div style='background:{color};color:white;padding:10px;text-align:center'><h3>{status}</h3>Z: {last_z:.3f}</div>"
        return fig, info, "\n".join(log_buffer)

    # Fewer than 1 usable rows — tell the UI to wait for more candles.
    return None, "No Data", "Wait"
 
 
 
 
 
350
 
351
# --- 6. UI ---
# Gradio front-end: scan / wipe buttons, a status banner, the 3-row figure,
# and a log box. All three callbacks share the same output trio.
with gr.Blocks(title="Hybrid V4") as app:
    gr.Markdown("# 👁️ Hybrid V4: Transformer + Shadow Learner")
    with gr.Row():
        r = gr.Button("🔄 Scan", variant="primary")  # runs a full analysis pass
        w = gr.Button("⚠️ Wipe", variant="stop")     # hard_reset: clears all model state
    # NOTE(review): assuming the components below sit at Blocks level (matching
    # the previous version's layout) — confirm against original indentation.
    s = gr.HTML()      # signal banner (colored status div)
    p = gr.Plot()      # plotly dashboard figure
    l = gr.Textbox()   # system logs

    r.click(run_analysis, outputs=[p, s, l])
    w.click(hard_reset, outputs=[p, s, l])
    # Kick off one analysis on page load so the dashboard isn't empty.
    app.load(run_analysis, outputs=[p, s, l])

if __name__ == "__main__":
    app.launch(ssr_mode=False)