Badumetsibb committed on
Commit
c1defb6
·
verified ·
1 Parent(s): 9da4f4d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -38
app.py CHANGED
@@ -158,38 +158,36 @@ def send_ntfy(message):
158
  def run_analysis():
159
  log_buffer = []
160
 
161
- # 1. Initialize Models if needed
162
  if GLOBAL_STATE["base_model"] is None:
163
  GLOBAL_STATE["base_model"] = ConstellationTransformer(input_dim=4, d_model=64)
164
- # Pre-load base model with random weights (simulating a pre-trained state)
165
- # In a real scenario, you'd load a .pth file here
166
  log_buffer.append("🧠 Base Transformer Initialized")
167
 
168
- # 2. Initialize Shadow Model (Reset every scan to simulate 'fresh' eyes or keep persistent)
169
- # We re-initialize here to prove it learns the CURRENT chart from scratch every time
170
  GLOBAL_STATE["shadow_model"] = MetaShadowLearner(input_size=4)
171
 
172
  base_model = GLOBAL_STATE["base_model"]
173
  shadow_model = GLOBAL_STATE["shadow_model"]
174
 
175
- # 3. Get Data
176
  master_df, msg = get_constellation_data()
177
  if master_df is None: return None, msg, msg
178
 
179
  X_tensor, dates, ref_prices, ret_mean, ret_std, raw_scaled_features = prepare_tensors(master_df)
180
 
181
- # 4. THE LIVE LEARNING LOOP
182
- # We iterate through the chart. The Base Model predicts. The Shadow Model observes the error and updates itself.
183
-
184
  final_preds = []
185
  base_preds = []
186
  shadow_corrections = []
187
 
188
  log_buffer.append("🧬 Shadow Model learning on the spot...")
189
 
190
- base_model.eval() # Base model is frozen/static
 
 
 
191
 
192
- for i in range(len(X_tensor) - 1): # -1 because we need target for next step
193
 
194
  # A. Base Model Prediction
195
  with torch.no_grad():
@@ -200,13 +198,10 @@ def run_analysis():
200
  base_sigma = sigma[0, max_idx].item()
201
 
202
  # B. Construct Shadow Inputs
203
- # The Shadow sees: [Base_Prediction, Base_Confidence, Recent_Volatility, Current_Price_Trend]
204
- # Using raw scaled features from the last step of the window
205
  last_features = raw_scaled_features[LOOKBACK + i - 1]
206
- # Feature vector: [Base_Pred, Base_Sigma, Volatility(approx via EURUSD ret), Gold_Ret]
207
  shadow_input = torch.tensor([[base_pred_ret, base_sigma, last_features[0], last_features[3]]], dtype=torch.float32)
208
 
209
- # C. Shadow Prediction (Before knowing the truth)
210
  with torch.no_grad():
211
  correction = shadow_model(shadow_input).item()
212
 
@@ -219,45 +214,54 @@ def run_analysis():
219
  # E. Base Model Error
220
  base_error = actual_ret_scaled - base_pred_ret
221
 
222
- # F. TEACH THE SHADOW (On The Spot!)
223
- # Target for shadow is the Base Model's error
224
  target_tensor = torch.tensor([[base_error]], dtype=torch.float32)
225
-
226
- # This function updates the weights of shadow_model instantly
227
  shadow_model.learn_on_the_spot(shadow_input, target_tensor)
228
 
229
- # Store for plotting
230
  base_preds.append(base_pred_ret)
231
  shadow_corrections.append(correction)
232
  final_preds.append(final_ret_pred)
233
 
234
- # Convert predictions back to price
235
- plot_dates = dates[1:-1]
236
- plot_actual = ref_prices[1:-1]
 
 
 
237
 
238
- # Reconstruct Prices
 
 
 
 
239
  pred_prices = []
240
- curr_price = ref_prices[0]
241
 
242
- for ret in final_preds:
243
- # Denormalize return
244
  real_ret = (ret * ret_std) + ret_mean
245
- next_price = curr_price * (1 + real_ret)
 
 
 
246
  pred_prices.append(next_price)
247
- curr_price = ref_prices[len(pred_prices)] # Reset to actual to prevent drift for visual comparison
248
 
249
- # Create DataFrame
250
- df = pd.DataFrame({
251
- 'Close': plot_actual,
252
- 'Final_Pred': pred_prices,
253
- 'Base_Pred': [ref_prices[k] * (1 + (bp * ret_std) + ret_mean) for k, bp in enumerate(base_preds)],
254
- 'Correction': shadow_corrections
255
- }, index=plot_dates)
 
 
 
256
 
257
- # Calculate Z-Score on the FINAL Optimized prediction
258
  df['Gap'] = df['Final_Pred'] - df['Close']
259
  df['Z_Score'] = (df['Gap'] - df['Gap'].rolling(50).mean()) / (df['Gap'].rolling(50).std() + 1e-9)
260
 
 
 
261
  last_z = df['Z_Score'].iloc[-1]
262
  last_price = df['Close'].iloc[-1]
263
 
@@ -324,7 +328,7 @@ def background_loop():
324
  print("✅ Scan Complete. Sleeping...")
325
  except Exception as e:
326
  print(f"❌ Background Error: {e}")
327
- time.sleep(300) # Run every 5 minutes (900 seconds)
328
 
329
  # Start Background Thread
330
  t = threading.Thread(target=background_loop, daemon=True)
 
158
  def run_analysis():
159
  log_buffer = []
160
 
161
+ # 1. Initialize Models
162
  if GLOBAL_STATE["base_model"] is None:
163
  GLOBAL_STATE["base_model"] = ConstellationTransformer(input_dim=4, d_model=64)
 
 
164
  log_buffer.append("🧠 Base Transformer Initialized")
165
 
166
+ # Always re-initialize the shadow model for fresh per-session learning; keep it persistent instead if long-term learning is desired
 
167
  GLOBAL_STATE["shadow_model"] = MetaShadowLearner(input_size=4)
168
 
169
  base_model = GLOBAL_STATE["base_model"]
170
  shadow_model = GLOBAL_STATE["shadow_model"]
171
 
172
+ # 2. Get Data
173
  master_df, msg = get_constellation_data()
174
  if master_df is None: return None, msg, msg
175
 
176
  X_tensor, dates, ref_prices, ret_mean, ret_std, raw_scaled_features = prepare_tensors(master_df)
177
 
178
+ # 3. THE LIVE LEARNING LOOP
 
 
179
  final_preds = []
180
  base_preds = []
181
  shadow_corrections = []
182
 
183
  log_buffer.append("🧬 Shadow Model learning on the spot...")
184
 
185
+ base_model.eval()
186
+
187
+ # We iterate up to len-1 because we need the 'next' value to calculate error
188
+ loop_limit = len(X_tensor) - 1
189
 
190
+ for i in range(loop_limit):
191
 
192
  # A. Base Model Prediction
193
  with torch.no_grad():
 
198
  base_sigma = sigma[0, max_idx].item()
199
 
200
  # B. Construct Shadow Inputs
 
 
201
  last_features = raw_scaled_features[LOOKBACK + i - 1]
 
202
  shadow_input = torch.tensor([[base_pred_ret, base_sigma, last_features[0], last_features[3]]], dtype=torch.float32)
203
 
204
+ # C. Shadow Prediction
205
  with torch.no_grad():
206
  correction = shadow_model(shadow_input).item()
207
 
 
214
  # E. Base Model Error
215
  base_error = actual_ret_scaled - base_pred_ret
216
 
217
+ # F. TEACH THE SHADOW
 
218
  target_tensor = torch.tensor([[base_error]], dtype=torch.float32)
 
 
219
  shadow_model.learn_on_the_spot(shadow_input, target_tensor)
220
 
 
221
  base_preds.append(base_pred_ret)
222
  shadow_corrections.append(correction)
223
  final_preds.append(final_ret_pred)
224
 
225
+ # --- FIXING THE ARRAY ALIGNMENT ---
226
+ # The loop runs 'loop_limit' times.
227
+ # If len(X) is 100, loop_limit is 99.
228
+ # We generated 99 predictions.
229
+ # Prediction i=0 corresponds to the move from Price[0] to Price[1].
230
+ # So we align with dates[1:] and ref_prices[1:].
231
 
232
+ # Slice the data to match the prediction count
233
+ plot_dates = dates[1:1+len(final_preds)]
234
+ plot_actual = ref_prices[1:1+len(final_preds)]
235
+
236
+ # Reconstruction
237
  pred_prices = []
238
+ curr_price = ref_prices[0] # Start from known point
239
 
240
+ for k, ret in enumerate(final_preds):
 
241
  real_ret = (ret * ret_std) + ret_mean
242
+ # To avoid compounding error drift in visualization, we use the ACTUAL previous price
243
+ # to calculate the NEXT predicted price.
244
+ prev_actual_price = ref_prices[k]
245
+ next_price = prev_actual_price * (1 + real_ret)
246
  pred_prices.append(next_price)
 
247
 
248
+ # Create DataFrame (Safe Mode)
249
+ try:
250
+ df = pd.DataFrame({
251
+ 'Close': plot_actual,
252
+ 'Final_Pred': pred_prices,
253
+ 'Base_Pred': [ref_prices[k] * (1 + (bp * ret_std) + ret_mean) for k, bp in enumerate(base_preds)],
254
+ 'Correction': shadow_corrections
255
+ }, index=plot_dates)
256
+ except ValueError as e:
257
+ return None, f"❌ Data Size Mismatch: {e}", "\n".join(log_buffer)
258
 
259
+ # Calculate Z-Score
260
  df['Gap'] = df['Final_Pred'] - df['Close']
261
  df['Z_Score'] = (df['Gap'] - df['Gap'].rolling(50).mean()) / (df['Gap'].rolling(50).std() + 1e-9)
262
 
263
+ if len(df) < 2: return None, "Not enough data yet.", "Waiting..."
264
+
265
  last_z = df['Z_Score'].iloc[-1]
266
  last_price = df['Close'].iloc[-1]
267
 
 
328
  print("✅ Scan Complete. Sleeping...")
329
  except Exception as e:
330
  print(f"❌ Background Error: {e}")
331
+ time.sleep(400) # Run every 400 seconds (~6.7 minutes)
332
 
333
  # Start Background Thread
334
  t = threading.Thread(target=background_loop, daemon=True)