Spaces:
Runtime error
Runtime error
| # app.py (v1.6 - Final Robust Version) | |
| import os | |
| import subprocess | |
| import sys | |
| import pickle | |
| import numpy as np | |
| import pandas as pd | |
| import tensorflow as tf | |
| from tensorflow import keras | |
| import gradio as gr | |
| import pandas_ta as ta | |
| from twelvedata import TDClient | |
# --- ORCHESTRATOR LOGIC ---
# Runs the calendar scraper as a child process before the app itself starts,
# so the Space serves the freshest event data it can get.
print("--- AGI Startup Sequence Initiated ---")
print("Step 1: Running the live event calendar update...")
try:
    # Same interpreter as this app; hard 10-minute cap so a hung scraper
    # cannot block startup. check=True turns a non-zero exit into an exception.
    subprocess.run([sys.executable, "scraper_update.py"], check=True, timeout=600)
    print("✅ Event calendar updated successfully.")
except Exception as e:
    # Best-effort: a scraper failure is logged but never fatal — the app
    # falls back to whatever calendar CSV already exists on disk.
    print(f"⚠️ WARNING: The event scraper failed. The app will proceed with the existing calendar. Error: {e}")
print("Step 2: Initializing the main AGI application...")
# Silence TensorFlow's INFO/WARNING chatter in the Space logs.
tf.get_logger().setLevel('ERROR')
| # --- Define Model Architectures --- | |
| from tensorflow.keras import layers, Model | |
class PositionalEmbedding(layers.Layer):
    """Adds a learned positional embedding to every time step of a sequence."""

    def __init__(self, max_len, d_model):
        super().__init__()
        # One trainable d_model-dim vector per position, up to max_len positions.
        self.pos_emb = layers.Embedding(input_dim=max_len, output_dim=d_model)

    def call(self, x):
        # Indices 0..seq_len-1 for the current input length; the embedding
        # lookup broadcasts across the batch dimension when added to x.
        seq_len = tf.shape(x)[1]
        positions = tf.range(start=0, limit=seq_len, delta=1)
        return x + self.pos_emb(positions)
class TransformerBlock(layers.Layer):
    """Post-norm transformer encoder block: self-attention then a feed-forward
    network, each wrapped in a residual connection + layer normalization."""

    def __init__(self, d_model, num_heads, dropout_rate, **kwargs):
        super().__init__(**kwargs)
        head_dim = d_model // num_heads
        self.mha = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=head_dim, dropout=dropout_rate)
        # Position-wise FFN with a 2x expansion and GELU activation.
        self.ffn = keras.Sequential([
            layers.Dense(d_model * 2, activation="gelu"),
            layers.Dense(d_model),
        ])
        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.dropout = layers.Dropout(dropout_rate)

    def call(self, inputs, training=False):
        # Self-attention sub-layer with residual + norm.
        attn_out = self.mha(inputs, inputs, inputs, training=training)
        normed = self.layernorm1(inputs + attn_out)
        # Feed-forward sub-layer (dropout on its output), residual + norm.
        ffn_out = self.dropout(self.ffn(normed), training=training)
        return self.layernorm2(normed + ffn_out)
class MambaInspiredHybridEncoder(Model):
    """Hybrid sequence encoder: a causal-conv + GRU front-end fused with the
    input projection via a residual, followed by a transformer-block stack."""

    def __init__(self, cfg, name="MambaInspiredHybridEncoder"):
        super().__init__(name=name)
        d_model = cfg['d_model']
        self.input_projection = layers.Dense(d_model)
        # Causal padding keeps each step from seeing future bars.
        self.conv1 = layers.Conv1D(d_model, kernel_size=5, padding='causal', activation='gelu')
        self.gru = layers.GRU(d_model, return_sequences=True)
        self.norm = layers.LayerNormalization(epsilon=1e-6)
        total_len = cfg['context_len'] + cfg['target_len']
        self.pos_embedding = PositionalEmbedding(max_len=total_len, d_model=d_model)
        self.transformer_blocks = [
            TransformerBlock(d_model, cfg['num_heads'], cfg['dropout_rate'])
            for _ in range(cfg['encoder_layers'])
        ]

    def call(self, x, training=False):
        projected = self.input_projection(x)
        conv_out = self.conv1(projected)
        gru_out = self.gru(conv_out, training=training)
        # Residual fusion of the plain projection with the conv+GRU path.
        hidden = self.norm(projected + gru_out)
        hidden = self.pos_embedding(hidden)
        for block in self.transformer_blocks:
            hidden = block(hidden, training=training)
        return hidden
# --- GLOBAL CONFIGURATION & ARTIFACT LOADING ---
print("--- Initializing AlphaJEPA AGI for Hugging Face ---")
# All model/scaler artifacts are expected alongside this file.
ARTIFACTS_PATH = '.'
# Runtime hyperparameters. state_size = encoder d_model (256) plus the three
# scalars appended in get_live_decision: uncertainty, gene action, position.
CFG = dict(context_len=96, target_len=24, d_model=256, encoder_layers=8, num_heads=8, dropout_rate=0.1, state_size=256 + 1 + 1 + 1, UNCERTAINTY_VETO_THRESHOLD = 0.05)
CFG['num_features'] = 7
# Data-feed key comes from the Space's secret store; may be None locally.
TWELVE_DATA_API_KEY = os.environ.get('TWELVE_DATA_API_KEY')
print("Loading artifacts...")
try:
    # NOTE(review): pickle.load executes arbitrary code on load — only use
    # artifact files produced by a trusted training pipeline.
    with open(os.path.join(ARTIFACTS_PATH, 'event_aware_scaler.pkl'), 'rb') as f: SCALER = pickle.load(f)
    with open(os.path.join(ARTIFACTS_PATH, 'event_aware_pca.pkl'), 'rb') as f: PCA_MODEL = pickle.load(f)
    # Build the encoder graph first so the saved weights can be loaded into it.
    ENCODER = MambaInspiredHybridEncoder(CFG); ENCODER.build(input_shape=(None, CFG['context_len'], CFG['num_features'])); ENCODER.load_weights(os.path.join(ARTIFACTS_PATH, 'event_aware_encoder.weights.h5'))
    PREDICTION_HEAD = keras.models.load_model(os.path.join(ARTIFACTS_PATH, 'alpha_prediction_head.h5'), compile=False)
    RL_AGENT = keras.models.load_model(os.path.join(ARTIFACTS_PATH, 'alpha_rl_agent.h5'), compile=False)
    with open(os.path.join(ARTIFACTS_PATH, 'strategy_farm.pkl'), 'rb') as f: STRATEGY_FARM = pickle.load(f)
    calendar_path = os.path.join(ARTIFACTS_PATH, 'eventsforex_factory_cache.csv')
    if os.path.exists(calendar_path) and os.path.getsize(calendar_path) > 0:
        CALENDAR_DF = pd.read_csv(calendar_path)
    else:
        # Missing/empty calendar is non-fatal: run with an empty, schema-matched frame.
        print("WARNING: Event calendar CSV is empty or missing. Creating a default empty calendar.")
        CALENDAR_DF = pd.DataFrame(columns=['DateTime', 'Currency', 'Impact', 'Event'])
    # Normalize timestamps to tz-aware UTC so they join cleanly against prices.
    CALENDAR_DF['DateTime'] = pd.to_datetime(CALENDAR_DF.get('DateTime'), utc=True)
    print("✅ All artifacts loaded successfully!")
except Exception as e:
    # A missing artifact makes the app useless — log and re-raise to abort startup.
    print(f"❌ Error loading artifacts: {e}"); raise e
# --- DATA FETCHING AND PROCESSING ---
def add_technical_indicators(df):
    """Append log-return, RSI(14), ATR(14) and Bollinger-band columns.

    Mutates and returns the same DataFrame; expects 'close', 'high', 'low'.
    """
    close = df['close']
    high = df['high']
    low = df['low']
    df['log_return'] = np.log(close).diff()
    df['rsi'] = ta.rsi(close, length=14)
    df['atr'] = ta.atr(high, low, close, length=14)
    bands = ta.bbands(close, length=20, std=2)
    # pandas_ta may return None / an empty frame when history is too short.
    if bands is not None and not bands.empty:
        df['bb_low'] = bands.iloc[:, 0]
        df['bb_high'] = bands.iloc[:, 2]
    return df
def fetch_live_data_twelve(api_key, symbol='EUR/USD', interval='15min', output_size=200):
    """Fetch recent OHLC bars from Twelve Data, enriched with indicators.

    Raises ValueError when no API key is configured. Returns a DataFrame in
    chronological order (oldest first) with indicator columns appended.
    """
    if not api_key:
        raise ValueError("Twelve Data API key not found. Please set it in the Space Secrets.")
    client = TDClient(apikey=api_key)
    frame = client.time_series(symbol=symbol, interval=interval, outputsize=output_size).as_pandas()
    # The API returns newest-first; reverse into chronological order.
    frame = frame.iloc[::-1].reset_index()
    return add_technical_indicators(frame)
| def merge_event_data(price_df, calendar_df, symbol): | |
| price_df['datetime'] = pd.to_datetime(price_df['datetime'], utc=True); price_df.set_index('datetime', inplace=True) | |
| currencies_in_symbol = symbol.upper().split('/'); filtered_calendar = calendar_df[calendar_df['Currency'].isin(currencies_in_symbol)].copy() | |
| filtered_calendar.set_index('DateTime', inplace=True); impact_dummies = pd.get_dummies(filtered_calendar['Impact'], prefix='impact').astype(float) | |
| combined_df = price_df.join(impact_dummies, how='left').fillna(0.0) | |
| high_impact_times = filtered_calendar[filtered_calendar['Impact'] == 'High'].index | |
| if not high_impact_times.empty: | |
| next_event_indices = high_impact_times.searchsorted(combined_df.index, side='right'); valid_indices = next_event_indices < len(high_impact_times) | |
| next_event_times = pd.Series(pd.NaT, index=combined_df.index); next_event_times.iloc[valid_indices] = high_impact_times[next_event_indices[valid_indices]] | |
| combined_df['time_to_next_event'] = (next_event_times - combined_df.index).dt.total_seconds() / 3600.0 | |
| combined_df['time_to_next_event'] = np.clip(combined_df['time_to_next_event'], 0, 24) / 24.0; combined_df['time_to_next_event'].fillna(1.0, inplace=True) | |
| else: combined_df['time_to_next_event'] = 1.0 | |
| return combined_df.reset_index() | |
# --- CORE DECISION PIPELINE ---
def get_live_decision(data_df, current_position):
    """Run the decision stack on the latest bars; return (decision, commentary).

    Pipeline: scale -> PCA -> encoder context vector -> MC-dropout uncertainty,
    evolved strategy genes, and the RL agent; an uncertainty veto can override
    everything with "NO TRADE".

    data_df: feature DataFrame with at least CFG['context_len'] rows.
    current_position: position code fed into the RL state (caller maps
        Flat/Long/Short to 0/1/-1).
    """
    if len(data_df) < CFG['context_len']: return "WAIT", f"Not enough data. Need {CFG['context_len']} bars, have {len(data_df)}."
    # The scaler remembers its training-time feature set; zero-fill any
    # column the live feed did not provide and select in training order.
    recent_data = data_df.tail(CFG['context_len']).copy(); feature_cols = SCALER.feature_names_in_
    for col in feature_cols:
        if col not in recent_data.columns: recent_data[col] = 0.0
    data_scaled = SCALER.transform(recent_data[feature_cols]); data_pca = PCA_MODEL.transform(data_scaled)
    # Encode the window (batch of 1); keep only the final time-step's vector.
    window = np.expand_dims(data_pca, axis=0).astype(np.float32); final_context_vector = ENCODER.predict(window, verbose=0)[:, -1, :]
    # Monte-Carlo dropout: 50 stochastic passes (training=True keeps dropout
    # active); the spread of the predictions is the uncertainty estimate.
    mc_preds = [PREDICTION_HEAD(final_context_vector, training=True) for _ in range(50)]; uncertainty_score = np.std(mc_preds)
    # First matching strategy "gene" wins (checked in farm order).
    active_gene_action = 0; active_gene_info = "None"
    for i, gene in enumerate(STRATEGY_FARM):
        idx, thold, op, act = gene['context_index'], gene['threshold'], gene['operator'], gene['action']
        if (op == '>' and final_context_vector[0, idx] > thold) or (op == '<' and final_context_vector[0, idx] < thold):
            active_gene_action = act; active_gene_info = f"Gene #{i} (Action: {'BUY' if act==1 else 'SELL'})"; break
    # RL state = context vector + [uncertainty, gene action, position]
    # (matches CFG['state_size'] = 256 + 1 + 1 + 1).
    state = np.concatenate([final_context_vector.flatten(), np.array([uncertainty_score, active_gene_action, current_position])])
    rl_q_values = RL_AGENT.predict(np.reshape(state, [1, CFG['state_size']]), verbose=0)[0]
    rl_action_map = {0: "HOLD", 1: "BUY", 2: "SELL"}; rl_action_str = rl_action_map[np.argmax(rl_q_values)]
    final_decision = rl_action_str
    commentary = f"RL Agent recommends: {rl_action_str} (Q-Vals: H={rl_q_values[0]:.2f}, B={rl_q_values[1]:.2f}, S={rl_q_values[2]:.2f}).\n"
    commentary += f"Active Strategy Gene: {active_gene_info}.\n"
    commentary += f"Uncertainty Score: {uncertainty_score:.4f}."
    # Meta-controller: refuse to trade when the model is too unsure of itself.
    if uncertainty_score > CFG['UNCERTAINTY_VETO_THRESHOLD']:
        final_decision = "NO TRADE"; commentary += f"\n\n**META-CONTROLLER VETO:** Uncertainty ({uncertainty_score:.4f}) exceeds threshold ({CFG['UNCERTAINTY_VETO_THRESHOLD']})."
    return final_decision, commentary
# --- GRADIO INTERFACE ---
def gradio_interface(symbol, interval, current_position_str):
    """Gradio callback: fetch live data, merge events, run the decision stack.

    Returns a (decision, commentary) pair; any exception is caught and
    surfaced as an ("ERROR", message) pair so the UI never crashes.
    """
    position_codes = {"Flat": 0, "Long": 1, "Short": -1}
    try:
        prices = fetch_live_data_twelve(TWELVE_DATA_API_KEY, symbol, interval)
        enriched = merge_event_data(prices, CALENDAR_DF, symbol)
        position = position_codes.get(current_position_str, 0)
        # Drop warm-up rows that still carry NaN indicator values.
        return get_live_decision(enriched.dropna(), position)
    except Exception as e:
        return "ERROR", f"An error occurred: {str(e)}"
# Build the Gradio UI: inputs on the left, decision + commentary on the right.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# AlphaJEPA: The AGI Trading Assistant")
    gr.Markdown("Select the asset and your current position, then click 'Analyze' to get a decision from the AGI.")
    with gr.Row():
        with gr.Column(scale=1):
            inp_symbol = gr.Dropdown(["EUR/USD", "GBP/USD", "USD/JPY", "AUD/USD"], label="Symbol", value="EUR/USD")
            inp_interval = gr.Dropdown(["1min", "5min", "15min"], label="Interval", value="15min")
            inp_pos = gr.Radio(["Flat", "Long", "Short"], label="Current Market Position", value="Flat")
            btn = gr.Button("Analyze Market & Get Decision", variant="primary")
        with gr.Column(scale=2):
            out_decision = gr.Textbox(label="Final Decision", interactive=False)
            out_commentary = gr.Textbox(label="AGI Commentary", lines=8, interactive=False)
    btn.click(fn=gradio_interface, inputs=[inp_symbol, inp_interval, inp_pos], outputs=[out_decision, out_commentary])
if __name__ == "__main__":
    # Warn (but do not abort) when the data-feed key is missing; analysis
    # requests will then fail with a clear in-UI error instead.
    if not TWELVE_DATA_API_KEY: print("WARNING: TWELVE_DATA_API_KEY secret not found.")
    print("\n--- Launching Gradio Interface ---"); demo.launch(debug=True)