# app.py V6.2 - The Autonomous Agent with Adaptive Meta-Controller
# --- Core Libraries ---
import pandas as pd
import numpy as np
import warnings
import joblib
import json
import os
import gradio as gr
import requests
import time
from datetime import datetime
import pytz
import threading
import csv
import math
import random
import traceback
from collections import deque, defaultdict
# --- Environment and Dependencies ---
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# --- Machine Learning & Deep Learning Libraries ---
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Model, load_model
# --- NLP Integration (for feature generation) ---
from transformers import BertTokenizer, TFBertModel
# --- Live Data Fetching Configuration ---
from twelvedata import TDClient
from huggingface_hub import hf_hub_download

EVENT_JSON_URL = "https://nfs.faireconomy.media/ff_calendar_thisweek.json"
CACHE_DURATION_SECONDS = 600
_EVENT_CACHE = {"data": None, "timestamp": 0}
# --- AGENT LOGIC: ALL OUR PROVEN CLASSES ---
class CausalReasoningNetwork:
    """Labels each bar with a volatility/trend regime derived from ATR and EMA slope."""
    def __init__(self, processed_data):
        self.data = processed_data.copy()

    def identify_volatility_regimes(self, volatility_indicator='ATR', trend_indicator='EMA_20'):
        atr = self.data[volatility_indicator]
        low_vol_threshold = atr.quantile(0.33)
        high_vol_threshold = atr.quantile(0.66)
        ema_slope = self.data[trend_indicator].diff(periods=3)
        slope_threshold = ema_slope.quantile(0.75)  # constant per call; hoisted out of the loop
        regimes = []
        for i in range(len(self.data)):
            atr_val = atr.iloc[i]
            slope_val = ema_slope.iloc[i] if pd.notna(ema_slope.iloc[i]) else 0
            if atr_val > high_vol_threshold:
                # High volatility: a steep EMA slope means a trend, otherwise treat it as a breakout
                regimes.append('TRENDING' if abs(slope_val) > slope_threshold else 'BREAKOUT')
            elif atr_val < low_vol_threshold:
                regimes.append('RANGING')
            else:
                regimes.append('CHOPPY')
        self.data['regime'] = regimes
        return self.data
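# Usage sketch (names illustrative; the worker below does exactly this on the live sequence):
#   crn = CausalReasoningNetwork(features_df)     # frame must carry 'ATR' and 'EMA_20'
#   labeled = crn.identify_volatility_regimes()
#   current_regime = labeled.iloc[-1]['regime']   # 'TRENDING' | 'BREAKOUT' | 'RANGING' | 'CHOPPY'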
class PredictionCoreTransformer:
    """Thin inference wrapper around the trained multi-horizon Keras model."""
    def __init__(self, sequence_length=48):
        self.scaler = None
        self.model = None
        self.sequence_length = sequence_length
        self.feature_names = None

    def load_model_and_scaler(self, model_path, scaler_path, feature_list_path):
        print("Loading models for inference...")
        self.model = load_model(model_path)
        self.scaler = joblib.load(scaler_path)
        with open(feature_list_path, 'r') as f:
            self.feature_names = json.load(f)
        print("Models loaded successfully.")

    def predict_single(self, input_sequence):
        input_sequence_numeric = input_sequence[self.feature_names]
        scaled_sequence = self.scaler.transform(input_sequence_numeric)
        reshaped_sequence = scaled_sequence.reshape(1, self.sequence_length, len(self.feature_names))
        predictions = self.model.predict(reshaped_sequence, verbose=0)
        # The model has three output heads: one price forecast per horizon
        return {"5m": predictions[0][0][0], "15m": predictions[1][0][0], "1h": predictions[2][0][0]}
class RuleBasedSituationRoom:
    """Turns horizon predictions (or a plain EMA read) into a trade thesis with ATR-based levels."""
    def __init__(self, params):
        self.params = params

    def generate_thesis(self, predictions, sequence_df):
        latest_data = sequence_df.iloc[-1]
        current_price = latest_data['close']
        if not predictions:
            # No multi-horizon predictions: fall back to a simple EMA position read
            dir_5m = "BUY" if current_price > latest_data['EMA_20'] else "SELL"
            dir_15m = dir_5m
            dir_1h = dir_5m
        else:
            dir_5m = "BUY" if predictions['5m'] > current_price else "SELL"
            dir_15m = "BUY" if predictions['15m'] > current_price else "SELL"
            dir_1h = "BUY" if predictions['1h'] > current_price else "SELL"
        action, confidence = "NO_TRADE", "LOW"
        reasoning, strategy = "Divergence or weak signals.", "Range Play"
        if dir_5m == dir_15m == dir_1h:
            action, confidence = dir_5m, "HIGH"
            reasoning, strategy = f"Strong confluence ({dir_5m}).", "Trend Following"
        elif dir_5m == dir_15m:
            action, confidence = dir_5m, "MEDIUM"
            reasoning, strategy = f"Short/Medium confluence ({dir_5m}).", "Scalp"
        if action == "NO_TRADE":
            return {"action": "NO_TRADE", "confidence": "LOW", "strategy_type": strategy, "reasoning": reasoning}
        atr = latest_data['ATR']
        if pd.isna(atr) or atr <= 0:
            atr = 0.0001  # floor the ATR so stop/target distances stay non-degenerate
        entry = current_price
        sl_dist = self.params.get('sl_atr_multiplier', 2.0) * atr
        tp_dist = self.params.get('tp_atr_multiplier', 4.0) * atr
        if action == "BUY":
            stop_loss, take_profit = entry - sl_dist, entry + tp_dist
        else:
            stop_loss, take_profit = entry + sl_dist, entry - tp_dist
        return {"action": action, "entry": f"{entry:.5f}", "stop_loss": f"{stop_loss:.5f}",
                "take_profit": f"{take_profit:.5f}", "confidence": confidence,
                "reasoning": reasoning, "strategy_type": strategy}
class MarketRegimeFilter:
    def __init__(self):
        self.allowed_strategies = {'TRENDING': ['Trend Following'],
                                   'BREAKOUT': ['Trend Following', 'Scalp'],
                                   'CHOPPY': ['Scalp'],
                                   'RANGING': []}

    def should_trade(self, current_regime, trade_thesis):
        if trade_thesis['action'] == 'NO_TRADE':
            return False
        return trade_thesis['strategy_type'] in self.allowed_strategies.get(current_regime, [])
def fetch_live_events_with_cache():
    current_time = time.time()
    if _EVENT_CACHE["data"] and (current_time - _EVENT_CACHE["timestamp"] < CACHE_DURATION_SECONDS):
        return _EVENT_CACHE["data"]
    try:
        response = requests.get(EVENT_JSON_URL, headers={"User-Agent": "V6-Agent/1.0"}, timeout=10)
        response.raise_for_status()
        data = response.json()
        _EVENT_CACHE["data"], _EVENT_CACHE["timestamp"] = data, current_time
        return data
    except requests.RequestException as e:
        print(f"Error fetching event data: {e}")
        # The "data" key always exists (it may still be None), so fall back to [] explicitly
        return _EVENT_CACHE.get("data") or []
def fetch_twelvedata_prices(api_key, symbol='EUR/USD', interval='5min', output_size=200):
    try:
        td = TDClient(apikey=api_key)
        ts = td.time_series(symbol=symbol, interval=interval, outputsize=output_size)
        df = ts.as_pandas().sort_index(ascending=True)
        df.index.name = 'Datetime'
        df.reset_index(inplace=True)
        return df
    except Exception as e:
        print(f"Error fetching price data: {e}")
        return pd.DataFrame()
def create_feature_set_for_inference(price_df, events_json, finbert_tokenizer, finbert_model):
    """Build the model's feature frame from raw prices and the economic-event feed."""
    price_features = price_df.copy()
    price_features['Datetime'] = pd.to_datetime(price_features['Datetime'])
    price_features.set_index('Datetime', inplace=True)
    if price_features.index.tz is None:
        price_features = price_features.tz_localize('UTC')
    else:
        price_features = price_features.tz_convert('UTC')
    price_features.rename(columns={'close': 'Price', 'open': 'Open', 'high': 'High', 'low': 'Low'}, inplace=True)
    # RSI(14): ratio of mean gains to mean losses over a 14-bar window
    delta = price_features['Price'].diff()
    gain = delta.where(delta > 0, 0).rolling(window=14).mean()
    loss = (-delta.where(delta < 0, 0)).rolling(window=14).mean()
    price_features['RSI'] = 100 - (100 / (1 + (gain / loss)))
    price_features['EMA_20'] = price_features['Price'].ewm(span=20, adjust=False).mean()
    # ATR(14): rolling mean of the true range
    high_low = price_features['High'] - price_features['Low']
    high_close = np.abs(price_features['High'] - price_features['Price'].shift())
    low_close = np.abs(price_features['Low'] - price_features['Price'].shift())
    tr = pd.concat([high_low, high_close, low_close], axis=1).max(axis=1)
    price_features['ATR'] = tr.rolling(window=14).mean()
    price_features.rename(columns={'Price': 'close', 'Open': 'open', 'High': 'high', 'Low': 'low'}, inplace=True)
    events = pd.DataFrame(events_json)
    if not events.empty:
        def parse_financial_number(s):
            if not isinstance(s, str) or not s:
                return np.nan
            s = s.strip().upper()
            multipliers = {'B': 1e9, 'M': 1e6, 'K': 1e3, '%': 0.01}
            val_str, multiplier = s, 1.0
            if s.endswith(tuple(multipliers.keys())):
                val_str, multiplier = s[:-1], multipliers[s[-1]]
            try:
                return float(val_str) * multiplier
            except (ValueError, TypeError):
                return np.nan
        if 'actual' in events.columns and 'forecast' in events.columns:
            events['surprise'] = (events['actual'].apply(parse_financial_number)
                                  - events['forecast'].apply(parse_financial_number)).fillna(0)
        else:
            events['surprise'] = 0
        # pd.to_datetime(..., utc=True) always yields tz-aware timestamps, so no extra localize step
        events['datetime'] = pd.to_datetime(events['date'], utc=True)
        events['detail'] = events['title'].fillna('') + ' ' + events['country'].fillna('')
        events.set_index('datetime', inplace=True)
        events.sort_index(inplace=True)
        inputs = finbert_tokenizer(events['detail'].tolist(), return_tensors='tf',
                                   padding=True, truncation=True, max_length=64)
        embeddings = finbert_model(inputs).last_hidden_state[:, 0, :].numpy()
        processed_events = pd.concat(
            [events, pd.DataFrame(embeddings, columns=[f'finbert_{i}' for i in range(768)], index=events.index)],
            axis=1)
    else:
        processed_events = pd.DataFrame()
    if processed_events.empty:
        # merge_asof needs a sorted DatetimeIndex on the right; skip the merge when there are no events
        merged_data = price_features.sort_index()
    else:
        merged_data = pd.merge_asof(left=price_features.sort_index(), right=processed_events,
                                    left_index=True, right_index=True,
                                    direction='backward', tolerance=pd.Timedelta(minutes=30))
    high_impact_events = (events[events['impact'] == 'High'].index
                          if 'impact' in events.columns and not events.empty else pd.Index([]))
    if not high_impact_events.empty:
        df_index_sec = merged_data.index.astype(np.int64).to_numpy() // 10**9
        event_times_sec = high_impact_events.astype(np.int64).to_numpy() // 10**9
        time_diffs = df_index_sec[:, None] - event_times_sec[None, :]
        merged_data['time_since_event'] = np.min(np.where(time_diffs >= 0, time_diffs, np.inf), axis=1) / 3600
        merged_data['time_to_event'] = np.min(np.where(time_diffs <= 0, -time_diffs, np.inf), axis=1) / 3600
    else:
        merged_data['time_since_event'] = 999
        merged_data['time_to_event'] = 999
    merged_data.replace([np.inf, -np.inf], 999, inplace=True)
    merged_data['hour_of_day'] = merged_data.index.hour
    merged_data['day_of_week'] = merged_data.index.dayofweek
    merged_data['session_london'] = ((merged_data['hour_of_day'] >= 7) & (merged_data['hour_of_day'] <= 16)).astype(int)
    merged_data['session_ny'] = ((merged_data['hour_of_day'] >= 12) & (merged_data['hour_of_day'] <= 21)).astype(int)
    merged_data['session_asian'] = ((merged_data['hour_of_day'] >= 22) | (merged_data['hour_of_day'] <= 7)).astype(int)
    merged_data.fillna(0, inplace=True)  # a dropna() after fillna(0) would be a no-op, so it is omitted
    return merged_data
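# The returned frame carries, per bar: open/high/low/close, RSI, EMA_20, ATR; when events
# are present, the 'surprise' column and FinBERT CLS embeddings (finbert_0..finbert_767);
# event proximity in hours (time_since_event / time_to_event, with 999 meaning none in range);
# and calendar features (hour_of_day, day_of_week, session_london/ny/asian flags).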
def download_models_from_hf(repo_id, hf_token):
    print("Downloading agent models from Hugging Face Hub...")
    try:
        model_path = hf_hub_download(repo_id=repo_id, filename="multi_horizon_model.keras", token=hf_token)
        scaler_path = hf_hub_download(repo_id=repo_id, filename="multi_horizon_scaler.joblib", token=hf_token)
        features_path = hf_hub_download(repo_id=repo_id, filename="multi_horizon_features.json", token=hf_token)
        print("Models downloaded successfully.")
        return model_path, scaler_path, features_path
    except Exception as e:
        print(f"FATAL: Failed to download models: {e}")
        raise
def send_ntfy_notification(topic, trade_thesis):
    if not topic:
        print("NTFY topic not set. Skipping notification.")
        return
    title = f"New Trade Signal: {trade_thesis.get('action')} EUR/USD"
    message = (
        f"Confidence: {trade_thesis.get('confidence')} ({trade_thesis.get('strategy_type')})\n"
        f"Reasoning: {trade_thesis.get('reasoning')}\n"
        f"Entry: {trade_thesis.get('entry')}\n"
        f"SL: {trade_thesis.get('stop_loss')} | TP: {trade_thesis.get('take_profit')}"
    )
    try:
        requests.post(
            f"https://ntfy.sh/{topic}",
            data=message.encode('utf-8'),
            headers={"Title": title},
            timeout=10  # avoid blocking the worker loop if ntfy.sh is slow
        )
        print("ntfy notification sent successfully!")
    except requests.exceptions.RequestException as e:
        print(f"Failed to send ntfy notification: {e}")
# ===============================================
# START: ADAPTIVE META-CONTROLLER (V2 — Contextual LinUCB)
# ===============================================
class LinUCBBandit:
    """A simple LinUCB contextual bandit implementation."""
    def __init__(self, strategies, d, alpha=1.0, regularization=1.0):
        self.strategies = list(strategies)
        self.d = d
        self.alpha = alpha
        self.reg = regularization
        self.A = {s: (self.reg * np.eye(self.d)) for s in self.strategies}
        self.b = {s: np.zeros(self.d) for s in self.strategies}

    def _get_ucb(self, s, x):
        A_inv = np.linalg.inv(self.A[s])
        theta = A_inv.dot(self.b[s])
        mean = theta.dot(x)
        var = x.dot(A_inv).dot(x)
        bonus = self.alpha * math.sqrt(max(var, 0.0))
        return mean + bonus, mean

    def select(self, context_vector):
        scores = {s: self._get_ucb(s, context_vector)[0] for s in self.strategies}
        return max(scores, key=scores.get)

    def update(self, strategy, context_vector, reward):
        x = context_vector.reshape(-1)
        self.A[strategy] += np.outer(x, x)
        self.b[strategy] += reward * x
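# LinUCB recap (as implemented above): per strategy s the bandit keeps
#   A_s = reg*I + sum(x x^T)   and   b_s = sum(reward * x),
# estimates theta_s = A_s^{-1} b_s, and scores a context x with
#   theta_s . x + alpha * sqrt(x^T A_s^{-1} x).
# Minimal standalone sketch with made-up numbers:
#   bandit = LinUCBBandit(["a", "b"], d=3, alpha=1.0)
#   x = np.array([0.2, 0.5, 1.0])
#   arm = bandit.select(x)       # fresh arms score only the exploration bonus
#   bandit.update(arm, x, 1.0)   # rank-one update once the reward is observed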
class PerformanceLogger:
    """Append signals and outcomes to a CSV for meta-learning and replay."""
    def __init__(self, path="agent_signals_log.csv"):
        self.path = path
        header = ["timestamp", "strategy", "action", "entry", "stop_loss", "take_profit",
                  "price_at_signal", "eval_time", "pnl", "reward", "context_hash"]
        if not os.path.exists(self.path):
            with open(self.path, "w", newline='') as f:
                csv.writer(f).writerow(header)

    def log_signal(self, ts, strategy, action, entry, sl, tp, price, eval_time, context_hash):
        # pnl and reward (columns 8 and 9) start empty and are filled in once the signal is scored
        with open(self.path, "a", newline='') as f:
            csv.writer(f).writerow([ts, strategy, action, entry, sl, tp, price, eval_time, "", "", context_hash])

    def update_outcome(self, ts, pnl, reward):
        # Per-row alternative to the bulk rewrite done by evaluate_pending_signals()
        with open(self.path, "r", newline='') as f:
            rows = list(csv.reader(f))
        filled = False
        for i in range(len(rows) - 1, 0, -1):
            if rows[i][0] == ts and rows[i][8] == "":
                rows[i][8] = f"{pnl:.6f}"
                rows[i][9] = f"{reward:.6f}"
                filled = True
                break
        if filled:
            with open(self.path, "w", newline='') as f:
                csv.writer(f).writerows(rows)
class PageHinkley:
    """Page-Hinkley change detector for streaming losses/returns."""
    def __init__(self, delta=0.0001, lambda_=40, alpha=1 - 1e-3):
        self.mean = 0.0
        self.delta = delta        # tolerated drift per step
        self.lambda_ = lambda_    # alarm threshold
        self.alpha = alpha        # forgetting factor for the running mean
        self.cumulative = 0.0

    def update(self, x):
        # Exponentially-weighted running mean of the stream
        self.mean = self.mean * self.alpha + x * (1 - self.alpha)
        # Accumulate downward deviations; clip at 0 so only sustained drops build up
        self.cumulative = min(self.cumulative + x - self.mean - self.delta, 0)
        if -self.cumulative > self.lambda_:
            self.cumulative = 0  # reset after raising the alarm
            return True
        return False
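# Note on scale: lambda_ must match the magnitude of the values streamed into update().
# The worker below feeds -pnl in raw EUR/USD price units (a pip is ~1e-4), so the default
# lambda_=40 is effectively unreachable; a much smaller threshold (or streaming pips
# instead of raw price deltas) is needed for the drift alarm to ever fire.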
class StrategyManager:
    """Wrap strategies with a uniform callable interface."""
    def __init__(self, situation_room, prediction_engine):
        self.situation_room = situation_room
        self.prediction_engine = prediction_engine

    def list_strategies(self):
        # The canonical rule-based strategy using full multi-horizon predictions
        def predictive_strategy(seq):
            preds = self.prediction_engine.predict_single(seq)
            return self.situation_room.generate_thesis(preds, seq)
        # A simpler strategy that does not use the transformer predictions
        def ema_crossover_strategy(seq):
            return self.situation_room.generate_thesis({}, seq)
        return {
            "predictive_rule_based": predictive_strategy,
            "ema_crossover": ema_crossover_strategy,
        }
def context_hash_from_df(df):
    r = df.iloc[-1]
    keys = [k for k in ["close", "ATR", "EMA_20", "RSI", "session_london"] if k in r.index]
    vals = [f"{r[k]:.6f}" for k in keys]
    return "_".join(vals) if vals else f"{float(r.get('close', 0.0)):.6f}"

def fetch_current_price_or_last(seq):
    return float(seq.iloc[-1]['close'])

def build_context_vector_from_features(df, d=16):
    """Create a fixed-size numeric context vector from the features DataFrame's last row."""
    last = df.iloc[-1]
    feature_keys = [k for k in ['close', 'ATR', 'EMA_20', 'RSI', 'volume',
                                'time_since_event', 'time_to_event', 'hour_of_day'] if k in last.index]
    vec = [float(last.get(k, 0.0)) for k in feature_keys if math.isfinite(float(last.get(k, 0.0)))]
    close = float(last.get('close', 1.0) or 1.0)
    vec = [v / close for v in vec]  # crude normalization: express every feature relative to price
    # Truncate or zero-pad to the bandit's fixed dimension d
    if len(vec) >= d:
        vec = vec[:d]
    else:
        vec = vec + [0.0] * (d - len(vec))
    return np.array(vec, dtype=float)
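# Illustrative values: with close=1.085, ATR=0.0012, EMA_20=1.084, RSI=55, hour_of_day=14
# the vector begins [1.0, ~0.0011, ~0.9991, ~50.7, ..., ~12.9] (each entry divided by close)
# and is then zero-padded out to d=16.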
def evaluate_pending_signals(perf_logger_path, bandit, change_detector, feature_provider):
    """Score pending signals whose evaluation horizon has passed and feed rewards to the bandit."""
    now = pd.Timestamp.now(tz='UTC')
    try:
        with open(perf_logger_path, "r", newline='') as f:
            rows = list(csv.reader(f))
    except FileNotFoundError:
        return
    latest_features = feature_provider()
    if latest_features is None or latest_features.empty:
        return
    updated = False
    for i in range(1, len(rows)):
        try:
            if rows[i][8] != "":  # pnl already filled in
                continue
            eval_time = pd.to_datetime(rows[i][7])
            if eval_time > now:
                continue
            strategy, action, entry = rows[i][1], rows[i][2], float(rows[i][3])
            price_now = fetch_current_price_or_last(latest_features)
            # Directional PnL at the evaluation horizon, in raw price units
            pnl = (price_now - entry) if action == "BUY" else (entry - price_now)
            reward = 1.0 if pnl > 0 else 0.0  # binary reward keeps LinUCB updates bounded
            rows[i][8] = f"{pnl:.6f}"
            rows[i][9] = f"{reward:.6f}"
            # Note: the update uses the current context, not the context captured at signal time
            ctx = build_context_vector_from_features(latest_features)
            bandit.update(strategy, ctx, reward)
            if change_detector.update(-pnl):
                print("! MODEL DRIFT DETECTED by Page-Hinkley test !")
            updated = True
        except (ValueError, IndexError) as e:
            print(f"Skipping evaluation of malformed row {i}: {e}")
            continue
    if updated:
        with open(perf_logger_path, "w", newline='') as f:
            csv.writer(f).writerows(rows)
def main_worker():
    print("--- [Adaptive v2] Background Worker Thread Started ---")
    print("WORKER: Loading secrets...")
    api_key = os.environ.get('TWELVE_DATA_API_KEY')
    hf_token = os.environ.get('HF_TOKEN')
    ntfy_topic = os.environ.get('NTFY_TOPIC')
    HF_REPO_ID = "Badumetsibb/conscious-trading-agent-models"
    if not all([api_key, hf_token, ntfy_topic]):
        print("FATAL: Worker secrets missing (TWELVE_DATA_API_KEY, HF_TOKEN, NTFY_TOPIC). Shutting down.")
        with open('status.json', 'w') as f:
            json.dump({"signal": "FATAL ERROR",
                       "reasoning": "One or more secrets are missing. Please check Space settings."}, f)
        return
    print("WORKER: Downloading models...")
    model_path, scaler_path, features_path = download_models_from_hf(HF_REPO_ID, hf_token)
    print("WORKER: Initializing agent components...")
    prediction_engine = PredictionCoreTransformer()
    prediction_engine.load_model_and_scaler(model_path, scaler_path, features_path)
    finbert_tokenizer = BertTokenizer.from_pretrained('ProsusAI/finbert')
    finbert_model = TFBertModel.from_pretrained('ProsusAI/finbert', from_pt=True)
    BEST_PARAMS = {'sl_atr_multiplier': 2.5, 'tp_atr_multiplier': 4.0, 'medium_conf_risk_scaler': 0.5}
    situation_room = RuleBasedSituationRoom(BEST_PARAMS)
    regime_filter = MarketRegimeFilter()
    strategy_manager = StrategyManager(situation_room, prediction_engine)
    d = 16  # context vector dimension
    bandit = LinUCBBandit(strategy_manager.list_strategies().keys(), d=d, alpha=1.5)
    perf_logger = PerformanceLogger()
    change_detector = PageHinkley()

    def _feature_provider():
        price_data = fetch_twelvedata_prices(api_key, output_size=500)  # fetch extra bars for feature stability
        if price_data.empty:
            return None
        events_data = fetch_live_events_with_cache()
        return create_feature_set_for_inference(price_data, events_data, finbert_tokenizer, finbert_model)

    print("--- WORKER: Initialization Complete. Starting main adaptive loop. ---")
    while True:
        try:
            print(f"WORKER: [{pd.Timestamp.now(tz='UTC')}] Waking up...")
            # 1. Fetch latest features
            features = _feature_provider()
            if features is None or len(features) < prediction_engine.sequence_length:
                print("WORKER: Not enough data points for analysis. Waiting...")
                time.sleep(300)
                continue
            input_sequence = features.iloc[-prediction_engine.sequence_length:]
            # 2. Build context vector and select strategy
            ctx_vec = build_context_vector_from_features(input_sequence, d=d)
            available_strategies = strategy_manager.list_strategies()
            chosen_strategy_name = bandit.select(ctx_vec)
            # 3. Generate trade thesis from chosen strategy
            trade_thesis = available_strategies[chosen_strategy_name](input_sequence)
            # 4. Filter signal by market regime
            causal_engine = CausalReasoningNetwork(input_sequence)
            final_sequence_with_regime = causal_engine.identify_volatility_regimes()
            current_regime = final_sequence_with_regime.iloc[-1]['regime']
            is_tradeable = regime_filter.should_trade(current_regime, trade_thesis)
            final_action = trade_thesis['action'] if is_tradeable else "NO TRADE (FILTERED)"
            # 5. Log signal and notify
            ts = str(pd.Timestamp.now(tz='UTC'))
            if final_action in ["BUY", "SELL"]:
                context_hash = context_hash_from_df(input_sequence)
                eval_horizon_minutes = 30
                perf_logger.log_signal(
                    ts, chosen_strategy_name, final_action,
                    trade_thesis.get('entry'), trade_thesis.get('stop_loss'), trade_thesis.get('take_profit'),
                    fetch_current_price_or_last(input_sequence),
                    (pd.Timestamp.now(tz='UTC') + pd.Timedelta(minutes=eval_horizon_minutes)).isoformat(),
                    context_hash
                )
                augmented_thesis = trade_thesis.copy()
                augmented_thesis['reasoning'] = f"Strategy: {chosen_strategy_name}. {augmented_thesis.get('reasoning', '')}"
                send_ntfy_notification(ntfy_topic, augmented_thesis)
            # 6. Evaluate past signals and update bandit
            evaluate_pending_signals(perf_logger.path, bandit, change_detector, _feature_provider)
            # 7. Update dashboard status
            status = {
                "last_checked": ts,
                "market_price": f"{input_sequence.iloc[-1]['close']:.5f}",
                "market_regime": current_regime,
                "signal": final_action,
                "reasoning": (f"Bandit chose '{chosen_strategy_name}'. " +
                              (trade_thesis['reasoning'] if is_tradeable
                               else f"Strategy '{trade_thesis['strategy_type']}' not allowed in current '{current_regime}' regime."))
            }
            with open('status.json', 'w') as f:
                json.dump(status, f)
            print(f"WORKER: Analysis complete. Chosen Strategy: {chosen_strategy_name}. Signal: {final_action}. Sleeping for 5 minutes.")
            time.sleep(300)
        except Exception as e:
            print(f"WORKER ERROR: {e}")
            traceback.print_exc()
            time.sleep(60)
# ===============================================
# END: ADAPTIVE META-CONTROLLER
# ===============================================
# --- GRADIO DASHBOARD INTERFACE ---
def get_latest_status():
    try:
        if not os.path.exists('status.json'):
            return "Worker has not completed first cycle.", "", "", "", ""
        with open('status.json', 'r') as f:
            status = json.load(f)
        return (f"Status from worker at: {status.get('last_checked', 'N/A')}",
                status.get('market_price', 'N/A'), status.get('market_regime', 'N/A'),
                status.get('signal', 'N/A'), status.get('reasoning', 'N/A'))
    except Exception as e:
        return f"Error reading status file: {e}", "", "", "", ""
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🧠 V6.2 Autonomous Trading Agent Dashboard (Adaptive)")
    gr.Markdown("This dashboard displays the real-time status of the 24/7 adaptive worker agent running in the background of this Space.")
    secret_status = ("✅ API secrets appear to be set."
                     if all(os.environ.get(k) for k in ['TWELVE_DATA_API_KEY', 'NTFY_TOPIC', 'HF_TOKEN'])
                     else "❌ One or more secrets are MISSING. Please set them in Settings and restart.")
    gr.Markdown(f"**Secrets Status:** {secret_status}")
    refresh_btn = gr.Button("Refresh Status", variant="primary")
    status_output = gr.Textbox(label="Status", interactive=False)
    gr.Markdown("## Agent's Last Analysis")
    with gr.Row():
        price_output = gr.Textbox(label="Last Market Price")
        regime_output = gr.Textbox(label="Last Market Regime")
    action_output = gr.Textbox(label="Last Signal / Action")
    reasoning_output = gr.Textbox(label="Last Reasoning", lines=3)
    refresh_btn.click(fn=get_latest_status, inputs=[],
                      outputs=[status_output, price_output, regime_output, action_output, reasoning_output])
# --- APPLICATION STARTUP ---
if __name__ == "__main__":
    # Run the agent loop in a daemon thread so the Gradio server owns the main thread
    worker_thread = threading.Thread(target=main_worker, daemon=True)
    worker_thread.start()
    demo.launch()