Riy777 committed on
Commit
0ba6752
·
verified ·
1 Parent(s): 6bc8791

Update ml_engine/pattern_engine.py

Browse files
Files changed (1) hide show
  1. ml_engine/pattern_engine.py +70 -24
ml_engine/pattern_engine.py CHANGED
@@ -1,9 +1,10 @@
1
  # ==============================================================================
2
- # 🧠 ml_engine/pattern_engine.py (Formerly Titan)
3
  # ==============================================================================
4
  # GEM-Architect Approved
5
- # - Renamed from TitanEngine to PatternEngine (Semantic Consistency).
6
- # - Logic: ResNet-1D for Pattern Recognition (Neural Network).
 
7
  # ==============================================================================
8
 
9
  import os
@@ -18,7 +19,7 @@ import warnings
18
  warnings.filterwarnings('ignore')
19
 
20
  # ------------------------------------------------------------------------------
21
- # 1. Model Architecture (ResNet-1D)
22
  # ------------------------------------------------------------------------------
23
  class ResidualBlock(nn.Module):
24
  def __init__(self, channels, kernel_size=3, dropout=0.2):
@@ -88,15 +89,16 @@ class PatternResNet(nn.Module):
88
  # ------------------------------------------------------------------------------
89
  class PatternEngine:
90
  def __init__(self, model_dir="ml_models/Unified_Models_V1"):
91
- # We assume the model file name remains 'cnn_best.pt' inside the folder
92
  self.model_path = os.path.join(model_dir, "cnn_best.pt")
93
  self.scaler_path = os.path.join(model_dir, "seq_scaler.pkl")
94
 
95
  self.model = None
96
  self.scaler = None
97
- self.device = torch.device("cpu")
98
  self.initialized = False
99
 
 
100
  self.features_list = [
101
  "log_ret","vol_spike","taker_buy_ratio","proxy_spread","amihud","avg_ticket_usd",
102
  "upper_wick_ratio","lower_wick_ratio","body_to_range","atr_pct_signal"
@@ -104,33 +106,40 @@ class PatternEngine:
104
  self.WINDOW_SIZE = 64
105
 
106
  async def initialize(self):
 
107
  if self.initialized: return True
108
 
109
- print(f"🧠 [PatternNet] Initializing Neural Network from {self.model_path}...")
110
  try:
111
  if not os.path.exists(self.model_path) or not os.path.exists(self.scaler_path):
112
- print(f"❌ [PatternNet] Artifacts missing in {self.model_path}")
113
  return False
114
 
 
115
  self.scaler = joblib.load(self.scaler_path)
 
 
116
  self.model = PatternResNet(in_ch=len(self.features_list)).to(self.device)
117
 
 
118
  checkpoint = torch.load(self.model_path, map_location=self.device)
119
  if isinstance(checkpoint, dict) and 'model' in checkpoint:
120
  self.model.load_state_dict(checkpoint['model'])
121
  else:
122
  self.model.load_state_dict(checkpoint)
123
 
124
- self.model.eval()
 
125
  self.initialized = True
126
- print(f"✅ [PatternNet] Online. ResNet-1D Ready for Pattern Recognition.")
127
  return True
128
 
129
  except Exception as e:
130
- print(f"❌ [PatternNet] Init Error: {e}")
131
  traceback.print_exc()
132
  return False
133
 
 
134
  def _wilder_rma(self, x, n):
135
  x = np.asarray(x, dtype=float)
136
  return pd.Series(x).ewm(alpha=1.0/n, adjust=False).mean().values
@@ -142,80 +151,117 @@ class PatternEngine:
142
  return pd.Series(x).rolling(w, min_periods=1).median().values
143
 
144
  def preprocess_live_data(self, df):
 
 
 
145
  try:
146
  df = df.copy()
147
- if 'timestamp' in df.columns: df = df.sort_values('timestamp')
 
 
148
 
 
149
  close = df['close'].values.astype(float)
150
  high = df['high'].values.astype(float)
151
  low = df['low'].values.astype(float)
152
  open_ = df['open'].values.astype(float)
153
 
154
- if 'quote_volume' in df.columns: vol_usd = df['quote_volume'].values.astype(float)
155
- else: vol_usd = (close * df['volume'].values).astype(float)
 
 
 
156
 
157
  vol_usd = np.maximum(vol_usd, 1.0)
158
 
 
159
  prev_close = np.roll(close, 1); prev_close[0] = close[0]
160
  tr = np.maximum(high - low, np.maximum(np.abs(high - prev_close), np.abs(low - prev_close)))
161
  atr = self._wilder_rma(tr, 14)
162
  atr_safe = np.maximum(atr, 1e-9)
163
 
 
164
  df['log_ret'] = np.log(close / np.maximum(prev_close, 1e-9))
 
165
  vol_ma = self._rolling_mean(vol_usd, 20)
166
  df['vol_spike'] = vol_usd / np.maximum(vol_ma, 1e-9)
167
 
 
168
  if 'taker_buy_base_asset_volume' in df.columns:
169
  taker_vol = df['taker_buy_base_asset_volume'].values * close
170
  df['taker_buy_ratio'] = taker_vol / vol_usd
171
- else: df['taker_buy_ratio'] = 0.5
 
172
 
173
  raw_spread = (high - low) / np.maximum(close, 1e-9)
174
  df['proxy_spread'] = self._rolling_median(raw_spread, 14) * 0.5
 
175
  df['amihud'] = np.abs(df['log_ret']) / vol_usd
176
 
177
- if 'num_trades' in df.columns: num_trades = df['num_trades'].values
178
- else: num_trades = vol_usd / 1000.0
 
 
 
179
  df['avg_ticket_usd'] = vol_usd / np.maximum(num_trades, 1.0)
180
 
181
  rng = np.maximum(high - low, 1e-9)
182
  df['upper_wick_ratio'] = (high - np.maximum(open_, close)) / rng
183
  df['lower_wick_ratio'] = (np.minimum(open_, close) - low) / rng
184
  df['body_to_range'] = np.abs(close - open_) / rng
 
185
  df['atr_pct_signal'] = atr_safe / close
186
 
 
187
  df = df.replace([np.inf, -np.inf], np.nan).fillna(0)
188
 
189
- if len(df) < self.WINDOW_SIZE: return None
 
 
 
190
  feature_matrix = df[self.features_list].iloc[-self.WINDOW_SIZE:].values.astype(np.float32)
 
191
  return feature_matrix
192
 
193
  except Exception as e:
194
- print(f"❌ [PatternNet] Preprocessing Error: {e}")
195
  return None
196
 
197
  def predict(self, ohlcv_data: dict) -> dict:
 
 
 
198
  if not self.initialized: return {'score': 0.0, 'error': 'Not Initialized'}
199
 
200
  try:
201
  target_tf = '15m'
202
  raw_data = ohlcv_data.get(target_tf)
203
- if raw_data is None: return {'score': 0.0, 'error': 'No 15m Data'}
 
 
204
 
205
  if isinstance(raw_data, list):
206
  df = pd.DataFrame(raw_data, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
207
  elif isinstance(raw_data, pd.DataFrame):
208
  df = raw_data
209
- else: return {'score': 0.0, 'error': f'Invalid Data Type'}
 
210
 
211
- if df.empty: return {'score': 0.0, 'error': 'Empty Data'}
 
212
 
 
213
  X_raw = self.preprocess_live_data(df)
214
- if X_raw is None: return {'score': 0.0, 'error': 'Not enough data'}
 
215
 
 
216
  X_scaled = self.scaler.transform(X_raw)
 
 
217
  X_tensor = torch.tensor(X_scaled.T).unsqueeze(0).to(self.device)
218
 
 
219
  with torch.no_grad():
220
  logits = self.model(X_tensor)
221
  probs = torch.softmax(logits, dim=1).cpu().numpy()[0]
@@ -227,6 +273,6 @@ class PatternEngine:
227
  }
228
 
229
  except Exception as e:
230
- print(f"❌ [PatternNet] Inference Error: {e}")
231
  traceback.print_exc()
232
  return {'score': 0.0, 'error': str(e)}
 
1
  # ==============================================================================
2
+ # 🧠 ml_engine/pattern_engine.py (Refactored TitanEngine)
3
  # ==============================================================================
4
  # GEM-Architect Approved
5
+ # - Restored full feature engineering logic from original Titan.
6
+ # - Renamed Class: TitanEngine -> PatternEngine.
7
+ # - Renamed Model: TitanResNet -> PatternResNet.
8
  # ==============================================================================
9
 
10
  import os
 
19
  warnings.filterwarnings('ignore')
20
 
21
  # ------------------------------------------------------------------------------
22
+ # 1. Model Architecture (Must match training EXACTLY)
23
  # ------------------------------------------------------------------------------
24
  class ResidualBlock(nn.Module):
25
  def __init__(self, channels, kernel_size=3, dropout=0.2):
 
89
  # ------------------------------------------------------------------------------
90
  class PatternEngine:
91
  def __init__(self, model_dir="ml_models/Unified_Models_V1"):
92
+ # Expecting the same model file names
93
  self.model_path = os.path.join(model_dir, "cnn_best.pt")
94
  self.scaler_path = os.path.join(model_dir, "seq_scaler.pkl")
95
 
96
  self.model = None
97
  self.scaler = None
98
+ self.device = torch.device("cpu") # Inference on CPU is safer for stability
99
  self.initialized = False
100
 
101
+ # Exact Features used in Training (Critical to keep)
102
  self.features_list = [
103
  "log_ret","vol_spike","taker_buy_ratio","proxy_spread","amihud","avg_ticket_usd",
104
  "upper_wick_ratio","lower_wick_ratio","body_to_range","atr_pct_signal"
 
106
  self.WINDOW_SIZE = 64
107
 
108
  async def initialize(self):
109
+ """Load Model and Scaler"""
110
  if self.initialized: return True
111
 
112
+ print(f"🧠 [PatternEngine] Initializing PyTorch Engine from {self.model_path}...")
113
  try:
114
  if not os.path.exists(self.model_path) or not os.path.exists(self.scaler_path):
115
+ print(f"❌ [PatternEngine] Artifacts missing in {self.model_path}")
116
  return False
117
 
118
+ # 1. Load Scaler
119
  self.scaler = joblib.load(self.scaler_path)
120
+
121
+ # 2. Load Model
122
  self.model = PatternResNet(in_ch=len(self.features_list)).to(self.device)
123
 
124
+ # Safe loading for CPU
125
  checkpoint = torch.load(self.model_path, map_location=self.device)
126
  if isinstance(checkpoint, dict) and 'model' in checkpoint:
127
  self.model.load_state_dict(checkpoint['model'])
128
  else:
129
  self.model.load_state_dict(checkpoint)
130
 
131
+ self.model.eval() # Set to evaluation mode
132
+
133
  self.initialized = True
134
+ print(f"✅ [PatternEngine] Online. ResNet-1D Loaded successfully.")
135
  return True
136
 
137
  except Exception as e:
138
+ print(f"❌ [PatternEngine] Init Error: {e}")
139
  traceback.print_exc()
140
  return False
141
 
142
+ # --- Feature Engineering Helpers (Restored Fully) ---
143
  def _wilder_rma(self, x, n):
144
  x = np.asarray(x, dtype=float)
145
  return pd.Series(x).ewm(alpha=1.0/n, adjust=False).mean().values
 
151
  return pd.Series(x).rolling(w, min_periods=1).median().values
152
 
153
  def preprocess_live_data(self, df):
154
+ """
155
+ Turns raw OHLCV DataFrame into the exact Feature Matrix used for training.
156
+ """
157
  try:
158
  df = df.copy()
159
+ # Ensure sorting
160
+ if 'timestamp' in df.columns:
161
+ df = df.sort_values('timestamp')
162
 
163
+ # Basic conversions
164
  close = df['close'].values.astype(float)
165
  high = df['high'].values.astype(float)
166
  low = df['low'].values.astype(float)
167
  open_ = df['open'].values.astype(float)
168
 
169
+ # Use quote volume if available
170
+ if 'quote_volume' in df.columns:
171
+ vol_usd = df['quote_volume'].values.astype(float)
172
+ else:
173
+ vol_usd = (close * df['volume'].values).astype(float)
174
 
175
  vol_usd = np.maximum(vol_usd, 1.0)
176
 
177
+ # 1. ATR (14)
178
  prev_close = np.roll(close, 1); prev_close[0] = close[0]
179
  tr = np.maximum(high - low, np.maximum(np.abs(high - prev_close), np.abs(low - prev_close)))
180
  atr = self._wilder_rma(tr, 14)
181
  atr_safe = np.maximum(atr, 1e-9)
182
 
183
+ # 2. Features Calculation
184
  df['log_ret'] = np.log(close / np.maximum(prev_close, 1e-9))
185
+
186
  vol_ma = self._rolling_mean(vol_usd, 20)
187
  df['vol_spike'] = vol_usd / np.maximum(vol_ma, 1e-9)
188
 
189
+ # Taker buy ratio
190
  if 'taker_buy_base_asset_volume' in df.columns:
191
  taker_vol = df['taker_buy_base_asset_volume'].values * close
192
  df['taker_buy_ratio'] = taker_vol / vol_usd
193
+ else:
194
+ df['taker_buy_ratio'] = 0.5
195
 
196
  raw_spread = (high - low) / np.maximum(close, 1e-9)
197
  df['proxy_spread'] = self._rolling_median(raw_spread, 14) * 0.5
198
+
199
  df['amihud'] = np.abs(df['log_ret']) / vol_usd
200
 
201
+ # Num trades proxy
202
+ if 'num_trades' in df.columns:
203
+ num_trades = df['num_trades'].values
204
+ else:
205
+ num_trades = vol_usd / 1000.0
206
  df['avg_ticket_usd'] = vol_usd / np.maximum(num_trades, 1.0)
207
 
208
  rng = np.maximum(high - low, 1e-9)
209
  df['upper_wick_ratio'] = (high - np.maximum(open_, close)) / rng
210
  df['lower_wick_ratio'] = (np.minimum(open_, close) - low) / rng
211
  df['body_to_range'] = np.abs(close - open_) / rng
212
+
213
  df['atr_pct_signal'] = atr_safe / close
214
 
215
+ # Filter NaNs
216
  df = df.replace([np.inf, -np.inf], np.nan).fillna(0)
217
 
218
+ # Extract only the needed window
219
+ if len(df) < self.WINDOW_SIZE:
220
+ return None
221
+
222
  feature_matrix = df[self.features_list].iloc[-self.WINDOW_SIZE:].values.astype(np.float32)
223
+
224
  return feature_matrix
225
 
226
  except Exception as e:
227
+ print(f"❌ [PatternEngine] Preprocessing Error: {e}")
228
  return None
229
 
230
  def predict(self, ohlcv_data: dict) -> dict:
231
+ """
232
+ Main Interface used by Processor.
233
+ """
234
  if not self.initialized: return {'score': 0.0, 'error': 'Not Initialized'}
235
 
236
  try:
237
  target_tf = '15m'
238
  raw_data = ohlcv_data.get(target_tf)
239
+
240
+ if raw_data is None:
241
+ return {'score': 0.0, 'error': 'No 15m Data'}
242
 
243
  if isinstance(raw_data, list):
244
  df = pd.DataFrame(raw_data, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
245
  elif isinstance(raw_data, pd.DataFrame):
246
  df = raw_data
247
+ else:
248
+ return {'score': 0.0, 'error': f'Invalid Data Type: {type(raw_data)}'}
249
 
250
+ if df.empty:
251
+ return {'score': 0.0, 'error': 'Empty Data'}
252
 
253
+ # Preprocess
254
  X_raw = self.preprocess_live_data(df)
255
+ if X_raw is None:
256
+ return {'score': 0.0, 'error': 'Not enough data for window'}
257
 
258
+ # Scale
259
  X_scaled = self.scaler.transform(X_raw)
260
+
261
+ # Prepare Tensor
262
  X_tensor = torch.tensor(X_scaled.T).unsqueeze(0).to(self.device)
263
 
264
+ # Inference
265
  with torch.no_grad():
266
  logits = self.model(X_tensor)
267
  probs = torch.softmax(logits, dim=1).cpu().numpy()[0]
 
273
  }
274
 
275
  except Exception as e:
276
+ print(f"❌ [PatternEngine] Inference Error: {e}")
277
  traceback.print_exc()
278
  return {'score': 0.0, 'error': str(e)}