monstaws committed on
Commit
03eaaca
·
verified ·
1 Parent(s): be5d7ae

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -63
app.py CHANGED
@@ -5,10 +5,10 @@ import numpy as np
5
  import pandas as pd
6
  from fastapi import FastAPI
7
  from pydantic import BaseModel
8
- from typing import List, Dict, Any
9
 
10
  # ==========================================
11
- # 1. DEFINE THE NETWORK (Standard SAC Actor)
12
  # ==========================================
13
  class Actor(nn.Module):
14
  def __init__(self, state_dim, action_dim, hidden_dim=256):
@@ -31,43 +31,24 @@ class Actor(nn.Module):
31
  return mean, log_std
32
 
33
  # ==========================================
34
- # 2. DYNAMIC SCALER (The Fix)
35
  # ==========================================
36
  class DynamicScaler:
37
- def __init__(self):
38
- self.mean = None
39
- self.scale = None
40
-
41
  def fit_transform(self, data):
42
- """
43
- Fits to the provided history (batch x features) and scales it.
44
- Returns the scaled data.
45
- """
46
- # Convert to numpy if list
47
  X = np.array(data, dtype=np.float32)
48
-
49
- # Calculate stats along axis 0 (time)
50
- self.mean = np.mean(X, axis=0)
51
- self.scale = np.std(X, axis=0)
52
-
53
- # Avoid division by zero
54
- self.scale[self.scale < 1e-8] = 1.0
55
-
56
- # Z-Score Normalization: (X - Mean) / Std
57
- X_scaled = (X - self.mean) / self.scale
58
-
59
- # Clip to remove extreme outliers (just like training)
60
- X_scaled = np.clip(X_scaled, -5, 5)
61
-
62
- return X_scaled
63
 
64
  # ==========================================
65
- # 3. SETUP API & LOAD MODEL
66
  # ==========================================
67
  app = FastAPI()
68
 
69
- # Configuration
70
- STATE_DIM = 96
71
  ACTION_DIM = 1
72
  HIDDEN_DIM = 256
73
 
@@ -75,60 +56,54 @@ device = torch.device("cpu")
75
  actor = Actor(STATE_DIM, ACTION_DIM, HIDDEN_DIM).to(device)
76
  scaler = DynamicScaler()
77
 
 
78
  try:
79
  print("Loading model...")
80
  checkpoint = torch.load("sac_v9_pytorch_best_eval.pt", map_location=device)
81
  actor.load_state_dict(checkpoint['actor'])
82
  actor.eval()
83
- print("✅ Model loaded successfully!")
84
  except Exception as e:
85
  print(f"❌ Error loading model: {e}")
86
 
87
  # ==========================================
88
- # 4. API ENDPOINT
89
  # ==========================================
90
  class InputData(BaseModel):
91
- # Expects a list of lists (History of features)
92
- # e.g., [[feat1, feat2...], [feat1, feat2...]]
93
  history: List[List[float]]
94
- portfolio: List[float] # [pos, ret, dd, last_ret, last_rsi]
95
 
96
  @app.post("/predict")
97
  def predict(data: InputData):
98
  try:
99
- # 1. Get History & Scale It
100
- raw_history = data.history
101
-
102
- if len(raw_history) < 2:
103
- return {"error": "Need at least 2 rows of history to calculate scaling"}
104
 
105
- # Fit scaler on history and transform
106
- scaled_history = scaler.fit_transform(raw_history)
107
 
108
- # Take the very last row (Current State)
109
- current_market_features = scaled_history[-1]
110
 
111
- # 2. Append Portfolio Info (Passed separately, not scaled)
112
- # Note: In training, portfolio info was normalized implicitly by limits
113
- # We clip it to be safe
114
- portfolio_features = np.array(data.portfolio, dtype=np.float32)
115
- portfolio_features = np.clip(portfolio_features, -10, 10)
116
 
117
- # 3. Combine
118
- state = np.concatenate([current_market_features, portfolio_features])
 
 
 
 
 
 
 
 
 
119
 
120
- # Check size
121
- if len(state) != STATE_DIM:
122
- # Auto-pad with zeros if mismatch (Emergency fallback)
123
- if len(state) < STATE_DIM:
124
- padding = np.zeros(STATE_DIM - len(state))
125
- state = np.concatenate([state, padding])
126
- else:
127
- state = state[:STATE_DIM]
128
-
129
  # 4. Inference
130
  state_tensor = torch.FloatTensor(state).unsqueeze(0).to(device)
131
-
132
  with torch.no_grad():
133
  mean, _ = actor(state_tensor)
134
  action = torch.tanh(mean).item()
@@ -136,8 +111,7 @@ def predict(data: InputData):
136
  return {
137
  "action": action,
138
  "signal": "BUY" if action > 0 else "SELL",
139
- "confidence": abs(action),
140
- "scaled_input_sample": current_market_features[:5].tolist() # Debug
141
  }
142
 
143
  except Exception as e:
 
5
  import pandas as pd
6
  from fastapi import FastAPI
7
  from pydantic import BaseModel
8
+ from typing import List
9
 
10
  # ==========================================
11
+ # 1. NETWORK DEFINITION
12
  # ==========================================
13
  class Actor(nn.Module):
14
  def __init__(self, state_dim, action_dim, hidden_dim=256):
 
31
  return mean, log_std
32
 
33
  # ==========================================
34
+ # 2. DYNAMIC SCALER
35
  # ==========================================
36
  class DynamicScaler:
 
 
 
 
37
  def fit_transform(self, data):
 
 
 
 
 
38
  X = np.array(data, dtype=np.float32)
39
+ mean = np.mean(X, axis=0)
40
+ scale = np.std(X, axis=0)
41
+ scale[scale < 1e-8] = 1.0
42
+ X_scaled = (X - mean) / scale
43
+ return np.clip(X_scaled, -5, 5)
 
 
 
 
 
 
 
 
 
 
44
 
45
  # ==========================================
46
+ # 3. SETUP
47
  # ==========================================
48
  app = FastAPI()
49
 
50
+ # !!! THE FIX: 53 DIMENSIONS !!!
51
+ STATE_DIM = 53
52
  ACTION_DIM = 1
53
  HIDDEN_DIM = 256
54
 
 
56
  actor = Actor(STATE_DIM, ACTION_DIM, HIDDEN_DIM).to(device)
57
  scaler = DynamicScaler()
58
 
59
+ # Load Model
60
  try:
61
  print("Loading model...")
62
  checkpoint = torch.load("sac_v9_pytorch_best_eval.pt", map_location=device)
63
  actor.load_state_dict(checkpoint['actor'])
64
  actor.eval()
65
+ print(f"✅ Model loaded! Expecting {STATE_DIM} inputs.")
66
  except Exception as e:
67
  print(f"❌ Error loading model: {e}")
68
 
69
  # ==========================================
70
+ # 4. ENDPOINT
71
  # ==========================================
72
  class InputData(BaseModel):
 
 
73
  history: List[List[float]]
74
+ portfolio: List[float]
75
 
76
  @app.post("/predict")
77
  def predict(data: InputData):
78
  try:
79
+ # 1. Scale History
80
+ if len(data.history) < 2:
81
+ return {"error": "Need at least 2 rows of history"}
 
 
82
 
83
+ scaled_history = scaler.fit_transform(data.history)
84
+ current_market = scaled_history[-1] # This likely has ~96 features
85
 
86
+ # 2. Prepare Portfolio (5 items)
87
+ portfolio = np.array(data.portfolio, dtype=np.float32)
88
 
89
+ # 3. Smart Concatenation to match 53 Inputs
90
+ # The model needs 53 inputs.
91
+ # Structure: [Market Features (48)] + [Portfolio (5)]
 
 
92
 
93
+ needed_market = STATE_DIM - len(portfolio) # 48
94
+
95
+ # Take the first 48 market features (most important ones like RSI, MACD usually first)
96
+ if len(current_market) >= needed_market:
97
+ market_part = current_market[:needed_market]
98
+ else:
99
+ # Pad with zeros if we somehow have less
100
+ padding = np.zeros(needed_market - len(current_market))
101
+ market_part = np.concatenate([current_market, padding])
102
+
103
+ state = np.concatenate([market_part, portfolio])
104
 
 
 
 
 
 
 
 
 
 
105
  # 4. Inference
106
  state_tensor = torch.FloatTensor(state).unsqueeze(0).to(device)
 
107
  with torch.no_grad():
108
  mean, _ = actor(state_tensor)
109
  action = torch.tanh(mean).item()
 
111
  return {
112
  "action": action,
113
  "signal": "BUY" if action > 0 else "SELL",
114
+ "confidence": abs(action)
 
115
  }
116
 
117
  except Exception as e: