File size: 9,661 Bytes
a9bcd08
 
 
 
 
 
 
 
 
 
 
 
 
 
a58e46f
a9bcd08
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import datetime
import yfinance as yf
import joblib
from sklearn.preprocessing import MinMaxScaler
import json
from tqdm import tqdm
import os
from typing import List, Dict, Any, Union, Tuple

class BiLSTMModel(nn.Module):
    """Bidirectional LSTM regressor.

    Maps a batch of sequences shaped (batch, seq_len, input_size) to a
    (batch, output_size) prediction taken from the final time step.
    """

    def __init__(self, input_size=1, hidden_size=64, num_layers=2, output_size=1):
        super(BiLSTMModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # Stacked bidirectional recurrent core
        self.lstm = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
            bidirectional=True,
        )

        # Projection head: forward + backward states are concatenated,
        # so the head consumes 2 * hidden_size features.
        self.fc = nn.Linear(2 * hidden_size, output_size)

    def forward(self, x):
        """Run the BiLSTM over `x` and project the last time step."""
        # Zero initial hidden/cell states. The leading dimension is
        # num_layers * 2 because each layer has a forward and a
        # backward direction.
        state_shape = (self.num_layers * 2, x.size(0), self.hidden_size)
        h0 = torch.zeros(state_shape, device=x.device)
        c0 = torch.zeros(state_shape, device=x.device)

        sequence_out, _ = self.lstm(x, (h0, c0))

        # Only the final step's features feed the linear head.
        return self.fc(sequence_out[:, -1, :])

def predict_future(model, last_sequence, steps, scaler_diff, current_price):
    """Roll the model forward `steps` days, blending each predicted price
    change with a Geometric-Brownian-Motion perturbation.

    Args:
        model: Trained network mapping a scaled difference sequence to the
            next scaled price difference.
        last_sequence: Tensor of shape (seq_len, 1) holding the most recent
            scaled price differences.
        steps: Number of future days to simulate.
        scaler_diff: Scaler whose inverse_transform maps model output back
            to raw price differences.
        current_price: Last observed price; the simulation starts here.

    Returns:
        np.ndarray of shape (steps, 1) with the simulated future prices.
    """
    model.eval()

    # Default GBM parameters, used since no historical estimate is supplied.
    drift_rate = 0.0002    # daily drift (mu)
    volatility = 0.02      # daily volatility (sigma)
    model_weight = 0.7     # weight given to the network's prediction
    dt = 1                 # one trading day per step

    device = next(model.parameters()).device
    window = last_sequence.clone()
    prices = []

    with torch.no_grad():
        for _ in range(steps):
            # Network's estimate of the next (scaled) price difference.
            scaled_pred = model(window.unsqueeze(0).to(device))
            raw_pred = scaler_diff.inverse_transform(scaled_pred.cpu().numpy())[0][0]

            # Stochastic GBM increment applied to the current price.
            shock = np.exp(
                (drift_rate - 0.5 * volatility ** 2) * dt
                + volatility * np.sqrt(dt) * np.random.normal(0, 1)
            )
            gbm_delta = current_price * (shock - 1)

            # Blend the deterministic prediction with the stochastic part,
            # then step the price, flooring it at one cent.
            delta = (model_weight * raw_pred) + ((1 - model_weight) * gbm_delta)
            current_price = max(0.01, current_price + delta)
            prices.append(current_price)

            # Slide the window forward using the model's own scaled output
            # (not the blended value), matching the training regime.
            next_step = torch.tensor([[scaled_pred.item()]], dtype=torch.float32)
            window = torch.cat([window[1:], next_step], dim=0)

    return np.array(prices).reshape(-1, 1)

def fetch_and_prepare_data(ticker_symbol: str, seq_length: int) -> Tuple[np.ndarray, float, pd.DatetimeIndex, pd.DataFrame]:
    """Fetch the full daily price history for a ticker and prepare it for prediction.

    Args:
        ticker_symbol: Symbol understood by Yahoo Finance (e.g. "AAPL").
        seq_length: Minimum number of differenced observations required by
            the model; shorter histories are zero-padded on the left.

    Returns:
        Tuple of:
            - differenced close prices, shape (n, 1) with n >= seq_length
            - last observed close price (float)
            - the DatetimeIndex of the fetched history
            - the raw history DataFrame

    Raises:
        ValueError: If no 'Close' column or no price rows are available.
    """
    # Fetch data using yfinance
    ticker = yf.Ticker(ticker_symbol)
    df = ticker.history(period="max", interval='1d')

    # Make sure the data has a Close column
    if 'Close' not in df.columns:
        raise ValueError(f"No 'Close' price data available for {ticker_symbol}")

    # Extract closing prices
    close_prices = df['Close'].values.astype(float).reshape(-1, 1)

    # Guard against empty histories (unknown/delisted symbols) so the
    # indexing below fails with a clear message instead of an IndexError.
    if close_prices.size == 0:
        raise ValueError(f"No 'Close' price data available for {ticker_symbol}")

    # First-order differences: the model is trained on day-over-day changes
    diff_close_prices = np.diff(close_prices, axis=0)

    # Get the last price (for starting predictions)
    last_price = close_prices[-1][0]

    # Get the dates
    dates = df.index

    # If we don't have enough data for the sequence length, pad with zeros
    if len(diff_close_prices) < seq_length:
        padding = np.zeros((seq_length - len(diff_close_prices), 1))
        diff_close_prices = np.vstack([padding, diff_close_prices])

    return diff_close_prices, last_price, dates, df

def predict_stock_prices(
    ticker_symbols: List[str], 
    model_path: str, 
    scaler_path: str, 
    metadata_path: str
) -> Dict[str, Any]:
    """
    Predict stock prices for multiple ticker symbols for -15 to +15 years.

    Args:
        ticker_symbols: List of ticker symbols to predict
        model_path: Path to the trained BiLSTM model
        scaler_path: Path to the saved scaler for differences
        metadata_path: Path to the saved model metadata

    Returns:
        Dictionary with ticker symbols as keys and arrays of dates and prices as values
    """
    # Seed both RNGs so the stochastic GBM paths are reproducible run to run.
    torch.manual_seed(42)
    np.random.seed(42)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Load model metadata (carries the training-time sequence length).
    metadata = joblib.load(metadata_path)
    seq_length = metadata['seq_length']

    # Restore the trained network in inference mode.
    model = BiLSTMModel().to(device)
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()

    # Scaler that maps raw price differences to/from model space.
    scaler_diff = joblib.load(scaler_path)

    trading_days_per_year = 252
    horizon = trading_days_per_year * 15  # 15 years of trading days

    predictions: Dict[str, Any] = {}

    for symbol in tqdm(ticker_symbols, desc="Processing tickers"):
        try:
            # Pull history and build the model's input window.
            diff_prices, last_price, hist_dates, df = fetch_and_prepare_data(symbol, seq_length)
            scaled_window = scaler_diff.transform(diff_prices[-seq_length:])
            seed_sequence = torch.tensor(scaled_window, dtype=torch.float32)

            # Simulate the forward path.
            forecast = predict_future(model, seed_sequence, horizon, scaler_diff, last_price)

            # Future dates: calendar days after the last observed date,
            # rendered as ISO strings for JSON serialization.
            anchor = hist_dates[-1]
            forecast_dates = [
                (anchor + datetime.timedelta(days=offset)).strftime('%Y-%m-%d')
                for offset in range(1, horizon + 1)
            ]

            # Historical slice: up to 15 years back, or whatever exists.
            lookback = min(len(hist_dates), trading_days_per_year * 15)
            past_dates = [d.strftime('%Y-%m-%d') for d in hist_dates[-lookback:]]
            past_prices = df['Close'].values[-lookback:]

            # Stitch past and future into one series.
            all_dates = past_dates + forecast_dates
            all_prices = np.concatenate([past_prices, forecast.flatten()])

            predictions[symbol] = [
                {"date": date, "value": float(value)}
                for date, value in zip(all_dates, all_prices)
            ]
        except Exception as e:
            # Best-effort batch: record the failure and keep going.
            print(f"Error processing {symbol}: {str(e)}")
            predictions[symbol] = {"error": str(e)}

    return predictions

def batch_predict_to_json(
    ticker_symbols: List[str], 
    model_path: str, 
    scaler_path: str, 
    metadata_path: str, 
    output_path: str = "stock_predictions.json"
) -> Dict[str, Any]:
    """
    Batch predict stock prices, save the results to a JSON file, and
    return them.

    Args:
        ticker_symbols: List of ticker symbols
        model_path: Path to the trained model
        scaler_path: Path to the saved scaler
        metadata_path: Path to the saved metadata
        output_path: Path to save the output JSON

    Returns:
        The predictions dictionary (same structure as predict_stock_prices).
    """
    # Get predictions
    predictions = predict_stock_prices(ticker_symbols, model_path, scaler_path, metadata_path)

    # Persist the results. Previously `output_path` was accepted but
    # silently ignored, contradicting the function's name and docstring.
    with open(output_path, 'w') as f:
        json.dump(predictions, f)

    return predictions

# Example usage
def get_stock_predictions(tickers):
    """
    Convenience wrapper: run batch prediction for `tickers` using the
    default on-disk model artifacts.

    Args:
        tickers: List of ticker symbols, e.g. ["AAPL", "MSFT", "GOOGL"].

    Returns:
        The predictions dictionary produced by batch_predict_to_json.
    """
    # Paths to saved model files
    model_path = "bilstm_stock_model.pth"
    scaler_path = "scaler_diff.pkl"
    metadata_path = "model_metadata.pkl"

    # Run batch prediction (debug print removed; the result is the
    # predictions dict, not a file path).
    return batch_predict_to_json(tickers, model_path, scaler_path, metadata_path)