Ti-sha committed on
Commit
4204769
·
verified ·
1 Parent(s): 580072d

Create inference.py

Browse files
Files changed (1) hide show
  1. inference.py +79 -0
inference.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # inference.py (final robust)
2
+ import pandas as pd
3
+ import numpy as np
4
+ from statsmodels.tsa.arima.model import ARIMA
5
+ from sklearn.preprocessing import MinMaxScaler
6
+ from tensorflow.keras.models import Sequential
7
+ from tensorflow.keras.layers import LSTM, Dense
8
+
9
def arima_forecast(ts_data, order=(5,1,0), steps=5):
    """Fit an ARIMA model to the series and forecast future values.

    Args:
        ts_data: sequence of recent observations (e.g. stock prices).
        order: (p, d, q) order passed straight through to statsmodels' ARIMA.
        steps: number of future periods to forecast.

    Returns:
        List of forecasted values, one per future step.
    """
    series = pd.Series(ts_data)
    fitted = ARIMA(series, order=order).fit()
    return fitted.forecast(steps=steps).tolist()
15
+
16
def lstm_forecast(ts_data, look_back=60, steps=5, epochs=20):
    """Train a small stacked-LSTM on the series and forecast future values.

    Args:
        ts_data: sequence of recent observations (e.g. stock prices).
        look_back: window length fed to the LSTM; shrunk automatically
            when the series is shorter than look_back + 1.
        steps: number of future periods to forecast.
        epochs: training epochs for the freshly built model.

    Returns:
        List of `steps` forecasted values in the original scale.

    Raises:
        ValueError: if ts_data is empty (previously this crashed inside
            MinMaxScaler with an opaque error).
    """
    if len(ts_data) == 0:
        raise ValueError("ts_data must contain at least one observation")

    # A single observation cannot form a (window, target) training pair,
    # so the only sensible forecast is to repeat the last known value.
    # (The old "fallback" path built a zero-length training sequence here
    # and crashed in model.fit despite claiming to avoid a crash.)
    if len(ts_data) == 1:
        return [float(ts_data[0])] * steps

    # Shrink the window when the input is too short to fill it.
    if len(ts_data) < look_back + 1:
        look_back = max(1, len(ts_data) - 1)

    # Scale to [0, 1]; LSTMs train poorly on raw price magnitudes.
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled_data = scaler.fit_transform(np.array(ts_data).reshape(-1, 1))

    # Build supervised (window, next-value) pairs.
    def _make_sequences(dataset, window):
        X, y = [], []
        for i in range(len(dataset) - window):
            X.append(dataset[i:i + window, 0])
            y.append(dataset[i + window, 0])
        return np.array(X), np.array(y)

    X, y = _make_sequences(scaled_data, look_back)
    # len(ts_data) >= 2 and look_back <= len(ts_data) - 1 guarantee at
    # least one training sample, so no empty-X fallback is needed.
    X = X.reshape((X.shape[0], X.shape[1], 1))

    # Two stacked LSTM layers followed by a scalar regression head.
    model = Sequential()
    model.add(LSTM(50, return_sequences=True, input_shape=(look_back, 1)))
    model.add(LSTM(50))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mean_squared_error')

    model.fit(X, y, epochs=epochs, batch_size=32, verbose=0)

    # Roll the forecast forward one step at a time, feeding each
    # prediction back in as the newest element of the window.
    last_seq = scaled_data[-look_back:].reshape(1, look_back, 1)
    predictions = []
    for _ in range(steps):
        pred = model.predict(last_seq, verbose=0)  # output shape varies by TF version
        predictions.append(float(np.asarray(pred).reshape(-1)[0]))  # extract scalar safely
        pred_reshaped = np.asarray(pred).reshape(1, 1, 1)
        last_seq = np.concatenate([last_seq[:, 1:, :], pred_reshaped], axis=1)

    # Undo the min-max scaling before returning.
    restored = scaler.inverse_transform(np.array(predictions).reshape(-1, 1))
    return restored.flatten().tolist()
66
+
67
def infer(model_type: str, input_data: list, steps: int = 5, epochs: int = 20):
    """
    model_type: 'arima' or 'lstm' (case-insensitive)
    input_data: list of recent stock prices
    steps: number of future days to forecast
    epochs: LSTM training epochs (only used for LSTM)
    """
    # Normalize once, then dispatch with guard clauses.
    kind = model_type.lower()
    if kind == 'arima':
        return arima_forecast(input_data, steps=steps)
    if kind == 'lstm':
        return lstm_forecast(input_data, steps=steps, epochs=epochs)
    return {"error": "Invalid model_type. Use 'arima' or 'lstm'."}