Jasper Siebelink committed · Commit 13afdbd
1 Parent(s): 7f01234

Caching of LSTM model
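For context on what this commit does: st.cache_resource memoizes a function's return value across Streamlit reruns, keyed on its arguments, so an expensive step such as training the Keras model runs once and is reused on later reruns instead of being retrained every time the user interacts with the app. A minimal standalone sketch of the mechanism (illustrative only, not code from this Space; the function name and the sleep stand-in are made up):

import time
import streamlit as st

@st.cache_resource
def build_expensive_resource(seed: int):
    # Stand-in for a slow step such as fitting an LSTM.
    time.sleep(2)
    return {"seed": seed, "trained": True}

start = time.time()
resource = build_expensive_resource(42)  # slow only on the first run
st.write(f"Resource ready in {time.time() - start:.2f} s")
st.write(resource)

On the second and later reruns the cached object is returned immediately, which is the effect this commit aims for with the trained LSTM model below.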
app.py CHANGED
@@ -68,7 +68,6 @@ if st.session_state.json_content:
     # Generating synthetic data
     rng = np.random.RandomState(42)

-    print(algorithm_box)
     selected_algorithm_index = options.index(algorithm_box)
     if selected_algorithm_index == 0:
         plotted_result = apply_isolation_forest(rng,
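The removed print(algorithm_box) looks like a leftover debug statement; the surviving lines map the selected label back to an index and dispatch to the matching detector. A hedged sketch of that pattern (the option labels and the st.selectbox call are assumptions, not shown in this hunk):

import streamlit as st

# Assumed option labels; the real list is defined elsewhere in app.py.
options = ["Isolation Forest", "LSTM"]
algorithm_box = st.selectbox("Anomaly detection algorithm", options)

# Same dispatch style as the hunk above: index 0 -> Isolation Forest, otherwise LSTM.
selected_algorithm_index = options.index(algorithm_box)
if selected_algorithm_index == 0:
    st.write("apply_isolation_forest(...) would run here")
else:
    st.write("apply_lstm(...) would run here")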
lstm.py CHANGED
@@ -1,11 +1,12 @@
 # Long Short-Term Memory

-
+import streamlit as st
 import numpy as np
-from sklearn.preprocessing import StandardScaler
 import tensorflow as tf
+from sklearn.preprocessing import StandardScaler

-def apply_lstm(all_data: np.ndarray) -> np.ndarray:
+@st.cache_resource
+def train_lstm_model(all_data: np.ndarray, epochs: int = 50, batch_size: int = 1):
     scaled_features = StandardScaler().fit_transform(all_data)

     time_steps = 1
@@ -13,28 +14,32 @@ def apply_lstm(all_data: np.ndarray) -> np.ndarray:
     features = scaled_features.shape[1]
     lstm_input = scaled_features.reshape(samples, time_steps, features)

-    # Define the model
+    # Define and compile the model
     model = tf.keras.Sequential([
         tf.keras.layers.LSTM(50, input_shape=(time_steps, features)),
         tf.keras.layers.Dropout(0.2),
         tf.keras.layers.Dense(features)
     ])
-
-    # Compile the model
     model.compile(optimizer='adam', loss='mae')

     # Fit model
-    model.fit(lstm_input, scaled_features, epochs=50, batch_size=1, verbose=1)
+    model.fit(lstm_input, scaled_features, epochs=epochs, batch_size=batch_size, verbose=1)
+    return model, scaled_features, lstm_input
+
+
+def apply_lstm(all_data: np.ndarray) -> np.ndarray:
+    # Train or retrieve cached model and scaled features
+    model, scaled_features, lstm_input = train_lstm_model(all_data)

     # Prediction and error calculation
     predictions = model.predict(lstm_input)
     mse = np.mean(np.power(scaled_features - predictions, 2), axis=1)

-    #
+    # Threshold for outliers
     threshold = np.quantile(mse, 0.8)

-    #
+    # Determine outliers
     outliers = mse > threshold

-
-    return
+    # Map to usable -1/1 format
+    return np.where(outliers, -1, 1)
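The tail of apply_lstm converts per-row reconstruction error into labels: rows whose MSE exceeds the 80th percentile are flagged, and np.where maps the boolean mask onto the -1/1 convention used by scikit-learn detectors such as Isolation Forest (-1 for outliers, 1 for inliers), presumably so both algorithms hand the app the same label format. A tiny worked example with made-up error values:

import numpy as np

# Pretend per-sample reconstruction errors (made-up values).
mse = np.array([0.05, 0.07, 0.06, 0.90, 0.08])

threshold = np.quantile(mse, 0.8)   # 80th percentile: 0.244 for these values
outliers = mse > threshold          # boolean mask of anomalous rows
labels = np.where(outliers, -1, 1)  # -1 = outlier, 1 = inlier

print(labels)  # [ 1  1  1 -1  1]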