Files changed (1) hide show
  1. app.py +251 -14
app.py CHANGED
@@ -1,25 +1,262 @@
1
  import gradio as gr
2
  import numpy as np
 
3
  from tensorflow.keras.models import load_model
 
4
  from tkan import TKAN
5
- from tkat import TKAT
6
- from keras.utils import custom_object_scope
 
 
 
 
7
 
8
- # Load the model with custom objects
9
- with custom_object_scope({"TKAN": TKAN, "TKAT": TKAT}):
10
- model = load_model("best_model_TKAN_nahead_1 (2).keras")
11
 
12
- # Define predict function
13
- def predict(pm25, pm10, co, temp):
14
- input_data = np.array([[pm25, pm10, co, temp]])
15
- output = model.predict(input_data)
16
- return float(output[0][0])
 
 
 
 
 
 
 
 
 
 
17
 
18
- # Gradio interface
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  interface = gr.Interface(
20
  fn=predict,
21
- inputs=[gr.Number(), gr.Number(), gr.Number(), gr.Number()],
22
- outputs=gr.Number()
23
  )
24
 
25
- interface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import load_model
# Custom recurrent layer used by the saved model; must be importable so
# Keras can deserialize the architecture.
from tkan import TKAN

# TKAT lives in a separate package and may not be installed; the app can
# still run for TKAN-only models, so degrade gracefully instead of crashing.
try:
    from tkat import TKAT
except ImportError:
    print("TKAT library not found. If your model uses TKAT, make sure the library is installed.")
    TKAT = None  # Sentinel: checked later before registering as a custom object

from tensorflow.keras.utils import custom_object_scope
import pickle  # Used for saving/loading the scaler
 
16
 
17
# --- MinMaxScaler (ported from the training notebook) ---
class MinMaxScaler:
    """Min-max scaler for 1D, 2D, or 3D numpy arrays.

    Unlike sklearn's scaler of the same name, this one handles 3D data
    (samples, timesteps, features) when ``feature_axis`` identifies the
    feature dimension. Statistics are stored in ``min_``, ``max_``, and
    ``scale_`` after :meth:`fit`.
    """

    def __init__(self, feature_axis=None, minmax_range=(0, 1)):
        """Initialize the scaler.

        Args:
            feature_axis (int, optional): Axis holding the features; only
                used for 3D data. Default None (inferred from dimensionality).
            minmax_range (tuple): Target (min, max) range for scaled values.
        """
        self.feature_axis = feature_axis
        self.min_ = None
        self.max_ = None
        self.scale_ = None
        self.minmax_range = minmax_range

    def fit(self, X):
        """Compute per-feature min/max from ``X``.

        Args:
            X (np.ndarray): 1D, 2D, or 3D data to fit on.

        Returns:
            MinMaxScaler: self, to allow ``fit(...).transform(...)`` chaining.

        Raises:
            ValueError: If ``X`` is not 1D, 2D, or 3D.
        """
        if X.ndim == 3 and self.feature_axis is not None:  # 3D: reduce over all non-feature axes
            axis = tuple(i for i in range(X.ndim) if i != self.feature_axis)
            self.min_ = np.min(X, axis=axis)
            self.max_ = np.max(X, axis=axis)
        elif X.ndim == 2:  # 2D: per-column statistics
            self.min_ = np.min(X, axis=0)
            self.max_ = np.max(X, axis=0)
        elif X.ndim == 1:  # 1D: scalar statistics
            self.min_ = np.min(X)
            self.max_ = np.max(X)
        else:
            raise ValueError("Data must be 1D, 2D, or 3D.")

        span = self.max_ - self.min_
        # Guard against zero-range (constant) features: a span of 0 would
        # cause division-by-zero NaN/inf in transform(). Substituting 1 maps
        # a constant feature to minmax_range[0] and still round-trips exactly
        # through inverse_transform().
        self.scale_ = np.where(span == 0, 1, span)
        return self

    def transform(self, X):
        """Scale ``X`` into ``minmax_range`` using the fitted statistics.

        Args:
            X (np.ndarray): Data to transform.

        Returns:
            np.ndarray: The scaled data.

        Raises:
            ValueError: If the scaler has not been fitted yet.
        """
        if self.scale_ is None:
            raise ValueError("MinMaxScaler must be fitted before calling transform().")
        X_scaled = (X - self.min_) / self.scale_
        X_scaled = X_scaled * (self.minmax_range[1] - self.minmax_range[0]) + self.minmax_range[0]
        return X_scaled

    def fit_transform(self, X):
        """Fit to ``X``, then return the transformed ``X``."""
        return self.fit(X).transform(X)

    def inverse_transform(self, X_scaled):
        """Map scaled data back to the original scale.

        Args:
            X_scaled (np.ndarray): Previously scaled data.

        Returns:
            np.ndarray: Data in the original scale.

        Raises:
            ValueError: If the scaler has not been fitted yet.
        """
        if self.scale_ is None:
            raise ValueError("MinMaxScaler must be fitted before calling inverse_transform().")
        X = (X_scaled - self.minmax_range[0]) / (self.minmax_range[1] - self.minmax_range[0])
        X = X * self.scale_ + self.min_
        return X
# --- End of MinMaxScaler Class ---
89
+
90
+
91
# --- Configuration ---
# Path to the trained Keras model (contains custom TKAN layers).
MODEL_PATH = "best_model_TKAN_nahead_1 (2).keras"
INPUT_SCALER_PATH = "input_scaler.pkl"  # You need to save your X_scaler to this file
# TARGET_SCALER_PATH = "target_scaler.pkl"  # You might also need this if you predict scaled target
SEQUENCE_LENGTH = 24  # Length of the input window, in time steps; matches the notebook
NUM_INPUT_FEATURES = 5  # Column order: ['calculated_aqi', 'temp', 'pm25', 'pm10', 'co']
N_AHEAD = 1  # Forecast horizon (steps ahead); matches the notebook
98
+
99
# --- Load Model and Scalers ---
import sys
import traceback

# Keras needs every custom class registered to deserialize the model file.
custom_objects = {"TKAN": TKAN}
if TKAT is not None:
    custom_objects["TKAT"] = TKAT
# Registered in case any Keras-serialized artifact references the scaler class.
custom_objects["MinMaxScaler"] = MinMaxScaler

model = None
input_scaler = None
# target_scaler = None  # Load if the target was scaled during training

try:
    with custom_object_scope(custom_objects):
        model = load_model(MODEL_PATH)
    print("Model loaded successfully!")
    model.summary()
except Exception as e:
    print(f"Error loading model from {MODEL_PATH}: {e}")
    traceback.print_exc()
    # The app is useless without the model; abort startup.
    sys.exit("Failed to load the model. Exiting.")

try:
    # NOTE(review): pickle resolves classes by module path, not via Keras'
    # custom_object_scope, so no scope is needed here — the MinMaxScaler
    # class just has to be importable under the same module name it was
    # pickled from.
    with open(INPUT_SCALER_PATH, "rb") as f:
        input_scaler = pickle.load(f)
    print(f"Input scaler loaded successfully from {INPUT_SCALER_PATH}")

    # If the target variable was scaled before training, load its scaler here
    # too so predictions can be inverse-transformed:
    # with open(TARGET_SCALER_PATH, "rb") as f:
    #     target_scaler = pickle.load(f)
    # print(f"Target scaler loaded successfully from {TARGET_SCALER_PATH}")
except FileNotFoundError as e:
    print(f"Error loading scaler: {e}. Make sure your scaler files are in the correct path.")
    sys.exit("Failed to load scaler(s). Exiting.")
except Exception as e:
    print(f"Error loading scaler: {e}")
    traceback.print_exc()
    sys.exit("Failed to load scaler(s). Exiting.")
148
+
149
+
150
# --- Data Preparation (get_latest_data_sequence needs implementation) ---

def get_latest_data_sequence(sequence_length, num_features):
    """Return the most recent window of input data for the model.

    Placeholder: must be reimplemented against the real data source in the
    deployment environment (database query, API call, file read, ...). Rows
    must be ordered oldest -> newest and columns must follow
    ['calculated_aqi', 'temp', 'pm25', 'pm10', 'co'].

    Args:
        sequence_length (int): Number of historical time steps required.
        num_features (int): Number of features per time step.

    Returns:
        np.ndarray: Historical data of shape (sequence_length, num_features).
    """
    print("WARNING: Using dummy data sequence. Implement get_latest_data_sequence.")
    # --- REPLACE THIS WITH YOUR ACTUAL DATA RETRIEVAL LOGIC ---
    # A zero-filled placeholder with the correct shape; temporarily swapping
    # in a slice of real historical data (e.g. from X_test_unscaled) is a
    # good way to verify the scaling/prediction pipeline end to end.
    placeholder = np.zeros((sequence_length, num_features))
    return placeholder
    # --- END OF PLACEHOLDER ---
181
+
182
+
183
# --- Define Predict Function ---

def predict():  # Modify inputs as needed based on how you get data
    """Fetch the latest input window, scale it, and predict the next AQI value.

    Triggered by the Gradio interface. Takes no direct UI inputs because the
    historical window is fetched internally via get_latest_data_sequence();
    adapt the signature if the UI should supply the data instead.

    Returns:
        float: The model's prediction, or a human-readable error string when
        the model/scaler is unavailable or the fetched window is malformed.
    """
    if model is None or input_scaler is None:
        return "Model or scaler not loaded. Check logs."

    # 1. Most recent historical window, ordered oldest -> newest.
    window = get_latest_data_sequence(SEQUENCE_LENGTH, NUM_INPUT_FEATURES)

    # 2. Validate before feeding the model.
    if window.shape != (SEQUENCE_LENGTH, NUM_INPUT_FEATURES):
        return f"Error: Retrieved data has incorrect shape {window.shape}. Expected ({SEQUENCE_LENGTH}, {NUM_INPUT_FEATURES})."

    # 3. The scaler was fitted on batched 3D data in the notebook (presumably
    # feature_axis=2 — confirm), so add a batch dimension of 1 before scaling;
    # the model expects (batch, sequence_length, num_features) as well.
    batch = window[np.newaxis, :, :]
    scaled_batch = input_scaler.transform(batch)

    # 4. Predict; output shape is (batch, n_ahead) with n_ahead == 1.
    raw_prediction = model.predict(scaled_batch)[0][0]

    # 5. NOTE(review): if the target (calculated_aqi) was scaled during
    # training, the prediction must be inverse-transformed with the target
    # scaler before being returned, e.g.:
    #   target_scaler.inverse_transform(np.array([[raw_prediction]]))[0][0]
    # Assuming here the model output is already in the desired scale.
    return float(raw_prediction)
234
+
235
+
236
# --- Gradio Interface ---
# `predict` takes no direct Gradio inputs: it fetches its own historical
# window via get_latest_data_sequence(), so the UI only needs a trigger
# (Option 1 below). Alternative designs, not implemented here:
#   Option 2: pass a parameter (e.g. a timestamp) into predict() and have
#             get_latest_data_sequence() fetch the window ending there.
#   Option 3: a richer UI that accepts the full input sequence directly —
#             more complex, but maps 1:1 onto the model input.
interface = gr.Interface(
    fn=predict,
    inputs=None,  # no input components; predict() sources its own data
    outputs=gr.Number(label=f"Predicted AQI (Next {N_AHEAD} Hour(s))")
)

# --- Launch Gradio Interface ---
# Guarded so importing this module (e.g. by a WSGI server) doesn't launch.
if __name__ == "__main__":
    interface.launch()