dsid271 committed on
Commit
0f35fac
·
verified ·
1 Parent(s): 82f2feb

Update app.py

Browse files

everything filled in except the dummy data part

Files changed (1) hide show
  1. app.py +58 -63
app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
2
  import numpy as np
3
  import tensorflow as tf
4
  from tensorflow.keras.models import load_model
 
5
  # Assuming TKAN and TKAT are available after installing the respective packages
6
  from tkan import TKAN
7
  # If TKAT is from a different library, import it similarly
@@ -13,6 +14,37 @@ except ImportError:
13
 
14
  from tensorflow.keras.utils import custom_object_scope
15
  import pickle # Used for saving/loading the scaler
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
 
17
  # --- Your MinMaxScaler Class (Copied from Notebook) ---
18
  class MinMaxScaler:
@@ -87,20 +119,7 @@ class MinMaxScaler:
87
  return X
88
  # --- End of MinMaxScaler Class ---
89
 
90
-
91
- # --- Configuration ---
92
- MODEL_PATH = "best_model_TKAN_nahead_1 (2).keras"
93
- INPUT_SCALER_PATH = "input_scaler.pkl" # You need to save your X_scaler to this file
94
- # TARGET_SCALER_PATH = "target_scaler.pkl" # You might also need this if you predict scaled target
95
- SEQUENCE_LENGTH = 24 # Matches the notebook
96
- NUM_INPUT_FEATURES = 5 # ['calculated_aqi', 'temp', 'pm25', 'pm10', 'co']
97
- N_AHEAD = 1 # Matches the notebook
98
-
99
- # --- Load Model and Scalers ---
100
- custom_objects = {"TKAN": TKAN}
101
- if TKAT is not None:
102
- custom_objects["TKAT"] = TKAT
103
- # Also add your custom MinMaxScaler to custom_objects for loading the scaler object
104
  custom_objects["MinMaxScaler"] = MinMaxScaler
105
 
106
 
@@ -109,42 +128,29 @@ input_scaler = None
109
  # target_scaler = None # Load if needed
110
 
111
  try:
 
112
  with custom_object_scope(custom_objects):
113
  model = load_model(MODEL_PATH)
114
- print("Model loaded successfully!")
115
- model.summary()
116
-
117
- except Exception as e:
118
- print(f"Error loading model from {MODEL_PATH}: {e}")
119
- import traceback
120
- traceback.print_exc()
121
- import sys
122
- sys.exit("Failed to load the model. Exiting.")
123
 
124
- try:
125
- # When loading the scaler, you need custom_objects if it's your custom class
126
- with custom_object_scope(custom_objects):
127
  with open(INPUT_SCALER_PATH, 'rb') as f:
128
  input_scaler = pickle.load(f)
129
- print(f"Input scaler loaded successfully from {INPUT_SCALER_PATH}")
130
 
131
  # If you also scaled your target variable and need to inverse transform the prediction,
132
  # load the target scaler here as well.
133
- # with custom_object_scope(custom_objects):
134
  # with open(TARGET_SCALER_PATH, 'rb') as f:
135
  # target_scaler = pickle.load(f)
136
  # print(f"Target scaler loaded successfully from {TARGET_SCALER_PATH}")
137
 
138
- except FileNotFoundError as e:
139
- print(f"Error loading scaler: {e}. Make sure your scaler files are in the correct path.")
140
- import sys
141
- sys.exit("Failed to load scaler(s). Exiting.")
142
  except Exception as e:
143
- print(f"Error loading scaler: {e}")
144
  import traceback
145
  traceback.print_exc()
146
  import sys
147
- sys.exit("Failed to load scaler(s). Exiting.")
148
 
149
 
150
  # --- Data Preparation (get_latest_data_sequence needs implementation) ---
@@ -167,15 +173,14 @@ def get_latest_data_sequence(sequence_length, num_features):
167
  """
168
  print("WARNING: Using dummy data sequence. Implement get_latest_data_sequence.")
169
  # --- REPLACE THIS WITH YOUR ACTUAL DATA RETRIEVAL LOGIC ---
170
- # Example: Load from a database, fetch from an API, read from a file.
171
  # The data should be in the correct order (oldest to newest time step).
172
  # The columns should be in the order ['calculated_aqi', 'temp', 'pm25', 'pm10', 'co'].
173
 
174
  # For now, returning a placeholder with the correct shape.
175
  dummy_data = np.zeros((sequence_length, num_features))
176
  # Populate dummy_data with some values for testing if you can load historical data
177
- # For example, load a small sample of your training data's X_test_unscaled
178
- # and use it here temporarily to verify the scaling and prediction pipeline.
179
  return dummy_data
180
  # --- END OF PLACEHOLDER ---
181
 
@@ -187,8 +192,6 @@ def predict(): # Modify inputs as needed based on how you get data
187
  Retrieves the latest data sequence, preprocesses it, and makes a prediction.
188
 
189
  The Gradio interface will need to trigger this function.
190
- The input parameters to this function might change depending on how you
191
- provide the necessary historical data sequence via the Gradio interface.
192
  """
193
  if model is None or input_scaler is None:
194
  return "Model or scaler not loaded. Check logs."
@@ -202,7 +205,7 @@ def predict(): # Modify inputs as needed based on how you get data
202
 
203
  # 2. Scale the data sequence using the loaded input scaler
204
  # Your MinMaxScaler from the notebook had feature_axis=2 for 3D data (samples, sequence, features).
205
- # So, for a single sequence (2D array), you'll likely need to add a batch dimension (1) before scaling.
206
  latest_data_sequence_with_batch = latest_data_sequence[np.newaxis, :, :]
207
  scaled_input_data = input_scaler.transform(latest_data_sequence_with_batch)
208
 
@@ -220,43 +223,35 @@ def predict(): # Modify inputs as needed based on how you get data
220
  # This requires saving and loading the target_scaler as well and using it here.
221
 
222
  # Example if you need to inverse transform the target:
223
- # if target_scaler is not None:
224
- # predicted_original_scale = target_scaler.inverse_transform(np.array([[predicted_scaled_value]]))[0][0]
225
- # else:
226
- # predicted_original_scale = predicted_scaled_value
227
- # predicted_value = predicted_original_scale
 
 
 
 
 
 
 
 
228
 
229
  # For now, assuming the model outputs directly in the desired scale or
230
  # you handle inverse transformation elsewhere if needed.
231
- predicted_value = predicted_scaled_value # Adjust this if inverse transformation is needed
232
 
233
  return float(predicted_value)
234
 
235
 
236
  # --- Gradio Interface ---
237
- # The Gradio interface needs to allow the user to provide the necessary
238
- # historical data for the `predict` function. The current inputs (pm25, pm10, co, temp)
239
- # are NOT used by the predict function as written, since predict calls `get_latest_data_sequence`.
240
- # You need to decide how the user will provide the historical data.
241
-
242
- # Option 1: No direct inputs to `predict` function, it fetches data internally.
243
- # In this case, the Gradio interface might just have a button to trigger the prediction.
244
  interface = gr.Interface(
245
  fn=predict,
246
  inputs=None, # `predict` function doesn't take direct inputs from Gradio
247
  outputs=gr.Number(label=f"Predicted AQI (Next {N_AHEAD} Hour(s))")
248
  )
249
 
250
- # Option 2: Modify `predict` to accept some parameters that help retrieve the data.
251
- # For example, if you pass the current time, and `get_latest_data_sequence` uses that
252
- # to find the last 24 hours ending at that time.
253
-
254
- # Option 3: Design a more complex Gradio interface to input the full sequence.
255
- # This is more complex but directly aligns with the model input.
256
-
257
- # Choose the Gradio interface definition that matches how your `predict` function
258
- # will receive the data it needs. Option 1 is shown below.
259
-
260
  # --- Launch Gradio Interface ---
261
  if __name__ == "__main__":
262
  interface.launch()
 
2
  import numpy as np
3
  import tensorflow as tf
4
  from tensorflow.keras.models import load_model
5
+ from tensorflow.keras.layers import Input # Explicitly import Input
6
  # Assuming TKAN and TKAT are available after installing the respective packages
7
  from tkan import TKAN
8
  # If TKAT is from a different library, import it similarly
 
14
 
15
  from tensorflow.keras.utils import custom_object_scope
16
  import pickle # Used for saving/loading the scaler
17
+ import os # For checking file existence
18
+
19
+ # --- Configuration ---
20
+ MODEL_PATH = "best_model_TKAN_nahead_1 (2).keras" # Your saved model file
21
+ INPUT_SCALER_PATH = "input_scaler.pkl" # Your saved input scaler file
22
+ SEQUENCE_LENGTH = 24 # Matches the notebook
23
+ NUM_INPUT_FEATURES = 5 # ['calculated_aqi', 'temp', 'pm25', 'pm10', 'co']
24
+ N_AHEAD = 1 # Matches the notebook
25
+
26
+ # --- Ensure Required Files Exist ---
27
+ if not os.path.exists(MODEL_PATH):
28
+ print(f"Error: Model file not found at {MODEL_PATH}")
29
+ import sys
30
+ sys.exit("Model file missing. Exiting.")
31
+
32
+ if not os.path.exists(INPUT_SCALER_PATH):
33
+ print(f"Error: Input scaler file not found at {INPUT_SCALER_PATH}")
34
+ import sys
35
+ sys.exit("Input scaler file missing. Exiting.")
36
+
37
+
38
+ # --- Load Model and Scalers ---
39
+ # Define custom objects dictionary
40
+ custom_objects = {"TKAN": TKAN}
41
+ if TKAT is not None:
42
+ custom_objects["TKAT"] = TKAT
43
+ # Add your custom MinMaxScaler to custom_objects if you are using one that you defined
44
+ # in your own code (not from a library). If your scaler is from scikit-learn, you
45
+ # generally don't need to include it in custom_objects for pickle loading, but if it's
46
+ # a custom implementation, you do. Based on your notebook, you have a custom MinMaxScaler.
47
+ # Include the custom MinMaxScaler class definition here as well.
48
 
49
  # --- Your MinMaxScaler Class (Copied from Notebook) ---
50
  class MinMaxScaler:
 
119
  return X
120
  # --- End of MinMaxScaler Class ---
121
 
122
+ # Add your custom MinMaxScaler to custom_objects
 
 
 
 
 
 
 
 
 
 
 
 
 
123
  custom_objects["MinMaxScaler"] = MinMaxScaler
124
 
125
 
 
128
  # target_scaler = None # Load if needed
129
 
130
  try:
131
+ # Use custom_object_scope for both model and scaler loading
132
  with custom_object_scope(custom_objects):
133
  model = load_model(MODEL_PATH)
134
+ print("Model loaded successfully!")
135
+ model.summary() # Verify the model structure after loading
 
 
 
 
 
 
 
136
 
 
 
 
137
  with open(INPUT_SCALER_PATH, 'rb') as f:
138
  input_scaler = pickle.load(f)
139
+ print(f"Input scaler loaded successfully from {INPUT_SCALER_PATH}")
140
 
141
  # If you also scaled your target variable and need to inverse transform the prediction,
142
  # load the target scaler here as well.
143
+ # with custom_object_scope(custom_objects): # Need custom_object_scope if target scaler is custom
144
  # with open(TARGET_SCALER_PATH, 'rb') as f:
145
  # target_scaler = pickle.load(f)
146
  # print(f"Target scaler loaded successfully from {TARGET_SCALER_PATH}")
147
 
 
 
 
 
148
  except Exception as e:
149
+ print(f"Error during loading: {e}")
150
  import traceback
151
  traceback.print_exc()
152
  import sys
153
+ sys.exit("Failed to load model or scaler. Exiting.")
154
 
155
 
156
  # --- Data Preparation (get_latest_data_sequence needs implementation) ---
 
173
  """
174
  print("WARNING: Using dummy data sequence. Implement get_latest_data_sequence.")
175
  # --- REPLACE THIS WITH YOUR ACTUAL DATA RETRIEVAL LOGIC ---
 
176
  # The data should be in the correct order (oldest to newest time step).
177
  # The columns should be in the order ['calculated_aqi', 'temp', 'pm25', 'pm10', 'co'].
178
 
179
  # For now, returning a placeholder with the correct shape.
180
  dummy_data = np.zeros((sequence_length, num_features))
181
  # Populate dummy_data with some values for testing if you can load historical data
182
+ # Example: If you saved a sample of X_test_unscaled, load it here temporarily.
183
+ # You need to ensure this dummy data has the correct structure and feature order.
184
  return dummy_data
185
  # --- END OF PLACEHOLDER ---
186
 
 
192
  Retrieves the latest data sequence, preprocesses it, and makes a prediction.
193
 
194
  The Gradio interface will need to trigger this function.
 
 
195
  """
196
  if model is None or input_scaler is None:
197
  return "Model or scaler not loaded. Check logs."
 
205
 
206
  # 2. Scale the data sequence using the loaded input scaler
207
  # Your MinMaxScaler from the notebook had feature_axis=2 for 3D data (samples, sequence, features).
208
+ # So, for a single sequence (2D array), you need to add a batch dimension (1) before scaling.
209
  latest_data_sequence_with_batch = latest_data_sequence[np.newaxis, :, :]
210
  scaled_input_data = input_scaler.transform(latest_data_sequence_with_batch)
211
 
 
223
  # This requires saving and loading the target_scaler as well and using it here.
224
 
225
  # Example if you need to inverse transform the target:
226
+ if target_scaler is not None:
227
+ # # Need to put the single predicted value into an array with the shape
228
+ # # that the target_scaler's inverse_transform expects.
229
+ # # Assuming y_scaler was fitted on a shape like (samples, n_ahead, 1) or (samples, 1)
230
+ # # and inverse_transform works on a similar shape.
231
+ # # If y_train shape was (samples, n_ahead):
232
+ predicted_original_scale = target_scaler.inverse_transform(np.array([[predicted_scaled_value]]))[0][0]
233
+ # # If y_train shape was (samples, n_ahead, 1):
234
+ # # predicted_original_scale = target_scaler.inverse_transform(np.array([[[predicted_scaled_value]]]))[0][0][0]
235
+ # pass # Implement the correct inverse transform based on how y_scaler was used
236
+ else:
237
+ predicted_original_scale = predicted_scaled_value
238
+ predicted_value = predicted_original_scale
239
 
240
  # For now, assuming the model outputs directly in the desired scale or
241
  # you handle inverse transformation elsewhere if needed.
242
+ # predicted_value = predicted_scaled_value # Adjust this if inverse transformation is needed
243
 
244
  return float(predicted_value)
245
 
246
 
247
  # --- Gradio Interface ---
248
+ # Keep inputs=None as the predict function gets data internally.
 
 
 
 
 
 
249
  interface = gr.Interface(
250
  fn=predict,
251
  inputs=None, # `predict` function doesn't take direct inputs from Gradio
252
  outputs=gr.Number(label=f"Predicted AQI (Next {N_AHEAD} Hour(s))")
253
  )
254
 
 
 
 
 
 
 
 
 
 
 
255
  # --- Launch Gradio Interface ---
256
  if __name__ == "__main__":
257
  interface.launch()