import gradio as gr
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Input
from tensorflow.keras.utils import custom_object_scope
import pickle
import os
import sys
import traceback
import requests
import pandas as pd
from datetime import datetime, timedelta, timezone

# TKAN is required; TKAT is optional and only needed if the saved model uses it.
from tkan import TKAN
try:
    from tkat import TKAT
except ImportError:
    print("TKAT library not found. If your model uses TKAT, make sure the library is installed.")
    TKAT = None
# --- MinMaxScaler ---
# This must be the same class definition used when the scalers were pickled,
# so that pickle can resolve it at load time.
class MinMaxScaler:
    def __init__(self, feature_axis=None, minmax_range=(0, 1)):
        self.feature_axis = feature_axis
        self.min_ = None
        self.max_ = None
        self.scale_ = None
        self.minmax_range = minmax_range
    def fit(self, X):
        if X.ndim == 3 and self.feature_axis is not None:
            axis = tuple(i for i in range(X.ndim) if i != self.feature_axis)
            self.min_ = np.min(X, axis=axis)
            self.max_ = np.max(X, axis=axis)
        elif X.ndim == 2:
            self.min_ = np.min(X, axis=0)
            self.max_ = np.max(X, axis=0)
        elif X.ndim == 1:
            self.min_ = np.min(X)
            self.max_ = np.max(X)
        else:
            raise ValueError("Data must be 1D, 2D, or 3D.")
        # Guard against zero-range (constant) features so transform() never divides by zero.
        self.scale_ = np.where((self.max_ - self.min_) == 0, 1.0, self.max_ - self.min_)
        return self
    def transform(self, X):
        X_scaled = (X - self.min_) / self.scale_
        X_scaled = X_scaled * (self.minmax_range[1] - self.minmax_range[0]) + self.minmax_range[0]
        return X_scaled

    def fit_transform(self, X):
        return self.fit(X).transform(X)

    def inverse_transform(self, X_scaled):
        X = (X_scaled - self.minmax_range[0]) / (self.minmax_range[1] - self.minmax_range[0])
        X = X * self.scale_ + self.min_
        return X
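
# Illustrative use of the scaler above (a minimal sketch; `windows` is a
# hypothetical (batch, time, feature) array, not part of the app flow):
#   scaler = MinMaxScaler(feature_axis=2)        # scale each feature independently
#   windows = np.random.rand(8, 24, 5)
#   scaled = scaler.fit_transform(windows)       # values land in [0, 1] per feature
#   restored = scaler.inverse_transform(scaled)  # round-trips back to the originals
#   assert np.allclose(windows, restored)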
# --- AQI Breakpoints and Calculation Logic ---
# Each breakpoint tuple is (conc_low, conc_high, aqi_low, aqi_high).
# NOTE: the CO breakpoints are on a mg/m^3 (CPCB-style) scale, while Open-Meteo
# reports carbon_monoxide in ug/m^3; keep whatever unit convention the training
# data used so the model inputs stay consistent.
aqi_breakpoints = {
    'pm25': [(0, 50, 0, 50), (51, 100, 51, 100), (101, 200, 101, 200), (201, 300, 201, 300)],
    'pm10': [(0, 50, 0, 50), (51, 100, 51, 100), (101, 250, 101, 200), (251, 350, 201, 300)],
    'co': [(0, 1.0, 0, 50), (1.1, 2.0, 51, 100), (2.1, 10.0, 101, 200), (10.1, 17.0, 201, 300)]
}
def calculate_sub_aqi(concentration, breakpoints):
    # Tuples are (conc_low, conc_high, aqi_low, aqi_high); unpack in that order
    # so non-symmetric bands (pm10, co) interpolate correctly.
    if not breakpoints:
        return np.nan
    for c_low, c_high, i_low, i_high in breakpoints:
        if c_low <= concentration <= c_high:
            if c_high == c_low:
                return i_low
            return ((i_high - i_low) / (c_high - c_low)) * (concentration - c_low) + i_low
    if concentration < breakpoints[0][0]:
        return breakpoints[0][2]   # below the lowest band: clamp to its AQI floor
    elif concentration > breakpoints[-1][1]:
        return breakpoints[-1][3]  # above the highest band: clamp to its AQI ceiling
    else:
        return np.nan
def calculate_overall_aqi(row, aqi_breakpoints):
    sub_aqis = []
    pollutant_mapping = {
        'pm2_5': 'pm25',
        'pm10': 'pm10',
        'carbon_monoxide': 'co',
    }
    for api_pollutant, internal_pollutant in pollutant_mapping.items():
        concentration = row.get(api_pollutant, np.nan)
        # pd.isna also handles None, which .get() can return for missing keys.
        if not pd.isna(concentration):
            sub_aqi = calculate_sub_aqi(concentration, aqi_breakpoints.get(internal_pollutant, []))
            sub_aqis.append(sub_aqi)
        else:
            sub_aqis.append(np.nan)
    return np.nanmax(sub_aqis) if sub_aqis and not all(np.isnan(sub_aqis)) else np.nan
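
# Worked example (values invented): a pm10 reading of 150 ug/m^3 falls in the
# (101, 250, 101, 200) band, so its sub-index interpolates to
#   (200 - 101) / (250 - 101) * (150 - 101) + 101 ~= 133.6
# and calculate_overall_aqi reports the max over all available sub-indices:
#   calculate_overall_aqi({'pm2_5': 40, 'pm10': 150, 'carbon_monoxide': 0.5},
#                         aqi_breakpoints)   # -> ~133.6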
# --- Configuration ---
MODEL_PATH = "best_model_TKAN_nahead_1 (2).keras"
INPUT_SCALER_PATH = "input_scaler.pkl"
TARGET_SCALER_PATH = "target_scaler.pkl"
SEQUENCE_LENGTH = 24  # Matches the notebook
NUM_INPUT_FEATURES = 5  # ['calculated_aqi', 'temp', 'pm25', 'pm10', 'co']
N_AHEAD = 1  # Matches the notebook
# --- Open-Meteo API Configuration ---
OPENMETEO_AIR_QUALITY_API_URL = "https://air-quality-api.open-meteo.com/v1/air-quality"
# The standard weather forecast API supplies recent hourly temperature.
OPENMETEO_WEATHER_API_URL = "https://api.open-meteo.com/v1/forecast"
# Replace with the actual latitude and longitude for your location.
LATITUDE = 17.33
LONGITUDE = 78.27
AIR_QUALITY_PARAMETERS = ["pm10", "pm2_5", "carbon_monoxide"]
WEATHER_PARAMETERS_FOR_TEMP = ["temperature_2m"]
# Request timestamps in UTC so they compare cleanly with the UTC window
# computed below (timezone="auto" would return local-time strings instead).
TIMEZONE = "UTC"
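
# Illustrative request the air-quality configuration above produces
# (the dates are examples only):
#   https://air-quality-api.open-meteo.com/v1/air-quality?latitude=17.33&longitude=78.27
#     &hourly=pm10,pm2_5,carbon_monoxide&timezone=UTC
#     &start_date=2024-05-05&end_date=2024-05-06&domains=auto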
# --- Ensure Required Files Exist ---
for required_path, label in [
    (MODEL_PATH, "Model"),
    (INPUT_SCALER_PATH, "Input scaler"),
    (TARGET_SCALER_PATH, "Target scaler"),
]:
    if not os.path.exists(required_path):
        print(f"Error: {label} file not found at {required_path}")
        sys.exit(f"{label} file missing. Exiting.")
# --- Load Model and Scalers ---
custom_objects = {"TKAN": TKAN, "MinMaxScaler": MinMaxScaler}
if TKAT is not None:
    custom_objects["TKAT"] = TKAT

model = None
input_scaler = None
target_scaler = None
try:
    with custom_object_scope(custom_objects):
        model = load_model(MODEL_PATH)
    print("Model loaded successfully!")
    model.summary()
    with open(INPUT_SCALER_PATH, 'rb') as f:
        input_scaler = pickle.load(f)
    print(f"Input scaler loaded successfully from {INPUT_SCALER_PATH}")
    with open(TARGET_SCALER_PATH, 'rb') as f:
        target_scaler = pickle.load(f)
    print(f"Target scaler loaded successfully from {TARGET_SCALER_PATH}")
except Exception as e:
    print(f"Error during loading: {e}")
    traceback.print_exc()
    sys.exit("Failed to load model or scaler(s). Exiting.")
# --- Data Retrieval from Open-Meteo API ---
def get_latest_data_sequence(sequence_length):
    """
    Retrieves the latest sequence of air quality and temperature data from
    Open-Meteo for the previous `sequence_length` hours (ending at the current
    completed hour), calculates historical AQI, and formats it for model input.

    Args:
        sequence_length (int): The length of the historical sequence required (e.g., 24).

    Returns:
        np.ndarray: The historical data sequence with shape
        (sequence_length, NUM_INPUT_FEATURES), or None on failure.
    """
    print(f"Attempting to retrieve data for the last {sequence_length} hours from Open-Meteo...")

    # The API takes YYYY-MM-DD dates; we need the window from `sequence_length`
    # hours ago up to the current completed hour, so request both calendar days.
    now_utc = datetime.now(timezone.utc)
    # Round down to the nearest hour.
    current_hour_utc = now_utc.replace(minute=0, second=0, microsecond=0)
    end_date_api = current_hour_utc.strftime('%Y-%m-%d')
    start_time_utc = current_hour_utc - timedelta(hours=sequence_length)
    start_date_api = start_time_utc.strftime('%Y-%m-%d')
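    # Example: if it is now 2024-05-06 14:37 UTC, current_hour_utc is 14:00 and a
    # 24-point window spans 2024-05-05 15:00 through 2024-05-06 14:00 inclusive,
    # so start_date/end_date cover both calendar days.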
    # --- Fetch Air Quality Data ---
    aq_params = {
        "latitude": LATITUDE,
        "longitude": LONGITUDE,
        "hourly": ",".join(AIR_QUALITY_PARAMETERS),
        "timezone": TIMEZONE,
        "start_date": start_date_api,
        "end_date": end_date_api,
        "domains": "auto"
    }
    try:
        # A timeout keeps the app from hanging indefinitely if the API stalls.
        aq_response = requests.get(OPENMETEO_AIR_QUALITY_API_URL, params=aq_params, timeout=30)
        aq_response.raise_for_status()
        aq_data = aq_response.json()
        print("Air quality data retrieved.")
        if 'hourly' not in aq_data or 'time' not in aq_data['hourly']:
            print("Error: 'hourly' or 'time' not found in AQ response.")
            return None
        aq_hourly_data = aq_data['hourly']
        aq_timestamps = aq_hourly_data['time']
        aq_extracted_data = {param: aq_hourly_data.get(param, []) for param in AIR_QUALITY_PARAMETERS}
        df_aq = pd.DataFrame(aq_extracted_data, index=pd.to_datetime(aq_timestamps))
    except requests.exceptions.RequestException as e:
        print(f"Error fetching air quality data: {e}")
        return None
    except Exception as e:
        print(f"Error processing air quality data: {e}")
        traceback.print_exc()
        return None
    # --- Fetch Temperature Data ---
    def _nan_temperature_frame():
        # Fallback: NaN temperature columns aligned to the air-quality index.
        df = pd.DataFrame(index=df_aq.index)
        for param in WEATHER_PARAMETERS_FOR_TEMP:
            df[param] = np.nan
        return df

    temp_params = {
        "latitude": LATITUDE,
        "longitude": LONGITUDE,
        "hourly": ",".join(WEATHER_PARAMETERS_FOR_TEMP),
        "timezone": TIMEZONE,
        "start_date": start_date_api,
        "end_date": end_date_api,
        "models": "best_match"
    }
    try:
        temp_response = requests.get(OPENMETEO_WEATHER_API_URL, params=temp_params, timeout=30)
        temp_response.raise_for_status()
        temp_data = temp_response.json()
        print("Temperature data retrieved.")
        if 'hourly' not in temp_data or 'time' not in temp_data['hourly']:
            print("Error: 'hourly' or 'time' not found in temperature response.")
            print("Skipping temperature data due to missing fields.")
            df_temp = _nan_temperature_frame()
        else:
            temp_hourly_data = temp_data['hourly']
            temp_timestamps = temp_hourly_data['time']
            temp_extracted_data = {param: temp_hourly_data.get(param, []) for param in WEATHER_PARAMETERS_FOR_TEMP}
            df_temp = pd.DataFrame(temp_extracted_data, index=pd.to_datetime(temp_timestamps))
    except requests.exceptions.RequestException as e:
        print(f"Error fetching temperature data: {e}")
        print("Skipping temperature data due to API error.")
        df_temp = _nan_temperature_frame()
    except Exception as e:
        print(f"Error processing temperature data: {e}")
        traceback.print_exc()
        print("Skipping temperature data due to processing error.")
        df_temp = _nan_temperature_frame()
    # --- Merge DataFrames ---
    # Merge air quality and temperature data on timestamp.
    df_merged = pd.merge(df_aq, df_temp, left_index=True, right_index=True, how='outer')

    # --- Calculate Historical AQI ---
    df_merged['calculated_aqi'] = df_merged.apply(
        lambda row: calculate_overall_aqi(
            {'pm2_5': row.get('pm2_5'), 'pm10': row.get('pm10'), 'carbon_monoxide': row.get('carbon_monoxide')},
            aqi_breakpoints
        ),
        axis=1
    )
    # --- Process and Filter Merged Data ---
    df_merged.index = pd.to_datetime(df_merged.index)
    df_merged.sort_index(inplace=True)

    # Resample to a strict hourly frequency; forward- then backward-fill gaps.
    # ('h' is the current pandas alias; uppercase 'H' is deprecated.)
    df_processed = df_merged.resample('h').ffill().bfill()

    # With timezone="UTC" the API timestamps parse as naive UTC, so drop tzinfo
    # from the cutoffs before slicing. The window is the `sequence_length` hours
    # ending at (and including) the current completed hour.
    end_time = current_hour_utc.replace(tzinfo=None)
    sequence_start_time = end_time - timedelta(hours=sequence_length - 1)
    df_sequence = df_processed.loc[sequence_start_time:end_time]

    # Ensure we have exactly `sequence_length` data points.
    if len(df_sequence) != sequence_length:
        print(f"Error: Retrieved and processed data length ({len(df_sequence)}) does not match sequence length ({sequence_length}).")
        print(f"Expected timestamps from {sequence_start_time} to {end_time}. Got {df_sequence.index.min()} to {df_sequence.index.max()}.")
        print("Check API request time range and data availability.")
        return None
    # Rename Open-Meteo columns to the model's expected feature names.
    column_rename_map = {
        'temperature_2m': 'temp',
        'pm2_5': 'pm25',
        'carbon_monoxide': 'co',
        # 'pm10' and 'calculated_aqi' already match.
    }
    # Avoid an inplace rename on a .loc slice (it triggers SettingWithCopyWarning).
    df_sequence = df_sequence.rename(columns=column_rename_map)

    # Ensure all expected features are present, in the model's input order.
    model_features_order = ['calculated_aqi', 'temp', 'pm25', 'pm10', 'co']
    missing_columns = [col for col in model_features_order if col not in df_sequence.columns]
    if missing_columns:
        print(f"Error: Missing required columns in final sequence data: {missing_columns}")
        print("Ensure all expected features are fetched and named correctly.")
        return None

    # Select and reorder columns, then convert to a numpy array.
    data_sequence = df_sequence[model_features_order].values

    # Final shape check (redundant but safe).
    if data_sequence.shape != (sequence_length, NUM_INPUT_FEATURES):
        print(f"Error: Final data sequence shape {data_sequence.shape} does not match expected shape ({sequence_length}, {NUM_INPUT_FEATURES}).")
        return None

    print(f"Successfully prepared data sequence with shape {data_sequence.shape}")
    return data_sequence
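
# Illustrative layout of the returned array (row values invented):
#   data_sequence[0]  -> [calculated_aqi, temp, pm25, pm10, co] for the oldest hour
#   data_sequence[-1] -> the same five features for the current completed hour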
# --- Define Predict Function ---
def predict():
    """
    Retrieves the latest data sequence from Open-Meteo, preprocesses it,
    and makes a prediction.
    """
    # Raise gr.Error so failures surface in the UI instead of being coerced
    # into the numeric output component.
    if model is None or input_scaler is None or target_scaler is None:
        raise gr.Error("Model or scaler(s) not loaded. Check logs.")

    # 1. Get the latest historical data sequence from Open-Meteo.
    latest_data_sequence = get_latest_data_sequence(SEQUENCE_LENGTH)
    if latest_data_sequence is None:
        raise gr.Error("Failed to retrieve or process latest data sequence.")

    # Redundant shape check, but safe.
    if latest_data_sequence.shape != (SEQUENCE_LENGTH, NUM_INPUT_FEATURES):
        raise gr.Error(f"Retrieved data has incorrect shape {latest_data_sequence.shape}. Expected ({SEQUENCE_LENGTH}, {NUM_INPUT_FEATURES}).")

    # 2. Scale the sequence with the loaded input scaler (add a batch axis first).
    latest_data_sequence_with_batch = latest_data_sequence[np.newaxis, :, :]
    scaled_input_data = input_scaler.transform(latest_data_sequence_with_batch)

    # 3. Predict (the model outputs the scaled target).
    output = model.predict(scaled_input_data)

    # 4. Extract the scaled prediction, then 5. invert the target scaling.
    predicted_scaled_value = output[0][0]
    predicted_value = target_scaler.inverse_transform(np.array([[predicted_scaled_value]]))[0][0]
    return float(predicted_value)
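
# Quick local smoke test (illustrative): run the pipeline once without the UI.
# Assumes this file is saved as app.py, as is conventional on Spaces:
#   python -c "import app; print(app.predict())"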
# --- Gradio Interface ---
interface = gr.Interface(
    fn=predict,
    inputs=None,
    outputs=gr.Number(label=f"Predicted AQI (Next {N_AHEAD} Hour(s))")
)

# --- Launch Gradio Interface ---
if __name__ == "__main__":
    interface.launch()