# Standard library
import os
import warnings

# Third-party: Gradio UI, pandas/numpy for data, Plotly for charts,
# joblib for loading the pre-trained scaler / k-means / forecasting models.
import gradio as gr
import joblib
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go

# Silence library deprecation / chained-assignment noise in the app logs.
warnings.filterwarnings('ignore')
| |
|
| | |
# ── One-time asset loading (models + historical data), executed at import ──

def find_file(filename, search_paths=('./', './data/')):
    """Return the first existing path to *filename* under *search_paths*, or None.

    Uses a tuple default (not a list) to avoid the mutable-default pitfall.
    """
    for path in search_paths:
        filepath = os.path.join(path, filename)
        if os.path.exists(filepath):
            # Fixed: the message previously printed a hard-coded placeholder
            # instead of the actual file name being searched for.
            print(f"Found '{filename}' at: {filepath}")
            return filepath
    return None


try:
    scaler_path = find_file('scaler.joblib')
    kmeans_path = find_file('kmeans_model.joblib')
    forecasting_path = find_file('forecasting_models.joblib')
    data_path = find_file('consolidated_farm_data.csv')

    if not all([scaler_path, kmeans_path, forecasting_path, data_path]):
        raise FileNotFoundError("Could not find all required model (.joblib) and data (.csv) files.")

    scaler = joblib.load(scaler_path)
    kmeans_model = joblib.load(kmeans_path)
    forecasting_models = joblib.load(forecasting_path)
    df_historical = pd.read_csv(data_path)
    df_historical['timestamp'] = pd.to_datetime(df_historical['timestamp'])

    ALL_FARMS = sorted(df_historical['farm_name'].unique())
    # Static lat/lon per farm; presumably the Medina region — TODO confirm with data owner.
    FARM_COORDINATES = {
        'alia': [24.434117, 39.624376], 'Abdula altazi': [24.499210, 39.661664],
        'albadr': [24.499454, 39.666633], 'alhabibah': [24.499002, 39.667079],
        'alia almadinah': [24.450111, 39.627500], 'almarbad': [24.442014, 39.628323],
        'alosba': [24.431591, 39.605149], 'abuonoq': [24.494620, 39.623123],
        'wahaa nakeel': [24.442692, 39.623028], 'wahaa 2': [24.442388, 39.621116]
    }
    farm_coords_df = pd.DataFrame.from_dict(
        FARM_COORDINATES, orient='index', columns=['lat', 'lon']
    ).reset_index().rename(columns={'index': 'farm_name'})

except FileNotFoundError as e:
    # Fail fast with chained context: the app cannot run without its assets.
    raise FileNotFoundError(f"CRITICAL ERROR: {e}") from e
| |
|
| |
|
| | |
def get_performance_report():
    """Assign every farm a performance tier via the pre-trained k-means model.

    Aggregates per-farm NDVI/EVI statistics, clusters them with the loaded
    scaler + model, and labels clusters Tier 1..3 by descending mean NDVI.
    Returns a DataFrame with farm_name, Performance Tier, mean_ndvi, mean_evi.
    """
    feature_cols = ['mean_ndvi', 'mean_evi', 'std_ndvi']
    stats = (
        df_historical.groupby('farm_name')
        .agg(mean_ndvi=('NDVI', 'mean'), mean_evi=('EVI', 'mean'), std_ndvi=('NDVI', 'std'))
        .reset_index()
        .dropna()
    )
    stats['cluster'] = kmeans_model.predict(scaler.transform(stats[feature_cols]))

    # Rank cluster centres (back in original units) by mean NDVI to name the tiers.
    centers = pd.DataFrame(
        scaler.inverse_transform(kmeans_model.cluster_centers_), columns=feature_cols
    )
    ranked = centers.sort_values(by='mean_ndvi', ascending=False).index
    tier_map = {ranked[0]: 'Tier 1 (High)', ranked[1]: 'Tier 2 (Medium)', ranked[2]: 'Tier 3 (Low)'}
    stats['Performance Tier'] = stats['cluster'].map(tier_map)

    return stats[['farm_name', 'Performance Tier', 'mean_ndvi', 'mean_evi']].sort_values('Performance Tier')
| |
|
def detect_and_classify_anomalies(farm_name):
    """Detect and label sharp week-over-week drops in vegetation indices.

    Resamples the farm's history to weekly means, takes first differences,
    and flags weeks whose NDVI drop exceeds an adaptive threshold
    (1.5x the 12-week rolling std of changes, with fixed fallbacks for the
    early weeks where rolling stats are unavailable).

    Returns (anomalies_dataframe, plotly_figure).
    """
    history = df_historical[df_historical['farm_name'] == farm_name].set_index('timestamp').sort_index()
    weekly = history[['NDVI', 'NDWI', 'SAR_VV']].resample('W').mean().interpolate(method='linear')
    deltas = weekly.diff().dropna()

    rolling_std = deltas.rolling(window=12, min_periods=4).std()
    thresholds = {col: rolling_std[col] * 1.5 for col in ('NDVI', 'NDWI', 'SAR_VV')}

    def classify(date, delta):
        # Fixed fallbacks cover dates before the rolling window is populated.
        t_ndvi = thresholds['NDVI'].get(date, 0.07)
        t_ndwi = thresholds['NDWI'].get(date, 0.07)
        t_sar = thresholds['SAR_VV'].get(date, 1.0)
        if delta['NDVI'] < -t_ndvi and delta['SAR_VV'] < -t_sar:
            return 'Harvest Event'
        if delta['NDVI'] < -t_ndvi and delta['NDWI'] < -t_ndwi:
            return 'Potential Drought Stress'
        if delta['NDVI'] < -t_ndvi:
            return 'General Stress Event'
        return "Normal"

    events = []
    for date, delta in deltas.iterrows():
        label = classify(date, delta)
        if label != "Normal":
            events.append({'Date': date, 'Classification': label, 'NDVI Change': f"{delta['NDVI']:.3f}"})

    fig = go.Figure()
    fig.add_trace(go.Scatter(x=history.index, y=history['NDVI'], mode='lines', name='NDVI', line=dict(color='green')))
    palette = {'Harvest Event': 'red', 'Potential Drought Stress': 'orange', 'General Stress Event': 'purple'}

    # Mark each anomaly with a dashed vertical line plus a label above the axis.
    for event in events:
        when = event['Date'].to_pydatetime()
        color = palette.get(event['Classification'])
        fig.add_shape(
            type='line',
            x0=when, y0=0, x1=when, y1=1,
            yref='paper',
            line=dict(color=color, width=2, dash='dash')
        )
        fig.add_annotation(
            x=when, y=1.0, yref='paper',
            text=event['Classification'],
            showarrow=False,
            yshift=10,
            font=dict(color=color)
        )

    fig.update_layout(title=f'NDVI Timeline & Detected Anomalies for {farm_name}', xaxis_title='Date', yaxis_title='NDVI')

    table_rows = [
        {'Date': e['Date'].strftime('%Y-%m-%d'), 'Classification': e['Classification'], 'NDVI Change': e['NDVI Change']}
        for e in events
    ]
    return pd.DataFrame(table_rows), fig
| |
|
def run_forecast(farm_name):
    """Forecast weekly NDVI for the next 12 weeks (~3 months) for *farm_name*.

    The farm's trained model predicts from day-of-year plus EVI/NDWI held
    constant at their last observed historical values.

    Returns (plotly_figure, dataframe_of_dated_predictions).
    Raises KeyError if no trained model exists for the farm.
    """
    # Robustness fix: .get() returned None for unknown farms, which later
    # surfaced as an opaque AttributeError on .predict(). Fail clearly instead.
    if farm_name not in forecasting_models:
        raise KeyError(f"No forecasting model available for farm '{farm_name}'.")
    model = forecasting_models[farm_name]

    last_date = df_historical['timestamp'].max()
    future_dates = pd.to_datetime(pd.date_range(start=last_date, periods=12, freq='W'))
    future_df = pd.DataFrame(index=future_dates)
    future_df['day_of_year'] = future_df.index.dayofyear
    farm_data = df_historical[df_historical['farm_name'] == farm_name]
    # Exogenous features pinned to their latest observed values.
    future_df['EVI'] = farm_data['EVI'].iloc[-1]
    future_df['NDWI'] = farm_data['NDWI'].iloc[-1]
    predictions = model.predict(future_df[['day_of_year', 'EVI', 'NDWI']])

    fig = go.Figure()
    fig.add_trace(go.Scatter(x=farm_data['timestamp'], y=farm_data['NDVI'], mode='lines', name='Historical NDVI'))
    fig.add_trace(go.Scatter(x=future_dates, y=predictions, mode='lines', name='Forecasted NDVI', line=dict(color='red', dash='dash')))
    fig.update_layout(title=f'3-Month NDVI Forecast for {farm_name}')
    return fig, pd.DataFrame({'Forecast Date': future_dates.strftime('%Y-%m-%d'), 'Predicted NDVI': np.round(predictions, 3)})
| |
|
def plot_tier_distribution(report_df):
    """Return a bar chart of how many farms fall into each performance tier."""
    counts = report_df['Performance Tier'].value_counts().reset_index()
    counts.columns = ['Performance Tier', 'Count']
    tier_colors = {'Tier 1 (High)': 'green', 'Tier 2 (Medium)': 'orange', 'Tier 3 (Low)': 'red'}
    fig = px.bar(
        counts,
        x='Performance Tier',
        y='Count',
        title='Farm Distribution by Performance Tier',
        color='Performance Tier',
        text_auto=True,
        color_discrete_map=tier_colors,
    )
    # The bar colors already encode the tier, so the legend is redundant.
    fig.update_layout(showlegend=False)
    return fig
| |
|
| | |
# Computed once at startup; reused by the overview table and the tier chart.
df_performance_report = get_performance_report()
| |
|
# ── Gradio UI: three tabs (overview, anomaly detection, forecasting) ──
with gr.Blocks(theme=gr.themes.Soft(), title="Palm Farm Intelligence") as demo:
    gr.Markdown("# Palm Farm Intelligence Platform")

    with gr.Tabs():
        with gr.TabItem("Performance Overview"):
            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### All Farms Performance Tiers")
                    gr.DataFrame(df_performance_report)
                    gr.Markdown("### Tier Distribution")
                    tier_plot = gr.Plot()
                with gr.Column(scale=2):
                    gr.Markdown("### Farm Locations")
                    map_plot = gr.Plot()

        with gr.TabItem(" Anomaly Detection"):
            gr.Markdown("### Intelligent Anomaly Detection")
            anomaly_farm_selector = gr.Dropdown(ALL_FARMS, label="Select a Farm", value=ALL_FARMS[0])
            with gr.Row():
                anomaly_table = gr.DataFrame(headers=["Date", "Classification", "NDVI Change"])
                anomaly_plot = gr.Plot()

        with gr.TabItem(" NDVI Forecasting"):
            gr.Markdown("### 3-Month Vegetation Health Forecast")
            forecast_farm_selector = gr.Dropdown(ALL_FARMS, label="Select Farm to Forecast", value=ALL_FARMS[0])
            forecast_plot = gr.Plot()
            forecast_data = gr.DataFrame()

    # Dropdown change handlers: each refreshes its tab's table/plot pair.
    # Thin wrappers kept so the registered callback names stay stable.
    def update_anomaly_view(farm_name):
        return detect_and_classify_anomalies(farm_name)
    anomaly_farm_selector.change(fn=update_anomaly_view, inputs=anomaly_farm_selector, outputs=[anomaly_table, anomaly_plot])

    def update_forecast_view(farm_name):
        return run_forecast(farm_name)
    forecast_farm_selector.change(fn=update_forecast_view, inputs=forecast_farm_selector, outputs=[forecast_plot, forecast_data])

    def initial_load():
        # Populate every tab on first page load: farm map, tier chart, and
        # the default (first) farm's anomaly and forecast views.
        fig_map = px.scatter_mapbox(farm_coords_df, lat="lat", lon="lon", hover_name="farm_name",
                                    color_discrete_sequence=["green"], zoom=8, height=500)
        fig_map.update_layout(mapbox_style="open-street-map", margin={"r":0,"t":0,"l":0,"b":0})
        fig_tier = plot_tier_distribution(df_performance_report)
        an_table, an_plot = detect_and_classify_anomalies(ALL_FARMS[0])
        fc_plot, fc_data = run_forecast(ALL_FARMS[0])
        return fig_map, fig_tier, an_table, an_plot, fc_plot, fc_data

    demo.load(fn=initial_load, outputs=[map_plot, tier_plot, anomaly_table, anomaly_plot, forecast_plot, forecast_data])
| |
|
# Script entry point; debug=True surfaces handler tracebacks in the console.
if __name__ == "__main__":
    demo.launch(debug=True)