# app.py — soil-moisture clustering & mapping demo
# (origin: AzizWazir, "Update app.py", commit 0dd468a verified)
import pandas as pd
import numpy as np
import folium
from sklearn.cluster import KMeans
import gradio as gr
from sklearn.preprocessing import StandardScaler
def load_data(file):
    """Read the uploaded CSV (path or file-like object) into a DataFrame."""
    return pd.read_csv(file)
def process_data(data, n_clusters=3):
    """Standardize features and assign a KMeans cluster label per row.

    Args:
        data: DataFrame that must contain numeric 'temperature', 'humidity'
            and 'soil_type' columns (NOTE(review): 'soil_type' is fed straight
            into the scaler, so it is assumed to be numerically encoded —
            confirm against the input CSV).
        n_clusters: number of moisture clusters to form (default 3, matching
            the original hard-coded value).

    Returns:
        The same DataFrame with a new integer 'moisture_category' column.
        NOTE(review): KMeans labels are arbitrary — label 0 is not necessarily
        "Low" moisture; order clusters by centroid if a ranking is needed.
    """
    feature_cols = ['temperature', 'humidity', 'soil_type']
    X = data[feature_cols]

    # Standardize so each feature contributes equally to the distance metric.
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)

    # n_init is set explicitly: its default changed in scikit-learn 1.4
    # (10 -> 'auto'), and pinning it keeps results stable across versions.
    kmeans = KMeans(n_clusters=n_clusters, n_init=10, random_state=42)
    data['moisture_category'] = kmeans.fit_predict(X_scaled)
    return data
def create_map(data):
    """Build a folium map with one colored circle marker per data row.

    Marker color encodes 'moisture_category': 0 -> blue, 1 -> green,
    anything else -> red. The map is centered on the mean coordinates.
    """
    center = [data['latitude'].mean(), data['longitude'].mean()]
    fmap = folium.Map(location=center, zoom_start=12)

    palette = {0: 'blue', 1: 'green'}
    for _, record in data.iterrows():
        marker = folium.CircleMarker(
            location=[record['latitude'], record['longitude']],
            radius=10,
            color=palette.get(record['moisture_category'], 'red'),
            fill=True,
        )
        marker.add_to(fmap)

    return fmap
def soil_moisture_mapping(file):
    """End-to-end pipeline: load CSV, cluster moisture, render a folium map.

    Args:
        file: path or file-like object for the uploaded CSV.

    Returns:
        An HTML string of the rendered map. Gradio's "html" output component
        expects a string — returning the raw folium.Map object (as the
        original code did) does not render correctly, so the map is
        serialized via its _repr_html_() representation.
    """
    data = load_data(file)
    processed_data = process_data(data)
    map_result = create_map(processed_data)
    # Bug fix: serialize the folium map to HTML for the Gradio "html" output.
    return map_result._repr_html_()
# Gradio interface: user uploads a CSV, receives the rendered map as HTML.
iface = gr.Interface(fn=soil_moisture_mapping, inputs="file", outputs="html", live=True)

# Launch only when run as a script, so importing this module (e.g. in tests)
# does not start a web server.
if __name__ == "__main__":
    iface.launch()