|
|
import os

import joblib
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.metrics import mean_squared_error, accuracy_score, classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
|
|
|
|
|
|
|
|
|
# Path to the preprocessed dataset (expected to be produced by preprocess.py).
DATA_PATH = "data/processed/california_wildfire.csv"

# Directory where every trained model artifact (.pkl) is written.
MODEL_DIR = "models"
|
|
|
|
|
|
def train_all_tasks():
    """Train and persist all five wildfire models from the processed dataset.

    Reads ``DATA_PATH`` into a DataFrame and writes one joblib artifact per
    task into ``MODEL_DIR``:

    1. Regression      - predict the Burning Index ('bi')   -> regression_model.pkl
    2. Classification  - Low/Medium/High risk from 'bi'     -> classification_model.pkl (+ label_encoder.pkl)
    3. Clustering      - KMeans recovery zones              -> clustering_model.pkl
    4. PCA             - 2-component reduction of features  -> pca_model.pkl
    5. Seasonality     - mean 'bi' per calendar month       -> seasonal_model.pkl

    Returns early with a message if the dataset file is missing or empty.
    """
    print("Loading Processed Data...")
    try:
        df = pd.read_csv(DATA_PATH)
    except FileNotFoundError:
        # Consistent with the empty-dataset case: report and bail out
        # instead of crashing with a raw traceback.
        print(f"[ERROR] '{DATA_PATH}' not found. Run preprocess.py first.")
        return

    if df.empty:
        print("[ERROR] Dataset is empty. Run preprocess.py first.")
        return

    # joblib.dump raises if the target directory does not exist yet.
    os.makedirs(MODEL_DIR, exist_ok=True)

    # Weather/fuel covariates shared by the regression, classification and
    # PCA tasks. Column names follow the gridMET convention (min/max temp,
    # min/max relative humidity, wind speed, precipitation, energy release
    # component) -- assumed present in the processed CSV; verify in preprocess.py.
    features = ['tmmn', 'tmmx', 'rmin', 'rmax', 'vs', 'pr', 'erc']

    _train_regression(df, features)
    _train_classification(df, features)
    _train_clustering(df)
    _train_pca(df, features)
    _train_seasonal(df)

    print("\nAll Systems Go! Models are ready in 'models/'")
    print("Models created:")
    for artifact in (
        "regression_model.pkl",
        "classification_model.pkl",
        "label_encoder.pkl",
        "clustering_model.pkl",
        "pca_model.pkl",
        "seasonal_model.pkl",
    ):
        print(f"  - {artifact}")


def _train_regression(df, features):
    """Task 1: RandomForest regression predicting the Burning Index ('bi')."""
    print("\nTraining Task 1: Regression (Predict Burning Index)...")

    X = df[features]
    y = df['bi']
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    model = RandomForestRegressor(
        n_estimators=50, max_depth=10, n_jobs=-1, random_state=42
    )
    model.fit(X_train, y_train)

    rmse = np.sqrt(mean_squared_error(y_test, model.predict(X_test)))
    print(f"[OK] Regression RMSE: {rmse:.4f}")

    joblib.dump(model, f"{MODEL_DIR}/regression_model.pkl")


def _train_classification(df, features):
    """Task 2: classify fire-risk level (Low/Medium/High) derived from 'bi'.

    Adds a 'risk_level' column to ``df`` as a side effect and also saves the
    fitted LabelEncoder so predictions can be mapped back to level names.
    """
    print("\nTraining Task 2: Classification (Fire Risk Level)...")

    def risk_level(bi_val):
        # Thresholds: bi < 40 -> Low, 40 <= bi < 80 -> Medium, bi >= 80 -> High.
        if bi_val < 40:
            return 'Low'
        if bi_val < 80:
            return 'Medium'
        return 'High'

    df['risk_level'] = df['bi'].apply(risk_level)

    le = LabelEncoder()
    y = le.fit_transform(df['risk_level'])
    X = df[features]

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    model = RandomForestClassifier(
        n_estimators=50, max_depth=10, n_jobs=-1, random_state=42
    )
    model.fit(X_train, y_train)

    acc = accuracy_score(y_test, model.predict(X_test))
    print(f"[OK] Classification Accuracy: {acc:.4f}")

    joblib.dump(model, f"{MODEL_DIR}/classification_model.pkl")
    joblib.dump(le, f"{MODEL_DIR}/label_encoder.pkl")


def _train_clustering(df):
    """Task 3: KMeans clustering of location + burn intensity into 5 zones."""
    print("\nTraining Task 3: Clustering (Recovery Zones)...")

    X = df[['latitude', 'longitude', 'bi']]

    # n_init=10 matches the pre-1.4 sklearn default explicitly, keeping the
    # result stable across library versions.
    kmeans = KMeans(n_clusters=5, random_state=42, n_init=10)
    kmeans.fit(X)

    joblib.dump(kmeans, f"{MODEL_DIR}/clustering_model.pkl")
    print("[OK] Clustering Model Saved.")


def _train_pca(df, features):
    """Task 4: fit a 2-component PCA over the weather/fuel features."""
    print("\nTraining Task 4: Dimensionality Reduction (PCA)...")

    pca = PCA(n_components=2)
    pca.fit(df[features])

    joblib.dump(pca, f"{MODEL_DIR}/pca_model.pkl")
    print("[OK] PCA Model Saved.")


def _train_seasonal(df):
    """Task 5: mean Burning Index per calendar month, saved as a plain dict.

    Skips gracefully when the dataset has no 'datetime' column. Adds a
    'month' column to ``df`` as a side effect.
    """
    print("\nTraining Task 5: Time Series (Seasonality)...")

    if 'datetime' not in df.columns:
        print("[WARN] Skipping Seasonality Task: 'datetime' column not found")
        return

    df['datetime'] = pd.to_datetime(df['datetime'])
    df['month'] = df['datetime'].dt.month

    # {month (1-12) -> mean 'bi'} lookup table; a dict keeps the artifact
    # trivially loadable without sklearn.
    seasonal_trend = df.groupby('month')['bi'].mean().to_dict()

    joblib.dump(seasonal_trend, f"{MODEL_DIR}/seasonal_model.pkl")
    print("[OK] Seasonal Model Saved.")
|
|
|
|
|
|
# Entry point: run the full training pipeline when executed as a script.
if __name__ == "__main__":

    train_all_tasks()