HashirAwaiz committed on
Commit
ee8f3db
·
verified ·
1 Parent(s): 7b37976

Upload train.py

Browse files
Files changed (1) hide show
  1. src/train.py +140 -0
src/train.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

import joblib
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.metrics import mean_squared_error, accuracy_score, classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
10
+
11
# Paths
# Input: processed wildfire dataset (expected to be produced by preprocess.py,
# per the error message below when it is missing/empty).
DATA_PATH = "data/processed/california_wildfire.csv"
# Output directory where every fitted model artifact (.pkl) is written.
MODEL_DIR = "models"
14
+
15
# Weather covariates shared by the regression, classification, and PCA tasks:
# min/max temperature, min/max relative humidity, wind speed, precipitation,
# and energy release component.
_WEATHER_FEATURES = ['tmmn', 'tmmx', 'rmin', 'rmax', 'vs', 'pr', 'erc']


def _risk_level(bi_val):
    """Map a Burning Index value to a categorical risk label.

    Thresholds: < 40 -> 'Low', 40-79 -> 'Medium', >= 80 -> 'High'.
    """
    if bi_val < 40:
        return 'Low'
    if bi_val < 80:
        return 'Medium'
    return 'High'


def _train_regression(df):
    """Task 1: regression — predict fire intensity (Burning Index 'bi')."""
    print("\nπŸ”₯ Training Task 1: Regression (Predict Burning Index)...")

    X = df[_WEATHER_FEATURES]
    y = df['bi']

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    reg_model = RandomForestRegressor(
        n_estimators=50, max_depth=10, n_jobs=-1, random_state=42
    )
    reg_model.fit(X_train, y_train)

    rmse = np.sqrt(mean_squared_error(y_test, reg_model.predict(X_test)))
    print(f"βœ… Regression RMSE: {rmse:.4f}")

    joblib.dump(reg_model, f"{MODEL_DIR}/regression_model.pkl")


def _train_classification(df):
    """Task 2: classification — predict a fire risk level derived from 'bi'.

    Adds a 'risk_level' column to *df*, label-encodes it, trains a
    RandomForestClassifier on the weather features, and saves both the model
    and the LabelEncoder (the encoder is needed to decode predictions later).
    """
    print("\n⚠️ Training Task 2: Classification (Fire Risk Level)...")

    df['risk_level'] = df['bi'].apply(_risk_level)

    # Encode Target (Low=0, Medium=1, High=2)
    le = LabelEncoder()
    y = le.fit_transform(df['risk_level'])
    X = df[_WEATHER_FEATURES]

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    clf_model = RandomForestClassifier(
        n_estimators=50, max_depth=10, n_jobs=-1, random_state=42
    )
    clf_model.fit(X_train, y_train)

    acc = accuracy_score(y_test, clf_model.predict(X_test))
    print(f"βœ… Classification Accuracy: {acc:.4f}")

    joblib.dump(clf_model, f"{MODEL_DIR}/classification_model.pkl")
    joblib.dump(le, f"{MODEL_DIR}/label_encoder.pkl")


def _train_clustering(df):
    """Task 3: clustering — group records by location (lat/lon) and 'bi'."""
    print("\n🌍 Training Task 3: Clustering (Recovery Zones)...")

    X = df[['latitude', 'longitude', 'bi']]

    kmeans = KMeans(n_clusters=5, random_state=42, n_init=10)
    kmeans.fit(X)

    joblib.dump(kmeans, f"{MODEL_DIR}/clustering_model.pkl")
    print("βœ… Clustering Model Saved.")


def _train_pca(df):
    """Task 4: dimensionality reduction — 2D PCA over the weather features."""
    print("\n🧩 Training Task 4: Dimensionality Reduction (PCA)...")

    pca = PCA(n_components=2)
    pca.fit(df[_WEATHER_FEATURES])

    joblib.dump(pca, f"{MODEL_DIR}/pca_model.pkl")
    print("βœ… PCA Model Saved.")


def _train_seasonality(df):
    """Task 5: time series — monthly mean Burning Index lookup table.

    Skipped (with a warning) when the dataset has no 'datetime' column.
    Adds a 'month' column to *df* as a side effect.
    """
    print("\nπŸ“ˆ Training Task 5: Time Series (Seasonality)...")

    if 'datetime' not in df.columns:
        print("⚠️ Skipping Seasonality Task: 'datetime' column not found")
        return

    df['datetime'] = pd.to_datetime(df['datetime'])
    df['month'] = df['datetime'].dt.month

    # Month -> average Burning Index; saved as a plain dict "model".
    seasonal_trend = df.groupby('month')['bi'].mean().to_dict()

    joblib.dump(seasonal_trend, f"{MODEL_DIR}/seasonal_model.pkl")
    print("βœ… Seasonal Model Saved.")


def train_all_tasks():
    """Train and persist all five wildfire models.

    Reads the processed dataset from DATA_PATH and writes one artifact per
    task into MODEL_DIR: regression (Burning Index), classification (risk
    level + label encoder), KMeans clustering, 2D PCA, and a monthly
    seasonality lookup. Returns early (after printing an error) when the
    dataset is empty.
    """
    print("πŸš€ Loading Processed Data...")
    df = pd.read_csv(DATA_PATH)

    # Ensure we have data
    if df.empty:
        print("❌ Error: Dataset is empty. Run preprocess.py first.")
        return

    # Fix: joblib.dump raises FileNotFoundError when the output directory
    # is missing, so create it up front.
    os.makedirs(MODEL_DIR, exist_ok=True)

    _train_regression(df)
    _train_classification(df)
    _train_clustering(df)
    _train_pca(df)
    _train_seasonality(df)

    print("\nπŸŽ‰ All Systems Go! Models are ready in 'models/'")
    print("πŸ“ Models created:")
    print(" - regression_model.pkl")
    print(" - classification_model.pkl")
    print(" - label_encoder.pkl")
    print(" - clustering_model.pkl")
    print(" - pca_model.pkl")
    print(" - seasonal_model.pkl")
138
+
139
# Script entry point: train and persist all model artifacts when run directly.
if __name__ == "__main__":
    train_all_tasks()