HashirAwaiz committed
Commit 081093c · verified · 1 Parent(s): 0acc00b

Create flows/ingestion_flow.py

Files changed (1)
  1. flows/ingestion_flow.py +44 -0
flows/ingestion_flow.py ADDED
@@ -0,0 +1,44 @@
+ from prefect import flow, task
+ import os
+ import pandas as pd
+
+ # Paths for the raw input and the processed output written downstream
+ RAW_DATA_PATH = "data/raw/california_wildfire.csv"
+ PROCESSED_DATA_PATH = "data/processed/Wildfire_Dataset.csv"
+
+ @task(name="Check Raw Data")
+ def check_raw_data_exists():
+     """Simulates checking a data lake or Kaggle for new files."""
+     if os.path.exists(RAW_DATA_PATH):
+         print(f"✅ Raw data found at: {RAW_DATA_PATH}")
+         return True
+     else:
+         print("❌ Raw data missing! Please download the dataset.")
+         raise FileNotFoundError("Raw dataset not found.")
+
+ @task(name="Validate Schema")
+ def validate_schema():
+     """Ensures the raw data has the columns we need for training."""
+     # Read only the first few rows so the check stays fast
+     df = pd.read_csv(RAW_DATA_PATH, nrows=5)
+
+     # Columns the training pipeline depends on
+     required_columns = ['latitude', 'longitude', 'tmmn', 'tmmx', 'bi']
+     missing = [col for col in required_columns if col not in df.columns]
+
+     if missing:
+         raise ValueError(f"❌ Data Validation Failed. Missing columns: {missing}")
+     print("✅ Schema Validation Passed.")
+
+ @flow(name="Wildfire Data Ingestion")
+ def data_ingestion_flow():
+     """The main control flow for data ingestion."""
+     print("🌊 Starting Data Ingestion Pipeline...")
+     exists = check_raw_data_exists()
+     if exists:
+         validate_schema()
+         # In a real app, you would trigger the preprocessing script here
+         print("🚀 Data is ready for the Training Pipeline.")
+
+ if __name__ == "__main__":
+     data_ingestion_flow()
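
The flow ends at a placeholder comment where preprocessing would be triggered. A minimal sketch of what that hand-off could look like is below; preprocess_data is a hypothetical task (not part of this commit), and dropping rows with missing required fields stands in for whatever the real preprocessing script would do.

# Sketch only: a hypothetical preprocessing task that could replace the
# "trigger the preprocessing script here" placeholder in data_ingestion_flow().
import os

import pandas as pd
from prefect import task

RAW_DATA_PATH = "data/raw/california_wildfire.csv"
PROCESSED_DATA_PATH = "data/processed/Wildfire_Dataset.csv"

@task(name="Preprocess Data")
def preprocess_data():
    """Drop rows missing the required fields and persist the cleaned file."""
    df = pd.read_csv(RAW_DATA_PATH)
    df = df.dropna(subset=["latitude", "longitude", "tmmn", "tmmx", "bi"])
    os.makedirs(os.path.dirname(PROCESSED_DATA_PATH), exist_ok=True)
    df.to_csv(PROCESSED_DATA_PATH, index=False)
    print(f"🧹 Saved {len(df)} cleaned rows to {PROCESSED_DATA_PATH}")

Inside data_ingestion_flow(), the preprocess_data() call would go right after validate_schema(). If the flow should run on a schedule rather than via python flows/ingestion_flow.py, recent Prefect releases (2.10+) allow the __main__ block to call data_ingestion_flow.serve(name="wildfire-ingestion", cron="0 6 * * *") instead; treat that as a suggestion, not part of the committed file.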