# California_ISO/scripts/clean_and_impute.py
# Author: Xu Zhijian
# init commit (c2786b1)
import pandas as pd
import numpy as np
import json
import os
# --- Global Constants ---
INPUT_FILE = './data/California_ISO/raw_data/all_channel_raw/raw_fuelsource.parquet'
OUTPUT_FILE = './data/California_ISO/raw_data/fuelsource.parquet'
# NEW: Changed to a more descriptive name for the global log
OUTPUT_JSON = './data/California_ISO/downtime_log.json'
# Add any column names you want to drop from the DataFrame here.
COLUMNS_TO_DROP = ['OTHER']
# If a NaN segment's length (in resampled steps) is greater than or equal to
# this threshold, it's considered a downtime event and filled with 0.
# At a 5-minute grid, 24 steps == 2 hours of continuous missing data.
NAN_DOWNTIME_THRESHOLD = 24
# The time frequency to resample the data to (5min = 5 minutes).
# NOTE: the old '5T' alias is deprecated since pandas 2.2; '5min' resolves
# to the exact same offset and is the forward-compatible spelling.
TIME_FREQUENCY = '5min'
def find_nan_segments(series: pd.Series) -> list:
    """Locate runs of consecutive NaN values in *series*.

    Returns a list of ``(start, end, length)`` tuples, where ``start`` is the
    positional index of the first NaN of a run, ``end`` is one past the last
    NaN (exclusive), and ``length == end - start``.
    """
    runs = []
    mask = series.isna().to_numpy()
    run_start = None  # positional index where the current NaN run began
    for pos, missing in enumerate(mask):
        if missing:
            if run_start is None:
                run_start = pos
        elif run_start is not None:
            runs.append((run_start, pos, pos - run_start))
            run_start = None
    # Close out a run that extends to the very end of the series.
    if run_start is not None:
        runs.append((run_start, len(mask), len(mask) - run_start))
    return runs
def _load_downtime_log(log_path: str) -> list:
    """Return the list of downtime events stored at *log_path*.

    An absent file or undecodable JSON yields an empty list so processing
    can continue with a fresh log.
    """
    if os.path.exists(log_path):
        try:
            with open(log_path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except json.JSONDecodeError:
            print(f"Warning: Could not decode JSON from {log_path}. Starting with an empty log.")
            return []
    print("No existing downtime log found. A new one will be created.")
    return []


def process_data(input_path: str, output_parquet_path: str, output_json_path: str):
    """
    Loads, cleans, and processes time-series data, appending new downtime events
    to a central log file.

    Pipeline:
      1. Drop rows with non-increasing timestamps and unwanted columns.
      2. Resample onto a uniform TIME_FREQUENCY grid (gaps become NaN).
      3. NaN runs of at least NAN_DOWNTIME_THRESHOLD steps are treated as
         downtime: zero-filled and appended (deduplicated) to the JSON log.
         Shorter gaps are filled by linear interpolation.
      4. Save the cleaned frame to *output_parquet_path* and the updated log
         to *output_json_path*.
    """
    # --- 1. Load and Perform Initial Cleaning ---
    if not os.path.exists(input_path):
        print(f"Error: Input file not found -> {input_path}")
        return
    print(f"Loading data from {input_path}...")
    demand = pd.read_parquet(input_path)
    print("Performing initial data cleaning...")
    demand['Time'] = pd.to_datetime(demand['Time'])
    # Remove rows whose timestamp does not strictly increase (duplicates or
    # clock rollbacks); the later row of each offending pair is dropped.
    time_diffs = demand['Time'].diff().dt.total_seconds()
    invalid_time_indices = time_diffs[time_diffs <= 0].index
    if not invalid_time_indices.empty:
        print(f"Found and removed {len(invalid_time_indices)} rows with timestamp anomalies.")
        demand = demand.drop(invalid_time_indices)
    demand = demand.sort_values('Time').reset_index(drop=True)
    for col_to_drop in COLUMNS_TO_DROP:
        if col_to_drop in demand.columns:
            demand.drop(columns=[col_to_drop], inplace=True)
            print(f"Successfully removed specified column: '{col_to_drop}'")
    # --- 2. Resample to a Uniform Time Grid ---
    print(f"Resampling data to a uniform {TIME_FREQUENCY} frequency...")
    demand.set_index('Time', inplace=True)
    demand_resampled = demand.resample(TIME_FREQUENCY).asfreq()
    demand_resampled.reset_index(inplace=True)
    print(f"Resampling complete. Total rows: {len(demand_resampled)}")
    # --- 3. Load existing downtime records from the central JSON log ---
    print(f"Loading existing downtime records from {output_json_path}...")
    all_downtime_events = _load_downtime_log(output_json_path)
    # Set for fast duplicate checking. BUG FIX: the signature here must match
    # the 3-tuple built below -- (channel_key, start_time, end_time). The old
    # code stored 2-tuples, so previously logged events were never recognised
    # and got re-appended on every run. .get() tolerates legacy log entries
    # that may lack 'channel_key'.
    existing_events_set = {
        (event.get('channel_key'), event['start_time'], event['end_time'])
        for event in all_downtime_events
    }
    print(f"Found {len(all_downtime_events)} existing downtime records.")
    # --- 4. Process NaN Segments Channel by Channel ---
    print("\nProcessing missing data channel by channel...")
    data_cols = demand_resampled.columns.drop('Time')
    dataset_name = os.path.splitext(os.path.basename(output_parquet_path))[0]
    new_events_found = 0
    for col in data_cols:
        print(f" - Processing channel: {col}")
        channel_key = f"{dataset_name}_{col.replace(' ', '_')}"
        nan_segments = find_nan_segments(demand_resampled[col])
        for start_idx, end_idx, length in nan_segments:
            if length >= NAN_DOWNTIME_THRESHOLD:
                # Long outage: zero-fill. .loc slicing is end-inclusive,
                # hence end_idx - 1 (end_idx itself is exclusive).
                demand_resampled.loc[start_idx:end_idx - 1, col] = 0
                start_time_dt = demand_resampled.at[start_idx, 'Time']
                end_time_dt = demand_resampled.at[end_idx - 1, 'Time']
                start_time_str = start_time_dt.strftime('%Y-%m-%d %H:%M:%S')
                end_time_str = end_time_dt.strftime('%Y-%m-%d %H:%M:%S')
                # Check if this event is already recorded
                event_signature = (channel_key, start_time_str, end_time_str)
                if event_signature not in existing_events_set:
                    # If not, create a new event object and append it
                    new_event = {
                        "channel_key": channel_key,
                        "start_time": start_time_str,
                        "end_time": end_time_str,
                        "start_index": int(start_idx),
                        "end_index": int(end_idx - 1),
                        "duration_in_steps": int(length)
                    }
                    all_downtime_events.append(new_event)
                    existing_events_set.add(event_signature)  # Update the set to prevent re-adding
                    new_events_found += 1
                    print(f" -> NEW downtime event found (length: {length}).")
                else:
                    # If it exists, just skip
                    print(f" -> Already recorded downtime event (length: {length}).")
        # Linearly interpolate the column to fill remaining (shorter) gaps.
        # BUG FIX: the old chained inplace=True call operated on a temporary
        # column selection and is a silent no-op under pandas copy-on-write;
        # assign the interpolated result back instead.
        demand_resampled[col] = demand_resampled[col].interpolate(method='linear')
    print(f"\nFound and recorded {new_events_found} new downtime events in total.")
    # --- 5. Final Check and Save Results ---
    print("\nFinal data check...")
    final_nan_counts = demand_resampled.isna().sum()
    if final_nan_counts.sum() == 0:
        print("All missing values have been successfully filled.")
    else:
        # Leading NaN runs shorter than the threshold cannot be interpolated
        # (no anchor value before them) and will surface here.
        print("Warning: Missing values still exist in the data!")
        print(final_nan_counts[final_nan_counts > 0])
    print(f"Saving processed data to {output_parquet_path}...")
    demand_resampled.to_parquet(output_parquet_path, index=False)
    # --- NEW: Save the updated central downtime log ---
    print(f"Saving all {len(all_downtime_events)} downtime records to {output_json_path}...")
    with open(output_json_path, 'w', encoding='utf-8') as f:
        # Sort events by start time for better readability before saving
        all_downtime_events.sort(key=lambda x: x['start_time'])
        json.dump(all_downtime_events, f, indent=4)
    print("\nProcessing complete!")
# --- Script Execution Entry Point ---
if __name__ == "__main__":
    # Run the full clean-and-impute pipeline using the module-level paths.
    process_data(INPUT_FILE, OUTPUT_FILE, OUTPUT_JSON)