import pandas as pd
import numpy as np
import json
import os
# --- Global Constants ---
INPUT_FILE = './data/California_ISO/raw_data/all_channel_raw/raw_fuelsource.parquet'
OUTPUT_FILE = './data/California_ISO/raw_data/fuelsource.parquet'
# Central JSON log that accumulates downtime events across runs.
OUTPUT_JSON = './data/California_ISO/downtime_log.json'
# Column names to drop from the DataFrame before processing.
COLUMNS_TO_DROP = ['OTHER']
# A NaN run of at least this many resampled steps is considered a
# downtime event and is filled with 0 instead of interpolated.
NAN_DOWNTIME_THRESHOLD = 24
# Resampling frequency (5 minutes). 'min' is the modern pandas alias;
# the old 'T' alias ('5T') emits a FutureWarning since pandas 2.2.
TIME_FREQUENCY = '5min'
def find_nan_segments(series: pd.Series) -> list:
    """Return one (start, end, length) tuple per run of consecutive NaNs.

    ``start`` is the positional index of the first NaN in the run and
    ``end`` is one past the last NaN (half-open interval), so
    ``length == end - start``.
    """
    mask = series.isna().to_numpy()
    # Pad with False on both sides so runs touching either edge of the
    # series still produce a start/end transition.
    padded = np.concatenate(([False], mask, [False]))
    # diff == +1 marks a False->True edge (run start);
    # diff == -1 marks a True->False edge (one past the run end).
    edges = np.diff(padded.astype(np.int8))
    run_starts = np.flatnonzero(edges == 1)
    run_ends = np.flatnonzero(edges == -1)
    return [
        (int(lo), int(hi), int(hi - lo))
        for lo, hi in zip(run_starts, run_ends)
    ]
def process_data(input_path: str, output_parquet_path: str, output_json_path: str):
    """Load, clean, and resample time-series data, logging downtime events.

    Pipeline:
      1. Load the parquet file, drop rows with non-increasing timestamps,
         and remove the columns listed in COLUMNS_TO_DROP.
      2. Resample onto a uniform TIME_FREQUENCY grid (gaps become NaN).
      3. Load the central downtime log so events recorded by previous runs
         are not duplicated.
      4. For each data channel, zero-fill NaN runs of at least
         NAN_DOWNTIME_THRESHOLD steps (downtime) and linearly interpolate
         the shorter gaps.
      5. Save the cleaned frame to parquet and the updated log to JSON.

    Args:
        input_path: Source parquet file; must contain a 'Time' column.
        output_parquet_path: Destination for the cleaned parquet file.
        output_json_path: Central JSON log of downtime events (read,
            appended to, and rewritten).
    """
    # --- 1. Load and Perform Initial Cleaning ---
    if not os.path.exists(input_path):
        print(f"Error: Input file not found -> {input_path}")
        return
    print(f"Loading data from {input_path}...")
    demand = pd.read_parquet(input_path)
    print("Performing initial data cleaning...")
    demand['Time'] = pd.to_datetime(demand['Time'])
    # Rows whose timestamp does not strictly increase are anomalies
    # (duplicates or clock rollbacks) and are removed.
    time_diffs = demand['Time'].diff().dt.total_seconds()
    invalid_time_indices = time_diffs[time_diffs <= 0].index
    if not invalid_time_indices.empty:
        print(f"Found and removed {len(invalid_time_indices)} rows with timestamp anomalies.")
        demand = demand.drop(invalid_time_indices)
    demand = demand.sort_values('Time').reset_index(drop=True)
    for col_to_drop in COLUMNS_TO_DROP:
        if col_to_drop in demand.columns:
            demand.drop(columns=[col_to_drop], inplace=True)
            print(f"Successfully removed specified column: '{col_to_drop}'")
    # --- 2. Resample to a Uniform Time Grid ---
    print(f"Resampling data to a uniform {TIME_FREQUENCY} frequency...")
    demand.set_index('Time', inplace=True)
    # asfreq() inserts NaN rows for any timestamps missing from the grid.
    demand_resampled = demand.resample(TIME_FREQUENCY).asfreq()
    demand_resampled.reset_index(inplace=True)
    print(f"Resampling complete. Total rows: {len(demand_resampled)}")
    # --- 3. Load existing downtime records from the central JSON log ---
    print(f"Loading existing downtime records from {output_json_path}...")
    if os.path.exists(output_json_path):
        try:
            with open(output_json_path, 'r', encoding='utf-8') as f:
                all_downtime_events = json.load(f)
        except json.JSONDecodeError:
            print(f"Warning: Could not decode JSON from {output_json_path}. Starting with an empty log.")
            all_downtime_events = []
    else:
        print("No existing downtime log found. A new one will be created.")
        all_downtime_events = []
    # Set for fast duplicate checking. BUG FIX: the stored signature must
    # match the (channel_key, start_time, end_time) triple tested below.
    # The old code stored only (start_time, end_time), so no existing event
    # ever matched and every re-run appended duplicates to the log.
    existing_events_set = {
        (event.get('channel_key'), event['start_time'], event['end_time'])
        for event in all_downtime_events
    }
    print(f"Found {len(all_downtime_events)} existing downtime records.")
    # --- 4. Process NaN Segments Channel by Channel ---
    print("\nProcessing missing data channel by channel...")
    data_cols = demand_resampled.columns.drop('Time')
    dataset_name = os.path.splitext(os.path.basename(output_parquet_path))[0]
    new_events_found = 0
    for col in data_cols:
        print(f"  - Processing channel: {col}")
        channel_key = f"{dataset_name}_{col.replace(' ', '_')}"
        nan_segments = find_nan_segments(demand_resampled[col])
        for start_idx, end_idx, length in nan_segments:
            if length >= NAN_DOWNTIME_THRESHOLD:
                # Long gap: treat as downtime and zero-fill the whole run.
                # .loc slicing is end-inclusive, hence end_idx - 1.
                demand_resampled.loc[start_idx:end_idx - 1, col] = 0
                start_time_dt = demand_resampled.at[start_idx, 'Time']
                end_time_dt = demand_resampled.at[end_idx - 1, 'Time']
                start_time_str = start_time_dt.strftime('%Y-%m-%d %H:%M:%S')
                end_time_str = end_time_dt.strftime('%Y-%m-%d %H:%M:%S')
                # Check if this event is already recorded
                event_signature = (channel_key, start_time_str, end_time_str)
                if event_signature not in existing_events_set:
                    # If not, create a new event object and append it
                    new_event = {
                        "channel_key": channel_key,
                        "start_time": start_time_str,
                        "end_time": end_time_str,
                        "start_index": int(start_idx),
                        "end_index": int(end_idx - 1),
                        "duration_in_steps": int(length),
                    }
                    all_downtime_events.append(new_event)
                    existing_events_set.add(event_signature)  # Prevent re-adding within this run
                    new_events_found += 1
                    print(f"    -> NEW downtime event found (length: {length}).")
                else:
                    # Already in the central log from a previous run; skip.
                    print(f"    -> Already recorded downtime event (length: {length}).")
        # Linearly interpolate remaining (shorter) NaN gaps. BUG FIX:
        # assign the result back instead of inplace=True on a column
        # selection, which is deprecated and a silent no-op under pandas
        # Copy-on-Write (pandas >= 2.x).
        demand_resampled[col] = demand_resampled[col].interpolate(method='linear')
    print(f"\nFound and recorded {new_events_found} new downtime events in total.")
    # --- 5. Final Check and Save Results ---
    print("\nFinal data check...")
    final_nan_counts = demand_resampled.isna().sum()
    if final_nan_counts.sum() == 0:
        print("All missing values have been successfully filled.")
    else:
        # Leading NaNs cannot be linearly interpolated, so warn rather than fail.
        print("Warning: Missing values still exist in the data!")
        print(final_nan_counts[final_nan_counts > 0])
    print(f"Saving processed data to {output_parquet_path}...")
    demand_resampled.to_parquet(output_parquet_path, index=False)
    # --- Save the updated central downtime log ---
    print(f"Saving all {len(all_downtime_events)} downtime records to {output_json_path}...")
    # Sort events by start time for better readability before saving
    all_downtime_events.sort(key=lambda x: x['start_time'])
    with open(output_json_path, 'w', encoding='utf-8') as f:
        json.dump(all_downtime_events, f, indent=4)
    print("\nProcessing complete!")
# --- Script Execution Entry Point ---
if __name__ == "__main__":
    # Run the full pipeline with the module-level path constants.
    process_data(INPUT_FILE, OUTPUT_FILE, OUTPUT_JSON)