|
|
import pandas as pd
|
|
|
import numpy as np
|
|
|
import json
|
|
|
import os
|
|
|
|
|
|
|
|
|
# Source parquet: raw, irregular fuel-source readings for California ISO.
INPUT_FILE = './data/California_ISO/raw_data/all_channel_raw/raw_fuelsource.parquet'

# Destination parquet: cleaned, uniformly resampled fuel-source data.
OUTPUT_FILE = './data/California_ISO/raw_data/fuelsource.parquet'

# Cumulative JSON log of detected downtime events (appended across runs).
OUTPUT_JSON = './data/California_ISO/downtime_log.json'

# Data columns removed outright before resampling.
COLUMNS_TO_DROP = ['OTHER']

# Minimum length (in resampled steps) of a consecutive-NaN run for it to be
# treated as a downtime event (zero-filled and logged) rather than a short
# gap to be interpolated. At 5-minute steps, 24 steps = 2 hours.
NAN_DOWNTIME_THRESHOLD = 24

# Resampling frequency. NOTE(review): 'T' is a deprecated pandas alias for
# minutes ('5min' is the modern spelling) — confirm the installed pandas
# version still accepts it before upgrading.
TIME_FREQUENCY = '5T'
|
|
|
|
|
|
def find_nan_segments(series: pd.Series) -> list:
    """
    Locate runs of consecutive NaN values in a Series.

    Returns a list of ``(start, end, length)`` tuples using positional
    indices, where ``end`` is exclusive and ``length == end - start``.
    An all-valid (or empty) Series yields an empty list.
    """
    mask = series.isna().to_numpy()
    segments = []
    run_start = None  # positional index where the current NaN run began

    for pos in range(len(mask)):
        if mask[pos]:
            if run_start is None:
                run_start = pos
        elif run_start is not None:
            segments.append((run_start, pos, pos - run_start))
            run_start = None

    # Close out a run that extends to the end of the Series.
    if run_start is not None:
        segments.append((run_start, len(mask), len(mask) - run_start))

    return segments
|
|
|
|
|
|
|
|
|
def process_data(input_path: str, output_parquet_path: str, output_json_path: str):
    """
    Loads, cleans, and processes time-series data, appending new downtime events
    to a central log file.

    Pipeline:
      1. Load the raw parquet; drop rows whose timestamp is not strictly
         increasing relative to the previous row.
      2. Drop columns listed in COLUMNS_TO_DROP, then resample onto a uniform
         TIME_FREQUENCY grid (gaps appear as NaN rows).
      3. For each data channel, NaN runs of NAN_DOWNTIME_THRESHOLD or more
         steps are zero-filled and logged as downtime events (deduplicated
         against the existing JSON log); shorter gaps are linearly interpolated.
      4. Write the cleaned parquet and the updated, time-sorted downtime log.

    Args:
        input_path: Parquet file with a 'Time' column plus data channels.
        output_parquet_path: Destination for the cleaned parquet; its basename
            also prefixes each event's channel_key.
        output_json_path: JSON file holding the cumulative downtime log.
    """
    if not os.path.exists(input_path):
        print(f"Error: Input file not found -> {input_path}")
        return

    print(f"Loading data from {input_path}...")
    demand = pd.read_parquet(input_path)

    # --- Initial cleaning: remove duplicate / backwards timestamps ---
    print("Performing initial data cleaning...")
    demand['Time'] = pd.to_datetime(demand['Time'])
    time_diffs = demand['Time'].diff().dt.total_seconds()
    invalid_time_indices = time_diffs[time_diffs <= 0].index
    if not invalid_time_indices.empty:
        print(f"Found and removed {len(invalid_time_indices)} rows with timestamp anomalies.")
        demand = demand.drop(invalid_time_indices)

    demand = demand.sort_values('Time').reset_index(drop=True)

    for col_to_drop in COLUMNS_TO_DROP:
        if col_to_drop in demand.columns:
            demand.drop(columns=[col_to_drop], inplace=True)
            print(f"Successfully removed specified column: '{col_to_drop}'")

    # --- Resample onto a uniform time grid (missing steps become NaN) ---
    print(f"Resampling data to a uniform {TIME_FREQUENCY} frequency...")
    demand.set_index('Time', inplace=True)
    demand_resampled = demand.resample(TIME_FREQUENCY).asfreq()
    demand_resampled.reset_index(inplace=True)
    print(f"Resampling complete. Total rows: {len(demand_resampled)}")

    # --- Load (or initialize) the cumulative downtime log ---
    print(f"Loading existing downtime records from {output_json_path}...")
    if os.path.exists(output_json_path):
        try:
            with open(output_json_path, 'r', encoding='utf-8') as f:
                all_downtime_events = json.load(f)
        except json.JSONDecodeError:
            print(f"Warning: Could not decode JSON from {output_json_path}. Starting with an empty log.")
            all_downtime_events = []
    else:
        print("No existing downtime log found. A new one will be created.")
        all_downtime_events = []

    # BUG FIX: dedup signatures below are (channel_key, start, end) 3-tuples,
    # but this set previously held only (start, end) 2-tuples, so membership
    # never matched and duplicate events were appended on every rerun. Build
    # matching 3-tuples; .get() tolerates legacy records missing channel_key.
    existing_events_set = {
        (event.get('channel_key'), event['start_time'], event['end_time'])
        for event in all_downtime_events
    }
    print(f"Found {len(all_downtime_events)} existing downtime records.")

    # --- Per-channel downtime detection and gap filling ---
    print("\nProcessing missing data channel by channel...")
    data_cols = demand_resampled.columns.drop('Time')
    dataset_name = os.path.splitext(os.path.basename(output_parquet_path))[0]
    new_events_found = 0

    for col in data_cols:
        print(f" - Processing channel: {col}")
        channel_key = f"{dataset_name}_{col.replace(' ', '_')}"
        nan_segments = find_nan_segments(demand_resampled[col])

        for start_idx, end_idx, length in nan_segments:
            if length >= NAN_DOWNTIME_THRESHOLD:
                # Long outage: treat as downtime — zero-fill and log.
                # end_idx is exclusive, .loc is inclusive, hence end_idx - 1.
                demand_resampled.loc[start_idx:end_idx - 1, col] = 0

                start_time_dt = demand_resampled.at[start_idx, 'Time']
                end_time_dt = demand_resampled.at[end_idx - 1, 'Time']
                start_time_str = start_time_dt.strftime('%Y-%m-%d %H:%M:%S')
                end_time_str = end_time_dt.strftime('%Y-%m-%d %H:%M:%S')

                event_signature = (channel_key, start_time_str, end_time_str)
                if event_signature not in existing_events_set:
                    new_event = {
                        "channel_key": channel_key,
                        "start_time": start_time_str,
                        "end_time": end_time_str,
                        "start_index": int(start_idx),
                        "end_index": int(end_idx - 1),
                        "duration_in_steps": length
                    }
                    all_downtime_events.append(new_event)
                    existing_events_set.add(event_signature)
                    new_events_found += 1
                    print(f"   -> NEW downtime event found (length: {length}).")
                else:
                    print(f"   -> Already recorded downtime event (length: {length}).")

        # BUG FIX: chained `df[col].interpolate(..., inplace=True)` mutates a
        # temporary selection and is not guaranteed to write back into the
        # DataFrame (fails under copy-on-write); assign the result explicitly.
        demand_resampled[col] = demand_resampled[col].interpolate(method='linear')

    print(f"\nFound and recorded {new_events_found} new downtime events in total.")

    # --- Final check. NOTE: linear interpolation cannot fill NaNs at the very
    # start of a series, so the warning below can still fire legitimately. ---
    print("\nFinal data check...")
    final_nan_counts = demand_resampled.isna().sum()
    if final_nan_counts.sum() == 0:
        print("All missing values have been successfully filled.")
    else:
        print("Warning: Missing values still exist in the data!")
        print(final_nan_counts[final_nan_counts > 0])

    print(f"Saving processed data to {output_parquet_path}...")
    demand_resampled.to_parquet(output_parquet_path, index=False)

    print(f"Saving all {len(all_downtime_events)} downtime records to {output_json_path}...")
    # Sort chronologically before writing so the log stays readable.
    all_downtime_events.sort(key=lambda x: x['start_time'])
    with open(output_json_path, 'w', encoding='utf-8') as f:
        json.dump(all_downtime_events, f, indent=4)

    print("\nProcessing complete!")
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
|
|
|
process_data(
|
|
|
input_path=INPUT_FILE,
|
|
|
output_parquet_path=OUTPUT_FILE,
|
|
|
output_json_path=OUTPUT_JSON
|
|
|
) |