import os

import numpy as np
import pandas as pd


class StandardScaler:
    """
    This scaler code is borrowed from https://github.com/liuxu77/LargeST/blob/main/data/generate_data_for_training.py
    """
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def transform(self, data):
        return (data - self.mean) / self.std

    def inverse_transform(self, data):
        return (data * self.std) + self.mean
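
# A minimal usage sketch (not part of the pipeline): transform() and
# inverse_transform() are exact inverses of each other, e.g.
#   scaler = StandardScaler(mean=10.0, std=2.0)
#   z = scaler.transform(np.array([8.0, 12.0]))   # -> array([-1.,  1.])
#   scaler.inverse_transform(z)                    # -> array([ 8., 12.])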


def generate_data_and_idx(df: pd.DataFrame, x_offsets, y_offsets, add_time_of_day, add_day_of_week):
    """Stack the raw values with optional time-of-day / day-of-week features
    and return the valid sample indices for the given offsets."""
    num_samples, num_nodes = df.shape
    data = np.expand_dims(df.values, axis=-1)

    feature_list = [data]
    if add_time_of_day:
        # fraction of the day elapsed, in [0, 1)
        time_ind = (df.index.values - df.index.values.astype('datetime64[D]')) / np.timedelta64(1, 'D')
        time_of_day = np.tile(time_ind, [1, num_nodes, 1]).transpose((2, 1, 0))
        feature_list.append(time_of_day)
    if add_day_of_week:
        # day of week scaled to [0, 6/7]
        dow = df.index.dayofweek
        dow_tiled = np.tile(dow, [1, num_nodes, 1]).transpose((2, 1, 0))
        day_of_week = dow_tiled / 7
        feature_list.append(day_of_week)

    data = np.concatenate(feature_list, axis=-1)

    min_t = abs(min(x_offsets))
    max_t = abs(num_samples - abs(max(y_offsets)))
    print('idx min & max:', min_t, max_t)
    idx = np.arange(min_t, max_t, 1)
    return data, idx
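
# A short note on the index convention above (a sketch, derived from the offsets
# built in generate_largest_data below): with seq_len_x = seq_len_y = 12, each
# t in idx defines one sample as
#   x = data[t - 11 : t + 1]    # the 12 steps up to and including t
#   y = data[t + 1 : t + 13]    # the 12 steps after t
# so idx runs from 11 up to (but not including) num_samples - 12.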


def new_and_dying_sensors(df: pd.DataFrame):
    """Classify columns by their NaN pattern: sensors that start reporting late
    (leading NaNs), stop reporting early (trailing NaNs), or have gaps in the
    middle / no data at all."""
    df = df.sort_index()

    isna = df.isna()
    valid = ~isna

    # per column: does any valid value occur at or before / at or after each row?
    has_before = valid.cumsum(axis=0).gt(0)
    has_after = valid[::-1].cumsum(axis=0)[::-1].gt(0)

    leading_nan = isna & ~has_before
    trailing_nan = isna & ~has_after
    internal_nan = isna & has_before & has_after

    newborn_cols = leading_nan.any(axis=0)
    dying_cols = trailing_nan.any(axis=0)
    invalid_cols = internal_nan.any(axis=0) | isna.all(axis=0)

    newborn_sensors = df.columns[newborn_cols].tolist()
    dying_sensors = df.columns[dying_cols].tolist()
    invalid_sensors = df.columns[invalid_cols].tolist()

    return newborn_sensors, dying_sensors, invalid_sensors
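
# Illustrative sketch (values made up): for a frame whose columns look like
#   A: [NaN, 1, 2, 3]   -> newborn (leading NaNs only)
#   B: [1, 2, 3, NaN]   -> dying (trailing NaNs only)
#   C: [1, NaN, 3, 4]   -> invalid (NaN surrounded by data)
# new_and_dying_sensors would return (['A'], ['B'], ['C']).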


def generate_largest_data(df: pd.DataFrame, output_folder: str, sensors: list = None, seq_len_x: int = 12, seq_len_y: int = 12,
                          splits: dict[str, float] = None):
    """Build the LargeST-style training artifacts (his.npz and idx_*.npy) from a
    long-format frame with 'entry_date', 'sensor_id' and 'traffic_intensity' columns."""
    if splits is None:
        splits = {'train': 0.6, 'val': 0.2}

    x_offsets = np.sort(np.arange(-(seq_len_x - 1), 1, 1))
    y_offsets = np.sort(np.arange(1, seq_len_y + 1, 1))

    if sensors is not None:
        df = df[df['sensor_id'].isin(sensors)]

    # work on a copy so the caller's frame is not mutated (and to avoid SettingWithCopyWarning)
    df = df.copy()
    df['traffic_intensity'] = df['traffic_intensity'] / 4
    df_pivot = df.pivot(index='entry_date', columns='sensor_id', values='traffic_intensity')
    print('original data shape:', df_pivot.shape)

    newborn, dying, invalid = new_and_dying_sensors(df_pivot)
    if len(invalid) > 0:
        raise ValueError("invalid sensors (with NaNs inside the series) found")

    # drop sensors that appear or disappear partway through the period
    to_drop = set(newborn) | set(dying)
    df_clean = df_pivot.drop(columns=to_drop)
    data, idx = generate_data_and_idx(df_clean, x_offsets, y_offsets, add_time_of_day=True, add_day_of_week=True)
    print('final data shape:', data.shape, 'idx shape:', idx.shape)

    # chronological train / val / test split of the sample indices
    num_samples = len(idx)
    num_train = int(num_samples * splits['train'])
    num_val = int(num_samples * splits['val'])
    num_test = num_samples - num_train - num_val
    idx_train = idx[:num_train]
    idx_val = idx[num_train:num_train + num_val]
    idx_test = idx[num_train + num_val:]

    # fit the scaler on the training portion only (everything before the first validation window)
    x_train = data[:idx_val[0] - seq_len_x, :, 0]
    scaler = StandardScaler(mean=x_train.mean(), std=x_train.std())
    data[..., 0] = scaler.transform(data[..., 0])

    os.makedirs(output_folder, exist_ok=True)
    np.savez_compressed(os.path.join(output_folder, 'his.npz'), data=data, mean=scaler.mean, std=scaler.std)
    np.save(os.path.join(output_folder, 'idx_train.npy'), idx_train)
    np.save(os.path.join(output_folder, 'idx_val.npy'), idx_val)
    np.save(os.path.join(output_folder, 'idx_test.npy'), idx_test)
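

# A minimal invocation sketch. The CSV path and output folder below are
# hypothetical placeholders; the only requirement taken from the code above is
# that the frame has 'entry_date', 'sensor_id' and 'traffic_intensity' columns,
# with 'entry_date' parseable as a datetime.
if __name__ == '__main__':
    raw = pd.read_csv('traffic_readings.csv', parse_dates=['entry_date'])  # hypothetical input file
    generate_largest_data(
        raw,
        output_folder='data/processed',  # hypothetical output location
        seq_len_x=12,
        seq_len_y=12,
        splits={'train': 0.6, 'val': 0.2},
    )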