import os

import numpy as np
import pandas as pd


class StandardScaler:
    """
    This scaler code is borrowed from https://github.com/liuxu77/LargeST/blob/main/data/generate_data_for_training.py
    """
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def transform(self, data):
        return (data - self.mean) / self.std

    def inverse_transform(self, data):
        return (data * self.std) + self.mean

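
# Quick sanity check (illustrative values, assumed for this example):
#     StandardScaler(mean=10.0, std=2.0).transform(14.0)        -> 2.0
#     StandardScaler(mean=10.0, std=2.0).inverse_transform(2.0) -> 14.0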

def generate_data_and_idx(df: pd.DataFrame, x_offsets, y_offsets, add_time_of_day, add_day_of_week):
    """Stack the raw series with optional time-of-day / day-of-week channels and
    return the anchor indices t for which both data[t + x_offsets] and
    data[t + y_offsets] fall inside the series."""
    num_samples, num_nodes = df.shape
    data = np.expand_dims(df.values, axis=-1)  # (num_samples, num_nodes, 1)

    feature_list = [data]
    if add_time_of_day:
        # time since midnight as a fraction of the day, in [0, 1)
        time_ind = (df.index.values - df.index.values.astype('datetime64[D]')) / np.timedelta64(1, 'D')
        time_of_day = np.tile(time_ind, [1, num_nodes, 1]).transpose((2, 1, 0))
        feature_list.append(time_of_day)
    if add_day_of_week:
        dow = df.index.dayofweek
        dow_tiled = np.tile(dow, [1, num_nodes, 1]).transpose((2, 1, 0))
        day_of_week = dow_tiled / 7  # scale day index 0..6 into [0, 1)
        feature_list.append(day_of_week)

    data = np.concatenate(feature_list, axis=-1)

    min_t = abs(min(x_offsets))           # first anchor with a full history window
    max_t = num_samples - max(y_offsets)  # exclusive; later anchors lack a full horizon
    print('idx min & max:', min_t, max_t)
    idx = np.arange(min_t, max_t, 1)
    return data, idx
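

# A minimal sketch (not part of the original pipeline) of how a downstream
# dataloader might consume `idx`: each anchor t picks the seq_len_x steps
# ending at t as input and the next seq_len_y steps as target. The helper
# name `_example_get_xy` is an assumption made purely for illustration.
def _example_get_xy(data, t, x_offsets, y_offsets):
    x = data[t + x_offsets, ...]  # history window, ends at t (inclusive)
    y = data[t + y_offsets, ...]  # prediction horizon, starts at t + 1
    return x, y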


def new_and_dying_sensors(df: pd.DataFrame):
    """Classify sensor columns by their NaN pattern: 'newborn' sensors have
    leading NaNs (came online after the series start), 'dying' sensors have
    trailing NaNs (went offline before the end), and 'invalid' sensors have
    internal gaps or no data at all."""
    df = df.sort_index()

    isna   = df.isna()
    valid  = ~isna

    # True from the first valid observation onward (top-down cumsum),
    # and up to the last valid observation (bottom-up cumsum)
    has_before = valid.cumsum(axis=0).gt(0)
    has_after  = valid[::-1].cumsum(axis=0)[::-1].gt(0)

    leading_nan = isna & ~has_before
    trailing_nan = isna & ~has_after
    internal_nan = isna & has_before & has_after

    newborn_cols = leading_nan.any(axis=0)
    dying_cols   = trailing_nan.any(axis=0)
    invalid_cols = internal_nan.any(axis=0) | isna.all(axis=0)

    newborn_sensors = df.columns[newborn_cols].tolist()
    dying_sensors   = df.columns[dying_cols].tolist()
    invalid_sensors = df.columns[invalid_cols].tolist()

    return newborn_sensors, dying_sensors, invalid_sensors
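

# A minimal sketch of the classification above on a toy frame (assumed data,
# one sensor per NaN pattern, for illustration only).
def _example_sensor_classes():
    toy = pd.DataFrame({
        'newborn': [np.nan, 1.0, 2.0],  # leading NaN: came online late
        'dying':   [1.0, 2.0, np.nan],  # trailing NaN: went offline early
        'invalid': [1.0, np.nan, 2.0],  # internal NaN: gap mid-series
    })
    return new_and_dying_sensors(toy)   # (['newborn'], ['dying'], ['invalid'])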


def generate_largest_data(df: pd.DataFrame, output_folder: str, sensors: list = None, seq_len_x: int = 12, seq_len_y: int = 12,
                          splits: dict[str, float] = None):
    """Build LargeST-style training artifacts (his.npz plus train/val/test
    index files) from a long-format frame with 'entry_date', 'sensor_id' and
    'traffic_intensity' columns. The test split is whatever remains after
    'train' and 'val'."""
    if splits is None:
        splits = {'train': 0.6, 'val': 0.2}

    x_offsets = np.sort(np.arange(-(seq_len_x - 1), 1, 1))  # [-(seq_len_x - 1), ..., 0]
    y_offsets = np.sort(np.arange(1, seq_len_y + 1, 1))     # [1, ..., seq_len_y]

    if sensors is not None:
        df = df[df['sensor_id'].isin(sensors)]

    # Work on a copy so the caller's frame is not mutated and the assignment
    # below does not trigger pandas' SettingWithCopyWarning after the filter.
    df = df.copy()
    # Values are vehicles per hour recorded at 15-minute intervals; divide by 4
    # to convert them to counts per interval.
    df['traffic_intensity'] = df['traffic_intensity'] / 4
    df_pivot = df.pivot(index='entry_date', columns='sensor_id', values='traffic_intensity')
    print('original data shape:', df_pivot.shape)

    newborn, dying, invalid = new_and_dying_sensors(df_pivot)
    if len(invalid) > 0:
        raise ValueError('invalid sensors found (NaN gaps inside the series): ' + ', '.join(map(str, invalid)))

    to_drop = set(newborn) | set(dying)
    df_clean = df_pivot.drop(columns=to_drop)
    data, idx = generate_data_and_idx(df_clean, x_offsets, y_offsets, add_time_of_day=True, add_day_of_week=True)
    print('final data shape:', data.shape, 'idx shape:', idx.shape)

    # generate splits; the test set gets the remainder after train and val
    num_samples = len(idx)
    num_train = int(num_samples * splits['train'])
    num_val = int(num_samples * splits['val'])
    idx_train = idx[:num_train]
    idx_val = idx[num_train:num_train + num_val]
    idx_test = idx[num_train + num_val:]

    # normalize the raw channel with statistics fitted on training rows only,
    # so no validation/test information leaks into the scaler
    x_train = data[:idx_val[0] - seq_len_x, :, 0]
    scaler = StandardScaler(mean=x_train.mean(), std=x_train.std())
    data[..., 0] = scaler.transform(data[..., 0])

    # save data
    os.makedirs(output_folder, exist_ok=True)
    np.savez_compressed(os.path.join(output_folder, 'his.npz'), data=data, mean=scaler.mean, std=scaler.std)
    np.save(os.path.join(output_folder, 'idx_train.npy'), idx_train)
    np.save(os.path.join(output_folder, 'idx_val.npy'), idx_val)
    np.save(os.path.join(output_folder, 'idx_test.npy'), idx_test)
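

if __name__ == '__main__':
    # Minimal usage sketch, assuming a long-format input file; the path
    # 'raw_traffic.parquet' is a placeholder, not part of the original
    # pipeline. The frame must carry the 'entry_date', 'sensor_id' and
    # 'traffic_intensity' columns used above.
    raw = pd.read_parquet('raw_traffic.parquet')
    generate_largest_data(raw, output_folder='data/largest')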