dmariaa commited on
Commit
41d09e1
·
1 Parent(s): 1e36484

New: LargeST code

Browse files
code/LargeST/to_largest.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ import numpy as np
4
+ import pandas as pd
5
+
6
+
7
class StandardScaler:
    """Z-score normalizer with fixed statistics.

    Borrowed from
    https://github.com/liuxu77/LargeST/blob/main/data/generate_data_for_training.py
    """

    def __init__(self, mean, std):
        # Statistics are fixed at construction time (the caller computes
        # them from the training split only).
        self.mean = mean
        self.std = std

    def transform(self, data):
        """Map raw values to z-scores."""
        return (data - self.mean) / self.std

    def inverse_transform(self, data):
        """Map z-scores back to the original scale."""
        return data * self.std + self.mean
20
+
21
+
22
def generate_data_and_idx(df: pd.DataFrame, x_offsets, y_offsets, add_time_of_day, add_day_of_week):
    """Stack the signal with optional time features and build sample indices.

    Borrowed pattern from the upstream LargeST data-generation script: the
    wide frame (time x sensors) becomes an array of shape
    (time, sensors, features), and ``idx`` enumerates every timestep that has
    a full history window behind it and a full horizon window ahead of it.

    :param df: wide frame, datetime index, one column per sensor.
    :param x_offsets: history offsets relative to each sample (<= 0).
    :param y_offsets: horizon offsets relative to each sample (>= 1).
    :param add_time_of_day: append fraction-of-day as an extra feature channel.
    :param add_day_of_week: append weekday/7 as an extra feature channel.
    :return: (data array, valid sample index array).
    """
    n_steps, n_sensors = df.shape
    features = [np.expand_dims(df.values, axis=-1)]

    if add_time_of_day:
        stamps = df.index.values
        # Fraction of the day elapsed at each timestamp, in [0, 1).
        frac_of_day = (stamps - stamps.astype('datetime64[D]')) / np.timedelta64(1, 'D')
        features.append(np.tile(frac_of_day, [1, n_sensors, 1]).transpose((2, 1, 0)))

    if add_day_of_week:
        # Weekday (Mon=0 .. Sun=6) scaled into [0, 6/7].
        weekday = np.tile(df.index.dayofweek, [1, n_sensors, 1]).transpose((2, 1, 0))
        features.append(weekday / 7)

    data = np.concatenate(features, axis=-1)

    lo = abs(min(x_offsets))
    hi = abs(n_steps - abs(max(y_offsets)))  # Exclusive
    print('idx min & max:', lo, hi)
    return data, np.arange(lo, hi, 1)
44
+
45
+
46
def new_and_dying_sensors(df: pd.DataFrame):
    """Classify sensor columns by where their NaN runs are located.

    A "newborn" sensor has NaNs before its first valid reading, a "dying"
    sensor has NaNs after its last valid reading, and an "invalid" sensor has
    NaNs between valid readings (or no valid readings at all).

    :param df: wide frame (time x sensors), NaN marks a missing reading.
    :return: (newborn, dying, invalid) lists of column labels.
    """
    df = df.sort_index()
    missing = df.isna()
    present = ~missing

    # Per cell: does any valid reading exist at-or-before / at-or-after it?
    seen_before = present.cumsum(axis=0).gt(0)
    seen_after = present[::-1].cumsum(axis=0)[::-1].gt(0)

    newborn_mask = (missing & ~seen_before).any(axis=0)
    dying_mask = (missing & ~seen_after).any(axis=0)
    invalid_mask = (missing & seen_before & seen_after).any(axis=0) | missing.all(axis=0)

    return (
        df.columns[newborn_mask].tolist(),
        df.columns[dying_mask].tolist(),
        df.columns[invalid_mask].tolist(),
    )
68
+
69
+
70
def generate_largest_data(df: pd.DataFrame, output_folder: str, sensors: list = None, seq_len_x: int = 12, seq_len_y: int = 12,
                          splits: dict[str, float] = None):
    """Convert the long-format GO-MO traffic table into LargeST training files.

    Writes ``his.npz`` (normalized data plus scaler statistics) and
    ``idx_train.npy`` / ``idx_val.npy`` / ``idx_test.npy`` into
    *output_folder*.

    :param df: long-format frame with columns 'sensor_id', 'entry_date' and
        'traffic_intensity' (one row per sensor per timestamp).
    :param output_folder: destination directory, created if missing.
    :param sensors: optional subset of sensor ids to keep (None keeps all).
    :param seq_len_x: history window length in time steps.
    :param seq_len_y: prediction horizon length in time steps.
    :param splits: fractions for 'train' and 'val'; the remainder is test.
    :raises Exception: if any sensor has NaNs between valid readings.
    """
    if splits is None:
        splits = {'train': 0.6, 'val': 0.2}

    x_offsets = np.sort(np.arange(-(seq_len_x - 1), 1, 1))
    y_offsets = np.sort(np.arange(1, seq_len_y + 1, 1))

    if sensors is not None:
        df = df[df['sensor_id'].isin(sensors)]

    # Fix: work on a copy. The original assigned into `df` in place, which
    # mutated the caller's frame (and raised SettingWithCopyWarning after the
    # sensor filter above produced a slice).
    df = df.copy()
    df['traffic_intensity'] = df['traffic_intensity'] / 4  # data is a 15-min interval but represented as per hour
    df_pivot = df.pivot(index='entry_date', columns='sensor_id', values='traffic_intensity')
    print('original data shape:', df_pivot.shape)

    # Sensors appearing late or disappearing early are dropped; internal gaps abort.
    newborn, dying, invalid = new_and_dying_sensors(df_pivot)
    if len(invalid) > 0:
        raise Exception("invalid sensors (with nans inside) found")

    to_drop = set(newborn) | set(dying)
    df_clean = df_pivot.drop(columns=to_drop)
    data, idx = generate_data_and_idx(df_clean, x_offsets, y_offsets, add_time_of_day=True, add_day_of_week=True)
    print('final data shape:', data.shape, 'idx shape:', idx.shape)

    # Chronological train/val/test split over the sample indices.
    num_samples = len(idx)
    num_train = int(num_samples * splits['train'])
    num_val = int(num_samples * splits['val'])
    idx_train = idx[:num_train]
    idx_val = idx[num_train:num_train + num_val]
    idx_test = idx[num_train + num_val:]

    # Normalize with statistics from the training portion only.
    # `idx_val[0] - seq_len_x` mirrors the upstream LargeST script; it assumes
    # a non-empty validation split.
    x_train = data[:idx_val[0] - seq_len_x, :, 0]
    scaler = StandardScaler(mean=x_train.mean(), std=x_train.std())
    data[..., 0] = scaler.transform(data[..., 0])

    # Persist data and split indices in the LargeST layout.
    os.makedirs(output_folder, exist_ok=True)
    np.savez_compressed(os.path.join(output_folder, 'his.npz'), data=data, mean=scaler.mean, std=scaler.std)
    np.save(os.path.join(output_folder, 'idx_train.npy'), idx_train)
    np.save(os.path.join(output_folder, 'idx_val.npy'), idx_val)
    np.save(os.path.join(output_folder, 'idx_test.npy'), idx_test)
code/README.md ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # GO-MO traffic dataset source code
2
+
3
+ Here you can find code for several tasks related to the GO-MO traffic dataset:
4
+ * Exporting the road network graph to a derived line graph, where the streets/edges are transformed into vertices and
5
+ junctions/vertices are transformed into edges.
6
+ * Exporting the road network graph or the route graph to Pytorch Geometric data format.
7
+ * Exporting the road network graph or the route graph to a numpy adjacency matrix.
8
+
9
+ In the LargeST folder you can find code to:
10
+ * Generate data and an adjacency matrix compatible with the LargeST benchmark, to reproduce the benchmarking experiments presented in
11
+ the GO-MO paper.
code/graph_format.py CHANGED
@@ -3,8 +3,6 @@ import torch
3
  from torch_geometric.data import Data
4
  import networkx as nx
5
 
6
- from stats import print_graph_stats
7
-
8
 
9
  def _get_float_value(v):
10
  if isinstance(v, str) and "|" in v:
 
3
  from torch_geometric.data import Data
4
  import networkx as nx
5
 
 
 
6
 
7
  def _get_float_value(v):
8
  if isinstance(v, str) and "|" in v:
code/load_dataset.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Fetch the GO-MO traffic dataset from the Hugging Face Hub and convert it
# into LargeST-format training files via generate_largest_data.
import pandas as pd
from datasets import load_dataset, Value

from LargeST.to_largest import generate_largest_data

# Hub repository hosting the raw CSV export of the dataset.
repo_id = "double-blind-anonymous/go-mo-dataset"

# Load only the 2024 traffic CSV, exposed as the 'train' split.
data = load_dataset(repo_id, data_files={'train': "traffic_data_2024.csv"}, split="train")
# data = data.cast_column("entry_date", Value("timestamp[s]"))
df = data.to_pandas()
print(df.head())
print(df.shape)

# NOTE(review): output path is machine-specific — presumably intended to be
# parameterized before release; confirm with the author.
generate_largest_data(df, "/mnt/raid/code/dmariaa/go-mo-dataset/code/data")