Xu Zhijian committed on
Commit 4733a1f · 1 Parent(s): 67b662e
update: new short subjects; merged general report
Browse files
- .gitattributes +1 -0
- README.md +8 -0
- expand_id_info_short_new.json +52 -0
- id_info_imputed.json +0 -0
- id_info_imputed_short_new.json +0 -0
- scripts/cleaning_abandoned.ipynb +0 -0
- scripts/embed.ipynb +0 -0
- scripts/impute_expand.ipynb +0 -0
- scripts/impute_expand_abandoned.py +207 -0
- scripts/merge_general_report.py +58 -0
- scripts/split_abandoned.ipynb +0 -0
- static_info_embeddings.pkl +1 -1
- static_info_embeddings_short_new.pkl +3 -0
- static_info_short_new.json +1 -0
- time_series_short_new/id_141.parquet +3 -0
- time_series_short_new/id_150.parquet +3 -0
- time_series_short_new/id_159.parquet +3 -0
- time_series_short_new/id_164.parquet +3 -0
- time_series_short_new/id_195.parquet +3 -0
- time_series_short_new/id_199.parquet +3 -0
- time_series_short_new/id_351.parquet +3 -0
- time_series_short_new/id_364.parquet +3 -0
- time_series_short_new/id_402.parquet +3 -0
- time_series_short_new/id_406.parquet +3 -0
- time_series_short_new/id_info_imputed_short_new.json +0 -0
- weather/merged_general_report/merged_general_weather_report.json +3 -0
- weather/merged_report_embedding/static_info_embeddings.pkl +3 -0
- weather/merged_report_embedding/static_info_embeddings_short_new.pkl +3 -0
.gitattributes
CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+weather/merged_general_report/merged_general_weather_report.json filter=lfs diff=lfs merge=lfs -text

README.md
CHANGED
@@ -1,5 +1,9 @@
 # WIATS: Weather-centric Intervention-Aware Time Series Multimodal Dataset
 
+## Data source
+
+- [NYC_Traffic_Speed](https://data.cityofnewyork.us/Transportation/DOT-Traffic-Speeds-NBE/i4gi-tjb9/about_data)
+
 ## Dataset Structure
 
 The dataset is organized into the following structure:
@@ -46,6 +50,10 @@ The dataset is organized into the following structure:
 | | | |-- version_2
 | | | |-- ...
 
+| | |-- merged_general_report # Merged general report for multiple required locations (optional)
+| | | |-- xxx.json
+| | | |-- ...
+
 | |-- scripts # Scripts for data processing, model training, and evaluation
 | |-- id_info.json # Metadata for the whole dataset, without preprocessing
 | |-- static_info.json # Static information for this dataset, including dataset information, channel information, and downtime reasons

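For orientation, here is a minimal sketch (not part of the committed README) of reading one of the per-sensor series added under time_series_short_new/. The DATA_AS_OF, SPEED, and TRAVEL_TIME column names come from the imputation script in this commit, and the regular 5-minute grid is what that script enforces.

import pandas as pd

# Load one sensor's 5-minute traffic series (run from the repository root).
df = pd.read_parquet("time_series_short_new/id_141.parquet")
df["DATA_AS_OF"] = pd.to_datetime(df["DATA_AS_OF"])

print(df[["DATA_AS_OF", "SPEED", "TRAVEL_TIME"]].head())

# After imputation, consecutive samples should be at most 5 minutes apart.
print(df["DATA_AS_OF"].diff().dt.total_seconds().max())
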
expand_id_info_short_new.json
ADDED
@@ -0,0 +1,52 @@
+{
+    "159": {
+        "borough": "Bronx",
+        "link": "BRP N WATSON AVENUE - FORDHAM ROAD",
+        "len": 322911
+    },
+    "351": {
+        "borough": "Staten Island",
+        "link": "MLK S - SIE W WALKER STREET - RICHMOND AVENUE",
+        "len": 322890
+    },
+    "150": {
+        "borough": "Manhattan",
+        "link": "BQE N Atlantic Ave - MAN Bridge Manhattan Side",
+        "len": 317848
+    },
+    "364": {
+        "borough": "Manhattan",
+        "link": "QMT E Manhattan Side - Toll Plaza",
+        "len": 321989
+    },
+    "195": {
+        "borough": "Bronx",
+        "link": "CBE W U/LEV AMSTERDAM AVE - I95 S EXP LNS",
+        "len": 321922
+    },
+    "199": {
+        "borough": "Queens",
+        "link": "CIP N Hempstead Tpk - LIE",
+        "len": 322896
+    },
+    "406": {
+        "borough": "Queens",
+        "link": "TNB S Toll Plaza - Queens Anchorage",
+        "len": 321972
+    },
+    "402": {
+        "borough": "Queens",
+        "link": "TNB N Queens Anchorage - Toll Plaza",
+        "len": 321977
+    },
+    "164": {
+        "borough": "Queens",
+        "link": "BWB N Queens Anchorage - Toll Plaza",
+        "len": 322011
+    },
+    "141": {
+        "borough": "Queens",
+        "link": "BE S TBB EXIT RAMP - QUEENS ANCHORAGE",
+        "len": 322000
+    }
+}

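As a quick illustration (not part of this commit), the new metadata file can be grouped by borough in a few lines; the borough, link, and len keys are exactly those shown in the diff above.

import json
from collections import defaultdict

with open("expand_id_info_short_new.json") as f:
    id_info = json.load(f)

# Group the ten short-subject sensors by borough.
by_borough = defaultdict(list)
for sensor_id, meta in id_info.items():
    by_borough[meta["borough"]].append((sensor_id, meta["link"]))

for borough, sensors in by_borough.items():
    print(borough, [sid for sid, _ in sensors])
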
id_info_imputed.json
ADDED
The diff for this file is too large to render. See raw diff.

id_info_imputed_short_new.json
ADDED
The diff for this file is too large to render. See raw diff.

scripts/cleaning_abandoned.ipynb
ADDED
The diff for this file is too large to render. See raw diff.

scripts/embed.ipynb
CHANGED
The diff for this file is too large to render. See raw diff.

scripts/impute_expand.ipynb
ADDED
File without changes.

scripts/impute_expand_abandoned.py
ADDED
@@ -0,0 +1,207 @@
+import pandas as pd
+import numpy as np
+from matplotlib import pyplot as plt
+import json
+import os
+from tqdm import tqdm
+from concurrent.futures import ThreadPoolExecutor
+
+# disable the warning from pandas
+pd.options.mode.chained_assignment = None
+
+id_info = json.load(open('./expand_id_info_short_new.json'))
+id_list = list(id_info.keys())
+
+print('Reading data...')
+dfs = {}
+
+def read_parquet(id):
+    return id, pd.read_parquet(f'./scripts/slim_data/id_{id}.parquet')
+
+with ThreadPoolExecutor(max_workers=100) as executor:
+    futures = [executor.submit(read_parquet, id) for id in id_list]
+    for future in tqdm(futures):
+        id, df = future.result()
+        dfs[id] = df
+
+sensor_downtimes = {}
+
+for id in tqdm(id_list):
+
+    tqdm.write(f"Preparing ID: {id}")
+
+    df = dfs[str(id)].copy()
+
+    df['SPEED'] = pd.to_numeric(df['SPEED'], errors='coerce')
+    df['TRAVEL_TIME'] = pd.to_numeric(df['TRAVEL_TIME'], errors='coerce')
+    df['DATA_AS_OF'] = pd.to_datetime(df['DATA_AS_OF'])
+
+    # round the DATA_AS_OF to the nearest 5 minutes
+    df['DATA_AS_OF'] = df['DATA_AS_OF'].dt.round('5min')
+    tqdm.write(f"De-duplicating ID: {id}")
+    # find duplicate timestamps
+    missing_gap0 = df['DATA_AS_OF'].diff().dt.total_seconds() == 0
+    # missing_gap5 = df['DATA_AS_OF'].diff().dt.total_seconds() > (60*5)
+    missing_gap0 = missing_gap0[missing_gap0].index
+
+    to_remove = []
+    for ind in missing_gap0:
+        # if the DATA_AS_OF of ind is 10 min smaller than ind+1, then add 5 min to ind
+        if ind+1 == len(df):
+            to_remove.append(ind)
+        elif df['DATA_AS_OF'].iloc[ind] + pd.Timedelta('10min') == df['DATA_AS_OF'].iloc[ind+1]:
+            # print(f'adjust {ind}')
+            df['DATA_AS_OF'].iloc[ind] = df['DATA_AS_OF'].iloc[ind] + pd.Timedelta('5min')
+        else:
+            # remove the row of ind
+            to_remove.append(ind)
+    df = df.drop(to_remove)
+    df = df.reset_index(drop=True)
+    # check missing_gap0 again
+    missing_gap0 = df['DATA_AS_OF'].diff().dt.total_seconds() == 0
+    missing_gap0 = missing_gap0[missing_gap0].index
+    assert len(missing_gap0) == 0, 'There are still duplicate timestamps'
+
+    tqdm.write(f"Small gaps ID: {id}")
+    # get the missing gaps between 5 min and 2 h
+    threshold_time = 120  # in min
+
+    missing_gap15 = df['DATA_AS_OF'].diff().dt.total_seconds() <= (60*threshold_time)  # make it 2h
+    missing_gap5 = df['DATA_AS_OF'].diff().dt.total_seconds() > (60*5)
+    missing_gap = missing_gap15 & missing_gap5
+
+    missing_gap = missing_gap[missing_gap].index
+
+    # convert the str values in df['SPEED'] and df['TRAVEL_TIME'] to float
+    # convert the timestamp to datetime
+
+    def linear_impute(start_idx, end_idx):
+        start_time = df['DATA_AS_OF'][start_idx]
+        end_time = df['DATA_AS_OF'][end_idx]
+        start_speed = df['SPEED'][start_idx]
+        end_speed = df['SPEED'][end_idx]
+        start_travel_time = df['TRAVEL_TIME'][start_idx]
+        end_travel_time = df['TRAVEL_TIME'][end_idx]
+        gap = end_time - start_time
+        gap = gap.total_seconds()
+        new_rows = []
+        for j in range(1, int(gap // 300)):
+            new_rows.append({
+                'DATA_AS_OF': start_time + pd.Timedelta(f'{j*5}min'),
+                'SPEED': start_speed + (end_speed - start_speed) * j / (gap // 300),
+                'TRAVEL_TIME': start_travel_time + (end_travel_time - start_travel_time) * j / (gap // 300)
+            })
+        return new_rows
+
+    with ThreadPoolExecutor(max_workers=100) as executor:
+        futures = [executor.submit(linear_impute, i - 1, i) for i in missing_gap]
+        results = [future.result() for future in tqdm(futures)]
+
+    # Flatten the list of lists
+    new_rows = [item for sublist in results for item in sublist]
+
+    # Create a DataFrame from the new rows and concatenate it with the original DataFrame
+    new_df = pd.DataFrame(new_rows)
+    df = pd.concat([df, new_df], ignore_index=True)
+
+    # sort by the DATA_AS_OF
+    df = df.sort_values('DATA_AS_OF')
+    df = df.reset_index(drop=True)
+
+    # check again
+    missing_gap15 = df['DATA_AS_OF'].diff().dt.total_seconds() <= (60*threshold_time)
+    missing_gap5 = df['DATA_AS_OF'].diff().dt.total_seconds() > (60*5)
+    missing_gap = missing_gap15 & missing_gap5
+
+    missing_gap = missing_gap[missing_gap].index
+    assert len(missing_gap) == 0, 'There are still missing gaps'
+
+    tqdm.write(f"Large gaps ID: {id}")
+    missing_gaplarge = df['DATA_AS_OF'].diff().dt.total_seconds() > (60*threshold_time)  # make it 2h  # 30 min is ok
+
+    missing_gaplarge = missing_gaplarge[missing_gaplarge].index
+
+    def zero_impute(start_idx, end_idx):
+        start_time = df['DATA_AS_OF'][start_idx]
+        end_time = df['DATA_AS_OF'][end_idx]
+        gap = end_time - start_time
+        gap = gap.total_seconds()
+        new_rows = []
+        for j in range(1, int(gap // 300)):
+            new_rows.append({
+                'DATA_AS_OF': start_time + pd.Timedelta(f'{j*5}min'),
+                'SPEED': 0,
+                'TRAVEL_TIME': 0
+            })
+        return new_rows
+
+    with ThreadPoolExecutor(max_workers=100) as executor:
+        futures = [executor.submit(zero_impute, i - 1, i) for i in missing_gaplarge]
+        results = [future.result() for future in tqdm(futures)]
+
+    # Flatten the list of lists
+    new_rows = [item for sublist in results for item in sublist]
+
+    # Create a DataFrame from the new rows and concatenate it with the original DataFrame
+    new_df = pd.DataFrame(new_rows)
+    df = pd.concat([df, new_df], ignore_index=True)
+
+    # sort by the DATA_AS_OF
+    df = df.sort_values('DATA_AS_OF')
+    df = df.reset_index(drop=True)
+
+    # check again
+    missing_anygap = df['DATA_AS_OF'].diff().dt.total_seconds() > (60*5)
+    missing_anygap = missing_anygap[missing_anygap].index
+    assert len(missing_anygap) == 0, 'There are still missing gaps'
+
+    tqdm.write(f"Sensor downtime ID: {id}")
+    # get sensor downtime
+    # get all the SPEED == 0 readings
+    zero_speed = df['SPEED'] == 0
+    speed_goes_down = df['SPEED'].diff() < 0
+    speed_goes_up = df['SPEED'].diff(-1) < 0
+
+    speed_goto_zero = zero_speed & speed_goes_down
+    speed_goto_zero = speed_goto_zero[speed_goto_zero].index
+
+    speed_gofrom_zero = zero_speed & speed_goes_up
+    speed_gofrom_zero = speed_gofrom_zero[speed_gofrom_zero].index
+
+    threshold_step = threshold_time // 5
+    sensor_downtime = {}
+    i = 0
+    for start, end in zip(speed_goto_zero, speed_gofrom_zero):
+        if end - start > threshold_step:
+            sensor_downtime[i] = {'time': (df['DATA_AS_OF'][start], df['DATA_AS_OF'][end]), 'index': (start, end)}
+            i += 1
+
+
+    # if the downtime falls entirely between 22:00 and 6:00, remove it from the dictionary
+    def check_22_6(time):
+        if time.hour >= 0 and time.hour < 6:
+            return True
+        elif time.hour >= 22:
+            return True
+        else:
+            return False
+    for key in list(sensor_downtime.keys()):
+        if check_22_6(sensor_downtime[key]['time'][0]) and check_22_6(sensor_downtime[key]['time'][1]):
+            del sensor_downtime[key]
+
+    # convert the 'time' segment to string
+    for key in sensor_downtime.keys():
+        sensor_downtime[key]['time'] = (str(sensor_downtime[key]['time'][0]), str(sensor_downtime[key]['time'][1]))
+
+    sensor_downtime = dict(enumerate(sensor_downtime.values()))
+
+    df.to_parquet(f'./time_series_short_new/id_{id}.parquet')
+    sensor_downtimes[id] = sensor_downtime
+    id_info[str(id)]['sensor_downtime'] = sensor_downtime
+    id_info[str(id)]['len'] = len(df)
+
+json.dump(id_info, open('./time_series_short_new/id_info_zero_shot_imputed.json', 'w'), indent=4)
+
+json.dump(sensor_downtimes, open('./time_series_short_new/sensor_downtimes.json', 'w'), indent=4)
+print('Done!')
+

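In short, the script above interpolates gaps of up to two hours linearly (linear_impute), zero-fills longer gaps (zero_impute), and records zero-speed stretches longer than two hours as sensor downtime, dropping windows whose start and end both fall between 22:00 and 6:00. A minimal sketch of inspecting its outputs, assuming the script has already been run from the repository root:

import json

# Files written by scripts/impute_expand_abandoned.py
with open("time_series_short_new/id_info_zero_shot_imputed.json") as f:
    id_info = json.load(f)
with open("time_series_short_new/sensor_downtimes.json") as f:
    downtimes = json.load(f)

# Each downtime entry stores (start, end) timestamps and row indices.
for sensor_id, events in downtimes.items():
    print(sensor_id, len(events), "downtime windows")
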
scripts/merge_general_report.py
ADDED
@@ -0,0 +1,58 @@
+import json
+import os
+import glob
+
+# Configuration for paths and filenames
+base_directory = 'weather'
+output_filename = 'merged_general_weather_report.json'
+
+# Define a search pattern to find the relevant JSON files
+file_pattern = os.path.join(base_directory, '*', 'weather_report', 'fast_general_weather_forecast_*.json')
+all_files = glob.glob(file_pattern)
+
+if not all_files:
+    print(f"No files found for pattern '{file_pattern}'. Please check the path and script location.")
+else:
+    print(f"Found {len(all_files)} files to merge. Starting process...")
+
+    # The master dictionary will hold the final nested structure.
+    # It is structured as: { 'timestamp1': {'regionA': {...}, 'regionB': {...}}, 'timestamp2': ... }
+    master_data = {}
+
+    for file_path in all_files:
+        try:
+            # Extract the region name from the file path.
+            # e.g., 'Bayreuth-TenneT' is the 3rd last part of the path.
+            path_parts = os.path.normpath(file_path).split(os.sep)
+            region = path_parts[-3]
+
+            # The data in each file is a dictionary like {'20160101': {...}, '20160102': {...}}
+            with open(file_path, 'r', encoding='utf-8') as f:
+                region_data = json.load(f)
+
+            # Iterate through all (timestamp, report_details) pairs in the current file
+            for timestamp, report_details in region_data.items():
+                timestamp_entry = master_data.setdefault(timestamp, {})
+                # Under the entry for this timestamp, add the report details keyed by the region name.
+                timestamp_entry[region] = report_details
+
+            print(f" - Processed and integrated: {region}")
+
+        except Exception as e:
+            print(f"An error occurred while processing file {file_path}: {e}")
+
+    # Write the consolidated data to a new JSON file
+    if master_data:
+        print("\nWriting all data to the final file...")
+
+        # Sort the master dictionary by timestamp (key) for better readability
+        sorted_master_data = dict(sorted(master_data.items()))
+
+        with open(output_filename, 'w', encoding='utf-8') as f:
+            json.dump(sorted_master_data, f, indent=4, ensure_ascii=False)
+
+        print(f"Merge complete!")
+        print(f"All data has been aggregated by timestamp into: {output_filename}")
+        print(f"The final file contains {len(master_data)} unique timestamps.")
+    else:
+        print("No files were processed successfully. The output file was not generated.")

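For context, merge_general_report.py is expected to run from the repository root so that the weather/*/weather_report/ glob resolves, and it nests every region's report under each timestamp. A minimal sketch of reading the merged file at the path committed above (the script itself writes to the current directory):

import json

# Merged report as committed under weather/merged_general_report/
path = "weather/merged_general_report/merged_general_weather_report.json"
with open(path, encoding="utf-8") as f:
    merged = json.load(f)

# Structure: { timestamp: { region: report_details, ... }, ... }
first_ts = next(iter(merged))
print(first_ts, list(merged[first_ts].keys()))
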
scripts/split_abandoned.ipynb
ADDED
The diff for this file is too large to render. See raw diff.

static_info_embeddings.pkl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:b5035104782b5887cbb1e8a0b14506da5a0ad40d4f49193c9b1edae700f2adcc
 size 97059

static_info_embeddings_short_new.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:035999dbfb730295d353bfe27f6132bfd5bc8f0ef0e85928c6501ba7b6ffee3c
+size 13283

static_info_short_new.json
ADDED
@@ -0,0 +1 @@
+{"general_info": "This dataset contains Average Speed of a Vehicle Traveled Between End Points data in km/h collected from various locations in New York City by sensors. The sampling rate is every 5 minutes. When no car is detected in the period, the speed is set to 0.", "downtime_prompt": "The sensor is down for unknown reasons, readings set to 0. ", "channel_info": {"159": "Sensor 159 is located at Bronx, with segment of BRP N WATSON AVENUE - FORDHAM ROAD.", "351": "Sensor 351 is located at Staten Island, with segment of MLK S - SIE W WALKER STREET - RICHMOND AVENUE.", "150": "Sensor 150 is located at Manhattan, with segment of BQE N Atlantic Ave - MAN Bridge Manhattan Side.", "364": "Sensor 364 is located at Manhattan, with segment of QMT E Manhattan Side - Toll Plaza.", "195": "Sensor 195 is located at Bronx, with segment of CBE W U/LEV AMSTERDAM AVE - I95 S EXP LNS.", "199": "Sensor 199 is located at Queens, with segment of CIP N Hempstead Tpk - LIE.", "406": "Sensor 406 is located at Queens, with segment of TNB S Toll Plaza - Queens Anchorage.", "402": "Sensor 402 is located at Queens, with segment of TNB N Queens Anchorage - Toll Plaza.", "164": "Sensor 164 is located at Queens, with segment of BWB N Queens Anchorage - Toll Plaza.", "141": "Sensor 141 is located at Queens, with segment of BE S TBB EXIT RAMP - QUEENS ANCHORAGE."}}

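As a small usage sketch (not part of the commit), the static information can be turned into per-channel text descriptions; the general_info, downtime_prompt, and channel_info keys are those in the JSON line above, while the concatenation below is only an illustrative assumption.

import json

with open("static_info_short_new.json") as f:
    static_info = json.load(f)

# Illustrative per-channel description (hypothetical format, not from the repo).
channel = "159"
text = f"{static_info['general_info']} {static_info['channel_info'][channel]}"
print(text)
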
time_series_short_new/id_141.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c12177e53843d6e719a53aca1d20d2e97698c47c94eeaaff3779a858b12bef23
+size 4124668

time_series_short_new/id_150.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b2a6bf47398276993c91943f23cb52cd1449b882d0d01908eda73951ea76fd8
+size 4277041

time_series_short_new/id_159.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e73e26d126bfb491d9fc58794d6291babc96a89beee6cd58a47fd04725279b2
+size 4100374

time_series_short_new/id_164.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8038cab399d7b7c7d8816dccafa735553316dbd204ab7b7a2030878c6eac367
+size 4072800

time_series_short_new/id_195.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14eb2b3d6a96ca9427c292e851fc9b751e8d706fa4fc08e8250c0d8b4c495330
+size 4165798

time_series_short_new/id_199.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce1ad95935b69adc837c82057548d056931a5cd010365039b7ae8ffc41d6a689
+size 4276394

time_series_short_new/id_351.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa2358cb250053401e5ec3040865b1032de267777891989e93d72deb0a82f30d
+size 4070889

time_series_short_new/id_364.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf4dbeb4555334b6372659c3db974bad9c0482e08617018f112cf5a8cf91d201
+size 4101331

time_series_short_new/id_402.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce1d0efa0b6e222ab699fce4c0808d93e4b890406e2c2fa6ec18dca507ffde45
+size 4027501

time_series_short_new/id_406.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8c22d5de234843c5cc40a1dd500de501b8acadb4bb5e90fb28a006dc271de74
+size 4167903

time_series_short_new/id_info_imputed_short_new.json
ADDED
The diff for this file is too large to render. See raw diff.

weather/merged_general_report/merged_general_weather_report.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5acb2c070d736815cf0fa9e9c1fec4d8a41f1529e6434f892c1af40c3153d9a
+size 12828355

weather/merged_report_embedding/static_info_embeddings.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5035104782b5887cbb1e8a0b14506da5a0ad40d4f49193c9b1edae700f2adcc
+size 97059

weather/merged_report_embedding/static_info_embeddings_short_new.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:035999dbfb730295d353bfe27f6132bfd5bc8f0ef0e85928c6501ba7b6ffee3c
+size 13283