Xu Zhijian committed on
Commit
f634eb1
·
1 Parent(s): 19ec435

Add: data during 2024-2025; weather report; re-embedding

Browse files
id_info.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "weather_2014-18": {
3
  "sensor_downtime": {}
4
  }
5
 
 
1
  {
2
+ "weather_large": {
3
  "sensor_downtime": {}
4
  }
5
 
weather_2014-18.parquet → raw_data/weather_2014-18.parquet RENAMED
File without changes
weather_2019-23.parquet → raw_data/weather_2019-22.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c2c1917d5a5db6c5009ad4c22b29245bd423a8b9c1102f0e5ea37f78eb8d7f9c
3
- size 8155570
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e6f42740a21cd6c6415b1a279db4f6a53f4f367235450e41e0d0292b4d3f663
3
+ size 8903879
embeddings.pkl → raw_data/weather_2023-25.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:0baba5a83e687c4f24763a4f71ac03c6ab379ac6d040ef365c67e707a70775d7
3
- size 78848178
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8da4d39f5892c4074bfec126116d1292b3cbfc58d16871fff93049cb6ecb3c98
3
+ size 5924749
scripts/embed.py ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+
3
+ import json
4
+ import glob
5
+ import joblib
6
+ import os
7
+ import torch
8
+ import numpy as np # Required for array operations
9
+ from transformers import AutoModel
10
+
11
+ # Set proxy (replace with your proxy address and port)
12
+ # os.environ['HTTP_PROXY'] = 'http://localhost:1080'
13
+ # os.environ['HTTPS_PROXY'] = 'http://localhost:1080'
14
+
15
+ # --- Configuration ---
16
+ # Assuming your data directory structure is as follows:
17
+ # ./data/
18
+ # ├── san-francisco/
19
+ # │ ├── fast_general_..._forecast_2017.json
20
+ # │ └── ...
21
+ # ├── san-diego/
22
+ # │ ├── fast_general_..._forecast_2017.json
23
+ # │ └── ...
24
+ # └── id_info_imputed.json
25
+
26
# Root directory containing the per-city weather report JSON files.
DATA_DIR = "./weather/weather_report"
# Hugging Face model id used for all text embeddings.
EMBEDDING_MODEL = "jinaai/jina-embeddings-v3"
# Embeddings are truncated to this many dimensions.
TRUNCATE_DIM = 256
# Number of timestamps encoded per model.encode() call; tune for your hardware.
BATCH_SIZE = 1500

# Cities (subdirectories of DATA_DIR) to process; BASE_BOROUGH's files are
# used as the reference when locating every city's corresponding files.
BOROUGHS = ['formal_report']
BASE_BOROUGH = 'formal_report'
34
+
35
def initialize_model():
    """Load the embedding model, preferring the GPU when one is available.

    Returns:
        The ``AutoModel`` instance for ``EMBEDDING_MODEL`` moved to the
        selected device.
    """
    print("Initializing embedding model...")
    if torch.cuda.is_available():
        device = "cuda:0"
    else:
        device = "cpu"
    print(f"Using device: {device}")

    # trust_remote_code is required: jina-embeddings-v3 ships custom
    # modelling code alongside the checkpoint.
    model = AutoModel.from_pretrained(EMBEDDING_MODEL, trust_remote_code=True)
    model = model.to(device)

    print("Model loaded successfully.")
    return model
50
+
51
def process_dynamic_data_for_borough(model, borough):
    """Embed every dynamic forecast JSON file found for one city.

    For each ``wm_messages_??.json`` under ``DATA_DIR/<borough>``, the
    per-timestamp message texts are prefixed with the city name, encoded in
    batches of BATCH_SIZE timestamps, and the resulting mapping
    ``{timestamp: (n_messages, TRUNCATE_DIM) array}`` is dumped next to the
    source file with a ``.pkl`` suffix.
    """
    print(f"\n--- Starting processing for city: {borough} ---")
    city_dir = os.path.join(DATA_DIR, borough)
    forecast_files = glob.glob(os.path.join(city_dir, "wm_messages_??.json"))

    if not forecast_files:
        print(f"No dynamic forecast JSON files found for {borough}. Skipping.")
        return

    # "san-francisco" -> "San Francisco"; prefixed to every message text.
    city_label = borough.replace("-", " ").title()

    for file_path in forecast_files:
        print(f"\nProcessing file: {file_path}")
        with open(file_path, "r") as fh:
            records = json.load(fh)

        stamps = list(records)
        if not stamps:
            print(f"File {file_path} is empty. Skipping.")
            continue
        print(f"Loaded {len(records)} records from {file_path}")

        embeddings_by_ts = {}
        total_batches = -(-len(stamps) // BATCH_SIZE)  # ceiling division
        for start in range(0, len(stamps), BATCH_SIZE):
            chunk = stamps[start:start + BATCH_SIZE]
            print(f" Processing batch {start // BATCH_SIZE + 1}/{total_batches}")

            # Flatten every text of the chunk into one list; offsets[k]
            # marks where the k-th timestamp's texts begin inside it.
            flat_texts = []
            offsets = [0]
            for ts in chunk:
                flat_texts.extend(f'{city_label}: {text}' for text in records[ts].values())
                offsets.append(len(flat_texts))

            vectors = model.encode(flat_texts, truncate_dim=TRUNCATE_DIM)

            for k, ts in enumerate(chunk):
                embeddings_by_ts[ts] = vectors[offsets[k]:offsets[k + 1], :]

        # NOTE(review): the "forecast" -> "embeddings" substitution is a
        # no-op for the wm_messages_* naming scheme globbed above —
        # presumably a leftover from an earlier file-name convention; confirm.
        out_name = os.path.basename(file_path).replace("forecast", "embeddings").replace(".json", ".pkl")
        out_path = os.path.join(city_dir, out_name)

        with open(out_path, "wb") as fh:
            joblib.dump(embeddings_by_ts, fh)
        print(f"Saved city-specific embeddings to {out_path}")
100
+
101
def merge_borough_embeddings():
    """Concatenate per-city embedding files into one merged file per year.

    BASE_BOROUGH's embedding files serve as the reference set; for every
    timestamp present in *all* cities, the per-city arrays are stacked along
    axis 0 and the merged dict is written under ``DATA_DIR/weather``.

    NOTE(review): this globs ``fast_general_*_embeddings_*.pkl`` under
    ``DATA_DIR/weather/<city>``, while process_dynamic_data_for_borough
    writes ``wm_messages_??.pkl`` under ``DATA_DIR/<city>`` — the two
    patterns look stale relative to each other; verify against the on-disk
    layout.
    """
    print("\n--- Starting merging of city embeddings ---")
    reference_dir = os.path.join(DATA_DIR, 'weather', BASE_BOROUGH)
    reference_files = glob.glob(os.path.join(reference_dir, "fast_general_*_embeddings_*.pkl"))

    if not reference_files:
        print("No base embedding files found to merge. Skipping.")
        return

    for base_file in reference_files:
        print(f"\nMerging based on: {base_file}")

        # Load this year's embedding file for every city; skip the year on
        # the first city whose file is missing.
        per_city = {}
        try:
            for borough in BOROUGHS:
                # NOTE(review): str.replace swaps *every* occurrence of
                # BASE_BOROUGH in the path, not just the directory component.
                with open(base_file.replace(BASE_BOROUGH, borough), "rb") as fh:
                    per_city[borough] = joblib.load(fh)
        except FileNotFoundError as err:
            print(f"Could not find a corresponding file for {err.filename}. Skipping this year.")
            continue

        merged = {}
        for ts in per_city[BASE_BOROUGH]:
            # Only merge timestamps for which every city has data.
            if all(ts in per_city[b] for b in BOROUGHS):
                merged[ts] = np.concatenate([per_city[b][ts] for b in BOROUGHS], axis=0)

        final_output_path = os.path.join(DATA_DIR, 'weather', os.path.basename(base_file))
        joblib.dump(merged, final_output_path)
        print(f"Saved final merged embeddings to {final_output_path}")
143
+
144
def process_static_data(model):
    """Embed the static sensor-description texts and save both versions.

    Reads ``id_info_imputed.json``, builds a text dictionary (general info,
    downtime prompt, one description per channel), saves it as JSON, then
    encodes every text and saves the mirrored embedding dictionary — each
    value a ``(1, TRUNCATE_DIM)`` slice — as a pickle.

    NOTE(review): the general_info text describes NYC vehicle-speed sensors
    although this repository's paths refer to weather data — confirm the
    wording is intentional.
    """
    print("\n--- Starting processing of static info data ---")
    source_path = os.path.join(DATA_DIR, "expanded_impute_data", "id_info_imputed.json")

    if not os.path.exists(source_path):
        print(f"Static info source file not found at {source_path}. Skipping.")
        return

    with open(source_path, "r") as fh:
        id_info = json.load(fh)

    # 1. Assemble the human-readable text dictionary.
    static_info_text = {
        'general_info': 'This dataset contains Average Speed of a Vehicle Traveled Between End Points data in km/h collected from various locations in New York City by sensors. The sampling rate is every 5 minutes. When no car is detected in the period, the speed is set to 0.',
        'downtime_prompt': "The sensor is down for unknown reasons, readings set to 0. ",
        'channel_info': {
            ch: f"Sensor {ch} is located at {info['borough']}, with segment of {info['link']}."
            for ch, info in id_info.items()
        },
    }

    # Persist the text version alongside the embeddings.
    text_output_path = os.path.join(DATA_DIR, 'weather', "static_info.json")
    with open(text_output_path, 'w') as fh:
        json.dump(static_info_text, fh, indent=2)
    print(f"Saved static info text to {text_output_path}")

    # 2. Order the texts: general info, downtime prompt, then channels.
    channels = list(static_info_text['channel_info'])
    texts_to_embed = [static_info_text['general_info'], static_info_text['downtime_prompt']]
    texts_to_embed += [static_info_text['channel_info'][ch] for ch in channels]

    print(f"Embedding {len(texts_to_embed)} static text entries...")
    embeddings = model.encode(texts_to_embed, truncate_dim=TRUNCATE_DIM)

    # 3. Mirror the text dictionary with (1, dim) embedding slices, in the
    #    same order the texts were concatenated above.
    static_info_embeddings = {
        'general_info': embeddings[0:1, :],
        'downtime_prompt': embeddings[1:2, :],
        'channel_info': {
            ch: embeddings[i + 2:i + 3, :]
            for i, ch in enumerate(channels)
        },
    }

    # 4. Persist the embedding dictionary.
    output_path = os.path.join(DATA_DIR, 'weather', "static_info_embeddings.pkl")
    joblib.dump(static_info_embeddings, output_path)
    print(f"Saved static info embeddings to {output_path}")
195
+
196
def main():
    """Entry point: embed each city's reports, merge them, embed static info."""
    model = initialize_model()

    # Per-city dynamic embeddings.
    for city in BOROUGHS:
        process_dynamic_data_for_borough(model, city)

    # Merge the per-city embedding files.
    merge_borough_embeddings()

    # Static sensor descriptions.
    process_static_data(model)

    print("\nAll processing complete.")


if __name__ == "__main__":
    main()
scripts/embed_static.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import joblib
3
+ import torch
4
+ import os
5
+ from transformers import AutoModel
6
+
7
# Route Hugging Face model downloads through a local proxy
# (replace the address/port to match your environment).
os.environ.update({
    'HTTP_PROXY': 'http://localhost:1080',
    'HTTPS_PROXY': 'http://localhost:1080',
})
10
+
11
def create_static_embeddings(
    input_path="./data/Jena_Atmospheric_Physics/static_info.json",
    output_path="./data/Jena_Atmospheric_Physics/static_info_embeddings.pkl",
):
    """
    Loads static information from a JSON file, generates embeddings for the text fields,
    and saves the result as a pickle file.

    If the input file is missing, a small demonstration file is written to
    ``input_path`` and the function returns without loading the model.

    Args:
        input_path (str): Path to the input static_info.json file.
        output_path (str): Path to save the output .pkl file with embeddings.
    """
    # --- 1. Sanity Checks ---
    if not os.path.exists(input_path):
        print(f"Error: Input file not found at '{input_path}'")
        # Create a dummy file for demonstration if it doesn't exist
        print("Creating a dummy 'static_info.json' for demonstration purposes.")
        dummy_data = {
            "general_info": "This dataset contains the solar power generation data of 16 solar panels in Calgary, Alberta, Canada. The data is collected hourly. ",
            "downtime_prompt": "The system is shutdown, thus no power generation.",
            "channel_info": {
                "CFH_HQ": "The solar panel is located at Calgary Fire Hall Headquarters.",
                "WMSC": "The solar panel is located at Whitehorn Multi-Service Centre.",
                "SLC": "The solar panel is located at Southland Leisure Centre."
            }
        }
        # BUG FIX: os.makedirs("") raises FileNotFoundError when input_path
        # has no directory component — guard it, mirroring step 6 below.
        input_dir = os.path.dirname(input_path)
        if input_dir:
            os.makedirs(input_dir, exist_ok=True)
        with open(input_path, 'w') as f:
            json.dump(dummy_data, f, indent=2)
        print(f"Dummy file created at '{input_path}'. Please run the script again.")
        return

    # --- 2. Initialize Model ---
    print("Initializing embedding model...")
    # Set up device (use GPU if available)
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    print(f"Using device: {device}")

    # Load the pre-trained model; trust_remote_code is needed because the
    # checkpoint ships custom modelling code.
    model = AutoModel.from_pretrained(
        "jinaai/jina-embeddings-v3",
        trust_remote_code=True
    ).to(device=device)

    # --- 3. Load and Prepare Data ---
    print(f"Loading data from '{input_path}'...")
    with open(input_path, "r") as f:
        static_info = json.load(f)

    # Extract all text pieces to embed into a single list, in a fixed order:
    # [general_info, downtime_prompt, channel texts in key order].
    channels = list(static_info['channel_info'].keys())
    texts_to_embed = (
        [static_info['general_info'], static_info['downtime_prompt']]
        + [static_info['channel_info'][key] for key in channels]
    )

    print(f"Found {len(texts_to_embed)} text snippets to embed.")

    # --- 4. Generate Embeddings ---
    print("Generating embeddings...")
    embeddings = model.encode(
        texts_to_embed,
        truncate_dim=256  # Truncate to 256 dimensions as in the notebook
    )
    print(f"Embeddings generated with shape: {embeddings.shape}")

    # --- 5. Replace Text with Embeddings in the Dictionary ---
    print("Replacing text data with embeddings...")
    # The original static_info dictionary is modified in place.
    static_info['general_info'] = embeddings[0:1, :]
    static_info['downtime_prompt'] = embeddings[1:2, :]

    for i, key in enumerate(channels):
        # The slice [i+2:i+3, :] keeps the result as a 2D array (1, 256)
        static_info['channel_info'][key] = embeddings[i+2:i+3, :]

    # --- 6. Save the Result ---
    # Ensure the output directory exists
    output_dir = os.path.dirname(output_path)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)

    print(f"Saving embeddings to '{output_path}'...")
    with open(output_path, "wb") as f:
        joblib.dump(static_info, f)

    print("Process completed successfully!")


if __name__ == "__main__":
    create_static_embeddings()
static_info.json ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "general_info": "This dataset contains comprehensive weather and climate measurements recorded in Jena, Germany, spanning multiple years.",
3
+ "downtime_prompt": "No reading at this time for unknown reason, set 0 as default, ignore.",
4
+ "channel_info": {
5
+ "p (mbar)": "Atmospheric pressure measured in millibars. It indicates the weight of the air above the point of measurement.",
6
+ "T (degC)": "Temperature at the point of observation, measured in degrees Celsius.",
7
+ "Tpot (K)": "Potential temperature, given in Kelvin. This is the temperature that a parcel of air would have if it were brought adiabatically to a standard reference pressure, often used to compare temperatures at different pressures in a thermodynamically consistent way.",
8
+ "Tdew (degC)": "Dew point temperature in degrees Celsius. It's the temperature to which air must be cooled, at constant pressure and water vapor content, for saturation to occur. A lower dew point means dryer air.",
9
+ "rh (%)": "Relative humidity, expressed as a percentage. It measures the amount of moisture in the air relative to the maximum amount of moisture the air can hold at that temperature.",
10
+ "VPmax (mbar)": "Maximum vapor pressure, in millibars. It represents the maximum amount of moisture that the air can hold at a given temperature.",
11
+ "VPact (mbar)": "Actual vapor pressure, in millibars. It's the current amount of water vapor present in the air.",
12
+ "VPdef (mbar)": "Vapor pressure deficit, in millibars. The difference between the maximum vapor pressure and the actual vapor pressure; it indicates how much more moisture the air can hold before saturation.",
13
+ "sh (g/kg)": "Specific humidity, the mass of water vapor in a given mass of air, including the water vapor. It's measured in grams of water vapor per kilogram of air.",
14
+ "H2OC (mmol/mol)": "Water vapor concentration, expressed in millimoles of water per mole of air. It's another way to quantify the amount of moisture in the air.",
15
+ "rho (g/m³)": "Air density, measured in grams per cubic meter. It indicates the mass of air in a given volume and varies with temperature, pressure, and moisture content.",
16
+ "wv (m/s)": "Wind velocity, the speed of the wind measured in meters per second.",
17
+ "max. wv (m/s)": "Maximum wind velocity observed in the given time period, measured in meters per second.",
18
+ "wd (deg)": "Wind direction, in degrees from true north. This indicates the direction from which the wind is coming.",
19
+ "rain (mm)": "Rainfall amount, measured in millimeters. It indicates how much rain has fallen during the observation period.",
20
+ "raining (s)": "Duration of rainfall, measured in seconds. It specifies how long it has rained during the observation period.",
21
+ "SWDR (W/m²)": "Shortwave Downward Radiation, the amount of solar radiation reaching the ground, measured in watts per square meter.",
22
+ "PAR (μmol/m²/s)": "Photosynthetically Active Radiation, the amount of light available for photosynthesis, measured in micromoles of photons per square meter per second.",
23
+ "max. PAR (μmol/m²/s)": "Maximum Photosynthetically Active Radiation observed in the given time period, indicating the peak light availability for photosynthesis.",
24
+ "Tlog (degC)": "Likely a logged temperature measurement in degrees Celsius. It could be a specific type of temperature measurement or recording method used in the dataset.",
25
+ "CO2 (ppm)": "Carbon dioxide concentration in the air, measured in parts per million. It's a key greenhouse gas and indicator of air quality."
26
+ }
27
+ }
time_series/id_info.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "weather_large": {
3
+ "sensor_downtime": {}
4
+ }
5
+
6
+ }
weather_large.parquet → time_series/weather_large.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c709908bebb7e853059c25f9aa336999fbc991e3122312c048460b7e00e9dabe
3
- size 16863314
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b31c944dc04ffda92aaaba76473eb1d6db3e6055c6f4d070ce76c27014a1e3e
3
+ size 23421608
static_info_embeddings_new.pkl → weather/report_embedding/formal_report/static_info_embeddings.pkl RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:bae20678402088d2b279d59f9270b02f862fa7a25d30215423c8211fd16e6a88
3
- size 38867
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:84dbe5a639b337a2db7de4d5a743f8dead59f587658763a124e216d4f3aabb7e
3
+ size 25331
weather/report_embedding/formal_report/wm_messages_v1.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:356203ad37b282c541b547add40c1c5407b60da44363b08782cdcd9dc2603da1
3
+ size 121469122
weather/report_embedding/formal_report/wm_messages_v2.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1c7faa27686c628b8a46cffe4f1d393767efb66f25d3000c4e1834e7885ed2ef
3
+ size 121469122
weather/report_embedding/formal_report/wm_messages_v3.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5740a96972f767e903beb14546d2c76a01da5a4e24eceb6cc4a01b9dc451c5ae
3
+ size 121469122
weather/weather_report/formal_report/wm_messages_v1.json ADDED
The diff for this file is too large to render. See raw diff
 
weather/weather_report/formal_report/wm_messages_v2.json ADDED
The diff for this file is too large to render. See raw diff
 
weather/weather_report/formal_report/wm_messages_v3.json ADDED
The diff for this file is too large to render. See raw diff