Xu Zhijian committed on
Commit
213da60
·
1 Parent(s): a00744f

update: merged general report

Browse files
README.md CHANGED
@@ -14,7 +14,8 @@ size_categories:
14
 
15
  ## Data source:
16
 
17
- - [California ISO](https://www.caiso.com/todays-outlook)
 
18
 
19
  ## Dataset Structure
20
 
@@ -25,8 +26,8 @@ The dataset is organized into the following structure:
25
 
26
  | |-- raw_data # Original data files
27
  | |-- time_series # Rule-based Imputed data files
28
- | | |-- id_1.parquet # Time series data for each subject can be multivariate, can be in csv, parquet, etc.
29
- | | |-- id_2.parquet
30
  | | |-- ...
31
  | | |-- id_info.json # Metadata for each subject
32
 
@@ -41,13 +42,13 @@ The dataset is organized into the following structure:
41
  | | | | |-- ...
42
  | | | |-- weather_report (can be flattened and use regex to extract the version)
43
  | | | | |-- version_1
44
- | | | | | |-- weather_report_????.json
45
  | | | | | |-- ...
46
  | | | | |-- version_2
47
  | | | | |-- ...
48
  | | | |-- report_embedding # embedding for the weather report
49
  | | | | |-- version_1
50
- | | | | | |-- report_embedding_????.pkl
51
  | | | | | |-- ...
52
  | | | | |-- version_2
53
  | | | | |-- ...
@@ -56,10 +57,11 @@ The dataset is organized into the following structure:
56
  | | |-- ...
57
 
58
  | | |-- merged_report_embedding # merged embedding for multiple needed locations (optional)
59
- | | | |-- version_1
60
- | | | | |-- report_embedding_????.pkl
61
- | | | | |-- ...
62
- | | | |-- version_2
 
63
  | | | |-- ...
64
 
65
  | |-- scripts # Scripts for data processing, model training, and evaluation
 
14
 
15
  ## Data source:
16
 
17
+ - [California ISO]
18
+ - https://www.caiso.com/todays-outlook
19
 
20
  ## Dataset Structure
21
 
 
26
 
27
  | |-- raw_data # Original data files
28
  | |-- time_series # Rule-based Imputed data files
29
+ | | |-- all_version_1.parquet # Time series data for each subject can be multivariate, can be in csv, parquet, etc.
30
+ | | |-- all_version_2.parquet
31
  | | |-- ...
32
  | | |-- id_info.json # Metadata for each subject
33
 
 
42
  | | | | |-- ...
43
  | | | |-- weather_report (can be flattened and use regex to extract the version)
44
  | | | | |-- version_1
45
+ | | | | | |-- xxx_weather_report_????.json
46
  | | | | | |-- ...
47
  | | | | |-- version_2
48
  | | | | |-- ...
49
  | | | |-- report_embedding # embedding for the weather report
50
  | | | | |-- version_1
51
+ | | | | | |-- xxx_report_embedding_????.pkl
52
  | | | | | |-- ...
53
  | | | | |-- version_2
54
  | | | | |-- ...
 
57
  | | |-- ...
58
 
59
  | | |-- merged_report_embedding # merged embedding for multiple needed locations (optional)
60
+ | | | |-- xxx_embeddings_????.pkl
61
+ | | | |-- ...
62
+
63
+ | | |-- merged_general_report # merged general report for multiple needed locations (optional)
64
+ | | | |-- xxx_report.json
65
  | | | |-- ...
66
 
67
  | |-- scripts # Scripts for data processing, model training, and evaluation
scripts/merge_general_report.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import os
import glob

# Configuration for paths and filenames
BASE_DIRECTORY = 'weather'
# NOTE(review): the committed output lives under weather/merged_general_report/,
# but this default writes to the current working directory — confirm the
# intended invocation CWD.
OUTPUT_FILENAME = 'merged_general_weather_forecast.json'
REPORT_TYPE_FOLDER = 'general_report'
FILE_PREFIX = 'general_weather_forecast'


def merge_general_reports(base_directory=BASE_DIRECTORY,
                          output_filename=OUTPUT_FILENAME,
                          report_type_folder=REPORT_TYPE_FOLDER,
                          file_prefix=FILE_PREFIX):
    """Merge per-region general weather forecast JSON files into one file.

    Scans ``<base_directory>/<region>/weather_report/<report_type_folder>/``
    for ``<file_prefix>_*.json`` files, each mapping timestamp -> report
    details, and writes a single JSON file keyed first by timestamp and then
    by region::

        {"20160101": {"RegionA": {...}, "RegionB": {...}}, ...}

    Args:
        base_directory: Root directory containing one sub-directory per region.
        output_filename: Path of the merged JSON file to write.
        report_type_folder: Per-region folder name holding the report files.
        file_prefix: Filename prefix of the report JSON files.

    Returns:
        dict: The merged ``{timestamp: {region: report_details}}`` mapping
        (empty when no files matched or none were processed successfully).
    """
    # Define a precise search pattern to find the relevant JSON files.
    file_pattern = os.path.join(base_directory, '*', 'weather_report',
                                report_type_folder, f'{file_prefix}_*.json')

    # Find all file paths that match the specified pattern.
    all_files = glob.glob(file_pattern)

    if not all_files:
        print(f"No files found for pattern '{file_pattern}'. Please check the path and script location.")
        return {}

    print(f"Found {len(all_files)} files to merge. Starting process...")

    # The master dictionary holds the final nested structure:
    # { 'timestamp1': {'regionA': {...}, 'regionB': {...}}, 'timestamp2': ... }
    master_data = {}

    for file_path in all_files:
        try:
            # The glob pattern fixes the tail of every match as
            # <region>/weather_report/<report_type_folder>/<file>.json,
            # so the region name is the 4th-from-last path component.
            # (Fix: the original used [-3], which always yielded the
            # literal directory name 'weather_report' instead of the region.)
            path_parts = os.path.normpath(file_path).split(os.sep)
            region = path_parts[-4]

            # Each file maps timestamps to report details, e.g.
            # {'20160101': {...}, '20160102': {...}}.
            with open(file_path, 'r', encoding='utf-8') as f:
                region_data = json.load(f)

            # Group by timestamp first, then key by region within it.
            for timestamp, report_details in region_data.items():
                master_data.setdefault(timestamp, {})[region] = report_details

            print(f" - Processed and integrated: {region}")

        # Narrowed from a broad `except Exception`: only I/O failures, bad
        # JSON, or an unexpectedly shallow path count as per-file failures;
        # genuine programming errors should still surface.
        except (OSError, json.JSONDecodeError, IndexError) as e:
            print(f"An error occurred while processing file {file_path}: {e}")

    # Write the consolidated data to a new JSON file.
    if master_data:
        print("\nWriting all data to the final file...")

        # Sort the master dictionary by timestamp (key) for better readability.
        sorted_master_data = dict(sorted(master_data.items()))

        with open(output_filename, 'w', encoding='utf-8') as f:
            json.dump(sorted_master_data, f, indent=4, ensure_ascii=False)

        print("Merge complete!")
        print(f"All data has been aggregated by timestamp into: {output_filename}")
        print(f"The final file contains {len(master_data)} unique timestamps.")
    else:
        print("No files were processed successfully. The output file was not generated.")

    return master_data


if __name__ == "__main__":
    merge_general_reports()
weather/merged_general_report/merged_general_weather_forecast.json ADDED
The diff for this file is too large to render. See raw diff