Datasets:
Add files using upload-large-folder tool
Browse files- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_10478.csv +9 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_1067.csv +10 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_11159.csv +31 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_11169.csv +15 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_11194.csv +26 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_1121.csv +21 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_11297.csv +30 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_11391.csv +32 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_11829.csv +10 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_11993.csv +31 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_1216.csv +32 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_12478.csv +27 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_12539.csv +26 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_12565.csv +26 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_13670.csv +14 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_13799.csv +9 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_13903.csv +14 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_14303.csv +23 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_14349.csv +19 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_15152.csv +25 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_15750.csv +29 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_16254.csv +19 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_16900.csv +8 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_17049.csv +17 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_17143.csv +12 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_18073.csv +18 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_18087.csv +28 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_2451.csv +14 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_3518.csv +9 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_4951.csv +13 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_5170.csv +32 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_5206.csv +11 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_5699.csv +17 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_6034.csv +14 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_6218.csv +26 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_6497.csv +27 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_6766.csv +9 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_7972.csv +24 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_863.csv +31 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_8720.csv +21 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_9059.csv +22 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_9176.csv +21 -0
- Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_9245.csv +10 -0
- README.md +235 -3
- process/eval.py +584 -0
- process/infer_eval_utils.py +142 -0
- process/infer_template.py +296 -0
- process/process_ETT.py +86 -0
- process/process_iNaturalist.py +65 -0
- process/requirements.txt +7 -0
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_10478.csv
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
0.7254
|
| 2 |
+
0.7488
|
| 3 |
+
0.8034
|
| 4 |
+
0.8658
|
| 5 |
+
0.9048
|
| 6 |
+
0.9204
|
| 7 |
+
0.936
|
| 8 |
+
0.9204
|
| 9 |
+
0.9126
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_1067.csv
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-0.1638
|
| 2 |
+
-0.1092
|
| 3 |
+
0.0156
|
| 4 |
+
0.1248
|
| 5 |
+
0.1872
|
| 6 |
+
0.2262
|
| 7 |
+
0.2496
|
| 8 |
+
0.2964
|
| 9 |
+
0.3198
|
| 10 |
+
0.3354
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_11159.csv
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-0.3276
|
| 2 |
+
-0.2652
|
| 3 |
+
-0.2496
|
| 4 |
+
-0.234
|
| 5 |
+
-0.234
|
| 6 |
+
-0.273
|
| 7 |
+
-0.3198
|
| 8 |
+
-0.3744
|
| 9 |
+
-0.4446
|
| 10 |
+
-0.507
|
| 11 |
+
-0.5382
|
| 12 |
+
-0.6084
|
| 13 |
+
-0.6162
|
| 14 |
+
-0.624
|
| 15 |
+
-0.6552
|
| 16 |
+
-0.702
|
| 17 |
+
-0.7956
|
| 18 |
+
-0.8502
|
| 19 |
+
-0.8502
|
| 20 |
+
-0.8268
|
| 21 |
+
-0.8424
|
| 22 |
+
-0.9282
|
| 23 |
+
-1.0062
|
| 24 |
+
-1.0374
|
| 25 |
+
-1.053
|
| 26 |
+
-1.0608
|
| 27 |
+
-1.0218
|
| 28 |
+
-0.9126
|
| 29 |
+
-0.7956
|
| 30 |
+
-0.702
|
| 31 |
+
-0.546
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_11169.csv
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
149.0
|
| 2 |
+
140.0
|
| 3 |
+
129.5
|
| 4 |
+
117.0
|
| 5 |
+
104.0
|
| 6 |
+
88.5
|
| 7 |
+
75.5
|
| 8 |
+
62.0
|
| 9 |
+
49.0
|
| 10 |
+
36.5
|
| 11 |
+
24.5
|
| 12 |
+
14.0
|
| 13 |
+
6.5
|
| 14 |
+
-2.0
|
| 15 |
+
-9.5
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_11194.csv
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
3.0
|
| 2 |
+
3.0
|
| 3 |
+
1.0
|
| 4 |
+
-1.0
|
| 5 |
+
-1.5
|
| 6 |
+
-3.5
|
| 7 |
+
-6.5
|
| 8 |
+
-5.5
|
| 9 |
+
-7.5
|
| 10 |
+
-8.0
|
| 11 |
+
-9.0
|
| 12 |
+
-10.5
|
| 13 |
+
-11.5
|
| 14 |
+
-13.5
|
| 15 |
+
-15.5
|
| 16 |
+
-17.0
|
| 17 |
+
-19.5
|
| 18 |
+
-21.5
|
| 19 |
+
-23.0
|
| 20 |
+
-24.0
|
| 21 |
+
-27.0
|
| 22 |
+
-29.0
|
| 23 |
+
-29.5
|
| 24 |
+
-31.0
|
| 25 |
+
-33.0
|
| 26 |
+
-34.0
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_1121.csv
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
32.38
|
| 2 |
+
28.97
|
| 3 |
+
24.5
|
| 4 |
+
19.34
|
| 5 |
+
14.75
|
| 6 |
+
10.97
|
| 7 |
+
9.28
|
| 8 |
+
8.88
|
| 9 |
+
10.5
|
| 10 |
+
13.91
|
| 11 |
+
16.5
|
| 12 |
+
20.44
|
| 13 |
+
23.91
|
| 14 |
+
25.81
|
| 15 |
+
26.0
|
| 16 |
+
26.5
|
| 17 |
+
24.91
|
| 18 |
+
22.97
|
| 19 |
+
21.94
|
| 20 |
+
20.03
|
| 21 |
+
18.44
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_11297.csv
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
35.47
|
| 2 |
+
34.38
|
| 3 |
+
33.78
|
| 4 |
+
31.88
|
| 5 |
+
31.34
|
| 6 |
+
30.19
|
| 7 |
+
30.88
|
| 8 |
+
29.56
|
| 9 |
+
30.53
|
| 10 |
+
31.19
|
| 11 |
+
31.44
|
| 12 |
+
31.75
|
| 13 |
+
31.88
|
| 14 |
+
33.56
|
| 15 |
+
34.5
|
| 16 |
+
36.25
|
| 17 |
+
39.0
|
| 18 |
+
40.31
|
| 19 |
+
40.75
|
| 20 |
+
41.16
|
| 21 |
+
41.03
|
| 22 |
+
40.34
|
| 23 |
+
40.34
|
| 24 |
+
41.31
|
| 25 |
+
42.5
|
| 26 |
+
43.62
|
| 27 |
+
43.84
|
| 28 |
+
43.66
|
| 29 |
+
42.66
|
| 30 |
+
41.59
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_11391.csv
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
0.4223
|
| 2 |
+
0.4203
|
| 3 |
+
0.4193
|
| 4 |
+
0.4193
|
| 5 |
+
0.4184
|
| 6 |
+
0.4184
|
| 7 |
+
0.4184
|
| 8 |
+
0.4184
|
| 9 |
+
0.4184
|
| 10 |
+
0.4184
|
| 11 |
+
0.4174
|
| 12 |
+
0.4164
|
| 13 |
+
0.4164
|
| 14 |
+
0.4154
|
| 15 |
+
0.4154
|
| 16 |
+
0.4164
|
| 17 |
+
0.4174
|
| 18 |
+
0.4174
|
| 19 |
+
0.4184
|
| 20 |
+
0.4184
|
| 21 |
+
0.4184
|
| 22 |
+
0.4184
|
| 23 |
+
0.4184
|
| 24 |
+
0.4174
|
| 25 |
+
0.4164
|
| 26 |
+
0.4164
|
| 27 |
+
0.4164
|
| 28 |
+
0.4184
|
| 29 |
+
0.4233
|
| 30 |
+
0.4301
|
| 31 |
+
0.4409
|
| 32 |
+
0.4555
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_11829.csv
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
26.22
|
| 2 |
+
26.81
|
| 3 |
+
28.56
|
| 4 |
+
27.84
|
| 5 |
+
27.53
|
| 6 |
+
27.16
|
| 7 |
+
25.31
|
| 8 |
+
25.88
|
| 9 |
+
25.31
|
| 10 |
+
26.31
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_11993.csv
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-35.5
|
| 2 |
+
-34.0
|
| 3 |
+
-31.0
|
| 4 |
+
-27.5
|
| 5 |
+
-25.5
|
| 6 |
+
-20.0
|
| 7 |
+
-14.0
|
| 8 |
+
-9.0
|
| 9 |
+
-3.5
|
| 10 |
+
-0.5
|
| 11 |
+
8.5
|
| 12 |
+
15.5
|
| 13 |
+
20.0
|
| 14 |
+
27.5
|
| 15 |
+
33.5
|
| 16 |
+
42.0
|
| 17 |
+
49.0
|
| 18 |
+
56.0
|
| 19 |
+
62.5
|
| 20 |
+
67.5
|
| 21 |
+
75.0
|
| 22 |
+
82.0
|
| 23 |
+
86.0
|
| 24 |
+
88.0
|
| 25 |
+
89.5
|
| 26 |
+
94.5
|
| 27 |
+
94.5
|
| 28 |
+
92.5
|
| 29 |
+
88.5
|
| 30 |
+
85.0
|
| 31 |
+
82.5
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_1216.csv
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
0.3978
|
| 2 |
+
0.39
|
| 3 |
+
0.3822
|
| 4 |
+
0.351
|
| 5 |
+
0.3276
|
| 6 |
+
0.2964
|
| 7 |
+
0.2652
|
| 8 |
+
0.2496
|
| 9 |
+
0.234
|
| 10 |
+
0.2262
|
| 11 |
+
0.2184
|
| 12 |
+
0.2184
|
| 13 |
+
0.2262
|
| 14 |
+
0.2418
|
| 15 |
+
0.2808
|
| 16 |
+
0.3276
|
| 17 |
+
0.3822
|
| 18 |
+
0.4758
|
| 19 |
+
0.5772
|
| 20 |
+
0.7332
|
| 21 |
+
0.9048
|
| 22 |
+
1.0998
|
| 23 |
+
1.3026
|
| 24 |
+
1.482
|
| 25 |
+
1.6068
|
| 26 |
+
1.677
|
| 27 |
+
1.7238
|
| 28 |
+
1.755
|
| 29 |
+
1.7238
|
| 30 |
+
1.6302
|
| 31 |
+
1.5288
|
| 32 |
+
1.4196
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_12478.csv
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-24.72
|
| 2 |
+
-23.09
|
| 3 |
+
-21.88
|
| 4 |
+
-21.78
|
| 5 |
+
-21.97
|
| 6 |
+
-23.22
|
| 7 |
+
-23.53
|
| 8 |
+
-23.84
|
| 9 |
+
-24.25
|
| 10 |
+
-25.41
|
| 11 |
+
-25.97
|
| 12 |
+
-27.44
|
| 13 |
+
-29.59
|
| 14 |
+
-31.16
|
| 15 |
+
-31.47
|
| 16 |
+
-33.28
|
| 17 |
+
-32.47
|
| 18 |
+
-30.53
|
| 19 |
+
-29.47
|
| 20 |
+
-27.34
|
| 21 |
+
-24.66
|
| 22 |
+
-23.28
|
| 23 |
+
-21.25
|
| 24 |
+
-20.31
|
| 25 |
+
-18.66
|
| 26 |
+
-16.78
|
| 27 |
+
-17.56
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_12539.csv
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
0.6608
|
| 2 |
+
0.6745
|
| 3 |
+
0.6862
|
| 4 |
+
0.694
|
| 5 |
+
0.7009
|
| 6 |
+
0.7048
|
| 7 |
+
0.7067
|
| 8 |
+
0.7067
|
| 9 |
+
0.7067
|
| 10 |
+
0.7048
|
| 11 |
+
0.7019
|
| 12 |
+
0.697
|
| 13 |
+
0.6891
|
| 14 |
+
0.6794
|
| 15 |
+
0.6667
|
| 16 |
+
0.652
|
| 17 |
+
0.6285
|
| 18 |
+
0.6012
|
| 19 |
+
0.5806
|
| 20 |
+
0.564
|
| 21 |
+
0.5464
|
| 22 |
+
0.5308
|
| 23 |
+
0.5152
|
| 24 |
+
0.5024
|
| 25 |
+
0.4897
|
| 26 |
+
0.48
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_12565.csv
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
0.5
|
| 2 |
+
1.0
|
| 3 |
+
2.0
|
| 4 |
+
3.5
|
| 5 |
+
4.0
|
| 6 |
+
5.0
|
| 7 |
+
5.5
|
| 8 |
+
7.0
|
| 9 |
+
7.0
|
| 10 |
+
8.5
|
| 11 |
+
11.5
|
| 12 |
+
11.5
|
| 13 |
+
15.5
|
| 14 |
+
19.5
|
| 15 |
+
22.0
|
| 16 |
+
25.0
|
| 17 |
+
30.0
|
| 18 |
+
35.5
|
| 19 |
+
39.5
|
| 20 |
+
43.0
|
| 21 |
+
45.5
|
| 22 |
+
47.0
|
| 23 |
+
50.0
|
| 24 |
+
50.0
|
| 25 |
+
49.0
|
| 26 |
+
48.5
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_13670.csv
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
0.4457
|
| 2 |
+
0.434
|
| 3 |
+
0.4233
|
| 4 |
+
0.4135
|
| 5 |
+
0.4057
|
| 6 |
+
0.3969
|
| 7 |
+
0.3871
|
| 8 |
+
0.3773
|
| 9 |
+
0.3685
|
| 10 |
+
0.3587
|
| 11 |
+
0.349
|
| 12 |
+
0.3392
|
| 13 |
+
0.3294
|
| 14 |
+
0.3196
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_13799.csv
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
0.3978
|
| 2 |
+
0.4524
|
| 3 |
+
0.4524
|
| 4 |
+
0.4602
|
| 5 |
+
0.4524
|
| 6 |
+
0.4524
|
| 7 |
+
0.4602
|
| 8 |
+
0.4602
|
| 9 |
+
0.468
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_13903.csv
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
36.0
|
| 2 |
+
22.5
|
| 3 |
+
10.5
|
| 4 |
+
-4.5
|
| 5 |
+
-13.5
|
| 6 |
+
-24.0
|
| 7 |
+
-33.5
|
| 8 |
+
-43.0
|
| 9 |
+
-50.0
|
| 10 |
+
-55.0
|
| 11 |
+
-61.0
|
| 12 |
+
-64.0
|
| 13 |
+
-67.5
|
| 14 |
+
-71.0
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_14303.csv
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
0.1716
|
| 2 |
+
0.1872
|
| 3 |
+
0.2262
|
| 4 |
+
0.2418
|
| 5 |
+
0.2574
|
| 6 |
+
0.2808
|
| 7 |
+
0.312
|
| 8 |
+
0.2886
|
| 9 |
+
0.2418
|
| 10 |
+
0.1716
|
| 11 |
+
0.0702
|
| 12 |
+
-0.039
|
| 13 |
+
-0.1248
|
| 14 |
+
-0.1326
|
| 15 |
+
-0.1014
|
| 16 |
+
-0.078
|
| 17 |
+
-0.1014
|
| 18 |
+
-0.1872
|
| 19 |
+
-0.273
|
| 20 |
+
-0.3276
|
| 21 |
+
-0.3432
|
| 22 |
+
-0.3432
|
| 23 |
+
-0.3198
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_14349.csv
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
17.91
|
| 2 |
+
20.28
|
| 3 |
+
24.81
|
| 4 |
+
27.5
|
| 5 |
+
31.5
|
| 6 |
+
36.44
|
| 7 |
+
40.31
|
| 8 |
+
43.56
|
| 9 |
+
44.28
|
| 10 |
+
45.06
|
| 11 |
+
44.19
|
| 12 |
+
41.03
|
| 13 |
+
38.0
|
| 14 |
+
34.78
|
| 15 |
+
32.06
|
| 16 |
+
30.34
|
| 17 |
+
28.75
|
| 18 |
+
28.75
|
| 19 |
+
29.97
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_15152.csv
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
0.819
|
| 2 |
+
0.7722
|
| 3 |
+
0.7332
|
| 4 |
+
0.7098
|
| 5 |
+
0.6786
|
| 6 |
+
0.6474
|
| 7 |
+
0.624
|
| 8 |
+
0.6084
|
| 9 |
+
0.6006
|
| 10 |
+
0.6084
|
| 11 |
+
0.6162
|
| 12 |
+
0.624
|
| 13 |
+
0.663
|
| 14 |
+
0.6864
|
| 15 |
+
0.6942
|
| 16 |
+
0.7176
|
| 17 |
+
0.741
|
| 18 |
+
0.78
|
| 19 |
+
0.8112
|
| 20 |
+
0.8424
|
| 21 |
+
0.8736
|
| 22 |
+
0.9204
|
| 23 |
+
0.9516
|
| 24 |
+
0.9906
|
| 25 |
+
1.0296
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_15750.csv
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
0.4751
|
| 2 |
+
0.4741
|
| 3 |
+
0.4731
|
| 4 |
+
0.4731
|
| 5 |
+
0.4721
|
| 6 |
+
0.4702
|
| 7 |
+
0.4672
|
| 8 |
+
0.4643
|
| 9 |
+
0.4614
|
| 10 |
+
0.4585
|
| 11 |
+
0.4545
|
| 12 |
+
0.4516
|
| 13 |
+
0.4477
|
| 14 |
+
0.4438
|
| 15 |
+
0.4389
|
| 16 |
+
0.434
|
| 17 |
+
0.4291
|
| 18 |
+
0.4252
|
| 19 |
+
0.4233
|
| 20 |
+
0.4213
|
| 21 |
+
0.4184
|
| 22 |
+
0.4174
|
| 23 |
+
0.4164
|
| 24 |
+
0.4145
|
| 25 |
+
0.4145
|
| 26 |
+
0.4145
|
| 27 |
+
0.4135
|
| 28 |
+
0.4125
|
| 29 |
+
0.4115
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_16254.csv
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
17.94
|
| 2 |
+
19.03
|
| 3 |
+
19.94
|
| 4 |
+
20.22
|
| 5 |
+
19.44
|
| 6 |
+
18.5
|
| 7 |
+
17.25
|
| 8 |
+
16.09
|
| 9 |
+
15.03
|
| 10 |
+
14.72
|
| 11 |
+
14.84
|
| 12 |
+
15.19
|
| 13 |
+
15.56
|
| 14 |
+
15.81
|
| 15 |
+
17.0
|
| 16 |
+
16.81
|
| 17 |
+
17.78
|
| 18 |
+
18.12
|
| 19 |
+
19.12
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_16900.csv
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-318.0
|
| 2 |
+
-303.5
|
| 3 |
+
-284.5
|
| 4 |
+
-264.5
|
| 5 |
+
-239.5
|
| 6 |
+
-212.5
|
| 7 |
+
-184.5
|
| 8 |
+
-153.5
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_17049.csv
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
20.19
|
| 2 |
+
23.53
|
| 3 |
+
27.97
|
| 4 |
+
30.62
|
| 5 |
+
31.25
|
| 6 |
+
30.66
|
| 7 |
+
27.16
|
| 8 |
+
22.75
|
| 9 |
+
18.5
|
| 10 |
+
16.81
|
| 11 |
+
16.28
|
| 12 |
+
16.12
|
| 13 |
+
17.25
|
| 14 |
+
18.66
|
| 15 |
+
17.66
|
| 16 |
+
16.69
|
| 17 |
+
14.66
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_17143.csv
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
16.16
|
| 2 |
+
16.91
|
| 3 |
+
16.62
|
| 4 |
+
16.41
|
| 5 |
+
16.31
|
| 6 |
+
16.41
|
| 7 |
+
14.53
|
| 8 |
+
15.94
|
| 9 |
+
16.03
|
| 10 |
+
16.06
|
| 11 |
+
17.0
|
| 12 |
+
17.5
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_18073.csv
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
35.28
|
| 2 |
+
34.25
|
| 3 |
+
32.81
|
| 4 |
+
32.5
|
| 5 |
+
30.78
|
| 6 |
+
29.81
|
| 7 |
+
26.5
|
| 8 |
+
24.72
|
| 9 |
+
22.66
|
| 10 |
+
21.69
|
| 11 |
+
21.03
|
| 12 |
+
21.41
|
| 13 |
+
22.28
|
| 14 |
+
22.34
|
| 15 |
+
20.81
|
| 16 |
+
19.78
|
| 17 |
+
16.62
|
| 18 |
+
13.97
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_18087.csv
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
0.69
|
| 2 |
+
0.47
|
| 3 |
+
1.31
|
| 4 |
+
1.41
|
| 5 |
+
3.44
|
| 6 |
+
5.28
|
| 7 |
+
7.19
|
| 8 |
+
9.25
|
| 9 |
+
10.31
|
| 10 |
+
11.12
|
| 11 |
+
12.0
|
| 12 |
+
13.16
|
| 13 |
+
13.5
|
| 14 |
+
14.12
|
| 15 |
+
15.72
|
| 16 |
+
16.25
|
| 17 |
+
15.59
|
| 18 |
+
16.06
|
| 19 |
+
15.69
|
| 20 |
+
14.25
|
| 21 |
+
14.25
|
| 22 |
+
13.31
|
| 23 |
+
13.31
|
| 24 |
+
13.59
|
| 25 |
+
13.12
|
| 26 |
+
13.03
|
| 27 |
+
13.03
|
| 28 |
+
13.97
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_2451.csv
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-0.5538
|
| 2 |
+
-0.5616
|
| 3 |
+
-0.5694
|
| 4 |
+
-0.585
|
| 5 |
+
-0.585
|
| 6 |
+
-0.5772
|
| 7 |
+
-0.5694
|
| 8 |
+
-0.5226
|
| 9 |
+
-0.4602
|
| 10 |
+
-0.39
|
| 11 |
+
-0.3354
|
| 12 |
+
-0.2886
|
| 13 |
+
-0.2652
|
| 14 |
+
-0.2028
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_3518.csv
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
36.5
|
| 2 |
+
28.0
|
| 3 |
+
17.5
|
| 4 |
+
8.0
|
| 5 |
+
-3.5
|
| 6 |
+
-12.5
|
| 7 |
+
-21.0
|
| 8 |
+
-27.5
|
| 9 |
+
-33.5
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_4951.csv
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
8.88
|
| 2 |
+
9.69
|
| 3 |
+
8.44
|
| 4 |
+
8.56
|
| 5 |
+
7.31
|
| 6 |
+
7.16
|
| 7 |
+
5.44
|
| 8 |
+
3.81
|
| 9 |
+
4.19
|
| 10 |
+
4.56
|
| 11 |
+
4.12
|
| 12 |
+
5.59
|
| 13 |
+
8.06
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_5170.csv
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
0.1872
|
| 2 |
+
0.1716
|
| 3 |
+
0.156
|
| 4 |
+
0.1404
|
| 5 |
+
0.1404
|
| 6 |
+
0.1248
|
| 7 |
+
0.1404
|
| 8 |
+
0.156
|
| 9 |
+
0.1638
|
| 10 |
+
0.1638
|
| 11 |
+
0.1638
|
| 12 |
+
0.1638
|
| 13 |
+
0.1716
|
| 14 |
+
0.1638
|
| 15 |
+
0.1716
|
| 16 |
+
0.2262
|
| 17 |
+
0.2886
|
| 18 |
+
0.3588
|
| 19 |
+
0.3588
|
| 20 |
+
0.3432
|
| 21 |
+
0.2964
|
| 22 |
+
0.2652
|
| 23 |
+
0.2496
|
| 24 |
+
0.2418
|
| 25 |
+
0.2418
|
| 26 |
+
0.273
|
| 27 |
+
0.2964
|
| 28 |
+
0.3276
|
| 29 |
+
0.3354
|
| 30 |
+
0.3822
|
| 31 |
+
0.3978
|
| 32 |
+
0.4212
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_5206.csv
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
22.56
|
| 2 |
+
23.84
|
| 3 |
+
22.91
|
| 4 |
+
21.88
|
| 5 |
+
19.75
|
| 6 |
+
18.91
|
| 7 |
+
17.25
|
| 8 |
+
16.22
|
| 9 |
+
17.06
|
| 10 |
+
17.56
|
| 11 |
+
18.59
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_5699.csv
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
0.6207
|
| 2 |
+
0.6031
|
| 3 |
+
0.5836
|
| 4 |
+
0.563
|
| 5 |
+
0.5415
|
| 6 |
+
0.52
|
| 7 |
+
0.4995
|
| 8 |
+
0.48
|
| 9 |
+
0.4604
|
| 10 |
+
0.4428
|
| 11 |
+
0.4262
|
| 12 |
+
0.4115
|
| 13 |
+
0.3998
|
| 14 |
+
0.3891
|
| 15 |
+
0.3803
|
| 16 |
+
0.3734
|
| 17 |
+
0.3675
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_6034.csv
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-14.0
|
| 2 |
+
-12.5
|
| 3 |
+
-12.0
|
| 4 |
+
-12.0
|
| 5 |
+
-10.5
|
| 6 |
+
-6.0
|
| 7 |
+
-3.0
|
| 8 |
+
-1.0
|
| 9 |
+
3.0
|
| 10 |
+
7.5
|
| 11 |
+
12.0
|
| 12 |
+
17.0
|
| 13 |
+
21.0
|
| 14 |
+
24.5
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_6218.csv
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
1.2714
|
| 2 |
+
1.1934
|
| 3 |
+
1.0842
|
| 4 |
+
0.9594
|
| 5 |
+
0.8424
|
| 6 |
+
0.741
|
| 7 |
+
0.6318
|
| 8 |
+
0.5226
|
| 9 |
+
0.4134
|
| 10 |
+
0.3276
|
| 11 |
+
0.2496
|
| 12 |
+
0.1716
|
| 13 |
+
0.117
|
| 14 |
+
0.0312
|
| 15 |
+
-0.0234
|
| 16 |
+
-0.1014
|
| 17 |
+
-0.1482
|
| 18 |
+
-0.1872
|
| 19 |
+
-0.2184
|
| 20 |
+
-0.2496
|
| 21 |
+
-0.273
|
| 22 |
+
-0.2964
|
| 23 |
+
-0.3276
|
| 24 |
+
-0.3588
|
| 25 |
+
-0.351
|
| 26 |
+
-0.3588
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_6497.csv
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
34.59
|
| 2 |
+
35.06
|
| 3 |
+
36.28
|
| 4 |
+
35.66
|
| 5 |
+
35.06
|
| 6 |
+
33.69
|
| 7 |
+
32.56
|
| 8 |
+
31.44
|
| 9 |
+
29.69
|
| 10 |
+
30.56
|
| 11 |
+
30.88
|
| 12 |
+
31.72
|
| 13 |
+
33.06
|
| 14 |
+
34.16
|
| 15 |
+
36.41
|
| 16 |
+
37.69
|
| 17 |
+
39.19
|
| 18 |
+
41.59
|
| 19 |
+
42.59
|
| 20 |
+
44.03
|
| 21 |
+
45.41
|
| 22 |
+
44.22
|
| 23 |
+
42.66
|
| 24 |
+
40.62
|
| 25 |
+
36.81
|
| 26 |
+
33.47
|
| 27 |
+
31.25
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_6766.csv
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
0.8658
|
| 2 |
+
0.858
|
| 3 |
+
0.8112
|
| 4 |
+
0.78
|
| 5 |
+
0.7566
|
| 6 |
+
0.7254
|
| 7 |
+
0.702
|
| 8 |
+
0.6786
|
| 9 |
+
0.6786
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_7972.csv
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-0.0234
|
| 2 |
+
-0.039
|
| 3 |
+
-0.039
|
| 4 |
+
-0.0312
|
| 5 |
+
-0.0468
|
| 6 |
+
-0.039
|
| 7 |
+
-0.0468
|
| 8 |
+
-0.0468
|
| 9 |
+
-0.0468
|
| 10 |
+
-0.0546
|
| 11 |
+
-0.0468
|
| 12 |
+
-0.039
|
| 13 |
+
-0.039
|
| 14 |
+
-0.0312
|
| 15 |
+
-0.0312
|
| 16 |
+
-0.0156
|
| 17 |
+
-0.0234
|
| 18 |
+
-0.0078
|
| 19 |
+
-0.0078
|
| 20 |
+
0.0
|
| 21 |
+
0.0
|
| 22 |
+
0.0078
|
| 23 |
+
0.0078
|
| 24 |
+
0.0234
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_863.csv
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
13.5
|
| 2 |
+
13.0
|
| 3 |
+
14.5
|
| 4 |
+
15.0
|
| 5 |
+
15.5
|
| 6 |
+
18.5
|
| 7 |
+
21.0
|
| 8 |
+
25.0
|
| 9 |
+
27.0
|
| 10 |
+
30.0
|
| 11 |
+
32.5
|
| 12 |
+
34.5
|
| 13 |
+
37.5
|
| 14 |
+
39.5
|
| 15 |
+
40.5
|
| 16 |
+
44.0
|
| 17 |
+
46.5
|
| 18 |
+
49.0
|
| 19 |
+
52.5
|
| 20 |
+
54.5
|
| 21 |
+
57.5
|
| 22 |
+
57.0
|
| 23 |
+
58.5
|
| 24 |
+
59.0
|
| 25 |
+
59.0
|
| 26 |
+
57.5
|
| 27 |
+
55.5
|
| 28 |
+
54.5
|
| 29 |
+
52.0
|
| 30 |
+
50.0
|
| 31 |
+
46.0
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_8720.csv
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
0.2262
|
| 2 |
+
0.2262
|
| 3 |
+
0.2262
|
| 4 |
+
0.2496
|
| 5 |
+
0.2652
|
| 6 |
+
0.2262
|
| 7 |
+
0.1872
|
| 8 |
+
0.1404
|
| 9 |
+
0.078
|
| 10 |
+
-0.0078
|
| 11 |
+
-0.0624
|
| 12 |
+
-0.0936
|
| 13 |
+
-0.0858
|
| 14 |
+
-0.0468
|
| 15 |
+
-0.0078
|
| 16 |
+
-0.0234
|
| 17 |
+
-0.0702
|
| 18 |
+
-0.117
|
| 19 |
+
-0.1482
|
| 20 |
+
-0.1404
|
| 21 |
+
-0.117
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_9059.csv
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-0.3978
|
| 2 |
+
-0.3042
|
| 3 |
+
-0.234
|
| 4 |
+
-0.156
|
| 5 |
+
-0.078
|
| 6 |
+
-0.0234
|
| 7 |
+
0.0156
|
| 8 |
+
0.0468
|
| 9 |
+
0.0624
|
| 10 |
+
0.078
|
| 11 |
+
0.0624
|
| 12 |
+
0.0468
|
| 13 |
+
0.0078
|
| 14 |
+
-0.0312
|
| 15 |
+
-0.0936
|
| 16 |
+
-0.1326
|
| 17 |
+
-0.1794
|
| 18 |
+
-0.2106
|
| 19 |
+
-0.2496
|
| 20 |
+
-0.273
|
| 21 |
+
-0.3432
|
| 22 |
+
-0.4446
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_9176.csv
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
0.5093
|
| 2 |
+
0.5054
|
| 3 |
+
0.5024
|
| 4 |
+
0.4976
|
| 5 |
+
0.4936
|
| 6 |
+
0.4888
|
| 7 |
+
0.4829
|
| 8 |
+
0.476
|
| 9 |
+
0.4682
|
| 10 |
+
0.4604
|
| 11 |
+
0.4536
|
| 12 |
+
0.4467
|
| 13 |
+
0.4409
|
| 14 |
+
0.436
|
| 15 |
+
0.4321
|
| 16 |
+
0.4281
|
| 17 |
+
0.4252
|
| 18 |
+
0.4223
|
| 19 |
+
0.4193
|
| 20 |
+
0.4164
|
| 21 |
+
0.4154
|
Physiology-TS_MQA-Physiological_signal-Forecasting/raw_gt_data/output_Health_forecasting_9245.csv
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
22.0
|
| 2 |
+
22.0
|
| 3 |
+
27.0
|
| 4 |
+
27.0
|
| 5 |
+
26.0
|
| 6 |
+
10.0
|
| 7 |
+
1.5
|
| 8 |
+
4.0
|
| 9 |
+
0.5
|
| 10 |
+
7.0
|
README.md
CHANGED
|
@@ -1,3 +1,235 @@
|
|
| 1 |
-
---
|
| 2 |
-
license: cc-by-nc-sa-4.0
|
| 3 |
-
---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: cc-by-nc-sa-4.0
|
| 3 |
+
---
|
| 4 |
+
|
| 5 |
+
# SciTS: Scientific Time Series Understanding and Generation with LLMs
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
This repository contains the official dataset for [**SciTS: Scientific Time Series Understanding and Generation with LLMs** (ICLR 2026)](https://openreview.net/forum?id=5YXccEP6uc). SciTS is a large-scale benchmark designed to evaluate the capabilities of large language models on complex scientific time series data. It spans 12 scientific disciplines, 43 distinct tasks, and includes 54,023 instances.
|
| 9 |
+
|
| 10 |
+

|
| 11 |
+
|
| 12 |
+
## Dataset Structure
|
| 13 |
+
|
| 14 |
+
The benchmark is organized into a main `meta_data.jsonl` file, a `process` directory for handling restricted datasets, and 38 individual dataset folders. Each folder is named using the convention: `Domain-DatasetName-Scene-Task`.
|
| 15 |
+
|
| 16 |
+
```
|
| 17 |
+
├── process/
|
| 18 |
+
│ ├── process_ETT.py
|
| 19 |
+
│ ├── process_iNaturalist.py
|
| 20 |
+
│ ├── infer_template.py
|
| 21 |
+
│ ├── eval.py
|
| 22 |
+
│ └── requirements.txt
|
| 23 |
+
├── Domain-DatasetName-Scene-Task_1/
|
| 24 |
+
│ ├── raw_input_data/
|
| 25 |
+
│ └── raw_gt_data/ (for generation tasks)
|
| 26 |
+
├── Domain-DatasetName-Scene-Task_2/
|
| 27 |
+
│ └── raw_input_data/
|
| 28 |
+
...
|
| 29 |
+
├── Domain-DatasetName-Scene-Task_38/
|
| 30 |
+
│ ├── raw_input_data/
|
| 31 |
+
│ └── raw_gt_data/
|
| 32 |
+
└── meta_data.jsonl
|
| 33 |
+
```
|
| 34 |
+
|
| 35 |
+
- **`process/`**: Contains utility scripts, including `process_ETT.py` and `process_iNaturalist.py` for processing restricted datasets which cannot be released directly due to license restrictions, `infer_template.py` as an inference template, `eval.py` for evaluation, and `requirements.txt` for dependency installation.
|
| 36 |
+
- **`Dataset Folders`**: Each of the 38 folders contains the raw time series data for a specific dataset. `raw_input_data` holds the input signals, while `raw_gt_data` (present only for generation tasks) holds the ground truth output signals.
|
| 37 |
+
- **`meta_data.jsonl`**: A JSON Lines file containing metadata for every instance in the benchmark. Each line corresponds to one data sample.
|
| 38 |
+
|
| 39 |
+
### Dataset Collection
|
| 40 |
+
|
| 41 |
+
The 38 released datasets are listed below:
|
| 42 |
+
|
| 43 |
+
| Domain | Dataset Folder Name | Task ID |
|
| 44 |
+
| :--- | :--- | :--- |
|
| 45 |
+
| Astronomy | `Astronomy-GWOSC_GW_Event-Gravitational_wave-Anomaly_detection+Event_localisation` | ASU01, ASG02 |
|
| 46 |
+
| | `Astronomy-LEAVES-Light_curve-Classification` | ASU03 |
|
| 47 |
+
| Earth Science | `Earth_Science-STEAD-Earthquake-Anomaly_detection+Event_localisation` | EAU01, EAG02 |
|
| 48 |
+
| Bioacoustics | `Bioacoustics-Powdermill-Birds_vocalisation-Classification` | BIU01 |
|
| 49 |
+
| | `Bioacoustics-MarmAudio-Marmoset_vocalisation-Classification` | BIU03 |
|
| 50 |
+
| Meteorology | `Meteorology-TS_MQA-Weather-Anomaly_detection` | MEU01 |
|
| 51 |
+
| | `Meteorology-TIMECAP-Rainfall-Anomaly_detection` | MEU02 |
|
| 52 |
+
| | `Meteorology-MT_bench-Temperature-Forecasting` | MEG03 |
|
| 53 |
+
| | `Meteorology-MT_bench-Temperature-MCQ` | MEU04 |
|
| 54 |
+
| Economics | `Economics-FinMultiTime-Stock_closing_price-Forecasting` | ECG01 |
|
| 55 |
+
| | `Economics-MT_bench-Stock_price-Forecasting` | ECG02 |
|
| 56 |
+
| | `Economics-MT_bench-Stock-MCQ` | ECU03 |
|
| 57 |
+
| Neuroscience | `Neuroscience-MDD-Depressive_disorder-Anomaly_detection` | NEU01 |
|
| 58 |
+
| | `Neuroscience-TUEV-EEG_pattern-Classification` | NEU02 |
|
| 59 |
+
| | `Neuroscience-TS_MQA-EEG_signal-Forecasting` | NEG03 |
|
| 60 |
+
| | `Neuroscience-TS_MQA-EEG_signal-Imputation` | NEG04 |
|
| 61 |
+
| | `Neuroscience-WBCIC_SHU-Motor_imagery-Classification` | NEU05 |
|
| 62 |
+
| | `Neuroscience-Sleep-Sleep_staging-Classification` | NEU06 |
|
| 63 |
+
| Energy | `Energy-NewsForecast-Electronic_load-Forecasting` | ENG01 |
|
| 64 |
+
| | `Energy-TextETT-Sensor_signal_trend-Synthesis` | ENG03 |
|
| 65 |
+
| | `Energy-TS_MQA-Comprehensive_electricity-Forecasting` | ENG04 |
|
| 66 |
+
| | `Energy-TS_MQA-Comprehensive_electricity-Imputation` | ENG05 |
|
| 67 |
+
| Physiology | `Physiology-PTB_XL-ECG_status-Classification` | PHU01 |
|
| 68 |
+
| | `Physiology-TS_MQA-Physiological_signal-Forecasting` | PHG02 |
|
| 69 |
+
| | `Physiology-TS_MQA-Physiological_signal-Imputation` | PHG03 |
|
| 70 |
+
| | `Physiology-TS_MQA-ECG-Anomaly_detection` | PHU04 |
|
| 71 |
+
| | `Physiology-TS_MQA-Gait_freezing-Anomaly_detection` | PHU05 |
|
| 72 |
+
| | `Physiology-TS_MQA-Human_activity-Classification` | PHU06 |
|
| 73 |
+
| Urbanism | `Urbanism-NewsForecast-Traffic_flow-Forecasting` | URG01 |
|
| 74 |
+
| | `Urbanism-TS_MQA-Pedestrian_flow-Forecasting` | URG02 |
|
| 75 |
+
| | `Urbanism-TS_MQA-Pedestrian_flow-Imputation` | URG03 |
|
| 76 |
+
| | `Urbanism-TS_MQA-Traffic_flow-Anomaly_detection` | URU04 |
|
| 77 |
+
| | `Urbanism-MetroTraffic-Traffic_volume-Forecasting` | URG05 |
|
| 78 |
+
| Manufacturing | `Manufacturing-CWRU-Bearings_fault_location+Bearings_fault_size-Classification` | MFU01, MFU02 |
|
| 79 |
+
| | `Manufacturing-MIMII_Due-Machine_malfunction-Anomaly_detection` | MFU03 |
|
| 80 |
+
| Radar | `Radar-RadSeg-Coding_scheme-Classification` | RAU01 |
|
| 81 |
+
| | `Radar-RadarCom-Modes_and_modulation-Classification` | RAU02 |
|
| 82 |
+
| Math | `Math-Chaotic-Chaotic_system-Forecasting` | MAG01 |
|
| 83 |
+
|
| 84 |
+
## `meta_data.jsonl` Format
|
| 85 |
+
|
| 86 |
+
Each line in this file is a JSON object with the following structure, providing all necessary metadata to load and use a data sample.
|
| 87 |
+
|
| 88 |
+
```json
|
| 89 |
+
{
|
| 90 |
+
"task_id": ["TASK_ID"], // List of task IDs associated with this sample (e.g., ["ASU03"] or ["ASU01", "ASG02"] for merged datasets)
|
| 91 |
+
"id": "DATASET_ID", // Unique identifier of this sample within the dataset
|
| 92 |
+
"data_type": "csv"/"npy"/"wav"/"flac", // File format of the raw time series data
|
| 93 |
+
"input_ts":{
|
| 94 |
+
"num_channel": int, // Number of channels (dimensions) in the input signal
|
| 95 |
+
"channel_detail": [], // List of channel names, empty if none
|
| 96 |
+
"path": "raw_input_data/sample_001_input.npy",
|
| 97 |
+
"length": int, // Length of the input time series
|
| 98 |
+
"timestamps": [], // Auxiliary timestamp information, empty if none
|
| 99 |
+
"fs": int // Sampling frequency in Hz
|
| 100 |
+
},
|
| 101 |
+
"input_text": "INPUT_TEXT", // Textual prompt or task instruction provided as input
|
| 102 |
+
"gt_text": "GT_TEXT", // Ground truth textual answer (for understanding tasks; empty for generation tasks)
|
| 103 |
+
"gt_ts": {
|
| 104 |
+
"path": "raw_gt_data/sample_001_output.npy",
|
| 105 |
+
"length": int
|
| 106 |
+
},
|
| 107 |
+
"gt_result": { ... }, // Structured ground truth result; format varies by task type (see below)
|
| 108 |
+
"meta_data": {} // Additional metadata from the original data source
|
| 109 |
+
}
|
| 110 |
+
```
|
| 111 |
+
|
| 112 |
+
### `gt_result` Field Format
|
| 113 |
+
|
| 114 |
+
The structure of the `gt_result` field varies depending on the task type. This field provides the original ground truth for metric computation.
|
| 115 |
+
|
| 116 |
+
**1. MCQ**
|
| 117 |
+
```json
|
| 118 |
+
"gt_result": {
|
| 119 |
+
"answer": "TEXT" // The correct textual answer
|
| 120 |
+
}
|
| 121 |
+
```
|
| 122 |
+
|
| 123 |
+
**2. Synthesis, Forecasting, Imputation**
|
| 124 |
+
```json
|
| 125 |
+
"gt_result": {
|
| 126 |
+
"num_channel": int, // Number of channels (dimensions) in the ground truth signal
|
| 127 |
+
"channel_detail": [], // List of channel names, empty if none
|
| 128 |
+
"timestamps": [] // Auxiliary timestamp information, empty if none
|
| 129 |
+
}
|
| 130 |
+
```
|
| 131 |
+
|
| 132 |
+
**3. Classification**
|
| 133 |
+
|
| 134 |
+
For the `CWRU` dataset, which involves two classification sub-tasks, the category keys in `class_list` and `gt_class` are `"diameter"` and `"position"` respectively. For all other classification tasks, the category key is `"default"`.
|
| 135 |
+
|
| 136 |
+
```json
|
| 137 |
+
"gt_result": {
|
| 138 |
+
"class_list": {
|
| 139 |
+
"default": ["class_A", "class_B"], // List of candidate classes for each category
|
| 140 |
+
...
|
| 141 |
+
},
|
| 142 |
+
"gt_class": {
|
| 143 |
+
"default": ["GT_CLASS"], // Ground truth class label for each category
|
| 144 |
+
...
|
| 145 |
+
}
|
| 146 |
+
}
|
| 147 |
+
```
|
| 148 |
+
|
| 149 |
+
**4. Anomaly Detection**
|
| 150 |
+
```json
|
| 151 |
+
"gt_result": {
|
| 152 |
+
"contain": Boolean // Boolean indicating if the required event is present
|
| 153 |
+
}
|
| 154 |
+
```
|
| 155 |
+
|
| 156 |
+
**5. Anomaly Detection + Event Localisation**
|
| 157 |
+
|
| 158 |
+
For the `GWOSC GW Event` and `STEAD` datasets, each of which includes both an `Anomaly Detection` task and an `Event Localisation` task, the gt_result field is defined in the following combined format:
|
| 159 |
+
|
| 160 |
+
```json
|
| 161 |
+
"gt_result": {
|
| 162 |
+
"contain": Boolean, // Boolean indicating if the required event is present
|
| 163 |
+
"start_time": int // The event index if contain is true, else null
|
| 164 |
+
}
|
| 165 |
+
```
|
| 166 |
+
|
| 167 |
+
## Handling Restricted Datasets
|
| 168 |
+
|
| 169 |
+
Due to license restrictions, the **ETT** (`ENG02`) and **iNaturalist** (`BIU02`) datasets are not directly included in this repository. To use them, you need to download the original data and run the provided processing scripts.
|
| 170 |
+
|
| 171 |
+
**Step 1: Download the Data**
|
| 172 |
+
|
| 173 |
+
- **ETT**: Download `ETTh1.csv` from the official repository: [https://github.com/zhouhaoyi/ETDataset](https://github.com/zhouhaoyi/ETDataset)
|
| 174 |
+
- **iNaturalist**: Download the `Test Recordings` from the official repository: [https://github.com/visipedia/inat_sounds/tree/main/2024](https://github.com/visipedia/inat_sounds/tree/main/2024)
|
| 175 |
+
|
| 176 |
+
**Step 2: Install Dependencies**
|
| 177 |
+
|
| 178 |
+
Before running the processing scripts, install the required Python packages:
|
| 179 |
+
|
| 180 |
+
```shell
|
| 181 |
+
pip install -r process/requirements.txt
|
| 182 |
+
```
|
| 183 |
+
|
| 184 |
+
**Step 3: Run the Processing Script**
|
| 185 |
+
|
| 186 |
+
Place the downloaded files into a local directory. Then, from the root of this repository, run the corresponding script to process the data into the standard benchmark format.
|
| 187 |
+
|
| 188 |
+
- For ETT:
|
| 189 |
+
```shell
|
| 190 |
+
python process/process_ETT.py --data_path /path/to/your/ETTh1.csv
|
| 191 |
+
```
|
| 192 |
+
|
| 193 |
+
- For iNaturalist:
|
| 194 |
+
```shell
|
| 195 |
+
python process/process_iNaturalist.py --data_folder /path/to/your/iNaturalist/test
|
| 196 |
+
```
|
| 197 |
+
|
| 198 |
+
This will generate the `Energy-ETT-Transformer_sensor_signal-Forecasting` and `Bioacoustics-INaturalist-Animal_vocalisation-Classification` folders along with their `raw_input_data`, `raw_gt_data` subdirectories, as well as the processed test files.
|
| 199 |
+
|
| 200 |
+
## Baseline Inference and Evaluation
|
| 201 |
+
|
| 202 |
+
The `process` directory also includes scripts for running inference and evaluating the results.
|
| 203 |
+
|
| 204 |
+
### Inference
|
| 205 |
+
|
| 206 |
+
`process/infer_template.py`: Template code for the inference script. Implement the `initialize_model` function, then inference can be done by running:
|
| 207 |
+
|
| 208 |
+
```shell
|
| 209 |
+
python process/infer_template.py --scits_dir /path/to/scits_dir --output_dir /path/to/output_dir
|
| 210 |
+
```
|
| 211 |
+
|
| 212 |
+
### Evaluation
|
| 213 |
+
|
| 214 |
+
`process/eval.py`: Evaluation script. Run:
|
| 215 |
+
|
| 216 |
+
```shell
|
| 217 |
+
python process/eval.py evaluate --infer_dir /path/to/infer_dir
|
| 218 |
+
```
|
| 219 |
+
|
| 220 |
+
The evaluation results will be saved to `/path/to/infer_dir/results/`.
|
| 221 |
+
|
| 222 |
+
## Citation
|
| 223 |
+
|
| 224 |
+
If you use the SciTS benchmark, please cite the paper:
|
| 225 |
+
|
| 226 |
+
```bibtex
|
| 227 |
+
@inproceedings{
|
| 228 |
+
wu2026scits,
|
| 229 |
+
title={Sci{TS}: {S}cientific Time Series Understanding and Generation with {LLM}s},
|
| 230 |
+
author={Wen Wu and Ziyang Zhang and Liwei Liu and Xuenan Xu and Jimin Zhuang and Ke Fan and Qitan Lv and Junlin Liu and Chen Zhang and Zheqi Yuan and Siyuan Hou and Tianyi Lin and Kai Chen and Bowen Zhou and Chao Zhang},
|
| 231 |
+
booktitle={The Fourteenth International Conference on Learning Representations},
|
| 232 |
+
year={2026},
|
| 233 |
+
url={https://openreview.net/forum?id=5YXccEP6uc}
|
| 234 |
+
}
|
| 235 |
+
```
|
process/eval.py
ADDED
|
@@ -0,0 +1,584 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import fire
|
| 6 |
+
import h5py
|
| 7 |
+
from sklearn.metrics import accuracy_score, mean_absolute_error, f1_score
|
| 8 |
+
|
| 9 |
+
from infer_eval_utils import read_time_series_data, concat_base_path, non_zero_rel_mae, DATASET_TO_TASK
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class Runner(object):
|
| 13 |
+
|
def multitask_classification(self,
                             infer_path: str = "",
                             gts: list = [],
                             preds: list = []):
    """Evaluate multi-task classification predictions and write a JSON report.

    Each entry of *gts* is a dict mapping a task name to a one-element
    list holding the true label; each entry of *preds* is a dict mapping
    the task name to the predicted label string.  For every task the
    method reports accuracy and macro-F1 (mean of the per-label F1s),
    plus cumulative parse success/fail counts; an ``"overall"`` entry
    averages accuracy and F1 across all tasks.  Results are written to
    ``<infer_dir>/results/<stem>.json``.

    Args:
        infer_path: Path of the inference file; only used to derive the
            output location.
        gts: Ground-truth dicts, one per sample.
        preds: Prediction dicts, one per sample.
    """
    tasks = gts[0].keys()
    output_fpath = Path(
        infer_path).parent / f"results/{Path(infer_path).stem}.json"
    output_fpath.parent.mkdir(parents=True, exist_ok=True)
    res_dict = {}
    success = 0
    fail = 0
    all_task_f1s = []  # per-label F1 scores pooled over every task
    for task in tasks:
        correct_count = 0
        for gt, pred in zip(gts, preds):
            try:
                # Lenient match: exact or case-insensitive, because LLM
                # outputs sometimes capitalize the first letter.
                if pred[task] == gt[task][0] or pred[task].lower(
                ) == gt[task][0]:
                    correct_count += 1
                success += 1
            except Exception:  # malformed/missing prediction entry
                fail += 1
        acc = correct_count / len(gts)
        print(f"Accuracy for {task}: {acc}")

        class_f1s = []
        task_gts = [gt[task][0] for gt in gts]
        labels = list(set(task_gts))
        for label in labels:
            # Recall: fraction of samples with this true label that are
            # predicted correctly.
            true_indices = [
                i for i, gt in enumerate(task_gts) if gt == label
            ]
            if len(true_indices) == 0:
                recall = 0.0
            else:
                correct_predictions = 0
                for idx in true_indices:
                    pred = preds[idx][task]
                    gt = task_gts[idx]
                    if pred == gt or pred.lower() == gt:
                        correct_predictions += 1
                recall = correct_predictions / len(true_indices)

            # Precision: fraction of predictions of this label that
            # match the ground truth.
            pred_indices = [
                i for i, pred in enumerate(preds)
                if pred[task].lower() == label or pred[task] == label
            ]
            if len(pred_indices) == 0:
                precision = 0.0
            else:
                correct_predictions = 0
                for idx in pred_indices:
                    pred = preds[idx][task]
                    gt = task_gts[idx]
                    if pred == gt or pred.lower() == gt:
                        correct_predictions += 1
                precision = correct_predictions / len(pred_indices)

            # Epsilon keeps the division defined when both are zero.
            f1 = (2 * recall * precision) / (recall + precision + 1e-6)
            class_f1s.append(f1)
        all_task_f1s.extend(class_f1s)

        res_dict[task] = {
            "acc": acc,
            # BUGFIX: report the macro-F1 over this task's labels instead
            # of the F1 of whichever label happened to be processed last.
            "f1": float(np.mean(class_f1s)) if class_f1s else 0.0,
            "success": success,
            "fail": fail,
            "success_rate": success / (success + fail)
        }

    res_dict["overall"] = {
        # BUGFIX: pool per-label F1s over every task, not only the last
        # task's `class_f1s`.
        "f1": float(np.mean(all_task_f1s)) if all_task_f1s else 0.0,
        "acc": float(np.mean([r["acc"] for r in res_dict.values()])),
    }

    with open(output_fpath, "w") as writer:
        json.dump(res_dict, writer, indent=4)
        writer.write("\n")
| 91 |
+
|
def multichoice_classification(self,
                               infer_path: str = "",
                               gts: list = [],
                               preds: list = []):
    """Evaluate multi-choice (multi-label) classification predictions.

    A ground truth may be a single label or a list of labels; a
    prediction is a raw string with one label per line.  Both sides are
    converted to binary indicator vectors over the union of ground-truth
    labels, then a per-class F1 and the macro-F1 are computed and written
    to ``<infer_dir>/results/<stem>.json``.

    Args:
        infer_path: Path of the inference file; only used to derive the
            output location.
        gts: Ground-truth labels (str or list of str), one per sample.
        preds: Raw prediction strings, one per sample.
    """
    # Union of all labels seen in the ground truth.
    all_labels = set()
    for gt in gts:
        if isinstance(gt, list):
            all_labels.update(gt)
        else:
            all_labels.add(gt)
    all_labels = sorted(list(all_labels))

    # Convert ground truth and predictions to multi-label format.
    # (BUGFIX: the original also declared `success`/`fail` counters here
    # that were never updated or reported; they have been removed.)
    y_true_multilabel = []
    y_pred_multilabel = []
    for gt, pred in zip(gts, preds):
        gt_labels = gt if isinstance(gt, list) else [gt]

        # Collapse blank lines, then split the prediction into labels.
        while '\n\n' in pred:
            pred = pred.replace('\n\n', '\n')
        pred_labels = [x.strip() for x in pred.split("\n")]

        gt_binary = [
            1 if label in gt_labels else 0 for label in all_labels
        ]
        # Lenient match: exact or case-insensitive.
        pred_binary = [
            1 if any(p == label or p.lower() == label
                     for p in pred_labels) else 0
            for label in all_labels
        ]

        y_true_multilabel.append(gt_binary)
        y_pred_multilabel.append(pred_binary)

    y_true_multilabel = np.array(y_true_multilabel)
    y_pred_multilabel = np.array(y_pred_multilabel)

    # Per-class F1, then the macro average.
    f1_scores = []
    for i, label in enumerate(all_labels):
        f1 = f1_score(y_true_multilabel[:, i],
                      y_pred_multilabel[:, i],
                      zero_division=0)
        f1_scores.append(f1)
        print(f"F1 score for class {label}: {f1:.4f}")

    macro_f1 = np.mean(f1_scores)
    print(f"Macro F1 score (mean of all classes): {macro_f1:.4f}")

    # Save results.
    output_fpath = Path(
        infer_path).parent / f"results/{Path(infer_path).stem}.json"
    output_fpath.parent.mkdir(parents=True, exist_ok=True)

    results = {
        "macro_f1": macro_f1,
        "per_class_f1": dict(zip(all_labels, f1_scores))
    }

    with open(output_fpath, "w") as writer:
        json.dump(results, writer, indent=4)
        writer.write("\n")
| 170 |
+
|
def classification(self, infer_path: str = ""):
    """Evaluate a classification inference file and write a JSON report.

    Reads a JSON-lines file whose records carry ``id``, ``ground_truth``
    and ``output``.  List-valued ground truths are delegated to
    ``multichoice_classification`` and dict-valued predictions to
    ``multitask_classification``; otherwise accuracy and macro-F1 over
    single-label string predictions are computed and written to
    ``<infer_dir>/results/<stem>.json``.

    Args:
        infer_path: Path to the inference ``.jsonl`` file.
    """
    gts, preds = [], []
    with open(infer_path, "r") as f:
        for line in f:
            item = json.loads(line)
            # Header/metadata records carry no "id"; skip them.
            if "id" not in item:
                continue
            gts.append(item["ground_truth"])
            preds.append(item["output"])

    if any(isinstance(gt, list) for gt in gts):
        return self.multichoice_classification(infer_path, gts, preds)

    if isinstance(preds[0], dict):
        return self.multitask_classification(infer_path, gts, preds)

    # Lenient match: exact or case-insensitive, because LLM outputs
    # sometimes capitalize the first letter to follow English grammar.
    def _match(pred, gt):
        return pred == gt or pred.lower() == gt

    correct_count = sum(
        1 for gt, pred in zip(gts, preds) if _match(pred, gt))
    acc = correct_count / len(gts)
    print(f"Accuracy: {acc}")

    # Macro-F1 with the same lenient matching.
    # (BUGFIX: removed `class_recalls`/`class_precisions`, which were
    # populated but never used in the written report.)
    labels = list(set(gts))
    class_f1s = []
    for label in labels:
        # Recall over the samples whose true label is `label`.
        true_indices = [i for i, gt in enumerate(gts) if gt == label]
        if len(true_indices) == 0:
            recall = 0.0
        else:
            recall = sum(
                1 for idx in true_indices
                if _match(preds[idx], gts[idx])) / len(true_indices)

        # Precision over the samples predicted as `label`.
        pred_indices = [
            i for i, pred in enumerate(preds)
            if pred.lower() == label or pred == label
        ]
        if len(pred_indices) == 0:
            precision = 0.0
        else:
            precision = sum(
                1 for idx in pred_indices
                if _match(preds[idx], gts[idx])) / len(pred_indices)

        # Epsilon keeps the division defined when both are zero.
        class_f1s.append(
            (2 * recall * precision) / (recall + precision + 1e-6))

    output_fpath = Path(
        infer_path).parent / f"results/{Path(infer_path).stem}.json"
    output_fpath.parent.mkdir(parents=True, exist_ok=True)
    with open(output_fpath, "w") as writer:
        json.dump({
            "acc": acc,
            "f1": np.mean(class_f1s),
        },
                  writer,
                  indent=4)
        writer.write("\n")
| 253 |
+
|
def mcq(self, infer_path: str = ""):
    """Evaluate a multiple-choice QA inference file.

    Reads a JSON-lines file of records with ``id``, ``ground_truth`` and
    ``output``, computes accuracy and the unweighted average recall
    (macro-averaged per-class recall, reported as ``uar``), and writes
    both to ``<infer_dir>/results/<stem>.json``.

    Args:
        infer_path: Path to the inference ``.jsonl`` file.
    """
    gts, preds = [], []
    with open(infer_path, "r") as f:
        for line in f:
            item = json.loads(line)
            # Header/metadata records carry no "id"; skip them.
            if "id" not in item:
                continue
            gts.append(item["ground_truth"])
            preds.append(item["output"])

    # Custom comparison: consider both exact match and case-insensitive
    # match, because LLM outputs sometimes capitalize the first letter
    # to follow English grammar.
    correct_count = 0
    for gt, pred in zip(gts, preds):
        if pred == gt or pred.lower() == gt:
            correct_count += 1

    acc = correct_count / len(gts)
    print(f"Accuracy: {acc}")

    # BUGFIX: the original wrapped the result-file write inside this
    # try, so any error while computing recall discarded the whole
    # report, including the already-computed accuracy.  The try is now
    # narrowed to the recall computation and the file is always written
    # (uar is null when it could not be computed).
    weighted_recall = None
    try:
        labels = list(set(gts))

        # Per-class recall with the same lenient matching as accuracy.
        class_recalls = []
        for label in labels:
            true_indices = [i for i, gt in enumerate(gts) if gt == label]
            if len(true_indices) == 0:
                class_recalls.append(0.0)
                continue

            correct_predictions = 0
            for idx in true_indices:
                pred = preds[idx]
                gt = gts[idx]
                if pred == gt or pred.lower() == gt:
                    correct_predictions += 1

            class_recalls.append(correct_predictions / len(true_indices))

        weighted_recall = np.mean(class_recalls)
        print(f"Weighted Recall: {weighted_recall}")
    except Exception as e:
        print(f"Error calculating Weighted Recall: {e}")
        print(
            "Possible reasons: labels are not numeric or contain non-numeric labels"
        )

    output_fpath = Path(
        infer_path).parent / f"results/{Path(infer_path).stem}.json"
    output_fpath.parent.mkdir(parents=True, exist_ok=True)
    with open(output_fpath, "w") as writer:
        json.dump({
            "acc": acc,
            "uar": weighted_recall
        },
                  writer,
                  indent=4)
        writer.write("\n")
| 320 |
+
|
def anomaly_detection(self, infer_path: str = ""):
    """Evaluate a yes/no anomaly-detection inference file.

    Reads a JSON-lines file of records with ``id``, ``ground_truth`` and
    ``output``.  Outputs equal to "yes"/"no" (case-insensitive) map to
    True/False; anything else counts as a parse failure and the sample
    is dropped.  Accuracy and F1 over the parsed samples are written to
    ``<infer_dir>/results/<stem>.json``.

    Args:
        infer_path: Path to the inference ``.jsonl`` file.
    """
    gts, preds = [], []
    success, fail = 0, 0
    with open(infer_path, "r") as f:
        for line in f:
            item = json.loads(line)
            # Header/metadata records carry no "id"; skip them.
            if "id" not in item:
                continue

            answer = item["output"].lower()
            if answer == "yes":
                parsed = True
            elif answer == "no":
                parsed = False
            else:
                # Unparseable output: drop the sample entirely so gts
                # and preds stay aligned.
                fail += 1
                continue

            preds.append(parsed)
            success += 1
            gts.append(item["ground_truth"])

    correct_count = sum(1 for gt, pred in zip(gts, preds) if pred == gt)
    print(f"Success: {success}, Fail: {fail}")
    acc = correct_count / len(gts)
    print(f"Accuracy: {acc}")

    f1 = f1_score(gts, preds)
    print(f"F1 Score: {f1}")
    output_fpath = Path(
        infer_path).parent / f"results/{Path(infer_path).stem}.json"
    output_fpath.parent.mkdir(parents=True, exist_ok=True)
    with open(output_fpath, "w") as writer:
        json.dump({
            "acc": acc,
            "f1": f1,
            "success": success,
            "fail": fail
        },
                  writer,
                  indent=4)
        writer.write("\n")
| 363 |
+
|
def forecasting(self, infer_path: str = ""):
    """Evaluate a forecasting inference HDF5 file.

    The file maps sample ids to groups holding ``gt_path`` (relative to
    the stored ``base_path``) and ``pred_result``.  A prediction must
    match the ground-truth shape exactly; matching samples are flattened
    and pooled, then MAE and non-zero relative MAE are written to
    ``<infer_dir>/results/<stem>.json`` (``"N/A"`` when no sample
    succeeded).

    Args:
        infer_path: Path to the inference ``.h5`` file.
    """
    # Top-level keys that are file metadata rather than sample ids.
    META_KEYS = ("base_path", "dataset_name", "domain", "task", "scene")

    gt_arrs = []
    pred_arrs = []
    success = 0
    fail = 0
    with h5py.File(infer_path, "r") as f:
        base_path = Path(f["base_path"][()].decode("utf-8"))
        # Renamed loop variable from `id`, which shadowed the builtin.
        for sample_id in f.keys():
            if sample_id in META_KEYS:
                continue
            try:
                gt_path = concat_base_path(
                    base_path,
                    f[sample_id]["gt_path"][()].decode("utf-8").strip("/"))
                gt_data = read_time_series_data(gt_path)
                gt_data = np.array(gt_data, dtype=np.float32)
                pred = f[sample_id]["pred_result"][()]

                if pred.shape != gt_data.shape:
                    raise ValueError(
                        f"Pred shape {pred.shape} does not match gt shape {gt_data.shape}"
                    )
                gt_arrs.append(gt_data.reshape(-1))
                pred_arrs.append(pred.reshape(-1))
                success += 1
            except Exception as e:
                print(f"Error processing {sample_id}: {e}")
                fail += 1

    if len(gt_arrs) == 0:
        mae = "N/A"
        rel_mae = "N/A"
    else:
        gt_arrs = np.concatenate(gt_arrs)
        pred_arrs = np.concatenate(pred_arrs)
        mae = mean_absolute_error(gt_arrs, pred_arrs)
        rel_mae = non_zero_rel_mae(gt_arrs, pred_arrs)
    print(
        f"MAE: {mae}, REL_MAE: {rel_mae}, Success: {success}, Fail: {fail}"
    )
    output_fpath = Path(
        infer_path).parent / f"results/{Path(infer_path).stem}.json"
    output_fpath.parent.mkdir(parents=True, exist_ok=True)
    with open(output_fpath, "w") as writer:
        json.dump(
            {
                "rel_mae": rel_mae,
                "mae": mae,
                "success": success,
                "fail": fail,
                # BUGFIX: avoid ZeroDivisionError when the file holds no
                # sample entries at all.
                "success_rate":
                success / (success + fail) if (success + fail) else 0.0
            },
            writer,
            indent=4)
        writer.write("\n")
| 423 |
+
|
| 424 |
+
def synthesize(self, infer_path: str = ""):
    """Evaluate a synthesis-task result file.

    Synthesis outputs are scored exactly like forecasting outputs
    (MAE / relative MAE over the generated series), so this simply
    delegates to ``forecasting``.
    """
    return self.forecasting(infer_path)
|
| 426 |
+
|
| 427 |
+
def imputation(self, infer_path: str = ""):
    """Evaluate an imputation result file (HDF5) and write metrics to JSON.

    For each sample, the positions to impute are the entries marked "X"
    in the input series; predictions and ground truth are compared only
    at those positions. Writes rel_mae / mae / success / fail /
    success_rate to ``results/<stem>.json`` next to the input file.
    """
    gt_arrs = []
    pred_arrs = []
    success = 0
    fail = 0
    # BUG FIX: this counter was referenced below but never initialized, so
    # a length mismatch raised NameError and was miscounted as a failure.
    length_mismatch = 0
    with h5py.File(infer_path, "r") as f:
        base_path = Path(f["base_path"][()].decode("utf-8"))
        for id in f.keys():
            try:
                # Skip file-level metadata entries.
                if id not in [
                        "base_path", "dataset_name", "domain", "task",
                        "scene"
                ]:
                    gt_path = concat_base_path(
                        base_path,
                        f[id]["gt_path"][()].decode("utf-8").strip("/"))
                    gt_data = read_time_series_data(gt_path)

                    input_path = concat_base_path(
                        base_path,
                        f[id]["input_ts_path"][()].decode("utf-8").strip("/"))
                    input_data = read_time_series_data(input_path)

                    # Positions that were masked out and must be imputed.
                    pred_indices = np.where(input_data == "X")[0]
                    pred = f[id]["pred_result"][()]

                    pred = pred[pred_indices]
                    gt_data = gt_data[pred_indices]
                    if len(pred) != len(gt_data):
                        length_mismatch += 1
                        # BUG FIX: align BOTH arrays to the shorter length.
                        # The original truncated only one side (and slicing
                        # beyond the end is a no-op), which could leave the
                        # concatenated metric arrays with unequal totals.
                        n = min(len(pred), len(gt_data))
                        pred = pred[:n]
                        gt_data = gt_data[:n]
                    else:
                        success += 1
                    gt_arrs.append(gt_data)
                    pred_arrs.append(pred)
            except Exception as e:
                print(f"Error processing {id}: {e}")
                fail += 1

    # Consistent with forecasting(): report N/A when nothing was scored
    # instead of crashing on np.concatenate([]).
    if len(gt_arrs) == 0:
        mae = "N/A"
        rel_mae = "N/A"
    else:
        gt_arrs = np.concatenate(gt_arrs)
        pred_arrs = np.concatenate(pred_arrs)
        rel_mae = non_zero_rel_mae(gt_arrs, pred_arrs)
        mae = mean_absolute_error(gt_arrs, pred_arrs)

    print(
        f"REL_MAE: {rel_mae}, MAE: {mae}, Success: {success}, Fail: {fail}"
    )
    output_fpath = Path(
        infer_path).parent / f"results/{Path(infer_path).stem}.json"
    output_fpath.parent.mkdir(parents=True, exist_ok=True)
    with open(output_fpath, "w") as writer:
        json.dump(
            {
                "rel_mae": rel_mae,
                "mae": mae,
                "success": success,
                "fail": fail,
                "success_rate": success / (success + fail)
            },
            writer,
            indent=4)
        writer.write("\n")
|
| 498 |
+
|
| 499 |
+
def event_detection(self, infer_path: str = ""):
    """Evaluate an event-detection result file (JSONL) and write metrics.

    Each line is a JSON record; a record without an "id" key is treated
    as file-level metadata carrying "seq_length". Per sample, the first
    output line is the yes/no event answer and any further lines are
    predicted onset timestamps. Writes acc / f1 / mape / success_rate
    to ``results/<stem>.json`` next to the input file.
    """
    event_gts, event_preds = [], []
    seq_length = None
    success = 0
    total = 0
    timestamp_gts, timestamp_preds = [], []
    with open(infer_path, "r") as f:
        for line in f:
            item = json.loads(line)
            if "id" not in item:
                # Metadata line: only carries the sequence length.
                seq_length = item["seq_length"]
                continue
            event_gt = item["ground_truth"]["contain"]
            event_gts.append(1 if event_gt else 0)

            if "\n" in item["output"]:
                # Collapse repeated blank lines so split("\n") yields
                # clean fields: [event answer, timestamp, ...].
                while '\n\n' in item["output"]:
                    item["output"] = item["output"].replace('\n\n', '\n')
                event_pred, *timestamps = item["output"].split("\n")
            else:
                event_pred = item["output"]
                timestamps = None
            event_preds.append(1 if event_pred.lower() == "yes" else 0)

            if event_gt:
                if "start_time" in item["ground_truth"]:
                    # Single-onset ground truth.
                    gt_timestamps = [item["ground_truth"]["start_time"]]
                elif "start_time_p" in item["ground_truth"]:
                    # Earthquake ground truth: P-wave and S-wave onsets.
                    gt_timestamps = [
                        item["ground_truth"]["start_time_p"],
                        item["ground_truth"]["start_time_s"]
                    ]
                # NOTE(review): if neither key is present, gt_timestamps is
                # unbound and the try below dies with NameError — confirm
                # every positive sample carries one of the two keys.

                if timestamps is None:
                    pass
                else:
                    try:
                        assert len(timestamps) == len(gt_timestamps)
                        for pred_timestamp, gt_timestamp in zip(
                                timestamps, gt_timestamps):
                            # NOTE(review): eval() on model output is unsafe
                            # for untrusted input — consider float() or
                            # ast.literal_eval instead.
                            pred_timestamp = eval(pred_timestamp)
                            timestamp_preds.append(pred_timestamp)
                            timestamp_gts.append(gt_timestamp)
                        success += 1
                    except Exception as e:
                        print(str(e))
                total += 1

    # Event-level classification metrics.
    event_acc = accuracy_score(event_gts, event_preds)
    event_f1 = f1_score(event_gts, event_preds)
    timestamp_gts = np.array(timestamp_gts)
    timestamp_preds = np.array(timestamp_preds)
    # Onset error reported as MAPE over non-zero ground-truth onsets.
    mape = non_zero_rel_mae(timestamp_gts, timestamp_preds)
    output_fpath = Path(
        infer_path).parent / f"results/{Path(infer_path).stem}.json"
    output_fpath.parent.mkdir(parents=True, exist_ok=True)
    with open(output_fpath, "w") as writer:
        json.dump(
            {
                "acc": event_acc,
                "f1": event_f1,
                "mape": mape,
                "success_rate": success / total
            },
            writer,
            indent=4)
        writer.write("\n")
    print({
        "acc": event_acc,
        "f1": event_f1,
        "mape": mape,
        "success_rate": success / total
    })
|
| 572 |
+
|
| 573 |
+
def evaluate(self, infer_dir: str):
    """Run the task-appropriate evaluator over every result file in *infer_dir*.

    The file stem is the dataset id; DATASET_TO_TASK maps it to the name
    of the evaluator method on this object.
    """
    result_files = (p for p in Path(infer_dir).glob("*") if not p.is_dir())
    for result_file in result_files:
        task_name = DATASET_TO_TASK[result_file.stem]
        print(f"evaluating {result_file.stem} ...")
        evaluator = getattr(self, task_name)
        evaluator(result_file)
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
if __name__ == "__main__":
    # CLI entry point: expose the Runner class's methods as subcommands.
    fire.Fire(Runner)
|
process/infer_eval_utils.py
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pathlib import Path
|
| 2 |
+
from typing import Sequence
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import librosa
|
| 6 |
+
from sklearn.metrics import mean_absolute_percentage_error
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
# Dataset ids whose outputs are generated time series (forecasting,
# imputation, synthesis) — stored as HDF5 rather than JSONL downstream.
GENERATION_TASK_IDS = [
    "MEG03",
    "ECG01",
    "ECG02",
    "NEG03",
    "NEG04",
    "ENG01",
    "ENG02",
    "ENG03",
    "ENG04",
    "ENG05",
    "PHG02",
    "PHG03",
    "URG01",
    "URG02",
    "URG03",
    "URG05",
    "MAG01",
]

# Subset of the generation tasks that are imputation (masked "X" points).
IMPUTATION_TASK_IDS = [
    "NEG04",
    "ENG05",
    "PHG03",
    "URG03",
]

# Dataset ids scored as classification tasks.
CLASSIFICATION_TASK_IDS = [
    "ASU03",
    "BIU01",
    "BIU02",
    "BIU03",
    "NEU02",
    "NEU05",
    "NEU06",
    "PHU01",
    "PHU06",
    "MFU01_MFU02",
    "RAU01",
    "RAU02",
]

# Dataset ids scored as event detection (yes/no plus onset timestamps).
EVENT_DETECTION_TASK_IDS = ["ASU01_ASG02", "EAU01_EAG02"]

# Dataset ids scored as anomaly detection (yes/no).
ANOMALY_DETECTION_TASK_IDS = [
    "MEU01",
    "MEU02",
    "NEU01",
    "PHU04",
    "PHU05",
    "URU04",
    "MFU03",
]

# Dataset ids scored as multiple-choice questions.
MCQ_TASK_IDS = ["MEU04", "ECU03"]

# Maps each dataset id to the evaluator method name invoked by the Runner.
DATASET_TO_TASK = {
    "ASU01_ASG02": "event_detection",
    "ASU03": "classification",
    "EAU01_EAG02": "event_detection",
    "BIU01": "classification",
    "BIU02": "classification",
    "BIU03": "classification",
    "MEU01": "anomaly_detection",
    "MEU02": "anomaly_detection",
    "MEG03": "forecasting",
    "MEU04": "mcq",
    "ECG01": "forecasting",
    "ECG02": "forecasting",
    "ECU03": "mcq",
    "NEU01": "anomaly_detection",
    "NEU02": "classification",
    "NEG03": "forecasting",
    "NEG04": "imputation",
    "NEU05": "classification",
    "NEU06": "classification",
    "ENG01": "synthesize",
    "ENG02": "forecasting",
    "ENG03": "forecasting",
    "ENG04": "forecasting",
    "ENG05": "imputation",
    "PHU01": "classification",
    "PHG02": "forecasting",
    "PHG03": "imputation",
    "PHU04": "anomaly_detection",
    "PHU05": "anomaly_detection",
    "PHU06": "classification",
    "URG01": "forecasting",
    "URG02": "forecasting",
    "URG03": "imputation",
    "URU04": "anomaly_detection",
    "URG05": "forecasting",
    "MFU01_MFU02": "classification",
    "MFU03": "anomaly_detection",
    "RAU01": "classification",
    "RAU02": "classification",
    "MAG01": "forecasting"
}
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def read_time_series_data(path: str | Path) -> Sequence:
|
| 110 |
+
path_str = path.__str__()
|
| 111 |
+
data = []
|
| 112 |
+
if path_str.endswith(".csv"):
|
| 113 |
+
with open(path) as raw_data_reader:
|
| 114 |
+
for line in raw_data_reader.readlines():
|
| 115 |
+
line = line.strip("\ufeff")
|
| 116 |
+
if "," in line:
|
| 117 |
+
data.append(line.strip().split(","))
|
| 118 |
+
else:
|
| 119 |
+
data.append(line.strip())
|
| 120 |
+
if "X" not in data:
|
| 121 |
+
data = np.array(data, dtype=np.float32)
|
| 122 |
+
else:
|
| 123 |
+
data = np.array(data)
|
| 124 |
+
elif path_str.endswith(".npy"):
|
| 125 |
+
data = np.load(path)
|
| 126 |
+
elif path_str.endswith(".wav") or path_str.endswith(".flac"):
|
| 127 |
+
data, _ = librosa.core.load(path, mono=False)
|
| 128 |
+
else:
|
| 129 |
+
raise ValueError(f"Unsupported data type {path_str.endswith()}")
|
| 130 |
+
return data
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def concat_base_path(base_path: Path, path: str) -> Path:
    """Join *path* onto *base_path*, falling back to its parent directory.

    Returns ``base_path / path`` when that location exists on disk;
    otherwise returns ``base_path.parent / path`` (some result files
    store paths relative to the directory above the recorded base).
    """
    candidate = base_path / path
    return candidate if candidate.exists() else base_path.parent / path
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def non_zero_rel_mae(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """Mean absolute percentage error restricted to non-zero targets.

    Entries where ``y_true`` is zero are dropped first, because MAPE
    divides by the target value.
    """
    nonzero_positions = np.where(y_true != 0)[0]
    return mean_absolute_percentage_error(y_true[nonzero_positions],
                                          y_pred[nonzero_positions])
|
process/infer_template.py
ADDED
|
@@ -0,0 +1,296 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import random
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
from typing import Sequence, Callable
|
| 5 |
+
|
| 6 |
+
import fire
|
| 7 |
+
import h5py
|
| 8 |
+
import numpy as np
|
| 9 |
+
from tqdm import tqdm
|
| 10 |
+
|
| 11 |
+
from infer_eval_utils import (
|
| 12 |
+
read_time_series_data,
|
| 13 |
+
GENERATION_TASK_IDS,
|
| 14 |
+
CLASSIFICATION_TASK_IDS,
|
| 15 |
+
EVENT_DETECTION_TASK_IDS,
|
| 16 |
+
ANOMALY_DETECTION_TASK_IDS,
|
| 17 |
+
MCQ_TASK_IDS,
|
| 18 |
+
IMPUTATION_TASK_IDS
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def read_raw_data(path: str | None) -> Sequence:
|
| 23 |
+
if path is None:
|
| 24 |
+
return []
|
| 25 |
+
|
| 26 |
+
return read_time_series_data(path)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def transform_raw_data_single_channel(raw_data: list | np.ndarray,
                                      dataset_id: str) -> str:
    """Render one channel as space-separated values with 3 decimal places.

    For generation tasks the literal mask token "X" is passed through
    unchanged; every other value is formatted as a float.
    """
    if dataset_id in GENERATION_TASK_IDS:
        tokens = [x if x == "X" else f"{float(x):.3f}" for x in raw_data]
    else:
        tokens = [f"{float(x):.3f}" for x in raw_data]
    return " ".join(tokens)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def transform_raw_data_to_text(raw_data: list | np.ndarray, dataset_id: str,
                               channel_detail: list[str]) -> str:
    """Serialize raw input data to prompt text.

    Multi-channel ndarrays are rendered one channel at a time, prefixed
    with the channel name (auto-named "channel i" when no names are
    given); anything else is rendered as a single channel.
    """
    # BUG FIX / robustness: the caller passes channel_detail=None when no
    # input series is present; guard so len() below cannot raise TypeError.
    if channel_detail is None:
        channel_detail = []

    if isinstance(raw_data, np.ndarray):
        if raw_data.ndim > 1 and raw_data.shape[1] > 1 and len(
                channel_detail) == 0:
            channel_detail = [f"channel {i}" for i in range(raw_data.shape[1])]

        if len(channel_detail) <= 1:
            data_text = transform_raw_data_single_channel(raw_data, dataset_id)
        else:
            data_text = ""
            for channel_idx, channel_name in enumerate(channel_detail):
                channel_data = raw_data[:, channel_idx]
                channel_data_text = transform_raw_data_single_channel(
                    channel_data, dataset_id)
                data_text += f"{channel_name}: {channel_data_text} "
    else:
        data_text = transform_raw_data_single_channel(raw_data, dataset_id)
    return data_text
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def transform_gt_data_to_text(gt_data: list | np.ndarray,
                              dataset_id: str) -> str:
    """Serialize ground truth: 1-D as one line, 2-D as one line per channel.

    Each channel line uses the same formatting as the input serialization
    and ends with a newline.
    """
    gt_data = np.array(gt_data)
    if gt_data.ndim == 1:
        return transform_raw_data_single_channel(gt_data, dataset_id)
    channel_lines = [
        transform_raw_data_single_channel(gt_data[:, col], dataset_id)
        for col in range(gt_data.shape[1])
    ]
    return "".join(line + "\n" for line in channel_lines)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def get_extra_instruction(dataset_id: str, ) -> str:
    """Return dataset-specific output-format instructions for the prompt.

    Exact-match dataset ids take precedence over the task-family
    fallbacks, mirroring the original branch order; unknown ids get an
    empty string.
    """
    per_dataset = {
        "ASU01_ASG02":
            "Answer yes or no in the first line. If the Gravitational Wave is detected, answer the index of the starting time point in the second line.",
        "EAU01_EAG02":
            "Answer yes or no in the first line. If an Earthquake event is detected, answer the starting time point index of the P-wave in the second line, "
            "answer the starting time point index of the S-wave in the third line.",
        "MFU01_MFU02":
            "Output the diameter in the first line, and the position in the second line.",
        "PHU01":
            "Give each answer in a line. For example, if the answer is ['NORM', 'MI'], you should output: NORM\nMI.",
        "MAG01":
            "Give answer of each channel in a line so the number of predicted time points in each line should match the given one. For example, if "
            "it is required to predict the next 5 time points, and the predicted x0, x1, x2 are [[0.1, 0.2, 0.3, 0.4, 0.5], [0.4, 0.5, 0.6, 0.7, 0.8], [0.7, 0.8, 0.9, 0.1, 0.2]], "
            "you should output: 0.1 0.2 0.3 0.4 0.5\n0.4 0.5 0.6 0.7 0.8\n0.7 0.8 0.9 0.1 0.2.",
    }
    if dataset_id in per_dataset:
        return per_dataset[dataset_id]
    if dataset_id in ANOMALY_DETECTION_TASK_IDS:
        return "Answer yes if anomaly points are detected, and no if there are only normal points."
    if dataset_id in GENERATION_TASK_IDS:
        return "Output the values separated by spaces."
    return ""
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def extract_gt(data: dict, dataset_id: str) -> str | dict | Path:
    """Pull the ground-truth payload for a sample, keyed by task family.

    Classification labels are unwrapped from their single-entry dict/list
    containers; generation tasks return a relative file path; other
    families return the raw result field they are scored on.

    Raises:
        ValueError: when the dataset id belongs to no known family.
    """
    if dataset_id in CLASSIFICATION_TASK_IDS:
        label = data["gt_result"]["gt_class"]
        if isinstance(label, dict) and len(label) == 1:
            label = label["default"]
        if isinstance(label, list) and len(label) == 1:
            label = label[0]
        return label
    if dataset_id in GENERATION_TASK_IDS:
        return data["gt_ts"]["path"].strip("/")
    if dataset_id in EVENT_DETECTION_TASK_IDS:
        return data["gt_result"]
    if dataset_id in ANOMALY_DETECTION_TASK_IDS:
        return data["gt_result"]["contain"]
    if dataset_id in MCQ_TASK_IDS:
        return data["gt_result"]["answer"]
    raise ValueError(f"Unsupported dataset id: {dataset_id}")
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def initialize_model() -> Callable:
    """Construct and return the model callable used for inference.

    This is a stub for users of the benchmark to fill in. The returned
    object is invoked as:

    ```python
    response = model(prompt)
    # or, to cap the response length:
    response = model(prompt, max_tokens=max_tokens)
    ```
    """
    return None
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def infer_dataset(model: Callable, dataset_data: list, scits_dir: Path,
                  dataset_id: str, output_path: Path):
    """Run the model over one dataset's samples and append results to disk.

    Generation tasks write numeric predictions to ``<dataset_id>.h5``;
    all other tasks write JSONL records (first line: metadata). Already-
    completed sample ids found in an existing output file are skipped,
    so the function is resumable.
    """
    print(f"Inferring {dataset_id}")

    # Generation outputs are arrays -> HDF5; everything else -> JSONL.
    if dataset_id in GENERATION_TASK_IDS:
        ext = "h5"
    else:
        ext = "jsonl"

    output_path = Path(output_path) / f"{dataset_id}.{ext}"
    output_path.parent.mkdir(parents=True, exist_ok=True)
    completed_ids = []

    # has_metadata is only needed on the JSONL path below.
    if str(output_path).endswith(".jsonl"):
        has_metadata = False

    # Resume support: collect ids already present in the output file.
    if output_path.exists():
        if str(output_path).endswith(".jsonl"):
            with open(output_path, 'r') as f:
                for line in f.readlines():
                    data = json.loads(line)
                    if "id" in data:
                        completed_ids.append(data["id"])
                    else:
                        has_metadata = True
        elif str(output_path).endswith(".h5"):
            with h5py.File(output_path, 'r') as f:
                completed_ids = list(f.keys())

    completed_ids = set(completed_ids)
    random.shuffle(dataset_data)
    # NOTE(review): only 10 random samples per dataset are inferred —
    # presumably a cost-saving subsample; confirm this is intentional.
    dataset_data = dataset_data[:10]

    # Sequence length for event-detection metadata; bare except tolerates
    # samples without an input_ts entry.
    try:
        seq_length = dataset_data[0]["input_ts"]["length"]
    except:
        seq_length = None

    for sample in tqdm(dataset_data):
        id = sample["id"].replace(
            "/", "%2F")  # to avoid errors related to "/" in hdf5

        if id in completed_ids:
            continue

        # Load raw data
        if sample["input_ts"] is None:
            raw_data_path = None
            channel_detail = None
        else:
            raw_data_path = scits_dir / sample["input_ts"]["path"].strip("/")
            channel_detail = sample["input_ts"]["channel_detail"]

        raw_data = read_raw_data(raw_data_path)
        raw_data_text = transform_raw_data_to_text(raw_data, dataset_id,
                                                   channel_detail)

        gt = extract_gt(sample, dataset_id)
        extra_instruction = get_extra_instruction(dataset_id)

        if dataset_id in GENERATION_TASK_IDS:
            # give max_tokens to save cost for generation tasks; the cap is
            # the character length of the serialized ground truth.
            gt_data = read_time_series_data(scits_dir / gt)
            gt_data_text = transform_gt_data_to_text(
                gt_data, dataset_id)
            max_tokens = len(gt_data_text)
        else:
            max_tokens = None

        prompt_text = f'{sample["input_text"]} {extra_instruction} Give me the answer directly, ' \
                      f'without any other extra content (including punctuation). ' \
                      f'{raw_data_text}'
        output_text = model(text=prompt_text, max_tokens=max_tokens)
        # print(f"output_text: {output_text}")

        if dataset_id not in GENERATION_TASK_IDS:
            with open(output_path, 'a') as writer:
                # Write the metadata line once, before the first record.
                if not has_metadata:
                    metadata = {}
                    if dataset_id in EVENT_DETECTION_TASK_IDS:
                        metadata["seq_length"] = seq_length
                    writer.write(json.dumps(metadata) + "\n")
                    has_metadata = True

                pred_results = output_text
                # Multi-label classification: map one output line per class;
                # a line-count mismatch is recorded as "NA".
                if "class_list" in sample["gt_result"] and isinstance(
                        sample["gt_result"]["class_list"], dict) and len(
                            sample["gt_result"]["class_list"]) > 1:
                    if len(output_text.split("\n")) != len(
                            sample["gt_result"]["class_list"]):
                        pred_results = "NA"
                    else:
                        pred_results = {}
                        for class_name, pred_result in zip(
                                sample["gt_result"]["class_list"],
                                output_text.split("\n")):
                            pred_results[class_name] = pred_result
                writer.write(
                    json.dumps({
                        "id": id,
                        "output": pred_results,
                        "ground_truth": gt
                    }) + "\n")
        else:
            # Parse numeric output: one line -> 1-D, multi-line -> one row
            # per channel, transposed to (time, channel).
            # NOTE(review): np.fromstring is deprecated for text parsing in
            # recent NumPy — consider np.fromiter/str.split.
            if "\n" not in output_text:
                pred_result = np.fromstring(output_text.strip(),
                                            dtype=np.float32,
                                            sep=' ')
            else:
                try:
                    pred_result = np.vstack([
                        np.fromstring(x.strip(), dtype=np.float32, sep=' ')
                        for x in output_text.split("\n")
                    ]).transpose()
                except ValueError:
                    # Ragged lines: store an empty prediction.
                    pred_result = np.array([])

            with h5py.File(output_path, 'a') as writer:
                writer[f"{id}/pred_result"] = pred_result
                writer[f"{id}/gt_path"] = gt.__str__().encode("utf-8")
                if dataset_id in IMPUTATION_TASK_IDS:
                    writer[f"{id}/input_ts_path"] = sample["input_ts"][
                        "path"].strip("/").encode("utf-8")
                if "base_path" not in writer:
                    writer["base_path"] = scits_dir.__str__().encode("utf-8")
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
def infer(
        scits_dir: str,
        output_dir: str,
):
    """Run inference over meta_data.jsonl, batching samples by dataset.

    Samples sharing a task_id are assumed contiguous in the metadata
    file; each contiguous run is handed to infer_dataset as one batch.
    """
    # Initialize the model callable once for all datasets.
    model: Callable = initialize_model()

    scits_root = Path(scits_dir)
    out_root = Path(output_dir)

    batch = []
    current_id = None
    with open(scits_root / "meta_data.jsonl", 'r') as meta_file:
        for raw_line in meta_file:
            sample = json.loads(raw_line)
            sample_dataset_id = "_".join(sample["task_id"])

            if sample_dataset_id != current_id:
                # Dataset boundary: flush the accumulated batch.
                if current_id is not None:
                    infer_dataset(model, batch, scits_root, current_id,
                                  out_root)
                batch = []
                current_id = sample_dataset_id
            batch.append(sample)

    # Flush the final batch.
    infer_dataset(model, batch, scits_root, current_id, out_root)
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
if __name__ == '__main__':
    # CLI entry point: expose infer's parameters as command-line flags.
    fire.Fire(infer)
|
process/process_ETT.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import os
|
| 3 |
+
from tqdm import tqdm
|
| 4 |
+
import argparse
|
| 5 |
+
|
| 6 |
+
def main():
    """
    Main function to process CSV data and generate input and gt data files for ETT forecasting.

    Reads the ETT CSV given by --data_path and, for each
    (seq_len, pred_len) configuration, writes one input window and one
    ground-truth window per start index as header-less CSVs containing
    only the 'OT' column. Only the split named by generate_data_type
    is emitted.
    """
    # Set up argument parser
    parser = argparse.ArgumentParser(description="Process ETT forecasting data.")
    parser.add_argument("--data_path", type=str, required=True, help="Path to the ETTh1.csv file.")
    args = parser.parse_args()

    try:
        # Define the source folder containing the original files
        csv_file_path = args.data_path

        # Read the CSV file into a DataFrame
        df = pd.read_csv(csv_file_path)

        # Define target folders for input and ground truth data
        target_folder = "Energy-ETT-Transformer_sensor_signal-Forecasting"
        input_data_path = os.path.join(target_folder, "raw_input_data")
        gt_data_path = os.path.join(target_folder, "raw_gt_data")

        # Create directories if they don't exist
        os.makedirs(input_data_path, exist_ok=True)
        os.makedirs(gt_data_path, exist_ok=True)

        # Define sequence and prediction lengths (paired element-wise below)
        seq_len_list = [96, 96]
        pred_len_list = [96, 720]
        label = 0  # No overlap

        # Specify the type of data to generate (e.g., "train", "val", "test")
        generate_data_type = "test"

        # Iterate over sequence and prediction lengths
        for seq_len, pred_len in zip(seq_len_list, pred_len_list):

            # Define start and end indices for different data types.
            # NOTE(review): boundaries look like the common ETT hourly
            # 12/4/4-month split convention — confirm against the dataset.
            start_idx = {
                "train": 0,
                "val": 12 * 30 * 24 - pred_len,
                "test": (12 + 4) * 30 * 24 - pred_len,
            }

            end_idx = {
                "train": 12 * 30 * 24 - seq_len - pred_len,
                "val": (12 + 4) * 30 * 24 - seq_len - pred_len,
                "test": (12 + 8) * 30 * 24 - seq_len - pred_len,
            }

            # Iterate over the specified range of indices
            for i in tqdm(range(start_idx[generate_data_type], end_idx[generate_data_type] + 1), desc=f"Generating data: context_length: {seq_len}, prediction_length: {pred_len}"):
                # Extract input and gt data
                data_input = df.iloc[i : i + seq_len]
                data_gt = df.iloc[i + seq_len : i + seq_len + pred_len]

                # Save input and gt data to CSV files, select 'OT' column
                data_input[['OT']].to_csv(
                    os.path.join(input_data_path, f'seq{seq_len}_label{label}_pred{pred_len}_index{i}_input_ts_OT.csv'),
                    index=False,
                    header=False,
                    encoding='utf-8'
                )
                data_gt[['OT']].to_csv(
                    os.path.join(gt_data_path, f'seq{seq_len}_label{label}_pred{pred_len}_index{i}_target_ts_OT.csv'),
                    index=False,
                    header=False,
                    encoding='utf-8'
                )

    except FileNotFoundError:
        print(f"Error: File {csv_file_path} not found. Please check the path or filename.")
    except pd.errors.EmptyDataError:
        print(f"Error: File {csv_file_path} is empty.")
    except pd.errors.ParserError:
        print(f"Error: File {csv_file_path} is not a valid CSV file.")
    except Exception as e:
        print(f"An unexpected error occurred: {e}")
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
if __name__ == "__main__":
    # Script entry point.
    main()
|
process/process_iNaturalist.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
import shutil
|
| 4 |
+
from tqdm import tqdm
|
| 5 |
+
import argparse
|
| 6 |
+
|
| 7 |
+
def read_jsonl(path):
    """Read a JSON-Lines file and return its records as a list.

    Args:
        path: Path to the .jsonl file.

    Raises:
        FileNotFoundError: if *path* does not exist.
        json.JSONDecodeError: if a line is not valid JSON.
        Exception: for any other unexpected failure, with context.
    """
    data_list = []
    try:
        with open(path, "r", encoding="utf-8") as file:
            for line in tqdm(file, desc="Reading JSONL file"):
                data_list.append(json.loads(line))
        return data_list
    except FileNotFoundError as e:
        raise FileNotFoundError(f"Error: File {path} not found.") from e
    except json.JSONDecodeError as e:
        # BUG FIX: JSONDecodeError requires (msg, doc, pos); the original
        # one-argument re-raise itself crashed with TypeError.
        raise json.JSONDecodeError(
            f"Error: Invalid JSON format in file {path}.", e.doc, e.pos) from e
    except Exception as e:
        raise Exception(
            f"An unexpected error occurred while reading {path}: {e}") from e
|
| 21 |
+
|
| 22 |
+
def main():
    """
    Main function to process metadata and copy relevant files from iNaturalist test data.

    Reads meta_data.jsonl from the working directory and, for every
    BIU02 sample, copies the referenced audio file from --data_folder
    into the relative path recorded in the metadata.
    """
    # Set up argument parser
    parser = argparse.ArgumentParser(description="Process iNaturalist data.")
    parser.add_argument("--data_folder", type=str, required=True, help="Path to the iNaturalist test folder.")
    args = parser.parse_args()

    try:
        # Define the source folder containing the original files
        data_folder = args.data_folder

        # NOTE(review): reads meta_data.jsonl from the current working
        # directory — confirm the script is always run from the dataset root.
        meta_data = read_jsonl("meta_data.jsonl")

        # Copying iNaturalist test data in the metadata
        for data in tqdm(meta_data, desc="Copying iNaturalist test data"):
            if data["task_id"][0] == "BIU02":
                tmp_path = data["input_ts"]["path"]
                target_folder = os.path.dirname(tmp_path)

                # Create the target folder if it doesn't exist
                os.makedirs(target_folder, exist_ok=True)

                # Extract the file name and parent folder from the path:
                # the basename is assumed to be "<folder>_<file>" — the
                # segment after the last "_" is the source file name.
                data_name = os.path.basename(tmp_path).split("_")[-1]
                folder = os.path.basename(tmp_path).replace(data_name, "")[:-1]

                # Copy the file if it doesn't exist in the target path
                if not os.path.exists(tmp_path):
                    source_path = os.path.join(data_folder, os.path.join(folder, data_name))
                    try:
                        shutil.copy(source_path, tmp_path)
                    except FileNotFoundError:
                        print(f"Warning: Source file {source_path} not found. Skipping.")
                    except Exception as e:
                        print(f"Warning: Failed to copy {source_path} to {tmp_path}. Error: {e}")

    except Exception as e:
        print(f"An error occurred during execution: {e}")
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
if __name__ == "__main__":
    # Script entry point.
    main()
|
process/requirements.txt
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
pandas
|
| 2 |
+
numpy
|
| 3 |
+
librosa
|
| 4 |
+
scikit-learn
|
| 5 |
+
fire
|
| 6 |
+
h5py
|
| 7 |
+
tqdm
|