import json
import random
from pathlib import Path
from typing import Sequence, Callable

import fire
import h5py
import numpy as np
from tqdm import tqdm

from infer_eval_utils import (
    read_time_series_data,
    GENERATION_TASK_IDS, 
    CLASSIFICATION_TASK_IDS, 
    EVENT_DETECTION_TASK_IDS, 
    ANOMALY_DETECTION_TASK_IDS,
    MCQ_TASK_IDS, 
    IMPUTATION_TASK_IDS
)


def read_raw_data(path: str | Path | None) -> Sequence:
    """Read a time-series file; return an empty sequence when no path is given."""
    if path is None:
        return []

    return read_time_series_data(path)


def transform_raw_data_single_channel(raw_data: list | np.ndarray,
                                      dataset_id: str) -> str:
    # For generation tasks, keep masked points (marked "X") verbatim;
    # format all other values to three decimal places.
    if dataset_id in GENERATION_TASK_IDS:
        data_str_list = []
        for x in raw_data:
            if x != "X":
                data_str_list.append(f"{float(x):.3f}")
            else:
                data_str_list.append(x)
        data_text = " ".join(data_str_list)
    else:
        data_text = " ".join([f"{float(x):.3f}" for x in raw_data])

    return data_text


def transform_raw_data_to_text(raw_data: list | np.ndarray, dataset_id: str,
                               channel_detail: list[str]) -> str:
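    """Serialize raw time-series data as whitespace-separated numbers.

    Multi-channel arrays are emitted channel by channel. Illustrative example:
    the array [[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]] with channel_detail
    ["temp", "hum"] becomes
    'temp: 0.100 0.200 0.300 hum: 0.400 0.500 0.600 ' (note the trailing space).
    """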

    if isinstance(raw_data, np.ndarray):
        if raw_data.ndim > 1 and raw_data.shape[1] > 1 and len(
                channel_detail) == 0:
            channel_detail = [f"channel {i}" for i in range(raw_data.shape[1])]

        if len(channel_detail) <= 1:
            data_text = transform_raw_data_single_channel(raw_data, dataset_id)
        else:
            data_text = ""
            for channel_idx, channel_name in enumerate(channel_detail):
                channel_data = raw_data[:, channel_idx]
                channel_data_text = transform_raw_data_single_channel(
                    channel_data, dataset_id)
                data_text += f"{channel_name}: {channel_data_text} "
    else:
        data_text = transform_raw_data_single_channel(raw_data, dataset_id)
    return data_text


def transform_gt_data_to_text(gt_data: list | np.ndarray,
                              dataset_id: str) -> str:
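    """Serialize ground-truth data; multi-channel arrays get one line per channel."""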
    gt_data = np.array(gt_data)
    if gt_data.ndim == 1:
        data_text = transform_raw_data_single_channel(gt_data, dataset_id)
    else:
        data_text = ""
        for channel_idx in range(gt_data.shape[1]):
            channel_data = gt_data[:, channel_idx]
            channel_data_text = transform_raw_data_single_channel(
                channel_data, dataset_id)
            data_text += f"{channel_data_text}\n"

    return data_text


def get_extra_instruction(dataset_id: str) -> str:
    extra_instruction = ""
    if dataset_id == "ASU01_ASG02":
        extra_instruction = "Answer yes or no in the first line. If the Gravitational Wave is detected, answer the index of the starting time point in the second line."
    elif dataset_id == "EAU01_EAG02":
        extra_instruction = "Answer yes or no in the first line. If an Earthquake event is detected, answer the starting time point index of the P-wave in the second line, " \
                            "answer the starting time point index of the S-wave in the third line."
    elif dataset_id == "MFU01_MFU02":
        extra_instruction = "Output the diameter in the first line, and the position in the second line."
    elif dataset_id == "PHU01":
        extra_instruction = "Give each answer in a line. For example, if the answer is ['NORM', 'MI'], you should output: NORM\nMI."
    elif dataset_id == "MAG01":
        extra_instruction = "Give answer of each channel in a line so the number of predicted time points in each line should match the given one. For example, if " \
            "it is required to predict the next 5 time points, and the predicted x0, x1, x2 are [[0.1, 0.2, 0.3, 0.4, 0.5], [0.4, 0.5, 0.6, 0.7, 0.8], [0.7, 0.8, 0.9, 0.1, 0.2]], " \
            "you should output: 0.1 0.2 0.3 0.4 0.5\n0.4 0.5 0.6 0.7 0.8\n0.7 0.8 0.9 0.1 0.2."
    elif dataset_id in ANOMALY_DETECTION_TASK_IDS:
        extra_instruction = "Answer yes if anomaly points are detected, and no if there are only normal points."
    elif dataset_id in GENERATION_TASK_IDS:
        extra_instruction = "Output the values separated by spaces."
    return extra_instruction


def extract_gt(data: dict, dataset_id: str) -> str | list | dict:
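    """Pull the ground-truth answer for a sample according to its task family."""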
    if dataset_id in CLASSIFICATION_TASK_IDS:
        gt = data["gt_result"]["gt_class"]
        if isinstance(gt, dict) and len(gt) == 1:
            gt = gt["default"]
        if isinstance(gt, list) and len(gt) == 1:
            gt = gt[0]
    elif dataset_id in GENERATION_TASK_IDS:
        gt = data["gt_ts"]["path"].strip("/")
    elif dataset_id in EVENT_DETECTION_TASK_IDS:
        gt = data["gt_result"]
    elif dataset_id in ANOMALY_DETECTION_TASK_IDS:
        gt = data["gt_result"]["contain"]
    elif dataset_id in MCQ_TASK_IDS:
        gt = data["gt_result"]["answer"]
    else:
        raise ValueError(f"Unsupported dataset id: {dataset_id}")
    return gt


def initialize_model() -> Callable:
    """
    Initialize the model here. The returned callable is invoked as:

    ```python
    response = model(text=prompt)
    # or, to limit the response length:
    response = model(text=prompt, max_tokens=max_tokens)
    ```
    """
    raise NotImplementedError(
        "Plug in your model here; see the example sketch below.")
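

# Illustrative sketch (an assumption, not part of the original script): one
# possible initialize_model() backed by the OpenAI chat-completions client.
# The model name is a placeholder; credentials are read from OPENAI_API_KEY.
def initialize_model_openai_example() -> Callable:
    from openai import OpenAI  # optional dependency, used only by this example

    client = OpenAI()

    def model(text: str, max_tokens: int | None = None) -> str:
        response = client.chat.completions.create(
            model="gpt-4o-mini",  # placeholder; substitute your model
            messages=[{"role": "user", "content": text}],
            max_tokens=max_tokens,
        )
        return response.choices[0].message.content

    return model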


def infer_dataset(model: Callable, dataset_data: list, scits_dir: Path,
                  dataset_id: str, output_path: Path):
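    """Run inference on one dataset, appending results to a JSONL file
    (classification/detection/MCQ tasks) or an HDF5 file (generation tasks).
    Samples already present in the output are skipped, so runs can resume."""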
    print(f"Inferring {dataset_id}")

    if dataset_id in GENERATION_TASK_IDS:
        ext = "h5"
    else:
        ext = "jsonl"

    output_path = Path(output_path) / f"{dataset_id}.{ext}"
    output_path.parent.mkdir(parents=True, exist_ok=True)
    completed_ids = []

    # The first line of a JSONL output file is a metadata header (no "id" key).
    has_metadata = False

    if output_path.exists():
        if str(output_path).endswith(".jsonl"):
            with open(output_path, 'r') as f:
                for line in f:
                    data = json.loads(line)
                    if "id" in data:
                        completed_ids.append(data["id"])
                    else:
                        has_metadata = True
        elif str(output_path).endswith(".h5"):
            with h5py.File(output_path, 'r') as f:
                completed_ids = list(f.keys())

    completed_ids = set(completed_ids)
    # NOTE: only a random subset of 10 samples per dataset is evaluated;
    # drop the slice below to run the full set.
    random.shuffle(dataset_data)
    dataset_data = dataset_data[:10]

    try:
        seq_length = dataset_data[0]["input_ts"]["length"]
    except (TypeError, KeyError, IndexError):
        seq_length = None

    for sample in tqdm(dataset_data):
        sample_id = sample["id"].replace(
            "/", "%2F")  # "/" is an HDF5 group separator, so escape it

        if sample_id in completed_ids:
            continue

        # Load raw data
        if sample["input_ts"] is None:
            raw_data_path = None
            channel_detail = []
        else:
            raw_data_path = scits_dir / sample["input_ts"]["path"].strip("/")
            channel_detail = sample["input_ts"]["channel_detail"]

        raw_data = read_raw_data(raw_data_path)
        raw_data_text = transform_raw_data_to_text(raw_data, dataset_id,
                                                   channel_detail)

        gt = extract_gt(sample, dataset_id)
        extra_instruction = get_extra_instruction(dataset_id)

        if dataset_id in GENERATION_TASK_IDS:
            # Cap the response length for generation tasks: the character
            # count of the ground-truth text is a generous upper bound on its
            # token count for typical tokenizers, which saves cost.
            gt_data = read_time_series_data(scits_dir / gt)
            gt_data_text = transform_gt_data_to_text(gt_data, dataset_id)
            max_tokens = len(gt_data_text)
        else:
            max_tokens = None

        prompt_text = f'{sample["input_text"]} {extra_instruction} Give me the answer directly, ' \
                    f'without any other extra content (including punctuation). ' \
                    f'{raw_data_text}'
        output_text = model(text=prompt_text, max_tokens=max_tokens)

        if dataset_id not in GENERATION_TASK_IDS:
            with open(output_path, 'a') as writer:
                if not has_metadata:
                    metadata = {}
                    if dataset_id in EVENT_DETECTION_TASK_IDS:
                        metadata["seq_length"] = seq_length
                    writer.write(json.dumps(metadata) + "\n")
                    has_metadata = True

                # Multi-label tasks answer one class per line; map lines back
                # to their class names, or record "NA" on a count mismatch.
                pred_results = output_text
                if "class_list" in sample["gt_result"] and isinstance(
                        sample["gt_result"]["class_list"], dict) and len(
                            sample["gt_result"]["class_list"]) > 1:
                    if len(output_text.split("\n")) != len(
                            sample["gt_result"]["class_list"]):
                        pred_results = "NA"
                    else:
                        pred_results = {}
                        for class_name, pred_result in zip(
                                sample["gt_result"]["class_list"],
                                output_text.split("\n")):
                            pred_results[class_name] = pred_result
                writer.write(
                    json.dumps({
                        "id": id,
                        "output": pred_results,
                        "ground_truth": gt
                    }) + "\n")
        else:
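            # Parse the generated text back into a numeric array. Multi-line
            # output is treated as one channel per line, then transposed so
            # rows are time points: "0.1 0.2\n0.3 0.4" -> [[0.1, 0.3], [0.2, 0.4]].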
            if "\n" not in output_text:
                pred_result = np.fromstring(output_text.strip(),
                                            dtype=np.float32,
                                            sep=' ')
            else:
                try:
                    pred_result = np.vstack([
                        np.fromstring(x.strip(), dtype=np.float32, sep=' ')
                        for x in output_text.split("\n")
                    ]).transpose()
                except ValueError:
                    pred_result = np.array([])

            with h5py.File(output_path, 'a') as writer:
                writer[f"{sample_id}/pred_result"] = pred_result
                writer[f"{sample_id}/gt_path"] = str(gt).encode("utf-8")
                if dataset_id in IMPUTATION_TASK_IDS:
                    writer[f"{sample_id}/input_ts_path"] = sample["input_ts"][
                        "path"].strip("/").encode("utf-8")
                if "base_path" not in writer:
                    writer["base_path"] = str(scits_dir).encode("utf-8")



def infer(
    scits_dir: str,
    output_dir: str,
):
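    """Group consecutive samples in meta_data.jsonl by task id and run
    inference on each dataset in turn."""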

    # Initialize the model callable
    model: Callable = initialize_model()

    scits_dir = Path(scits_dir)
    output_dir = Path(output_dir)
    dataset_data = []
    prev_dataset_id = None
    # meta_data.jsonl is assumed to be grouped by task id: flush the current
    # batch to infer_dataset() whenever the id changes.
    with open(scits_dir / "meta_data.jsonl", 'r') as f:
        for line in f:
            sample = json.loads(line)
            dataset_id = "_".join(sample["task_id"])

            if dataset_id != prev_dataset_id:
                if prev_dataset_id is not None:
                    infer_dataset(model, dataset_data, scits_dir, prev_dataset_id,
                                  output_dir)
                dataset_data = []
                prev_dataset_id = dataset_id
            dataset_data.append(sample)

    # Flush the final batch (guard against an empty metadata file).
    if dataset_data:
        infer_dataset(model, dataset_data, scits_dir, prev_dataset_id,
                      output_dir)


if __name__ == '__main__':
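    # Example invocation (script name and paths are placeholders):
    #   python infer.py --scits_dir=/data/scits --output_dir=outputs/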
    fire.Fire(infer)