SciTS / process / infer_eval_utils.py
lwLiu's picture
Add files using upload-large-folder tool
aadeb6f verified
raw
history blame
3.32 kB
from pathlib import Path
from typing import Sequence
import numpy as np
import librosa
from sklearn.metrics import mean_absolute_percentage_error
# Task-ID groupings used to route each dataset to its evaluation protocol.
# IDs follow a <domain><kind><number> scheme (e.g. "ECG01"); underscore-joined
# IDs such as "ASU01_ASG02" denote merged/paired tasks.

# Tasks whose output is a generated time series (forecasting/synthesis/imputation).
GENERATION_TASK_IDS = [
    "MEG03",
    "ECG01",
    "ECG02",
    "NEG03",
    "NEG04",
    "ENG01",
    "ENG02",
    "ENG03",
    "ENG04",
    "ENG05",
    "PHG02",
    "PHG03",
    "URG01",
    "URG02",
    "URG03",
    "URG05",
    "MAG01",
]
# Subset of generation tasks evaluated as imputation (per DATASET_TO_TASK below).
IMPUTATION_TASK_IDS = [
    "NEG04",
    "ENG05",
    "PHG03",
    "URG03",
]
# Tasks evaluated with classification metrics.
CLASSIFICATION_TASK_IDS = [
    "ASU03",
    "BIU01",
    "BIU02",
    "BIU03",
    "NEU02",
    "NEU05",
    "NEU06",
    "PHU01",
    "PHU06",
    "MFU01_MFU02",
    "RAU01",
    "RAU02",
]
# Paired understanding+generation tasks scored as event detection.
EVENT_DETECTION_TASK_IDS = ["ASU01_ASG02", "EAU01_EAG02"]
# Tasks scored as anomaly detection.
ANOMALY_DETECTION_TASK_IDS = [
    "MEU01",
    "MEU02",
    "NEU01",
    "PHU04",
    "PHU05",
    "URU04",
    "MFU03",
]
# Multiple-choice question tasks.
MCQ_TASK_IDS = ["MEU04", "ECU03"]
# Canonical dataset-ID -> evaluation-task-type mapping; the lists above are
# convenience groupings over this table.
DATASET_TO_TASK = {
    "ASU01_ASG02": "event_detection",
    "ASU03": "classification",
    "EAU01_EAG02": "event_detection",
    "BIU01": "classification",
    "BIU02": "classification",
    "BIU03": "classification",
    "MEU01": "anomaly_detection",
    "MEU02": "anomaly_detection",
    "MEG03": "forecasting",
    "MEU04": "mcq",
    "ECG01": "forecasting",
    "ECG02": "forecasting",
    "ECU03": "mcq",
    "NEU01": "anomaly_detection",
    "NEU02": "classification",
    "NEG03": "forecasting",
    "NEG04": "imputation",
    "NEU05": "classification",
    "NEU06": "classification",
    # NOTE(review): ENG01 maps to "synthesize" here but sits in
    # GENERATION_TASK_IDS with the forecasting tasks — confirm this is intended.
    "ENG01": "synthesize",
    "ENG02": "forecasting",
    "ENG03": "forecasting",
    "ENG04": "forecasting",
    "ENG05": "imputation",
    "PHU01": "classification",
    "PHG02": "forecasting",
    "PHG03": "imputation",
    "PHU04": "anomaly_detection",
    "PHU05": "anomaly_detection",
    "PHU06": "classification",
    "URG01": "forecasting",
    "URG02": "forecasting",
    "URG03": "imputation",
    "URU04": "anomaly_detection",
    "URG05": "forecasting",
    "MFU01_MFU02": "classification",
    "MFU03": "anomaly_detection",
    "RAU01": "classification",
    "RAU02": "classification",
    "MAG01": "forecasting"
}
def read_time_series_data(path: str | Path) -> Sequence:
path_str = path.__str__()
data = []
if path_str.endswith(".csv"):
with open(path) as raw_data_reader:
for line in raw_data_reader.readlines():
line = line.strip("\ufeff")
if "," in line:
data.append(line.strip().split(","))
else:
data.append(line.strip())
if "X" not in data:
data = np.array(data, dtype=np.float32)
else:
data = np.array(data)
elif path_str.endswith(".npy"):
data = np.load(path)
elif path_str.endswith(".wav") or path_str.endswith(".flac"):
data, _ = librosa.core.load(path, mono=False)
else:
raise ValueError(f"Unsupported data type {path_str.endswith()}")
return data
def concat_base_path(base_path: Path, path: str) -> Path:
    """Join *path* onto *base_path*; fall back to *base_path*'s parent.

    Returns ``base_path / path`` when that location exists on disk,
    otherwise ``base_path.parent / path`` (without checking that the
    fallback exists).
    """
    candidate = base_path / path
    return candidate if candidate.exists() else base_path.parent / path
def non_zero_rel_mae(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """Mean absolute percentage error over positions where ``y_true`` != 0.

    Bug fix: the original ``np.where(y_true != 0)[0]`` kept only the first
    axis of the index tuple, silently mis-selecting entries for N-D inputs.
    A boolean mask indexes correctly for any dimensionality and reduces to
    the same selection for 1-D arrays.  The computation mirrors sklearn's
    ``mean_absolute_percentage_error`` (denominator clamped at float64 eps),
    so results match the previous implementation on 1-D data.

    Args:
        y_true: Ground-truth values.
        y_pred: Predicted values, same shape as ``y_true``.

    Returns:
        The MAPE restricted to non-zero ground-truth entries.
    """
    mask = y_true != 0
    t = np.asarray(y_true, dtype=np.float64)[mask]
    p = np.asarray(y_pred, dtype=np.float64)[mask]
    # Same epsilon guard sklearn applies; only matters for |t| below ~2.2e-16.
    denom = np.maximum(np.abs(t), np.finfo(np.float64).eps)
    return float(np.mean(np.abs(t - p) / denom))