# SciTS / process/eval.py
# Uploaded by lwLiu via the upload-large-folder tool (commit aadeb6f, verified).
import json
from pathlib import Path
import numpy as np
import fire
import h5py
from sklearn.metrics import accuracy_score, mean_absolute_error, f1_score
from infer_eval_utils import read_time_series_data, concat_base_path, non_zero_rel_mae, DATASET_TO_TASK
class Runner(object):
    """Command-line entry points for evaluating SciTS inference outputs."""

    def multitask_classification(self,
                                 infer_path: str = "",
                                 gts: list = [],
                                 preds: list = []):
        """Evaluate a run where every sample carries several classification tasks.

        Args:
            infer_path: Path to the inference file; results are written next
                to it under ``results/<stem>.json``.
            gts: Per-sample dicts mapping task name -> [label, ...]; only the
                first label is used as ground truth.
            preds: Per-sample dicts mapping task name -> predicted label.

        Writes per-task accuracy / macro-F1 plus an "overall" average.
        """
        tasks = gts[0].keys()
        output_fpath = Path(
            infer_path).parent / f"results/{Path(infer_path).stem}.json"
        output_fpath.parent.mkdir(parents=True, exist_ok=True)
        res_dict = {}
        success = 0  # comparisons that could be carried out
        fail = 0  # malformed predictions (missing task, non-string, ...)
        for task in tasks:
            correct_count = 0
            for gt, pred in zip(gts, preds):
                try:
                    # Lenient match: exact, or case-insensitive (LLM outputs
                    # sometimes capitalize the first letter).
                    hit = (pred[task] == gt[task][0]
                           or pred[task].lower() == gt[task][0])
                except (KeyError, IndexError, AttributeError, TypeError):
                    fail += 1
                else:
                    # BUG FIX: the original incremented ``success`` only on a
                    # correct match, so success_rate conflated accuracy with
                    # parse rate.  Count every comparable prediction instead.
                    success += 1
                    if hit:
                        correct_count += 1
            acc = correct_count / len(gts)
            print(f"Accuracy for {task}: {acc}")
            # Macro F1 over the labels of this task.
            class_f1s = []
            task_gts = [gt[task][0] for gt in gts]
            for label in set(task_gts):
                true_indices = [
                    i for i, gt in enumerate(task_gts) if gt == label
                ]
                if not true_indices:
                    recall = 0.0
                else:
                    hits = sum(1 for i in true_indices
                               if preds[i][task] == task_gts[i]
                               or preds[i][task].lower() == task_gts[i])
                    recall = hits / len(true_indices)
                pred_indices = [
                    i for i, pred in enumerate(preds)
                    if pred[task].lower() == label or pred[task] == label
                ]
                if not pred_indices:
                    precision = 0.0
                else:
                    hits = sum(1 for i in pred_indices
                               if preds[i][task] == task_gts[i]
                               or preds[i][task].lower() == task_gts[i])
                    precision = hits / len(pred_indices)
                # Smoothed harmonic mean avoids a 0/0 division.
                f1 = (2 * recall * precision) / (recall + precision + 1e-6)
                class_f1s.append(f1)
            # BUG FIX: the original stored only the *last* class's f1 here;
            # report the macro average.  float() keeps it JSON-serializable.
            res_dict[task] = {
                "acc": acc,
                "f1": float(np.mean(class_f1s)) if class_f1s else 0.0,
                "success": success,
                "fail": fail,
                "success_rate": success / (success + fail)
                if success + fail else 0.0,
            }
        # BUG FIX: the original overall f1 averaged the last task's per-class
        # f1s; average the per-task macro-F1s instead.
        res_dict["overall"] = {
            "f1": float(np.mean([r["f1"] for r in res_dict.values()])),
            "acc": float(np.mean([r["acc"] for r in res_dict.values()])),
        }
        with open(output_fpath, "w") as writer:
            json.dump(res_dict, writer, indent=4)
            writer.write("\n")
def multichoice_classification(self,
infer_path: str = "",
gts: list = [],
preds: list = []):
all_labels = set()
for gt in gts:
if isinstance(gt, list):
all_labels.update(gt)
else:
all_labels.add(gt)
all_labels = sorted(list(all_labels))
# Convert ground truth and predictions to multi-label format
y_true_multilabel = []
y_pred_multilabel = []
success = 0
fail = 0
for gt, pred in zip(gts, preds):
# Process ground truth
if isinstance(gt, list):
gt_labels = gt
else:
gt_labels = [gt]
# Process predictions
while '\n\n' in pred:
pred = pred.replace('\n\n', '\n')
pred_labels = [x.strip() for x in pred.split("\n")]
# Convert to binary vectors
gt_binary = [
1 if label in gt_labels else 0 for label in all_labels
]
pred_binary = []
for label in all_labels:
is_found = False
for pred_label in pred_labels:
if pred_label == label or pred_label.lower() == label:
is_found = True
break
if is_found:
pred_binary.append(1)
else:
pred_binary.append(0)
y_true_multilabel.append(gt_binary)
y_pred_multilabel.append(pred_binary)
y_true_multilabel = np.array(y_true_multilabel)
y_pred_multilabel = np.array(y_pred_multilabel)
# Calculate F1 score for each class
f1_scores = []
for i, label in enumerate(all_labels):
f1 = f1_score(y_true_multilabel[:, i],
y_pred_multilabel[:, i],
zero_division=0)
f1_scores.append(f1)
print(f"F1 score for class {label}: {f1:.4f}")
# Calculate mean of F1 scores
macro_f1 = np.mean(f1_scores)
print(f"Macro F1 score (mean of all classes): {macro_f1:.4f}")
# Save results
output_fpath = Path(
infer_path).parent / f"results/{Path(infer_path).stem}.json"
output_fpath.parent.mkdir(parents=True, exist_ok=True)
results = {
"macro_f1": macro_f1,
"per_class_f1": dict(zip(all_labels, f1_scores))
}
with open(output_fpath, "w") as writer:
json.dump(results, writer, indent=4)
writer.write("\n")
def classification(self, infer_path: str = ""):
gts, preds = [], []
with open(infer_path, "r") as f:
for line in f:
item = json.loads(line)
if "id" not in item:
continue
gts.append(item["ground_truth"])
preds.append(item["output"])
if any(isinstance(gt, list) for gt in gts):
return self.multichoice_classification(infer_path, gts, preds)
if isinstance(preds[0], dict):
return self.multitask_classification(infer_path, gts, preds)
# Custom comparison function: consider both exact match and case-insensitive match
# This is because LLM outputs sometimes capitalize the first letter to follow English grammar
correct_count = 0
for gt, pred in zip(gts, preds):
if pred == gt or pred.lower() == gt:
correct_count += 1
acc = correct_count / len(gts)
print(f"Accuracy: {acc}")
# Using the same case-insensitive matching as accuracy calculation
labels = list(set(gts))
# Manually calculate recall for each class
class_recalls = []
class_precisions = []
class_f1s = []
for label in labels:
# Find all samples with this true label
true_indices = [i for i, gt in enumerate(gts) if gt == label]
if len(true_indices) == 0:
recall = 0.0
class_recalls.append(0.0)
else:
# Calculate recall for this label
correct_predictions = 0
for idx in true_indices:
pred = preds[idx]
gt = gts[idx]
if pred == gt or pred.lower() == gt:
correct_predictions += 1
recall = correct_predictions / len(true_indices)
class_recalls.append(recall)
pred_indices = [
i for i, pred in enumerate(preds)
if pred.lower() == label or pred == label
]
if len(pred_indices) == 0:
precision = 0.0
class_precisions.append(0.0)
else:
correct_predictions = 0
for idx in pred_indices:
pred = preds[idx]
gt = gts[idx]
if pred == gt or pred.lower() == gt:
correct_predictions += 1
precision = correct_predictions / len(pred_indices)
class_precisions.append(precision)
f1 = (2 * recall * precision) / (recall + precision + 1e-6)
class_f1s.append(f1)
output_fpath = Path(
infer_path).parent / f"results/{Path(infer_path).stem}.json"
output_fpath.parent.mkdir(parents=True, exist_ok=True)
with open(output_fpath, "w") as writer:
json.dump({
"acc": acc,
"f1": np.mean(class_f1s),
},
writer,
indent=4)
writer.write("\n")
def mcq(self, infer_path: str = ""):
gts, preds = [], []
with open(infer_path, "r") as f:
for line in f:
item = json.loads(line)
if "id" not in item:
continue
gts.append(item["ground_truth"])
preds.append(item["output"])
# Custom comparison function: consider both exact match and case-insensitive match
# This is because LLM outputs sometimes capitalize the first letter to follow English grammar
correct_count = 0
for gt, pred in zip(gts, preds):
if pred == gt or pred.lower() == gt:
correct_count += 1
acc = correct_count / len(gts)
print(f"Accuracy: {acc}")
# Calculate recall with custom matching logic
# Using the same case-insensitive matching as accuracy calculation
try:
labels = list(set(gts))
# Manually calculate recall for each class
class_recalls = []
for label in labels:
# Find all samples with this true label
true_indices = [i for i, gt in enumerate(gts) if gt == label]
if len(true_indices) == 0:
class_recalls.append(0.0)
continue
# Calculate recall for this label (using lenient matching)
correct_predictions = 0
for idx in true_indices:
pred = preds[idx]
gt = gts[idx]
if pred == gt or pred.lower() == gt:
correct_predictions += 1
recall = correct_predictions / len(true_indices)
class_recalls.append(recall)
# Calculate macro average recall
weighted_recall = np.mean(class_recalls)
print(f"Weighted Recall: {weighted_recall}")
output_fpath = Path(
infer_path).parent / f"results/{Path(infer_path).stem}.json"
output_fpath.parent.mkdir(parents=True, exist_ok=True)
with open(output_fpath, "w") as writer:
json.dump({
"acc": acc,
"uar": weighted_recall
},
writer,
indent=4)
writer.write("\n")
except Exception as e:
print(f"Error calculating Weighted Recall: {e}")
print(
"Possible reasons: labels are not numeric or contain non-numeric labels"
)
def anomaly_detection(self, infer_path: str = ""):
gts, preds = [], []
success, fail = 0, 0
with open(infer_path, "r") as f:
for line in f:
item = json.loads(line)
if "id" not in item:
continue
if item["output"].lower() == "yes":
preds.append(True)
elif item["output"].lower() == "no":
preds.append(False)
else:
fail += 1
continue
success += 1
gts.append(item["ground_truth"])
correct_count = 0
for gt, pred in zip(gts, preds):
if pred == gt:
correct_count += 1
print(f"Success: {success}, Fail: {fail}")
acc = correct_count / len(gts)
print(f"Accuracy: {acc}")
f1 = f1_score(gts, preds)
print(f"F1 Score: {f1}")
output_fpath = Path(
infer_path).parent / f"results/{Path(infer_path).stem}.json"
output_fpath.parent.mkdir(parents=True, exist_ok=True)
with open(output_fpath, "w") as writer:
json.dump({
"acc": acc,
"f1": f1,
"success": success,
"fail": fail
},
writer,
indent=4)
writer.write("\n")
def forecasting(self, infer_path: str = ""):
gt_arrs = []
pred_arrs = []
success = 0
fail = 0
with h5py.File(infer_path, "r") as f:
base_path = Path(f["base_path"][()].decode("utf-8"))
for id in f.keys():
try:
if id not in [
"base_path", "dataset_name", "domain", "task",
"scene"
]:
gt_path = concat_base_path(
base_path,
f[id]["gt_path"][()].decode("utf-8").strip("/"))
gt_data = read_time_series_data(gt_path)
gt_data = np.array(gt_data, dtype=np.float32)
pred = f[id]["pred_result"][()]
if pred.shape != gt_data.shape:
raise ValueError(
f"Pred shape {pred.shape} does not match gt shape {gt_data.shape}"
)
gt_arrs.append(gt_data.reshape(-1))
pred_arrs.append(pred.reshape(-1))
success += 1
except Exception as e:
print(f"Error processing {id}: {e}")
fail += 1
if len(gt_arrs) == 0:
mae = "N/A"
rel_mae = "N/A"
else:
gt_arrs = np.concatenate(gt_arrs)
pred_arrs = np.concatenate(pred_arrs)
# mse = mean_squared_error(gt_arrs, pred_arrs)
mae = mean_absolute_error(gt_arrs, pred_arrs)
rel_mae = non_zero_rel_mae(gt_arrs, pred_arrs)
print(
f"MAE: {mae}, REL_MAE: {rel_mae}, Success: {success}, Fail: {fail}"
)
output_fpath = Path(
infer_path).parent / f"results/{Path(infer_path).stem}.json"
output_fpath.parent.mkdir(parents=True, exist_ok=True)
with open(output_fpath, "w") as writer:
json.dump(
{
"rel_mae": rel_mae,
"mae": mae,
"success": success,
"fail": fail,
"success_rate": success / (success + fail)
},
writer,
indent=4)
writer.write("\n")
    def synthesize(self, infer_path: str = ""):
        # Synthesis results share the forecasting storage format (HDF5 with
        # per-id gt_path/pred_result entries), so reuse its MAE evaluation.
        return self.forecasting(infer_path)
def imputation(self, infer_path: str = ""):
gt_arrs = []
pred_arrs = []
success = 0
fail = 0
with h5py.File(infer_path, "r") as f:
base_path = Path(f["base_path"][()].decode("utf-8"))
for id in f.keys():
try:
if id not in [
"base_path", "dataset_name", "domain", "task",
"scene"
]:
# gt_path = base_path / f[id]["gt_path"][
# ()].decode("utf-8")
gt_path = concat_base_path(
base_path,
f[id]["gt_path"][()].decode("utf-8").strip("/"))
gt_data = read_time_series_data(gt_path)
# input_path = base_path / f[id]["input_ts_path"][
# ()].decode("utf-8")
input_path = concat_base_path(
base_path, f[id]["input_ts_path"][(
)].decode("utf-8").strip("/"))
input_data = read_time_series_data(input_path)
pred_indices = np.where(input_data == "X")[0]
pred = f[id]["pred_result"][()]
pred = pred[pred_indices]
gt_data = gt_data[pred_indices]
if len(pred) != len(gt_data):
length_mismatch += 1
else:
success += 1
if len(pred) < len(gt_data):
pred = pred[:len(gt_data)]
if len(pred) > len(gt_data):
gt_data = gt_data[:len(pred)]
gt_arrs.append(gt_data)
pred_arrs.append(pred)
# success += 1
except Exception as e:
print(f"Error processing {id}: {e}")
fail += 1
gt_arrs = np.concatenate(gt_arrs)
pred_arrs = np.concatenate(pred_arrs)
# mse = mean_squared_error(gt_arrs, pred_arrs)
rel_mae = non_zero_rel_mae(gt_arrs, pred_arrs)
mae = mean_absolute_error(gt_arrs, pred_arrs)
print(
f"REL_MAE: {rel_mae}, MAE: {mae}, Success: {success}, Fail: {fail}"
)
output_fpath = Path(
infer_path).parent / f"results/{Path(infer_path).stem}.json"
output_fpath.parent.mkdir(parents=True, exist_ok=True)
with open(output_fpath, "w") as writer:
json.dump(
{
"rel_mae": rel_mae,
"mae": mae,
"success": success,
"fail": fail,
"success_rate": success / (success + fail)
},
writer,
indent=4)
writer.write("\n")
def event_detection(self, infer_path: str = ""):
event_gts, event_preds = [], []
seq_length = None
success = 0
total = 0
timestamp_gts, timestamp_preds = [], []
with open(infer_path, "r") as f:
for line in f:
item = json.loads(line)
if "id" not in item:
seq_length = item["seq_length"]
continue
event_gt = item["ground_truth"]["contain"]
event_gts.append(1 if event_gt else 0)
if "\n" in item["output"]:
while '\n\n' in item["output"]:
item["output"] = item["output"].replace('\n\n', '\n')
event_pred, *timestamps = item["output"].split("\n")
else:
event_pred = item["output"]
timestamps = None
event_preds.append(1 if event_pred.lower() == "yes" else 0)
if event_gt:
if "start_time" in item["ground_truth"]:
gt_timestamps = [item["ground_truth"]["start_time"]]
elif "start_time_p" in item["ground_truth"]:
gt_timestamps = [
item["ground_truth"]["start_time_p"],
item["ground_truth"]["start_time_s"]
]
if timestamps is None:
pass
else:
try:
assert len(timestamps) == len(gt_timestamps)
for pred_timestamp, gt_timestamp in zip(
timestamps, gt_timestamps):
pred_timestamp = eval(pred_timestamp)
timestamp_preds.append(pred_timestamp)
timestamp_gts.append(gt_timestamp)
success += 1
except Exception as e:
print(str(e))
total += 1
event_acc = accuracy_score(event_gts, event_preds)
event_f1 = f1_score(event_gts, event_preds)
timestamp_gts = np.array(timestamp_gts)
timestamp_preds = np.array(timestamp_preds)
mape = non_zero_rel_mae(timestamp_gts, timestamp_preds)
output_fpath = Path(
infer_path).parent / f"results/{Path(infer_path).stem}.json"
output_fpath.parent.mkdir(parents=True, exist_ok=True)
with open(output_fpath, "w") as writer:
json.dump(
{
"acc": event_acc,
"f1": event_f1,
"mape": mape,
"success_rate": success / total
},
writer,
indent=4)
writer.write("\n")
print({
"acc": event_acc,
"f1": event_f1,
"mape": mape,
"success_rate": success / total
})
def evaluate(self, infer_dir: str):
for infer_path in Path(infer_dir).glob("*"):
if infer_path.is_dir():
continue
dataset_id = infer_path.stem
task = DATASET_TO_TASK[dataset_id]
print(f"evaluating {dataset_id} ...")
getattr(self, task)(infer_path)
if __name__ == "__main__":
    # Expose Runner's methods as CLI subcommands, e.g.
    # `python eval.py classification --infer_path=...` or
    # `python eval.py evaluate --infer_dir=...`.
    fire.Fire(Runner)