File size: 4,390 Bytes
b5beb60 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 |
import argparse
import logging
import os
import json
from collections import defaultdict
def _accuracy_summary(stats):
    """Convert a {"correct": c, "total": t} counter into an accuracy record."""
    total = stats["total"]
    return {
        "accuracy": stats["correct"] / total if total > 0 else 0,
        "correct": stats["correct"],
        "total": total,
    }


def gen_score(input_file, output_file, logger=None):
    """Aggregate per-question results into accuracy scores and write them as JSON.

    Reads *input_file*, a JSON object mapping entry ids to records with keys
    "true_false" (truthy == correct), "subject", "type", and optionally
    "category" and "task". Accuracy is aggregated overall and broken down by
    subject, question type, per-subject category, and subject-prefixed task,
    then written to *output_file* as indented JSON.

    Args:
        input_file: Path to the per-question results JSON file.
        output_file: Path the aggregated score JSON is written to.
        logger: Optional logger. Defaults to this module's logger; the None
            sentinel avoids evaluating getLogger at import time (the original
            default argument was created before logging was configured).
    """
    if logger is None:
        logger = logging.getLogger(__name__)

    with open(input_file, "r") as f:
        data = json.load(f)

    total_correct = 0
    total_count = 0
    subject_stats = defaultdict(lambda: {"correct": 0, "total": 0})
    type_stats = defaultdict(lambda: {"correct": 0, "total": 0})
    category_stats = defaultdict(lambda: defaultdict(lambda: {"correct": 0, "total": 0}))
    task_stats = defaultdict(lambda: {"correct": 0, "total": 0})

    for key, entry in data.items():
        total_count += 1
        is_correct = 1 if entry["true_false"] else 0
        total_correct += is_correct
        subject = entry["subject"]
        question_type = entry["type"].lower()

        # .get() tolerates records without "category"/"task" keys instead of
        # raising KeyError; falsy values are skipped either way.
        if entry.get("category"):
            if subject == "Coding":
                # Coding entries may carry several ';'-separated categories;
                # each is counted separately.
                for category in entry["category"].split(';'):
                    category = category.strip()
                    category_stats[subject][category]["total"] += 1
                    category_stats[subject][category]["correct"] += is_correct
            else:
                category = entry["category"]
                category_stats[subject][category]["total"] += 1
                category_stats[subject][category]["correct"] += is_correct

        if entry.get("task"):
            # Tasks are namespaced by subject to avoid cross-subject collisions.
            task = subject + '_' + entry["task"]
            task_stats[task]["total"] += 1
            task_stats[task]["correct"] += is_correct

        subject_stats[subject]["total"] += 1
        subject_stats[subject]["correct"] += is_correct
        type_stats[question_type]["total"] += 1
        type_stats[question_type]["correct"] += is_correct

    average_accuracy = total_correct / total_count if total_count > 0 else 0
    # Lazy %-style args: the message is only formatted if INFO is enabled.
    logger.info("Average accuracy: %s", average_accuracy)

    score = {
        "average": {
            "accuracy": average_accuracy,
            "correct": total_correct,
            "total": total_count,
        },
        "subject": {s: _accuracy_summary(st) for s, st in subject_stats.items()},
        "question_type": {t: _accuracy_summary(st) for t, st in type_stats.items()},
        "category": {
            s: {c: _accuracy_summary(st) for c, st in cats.items()}
            for s, cats in category_stats.items()
        },
        "task": {t: _accuracy_summary(st) for t, st in task_stats.items()},
    }

    with open(output_file, "w") as f:
        json.dump(score, f, indent=2)
def main():
    """Score every raw results JSON file found under --results_dir.

    Walks the directory tree and, for each "*.json" file that is not itself a
    "*_result.json" output, writes the aggregated score to a sibling
    "<name>_result.json" file via gen_score.
    """
    parser = argparse.ArgumentParser()
    # output
    parser.add_argument('--results_dir', type=str, default='',
                        help='Directory tree containing per-question result JSON files.')
    args = parser.parse_args()
    for root, dirs, files in os.walk(args.results_dir):
        for file in files:
            if file.endswith(".json") and not file.endswith("_result.json"):
                # Build the output name from the filename alone: a str.replace
                # on the full path would rewrite the first ".json" anywhere in
                # it (e.g. a directory named "runs.json/"), not the suffix.
                result_name = file[:-len(".json")] + "_result.json"
                gen_score(os.path.join(root, file), os.path.join(root, result_name))
if __name__ == "__main__":
    # Root logging config: level comes from the LOGLEVEL env var (INFO default).
    logging.basicConfig(
        level=os.environ.get("LOGLEVEL", "INFO").upper(),
        format="[%(name)s] %(message)s",
        datefmt="[%X]",
    )
    # Quiet chatty third-party libraries down to WARNING so app logs stay readable.
    for noisy in (
        "asyncio",
        "azure",
        "azureml",
        "datasets",
        "httpx",
        "httpcore",
        "filelock",
        "fsspec",
        "msal",
        "msrest",
        "openai",
        "PIL",
        "urllib3",
    ):
        logging.getLogger(noisy).setLevel(logging.WARNING)
    main()