import os

import datasets
import pandas as pd


_CITATION = "https://arxiv.org/abs/2310.09550"

_DESCRIPTION = """\
The Ancient Chinese Language Understanding Evaluation (ACLUE) is a benchmark
for ancient Chinese language comprehension, designed to assess how well large
language models understand ancient Chinese.
"""

_HOMEPAGE = "https://github.com/isen-zhang/ACLUE"

# The task archive; it unpacks to dev/<task>.csv and test/<task>.csv files.
_URL = r"https://huggingface.co/datasets/tyouisen/aclue/resolve/main/aclue_v1_0_0.zip"


# Each ACLUE task below becomes one dataset configuration of the same name.
task_list = [
    'polysemy_resolution',
    'poetry_sentiment_analysis',
    'named_entity_recognition',
    'basic_ancient_chinese',
    'poetry_context_prediction',
    'sentence_segmentation',
    'couplet_prediction',
    'poetry_appreciate',
    'ancient_chinese_culture',
    'ancient_phonetics',
    'homographic_character_resolution',
    'ancient_literature',
    'ancient_medical',
    'poetry_quality_assessment',
    'reading_comprehension',
]


class ACLUEConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        # Pin every task configuration to dataset version 1.0.0.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)


class ACLUE(datasets.GeneratorBasedBuilder):
    """ACLUE loader: one configuration per task, each with test and dev splits."""

    BUILDER_CONFIGS = [
        ACLUEConfig(name=task_name) for task_name in task_list
    ]

    def _info(self):
        # Every task is a four-way multiple-choice question set.
        features = datasets.Features(
            {
                "Question": datasets.Value("string"),
                "A": datasets.Value("string"),
                "B": datasets.Value("string"),
                "C": datasets.Value("string"),
                "D": datasets.Value("string"),
                "Answer": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download and extract the shared archive once, then point each split
        # at the CSV for the currently selected task configuration.
        data_dir = dl_manager.download_and_extract(_URL)
        task_name = self.config.name
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"test/{task_name}.csv"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split("dev"),
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"dev/{task_name}.csv"),
                },
            ),
        ]

    def _generate_examples(self, filepath):
        # Each CSV row becomes one example, keyed by its row index.
        df = pd.read_csv(filepath, header=0, encoding="utf-8")
        for i, instance in enumerate(df.to_dict(orient="records")):
            yield i, instance
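

# A minimal usage sketch, assuming this script is hosted in the Hugging Face
# dataset repo "tyouisen/aclue" (the repo the _URL above points into; the repo
# id is an assumption, not stated in this file). Any name from `task_list`
# selects a configuration, and each configuration exposes "test" and "dev":
#
#     from datasets import load_dataset
#
#     dev = load_dataset("tyouisen/aclue", "polysemy_resolution", split="dev")
#     print(dev[0]["Question"], dev[0]["Answer"])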