diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_eng.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_eng.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c62bf206a3ff5644c5d213ef394f4f0cbe3667d0 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_eng.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: eng +doc_to_target: '{% if answer is not none %}{{answer[21:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Question:' + - </s> + - <|im_end|> +include: cot_yaml +task: afrimgsm_en_cot_eng diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_hau.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_hau.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2a397baf1e40185883569b53ffc9bb82265b4257 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_hau.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: hau +doc_to_target: '{% if answer is not none %}{{answer[21:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Question:' + - </s> + - <|im_end|> +include: cot_yaml +task: afrimgsm_en_cot_hau diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_ibo.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_ibo.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9bd7bf62b4c9fed96aa01280c9d157a08cc04efb --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_ibo.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: ibo +doc_to_target: '{% if answer is not none %}{{answer[21:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Question:' + - </s> + - <|im_end|> +include: cot_yaml +task: afrimgsm_en_cot_ibo diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_lug.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_lug.yaml new file mode 100644 index 0000000000000000000000000000000000000000..84c05bb292fdec783de75f708002ad5e53c3e3fc --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_lug.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: lug +doc_to_target: '{% if answer is not none %}{{answer[21:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Question:' + - </s> + - <|im_end|> +include: cot_yaml +task: afrimgsm_en_cot_lug
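The four configs above share the same two Jinja templates: few-shot docs (which carry a gold `answer`) are rendered without the "Question: " prefix and target the worked solution with its 21-character "Step-by-Step Answer:\n" prefix sliced off, while test docs (where `answer` is null) fall back to the bare `answer_number`. A minimal sketch of that branching with `jinja2`; the sample docs here are invented for illustration, only the field names follow the MGSM-style schema the templates assume:

```python
import jinja2

# Templates copied from the afrimgsm_en_cot_* configs above.
DOC_TO_TEXT = (
    '{% if answer is not none %}{{question+"\\nStep-by-Step Answer:"}}'
    '{% else %}{{"Question: "+question+"\\nStep-by-Step Answer:"}}{% endif %}'
)
DOC_TO_TARGET = (
    "{% if answer is not none %}{{answer[21:]}}"
    "{% else %}{{answer_number|string}}{% endif %}"
)

env = jinja2.Environment()
fewshot_doc = {  # invented example in the train-split shape
    "question": "Ali has 3 mangoes and buys 2 more. How many does he have?",
    "answer": "Step-by-Step Answer:\n3 + 2 = 5. The answer is 5.",
    "answer_number": 5,
}
test_doc = {"question": "What is 4 + 4?", "answer": None, "answer_number": 8}

# Few-shot docs keep the worked solution; answer[21:] slices off the
# 21-char "Step-by-Step Answer:\n" prefix so it is not duplicated.
print(env.from_string(DOC_TO_TEXT).render(**fewshot_doc))
print(env.from_string(DOC_TO_TARGET).render(**fewshot_doc))
# Test docs fall through to the else-branch and target the bare number.
print(env.from_string(DOC_TO_TEXT).render(**test_doc))
print(env.from_string(DOC_TO_TARGET).render(**test_doc))
```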
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_sot.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_sot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ae443f1833c3b248941bd0cdbae2e0a058625d4a --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_sot.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: sot +doc_to_target: '{% if answer is not none %}{{answer[21:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Question:' + - </s> + - <|im_end|> +include: cot_yaml +task: afrimgsm_en_cot_sot diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_swa.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_swa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1aa2d07d0e132e0cf2787d75ab6e7281b4302f97 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_swa.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: swa +doc_to_target: '{% if answer is not none %}{{answer[21:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Question:' + - </s> + - <|im_end|> +include: cot_yaml +task: afrimgsm_en_cot_swa diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_twi.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_twi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2957cb378e5ec6b27f0911eeab048aa91bf40e43 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/afrimgsm_en_cot_twi.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: twi +doc_to_target: '{% if answer is not none %}{{answer[21:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Question:' + - </s> + - <|im_end|> +include: cot_yaml +task: afrimgsm_en_cot_twi
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/cot_yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/cot_yaml new file mode 100644 index 0000000000000000000000000000000000000000..7b320046526f51725a1b01cc407793a82223439b --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimgsm/en_cot/cot_yaml @@ -0,0 +1,37 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly by the harness. +group: + - afrimgsm + - afrimgsm_en_cot +dataset_path: masakhane/afrimgsm +dataset_name: null # Overridden by language-specific config. +output_type: generate_until +training_split: train +test_split: test +generation_kwargs: + until: + - "\n\n" + - "\n" + do_sample: false + temperature: 0.0 +target_delimiter: " " +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)" + - function: "take_first" + - filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +metadata: + version: 2.0 diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/README.md b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/README.md new file mode 100644 index 0000000000000000000000000000000000000000..90de14b7fc6fb5295b7c597379a3d120abbb5ad7 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/README.md @@ -0,0 +1,40 @@ +# ArabicMMLU + +### Paper + +Title: ArabicMMLU: Assessing Massive Multitask Language Understanding in Arabic + +Abstract: https://arxiv.org/abs/2402.12840 + +The focus of language model evaluation has +transitioned towards reasoning and knowledge-intensive tasks, driven by advancements in pretraining large models. While state-of-the-art models are partially trained on large Arabic texts, evaluating their performance in Arabic remains challenging due to the limited availability of relevant datasets. To bridge this gap, we present ArabicMMLU, the first multi-task language understanding benchmark for the Arabic language, sourced from school exams across diverse educational levels in different countries spanning North Africa, the Levant, and the Gulf regions. Our data comprises 40 tasks and 14,575 multiple-choice questions in Modern Standard Arabic (MSA), and is carefully constructed by collaborating with native speakers in the region. Our comprehensive evaluations of 35 models reveal substantial room for improvement, particularly among the best open-source models. Notably, BLOOMZ, mT0, LLama2, and Falcon struggle to achieve a score of 50%, while even the top-performing Arabic-centric model only achieves a score of 62.3%. + +The authors of the paper conducted studies by varying the language of the initial prompt and answer keys between English and Arabic. However, they set English initial prompts and answer keys as the standard, which is the version implemented in this task. + +Homepage: https://github.com/mbzuai-nlp/ArabicMMLU + + +### Citation + +``` +@misc{koto2024arabicmmlu, + title={ArabicMMLU: Assessing Massive Multitask Language Understanding in Arabic}, + author={Fajri Koto and Haonan Li and Sara Shatnawi and Jad Doughman and Abdelrahman Boda Sadallah and Aisha Alraeesi and Khalid Almubarak and Zaid Alyafeai and Neha Sengupta and Shady Shehata and Nizar Habash and Preslav Nakov and Timothy Baldwin}, + year={2024}, + eprint={2402.12840}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +* `arabicmmlu`: evaluates all ArabicMMLU tasks. + +* `arabicmmlu_stem`: evaluates STEM ArabicMMLU tasks. +* `arabicmmlu_social_science`: evaluates social science ArabicMMLU tasks. +* `arabicmmlu_humanities`: evaluates humanities ArabicMMLU tasks. +* `arabicmmlu_language`: evaluates Arabic language ArabicMMLU tasks. +* `arabicmmlu_other`: evaluates other ArabicMMLU tasks.
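The group configs that follow aggregate subtask accuracy with `weight_by_size: True`. A sketch of what size-weighted aggregation means, with invented accuracy/count pairs (the harness computes this internally; this is just the arithmetic):

```python
# Hypothetical per-subtask (accuracy, number of documents) pairs.
subtasks = {
    "arabicmmlu_primary_math": (0.61, 409),
    "arabicmmlu_high_physics": (0.48, 255),
    "arabicmmlu_univ_computer_science": (0.55, 64),
}

# weight_by_size: True -> weight each subtask by its document count,
# i.e. pool all documents rather than averaging the subtask means.
total_docs = sum(n for _, n in subtasks.values())
weighted = sum(acc * n for acc, n in subtasks.values()) / total_docs
unweighted = sum(acc for acc, _ in subtasks.values()) / len(subtasks)
print(f"size-weighted acc: {weighted:.4f}, unweighted acc: {unweighted:.4f}")
```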
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_arabicmmlu.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_arabicmmlu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..58cf795aef2ef87d5e5421d549b2de9ec61617f0 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_arabicmmlu.yaml @@ -0,0 +1,12 @@ +group: arabicmmlu +task: +- arabicmmlu_other +- arabicmmlu_social_science +- arabicmmlu_humanities +- arabicmmlu_stem +- arabicmmlu_language +aggregate_metric_list: + - metric: acc + weight_by_size: True +metadata: + version: 0 diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_arabicmmlu_humanities.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_arabicmmlu_humanities.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6f61004a9c45c15606d4ef2385270e6525e4fe1c --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_arabicmmlu_humanities.yaml @@ -0,0 +1,9 @@ +group: arabicmmlu_humanities +group_alias: Humanities +task: + - arabicmmlu_humanities_tasks +aggregate_metric_list: + - metric: acc + weight_by_size: True +metadata: + version: 0 diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_arabicmmlu_language.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_arabicmmlu_language.yaml new file mode 100644 index 0000000000000000000000000000000000000000..90e57ae09bcdae6a581a88faaa2530d85df8ce28 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_arabicmmlu_language.yaml @@ -0,0 +1,9 @@ +group: arabicmmlu_language +group_alias: Language +task: + - arabicmmlu_language_tasks +aggregate_metric_list: + - metric: acc + weight_by_size: True +metadata: + version: 0 diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_arabicmmlu_other.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_arabicmmlu_other.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3e989b8c6c884b9dfea9cde8fbd8ff0351e7adf2 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_arabicmmlu_other.yaml @@ -0,0 +1,9 @@ +group: arabicmmlu_other +group_alias: Other +task: + - arabicmmlu_other_tasks +aggregate_metric_list: + - metric: acc + weight_by_size: True +metadata: + version: 0 diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_arabicmmlu_social_science.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_arabicmmlu_social_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1ece047b356aaac727da458ee9013fdab2b56c4a --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_arabicmmlu_social_science.yaml @@ -0,0 +1,9 @@ +group: arabicmmlu_social_science +group_alias: Social Science +task: + - arabicmmlu_social_science_tasks +aggregate_metric_list: + - metric: acc + weight_by_size: True +metadata: + version: 0 diff --git
a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_arabicmmlu_stem.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_arabicmmlu_stem.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a464a62a9825edf5e9158df85f062b9093ad25e6 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_arabicmmlu_stem.yaml @@ -0,0 +1,9 @@ +group: arabicmmlu_stem +group_alias: STEM +task: + - arabicmmlu_stem_tasks +aggregate_metric_list: + - metric: acc + weight_by_size: True +metadata: + version: 0 diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_default_arabicmmlu_template_yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_default_arabicmmlu_template_yaml new file mode 100644 index 0000000000000000000000000000000000000000..eac235773278bdf117108f631fd6780aab1c5349 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_default_arabicmmlu_template_yaml @@ -0,0 +1,15 @@ +dataset_path: yazeed7/ArabicMMLU +test_split: test +fewshot_split: dev +fewshot_config: + sampler: first_n +output_type: multiple_choice +doc_to_text: !function utils.doc_to_text +doc_to_choice: !function utils.doc_to_choice +doc_to_target: "Answer Key" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_generate_configs.py b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_generate_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..4d091e126c52d5cdc1f7f16b13f74fad6e4667d3 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/_generate_configs.py @@ -0,0 +1,118 @@ +""" +Take in a YAML, and output all "other" splits with this YAML +""" + +import argparse +import logging +import os + +import yaml +from tqdm import tqdm + + +eval_logger = logging.getLogger("lm-eval") + + +SUBJECTS = { + "Driving Test": "other", + "High Geography": "social_science", + "High History": "humanities", + "Islamic Studies": "humanities", + "Univ Accounting": "social_science", + "Primary General Knowledge": "other", + "Univ Political Science": "social_science", + "Primary Math": "stem", + "Middle General Knowledge": "other", + "High Biology": "stem", + "Primary Natural Science": "stem", + "High Economics": "social_science", + "Middle Natural Science": "stem", + "Middle Geography": "social_science", + "Primary Social Science": "social_science", + "Middle Computer Science": "stem", + "Middle Islamic Studies": "humanities", + "Primary Computer Science": "stem", + "High Physics": "stem", + "Middle Social Science": "social_science", + "Middle Civics": "social_science", + "High Computer Science": "stem", + "General Knowledge": "other", + "High Civics": "social_science", + "Prof Law": "humanities", + "High Islamic Studies": "humanities", + "Primary Arabic Language": "language", + "High Arabic Language": "language", + "Arabic Language (Grammar)": "language", + "Primary History": "humanities", + "Middle History": "humanities", + "Univ Economics": "social_science", + "Arabic Language (General)": "language", + "Univ Computer Science": "stem", + "Primary Islamic Studies": "humanities", + "Primary Geography": "social_science", + "High Philosophy": "humanities", + "Middle Arabic Language": "language", + "Middle Economics": "social_science", + "Univ Management": "other", +} + + +def parse_args(): + parser = argparse.ArgumentParser() + 
parser.add_argument("--base_yaml_path", default="_default_arabicmmlu_template_yaml") + parser.add_argument("--save_prefix_path", default="arabicmmlu") + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + + # get filename of base_yaml so we can `"include": ` it in our "other" YAMLs. + base_yaml_name = os.path.split(args.base_yaml_path)[-1] + with open(args.base_yaml_path, encoding="utf-8") as f: + base_yaml = yaml.full_load(f) + + ALL_CATEGORIES = [] + for subject, category in tqdm(SUBJECTS.items()): + if category not in ALL_CATEGORIES: + ALL_CATEGORIES.append(category) + + # description = f"The following are multiple choice questions (with answers) about {' '.join(subject.split('_'))}.\n\n" + + yaml_dict = { + "include": base_yaml_name, + "tag": f"arabicmmlu_{category}", + "task": f"arabicmmlu_{subject.lower().replace(' ', '_')}", + "task_alias": subject, + "dataset_name": subject, + # "description": description, + } + + file_save_path = ( + args.save_prefix_path + + f"_{subject.lower().replace(' ', '_').replace('(', '').replace(')', '')}.yaml" + ) + eval_logger.info(f"Saving yaml for subset {subject} to {file_save_path}") + with open(file_save_path, "w", encoding="utf-8") as yaml_file: + yaml.dump( + yaml_dict, + yaml_file, + allow_unicode=True, + default_style='"', + ) + + arabicmmlu_subcategories = [f"arabicmmlu_{category}" for category in ALL_CATEGORIES] + + file_save_path = args.save_prefix_path + ".yaml" + + eval_logger.info(f"Saving benchmark config to {file_save_path}") + with open(file_save_path, "w", encoding="utf-8") as yaml_file: + yaml.dump( + { + "group": "arabicmmlu", + "task": arabicmmlu_subcategories, + }, + yaml_file, + indent=4, + default_flow_style=False, + ) diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_arabic_language_grammar.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_arabic_language_grammar.yaml new file mode 100644 index 0000000000000000000000000000000000000000..baf32676283101b319325e8a5e19ebae78e2d115 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_arabic_language_grammar.yaml @@ -0,0 +1,5 @@ +"dataset_name": "Arabic Language (Grammar)" +"tag": "arabicmmlu_language_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_arabic_language_(grammar)" +"task_alias": "Arabic Language (Grammar)" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_driving_test.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_driving_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d40c9eb9d69b50b6fb6196733c40789e52dd2621 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_driving_test.yaml @@ -0,0 +1,5 @@ +"dataset_name": "Driving Test" +"tag": "arabicmmlu_other_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_driving_test" +"task_alias": "Driving Test" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_general_knowledge.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_general_knowledge.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fbd8839dba0a3b4fb552a5e9aae82da8f32d63cb --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_general_knowledge.yaml @@ -0,0 +1,5 @@ +"dataset_name": "General Knowledge" +"tag": "arabicmmlu_other_tasks" +"include": 
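A side effect of `default_style='"'` in the generator above is that every scalar in the emitted per-subject files is double-quoted, which matches the style of the configs that follow. A quick reproduction with PyYAML; the example dict mirrors one `yaml_dict` from the loop, with values filled in for the "Primary Math" subject:

```python
import yaml

# Mirrors one yaml_dict built in the generator loop above.
entry = {
    "include": "_default_arabicmmlu_template_yaml",
    "tag": "arabicmmlu_stem",
    "task": "arabicmmlu_primary_math",
    "task_alias": "Primary Math",
    "dataset_name": "Primary Math",
}
# default_style='"' forces double quotes around every key and value,
# producing the fully quoted scalars seen in the per-subject configs.
print(yaml.dump(entry, allow_unicode=True, default_style='"'))
```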
"_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_general_knowledge" +"task_alias": "General Knowledge" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_arabic_language.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_arabic_language.yaml new file mode 100644 index 0000000000000000000000000000000000000000..17d17bc8b0421424e1fcb4332f83ad5a3bc1a8ae --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_arabic_language.yaml @@ -0,0 +1,5 @@ +"dataset_name": "High Arabic Language" +"tag": "arabicmmlu_language_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_high_arabic_language" +"task_alias": "High Arabic Language" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_biology.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_biology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2b5baf0bb958b2f8dbad17d7d280e59e82ef23ab --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_biology.yaml @@ -0,0 +1,5 @@ +"dataset_name": "High Biology" +"tag": "arabicmmlu_stem_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_high_biology" +"task_alias": "High Biology" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_civics.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_civics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..870509229c7d0035ba89a814e67fc36798faaab9 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_civics.yaml @@ -0,0 +1,5 @@ +"dataset_name": "High Civics" +"tag": "arabicmmlu_social_science_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_high_civics" +"task_alias": "High Civics" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_computer_science.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_computer_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f1a66a5ce87bb987b5fd389781835bee6fdd2079 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_computer_science.yaml @@ -0,0 +1,5 @@ +"dataset_name": "High Computer Science" +"tag": "arabicmmlu_stem_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_high_computer_science" +"task_alias": "High Computer Science" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_geography.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_geography.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ad9804328b836253fd441536bec9aa4c9b21b8b0 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_geography.yaml @@ -0,0 +1,5 @@ +"dataset_name": "High Geography" +"tag": "arabicmmlu_social_science_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_high_geography" +"task_alias": "High Geography" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_history.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_history.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..49c82669df895fcce907a7d9ad151b40983b7a90 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_history.yaml @@ -0,0 +1,5 @@ +"dataset_name": "High History" +"tag": "arabicmmlu_humanities_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_high_history" +"task_alias": "High History" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_islamic_studies.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_islamic_studies.yaml new file mode 100644 index 0000000000000000000000000000000000000000..15b5358bd0a2d21917c4500bdeac6cabacdbd5d1 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_islamic_studies.yaml @@ -0,0 +1,5 @@ +"dataset_name": "High Islamic Studies" +"tag": "arabicmmlu_humanities_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_high_islamic_studies" +"task_alias": "High Islamic Studies" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_philosophy.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_philosophy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e0b20e306dc269064eaa248849a80faef1a920ae --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_philosophy.yaml @@ -0,0 +1,5 @@ +"dataset_name": "High Philosophy" +"tag": "arabicmmlu_humanities_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_high_philosophy" +"task_alias": "High Philosophy" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_physics.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_physics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a7fe5eccdba8eb490f6d8aa0aca6f7b56d74ba49 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_physics.yaml @@ -0,0 +1,5 @@ +"dataset_name": "High Physics" +"tag": "arabicmmlu_stem_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_high_physics" +"task_alias": "High Physics" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_islamic_studies.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_islamic_studies.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bacd5ace3feea344305c6579c3fdf98c55df04ae --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_islamic_studies.yaml @@ -0,0 +1,5 @@ +"dataset_name": "Islamic Studies" +"tag": "arabicmmlu_humanities_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_islamic_studies" +"task_alias": "Islamic Studies" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_arabic_language.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_arabic_language.yaml new file mode 100644 index 0000000000000000000000000000000000000000..14a2ab1a5286857cf3747bc00a01839eeb348682 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_arabic_language.yaml @@ -0,0 +1,5 @@ +"dataset_name": "Middle Arabic Language" +"tag": "arabicmmlu_language_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_middle_arabic_language" 
+"task_alias": "Middle Arabic Language" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_computer_science.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_computer_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8dd4136fb03ef49576e898a8576f2c33b37be3e8 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_computer_science.yaml @@ -0,0 +1,5 @@ +"dataset_name": "Middle Computer Science" +"tag": "arabicmmlu_stem_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_middle_computer_science" +"task_alias": "Middle Computer Science" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_economics.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_economics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..312fa2e332f0eb8b5ff0801f27b9964d370862fc --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_economics.yaml @@ -0,0 +1,5 @@ +"dataset_name": "Middle Economics" +"tag": "arabicmmlu_social_science_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_middle_economics" +"task_alias": "Middle Economics" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_general_knowledge.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_general_knowledge.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c359d85ac77792fb7373213ca32b5c7893e53c7c --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_general_knowledge.yaml @@ -0,0 +1,5 @@ +"dataset_name": "Middle General Knowledge" +"tag": "arabicmmlu_other_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_middle_general_knowledge" +"task_alias": "Middle General Knowledge" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_geography.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_geography.yaml new file mode 100644 index 0000000000000000000000000000000000000000..111b13cfe059afde2727bdc16ff3a2493304fa15 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_geography.yaml @@ -0,0 +1,5 @@ +"dataset_name": "Middle Geography" +"tag": "arabicmmlu_social_science_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_middle_geography" +"task_alias": "Middle Geography" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_natural_science.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_natural_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..265cdbaa03b9ebf6015913b17579087d75f28365 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_natural_science.yaml @@ -0,0 +1,5 @@ +"dataset_name": "Middle Natural Science" +"tag": "arabicmmlu_stem_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_middle_natural_science" +"task_alias": "Middle Natural Science" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_social_science.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_social_science.yaml new file 
mode 100644 index 0000000000000000000000000000000000000000..84c247dd17e04a595c3c783171e452b6e7f4667f --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_social_science.yaml @@ -0,0 +1,5 @@ +"dataset_name": "Middle Social Science" +"tag": "arabicmmlu_social_science_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_middle_social_science" +"task_alias": "Middle Social Science" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_computer_science.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_computer_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b89089cd2f6eb278ac452b0dcc95bde643bcef20 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_computer_science.yaml @@ -0,0 +1,5 @@ +"dataset_name": "Primary Computer Science" +"tag": "arabicmmlu_stem_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_primary_computer_science" +"task_alias": "Primary Computer Science" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_history.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_history.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f7d69ca975eb8a735cf50b4ceaeef53b292a6674 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_history.yaml @@ -0,0 +1,5 @@ +"dataset_name": "Primary History" +"tag": "arabicmmlu_humanities_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_primary_history" +"task_alias": "Primary History" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_math.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_math.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0e53adcfa7fcccfafc69e1f763dafec8dd4425db --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_math.yaml @@ -0,0 +1,5 @@ +"dataset_name": "Primary Math" +"tag": "arabicmmlu_stem_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_primary_math" +"task_alias": "Primary Math" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_natural_science.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_natural_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4e208c76faca4f773d88738d0bec0e6ca55895a8 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_natural_science.yaml @@ -0,0 +1,5 @@ +"dataset_name": "Primary Natural Science" +"tag": "arabicmmlu_stem_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_primary_natural_science" +"task_alias": "Primary Natural Science" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_univ_accounting.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_univ_accounting.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6d1d94125edcb6ffb7ee13853ce3b8d0014dbd18 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_univ_accounting.yaml @@ -0,0 +1,5 @@ +"dataset_name": "Univ Accounting" +"tag": "arabicmmlu_social_science_tasks" 
+"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_univ_accounting" +"task_alias": "Univ Accounting" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_univ_computer_science.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_univ_computer_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..42e7e89a965a006f7e8a6ae171057662d3ac93ba --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_univ_computer_science.yaml @@ -0,0 +1,5 @@ +"dataset_name": "Univ Computer Science" +"tag": "arabicmmlu_stem_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_univ_computer_science" +"task_alias": "Univ Computer Science" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_univ_economics.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_univ_economics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..21015ffad848c98398a0b4d54e71ea8e4dd58ba4 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_univ_economics.yaml @@ -0,0 +1,5 @@ +"dataset_name": "Univ Economics" +"tag": "arabicmmlu_social_science_tasks" +"include": "_default_arabicmmlu_template_yaml" +"task": "arabicmmlu_univ_economics" +"task_alias": "Univ Economics" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm8k/README.md b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm8k/README.md new file mode 100644 index 0000000000000000000000000000000000000000..13339dfa46366298389e3ad0d3910b00db2c417e --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm8k/README.md @@ -0,0 +1,59 @@ +# GSM8k + +## Paper +Training Verifiers to Solve Math Word Problems +https://arxiv.org/abs/2110.14168 + +State-of-the-art language models can match human performance on many tasks, but +they still struggle to robustly perform multi-step mathematical reasoning. To +diagnose the failures of current models and support research, we introduce GSM8K, +a dataset of 8.5K high quality linguistically diverse grade school math word problems. +We find that even the largest transformer models fail to achieve high test performance, +despite the conceptual simplicity of this problem distribution. + +NOTE: See the official implementation of the task: + https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py +for how to make use of the dataset's calculator annotations in your language +model's sample/generation function. + +Homepage: https://github.com/openai/grade-school-math + + +## Citation +``` +@misc{cobbe2021training, + title={Training Verifiers to Solve Math Word Problems}, + author={Karl Cobbe and Vineet Kosaraju and Mohammad Bavarian and Jacob Hilton and Reiichiro Nakano and Christopher Hesse and John Schulman}, + year={2021}, + eprint={2110.14168}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + +### Groups and Tasks + +#### Groups + +- `math_word_problems` +- `chain_of_thought` +- `self_consistency` + +#### Tasks + +- `gsm8k_yaml` +- `gsm8k_cot`: GSM8K with Chain-of-Thought +- `gsm8k_cot_self_consistency`: GSM8K with Chain-of-Thought and Self-Consistency + +### Checklist + +- [x] Is in Eval-harness v1.0 ? +- [ ] Has been checked for regression from v1.0? +- [ ] Has been checked for equivalence with original paper methodology? +- [ ] "Main" checked variant clearly denoted? 
+ +### Variant Wishlist + +- [ ] Variant with Calculator (see https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py for example implementation) +- [ ] Using Verifiers +- [ ] Majority voting "without CoT" diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0994081b049c0815ae85b9539b627e4c8df00dd3 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml @@ -0,0 +1,34 @@ +include: gsm8k-cot.yaml +tag: + - chain_of_thought + - self_consistency +task: gsm8k_cot_self_consistency +generation_kwargs: + until: + - "Q:" + - "\n\n" + do_sample: true + temperature: 0.2 +repeats: 64 +filter_list: + - name: "score-first" # pick only the first response, and report metrics on that + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)" + - function: "take_first" + - name: "maj@64" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)" + - function: "majority_vote" + - function: "take_first" + - name: "maj@8" # get Maj@8, via selecting the first 8 responses. Using a better estimator would be optimal. + filter: + - function: "take_first_k" + k: 8 + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)" + - function: "majority_vote" + - function: "take_first" +metadata: + version: 2.0 diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k-cot-zeroshot.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k-cot-zeroshot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c112d324acf707e5934432068abd2ad6143438ac --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k-cot-zeroshot.yaml @@ -0,0 +1,44 @@ +tag: + - math_word_problems +task: gsm8k_cot_zeroshot +dataset_path: gsm8k +dataset_name: main +output_type: generate_until +training_split: train +fewshot_split: train +test_split: test +doc_to_text: "Q: {{question}}\nA: Let's think step by step." +doc_to_target: "{{answer}}" #" {{answer.split('### ')[-1].rstrip()}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: false + regexes_to_ignore: + - "," + - "\\$" + - "(?s).*#### " + - "\\.$" +generation_kwargs: + until: + - "Q:" + - "</s>" + - "<|im_end|>" + do_sample: false +repeats: 1 +num_fewshot: 0 +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)."
+ - function: "take_first" + - name: "flexible-extract" + filter: + - function: "regex" + group_select: -1 + regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)" + - function: "take_first" +metadata: + version: 3.0 diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k-cot.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k-cot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d125b0198535122fd5b12a388e903b03ee5f6020 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k-cot.yaml @@ -0,0 +1,83 @@ +dataset_name: main +dataset_path: gsm8k +doc_to_target: '{{answer.split(''####'')[-1].strip() if answer is defined else target}}' +doc_to_text: 'Q: {{question}} + + A:' +fewshot_config: + sampler: first_n + samples: + - question: There are 15 trees in the grove. Grove workers will plant trees in the + grove today. After they are done, there will be 21 trees. How many trees did + the grove workers plant today? + target: There are 15 trees originally. Then there were 21 trees after some more + were planted. So there must have been 21 - 15 = 6. The answer is 6. + - question: If there are 3 cars in the parking lot and 2 more cars arrive, how many + cars are in the parking lot? + target: There are originally 3 cars. 2 more cars arrive. 3 + 2 = 5. The answer + is 5. + - question: Leah had 32 chocolates and her sister had 42. If they ate 35, how many + pieces do they have left in total? + target: Originally, Leah had 32 chocolates. Her sister had 42. So in total they + had 32 + 42 = 74. After eating 35, they had 74 - 35 = 39. The answer is 39. + - question: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 + lollipops. How many lollipops did Jason give to Denny? + target: Jason started with 20 lollipops. Then he had 12 after giving some to Denny. + So he gave Denny 20 - 12 = 8. The answer is 8. + - question: Shawn has five toys. For Christmas, he got two toys each from his mom and + dad. How many toys does he have now? + target: Shawn started with 5 toys. If he got 2 toys each from his mom and dad, + then that is 4 more toys. 5 + 4 = 9. The answer is 9. + - question: There were nine computers in the server room. Five more computers were + installed each day, from monday to thursday. How many computers are now in the + server room? + target: There were originally 9 computers. For each of 4 days, 5 more computers + were added. So 5 * 4 = 20 computers were added. 9 + 20 is 29. The answer is + 29. + - question: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, + he lost 2 more. How many golf balls did he have at the end of wednesday? + target: Michael started with 58 golf balls. After losing 23 on tuesday, he had + 58 - 23 = 35. After losing 2 more, he had 35 - 2 = 33 golf balls. The answer + is 33. + - question: Olivia has $23. She bought five bagels for $3 each. How much money does + she have left? + target: Olivia had 23 dollars. 5 bagels for 3 dollars each will be 5 x 3 = 15 + dollars. So she has 23 - 15 dollars left. 23 - 15 is 8. The answer is 8. +filter_list: +- filter: + - function: regex + regex_pattern: The answer is (\-?[0-9\.\,]+). 
+ - function: take_first + name: strict-match +- filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +generation_kwargs: + do_sample: false + until: + - 'Q:' + - </s> + - <|im_end|> +tag: +- chain_of_thought +metadata: + version: 3.0 +metric_list: +- aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: false + metric: exact_match + regexes_to_ignore: + - ',' + - \$ + - '(?s).*#### ' + - \.$ +num_fewshot: 8 +output_type: generate_until +repeats: 1 +task: gsm8k_cot +test_split: test diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a9d5bb39aedc0e2b991f0d79f2de6face47a31cf --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k.yaml @@ -0,0 +1,45 @@ +tag: + - math_word_problems +task: gsm8k +dataset_path: gsm8k +dataset_name: main +output_type: generate_until +training_split: train +fewshot_split: train +test_split: test +doc_to_text: "Question: {{question}}\nAnswer:" +doc_to_target: "{{answer}}" #" {{answer.split('### ')[-1].rstrip()}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: false + regexes_to_ignore: + - "," + - "\\$" + - "(?s).*#### " + - "\\.$" +generation_kwargs: + until: + - "Question:" + - "</s>" + - "<|im_end|>" + do_sample: false + temperature: 0.0 +repeats: 1 +num_fewshot: 5 +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "#### (\\-?[0-9\\.\\,]+)" + - function: "take_first" + - name: "flexible-extract" + filter: + - function: "regex" + group_select: -1 + regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)" + - function: "take_first" +metadata: + version: 3.0
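The strict-match and flexible-extract pipelines above differ only in how forgiving extraction is, and `exact_match` then normalizes both prediction and gold with `regexes_to_ignore`; in particular `(?s).*#### ` reduces a full GSM8K solution to its final number, and `\.$` drops the trailing period that both extraction patterns can capture. A self-contained sketch of these steps with plain `re` (the sample strings are invented, and this mirrors my reading of the harness's regex-filter semantics rather than importing its filter classes):

```python
import re

STRICT = r"The answer is (\-?[0-9\.\,]+)"      # strict-match pattern above
FLEXIBLE = r"(-?[$0-9.,]{2,})|(-?[0-9]+)"      # flexible-extract pattern
IGNORE = [",", r"\$", r"(?s).*#### ", r"\.$"]  # regexes_to_ignore, in order

def extract_strict(completion):
    m = re.search(STRICT, completion)
    return m.group(1) if m else None

def extract_flexible(completion):
    # group_select: -1 keeps the last match; with two alternatives,
    # findall yields tuples, so take whichever group is non-empty.
    matches = re.findall(FLEXIBLE, completion)
    if not matches:
        return None
    return matches[-1][0] or matches[-1][1]

def normalize(s):
    # exact_match strips each ignore pattern from prediction and gold;
    # "(?s).*#### " cuts everything up to the final "#### " marker.
    for pat in IGNORE:
        s = re.sub(pat, "", s)
    return s.strip().lower()

completion = "He sold 5 * 12 = 60 clips. The answer is 60."
gold = "He sold 5 * 12 = 60 clips.\n#### 60"
# Both extractors capture the trailing period ("60."); the "\.$" ignore
# pattern is what makes the comparison succeed.
assert normalize(extract_strict(completion)) == normalize(gold) == "60"
assert normalize(extract_flexible(completion)) == "60"
```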
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm_plus/README.md b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm_plus/README.md new file mode 100644 index 0000000000000000000000000000000000000000..173a8a5e8225c9c69314d93241c4304802b54bc6 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm_plus/README.md @@ -0,0 +1,48 @@ +# gsm_plus + +### Paper + +Title: `GSM-PLUS: A Comprehensive Benchmark for Evaluating the Robustness of LLMs as Mathematical Problem Solvers` + +Abstract: `Large language models (LLMs) have achieved impressive performance across various mathematical reasoning benchmarks. However, there are increasing debates regarding whether these models truly understand and apply mathematical knowledge or merely rely on shortcuts for mathematical reasoning. One essential and frequently occurring evidence is that when the math questions are slightly changed, LLMs can behave incorrectly. This motivates us to evaluate the robustness of LLMs’ math reasoning capability by testing a wide range of question variations. We introduce the adversarial grade school math (GSM-PLUS) dataset, an extension of GSM8K augmented with various mathematical perturbations. Our experiments on 25 LLMs and 4 prompting techniques show that while LLMs exhibit different levels of math reasoning abilities, their performances are far from robust. In particular, even for problems that have been solved in GSM8K, LLMs can make mistakes when new statements are added or the question targets are altered. We also explore whether more robust performance can be achieved by composing existing prompting methods, in which we try an iterative method that generates and verifies each intermediate thought based on its reasoning goal and calculation result.` + +Homepage: https://huggingface.co/datasets/qintongli/GSM-Plus + +### Citation + +```bibtex +@misc{li2024gsmpluscomprehensivebenchmarkevaluating, + title={GSM-Plus: A Comprehensive Benchmark for Evaluating the Robustness of LLMs as Mathematical Problem Solvers}, + author={Qintong Li and Leyang Cui and Xueliang Zhao and Lingpeng Kong and Wei Bi}, + year={2024}, + eprint={2402.19255}, + archivePrefix={arXiv}, + primaryClass={cs.CL}, + url={https://arxiv.org/abs/2402.19255}, +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet + +#### Tasks + +The following tasks evaluate subjects in the gsm_plus dataset: +- `gsm_plus` +- `gsm_plus_mini` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm_plus/gsm_plus.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm_plus/gsm_plus.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6eb08f1ad8a543dc07d89ee02aed7d9e986c844b --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm_plus/gsm_plus.yaml @@ -0,0 +1,44 @@ +tag: + - math_word_problems +task: gsm_plus +dataset_path: qintongli/GSM-Plus +output_type: generate_until +training_split: test +fewshot_split: test +test_split: test +doc_to_text: "Question: {{question}}\nAnswer:" +doc_to_target: "{{solution}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: false + regexes_to_ignore: + - "," + - "\\$" + - "(?s).*#### " + - "\\.$" +generation_kwargs: + until: + - "Question:" + - "</s>" + - "<|im_end|>" + do_sample: false + temperature: 0.0 +repeats: 1 +num_fewshot: 5 +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "#### (\\-?[0-9\\.\\,]+)" + - function: "take_first" + - name: "flexible-extract" + filter: + - function: "regex" + group_select: -1 + regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)" + - function: "take_first" +metadata: + version: 1.0 diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm_plus/gsm_plus_mini.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm_plus/gsm_plus_mini.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b03cfd2ea8dd639c1fc2135fc288cd2f7367cd9f --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/gsm_plus/gsm_plus_mini.yaml @@ -0,0 +1,44 @@ +tag: + - math_word_problems +task: gsm_plus_mini +dataset_path: qintongli/GSM-Plus +output_type: generate_until +training_split: testmini +fewshot_split: testmini +test_split: testmini +doc_to_text: "Question: {{question}}\nAnswer:" +doc_to_target: "{{solution}}"
+metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: false + regexes_to_ignore: + - "," + - "\\$" + - "(?s).*#### " + - "\\.$" +generation_kwargs: + until: + - "Question:" + - "</s>" + - "<|im_end|>" + do_sample: false + temperature: 0.0 +repeats: 1 +num_fewshot: 5 +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "#### (\\-?[0-9\\.\\,]+)" + - function: "take_first" + - name: "flexible-extract" + filter: + - function: "regex" + group_select: -1 + regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)" + - function: "take_first" +metadata: + version: 1.0 diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/README.md b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/README.md new file mode 100644 index 0000000000000000000000000000000000000000..108626ae34ba4deb88d22b2ca02f43c54d2fcb5d --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/README.md @@ -0,0 +1,49 @@ +# HAE-RAE BENCH + +### Paper + +Title: `HAE-RAE Bench: Evaluation of Korean Knowledge in Language Models` + +Abstract: `Large Language Models (LLMs) trained on massive corpora demonstrate impressive capabilities in a wide range of tasks. While there are ongoing efforts to adapt these models to languages beyond English, the attention given to their evaluation methodologies remains limited. Current multilingual benchmarks often rely on back translations or re-implementations of English tests, limiting their capacity to capture unique cultural and linguistic nuances. To bridge this gap for the Korean language, we introduce HAE-RAE Bench, a dataset curated to challenge models lacking Korean cultural and contextual depth. The dataset encompasses six downstream tasks across four domains: vocabulary, history, general knowledge, and reading comprehension. Contrary to traditional evaluation suites focused on token or sequence classification and specific mathematical or logical reasoning, HAE-RAE Bench emphasizes a model's aptitude for recalling Korean-specific knowledge and cultural contexts. Comparative analysis with prior Korean benchmarks indicates that the HAE-RAE Bench presents a greater challenge to non-native models, by disturbing abilities and knowledge learned from English being transferred.` + +Homepage: https://huggingface.co/datasets/HAERAE-HUB/HAE_RAE_BENCH + +### Citation + +``` +@misc{son2023haerae, + title={HAE-RAE Bench: Evaluation of Korean Knowledge in Language Models}, + author={Guijin Son and Hanwool Lee and Suwan Kim and Huiseo Kim and Jaecheol Lee and Je Won Yeom and Jihyu Jung and Jung Woo Kim and Songseong Kim}, + year={2023}, + eprint={2309.02706}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +* `haerae`: consists of the five tasks provided in the HAE-RAE BENCH paper. 'Reading Comprehension' was excluded from the implementation due to copyright issues and will be included in the next haerae update. For the other tasks, some of the data may be replaced or expanded with the production of HAE-RAE v1.1; please note this when using the benchmark. + +#### Tasks + +The following tasks evaluate subjects in the HaeRae dataset: + +- `haerae_standard_nomenclature` +- `haerae_loan_word` +- `haerae_rare_word` +- `haerae_general_knowledge` +- `haerae_history` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task?
+ * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/_default_haerae_yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/_default_haerae_yaml new file mode 100644 index 0000000000000000000000000000000000000000..807c10e0850078de24227d2738093e4511079690 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/_default_haerae_yaml @@ -0,0 +1,16 @@ +dataset_path: HAERAE-HUB/HAE_RAE_BENCH +test_split: test +fewshot_split: test +output_type: multiple_choice +doc_to_text: "{{query}}" +doc_to_choice: ["(A)", "(B)", "(C)", "(D)", "(E)"] +doc_to_target: "{{answer}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/_haerae.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/_haerae.yaml new file mode 100644 index 0000000000000000000000000000000000000000..acf413fb8293c5f32001305e09d25dfd6b1dfc5d --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/_haerae.yaml @@ -0,0 +1,16 @@ +group: haerae +task: + - haerae_general_knowledge + - haerae_history + - haerae_loan_word + - haerae_rare_word + - haerae_standard_nomenclature +aggregate_metric_list: + - metric: acc + aggregation: mean + weight_by_size: true + - metric: acc_norm + aggregation: mean + weight_by_size: true +metadata: + version: 1.0 diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_gk.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_gk.yaml new file mode 100644 index 0000000000000000000000000000000000000000..97d2bd71d8b9a333a16a3065a14276e0b49926da --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_gk.yaml @@ -0,0 +1,3 @@ +dataset_name: general_knowledge +include: _default_haerae_yaml +task: haerae_general_knowledge diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_hi.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_hi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ed366912762cf7d1784fdef9103223fc82ab4c70 --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_hi.yaml @@ -0,0 +1,3 @@ +dataset_name: history +include: _default_haerae_yaml +task: haerae_history diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_lw.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_lw.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1cb572784f06fa9b1299d8c2c817bc9541ce646b --- /dev/null +++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_lw.yaml @@ -0,0 +1,3 @@ +dataset_name: loan_words +include: _default_haerae_yaml +task: haerae_loan_word diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_rw.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_rw.yaml new file mode 100644 index 
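Because the default config above declares `output_type: multiple_choice`, each of the five literal choice strings "(A)" through "(E)" is scored by loglikelihood: `acc` takes the raw argmax, while `acc_norm`, as I understand the harness convention, normalizes by byte length first. A sketch of the two decision rules with invented scores:

```python
# Invented loglikelihood scores for the five haerae choice strings.
choices = ["(A)", "(B)", "(C)", "(D)", "(E)"]
lls = [-4.1, -3.2, -5.0, -3.9, -4.7]

acc_idx = max(range(len(choices)), key=lambda i: lls[i])
# acc_norm: divide by the byte length of each choice before the argmax
# (assuming byte-length normalization, per the harness convention).
norm_idx = max(
    range(len(choices)),
    key=lambda i: lls[i] / len(choices[i].encode("utf-8")),
)
print(choices[acc_idx], choices[norm_idx])  # -> (B) (B)

# Note: these five choice strings all have the same byte length, so the
# normalization cannot change the argmax here; acc_norm matters more for
# tasks whose answer options differ in length.
```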
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/headqa/README.md b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/headqa/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9e061f0ed44e65ef04cc9d98220058051d509da6
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/headqa/README.md
@@ -0,0 +1,57 @@
+# HEAD-QA
+
+### Paper
+
+HEAD-QA: A Healthcare Dataset for Complex Reasoning
+https://arxiv.org/pdf/1906.04701.pdf
+
+HEAD-QA is a multi-choice HEAlthcare Dataset. The questions come from exams to access a specialized position in the
+Spanish healthcare system, and are challenging even for highly specialized humans. They are designed by the Ministerio
+de Sanidad, Consumo y Bienestar Social.
+The dataset contains questions about the following topics: medicine, nursing, psychology, chemistry, pharmacology and biology.
+
+Homepage: https://aghie.github.io/head-qa/
+
+
+### Citation
+
+```
+@inproceedings{vilares-gomez-rodriguez-2019-head,
+    title = "{HEAD}-{QA}: A Healthcare Dataset for Complex Reasoning",
+    author = "Vilares, David and
+      G{\'o}mez-Rodr{\'i}guez, Carlos",
+    booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
+    month = jul,
+    year = "2019",
+    address = "Florence, Italy",
+    publisher = "Association for Computational Linguistics",
+    url = "https://www.aclweb.org/anthology/P19-1092",
+    doi = "10.18653/v1/P19-1092",
+    pages = "960--966",
+    abstract = "We present HEAD-QA, a multi-choice question answering testbed to encourage research on complex reasoning. The questions come from exams to access a specialized position in the Spanish healthcare system, and are challenging even for highly specialized humans. We then consider monolingual (Spanish) and cross-lingual (to English) experiments with information retrieval and neural techniques. We show that: (i) HEAD-QA challenges current methods, and (ii) the results lag well behind human performance, demonstrating its usefulness as a benchmark for future work.",
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+- `headqa`: Evaluates `headqa_en` and `headqa_es`
+
+#### Tasks
+
+* `headqa_en` - English variant of HEAD-QA
+* `headqa_es` - Spanish variant of HEAD-QA
+
+### Checklist
+
+* [x] Is the task an existing benchmark in the literature?
+  * [ ] Have you referenced the original paper that introduced the task?
+  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [x] Is the "Main" variant of this task clearly denoted?
+* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
+  * [x] Same as LM Evaluation Harness v0.3.0 implementation
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/headqa/headqa_en.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/headqa/headqa_en.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3d1b0b3e015240cc3285ffe67c167901bfa385e1
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/headqa/headqa_en.yaml
@@ -0,0 +1,22 @@
+tag: headqa
+task: headqa_en
+dataset_path: EleutherAI/headqa
+dataset_name: en
+output_type: multiple_choice
+training_split: train
+validation_split: validation
+test_split: test
+doc_to_text: "Question: {{qtext}}\nAnswer:"
+doc_to_target: "{{ra - 1}}" # this will be cast to an int.
+doc_to_choice: "{{answers|map(attribute='atext')|list}}"
+should_decontaminate: true
+doc_to_decontamination_query: query
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+  - metric: acc_norm
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 1.0
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/headqa/headqa_es.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/headqa/headqa_es.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..88e202f753e18f6fd6b8e303353cc0f38fce73e3
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/headqa/headqa_es.yaml
@@ -0,0 +1,3 @@
+include: headqa_en.yaml
+task: headqa_es
+dataset_name: es
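The Jinja fields in `headqa_en.yaml` map a raw HEAD-QA record to a prompt, a choice list, and a 0-based target index; `ra` is 1-based in the dataset, hence `ra - 1`. The same mapping in plain Python, with a made-up record for illustration:

```python
# Hypothetical HEAD-QA-style record, shaped like the HF dataset fields used above.
doc = {
    "qtext": "Which vitamin deficiency causes scurvy?",
    "ra": 2,  # 1-based index of the right answer
    "answers": [
        {"aid": 1, "atext": "Vitamin A"},
        {"aid": 2, "atext": "Vitamin C"},
        {"aid": 3, "atext": "Vitamin D"},
    ],
}

prompt = f"Question: {doc['qtext']}\nAnswer:"   # doc_to_text
choices = [a["atext"] for a in doc["answers"]]  # doc_to_choice
target = doc["ra"] - 1                          # doc_to_target, cast to int

print(prompt)
print(choices[target])  # -> 'Vitamin C'
```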
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_cloze/README.md b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_cloze/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9d33dce59bab7a4736b822d20cc973669d1b3874
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_cloze/README.md
@@ -0,0 +1,56 @@
+# LAMBADA Cloze
+
+### Paper
+
+Title: `The LAMBADA dataset: Word prediction requiring a broad discourse context`
+
+Abstract: https://arxiv.org/abs/1606.06031
+
+Cloze-style LAMBADA dataset.
+LAMBADA is a dataset to evaluate the capabilities of computational models for text
+understanding by means of a word prediction task. LAMBADA is a collection of narrative
+passages sharing the characteristic that human subjects are able to guess their last
+word if they are exposed to the whole passage, but not if they only see the last
+sentence preceding the target word. To succeed on LAMBADA, computational models
+cannot simply rely on local context, but must be able to keep track of information
+in the broader discourse.
+
+Homepage: https://zenodo.org/record/2630551#.X4Xzn5NKjUI
+
+
+### Citation
+
+```
+@misc{paperno2016lambada,
+    author={Paperno, Denis and Kruszewski, Germán and Lazaridou, Angeliki and Pham, Quan Ngoc and Bernardi, Raffaella and Pezzelle, Sandro and Baroni, Marco and Boleda, Gemma and Fernández, Raquel},
+    title={The LAMBADA dataset},
+    DOI={10.5281/zenodo.2630551},
+    publisher={Zenodo},
+    year={2016},
+    month={Aug}
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* `lambada_cloze`
+
+#### Tasks
+
+* `lambada_openai_cloze_yaml`
+* `lambada_standard_cloze_yaml`
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+  * [ ] Have you referenced the original paper that introduced the task?
+  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_cloze/lambada_openai_cloze.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_cloze/lambada_openai_cloze.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..602a3ff44dc6383f47ed4799c4bd3f619f8ff136
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_cloze/lambada_openai_cloze.yaml
@@ -0,0 +1,20 @@
+tag:
+  - lambada_cloze
+task: lambada_openai_cloze_yaml
+dataset_path: EleutherAI/lambada_openai
+dataset_name: default
+output_type: loglikelihood
+test_split: test
+doc_to_text: "{{text.split(' ')[:-1]|join(' ')}} ____. ->"
+doc_to_target: "{{' '+text.split(' ')[-1]}}"
+should_decontaminate: true
+doc_to_decontamination_query: "{{text}}"
+metric_list:
+  - metric: perplexity
+    aggregation: perplexity
+    higher_is_better: false
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 1.0
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_cloze/lambada_standard_cloze.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_cloze/lambada_standard_cloze.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9c1568b3394f03b22bb9a4ab20bf35bdda3b9440
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_cloze/lambada_standard_cloze.yaml
@@ -0,0 +1,21 @@
+tag:
+  - lambada_cloze
+task: lambada_standard_cloze_yaml
+dataset_path: lambada
+dataset_name: null
+output_type: loglikelihood
+validation_split: validation
+test_split: test
+doc_to_text: "{{text.split(' ')[:-1]|join(' ')}} ____. ->"
+doc_to_target: "{{' '+text.split(' ')[-1]}}"
+should_decontaminate: true
+doc_to_decontamination_query: "{{text}}"
+metric_list:
+  - metric: perplexity
+    aggregation: perplexity
+    higher_is_better: false
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 1.0
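Both cloze configs build the prompt by splitting the passage on spaces: everything before the last word becomes the context, suffixed with a `____. ->` cloze marker, and the last word (with its leading space) is the continuation whose loglikelihood is measured. The same split in plain Python, with an invented passage:

```python
text = "He tossed the ball to the dog and the dog caught it"

words = text.split(" ")
prompt = " ".join(words[:-1]) + " ____. ->"  # doc_to_text
target = " " + words[-1]                     # doc_to_target

print(repr(prompt))  # 'He tossed the ball to the dog and the dog caught ____. ->'
print(repr(target))  # ' it'
```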
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_multilingual/README.md b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_multilingual/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3845c95ca3839bea04e6d49d4a373515012f78da
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_multilingual/README.md
@@ -0,0 +1,49 @@
+# LAMBADA
+
+### Paper
+The LAMBADA dataset: Word prediction requiring a broad discourse context
+https://arxiv.org/pdf/1606.06031.pdf
+
+LAMBADA is a dataset to evaluate the capabilities of computational models for text
+understanding by means of a word prediction task. LAMBADA is a collection of narrative
+passages sharing the characteristic that human subjects are able to guess their last
+word if they are exposed to the whole passage, but not if they only see the last
+sentence preceding the target word. To succeed on LAMBADA, computational models
+cannot simply rely on local context, but must be able to keep track of information
+in the broader discourse.
+
+Homepage: https://zenodo.org/record/2630551#.X4Xzn5NKjUI
+
+### Citation
+
+@misc{paperno2016lambada,
+    author={Paperno, Denis and Kruszewski, Germán and Lazaridou, Angeliki and Pham, Quan Ngoc and Bernardi, Raffaella and Pezzelle, Sandro and Baroni, Marco and Boleda, Gemma and Fernández, Raquel},
+    title={The LAMBADA dataset},
+    DOI={10.5281/zenodo.2630551},
+    publisher={Zenodo},
+    year={2016},
+    month={Aug}
+}
+
+### Groups and Tasks
+
+#### Groups
+
+* `lambada_multilingual`: Evaluates all `lambada_mt_X` tasks
+
+#### Tasks
+
+* `lambada_mt_{en, fr, de, it, es}`: Machine-translated versions of OpenAI's Lambada variant.
+
+### Checklist
+
+* [x] Is the task an existing benchmark in the literature?
+  * [x] Have you referenced the original paper that introduced the task?
+  * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+(This task is novel to the Evaluation Harness, and has been checked against v0.3.0 of the harness.)
+
+
+If other tasks on this dataset are already supported:
+* [x] Is the "Main" variant of this task clearly denoted?
+* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [x] Have you noted which, if any, published evaluation setups are matched by this variant?
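These are pure loglikelihood tasks (no generation), reporting `perplexity` over the target word and exact-match `acc` per language; the per-language YAMLs follow below. A hedged sketch of invoking them through the harness's Python API, assuming the v0.4-style `simple_evaluate` entry point; the model is only an example:

```python
# Sketch only: assumes lm-evaluation-harness v0.4-style API; adjust
# model_args to your setup.
from lm_eval import simple_evaluate

results = simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",
    tasks=["lambada_openai_mt_en", "lambada_openai_mt_de"],
)
print(results["results"])  # per-task perplexity and acc
```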
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_multilingual/lambada_mt_de.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_multilingual/lambada_mt_de.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ef24c84eddbae9e24f6ce0a229b4ede28ee1e83e
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_multilingual/lambada_mt_de.yaml
@@ -0,0 +1,3 @@
+include: lambada_mt_en.yaml
+task: lambada_openai_mt_de
+dataset_name: de
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_multilingual/lambada_mt_en.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_multilingual/lambada_mt_en.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8af05cdb00d322d40991d97af111695b840e6860
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_multilingual/lambada_mt_en.yaml
@@ -0,0 +1,20 @@
+tag:
+  - lambada_multilingual
+task: lambada_openai_mt_en
+dataset_path: EleutherAI/lambada_openai
+dataset_name: en
+output_type: loglikelihood
+test_split: test
+doc_to_text: "{{text.split(' ')[:-1]|join(' ')}}"
+doc_to_target: "{{' '+text.split(' ')[-1]}}"
+should_decontaminate: true
+doc_to_decontamination_query: "{{text}}"
+metric_list:
+  - metric: perplexity
+    aggregation: perplexity
+    higher_is_better: false
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 1.0
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_multilingual/lambada_mt_es.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_multilingual/lambada_mt_es.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..afe53b00d3eeff6b6b12549d2176fe25ba39db02
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_multilingual/lambada_mt_es.yaml
@@ -0,0 +1,3 @@
+include: lambada_mt_en.yaml
+task: lambada_openai_mt_es
+dataset_name: es
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_multilingual/lambada_mt_fr.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_multilingual/lambada_mt_fr.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dee21c03d61913c64ff56690a531fe918fe08eee
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_multilingual/lambada_mt_fr.yaml
@@ -0,0 +1,3 @@
+include: lambada_mt_en.yaml
+task: lambada_openai_mt_fr
+dataset_name: fr
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_multilingual/lambada_mt_it.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_multilingual/lambada_mt_it.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2c7a929a4ea9df369f2c82610f6c59d28fdf45bf
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/lambada_multilingual/lambada_mt_it.yaml
@@ -0,0 +1,3 @@
+include: lambada_mt_en.yaml
+task: lambada_openai_mt_it
+dataset_name: it
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/wsc273/README.md b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/wsc273/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..59e730471db0a8ebbab7e127427a0eeadc7dfa1b
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/wsc273/README.md
@@ -0,0 +1,60 @@
+# WSC273
+
+### Paper
+
+Title: `The Winograd Schema Challenge`
+
+Abstract: http://commonsensereasoning.org/2011/papers/Levesque.pdf
+
+A Winograd schema is a pair of sentences that differ in only one or two words
+and that contain an ambiguity that is resolved in opposite ways in the two
+sentences and requires the use of world knowledge and reasoning for its resolution.
+The Winograd Schema Challenge 273 is a collection of 273 such Winograd schemas.
+
+NOTE: This evaluation of the Winograd Schema Challenge is based on `partial evaluation`
+as described by Trinh & Le in A Simple Method for Commonsense Reasoning (2018).
+See: https://arxiv.org/abs/1806.02847
+
+Homepage: https://cs.nyu.edu/~davise/papers/WinogradSchemas/WS.html
+
+
+### Citation
+
+```
+@inproceedings{ea01b9c0db064caca6986b925d75f2bb,
+    title = "The winograd schema challenge",
+    abstract = "In this paper, we present an alternative to the Turing Test that has some conceptual and practical advantages. A Wino-grad schema is a pair of sentences that differ only in one or two words and that contain a referential ambiguity that is resolved in opposite directions in the two sentences. We have compiled a collection of Winograd schemas, designed so that the correct answer is obvious to the human reader, but cannot easily be found using selectional restrictions or statistical techniques over text corpora. A contestant in the Winograd Schema Challenge is presented with a collection of one sentence from each pair, and required to achieve human-level accuracy in choosing the correct disambiguation.",
+    author = "Levesque, {Hector J.} and Ernest Davis and Leora Morgenstern",
+    year = "2012",
+    language = "English (US)",
+    isbn = "9781577355601",
+    series = "Proceedings of the International Conference on Knowledge Representation and Reasoning",
+    publisher = "Institute of Electrical and Electronics Engineers Inc.",
+    pages = "552--561",
+    booktitle = "13th International Conference on the Principles of Knowledge Representation and Reasoning, KR 2012",
+    note = "13th International Conference on the Principles of Knowledge Representation and Reasoning, KR 2012 ; Conference date: 10-06-2012 Through 14-06-2012",
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* Not part of any group yet.
+
+#### Tasks
+
+* `wsc273`
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+  * [ ] Have you referenced the original paper that introduced the task?
+  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
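Under partial evaluation (Trinh & Le, 2018), each candidate referent is substituted at the pronoun position and the model is scored only on the text after the substitution: the option with the higher log P(suffix | prefix + option) wins. That is exactly what `doc_to_choice` and `doc_to_target` in the YAML below encode. A minimal sketch, with `loglikelihood` standing in for a real model call:

```python
def partial_score(text, pronoun_loc, pronoun, options, loglikelihood):
    """Pick the option maximizing log P(suffix | prefix + option).

    `loglikelihood(context, continuation)` stands in for a model call
    returning log P(continuation | context).
    """
    prefix = text[:pronoun_loc]                 # the doc_to_choice template
    suffix = text[pronoun_loc + len(pronoun):]  # the doc_to_target continuation
    scores = [loglikelihood(prefix + opt, suffix) for opt in options]
    return max(range(len(options)), key=scores.__getitem__)


# Made-up example with a dummy scorer (a real run queries a language model):
text = "The trophy doesn't fit in the suitcase because it is too big."
winner = partial_score(
    text,
    pronoun_loc=text.index("it is"),
    pronoun="it",
    options=["the trophy", "the suitcase"],
    loglikelihood=lambda ctx, cont: 0.0,  # dummy stand-in
)
```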
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/wsc273/default.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/wsc273/default.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c6f7335700066c55c262c0fa1a49beb549b13f9c
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/wsc273/default.yaml
@@ -0,0 +1,17 @@
+task: wsc273
+dataset_path: winograd_wsc
+dataset_name: wsc273
+output_type: multiple_choice
+test_split: test
+doc_to_text: label
+process_docs: !function utils.process_doc
+doc_to_target: "{% set index = pronoun_loc + pronoun | length %}{{text[index:]}}"
+doc_to_choice: "{% set template = text[:pronoun_loc] %}{{[template+options[0], template+options[1]]}}"
+should_decontaminate: true
+doc_to_decontamination_query: text
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 1.0
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/wsc273/utils.py b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/wsc273/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..97a0c529d07f440e31373e0a3f4ec92c54db7cbf
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/wsc273/utils.py
@@ -0,0 +1,36 @@
+upper_pronouns = [
+    "A",
+    "An",
+    "The",
+    "She",
+    "He",
+    "It",
+    "They",
+    "My",
+    "His",
+    "Her",
+    "Their",
+]
+
+
+def process_doc(dataset):
+    def process_fn(doc):
+        # The HF implementation of `wsc273` is not `partial evaluation` friendly.
+        # Collapse double spaces so `pronoun_loc` indexes the text correctly.
+        doc["text"] = doc["text"].replace("  ", " ")
+        doc["options"][0] = __normalize_option(doc, doc["options"][0])
+        doc["options"][1] = __normalize_option(doc, doc["options"][1])
+        return doc
+
+    return dataset.map(process_fn)
+
+
+def __normalize_option(doc, option):
+    # Append `'s` to possessive determiner based options.
+    if doc["pronoun"].lower() in ["my", "his", "her", "our", "their"]:
+        option += "'s"
+    # Appropriately lowercase the pronoun in the option.
+    pronoun = option.split()[0]
+    start_of_sentence = doc["text"][doc["pronoun_loc"] - 2] == "."
+    if not start_of_sentence and pronoun in upper_pronouns:
+        return option.replace(pronoun, pronoun.lower())
+    return option
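As a quick illustration of what `__normalize_option` does, here is a hand-built record run through it (illustrative only; this assumes the helper above is importable, e.g. pasted into the same module):

```python
# Illustrative record, not a real dataset row.
text = "The sculpture rolled off the shelf because it wasn't anchored."
doc = {
    "text": text,
    "pronoun": "it",
    "pronoun_loc": text.index("it wasn't"),
    "options": ["The sculpture", "The shelf"],
}

# The substitution point is mid-sentence, so the leading "The" is lowercased
# to keep the spliced choice text natural:
print(__normalize_option(doc, doc["options"][0]))  # -> 'the sculpture'
print(__normalize_option(doc, doc["options"][1]))  # -> 'the shelf'
```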