koichi12 committed
Commit fb07762 · verified · 1 Parent(s): 0a12ded

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50):
  1. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/README.md +59 -0
  2. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/_generate_configs.py +26 -0
  3. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/_gpqa_cot_n_shot_yaml +38 -0
  4. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_diamond_cot_n_shot.yaml +4 -0
  5. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_extended_cot_n_shot.yaml +4 -0
  6. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_main_cot_n_shot.yaml +4 -0
  7. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/utils.py +39 -0
  8. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/_generate_configs.py +26 -0
  9. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/_gpqa_cot_zeroshot_yaml +38 -0
  10. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_diamond_cot_zeroshot.yaml +4 -0
  11. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_extended_cot_zeroshot.yaml +4 -0
  12. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_main_cot_zeroshot.yaml +4 -0
  13. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/utils.py +39 -0
  14. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/_generate_configs.py +26 -0
  15. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/_gpqa_generative_n_shot_yaml +39 -0
  16. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_diamond_generative_n_shot.yaml +4 -0
  17. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_extended_generative_n_shot.yaml +4 -0
  18. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_main_generative_n_shot.yaml +4 -0
  19. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/utils.py +39 -0
  20. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/_generate_configs.py +26 -0
  21. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/_gpqa_n_shot_yaml +21 -0
  22. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/gpqa_diamond_n_shot.yaml +4 -0
  23. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/gpqa_extended_n_shot.yaml +4 -0
  24. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/gpqa_main_n_shot.yaml +4 -0
  25. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/utils.py +41 -0
  26. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/_generate_configs.py +26 -0
  27. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/_gpqa_zeroshot_yaml +21 -0
  28. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/gpqa_diamond_zeroshot.yaml +4 -0
  29. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/gpqa_extended_zeroshot.yaml +4 -0
  30. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/gpqa_main_zeroshot.yaml +4 -0
  31. scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/utils.py +38 -0
  32. scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/_generate_configs.py +34 -0
  33. scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/_med_concepts_qa.yaml +10 -0
  34. scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/_med_concepts_qa_icd10cm.yaml +6 -0
  35. scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/_med_concepts_qa_icd10proc.yaml +6 -0
  36. scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/med_concepts_qa_atc_hard.yaml +5 -0
  37. scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/med_concepts_qa_icd10cm_easy.yaml +5 -0
  38. scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/med_concepts_qa_icd10cm_hard.yaml +5 -0
  39. scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/med_concepts_qa_icd10proc_easy.yaml +5 -0
  40. scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/med_concepts_qa_icd9proc_easy.yaml +5 -0
  41. scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/med_concepts_qa_icd9proc_hard.yaml +5 -0
  42. scripts/yans/lm-evaluation-harness/lm_eval/tasks/mmlusr/answer_only/answer_only_anatomy.yaml +7 -0
  43. scripts/yans/lm-evaluation-harness/lm_eval/tasks/mmlusr/answer_only/answer_only_business_ethics.yaml +7 -0
  44. scripts/yans/lm-evaluation-harness/lm_eval/tasks/mmlusr/answer_only/answer_only_clinical_knowledge.yaml +7 -0
  45. scripts/yans/lm-evaluation-harness/lm_eval/tasks/mmlusr/answer_only/answer_only_college_biology.yaml +7 -0
  46. scripts/yans/lm-evaluation-harness/lm_eval/tasks/mmlusr/answer_only/answer_only_college_chemistry.yaml +7 -0
  47. scripts/yans/lm-evaluation-harness/lm_eval/tasks/mmlusr/answer_only/answer_only_college_medicine.yaml +7 -0
  48. scripts/yans/lm-evaluation-harness/lm_eval/tasks/mmlusr/answer_only/answer_only_computer_security.yaml +7 -0
  49. scripts/yans/lm-evaluation-harness/lm_eval/tasks/mmlusr/answer_only/answer_only_conceptual_physics.yaml +7 -0
  50. scripts/yans/lm-evaluation-harness/lm_eval/tasks/mmlusr/answer_only/answer_only_electrical_engineering.yaml +7 -0
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/README.md ADDED
@@ -0,0 +1,59 @@
+ # GPQA
+ 
+ ### Paper
+ 
+ Title: GPQA: A Graduate-Level Google-Proof Q&A Benchmark
+ 
+ Abstract: https://arxiv.org/abs/2311.12022
+ 
+ We present GPQA, a challenging dataset of 448 multiple-choice questions written by domain experts in biology, physics, and chemistry. We ensure that the questions are high-quality and extremely difficult: experts who have or are pursuing PhDs in the corresponding domains reach 65% accuracy (74% when discounting clear mistakes the experts identified in retrospect), while highly skilled non-expert validators only reach 34% accuracy, despite spending on average over 30 minutes with unrestricted access to the web (i.e., the questions are “Google-proof”). The questions are also difficult for state-of-the-art AI systems, with our strongest GPT-4–based baseline achieving 39% accuracy. If we are to use future AI systems to help us answer very hard questions—for example, when developing new scientific knowledge—we need to develop *scalable oversight* methods that enable humans to supervise their outputs, which may be difficult even if the supervisors are themselves skilled and knowledgeable. The difficulty of GPQA both for skilled non-experts and frontier AI systems should enable realistic scalable oversight experiments, which we hope can help devise ways for human experts to reliably get truthful information from AI systems that surpass human capabilities.
+ 
+ Homepage: `https://github.com/idavidrein/gpqa/tree/main`
+ 
+ ### Citation
+ 
+ ```
+ @misc{rein2023gpqa,
+     title={GPQA: A Graduate-Level Google-Proof Q&A Benchmark},
+     author={David Rein and Betty Li Hou and Asa Cooper Stickland and Jackson Petty and Richard Yuanzhe Pang and Julien Dirani and Julian Michael and Samuel R. Bowman},
+     year={2023},
+     eprint={2311.12022},
+     archivePrefix={arXiv},
+     primaryClass={cs.AI}
+ }
+ ```
+ 
+ This dataset is gated, so you will have to accept the terms of use at https://huggingface.co/datasets/Idavidrein/gpqa and login via `huggingface-cli login` using your HF Hub token before running this task.
+ 
+ ### Groups, Tags, and Tasks
+ 
+ #### Groups
+ 
+ None
+ 
+ #### Tags
+ 
+ * `gpqa`: runs all GPQA variants.
+ 
+ #### Tasks
+ 
+ * `gpqa_{main, diamond, extended}_zeroshot`
+ * `gpqa_{main, diamond, extended}_n_shot`
+ * `gpqa_{main, diamond, extended}_generative_n_shot`
+ * `gpqa_{main, diamond, extended}_cot_zeroshot`
+ * `gpqa_{main, diamond, extended}_cot_n_shot`
+ 
+ ### Checklist
+ 
+ For adding novel benchmarks/datasets to the library:
+ 
+ * [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+ 
+ 
+ If other tasks on this dataset are already supported:
+ 
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
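Since the README only lists the task names, here is a minimal sketch (not part of this commit) of how one of these variants could be run through the harness's Python API. The model name and batch size are placeholders, and the gated-dataset login described above is assumed to have already been done.

```python
# Minimal sketch: run one GPQA variant via the lm-evaluation-harness Python API.
# Assumes the gated dataset terms were accepted and `huggingface-cli login` has
# been run; the pretrained model below is only a small placeholder.
from lm_eval import simple_evaluate

results = simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",
    tasks=["gpqa_main_zeroshot"],
    batch_size=8,
)
print(results["results"]["gpqa_main_zeroshot"])
```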
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
+ import yaml
+ from tqdm import tqdm
+ 
+ 
+ def main() -> None:
+     subset = ["extended", "diamond", "main"]
+     setting = "cot_n_shot"
+     for task in tqdm(subset):
+         file_name = f"gpqa_{task}_{setting}.yaml"
+         try:
+             with open(f"{file_name}", "w") as f:
+                 f.write("# Generated by _generate_configs.py\n")
+                 yaml.dump(
+                     {
+                         "include": f"_gpqa_{setting}_yaml",
+                         "task": f"gpqa_{task}_{setting}",
+                         "dataset_name": f"gpqa_{task}",
+                     },
+                     f,
+                 )
+         except FileExistsError:
+             pass
+ 
+ 
+ if __name__ == "__main__":
+     main()
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/_gpqa_cot_n_shot_yaml ADDED
@@ -0,0 +1,38 @@
+ dataset_path: Idavidrein/gpqa
+ tag: gpqa
+ output_type: generate_until
+ process_docs: !function utils.process_docs
+ training_split: train
+ # Because huggingface dataset only has train split
+ validation_split: train
+ test_split: null
+ description: "Here are some example questions from experts. Answer the final question yourself, following the format of the previous questions exactly.\n"
+ doc_to_text: "Question: {{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nLet's think step by step: "
+ doc_to_target: answer
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "(?<=The answer is )(.*)(?=.)"
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: "multi_choice_regex"
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+ generation_kwargs:
+   until:
+     - "</s>"
+   do_sample: false
+   temperature: 0.0
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     ignore_punctuation: true
+ metadata:
+   version: 2.0
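For readers unfamiliar with the harness's filter pipeline, the two filters above extract an answer letter from the generated chain of thought. The snippet below is a rough illustration, using plain `re` on a made-up completion, of how the `strict-match` and `flexible-extract` patterns behave; the harness's actual `multi_choice_regex` filter does more than this.

```python
import re

# Hypothetical model completion, for illustration only.
completion = "Let's think step by step: ... The answer is (C)."

# strict-match: the same lookbehind pattern as the "regex" filter above.
strict = re.search(r"(?<=The answer is )(.*)(?=.)", completion)
print(strict.group(1))  # -> "(C)"

# flexible-extract: take the last "(A)"-style group anywhere in the output,
# roughly what multi_choice_regex with group_select: -1 does.
candidates = re.findall(r"(\([A-Z]\))", completion)
print(candidates[-1])  # -> "(C)"
```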
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_diamond_cot_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_diamond
+ include: _gpqa_cot_n_shot_yaml
+ task: gpqa_diamond_cot_n_shot
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_extended_cot_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_extended
+ include: _gpqa_cot_n_shot_yaml
+ task: gpqa_extended_cot_n_shot
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_main_cot_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_main
+ include: _gpqa_cot_n_shot_yaml
+ task: gpqa_main_cot_n_shot
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/utils.py ADDED
@@ -0,0 +1,39 @@
+ import random
+ import re
+ 
+ import datasets
+ 
+ 
+ def preprocess(text):
+     if text is None:
+         return " "
+     text = text.strip()
+     text = text.replace(" [title]", ". ")
+     text = re.sub("\\[.*?\\]", "", text)
+     text = text.replace("  ", " ")
+     return text
+ 
+ 
+ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
+     def _process_doc(doc):
+         choices = [
+             preprocess(doc["Incorrect Answer 1"]),
+             preprocess(doc["Incorrect Answer 2"]),
+             preprocess(doc["Incorrect Answer 3"]),
+             preprocess(doc["Correct Answer"]),
+         ]
+ 
+         random.shuffle(choices)
+         correct_answer_index = choices.index(preprocess(doc["Correct Answer"]))
+ 
+         out_doc = {
+             "choice1": choices[0],
+             "choice2": choices[1],
+             "choice3": choices[2],
+             "choice4": choices[3],
+             "choices": [choices[0], choices[1], choices[2], choices[3]],
+             "answer": f"({chr(65 + correct_answer_index)})",
+         }
+         return out_doc
+ 
+     return dataset.map(_process_doc)
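Essentially the same `process_docs` helper is repeated, with small variations, in the other GPQA variants below. As a rough, self-contained illustration of the fields it adds — using a made-up record, not real GPQA data — the transformation looks like this:

```python
import random

# Made-up record with the same column names as the gated GPQA dataset.
doc = {
    "Incorrect Answer 1": "0.9 eV",
    "Incorrect Answer 2": "2.3 eV",
    "Incorrect Answer 3": "4.1 eV",
    "Correct Answer": "1.7 eV",
}

# Shuffle the four options and record which letter the correct one landed on,
# mirroring _process_doc above.
choices = [doc["Incorrect Answer 1"], doc["Incorrect Answer 2"],
           doc["Incorrect Answer 3"], doc["Correct Answer"]]
random.shuffle(choices)
answer = f"({chr(65 + choices.index(doc['Correct Answer']))})"

print({f"choice{i + 1}": c for i, c in enumerate(choices)})
print("answer:", answer)  # e.g. "(B)"
```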
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
+ import yaml
+ from tqdm import tqdm
+ 
+ 
+ def main() -> None:
+     subset = ["extended", "diamond", "main"]
+     setting = "cot_zeroshot"
+     for task in tqdm(subset):
+         file_name = f"gpqa_{task}_{setting}.yaml"
+         try:
+             with open(f"{file_name}", "w") as f:
+                 f.write("# Generated by _generate_configs.py\n")
+                 yaml.dump(
+                     {
+                         "include": f"_gpqa_{setting}_yaml",
+                         "task": f"gpqa_{task}_{setting}",
+                         "dataset_name": f"gpqa_{task}",
+                     },
+                     f,
+                 )
+         except FileExistsError:
+             pass
+ 
+ 
+ if __name__ == "__main__":
+     main()
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/_gpqa_cot_zeroshot_yaml ADDED
@@ -0,0 +1,38 @@
+ dataset_path: Idavidrein/gpqa
+ tag: gpqa
+ output_type: generate_until
+ process_docs: !function utils.process_docs
+ training_split: train
+ # Because huggingface dataset only has train split
+ validation_split: train
+ test_split: null
+ doc_to_text: "What is the correct answer to this question:{{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nLet's think step by step: "
+ doc_to_target: answer
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "(?<=The answer is )(.*)(?=.)"
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: "multi_choice_regex"
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+ generation_kwargs:
+   until:
+     - "</s>"
+   do_sample: false
+   temperature: 0.0
+ num_fewshot: 0
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     ignore_punctuation: true
+ metadata:
+   version: 1.0
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_diamond_cot_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_diamond
+ include: _gpqa_cot_zeroshot_yaml
+ task: gpqa_diamond_cot_zeroshot
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_extended_cot_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_extended
+ include: _gpqa_cot_zeroshot_yaml
+ task: gpqa_extended_cot_zeroshot
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_main_cot_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_main
+ include: _gpqa_cot_zeroshot_yaml
+ task: gpqa_main_cot_zeroshot
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/utils.py ADDED
@@ -0,0 +1,39 @@
+ import random
+ import re
+ 
+ import datasets
+ 
+ 
+ def preprocess(text):
+     if text is None:
+         return " "
+     text = text.strip()
+     text = text.replace(" [title]", ". ")
+     text = re.sub("\\[.*?\\]", "", text)
+     text = text.replace("  ", " ")
+     return text
+ 
+ 
+ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
+     def _process_doc(doc):
+         choices = [
+             preprocess(doc["Incorrect Answer 1"]),
+             preprocess(doc["Incorrect Answer 2"]),
+             preprocess(doc["Incorrect Answer 3"]),
+             preprocess(doc["Correct Answer"]),
+         ]
+ 
+         random.shuffle(choices)
+         correct_answer_index = choices.index(preprocess(doc["Correct Answer"]))
+ 
+         out_doc = {
+             "choice1": choices[0],
+             "choice2": choices[1],
+             "choice3": choices[2],
+             "choice4": choices[3],
+             "choices": [choices[0], choices[1], choices[2], choices[3]],
+             "answer": f"({chr(65 + correct_answer_index)})",
+         }
+         return out_doc
+ 
+     return dataset.map(_process_doc)
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
+ import yaml
+ from tqdm import tqdm
+ 
+ 
+ def main() -> None:
+     subset = ["extended", "diamond", "main"]
+     setting = "generative_n_shot"
+     for task in tqdm(subset):
+         file_name = f"gpqa_{task}_{setting}.yaml"
+         try:
+             with open(f"{file_name}", "w") as f:
+                 f.write("# Generated by _generate_configs.py\n")
+                 yaml.dump(
+                     {
+                         "include": f"_gpqa_{setting}_yaml",
+                         "task": f"gpqa_{task}_{setting}",
+                         "dataset_name": f"gpqa_{task}",
+                     },
+                     f,
+                 )
+         except FileExistsError:
+             pass
+ 
+ 
+ if __name__ == "__main__":
+     main()
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/_gpqa_generative_n_shot_yaml ADDED
@@ -0,0 +1,39 @@
+ dataset_path: Idavidrein/gpqa
+ tag: gpqa
+ output_type: generate_until
+ process_docs: !function utils.process_docs
+ training_split: train
+ # Because huggingface dataset only has train split
+ validation_split: train
+ test_split: null
+ description: "Here are some example questions from experts. Answer the final question yourself, following the format of the previous questions exactly.\n"
+ doc_to_text: "Question: {{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nAnswer:"
+ doc_to_target: answer
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "(?<=The answer is )(.*)(?=.)"
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: "multi_choice_regex"
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+ generation_kwargs:
+   until:
+     - "</s>"
+     - "Question:"
+     - "<|im_end|>"
+   temperature: 0.0
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     ignore_punctuation: true
+ metadata:
+   version: 2.0
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_diamond_generative_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_diamond
+ include: _gpqa_generative_n_shot_yaml
+ task: gpqa_diamond_generative_n_shot
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_extended_generative_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_extended
+ include: _gpqa_generative_n_shot_yaml
+ task: gpqa_extended_generative_n_shot
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_main_generative_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_main
+ include: _gpqa_generative_n_shot_yaml
+ task: gpqa_main_generative_n_shot
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/utils.py ADDED
@@ -0,0 +1,39 @@
+ import random
+ import re
+ 
+ import datasets
+ 
+ 
+ def preprocess(text):
+     if text is None:
+         return " "
+     text = text.strip()
+     text = text.replace(" [title]", ". ")
+     text = re.sub("\\[.*?\\]", "", text)
+     text = text.replace("  ", " ")
+     return text
+ 
+ 
+ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
+     def _process_doc(doc):
+         choices = [
+             preprocess(doc["Incorrect Answer 1"]),
+             preprocess(doc["Incorrect Answer 2"]),
+             preprocess(doc["Incorrect Answer 3"]),
+             preprocess(doc["Correct Answer"]),
+         ]
+ 
+         random.shuffle(choices)
+         correct_answer_index = choices.index(preprocess(doc["Correct Answer"]))
+ 
+         out_doc = {
+             "choice1": choices[0],
+             "choice2": choices[1],
+             "choice3": choices[2],
+             "choice4": choices[3],
+             "choices": [choices[0], choices[1], choices[2], choices[3]],
+             "answer": f"({chr(65 + correct_answer_index)})",
+         }
+         return out_doc
+ 
+     return dataset.map(_process_doc)
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
+ import yaml
+ from tqdm import tqdm
+ 
+ 
+ def main() -> None:
+     subset = ["extended", "diamond", "main"]
+ 
+     for task in tqdm(subset):
+         file_name = f"gpqa_{task}_n_shot.yaml"
+         try:
+             with open(f"{file_name}", "w") as f:
+                 f.write("# Generated by _generate_configs.py\n")
+                 yaml.dump(
+                     {
+                         "include": "_gpqa_n_shot_yaml",
+                         "task": f"gpqa_{task}_n_shot",
+                         "dataset_name": f"gpqa_{task}",
+                     },
+                     f,
+                 )
+         except FileExistsError:
+             pass
+ 
+ 
+ if __name__ == "__main__":
+     main()
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/_gpqa_n_shot_yaml ADDED
@@ -0,0 +1,21 @@
+ dataset_path: Idavidrein/gpqa
+ tag: gpqa
+ output_type: multiple_choice
+ process_docs: !function utils.process_docs
+ training_split: train
+ # Because huggingface dataset only has train split
+ validation_split: train
+ test_split: null
+ description: "Here are some example questions from experts. Answer the final question yourself, following the format of the previous questions exactly.\n"
+ doc_to_text: "Question: {{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nAnswer:"
+ doc_to_target: answer
+ doc_to_choice: ["(A)", "(B)", "(C)", "(D)"]
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 2.0
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/gpqa_diamond_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_diamond
+ include: _gpqa_n_shot_yaml
+ task: gpqa_diamond_n_shot
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/gpqa_extended_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_extended
+ include: _gpqa_n_shot_yaml
+ task: gpqa_extended_n_shot
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/gpqa_main_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_main
+ include: _gpqa_n_shot_yaml
+ task: gpqa_main_n_shot
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/utils.py ADDED
@@ -0,0 +1,41 @@
+ import random
+ import re
+ 
+ import datasets
+ 
+ 
+ def preprocess(text):
+     if text is None:
+         return " "
+     text = text.strip()
+     text = text.replace(" [title]", ". ")
+     text = re.sub("\\[.*?\\]", "", text)
+     text = text.replace("  ", " ")
+     return text
+ 
+ 
+ rng = random.Random(42)
+ 
+ 
+ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
+     def _process_doc(doc):
+         choices = [
+             preprocess(doc["Incorrect Answer 1"]),
+             preprocess(doc["Incorrect Answer 2"]),
+             preprocess(doc["Incorrect Answer 3"]),
+             preprocess(doc["Correct Answer"]),
+         ]
+ 
+         rng.shuffle(choices)
+         correct_answer_index = choices.index(preprocess(doc["Correct Answer"]))
+ 
+         out_doc = {
+             "choice1": choices[0],
+             "choice2": choices[1],
+             "choice3": choices[2],
+             "choice4": choices[3],
+             "answer": f"({chr(65 + correct_answer_index)})",
+         }
+         return out_doc
+ 
+     return dataset.map(_process_doc)
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
+ import yaml
+ from tqdm import tqdm
+ 
+ 
+ def main() -> None:
+     subset = ["extended", "diamond", "main"]
+     setting = "zeroshot"
+     for task in tqdm(subset):
+         file_name = f"gpqa_{task}_{setting}.yaml"
+         try:
+             with open(f"{file_name}", "w") as f:
+                 f.write("# Generated by _generate_configs.py\n")
+                 yaml.dump(
+                     {
+                         "include": f"_gpqa_{setting}_yaml",
+                         "task": f"gpqa_{task}_{setting}",
+                         "dataset_name": f"gpqa_{task}",
+                     },
+                     f,
+                 )
+         except FileExistsError:
+             pass
+ 
+ 
+ if __name__ == "__main__":
+     main()
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/_gpqa_zeroshot_yaml ADDED
@@ -0,0 +1,21 @@
+ dataset_path: Idavidrein/gpqa
+ tag: gpqa
+ output_type: multiple_choice
+ process_docs: !function utils.process_docs
+ training_split: train
+ # Because huggingface dataset only has train split
+ validation_split: train
+ test_split: null
+ doc_to_text: "What is the correct answer to this question:{{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nAnswer:"
+ doc_to_target: answer
+ doc_to_choice: ["(A)", "(B)", "(C)", "(D)"]
+ num_fewshot: 0
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/gpqa_diamond_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_diamond
+ include: _gpqa_zeroshot_yaml
+ task: gpqa_diamond_zeroshot
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/gpqa_extended_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_extended
+ include: _gpqa_zeroshot_yaml
+ task: gpqa_extended_zeroshot
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/gpqa_main_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_main
+ include: _gpqa_zeroshot_yaml
+ task: gpqa_main_zeroshot
scripts/yans/lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/utils.py ADDED
@@ -0,0 +1,38 @@
+ import random
+ import re
+ 
+ import datasets
+ 
+ 
+ def preprocess(text):
+     if text is None:
+         return " "
+     text = text.strip()
+     text = text.replace(" [title]", ". ")
+     text = re.sub("\\[.*?\\]", "", text)
+     text = text.replace("  ", " ")
+     return text
+ 
+ 
+ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
+     def _process_doc(doc):
+         choices = [
+             preprocess(doc["Incorrect Answer 1"]),
+             preprocess(doc["Incorrect Answer 2"]),
+             preprocess(doc["Incorrect Answer 3"]),
+             preprocess(doc["Correct Answer"]),
+         ]
+ 
+         random.shuffle(choices)
+         correct_answer_index = choices.index(preprocess(doc["Correct Answer"]))
+ 
+         out_doc = {
+             "choice1": choices[0],
+             "choice2": choices[1],
+             "choice3": choices[2],
+             "choice4": choices[3],
+             "answer": f"({chr(65 + correct_answer_index)})",
+         }
+         return out_doc
+ 
+     return dataset.map(_process_doc)
scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/_generate_configs.py ADDED
@@ -0,0 +1,34 @@
+ from typing import List
+ 
+ import yaml
+ 
+ 
+ def generate_yaml_content(vocab_name: str, level: str):
+     content = {
+         "dataset_name": f"{vocab_name}_{level}",
+         "tag": f"med_concepts_qa_{vocab_name}_tasks",
+         "include": "_default_template_yaml",
+         "task": f"med_concepts_qa_{vocab_name}_{level}",
+         "task_alias": f"{vocab_name}_{level}",
+     }
+     return content
+ 
+ 
+ def generate_yaml_files(
+     vocab_names: List[str], levels: List[str], file_name_prefix: str
+ ):
+     for vocab_name in vocab_names:
+         for level in levels:
+             yaml_content = generate_yaml_content(vocab_name, level)
+             filename = f"{file_name_prefix}_{vocab_name}_{level}.yaml"
+             with open(filename, "w") as yaml_file:
+                 yaml.dump(yaml_content, yaml_file, default_flow_style=False)
+                 print(f"Done to generated {filename}")
+ 
+ 
+ if __name__ == "__main__":
+     generate_yaml_files(
+         vocab_names=["icd9cm", "icd10cm", "icd9proc", "icd10proc", "atc"],
+         levels=["easy", "medium", "hard"],
+         file_name_prefix="med_concepts_qa",
+     )
scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/_med_concepts_qa.yaml ADDED
@@ -0,0 +1,10 @@
+ group: med_concepts_qa
+ task:
+   - med_concepts_qa_icd9cm
+   - med_concepts_qa_icd10cm
+   - med_concepts_qa_icd9proc
+   - med_concepts_qa_icd10proc
+   - med_concepts_qa_atc
+ aggregate_metric_list:
+   - metric: acc
+     aggregation: mean
scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/_med_concepts_qa_icd10cm.yaml ADDED
@@ -0,0 +1,6 @@
+ group: med_concepts_qa_icd10cm
+ task:
+   - med_concepts_qa_icd10cm_tasks
+ aggregate_metric_list:
+   - metric: acc
+     aggregation: mean
scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/_med_concepts_qa_icd10proc.yaml ADDED
@@ -0,0 +1,6 @@
+ group: med_concepts_qa_icd10proc
+ task:
+   - med_concepts_qa_icd10proc_tasks
+ aggregate_metric_list:
+   - metric: acc
+     aggregation: mean
scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/med_concepts_qa_atc_hard.yaml ADDED
@@ -0,0 +1,5 @@
+ dataset_name: atc_hard
+ include: _default_template_yaml
+ tag: med_concepts_qa_atc_tasks
+ task: med_concepts_qa_atc_hard
+ task_alias: atc_hard
scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/med_concepts_qa_icd10cm_easy.yaml ADDED
@@ -0,0 +1,5 @@
+ dataset_name: icd10cm_easy
+ include: _default_template_yaml
+ tag: med_concepts_qa_icd10cm_tasks
+ task: med_concepts_qa_icd10cm_easy
+ task_alias: icd10cm_easy
scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/med_concepts_qa_icd10cm_hard.yaml ADDED
@@ -0,0 +1,5 @@
+ dataset_name: icd10cm_hard
+ include: _default_template_yaml
+ tag: med_concepts_qa_icd10cm_tasks
+ task: med_concepts_qa_icd10cm_hard
+ task_alias: icd10cm_hard
scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/med_concepts_qa_icd10proc_easy.yaml ADDED
@@ -0,0 +1,5 @@
+ dataset_name: icd10proc_easy
+ include: _default_template_yaml
+ tag: med_concepts_qa_icd10proc_tasks
+ task: med_concepts_qa_icd10proc_easy
+ task_alias: icd10proc_easy
scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/med_concepts_qa_icd9proc_easy.yaml ADDED
@@ -0,0 +1,5 @@
+ dataset_name: icd9proc_easy
+ include: _default_template_yaml
+ tag: med_concepts_qa_icd9proc_tasks
+ task: med_concepts_qa_icd9proc_easy
+ task_alias: icd9proc_easy
scripts/yans/lm-evaluation-harness/lm_eval/tasks/med_concepts_qa/med_concepts_qa_icd9proc_hard.yaml ADDED
@@ -0,0 +1,5 @@
+ dataset_name: icd9proc_hard
+ include: _default_template_yaml
+ tag: med_concepts_qa_icd9proc_tasks
+ task: med_concepts_qa_icd9proc_hard
+ task_alias: icd9proc_hard
scripts/yans/lm-evaluation-harness/lm_eval/tasks/mmlusr/answer_only/answer_only_anatomy.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "answer_only_anatomy"
+ "description": "The following are multiple choice questions (with answers) about anatomy.\n\
+   \n"
+ "tag": "mmlusr_answer_only_stem_tasks"
+ "include": "_mmlusr_a_yml"
+ "task": "mmlusr_answer_only_anatomy"
+ "task_alias": "anatomy"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/mmlusr/answer_only/answer_only_business_ethics.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "answer_only_business_ethics"
+ "description": "The following are multiple choice questions (with answers) about business\
+   \ ethics.\n\n"
+ "tag": "mmlusr_answer_only_other_tasks"
+ "include": "_mmlusr_a_yml"
+ "task": "mmlusr_answer_only_business_ethics"
+ "task_alias": "business ethics"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/mmlusr/answer_only/answer_only_clinical_knowledge.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "answer_only_clinical_knowledge"
+ "description": "The following are multiple choice questions (with answers) about clinical\
+   \ knowledge.\n\n"
+ "tag": "mmlusr_answer_only_other_tasks"
+ "include": "_mmlusr_a_yml"
+ "task": "mmlusr_answer_only_clinical_knowledge"
+ "task_alias": "clinical knowledge"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/mmlusr/answer_only/answer_only_college_biology.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "answer_only_college_biology"
+ "description": "The following are multiple choice questions (with answers) about college\
+   \ biology.\n\n"
+ "tag": "mmlusr_answer_only_stem_tasks"
+ "include": "_mmlusr_a_yml"
+ "task": "mmlusr_answer_only_college_biology"
+ "task_alias": "college biology"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/mmlusr/answer_only/answer_only_college_chemistry.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "answer_only_college_chemistry"
+ "description": "The following are multiple choice questions (with answers) about college\
+   \ chemistry.\n\n"
+ "tag": "mmlusr_answer_only_stem_tasks"
+ "include": "_mmlusr_a_yml"
+ "task": "mmlusr_answer_only_college_chemistry"
+ "task_alias": "college chemistry"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/mmlusr/answer_only/answer_only_college_medicine.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "answer_only_college_medicine"
+ "description": "The following are multiple choice questions (with answers) about college\
+   \ medicine.\n\n"
+ "tag": "mmlusr_answer_only_other_tasks"
+ "include": "_mmlusr_a_yml"
+ "task": "mmlusr_answer_only_college_medicine"
+ "task_alias": "college medicine"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/mmlusr/answer_only/answer_only_computer_security.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "answer_only_computer_security"
+ "description": "The following are multiple choice questions (with answers) about computer\
+   \ security.\n\n"
+ "tag": "mmlusr_answer_only_stem_tasks"
+ "include": "_mmlusr_a_yml"
+ "task": "mmlusr_answer_only_computer_security"
+ "task_alias": "computer security"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/mmlusr/answer_only/answer_only_conceptual_physics.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "answer_only_conceptual_physics"
+ "description": "The following are multiple choice questions (with answers) about conceptual\
+   \ physics.\n\n"
+ "tag": "mmlusr_answer_only_stem_tasks"
+ "include": "_mmlusr_a_yml"
+ "task": "mmlusr_answer_only_conceptual_physics"
+ "task_alias": "conceptual physics"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/mmlusr/answer_only/answer_only_electrical_engineering.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "answer_only_electrical_engineering"
+ "description": "The following are multiple choice questions (with answers) about electrical\
+   \ engineering.\n\n"
+ "tag": "mmlusr_answer_only_stem_tasks"
+ "include": "_mmlusr_a_yml"
+ "task": "mmlusr_answer_only_electrical_engineering"
+ "task_alias": "electrical engineering"