koichi12 commited on
Commit
3ebb242
·
verified ·
1 Parent(s): 49ebc99

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See the raw diff.
Files changed (50) hide show
  1. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/_cot_zeroshot_template_yaml +26 -0
  2. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/causal_judgement.yaml +18 -0
  3. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/disambiguation_qa.yaml +20 -0
  4. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_five_objects.yaml +19 -0
  5. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_seven_objects.yaml +19 -0
  6. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_three_objects.yaml +19 -0
  7. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/movie_recommendation.yaml +19 -0
  8. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/navigate.yaml +17 -0
  9. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/object_counting.yaml +17 -0
  10. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/ruin_names.yaml +19 -0
  11. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/salient_translation_error_detection.yaml +19 -0
  12. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/snarks.yaml +19 -0
  13. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/sports_understanding.yaml +21 -0
  14. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/temporal_sequences.yaml +19 -0
  15. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_seven_objects.yaml +19 -0
  16. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_three_objects.yaml +19 -0
  17. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/utils.py +224 -0
  18. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/web_of_lies.yaml +20 -0
  19. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/word_sorting.yaml +15 -0
  20. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/_bbh_fewshot.yaml +35 -0
  21. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/_fewshot_template_yaml +20 -0
  22. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/boolean_expressions.yaml +19 -0
  23. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/causal_judgement.yaml +67 -0
  24. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/date_understanding.yaml +60 -0
  25. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/disambiguation_qa.yaml +53 -0
  26. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/dyck_languages.yaml +23 -0
  27. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/formal_fallacies.yaml +60 -0
  28. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/geometric_shapes.yaml +89 -0
  29. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/hyperbaton.yaml +37 -0
  30. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/logical_deduction_five_objects.yaml +55 -0
  31. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/logical_deduction_seven_objects.yaml +55 -0
  32. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/logical_deduction_three_objects.yaml +55 -0
  33. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/movie_recommendation.yaml +57 -0
  34. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/multistep_arithmetic_two.yaml +19 -0
  35. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/navigate.yaml +43 -0
  36. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/object_counting.yaml +23 -0
  37. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/penguins_in_a_table.yaml +71 -0
  38. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/reasoning_about_colored_objects.yaml +119 -0
  39. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/ruin_names.yaml +53 -0
  40. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/salient_translation_error_detection.yaml +77 -0
  41. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/snarks.yaml +47 -0
  42. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/sports_understanding.yaml +22 -0
  43. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/temporal_sequences.yaml +104 -0
  44. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/tracking_shuffled_objects_five_objects.yaml +62 -0
  45. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/tracking_shuffled_objects_seven_objects.yaml +62 -0
  46. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/tracking_shuffled_objects_three_objects.yaml +62 -0
  47. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/web_of_lies.yaml +25 -0
  48. scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/word_sorting.yaml +21 -0
  49. scripts/yans/lm-evaluation-harness/lm_eval/tasks/kobest/README.md +37 -0
  50. scripts/yans/lm-evaluation-harness/lm_eval/tasks/kobest/kobest_boolq.yaml +23 -0
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/_cot_zeroshot_template_yaml ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_path: lukaemon/bbh
2
+ output_type: generate_until
3
+ test_split: test
4
+ doc_to_target: "{{target}}"
5
+ metric_list:
6
+ - metric: exact_match
7
+ aggregation: mean
8
+ higher_is_better: true
9
+ ignore_case: true
10
+ # ignore_punctuation: true
11
+ regexes_to_ignore:
12
+ - "\\.$"
13
+ - ","
14
+ - "\\\\"
15
+ - "\n"
16
+ - '"'
17
+ generation_kwargs:
18
+ until:
19
+ - "</s>"
20
+ - "Q:"
21
+ - "<|im_end|>"
22
+ do_sample: false
23
+ temperature: 0.0
24
+ num_fewshot: 0
25
+ metadata:
26
+ version: 3.0
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/causal_judgement.yaml ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "dataset_name": "causal_judgement"
2
+ "description": "Answer questions about causal attribution.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_causal_judgement"
6
+
7
+ filter_list:
8
+ - name: "flexible-extract"
9
+ filter:
10
+ - function: "regex"
11
+ group_select: -1
12
+ regex_pattern: "\\b(Yes|No|yes|no)\\b"
13
+ - function: "take_first"
14
+ - name: "strict-match"
15
+ filter:
16
+ - function: "regex"
17
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
18
+ - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/disambiguation_qa.yaml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "dataset_name": "disambiguation_qa"
2
+ "description": "Clarify the meaning of sentences with ambiguous pronouns.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_disambiguation_qa"
6
+
7
+ filter_list:
8
+ - name: "flexible-extract"
9
+ filter:
10
+ - function: !function utils.MultiChoiceRegexFilter
11
+ group_select: -1
12
+ ignore_case: true
13
+ ignore_punctuation: true
14
+ regex_pattern: "(\\([A-Z]\\))"
15
+ - function: "take_first"
16
+ - name: "strict-match"
17
+ filter:
18
+ - function: "regex"
19
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
20
+ - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_five_objects.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "dataset_name": "logical_deduction_five_objects"
2
+ "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_logical_deduction_five_objects"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: !function utils.MultiChoiceRegexFilter
10
+ group_select: -1
11
+ ignore_case: true
12
+ ignore_punctuation: true
13
+ regex_pattern: "(\\([A-Z]\\))"
14
+ - function: "take_first"
15
+ - name: "strict-match"
16
+ filter:
17
+ - function: "regex"
18
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
19
+ - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_seven_objects.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "dataset_name": "logical_deduction_seven_objects"
2
+ "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_logical_deduction_seven_objects"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: !function utils.MultiChoiceRegexFilter
10
+ group_select: -1
11
+ ignore_case: true
12
+ ignore_punctuation: true
13
+ regex_pattern: "(\\([A-Z]\\))"
14
+ - function: "take_first"
15
+ - name: "strict-match"
16
+ filter:
17
+ - function: "regex"
18
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
19
+ - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_three_objects.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "dataset_name": "logical_deduction_three_objects"
2
+ "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_logical_deduction_three_objects"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: !function utils.MultiChoiceRegexFilter
10
+ group_select: -1
11
+ ignore_case: true
12
+ ignore_punctuation: true
13
+ regex_pattern: "(\\([A-Z]\\))"
14
+ - function: "take_first"
15
+ - name: "strict-match"
16
+ filter:
17
+ - function: "regex"
18
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
19
+ - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/movie_recommendation.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "dataset_name": "movie_recommendation"
2
+ "description": "Recommend movies similar to the given list of movies.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_movie_recommendation"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: !function utils.MultiChoiceRegexFilter
10
+ group_select: -1
11
+ ignore_case: true
12
+ ignore_punctuation: true
13
+ regex_pattern: "(\\([A-Z]\\))"
14
+ - function: "take_first"
15
+ - name: "strict-match"
16
+ filter:
17
+ - function: "regex"
18
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
19
+ - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/navigate.yaml ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "dataset_name": "navigate"
2
+ "description": "Given a series of navigation instructions, determine whether one would end up back at the starting point.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_navigate"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: "regex"
10
+ group_select: -1
11
+ regex_pattern: "\\b(Yes|No|yes|no)\\b"
12
+ - function: "take_first"
13
+ - name: "strict-match"
14
+ filter:
15
+ - function: "regex"
16
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
17
+ - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/object_counting.yaml ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "dataset_name": "object_counting"
2
+ "description": "Questions that involve enumerating objects and asking the model to count them.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_object_counting"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: !function utils.NumberParseRegexFilter
10
+ group_select: -1
11
+ regex_pattern: "([-0-9]+)"
12
+ - function: "take_first"
13
+ - name: "strict-match"
14
+ filter:
15
+ - function: "regex"
16
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
17
+ - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/ruin_names.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "dataset_name": "ruin_names"
2
+ "description": "Select the humorous edit that 'ruins' the input movie or musical artist name.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_ruin_names"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: !function utils.MultiChoiceRegexFilter
10
+ group_select: -1
11
+ ignore_case: true
12
+ ignore_punctuation: true
13
+ regex_pattern: "(\\([A-Z]\\))"
14
+ - function: "take_first"
15
+ - name: "strict-match"
16
+ filter:
17
+ - function: "regex"
18
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
19
+ - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/salient_translation_error_detection.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "dataset_name": "salient_translation_error_detection"
2
+ "description": "Detect the type of error in an English translation of a German source sentence.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_salient_translation_error_detection"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: !function utils.MultiChoiceRegexFilter
10
+ group_select: -1
11
+ ignore_case: true
12
+ ignore_punctuation: true
13
+ regex_pattern: "(\\([A-Z]\\))"
14
+ - function: "take_first"
15
+ - name: "strict-match"
16
+ filter:
17
+ - function: "regex"
18
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
19
+ - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/snarks.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "dataset_name": "snarks"
2
+ "description": "Determine which of two sentences is sarcastic.\n\nAccording to Cambridge University Dictionary, sarcasm is \"the use of remarks that clearly mean the opposite of what they say, made in order to hurt someone's feelings or to criticize something in a humorous way.\" Sarcastic sentences often contain satirical or ironic utterances, hyperboles, ambivalent or witty remarks.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_snarks"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: !function utils.MultiChoiceRegexFilter
10
+ group_select: -1
11
+ ignore_case: true
12
+ ignore_punctuation: true
13
+ regex_pattern: "(\\([A-Z]\\))"
14
+ - function: "take_first"
15
+ - name: "strict-match"
16
+ filter:
17
+ - function: "regex"
18
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
19
+ - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/sports_understanding.yaml ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "dataset_name": "sports_understanding"
2
+ "description": "Determine whether an artificially constructed sentence relating to sports is plausible or not.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_sports_understanding"
6
+
7
+ filter_list:
8
+ - name: "flexible-extract"
9
+ filter:
10
+ - function: !function utils.MapRegexFilter
11
+ group_select: -1
12
+ ignore_case: true
13
+ regex_pattern_to_value:
14
+ \b(no|not plausible)\b: "no"
15
+ \b(yes|plausible)\b: "yes"
16
+ - function: "take_first"
17
+ - name: "strict-match"
18
+ filter:
19
+ - function: "regex"
20
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
21
+ - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/temporal_sequences.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "dataset_name": "temporal_sequences"
2
+ "description": "Task description: Answer questions about which times certain events could have occurred.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_temporal_sequences"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: !function utils.MultiChoiceRegexFilter
10
+ group_select: -1
11
+ ignore_case: true
12
+ ignore_punctuation: true
13
+ regex_pattern: "(\\([A-Z]\\))"
14
+ - function: "take_first"
15
+ - name: "strict-match"
16
+ filter:
17
+ - function: "regex"
18
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
19
+ - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_seven_objects.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "dataset_name": "tracking_shuffled_objects_seven_objects"
2
+ "description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_tracking_shuffled_objects_seven_objects"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: !function utils.MultiChoiceRegexFilter
10
+ group_select: -1
11
+ ignore_case: true
12
+ ignore_punctuation: true
13
+ regex_pattern: "(\\([A-Z]\\))"
14
+ - function: "take_first"
15
+ - name: "strict-match"
16
+ filter:
17
+ - function: "regex"
18
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
19
+ - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_three_objects.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "dataset_name": "tracking_shuffled_objects_three_objects"
2
+ "description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_tracking_shuffled_objects_three_objects"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: !function utils.MultiChoiceRegexFilter
10
+ group_select: -1
11
+ ignore_case: true
12
+ ignore_punctuation: true
13
+ regex_pattern: "(\\([A-Z]\\))"
14
+ - function: "take_first"
15
+ - name: "strict-match"
16
+ filter:
17
+ - function: "regex"
18
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
19
+ - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/utils.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ import re
3
+ import sys
4
+ import unicodedata
5
+
6
+ from lm_eval.filters.extraction import Filter, RegexFilter
7
+
8
+
9
class ExtendedRegexFilter(RegexFilter):
    """RegexFilter extended with optional response normalization: stripping
    configured regexes, case folding, and Unicode punctuation removal are
    applied before matching."""

    # Translation table mapping every Unicode punctuation codepoint to None.
    # NOTE: built eagerly over the full Unicode range at class-creation time.
    # https://stackoverflow.com/a/266162
    punct_tbl = dict.fromkeys(
        i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith("P")
    )

    def __init__(
        self,
        regex_pattern: str = r"#### (\-?[0-9\.\,]+)",
        group_select=0,
        fallback: str = "[invalid]",
        ignore_case=False,
        ignore_punctuation=False,
        regexes_to_ignore=None,
    ) -> None:
        """
        regex_pattern: pattern handed through to the base RegexFilter.
        group_select: index into the findall() result list to keep.
        fallback: value used by subclasses when nothing matches.
        ignore_case: lowercase responses before matching.
        ignore_punctuation: strip Unicode punctuation before matching.
        regexes_to_ignore: patterns removed from responses before matching.
        """
        super().__init__(regex_pattern, group_select, fallback)
        self.ignore_case = ignore_case
        self.ignore_punctuation = ignore_punctuation
        self.regexes_to_ignore = regexes_to_ignore

    def filter_ignores(self, st):
        """Apply the configured normalizations to string ``st`` and return it."""
        if self.regexes_to_ignore is not None:
            for s in self.regexes_to_ignore:
                st = re.sub(s, "", st)

        if self.ignore_case:
            st = st.lower()

        if self.ignore_punctuation:
            # https://stackoverflow.com/a/266162
            st = st.translate(self.punct_tbl)
        return st

    def find_match(self, regex, resp, convert_dict=None):
        """Return the ``group_select``-th match of ``regex`` in ``resp``,
        optionally mapped through ``convert_dict``; returns a falsy value
        (empty list or empty string) when nothing usable matches.

        Fix: ``convert_dict`` previously defaulted to a shared mutable ``{}``.
        """
        if convert_dict is None:
            convert_dict = {}
        match = regex.findall(resp)
        if match:
            match = match[self.group_select]
            if isinstance(match, tuple):
                # Keep the first non-empty capture group. Previously this
                # indexed ``[0]`` on the filtered list, which raised
                # IndexError when every capture group was empty.
                match = next((m for m in match if m), "")
            match = match.strip()
            if match and match in convert_dict:
                match = convert_dict[match]
        return match
51
+
52
+
53
class MapRegexFilter(ExtendedRegexFilter):
    """Match a response against several regexes and map the winning regex to a
    canonical answer value (e.g. "plausible" -> "yes")."""

    def __init__(
        self,
        regex_pattern_to_value: dict = {},
        group_select=0,
        fallback: str = "[invalid]",
        ignore_case=False,
        ignore_punctuation=False,
        regexes_to_ignore=None,
    ) -> None:
        """
        regex_pattern_to_value: Match the regex pattern and change the result into the value
        group_select: Selects the (group_select)th match from the findall result. We use the whole regex_patterns, concatenated by |
        ignore_case: Lowers the case of response before matching with the given regex
        ignore_punctuation: Remove the punctuation before matching with the given regex
        regexes_to_ignore: Remove these regexes before matching with the given regex
        """
        # The union of all patterns is used for the first-pass extraction.
        super().__init__(
            "|".join(list(regex_pattern_to_value.keys())),
            group_select,
            fallback,
            ignore_case,
            ignore_punctuation,
            regexes_to_ignore,
        )
        self.regex_to_value = {
            re.compile(r): v for r, v in regex_pattern_to_value.items()
        }

    def apply(self, resps, docs):
        filtered_resps = []

        for response_set in resps:
            picked = []
            for resp in response_set:
                # First pass: extract the group_select-th hit of the combined
                # pattern from the normalized response.
                overall = self.find_match(self.regex, self.filter_ignores(resp))
                mapped = None
                if overall:
                    # Second pass: decide which individual pattern produced the
                    # hit, and substitute its canonical value.
                    cleaned = self.filter_ignores(overall)
                    for pattern, value in self.regex_to_value.items():
                        if self.find_match(pattern, cleaned):
                            mapped = value
                            break
                picked.append(mapped if mapped else self.fallback)
            filtered_resps.append(picked)

        return filtered_resps
107
+
108
+
109
class NumberParseRegexFilter(ExtendedRegexFilter):
    """Extract a numeric answer; if the digit regex fails, fall back to parsing
    a spelled-out English number ("twenty one" -> "21")."""

    def apply(self, resps, docs):
        # `resps` is a list of response sets, one set per input/target pair;
        # each set is processed independently and kept as a list.
        import regex
        from word2number import w2n

        # https://www.reddit.com/r/regex/comments/11a38uk/parsing_numbers_written_out_as_english_words
        english_number_regex = regex.compile(
            "((?:(?:zero|one|two|three|four|five|(?:twen|thir|for|fif|six|seven|nine)(?|teen|ty)|eight(?:|een|y)|ten|eleven|twelve|fourteen|hundred|thousand|(?:m|b|tr)illion)(?:zero|one|two|three|four|five|(?:twen|thir|for|fif|six|seven|nine)(?:|teen|ty)|eight(?|een|y)|ten|eleven|twelve|fourteen|hundred|thousand|(?:m|b|tr)illion|[^\S\r\n]|,|and|&)+)?(?:zero|one|two|three|four|five|(?:twen|thir|for|fif|six|seven|nine)(?|teen|ty)|eight(?|een|y)|ten|eleven|twelve|fourteen|hundred|thousand|(?:m|b|tr)illion))"
        )

        filtered_resps = []
        for response_set in resps:
            picked = []
            for resp in response_set:
                # Prefer the configured digit pattern.
                found = self.find_match(self.regex, resp)
                if not found:
                    # Fall back to spelled-out numbers, converted via word2number.
                    worded = self.find_match(english_number_regex, resp.lower())
                    if worded:
                        found = str(w2n.word_to_num(worded))
                picked.append(found if found else self.fallback)
            filtered_resps.append(picked)

        return filtered_resps
138
+
139
+
140
class WordSortFilter(Filter):
    """Recover a sorted-word answer from free text: keeps each target word from
    the doc's "List:" once, ordered by its last occurrence in the response."""

    def apply(self, resps, docs):
        filtered_resps = []

        for response_set, doc in zip(resps, docs):
            # Candidate words come from the prompt, after the "List:" marker.
            candidates = doc["input"].split("List:")[1].strip().split()
            word_re = re.compile("|".join(f"\\b{w}\\b" for w in candidates))
            picked = []
            for resp in response_set:
                hits = word_re.findall(resp)
                hits.reverse()
                # dict insertion keeps the FIRST occurrence of a key, so on the
                # reversed hit list that is each word's LAST occurrence in the
                # response; reversing again restores original reading order.
                deduped = collections.OrderedDict(zip(hits, [None] * len(hits)))
                picked.append(" ".join(reversed(deduped)))
            filtered_resps.append(picked)

        return filtered_resps
160
+
161
+
162
class MultiChoiceRegexFilter(ExtendedRegexFilter):
    def __init__(self, *args, **kwargs):
        """
        regex_pattern: The basic regex pattern to use. If fails to match, we will use the customized match procedure
        - step 1 : We parse the choices between ([A-Z])s then try to find these choices in the response.
        - step 2 : We parse the choice with regex :[\s]*([A-?]), where ? varies by number of choices.
        group_select: Selects the (group_select)th match from the findall result.
        ignore_case: Ignores the case during step 1 matching
        ignore_punctuation: Remove the punctuation during step 1 matching
        regexes_to_ignore: Remove these regexes during step 1 matching
        """
        super().__init__(*args, **kwargs)

    def apply(self, resps, docs):
        # here, we assume we have a list, in which each element is
        # a list of model responses for some particular input/target pair.
        # so we process each of these (same input/target response sets)
        # independently (and keep them a list.)

        filtered_resps = []

        for r, doc in zip(resps, docs):
            fallback_regexes = []
            choice_to_alpha = {}
            next_alpha = "A"

            without_paren_fallback_regexes = []
            without_paren_to_target = {}

            # Parse the answer choices "(A) foo (B) bar ..." out of the prompt;
            # each captured group is the choice text following a "(X)" label.
            multiple_choices_regex = re.compile(r"\([A-Z]\)([^\n^(]*)")
            match = multiple_choices_regex.findall(doc["input"])
            for m in match:
                # Normalize the choice text the same way responses will be
                # normalized, so literal-text matching is consistent.
                m = self.filter_ignores(m.strip())
                fallback_regexes.append(f"{re.escape(m)}")
                # Map the literal choice text back to its "(A)"-style label.
                choice_to_alpha[m] = f"({next_alpha})"

                without_paren_fallback_regexes.append(next_alpha)
                without_paren_to_target[next_alpha] = f"({next_alpha})"

                next_alpha = chr(ord(next_alpha) + 1)
            # Regex matching any choice's literal text (step 1).
            fallback_regex = re.compile("|".join(fallback_regexes))
            # Regex matching a bare letter after a colon, e.g. ": B" (step 2).
            without_paren_fallback_regex = "|".join(without_paren_fallback_regexes)
            without_paren_fallback_regex = re.compile(
                f":[\s]*({without_paren_fallback_regex})"
            )

            filtered = []
            for resp in r:
                # Step 0: try the configured regex_pattern directly.
                match = self.find_match(self.regex, resp)
                if not match:
                    # Step 1: look for literal choice text, mapped to "(X)".
                    match = self.find_match(
                        fallback_regex, self.filter_ignores(resp), choice_to_alpha
                    )
                    if not match:
                        # Step 2: look for a bare choice letter after a colon.
                        match = self.find_match(
                            without_paren_fallback_regex, resp, without_paren_to_target
                        )
                if not match:
                    match = self.fallback
                filtered.append(match)
            filtered_resps.append(filtered)

        return filtered_resps
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/web_of_lies.yaml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "dataset_name": "web_of_lies"
2
+ "description": "Evaluate a random boolean function expressed as a word problem.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_web_of_lies"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: !function utils.MapRegexFilter
10
+ group_select: -1
11
+ ignore_case: true
12
+ regex_pattern_to_value:
13
+ \b(no|does not tell the truth|is not telling the truth)\b: "no"
14
+ \b(yes|tells the truth|is telling the truth)\b: "yes"
15
+ - function: "take_first"
16
+ - name: "strict-match"
17
+ filter:
18
+ - function: "regex"
19
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
20
+ - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/cot_zeroshot/word_sorting.yaml ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "dataset_name": "word_sorting"
2
+ "description": "Sort a list of words.\n\n"
3
+ "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
4
+ "include": "_cot_zeroshot_template_yaml"
5
+ "task": "bbh_cot_zeroshot_word_sorting"
6
+ filter_list:
7
+ - name: "flexible-extract"
8
+ filter:
9
+ - function: !function utils.WordSortFilter
10
+ - function: "take_first"
11
+ - name: "strict-match"
12
+ filter:
13
+ - function: "regex"
14
+ regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
15
+ - function: "take_first"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/_bbh_fewshot.yaml ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ group: bbh_fewshot
2
+ task:
3
+ - bbh_fewshot_boolean_expressions
4
+ - bbh_fewshot_causal_judgement
5
+ - bbh_fewshot_date_understanding
6
+ - bbh_fewshot_disambiguation_qa
7
+ - bbh_fewshot_dyck_languages
8
+ - bbh_fewshot_formal_fallacies
9
+ - bbh_fewshot_geometric_shapes
10
+ - bbh_fewshot_hyperbaton
11
+ - bbh_fewshot_logical_deduction_five_objects
12
+ - bbh_fewshot_logical_deduction_seven_objects
13
+ - bbh_fewshot_logical_deduction_three_objects
14
+ - bbh_fewshot_movie_recommendation
15
+ - bbh_fewshot_multistep_arithmetic_two
16
+ - bbh_fewshot_navigate
17
+ - bbh_fewshot_object_counting
18
+ - bbh_fewshot_penguins_in_a_table
19
+ - bbh_fewshot_reasoning_about_colored_objects
20
+ - bbh_fewshot_ruin_names
21
+ - bbh_fewshot_salient_translation_error_detection
22
+ - bbh_fewshot_snarks
23
+ - bbh_fewshot_sports_understanding
24
+ - bbh_fewshot_temporal_sequences
25
+ - bbh_fewshot_tracking_shuffled_objects_five_objects
26
+ - bbh_fewshot_tracking_shuffled_objects_seven_objects
27
+ - bbh_fewshot_tracking_shuffled_objects_three_objects
28
+ - bbh_fewshot_web_of_lies
29
+ - bbh_fewshot_word_sorting
30
+ aggregate_metric_list:
31
+ - metric: exact_match
32
+ aggregation: mean
33
+ weight_by_size: true
34
+ metadata:
35
+ version: 3.0
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/_fewshot_template_yaml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_path: lukaemon/bbh
2
+ output_type: generate_until
3
+ test_split: test
4
+ doc_to_target: "{{target}}"
5
+ metric_list:
6
+ - metric: exact_match
7
+ aggregation: mean
8
+ higher_is_better: true
9
+ # ignore_case: true
10
+ # ignore_punctuation: true
11
+ generation_kwargs:
12
+ until:
13
+ - "</s>"
14
+ - "Q"
15
+ - "\n\n"
16
+ do_sample: false
17
+ temperature: 0.0
18
+ num_fewshot: 3
19
+ metadata:
20
+ version: 2.0
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/boolean_expressions.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: boolean_expressions
2
+ description: 'Evaluate the result of a random Boolean expression.
3
+
4
+
5
+ '
6
+ doc_to_text: 'Q: {{input}}
7
+
8
+ A:'
9
+ fewshot_config:
10
+ sampler: first_n
11
+ samples:
12
+ - input: not ( ( not not True ) ) is
13
+ target: 'False'
14
+ - input: True and False and not True and True is
15
+ target: 'False'
16
+ - input: not not ( not ( False ) ) is
17
+ target: 'True'
18
+ include: _fewshot_template_yaml
19
+ task: bbh_fewshot_boolean_expressions
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/causal_judgement.yaml ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: causal_judgement
2
+ description: 'Answer questions about causal attribution.
3
+
4
+
5
+ '
6
+ doc_to_text: 'Q: {{input}}
7
+
8
+ A:'
9
+ fewshot_config:
10
+ sampler: first_n
11
+ samples:
12
+ - input: 'How would a typical person answer each of the following questions about
13
+ causation?
14
+
15
+ Frank T., had an ongoing dispute with his neighbor over a stretch of land and
16
+ one day decided to shoot his neighbor in the body. Frank T. had no experience
17
+ with guns, his hand slipped on the barrel of the gun, and the shot went wild.
18
+ Nonetheless, the bullet bounced off a large boulder several feet away and hit
19
+ the neighbor''s body, causing significant injury. Did Frank T. intentionally
20
+ shoot his neighbor in the body?
21
+
22
+ Options:
23
+
24
+ - Yes
25
+
26
+ - No'
27
+ target: 'No'
28
+ - input: 'How would a typical person answer each of the following questions about
29
+ causation?
30
+
31
+ Suzy and Billy are working on a project that is very important for our nation''s
32
+ security. The boss tells them both: "Be sure that you are here at exactly 9
33
+ am. It is absolutely essential that you arrive at that time." Both Billy and
34
+ Suzy arrive at 9 am. As it happens, there was a motion detector installed in
35
+ the room where they arrived. The motion detector was set up to be triggered
36
+ if at least one person appeared in the room at the same time. So the motion
37
+ detector went off. Did Billy cause the motion detector to go off?
38
+
39
+ Options:
40
+
41
+ - Yes
42
+
43
+ - No'
44
+ target: 'Yes'
45
+ - input: 'How would a typical person answer each of the following questions about
46
+ causation?
47
+
48
+ George and his sister Lena reunite at their parents'' house for Thanksgiving.
49
+ Whereas George just got into medical school, Lena is unhappy in her marriage
50
+ and recently lost her job. Over the course of the day, George and Lena get into
51
+ a number of heated arguments. Later in the afternoon they play a game of darts.
52
+ They split the first two games, and the third game is close until the end. Who
53
+ will win comes down to George''s last shot. If he hits a high point region,
54
+ he wins; if he hits a low point region, Lena wins. George thinks of the difficult
55
+ time Lena is having, and he really wants to let her win. He aims the dart at
56
+ the low point region. He sets up his shot and the dart lands in the low point
57
+ region. After his shot, Lena wins the game and is very happy. Did George hit
58
+ the low point region intentionally?
59
+
60
+ Options:
61
+
62
+ - Yes
63
+
64
+ - No'
65
+ target: 'Yes'
66
+ include: _fewshot_template_yaml
67
+ task: bbh_fewshot_causal_judgement
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/date_understanding.yaml ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: date_understanding
2
+ description: 'Infer the date from context.
3
+
4
+
5
+ '
6
+ doc_to_text: 'Q: {{input}}
7
+
8
+ A:'
9
+ fewshot_config:
10
+ sampler: first_n
11
+ samples:
12
+ - input: 'Today is Christmas Eve of 1937. What is the date 10 days ago in MM/DD/YYYY?
13
+
14
+ Options:
15
+
16
+ (A) 12/14/2026
17
+
18
+ (B) 12/14/1950
19
+
20
+ (C) 12/14/2007
21
+
22
+ (D) 12/14/1937
23
+
24
+ (E) 07/14/1938
25
+
26
+ (F) 12/14/1988'
27
+ target: (D)
28
+ - input: 'Tomorrow is 11/12/2019. What is the date one year ago from today in MM/DD/YYYY?
29
+
30
+ Options:
31
+
32
+ (A) 09/04/2018
33
+
34
+ (B) 11/11/2018
35
+
36
+ (C) 08/25/2018
37
+
38
+ (D) 11/02/2018
39
+
40
+ (E) 11/04/2018'
41
+ target: (B)
42
+ - input: 'Jane and John married on Jan 2, 1958. It is their 5-year anniversary today.
43
+ What is the date tomorrow in MM/DD/YYYY?
44
+
45
+ Options:
46
+
47
+ (A) 01/11/1961
48
+
49
+ (B) 01/03/1963
50
+
51
+ (C) 01/18/1961
52
+
53
+ (D) 10/14/1960
54
+
55
+ (E) 01/03/1982
56
+
57
+ (F) 12/03/1960'
58
+ target: (B)
59
+ include: _fewshot_template_yaml
60
+ task: bbh_fewshot_date_understanding
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/disambiguation_qa.yaml ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: disambiguation_qa
2
+ description: 'Clarify the meaning of sentences with ambiguous pronouns.
3
+
4
+
5
+ '
6
+ doc_to_text: 'Q: {{input}}
7
+
8
+ A:'
9
+ fewshot_config:
10
+ sampler: first_n
11
+ samples:
12
+ - input: 'In the following sentences, explain the antecedent of the pronoun (which
13
+ thing the pronoun refers to), or state that it is ambiguous.
14
+
15
+ Sentence: The chief told the counselor that they took the day off.
16
+
17
+ Options:
18
+
19
+ (A) The chief took the day off
20
+
21
+ (B) The counselor took the day off
22
+
23
+ (C) Ambiguous'
24
+ target: (A)
25
+ - input: 'In the following sentences, explain the antecedent of the pronoun (which
26
+ thing the pronoun refers to), or state that it is ambiguous.
27
+
28
+ Sentence: The manager sent a message to the secretary, but he didn''t reply
29
+ yet.
30
+
31
+ Options:
32
+
33
+ (A) The secretary didn''t reply yet
34
+
35
+ (B) The manager didn''t reply yet
36
+
37
+ (C) Ambiguous'
38
+ target: (A)
39
+ - input: 'In the following sentences, explain the antecedent of the pronoun (which
40
+ thing the pronoun refers to), or state that it is ambiguous.
41
+
42
+ Sentence: Bailey will plan to meet the director at his office
43
+
44
+ Options:
45
+
46
+ (A) It will be Bailey''s office
47
+
48
+ (B) It will be the director''s office
49
+
50
+ (C) Ambiguous'
51
+ target: (C)
52
+ include: _fewshot_template_yaml
53
+ task: bbh_fewshot_disambiguation_qa
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/dyck_languages.yaml ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: dyck_languages
2
+ description: 'Correctly close a Dyck-n word.
3
+
4
+
5
+ '
6
+ doc_to_text: 'Q: {{input}}
7
+
8
+ A:'
9
+ fewshot_config:
10
+ sampler: first_n
11
+ samples:
12
+ - input: 'Complete the rest of the sequence, making sure that the parentheses are
13
+ closed properly. Input: [ { ['
14
+ target: '] } ]'
15
+ - input: 'Complete the rest of the sequence, making sure that the parentheses are
16
+ closed properly. Input: < > ( ( [ [ ( { } ) [ < > ] ]'
17
+ target: '] ) )'
18
+ - input: 'Complete the rest of the sequence, making sure that the parentheses are
19
+ closed properly. Input: < [ < [ { < [ ] < { } > > } ] > { { ( ) } { < [ < >
20
+ ] > }'
21
+ target: '} ] >'
22
+ include: _fewshot_template_yaml
23
+ task: bbh_fewshot_dyck_languages
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/formal_fallacies.yaml ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: formal_fallacies
2
+ description: 'Distinguish deductively valid arguments from formal fallacies.
3
+
4
+
5
+ '
6
+ doc_to_text: 'Q: {{input}}
7
+
8
+ A:'
9
+ fewshot_config:
10
+ sampler: first_n
11
+ samples:
12
+ - input: '"It is not always easy to see who is related to whom -- and in which ways.
13
+ The following argument pertains to this question: To begin with, Lesley is a
14
+ close friend of Fernando. Moreover, being a close friend of Fernando or a schoolmate
15
+ of Lowell is sufficient for being a great-grandfather of Leroy. It follows that
16
+ Lesley is a great-grandfather of Leroy."
17
+
18
+ Is the argument, given the explicitly stated premises, deductively valid or
19
+ invalid?
20
+
21
+ Options:
22
+
23
+ - valid
24
+
25
+ - invalid'
26
+ target: valid
27
+ - input: '"It is not always easy to see who is related to whom -- and in which ways.
28
+ The following argument pertains to this question: Whoever is not a great-grandfather
29
+ of Clyde is a stepbrother of Brian. Being an ancestor of Dana is sufficient
30
+ for not being a great-grandfather of Clyde. We may conclude: Everyone who is
31
+ an ancestor of Dana is a stepbrother of Brian, too."
32
+
33
+ Is the argument, given the explicitly stated premises, deductively valid or
34
+ invalid?
35
+
36
+ Options:
37
+
38
+ - valid
39
+
40
+ - invalid'
41
+ target: valid
42
+ - input: '"It is not always easy to grasp who is consuming which products. The following
43
+ argument pertains to this question: Every infrequent user of Paul Mitchell shampoo
44
+ is either a rare consumer of Nioxin shampoo or a loyal buyer of Caress soap,
45
+ or both. No regular consumer of Lush soap is a rare consumer of Nioxin shampoo
46
+ and, in the same time, a loyal buyer of Caress soap. It follows that whoever
47
+ is an infrequent user of Paul Mitchell shampoo is not a regular consumer of
48
+ Lush soap."
49
+
50
+ Is the argument, given the explicitly stated premises, deductively valid or
51
+ invalid?
52
+
53
+ Options:
54
+
55
+ - valid
56
+
57
+ - invalid'
58
+ target: invalid
59
+ include: _fewshot_template_yaml
60
+ task: bbh_fewshot_formal_fallacies
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/geometric_shapes.yaml ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: geometric_shapes
2
+ description: 'Name geometric shapes from their SVG paths.
3
+
4
+
5
+ '
6
+ doc_to_text: 'Q: {{input}}
7
+
8
+ A:'
9
+ fewshot_config:
10
+ sampler: first_n
11
+ samples:
12
+ - input: 'This SVG path element <path d="M 31.00,73.00 L 32.00,59.00 L 44.00,50.00
13
+ L 49.00,41.00 L 64.00,37.00 L 71.00,55.00 L 64.00,76.00 L 52.00,61.00 L 31.00,73.00"/>
14
+ draws a
15
+
16
+ Options:
17
+
18
+ (A) circle
19
+
20
+ (B) heptagon
21
+
22
+ (C) hexagon
23
+
24
+ (D) kite
25
+
26
+ (E) line
27
+
28
+ (F) octagon
29
+
30
+ (G) pentagon
31
+
32
+ (H) rectangle
33
+
34
+ (I) sector
35
+
36
+ (J) triangle'
37
+ target: (F)
38
+ - input: 'This SVG path element <path d="M 14.19,26.04 L 51.43,39.21 L 58.44,36.69
39
+ L 56.63,30.17 L 48.53,26.66 L 14.19,26.04"/> draws a
40
+
41
+ Options:
42
+
43
+ (A) circle
44
+
45
+ (B) heptagon
46
+
47
+ (C) hexagon
48
+
49
+ (D) kite
50
+
51
+ (E) line
52
+
53
+ (F) octagon
54
+
55
+ (G) pentagon
56
+
57
+ (H) rectangle
58
+
59
+ (I) sector
60
+
61
+ (J) triangle'
62
+ target: (G)
63
+ - input: 'This SVG path element <path d="M 41.00,43.00 L 37.00,34.00 L 41.00,33.00
64
+ L 45.00,34.00 L 41.00,43.00"/> draws a
65
+
66
+ Options:
67
+
68
+ (A) circle
69
+
70
+ (B) heptagon
71
+
72
+ (C) hexagon
73
+
74
+ (D) kite
75
+
76
+ (E) line
77
+
78
+ (F) octagon
79
+
80
+ (G) pentagon
81
+
82
+ (H) rectangle
83
+
84
+ (I) sector
85
+
86
+ (J) triangle'
87
+ target: (D)
88
+ include: _fewshot_template_yaml
89
+ task: bbh_fewshot_geometric_shapes
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/hyperbaton.yaml ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: hyperbaton
2
+ description: 'Order adjectives correctly in English sentences.
3
+
4
+
5
+ '
6
+ doc_to_text: 'Q: {{input}}
7
+
8
+ A:'
9
+ fewshot_config:
10
+ sampler: first_n
11
+ samples:
12
+ - input: 'Which sentence has the correct adjective order:
13
+
14
+ Options:
15
+
16
+ (A) rubber terrible ship
17
+
18
+ (B) terrible rubber ship'
19
+ target: (B)
20
+ - input: 'Which sentence has the correct adjective order:
21
+
22
+ Options:
23
+
24
+ (A) repulsive small Brazilian exercise ship
25
+
26
+ (B) Brazilian repulsive exercise small ship'
27
+ target: (A)
28
+ - input: 'Which sentence has the correct adjective order:
29
+
30
+ Options:
31
+
32
+ (A) blue gold wonderful square shoe
33
+
34
+ (B) wonderful square blue gold shoe'
35
+ target: (B)
36
+ include: _fewshot_template_yaml
37
+ task: bbh_fewshot_hyperbaton
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/logical_deduction_five_objects.yaml ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: logical_deduction_five_objects
2
+ description: 'A logical deduction task which requires deducing the order of a sequence
3
+ of objects.
4
+
5
+
6
+ '
7
+ doc_to_text: 'Q: {{input}}
8
+
9
+ A:'
10
+ fewshot_config:
11
+ sampler: first_n
12
+ samples:
13
+ - input: 'The following paragraphs each describe a set of three objects arranged
14
+ in a fixed order. The statements are logically consistent within each paragraph.
15
+ In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished
16
+ above Amy. Eli finished below Amy.
17
+
18
+ Options:
19
+
20
+ (A) Amy finished last
21
+
22
+ (B) Eli finished last
23
+
24
+ (C) Eve finished last'
25
+ target: (B)
26
+ - input: 'The following paragraphs each describe a set of three objects arranged
27
+ in a fixed order. The statements are logically consistent within each paragraph.
28
+ On a shelf, there are three books: a white book, a green book, and an orange
29
+ book. The green book is to the right of the white book. The orange book is the
30
+ rightmost.
31
+
32
+ Options:
33
+
34
+ (A) The white book is the leftmost
35
+
36
+ (B) The green book is the leftmost
37
+
38
+ (C) The orange book is the leftmost'
39
+ target: (A)
40
+ - input: 'The following paragraphs each describe a set of three objects arranged
41
+ in a fixed order. The statements are logically consistent within each paragraph.
42
+ On a shelf, there are three books: a red book, a gray book, and a white book.
43
+ The white book is to the left of the gray book. The red book is the second from
44
+ the left.
45
+
46
+ Options:
47
+
48
+ (A) The red book is the leftmost
49
+
50
+ (B) The gray book is the leftmost
51
+
52
+ (C) The white book is the leftmost'
53
+ target: (C)
54
+ include: _fewshot_template_yaml
55
+ task: bbh_fewshot_logical_deduction_five_objects
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/logical_deduction_seven_objects.yaml ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: logical_deduction_seven_objects
2
+ description: 'A logical deduction task which requires deducing the order of a sequence
3
+ of objects.
4
+
5
+
6
+ '
7
+ doc_to_text: 'Q: {{input}}
8
+
9
+ A:'
10
+ fewshot_config:
11
+ sampler: first_n
12
+ samples:
13
+ - input: 'The following paragraphs each describe a set of three objects arranged
14
+ in a fixed order. The statements are logically consistent within each paragraph.
15
+ In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished
16
+ above Amy. Eli finished below Amy.
17
+
18
+ Options:
19
+
20
+ (A) Amy finished last
21
+
22
+ (B) Eli finished last
23
+
24
+ (C) Eve finished last'
25
+ target: (B)
26
+ - input: 'The following paragraphs each describe a set of three objects arranged
27
+ in a fixed order. The statements are logically consistent within each paragraph.
28
+ On a shelf, there are three books: a white book, a green book, and an orange
29
+ book. The green book is to the right of the white book. The orange book is the
30
+ rightmost.
31
+
32
+ Options:
33
+
34
+ (A) The white book is the leftmost
35
+
36
+ (B) The green book is the leftmost
37
+
38
+ (C) The orange book is the leftmost'
39
+ target: (A)
40
+ - input: 'The following paragraphs each describe a set of three objects arranged
41
+ in a fixed order. The statements are logically consistent within each paragraph.
42
+ On a shelf, there are three books: a red book, a gray book, and a white book.
43
+ The white book is to the left of the gray book. The red book is the second from
44
+ the left.
45
+
46
+ Options:
47
+
48
+ (A) The red book is the leftmost
49
+
50
+ (B) The gray book is the leftmost
51
+
52
+ (C) The white book is the leftmost'
53
+ target: (C)
54
+ include: _fewshot_template_yaml
55
+ task: bbh_fewshot_logical_deduction_seven_objects
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/logical_deduction_three_objects.yaml ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: logical_deduction_three_objects
2
+ description: 'A logical deduction task which requires deducing the order of a sequence
3
+ of objects.
4
+
5
+
6
+ '
7
+ doc_to_text: 'Q: {{input}}
8
+
9
+ A:'
10
+ fewshot_config:
11
+ sampler: first_n
12
+ samples:
13
+ - input: 'The following paragraphs each describe a set of three objects arranged
14
+ in a fixed order. The statements are logically consistent within each paragraph.
15
+ In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished
16
+ above Amy. Eli finished below Amy.
17
+
18
+ Options:
19
+
20
+ (A) Amy finished last
21
+
22
+ (B) Eli finished last
23
+
24
+ (C) Eve finished last'
25
+ target: (B)
26
+ - input: 'The following paragraphs each describe a set of three objects arranged
27
+ in a fixed order. The statements are logically consistent within each paragraph.
28
+ On a shelf, there are three books: a white book, a green book, and an orange
29
+ book. The green book is to the right of the white book. The orange book is the
30
+ rightmost.
31
+
32
+ Options:
33
+
34
+ (A) The white book is the leftmost
35
+
36
+ (B) The green book is the leftmost
37
+
38
+ (C) The orange book is the leftmost'
39
+ target: (A)
40
+ - input: 'The following paragraphs each describe a set of three objects arranged
41
+ in a fixed order. The statements are logically consistent within each paragraph.
42
+ On a shelf, there are three books: a red book, a gray book, and a white book.
43
+ The white book is to the left of the gray book. The red book is the second from
44
+ the left.
45
+
46
+ Options:
47
+
48
+ (A) The red book is the leftmost
49
+
50
+ (B) The gray book is the leftmost
51
+
52
+ (C) The white book is the leftmost'
53
+ target: (C)
54
+ include: _fewshot_template_yaml
55
+ task: bbh_fewshot_logical_deduction_three_objects
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/movie_recommendation.yaml ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: movie_recommendation
2
+ description: 'Recommend movies similar to the given list of movies.
3
+
4
+
5
+ '
6
+ doc_to_text: 'Q: {{input}}
7
+
8
+ A:'
9
+ fewshot_config:
10
+ sampler: first_n
11
+ samples:
12
+ - input: 'Find a movie similar to Star Wars Episode IV - A New Hope, Indiana Jones
13
+ and the Last Crusade, Star Wars Episode V - The Empire Strikes Back, The Big
14
+ Lebowski:
15
+
16
+ Options:
17
+
18
+ (A) Tetsuo
19
+
20
+ (B) the Ironman
21
+
22
+ (C) The Princess Bride
23
+
24
+ (D) The Barkley Marathons The Race That Eats Its Young
25
+
26
+ (E) Bug'
27
+ target: (C)
28
+ - input: 'Find a movie similar to Twister, The Silence of the Lambs, Independence
29
+ Day, Braveheart:
30
+
31
+ Options:
32
+
33
+ (A) They Shoot Horses
34
+
35
+ (B) Don''t They
36
+
37
+ (C) Forrest Gump
38
+
39
+ (D) The Salton Sea
40
+
41
+ (E) Extreme Days'
42
+ target: (C)
43
+ - input: 'Find a movie similar to Minority Report, Total Recall, Inside Out, Forrest
44
+ Gump:
45
+
46
+ Options:
47
+
48
+ (A) Phenomena
49
+
50
+ (B) Lilting
51
+
52
+ (C) Catwoman
53
+
54
+ (D) Edge of Tomorrow'
55
+ target: (D)
56
+ include: _fewshot_template_yaml
57
+ task: bbh_fewshot_movie_recommendation
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/multistep_arithmetic_two.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: multistep_arithmetic_two
2
+ description: 'Solve multi-step arithmetic problems.
3
+
4
+
5
+ '
6
+ doc_to_text: 'Q: {{input}}
7
+
8
+ A:'
9
+ fewshot_config:
10
+ sampler: first_n
11
+ samples:
12
+ - input: ((-5 + 9 * -4 - 0) * (4 + -7 + 0 * -5)) =
13
+ target: '123'
14
+ - input: ((-9 * 7 * 7 * -9) + (4 * -9 - 8 - -4)) =
15
+ target: '3929'
16
+ - input: ((-3 + 5 * 8 * -4) - (9 - 8 * -7 + -9)) =
17
+ target: '-219'
18
+ include: _fewshot_template_yaml
19
+ task: bbh_fewshot_multistep_arithmetic_two
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/navigate.yaml ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: navigate
2
+ description: 'Given a series of navigation instructions, determine whether one would
3
+ end up back at the starting point.
4
+
5
+
6
+ '
7
+ doc_to_text: 'Q: {{input}}
8
+
9
+ A:'
10
+ fewshot_config:
11
+ sampler: first_n
12
+ samples:
13
+ - input: 'If you follow these instructions, do you return to the starting point?
14
+ Turn left. Turn around. Turn left. Take 7 steps. Take 2 steps. Take 4 steps.
15
+ Take 8 steps.
16
+
17
+ Options:
18
+
19
+ - Yes
20
+
21
+ - No'
22
+ target: 'No'
23
+ - input: 'If you follow these instructions, do you return to the starting point?
24
+ Turn around. Take 1 step. Take 6 steps. Turn around. Take 6 steps. Take 9 steps.
25
+ Take 1 step.
26
+
27
+ Options:
28
+
29
+ - Yes
30
+
31
+ - No'
32
+ target: 'No'
33
+ - input: 'If you follow these instructions, do you return to the starting point?
34
+ Always face forward. Take 2 steps right. Take 9 steps left. Take 7 steps right.
35
+
36
+ Options:
37
+
38
+ - Yes
39
+
40
+ - No'
41
+ target: 'Yes'
42
+ include: _fewshot_template_yaml
43
+ task: bbh_fewshot_navigate
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/object_counting.yaml ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: object_counting
2
+ description: 'Questions that involve enumerating objects and asking the model to count
3
+ them.
4
+
5
+
6
+ '
7
+ doc_to_text: 'Q: {{input}}
8
+
9
+ A:'
10
+ fewshot_config:
11
+ sampler: first_n
12
+ samples:
13
+ - input: I have a blackberry, a clarinet, a nectarine, a plum, a strawberry, a banana,
14
+ a flute, an orange, and a violin. How many fruits do I have?
15
+ target: '6'
16
+ - input: I have an orange, a raspberry, two peaches, a blackberry, an apple, a grape,
17
+ a nectarine, and three plums. How many fruits do I have?
18
+ target: '11'
19
+ - input: I have a lettuce head, a head of broccoli, an onion, a stalk of celery,
20
+ two carrots, a garlic, and a yam. How many vegetables do I have?
21
+ target: '8'
22
+ include: _fewshot_template_yaml
23
+ task: bbh_fewshot_object_counting
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/penguins_in_a_table.yaml ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: penguins_in_a_table
2
+ description: 'Answer questions about a table of penguins and their attributes.
3
+
4
+
5
+ '
6
+ doc_to_text: 'Q: {{input}}
7
+
8
+ A:'
9
+ fewshot_config:
10
+ sampler: first_n
11
+ samples:
12
+ - input: 'Here is a table where the first line is a header and each subsequent line
13
+ is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard,
14
+ 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis
15
+ is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. We now add
16
+ a penguin to the table:
17
+
18
+ James, 12, 90, 12
19
+
20
+ How many penguins are less than 8 years old?
21
+
22
+ Options:
23
+
24
+ (A) 1
25
+
26
+ (B) 2
27
+
28
+ (C) 3
29
+
30
+ (D) 4
31
+
32
+ (E) 5'
33
+ target: (B)
34
+ - input: 'Here is a table where the first line is a header and each subsequent line
35
+ is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard,
36
+ 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis
37
+ is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. Which is
38
+ the youngest penguin?
39
+
40
+ Options:
41
+
42
+ (A) Louis
43
+
44
+ (B) Bernard
45
+
46
+ (C) Vincent
47
+
48
+ (D) Gwen
49
+
50
+ (E) James'
51
+ target: (B)
52
+ - input: 'Here is a table where the first line is a header and each subsequent line
53
+ is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard,
54
+ 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis
55
+ is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. What is
56
+ the name of the second penguin sorted by alphabetic order?
57
+
58
+ Options:
59
+
60
+ (A) Louis
61
+
62
+ (B) Bernard
63
+
64
+ (C) Vincent
65
+
66
+ (D) Gwen
67
+
68
+ (E) James'
69
+ target: (D)
70
+ include: _fewshot_template_yaml
71
+ task: bbh_fewshot_penguins_in_a_table
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/reasoning_about_colored_objects.yaml ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: reasoning_about_colored_objects
2
+ description: 'Answer extremely simple questions about the colors of objects on a surface.
3
+
4
+
5
+ '
6
+ doc_to_text: 'Q: {{input}}
7
+
8
+ A:'
9
+ fewshot_config:
10
+ sampler: first_n
11
+ samples:
12
+ - input: 'On the nightstand, there is a red pencil, a purple mug, a burgundy keychain,
13
+ a fuchsia teddy bear, a black plate, and a blue stress ball. What color is the
14
+ stress ball?
15
+
16
+ Options:
17
+
18
+ (A) red
19
+
20
+ (B) orange
21
+
22
+ (C) yellow
23
+
24
+ (D) green
25
+
26
+ (E) blue
27
+
28
+ (F) brown
29
+
30
+ (G) magenta
31
+
32
+ (H) fuchsia
33
+
34
+ (I) mauve
35
+
36
+ (J) teal
37
+
38
+ (K) turquoise
39
+
40
+ (L) burgundy
41
+
42
+ (M) silver
43
+
44
+ (N) gold
45
+
46
+ (O) black
47
+
48
+ (P) grey
49
+
50
+ (Q) purple
51
+
52
+ (R) pink'
53
+ target: (E)
54
+ - input: 'On the table, you see a bunch of objects arranged in a row: a purple paperclip,
55
+ a pink stress ball, a brown keychain, a green scrunchiephone charger, a mauve
56
+ fidget spinner, and a burgundy pen. What is the color of the object directly
57
+ to the right of the stress ball?
58
+
59
+ Options:
60
+
61
+ (A) red
62
+
63
+ (B) orange
64
+
65
+ (C) yellow
66
+
67
+ (D) green
68
+
69
+ (E) blue
70
+
71
+ (F) brown
72
+
73
+ (G) magenta
74
+
75
+ (H) fuchsia
76
+
77
+ (I) mauve
78
+
79
+ (J) teal
80
+
81
+ (K) turquoise
82
+
83
+ (L) burgundy
84
+
85
+ (M) silver
86
+
87
+ (N) gold
88
+
89
+ (O) black
90
+
91
+ (P) grey
92
+
93
+ (Q) purple
94
+
95
+ (R) pink'
96
+ target: (F)
97
+ - input: 'On the nightstand, you see the following items arranged in a row: a teal
98
+ plate, a burgundy keychain, a yellow scrunchiephone charger, an orange mug,
99
+ a pink notebook, and a grey cup. How many non-orange items do you see to the
100
+ left of the teal item?
101
+
102
+ Options:
103
+
104
+ (A) zero
105
+
106
+ (B) one
107
+
108
+ (C) two
109
+
110
+ (D) three
111
+
112
+ (E) four
113
+
114
+ (F) five
115
+
116
+ (G) six'
117
+ target: (A)
118
+ include: _fewshot_template_yaml
119
+ task: bbh_fewshot_reasoning_about_colored_objects
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/ruin_names.yaml ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: ruin_names
2
+ description: 'Select the humorous edit that ''ruins'' the input movie or musical artist
3
+ name.
4
+
5
+
6
+ '
7
+ doc_to_text: 'Q: {{input}}
8
+
9
+ A:'
10
+ fewshot_config:
11
+ sampler: first_n
12
+ samples:
13
+ - input: 'Which of the following is a humorous edit of this artist or movie name:
14
+ ''whitesnake''?
15
+
16
+ Options:
17
+
18
+ (A) whitesnape
19
+
20
+ (B) whitesnapke
21
+
22
+ (C) whitesnuake
23
+
24
+ (D) mwhitesnake'
25
+ target: (A)
26
+ - input: 'Which of the following is a humorous edit of this artist or movie name:
27
+ ''one of our dinosaurs is missing''?
28
+
29
+ Options:
30
+
31
+ (A) ofne of our dinosaurs is missing
32
+
33
+ (B) one af our dinosaurs is missing
34
+
35
+ (C) one of our dinosaurs is pissing
36
+
37
+ (D) one of our dinosaur is missing'
38
+ target: (C)
39
+ - input: 'Which of the following is a humorous edit of this artist or movie name:
40
+ ''counting crows''?
41
+
42
+ Options:
43
+
44
+ (A) countingy crows
45
+
46
+ (B) counting cows
47
+
48
+ (C) courting crows
49
+
50
+ (D) coutnting crows'
51
+ target: (B)
52
+ include: _fewshot_template_yaml
53
+ task: bbh_fewshot_ruin_names
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/salient_translation_error_detection.yaml ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: salient_translation_error_detection
2
+ description: 'Detect the type of error in an English translation of a German source
3
+ sentence.
4
+
5
+
6
+ '
7
+ doc_to_text: 'Q: {{input}}
8
+
9
+ A:'
10
+ fewshot_config:
11
+ sampler: first_n
12
+ samples:
13
+ - input: 'The following translations from German to English contain a particular
14
+ error. That error will be one of the following types: Named Entities: An entity
15
+ (names, places, locations, etc.) is changed to a different entity. Numerical
16
+ Values: Numerical values (ordinals or cardinals), dates, and/or units are changed.
17
+ Modifiers or Adjectives: The modifiers and adjectives pertaining to a noun are
18
+ changed. Negation or Antonyms: Introduce or remove a negation or change comparatives
19
+ to their antonyms. Facts: Trivial factual errors not pertaining to the above
20
+ classes are introduced in the translations. Dropped Content: A significant clause
21
+ in the translation is removed. Please identify that error. Source: In der Liste
22
+ der Baudenkmale in Lenzen (Elbe) sind alle Baudenkmale der brandenburgischen
23
+ Stadt Lenzen (Elbe) und ihrer Ortsteile aufgelistet.
24
+
25
+ Translation: In the list of architectural monuments in Lenzen all architectural
26
+ monuments of the Brandenburg city of Lenzen and its districts are listed.
27
+
28
+ The translation contains an error pertaining to
29
+
30
+ Options:
31
+
32
+ (A) Modifiers or Adjectives
33
+
34
+ (B) Numerical Values
35
+
36
+ (C) Negation or Antonyms
37
+
38
+ (D) Named Entities
39
+
40
+ (E) Dropped Content
41
+
42
+ (F) Facts'
43
+ target: (D)
44
+ - input: "The following translations from German to English contain a particular\
45
+ \ error. That error will be one of the following types: Named Entities: An entity\
46
+ \ (names, places, locations, etc.) is changed to a different entity. Numerical\
47
+ \ Values: Numerical values (ordinals or cardinals), dates, and/or units are\
48
+ \ changed. Modifiers or Adjectives: The modifiers and adjectives pertaining\
49
+ \ to a noun are changed. Negation or Antonyms: Introduce or remove a negation\
50
+ \ or change comparatives to their antonyms. Facts: Trivial factual errors not\
51
+ \ pertaining to the above classes are introduced in the translations. Dropped\
52
+ \ Content: A significant clause in the translation is removed. Please identify\
53
+ \ that error. Source: Auf dieser Seite sind die Baudenkm\xE4ler der oberbayerischen\
54
+ \ Gro\xDFen Kreisstadt Landsberg am Lech zusammengestellt.\nTranslation: On\
55
+ \ this page are compiled the architectural monuments of the town of Landsberg\
56
+ \ am Lech.\nThe translation contains an error pertaining to\nOptions:\n(A) Modifiers\
57
+ \ or Adjectives\n(B) Numerical Values\n(C) Negation or Antonyms\n(D) Named Entities\n\
58
+ (E) Dropped Content\n(F) Facts"
59
+ target: (E)
60
+ - input: "The following translations from German to English contain a particular\
61
+ \ error. That error will be one of the following types: Named Entities: An entity\
62
+ \ (names, places, locations, etc.) is changed to a different entity. Numerical\
63
+ \ Values: Numerical values (ordinals or cardinals), dates, and/or units are\
64
+ \ changed. Modifiers or Adjectives: The modifiers and adjectives pertaining\
65
+ \ to a noun are changed. Negation or Antonyms: Introduce or remove a negation\
66
+ \ or change comparatives to their antonyms. Facts: Trivial factual errors not\
67
+ \ pertaining to the above classes are introduced in the translations. Dropped\
68
+ \ Content: A significant clause in the translation is removed. Please identify\
69
+ \ that error. Source: \u0141eba ist eine Kleinstadt und ein Badeort im Powiat\
70
+ \ L\u0119borski der polnischen Woiwodschaft Pommern.\nTranslation: Eba is not\
71
+ \ a small town and seaside resort in the Powiat L\xE9borski county of the Pomeranian\
72
+ \ Voivodeship of Poland.\nThe translation contains an error pertaining to\n\
73
+ Options:\n(A) Modifiers or Adjectives\n(B) Numerical Values\n(C) Negation or\
74
+ \ Antonyms\n(D) Named Entities\n(E) Dropped Content\n(F) Facts"
75
+ target: (C)
76
+ include: _fewshot_template_yaml
77
+ task: bbh_fewshot_salient_translation_error_detection
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/snarks.yaml ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: snarks
2
+ description: 'Determine which of two sentences is sarcastic.
3
+
4
+
5
+ According to Cambridge University Dictionary, sarcasm is "the use of remarks that
6
+ clearly mean the opposite of what they say, made in order to hurt someone''s feelings
7
+ or to criticize something in a humorous way." Sarcastic sentences often contain
8
+ satirical or ironic utterances, hyperboles, ambivalent or witty remarks.
9
+
10
+
11
+ '
12
+ doc_to_text: 'Q: {{input}}
13
+
14
+ A:'
15
+ fewshot_config:
16
+ sampler: first_n
17
+ samples:
18
+ - input: 'Which statement is sarcastic?
19
+
20
+ Options:
21
+
22
+ (A) Yes, because having interests and actively researching them is a huge waste
23
+
24
+ (B) Yes, because having interests and actively researching them is a huge deal'
25
+ target: (A)
26
+ - input: 'Which statement is sarcastic?
27
+
28
+ Options:
29
+
30
+ (A) No one is going to disagree with you on this. Avoiding ad hominem attacks
31
+ really help your case
32
+
33
+ (B) No one is going to disagree with you on this. Ad hominem attacks really
34
+ help your case'
35
+ target: (B)
36
+ - input: 'Which statement is sarcastic?
37
+
38
+ Options:
39
+
40
+ (A) Consistency in the league''s punishments? What do you think this is supposed
41
+ to be, politics?
42
+
43
+ (B) Consistency in the league''s punishments? What do you think this is supposed
44
+ to be, moral?'
45
+ target: (A)
46
+ include: _fewshot_template_yaml
47
+ task: bbh_fewshot_snarks
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/sports_understanding.yaml ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: sports_understanding
2
+ description: 'Determine whether an artificially constructed sentence relating to sports
3
+ is plausible or not.
4
+
5
+
6
+ '
7
+ doc_to_text: 'Q: {{input}}
8
+
9
+ A:'
10
+ fewshot_config:
11
+ sampler: first_n
12
+ samples:
13
+ - input: Is the following sentence plausible? "Bam Adebayo scored a reverse layup
14
+ in the Western Conference Finals."
15
+ target: 'yes'
16
+ - input: Is the following sentence plausible? "Santi Cazorla scored a touchdown."
17
+ target: 'no'
18
+ - input: Is the following sentence plausible? "DeMar DeRozan was called for the
19
+ goal tend."
20
+ target: 'yes'
21
+ include: _fewshot_template_yaml
22
+ task: bbh_fewshot_sports_understanding
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/temporal_sequences.yaml ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: temporal_sequences
2
+ description: 'Task description: Answer questions about which times certain events
3
+ could have occurred.
4
+
5
+
6
+ '
7
+ doc_to_text: 'Q: {{input}}
8
+
9
+ A:'
10
+ fewshot_config:
11
+ sampler: first_n
12
+ samples:
13
+ - input: 'Today, Emily went to the museum. Between what times could they have gone?
14
+
15
+ We know that:
16
+
17
+ Emily woke up at 1pm.
18
+
19
+ Elizabeth saw Emily reading at the library from 2pm to 4pm.
20
+
21
+ Jessica saw Emily watching a movie at the theater from 4pm to 5pm.
22
+
23
+ Leslie saw Emily waiting at the airport from 5pm to 6pm.
24
+
25
+ William saw Emily buying clothes at the mall from 6pm to 7pm.
26
+
27
+ The museum was closed after 7pm.
28
+
29
+ Between what times could Emily have gone to the museum?
30
+
31
+ Options:
32
+
33
+ (A) 1pm to 2pm
34
+
35
+ (B) 6pm to 7pm
36
+
37
+ (C) 5pm to 6pm
38
+
39
+ (D) 2pm to 4pm'
40
+ target: (A)
41
+ - input: 'Today, Elizabeth went to the amusement park. Between what times could
42
+ they have gone?
43
+
44
+ We know that:
45
+
46
+ Elizabeth woke up at 7am.
47
+
48
+ David saw Elizabeth fixing their computer at the electronic store from 1pm to
49
+ 2pm.
50
+
51
+ Sarah saw Elizabeth playing tennis at the tennis court from 2pm to 3pm.
52
+
53
+ Susan saw Elizabeth walking towards the Statue of Liberty from 3pm to 6pm.
54
+
55
+ Andrew saw Elizabeth taking photos near the Eiffel Tower from 6pm to 9pm.
56
+
57
+ Emily saw Elizabeth getting a coffee at the cafe from 9pm to 10pm.
58
+
59
+ The amusement park was closed after 10pm.
60
+
61
+ Between what times could Elizabeth have gone to the amusement park?
62
+
63
+ Options:
64
+
65
+ (A) 7am to 1pm
66
+
67
+ (B) 9pm to 10pm
68
+
69
+ (C) 1pm to 2pm
70
+
71
+ (D) 3pm to 6pm'
72
+ target: (A)
73
+ - input: 'Today, Tiffany went to the beach. Between what times could they have gone?
74
+
75
+ We know that:
76
+
77
+ Tiffany woke up at 5am.
78
+
79
+ Betty saw Tiffany getting a coffee at the cafe from 5am to 6am.
80
+
81
+ Jessica saw Tiffany working at the office from 6am to 9am.
82
+
83
+ John saw Tiffany stretching at a yoga studio from 9am to 12pm.
84
+
85
+ Sean saw Tiffany sitting on a rooftop from 12pm to 2pm.
86
+
87
+ Sarah saw Tiffany playing tennis at the tennis court from 2pm to 3pm.
88
+
89
+ The beach was closed after 4pm.
90
+
91
+ Between what times could Tiffany have gone to the beach?
92
+
93
+ Options:
94
+
95
+ (A) 9am to 12pm
96
+
97
+ (B) 12pm to 2pm
98
+
99
+ (C) 5am to 6am
100
+
101
+ (D) 3pm to 4pm'
102
+ target: (D)
103
+ include: _fewshot_template_yaml
104
+ task: bbh_fewshot_temporal_sequences
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/tracking_shuffled_objects_five_objects.yaml ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: tracking_shuffled_objects_five_objects
2
+ description: 'A task requiring determining the final positions of a set of objects
3
+ given their initial positions and a description of a sequence of swaps.
4
+
5
+
6
+ '
7
+ doc_to_text: 'Q: {{input}}
8
+
9
+ A:'
10
+ fewshot_config:
11
+ sampler: first_n
12
+ samples:
13
+ - input: 'Alice, Bob, and Claire are playing a game. At the start of the game, they
14
+ are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire
15
+ has a pink ball.
16
+
17
+ As the game progresses, pairs of players trade balls. First, Claire and Alice
18
+ swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls.
19
+ At the end of the game, Bob has the
20
+
21
+ Options:
22
+
23
+ (A) yellow ball
24
+
25
+ (B) blue ball
26
+
27
+ (C) pink ball'
28
+ target: (A)
29
+ - input: 'Alice, Bob, and Claire are playing a game. At the start of the game, they
30
+ are each holding a ball: Alice has a white ball, Bob has a purple ball, and
31
+ Claire has a pink ball.
32
+
33
+ As the game progresses, pairs of players trade balls. First, Bob and Alice swap
34
+ balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At
35
+ the end of the game, Alice has the
36
+
37
+ Options:
38
+
39
+ (A) white ball
40
+
41
+ (B) purple ball
42
+
43
+ (C) pink ball'
44
+ target: (C)
45
+ - input: 'Alice, Bob, and Claire are dancers at a square dance. At the start of
46
+ a song, they each have a partner: Alice is dancing with Lola, Bob is dancing
47
+ with Rodrigo, and Claire is dancing with Patrick.
48
+
49
+ Throughout the song, the dancers often trade partners. First, Alice and Bob
50
+ switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice
51
+ switch partners. At the end of the dance, Alice is dancing with
52
+
53
+ Options:
54
+
55
+ (A) Lola
56
+
57
+ (B) Rodrigo
58
+
59
+ (C) Patrick'
60
+ target: (C)
61
+ include: _fewshot_template_yaml
62
+ task: bbh_fewshot_tracking_shuffled_objects_five_objects
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/tracking_shuffled_objects_seven_objects.yaml ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: tracking_shuffled_objects_seven_objects
2
+ description: 'A task requiring determining the final positions of a set of objects
3
+ given their initial positions and a description of a sequence of swaps.
4
+
5
+
6
+ '
7
+ doc_to_text: 'Q: {{input}}
8
+
9
+ A:'
10
+ fewshot_config:
11
+ sampler: first_n
12
+ samples:
13
+ - input: 'Alice, Bob, and Claire are playing a game. At the start of the game, they
14
+ are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire
15
+ has a pink ball.
16
+
17
+ As the game progresses, pairs of players trade balls. First, Claire and Alice
18
+ swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls.
19
+ At the end of the game, Bob has the
20
+
21
+ Options:
22
+
23
+ (A) yellow ball
24
+
25
+ (B) blue ball
26
+
27
+ (C) pink ball'
28
+ target: (A)
29
+ - input: 'Alice, Bob, and Claire are playing a game. At the start of the game, they
30
+ are each holding a ball: Alice has a white ball, Bob has a purple ball, and
31
+ Claire has a pink ball.
32
+
33
+ As the game progresses, pairs of players trade balls. First, Bob and Alice swap
34
+ balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At
35
+ the end of the game, Alice has the
36
+
37
+ Options:
38
+
39
+ (A) white ball
40
+
41
+ (B) purple ball
42
+
43
+ (C) pink ball'
44
+ target: (C)
45
+ - input: 'Alice, Bob, and Claire are dancers at a square dance. At the start of
46
+ a song, they each have a partner: Alice is dancing with Lola, Bob is dancing
47
+ with Rodrigo, and Claire is dancing with Patrick.
48
+
49
+ Throughout the song, the dancers often trade partners. First, Alice and Bob
50
+ switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice
51
+ switch partners. At the end of the dance, Alice is dancing with
52
+
53
+ Options:
54
+
55
+ (A) Lola
56
+
57
+ (B) Rodrigo
58
+
59
+ (C) Patrick'
60
+ target: (C)
61
+ include: _fewshot_template_yaml
62
+ task: bbh_fewshot_tracking_shuffled_objects_seven_objects
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/tracking_shuffled_objects_three_objects.yaml ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: tracking_shuffled_objects_three_objects
2
+ description: 'A task requiring determining the final positions of a set of objects
3
+ given their initial positions and a description of a sequence of swaps.
4
+
5
+
6
+ '
7
+ doc_to_text: 'Q: {{input}}
8
+
9
+ A:'
10
+ fewshot_config:
11
+ sampler: first_n
12
+ samples:
13
+ - input: 'Alice, Bob, and Claire are playing a game. At the start of the game, they
14
+ are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire
15
+ has a pink ball.
16
+
17
+ As the game progresses, pairs of players trade balls. First, Claire and Alice
18
+ swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls.
19
+ At the end of the game, Bob has the
20
+
21
+ Options:
22
+
23
+ (A) yellow ball
24
+
25
+ (B) blue ball
26
+
27
+ (C) pink ball'
28
+ target: (A)
29
+ - input: 'Alice, Bob, and Claire are playing a game. At the start of the game, they
30
+ are each holding a ball: Alice has a white ball, Bob has a purple ball, and
31
+ Claire has a pink ball.
32
+
33
+ As the game progresses, pairs of players trade balls. First, Bob and Alice swap
34
+ balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At
35
+ the end of the game, Alice has the
36
+
37
+ Options:
38
+
39
+ (A) white ball
40
+
41
+ (B) purple ball
42
+
43
+ (C) pink ball'
44
+ target: (C)
45
+ - input: 'Alice, Bob, and Claire are dancers at a square dance. At the start of
46
+ a song, they each have a partner: Alice is dancing with Lola, Bob is dancing
47
+ with Rodrigo, and Claire is dancing with Patrick.
48
+
49
+ Throughout the song, the dancers often trade partners. First, Alice and Bob
50
+ switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice
51
+ switch partners. At the end of the dance, Alice is dancing with
52
+
53
+ Options:
54
+
55
+ (A) Lola
56
+
57
+ (B) Rodrigo
58
+
59
+ (C) Patrick'
60
+ target: (C)
61
+ include: _fewshot_template_yaml
62
+ task: bbh_fewshot_tracking_shuffled_objects_three_objects
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/web_of_lies.yaml ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: web_of_lies
2
+ description: 'Evaluate a random boolean function expressed as a word problem.
3
+
4
+
5
+ '
6
+ doc_to_text: 'Q: {{input}}
7
+
8
+ A:'
9
+ fewshot_config:
10
+ sampler: first_n
11
+ samples:
12
+ - input: 'Question: Fidel tells the truth. Jerry says Fidel tells the truth. Vina
13
+ says Jerry tells the truth. Millicent says Vina lies. Raymond says Millicent
14
+ lies. Does Raymond tell the truth?'
15
+ target: 'Yes'
16
+ - input: 'Question: Kristian lies. Millie says Kristian lies. Maybelle says Millie
17
+ tells the truth. Fidel says Maybelle lies. Leda says Fidel lies. Does Leda tell
18
+ the truth?'
19
+ target: 'Yes'
20
+ - input: 'Question: Kristian tells the truth. Michaela says Kristian lies. Raymond
21
+ says Michaela tells the truth. Osvaldo says Raymond tells the truth. Jamey says
22
+ Osvaldo tells the truth. Does Jamey tell the truth?'
23
+ target: 'No'
24
+ include: _fewshot_template_yaml
25
+ task: bbh_fewshot_web_of_lies
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/word_sorting.yaml ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_name: word_sorting
2
+ description: 'Sort a list of words.
3
+
4
+
5
+ '
6
+ doc_to_text: 'Q: {{input}}
7
+
8
+ A:'
9
+ fewshot_config:
10
+ sampler: first_n
11
+ samples:
12
+ - input: 'Sort the following words alphabetically: List: oven costume counterpart'
13
+ target: costume counterpart oven
14
+ - input: 'Sort the following words alphabetically: List: hypochlorite ponderosa
15
+ phone credulity'
16
+ target: credulity hypochlorite phone ponderosa
17
+ - input: 'Sort the following words alphabetically: List: newt arson parthia seismography
18
+ mugho aspect census'
19
+ target: arson aspect census mugho newt parthia seismography
20
+ include: _fewshot_template_yaml
21
+ task: bbh_fewshot_word_sorting
scripts/yans/lm-evaluation-harness/lm_eval/tasks/kobest/README.md ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # KoBEST
2
+
3
+ ### Paper
4
+ Title: `KOBEST: Korean Balanced Evaluation of Significant Tasks`
5
+
6
+ Abstract: https://arxiv.org/abs/2204.04541
7
+
8
+ A well-formulated benchmark plays a critical role in spurring advancements in the natural language processing (NLP) field, as it allows objective and precise evaluation of diverse models. As modern language models (LMs) have become more elaborate and sophisticated, more difficult benchmarks that require linguistic knowledge and reasoning have been proposed. However, most of these benchmarks only support English, and great effort is necessary to construct benchmarks for other low resource languages. To this end, we propose a new benchmark named Korean balanced evaluation of significant tasks (KoBEST), which consists of five Korean-language downstream tasks. Professional Korean linguists designed the tasks that require advanced Korean linguistic knowledge. Moreover, our data is purely annotated by humans and thoroughly reviewed to guarantee high data quality. We also provide baseline models and human performance results. Our dataset is available on the Huggingface.
9
+
10
+
11
+ Homepage: https://huggingface.co/datasets/skt/kobest_v1
12
+
13
+ ### Groups and Tasks
14
+
15
+ #### Groups
16
+
17
+ - `kobest`
18
+
19
+ #### Tasks
20
+
21
+ - `kobest_boolq`
22
+ - `kobest_copa`
23
+ - `kobest_hellaswag`
24
+ - `kobest_sentineg`
25
+ - `kobest_wic`
26
+
27
+
28
+ ### Citation
29
+
30
+ @misc{kim2022kobest,
31
+ author={Dohyeong Kim, Myeongjun Jang, Deuk Sin Kwon, Eric Davis},
32
+ title={KOBEST: Korean Balanced Evaluation of Significant Tasks},
33
+ DOI={https://doi.org/10.48550/arXiv.2204.04541},
34
+ publisher={arXiv},
35
+ year={2022},
36
+ month={Apr}
37
+ }
scripts/yans/lm-evaluation-harness/lm_eval/tasks/kobest/kobest_boolq.yaml ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ tag:
2
+ - kobest
3
+ task: kobest_boolq
4
+ dataset_path: skt/kobest_v1
5
+ dataset_name: boolq
6
+ output_type: multiple_choice
7
+ training_split: train
8
+ validation_split: validation
9
+ test_split: test
10
+ doc_to_text: "{{paragraph}} 질문: {{question}} 답변: "
11
+ doc_to_target: "{{label}}"
12
+ doc_to_choice: ["아니오", "예"]
13
+ metric_list:
14
+ - metric: acc
15
+ aggregation: mean
16
+ higher_is_better: True
17
+ - metric: f1
18
+ aggregation: !function utils.macro_f1_score
19
+ average: macro
20
+ hf_evaluate: true
21
+ higher_is_better: True
22
+ metadata:
23
+ version: 1.0