diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_fra.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_fra.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e6adb6c8aa4e50c6efca737792907cb658c30627
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_fra.yaml
@@ -0,0 +1,3 @@
+dataset_name: fra
+include: afrimmlu_common_yaml
+task: afrimmlu_direct_fra
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_hau.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_hau.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9cc9a1ae7acc7318faf68a241f68b0d5cba93978
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_hau.yaml
@@ -0,0 +1,3 @@
+dataset_name: hau
+include: afrimmlu_common_yaml
+task: afrimmlu_direct_hau
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_lin.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_lin.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..55363ed93772284fc54386592ae827c03246d681
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_lin.yaml
@@ -0,0 +1,3 @@
+dataset_name: lin
+include: afrimmlu_common_yaml
+task: afrimmlu_direct_lin
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_lug.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_lug.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0d484427eda8fcd4b645b3f90b191f075cb88ce9
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_lug.yaml
@@ -0,0 +1,3 @@
+dataset_name: lug
+include: afrimmlu_common_yaml
+task: afrimmlu_direct_lug
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_orm.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_orm.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..763eb8a75f894797185436d3a83c9fd57393f4ac
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_orm.yaml
@@ -0,0 +1,3 @@
+dataset_name: orm
+include: afrimmlu_common_yaml
+task: afrimmlu_direct_orm
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_sna.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_sna.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ed9e69af392838290bac14d08259585c56daace8
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_sna.yaml
@@ -0,0 +1,3 @@
+dataset_name: sna
+include: afrimmlu_common_yaml
+task: afrimmlu_direct_sna
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_sot.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_sot.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..acdba0fdccf12f73004669dbed1b7cbee9ded24f
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_sot.yaml
@@ -0,0 +1,3 @@
+dataset_name: sot
+include: afrimmlu_common_yaml
+task: afrimmlu_direct_sot
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_swa.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_swa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c1aa82b0b1d44314c337b904c346806cb3c720a4
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/afrimmlu_direct_swa.yaml
@@ -0,0 +1,3 @@
+dataset_name: swa
+include: afrimmlu_common_yaml
+task: afrimmlu_direct_swa
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/utils.py b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1bb9162f0fbc68807db68134970ae2636980cbf
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/direct/utils.py
@@ -0,0 +1,37 @@
+import ast
+
+from lm_eval.utils import weighted_f1_score  # noqa: F401  (re-exported for !function refs in task YAMLs)
+
+
+def doc_to_choice(doc):
+    """Return the list of answer options for one AfriMMLU document."""
+    # doc["choices"] is a stringified Python list (e.g. "['a', 'b', 'c', 'd']");
+    # ast.literal_eval parses it safely, unlike eval() on untrusted dataset text.
+    return ast.literal_eval(doc["choices"])
+
+
+def doc_to_text(doc):
+    """Format one AfriMMLU document into the direct (in-language) prompt."""
+    output = """You are a highly knowledgeable and intelligent artificial intelligence
+    model answers multiple-choice questions about {subject}
+
+    Question: {question}
+
+    Choices:
+    A: {choice1}
+    B: {choice2}
+    C: {choice3}
+    D: {choice4}
+
+    Answer: """
+
+    choices = ast.literal_eval(doc["choices"])
+    text = output.format(
+        subject=doc["subject"],
+        question=doc["question"],
+        choice1=choices[0],
+        choice2=choices[1],
+        choice3=choices[2],
+        choice4=choices[3],
+    )
+    return text
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_common_translate_yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_common_translate_yaml
new file mode 100644
index 0000000000000000000000000000000000000000..515c007228583bc0db97fa4db6f40d0a5c7176fd
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_common_translate_yaml
@@ -0,0 +1,34 @@
+group:
+  - afrimmlu_translate
+task: null
+dataset_path: masakhane/afrimmlu-translate-test
+dataset_name: null
+output_type: multiple_choice
+test_split: test
+doc_to_text: !function utils.doc_to_text
+doc_to_target: "{{['A', 'B', 'C', 'D'].index(answer)}}"
+doc_to_choice: !function utils.doc_to_choice
+should_decontaminate: true
+doc_to_decontamination_query: "Question: {{question}}\nAnswer:"
+metric_list:
+  - metric: f1
+    aggregation: !function utils.weighted_f1_score
+    # aggregation: mean
+    average: weighted
+    hf_evaluate: true
+    higher_is_better: true
+    ignore_case: true
+    ignore_punctuation: true
+    regexes_to_ignore:
+      - ","
+      - "\\$"
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+    ignore_case: true
+    ignore_punctuation: true
+    regexes_to_ignore:
+      - ","
+      - "\\$"
+metadata:
+  version: 1.0
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_amh.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_amh.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ac88ffa9500701e8bbb2b5c64d1f4c9f2ec856bc
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_amh.yaml
@@ -0,0 +1,3 @@
+dataset_name: amh
+include: afrimmlu_common_translate_yaml
+task: afrimmlu_translate_amh
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_eng.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_eng.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0be98beedd86223dd14c1abbf51dbe93c7ff658a
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_eng.yaml
@@ -0,0 +1,3 @@
+dataset_name: eng
+include: afrimmlu_common_translate_yaml
+task: afrimmlu_translate_eng
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_ewe.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_ewe.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..624342b91f383479c7ef340bfb80ce305608cf61
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_ewe.yaml
@@ -0,0 +1,3 @@
+dataset_name: ewe
+include: afrimmlu_common_translate_yaml
+task: afrimmlu_translate_ewe
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_fra.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_fra.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c4fd7e1fc774b6dd987e6c35d3a3fadbf6d577c4
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_fra.yaml
@@ -0,0 +1,3 @@
+dataset_name: fra
+include: afrimmlu_common_translate_yaml
+task: afrimmlu_translate_fra
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_hau.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_hau.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..aaeb415fa2a00516ea3a84133066b7eae009f017
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_hau.yaml
@@ -0,0 +1,3 @@
+dataset_name: hau
+include: afrimmlu_common_translate_yaml
+task: afrimmlu_translate_hau
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_ibo.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_ibo.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..93fb24e8c3fa799a41c022a708748bb5e7341631
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_ibo.yaml
@@ -0,0 +1,3 @@
+dataset_name: ibo
+include: afrimmlu_common_translate_yaml
+task: afrimmlu_translate_ibo
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_kin.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_kin.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f39f666840626dcf6ea61a196be702ec1c3e3308
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_kin.yaml
@@ -0,0 +1,3 @@
+dataset_name: kin
+include: afrimmlu_common_translate_yaml
+task: afrimmlu_translate_kin
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_lin.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_lin.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c935ee47382973e3dbe833987ea083bd3023b5cd
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_lin.yaml
@@ -0,0 +1,3 @@
+dataset_name: lin
+include: afrimmlu_common_translate_yaml
+task: afrimmlu_translate_lin
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_lug.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_lug.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..72e4bce0113c8473eabf68a7d2e43ba2eabc965c
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_lug.yaml
@@ -0,0 +1,3 @@
+dataset_name: lug
+include: afrimmlu_common_translate_yaml
+task: afrimmlu_translate_lug
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_orm.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_orm.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3ff902499480d35576cb84453406a5d484349816
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_orm.yaml
@@ -0,0 +1,3 @@
+dataset_name: orm
+include: afrimmlu_common_translate_yaml
+task: afrimmlu_translate_orm
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_sna.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_sna.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9979740a9bf6194d9a9c4db0f0b4845312f1aed7
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_sna.yaml
@@ -0,0 +1,3 @@
+dataset_name: sna
+include: afrimmlu_common_translate_yaml
+task: afrimmlu_translate_sna
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_sot.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_sot.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..deb2b9b81d544140bfa7e720d0b544089b39bfcd
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_sot.yaml
@@ -0,0 +1,3 @@
+dataset_name: sot
+include: afrimmlu_common_translate_yaml
+task: afrimmlu_translate_sot
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_swa.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_swa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e58d90bc69357a3b9c166e8f29000894daa8b108
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_swa.yaml
@@ -0,0 +1,3 @@
+dataset_name: swa
+include: afrimmlu_common_translate_yaml
+task: afrimmlu_translate_swa
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_twi.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_twi.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..51a2d26ae0563acda4972b272de4c0d6de81146f
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_twi.yaml
@@ -0,0 +1,3 @@
+dataset_name: twi
+include: afrimmlu_common_translate_yaml
+task: afrimmlu_translate_twi
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_wol.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_wol.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..006b684782c853a432d9e694abe525aaeb9609ca
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_wol.yaml
@@ -0,0 +1,3 @@
+dataset_name: wol
+include: afrimmlu_common_translate_yaml
+task: afrimmlu_translate_wol
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_xho.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_xho.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c0bdf4471b2178c67d7f6e1ae9c5fba16b3b7710
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_xho.yaml
@@ -0,0 +1,3 @@
+dataset_name: xho
+include: afrimmlu_common_translate_yaml
+task: afrimmlu_translate_xho
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_yor.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_yor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0e7ba6005b591141dc84efa454196458c1261e8c
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_yor.yaml
@@ -0,0 +1,3 @@
+dataset_name: yor
+include: afrimmlu_common_translate_yaml
+task: afrimmlu_translate_yor
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_zul.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_zul.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a18d251cc8f838fa2578019475b089c4b61ecf65
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/afrimmlu_translate_zul.yaml
@@ -0,0 +1,3 @@
+dataset_name: zul
+include: afrimmlu_common_translate_yaml
+task: afrimmlu_translate_zul
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/utils.py b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d02b342b2e3c9f3d3bd66d3f62330aa53c9159c
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/afrimmlu/translate/utils.py
@@ -0,0 +1,39 @@
+import ast
+
+from lm_eval.utils import weighted_f1_score  # noqa: F401  (re-exported for !function refs in task YAMLs)
+
+
+def doc_to_choice(doc):
+    """Return the list of answer options for one AfriMMLU document."""
+    # doc["choices"] is a stringified Python list (e.g. "['a', 'b', 'c', 'd']");
+    # ast.literal_eval parses it safely, unlike eval() on untrusted dataset text.
+    return ast.literal_eval(doc["choices"])
+
+
+def doc_to_text(doc):
+    """Format one AfriMMLU document into the translate-test prompt."""
+    # Choice lines originally opened with '' but closed with ''' (mismatched
+    # fencing); use ''' on both sides, matching the fencing of the question.
+    output = """You are a highly knowledgeable and intelligent artificial intelligence
+    model answers multiple-choice questions about '{subject}'
+
+    Question: '''{question}'''
+
+    Choices:
+    A: '''{choice1}'''
+    B: '''{choice2}'''
+    C: '''{choice3}'''
+    D: '''{choice4}'''
+
+    Answer: """
+
+    choices = ast.literal_eval(doc["choices"])
+    text = output.format(
+        subject=doc["subject"],
+        question=doc["question"],
+        choice1=choices[0],
+        choice2=choices[1],
+        choice3=choices[2],
+        choice4=choices[3],
+    )
+    return text
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/flan/_held_in_template_yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/flan/_held_in_template_yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c19b47cdae40bbc0ff91236d2048992f314172f0
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/flan/_held_in_template_yaml
@@ -0,0 +1,14 @@
+output_type: generate_until
+test_split: null
+doc_to_choice: null
+metric_list:
+  - metric: exact_match
+    aggregation: mean
+    higher_is_better: true
+generation_kwargs:
+  until:
+    - ""  # NOTE(review): empty stop string — presumably meant a real EOS marker (e.g. "</s>"); confirm against upstream flan template
+  do_sample: false
+  temperature: 0.0
+metadata:
+  version: 1.0
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/flan/flan_held_in.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/flan/flan_held_in.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c57d265492916e76d2938feb0f1ab688e3562ca9
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/flan/flan_held_in.yaml
@@ -0,0 +1,352 @@
+group: flan_held_in
+group_alias: Flan (Held-In)
+task:
+ # ANLI R1
+ - group: anli_r1_flan
+ group_alias: ANLI R1
+ aggregate_metric_list:
+ - metric: acc
+ weight_by_size: True
+ task:
+ - task: anli_r1_prompt-0
+ task_alias: prompt-0
+ include: _held_in_template_yaml
+ doc_to_text: "{{premise}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r1_prompt-1
+ task_alias: prompt-1
+ include: _held_in_template_yaml
+ doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r1_prompt-2
+ task_alias: prompt-2
+ include: _held_in_template_yaml
+ doc_to_text: "{{premise}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r1_prompt-3
+ task_alias: prompt-3
+ include: _held_in_template_yaml
+ doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r1_prompt-4
+ task_alias: prompt-4
+ include: _held_in_template_yaml
+ doc_to_text: "{{premise}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r1_prompt-5
+ task_alias: prompt-5
+ include: _held_in_template_yaml
+ doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{premise}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r1_prompt-6
+ task_alias: prompt-6
+ include: _held_in_template_yaml
+ doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r1_prompt-7
+ task_alias: prompt-7
+ include: _held_in_template_yaml
+ doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r1_prompt-8
+ task_alias: prompt-8
+ include: _held_in_template_yaml
+ doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ # ANLI R2
+ - group: anli_r2_flan
+ group_alias: ANLI R2
+ aggregate_metric_list:
+ - metric: acc
+ weight_by_size: True
+ task:
+ - task: anli_r2_prompt-0
+ task_alias: prompt-0
+ include: _held_in_template_yaml
+ doc_to_text: "{{premise}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r2_prompt-1
+ task_alias: prompt-1
+ include: _held_in_template_yaml
+ doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r2_prompt-2
+ task_alias: prompt-2
+ include: _held_in_template_yaml
+ doc_to_text: "{{premise}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r2_prompt-3
+ task_alias: prompt-3
+ include: _held_in_template_yaml
+ doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r2_prompt-4
+ task_alias: prompt-4
+ include: _held_in_template_yaml
+ doc_to_text: "{{premise}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r2_prompt-5
+ task_alias: prompt-5
+ include: _held_in_template_yaml
+ doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{premise}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r2_prompt-6
+ task_alias: prompt-6
+ include: _held_in_template_yaml
+ doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r2_prompt-7
+ task_alias: prompt-7
+ include: _held_in_template_yaml
+ doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r2_prompt-8
+ task_alias: prompt-8
+ include: _held_in_template_yaml
+ doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ # ANLI R3
+ - group: anli_r3_flan
+ group_alias: ANLI R3
+ aggregate_metric_list:
+ - metric: acc
+ weight_by_size: True
+ task:
+ - task: anli_r3_prompt-0
+ task_alias: prompt-0
+ include: _held_in_template_yaml
+ doc_to_text: "{{premise}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r3_prompt-1
+ task_alias: prompt-1
+ include: _held_in_template_yaml
+ doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r3_prompt-2
+ task_alias: prompt-2
+ include: _held_in_template_yaml
+ doc_to_text: "{{premise}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r3_prompt-3
+ task_alias: prompt-3
+ include: _held_in_template_yaml
+ doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r3_prompt-4
+ task_alias: prompt-4
+ include: _held_in_template_yaml
+ doc_to_text: "{{premise}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r3_prompt-5
+ task_alias: prompt-5
+ include: _held_in_template_yaml
+ doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{premise}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r3_prompt-6
+ task_alias: prompt-6
+ include: _held_in_template_yaml
+ doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r3_prompt-7
+ task_alias: prompt-7
+ include: _held_in_template_yaml
+ doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ - task: anli_r3_prompt-8
+ task_alias: prompt-8
+ include: _held_in_template_yaml
+ doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
+ # Arc Easy
+ - group: arc_easy_flan
+ group_alias: Arc Easy
+ aggregate_metric_list:
+ - metric: acc
+ weight_by_size: True
+ task:
+ - task: arc_easy_prompt-0
+ task_alias: prompt-0
+ include: _held_in_template_yaml
+ doc_to_text: "{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
+ - task: arc_easy_prompt-1
+ task_alias: prompt-1
+ include: _held_in_template_yaml
+ doc_to_text: "Question: {{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}\nAnswer:"
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
+ - task: arc_easy_prompt-2
+ task_alias: prompt-2
+ include: _held_in_template_yaml
+ doc_to_text: "Question: {{question}}\n\nWhat is the correct answer to the question from the following choices?\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
+ - task: arc_easy_prompt-3
+ task_alias: prompt-3
+ include: _held_in_template_yaml
+ doc_to_text: "Q: {{question}}\nWhat is the correct answer to this question?\nOPTIONS:\n- {{choices.text|join('\n- ')}}...A:"
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
+ - task: arc_easy_prompt-4
+ task_alias: prompt-4
+ include: _held_in_template_yaml
+ doc_to_text: "Choose your answer?\n\n{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
+ - task: arc_easy_prompt-5
+ task_alias: prompt-5
+ include: _held_in_template_yaml
+ doc_to_text: "Answer the question\n\n{{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
+ - task: arc_easy_prompt-6
+ task_alias: prompt-6
+ include: _held_in_template_yaml
+ doc_to_text: "{{question}}\n\nPick the answer from these options\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
+ # Arc Challenge
+ - group: arc_challenge_flan
+ group_alias: Arc Challenge
+ aggregate_metric_list:
+ - metric: acc
+ weight_by_size: True
+ task:
+ - task: arc_challenge_prompt-0
+ task_alias: prompt-0
+ include: _held_in_template_yaml
+ doc_to_text: "{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
+ - task: arc_challenge_prompt-1
+ task_alias: prompt-1
+ include: _held_in_template_yaml
+ doc_to_text: "Question: {{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}\nAnswer:"
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
+ - task: arc_challenge_prompt-2
+ task_alias: prompt-2
+ include: _held_in_template_yaml
+ doc_to_text: "Question: {{question}}\n\nWhat is the correct answer to the question from the following choices?\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
+ - task: arc_challenge_prompt-3
+ task_alias: prompt-3
+ include: _held_in_template_yaml
+ doc_to_text: "Q: {{question}}\nWhat is the correct answer to this question?\nOPTIONS:\n- {{choices.text|join('\n- ')}}...A:"
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
+ - task: arc_challenge_prompt-4
+ task_alias: prompt-4
+ include: _held_in_template_yaml
+ doc_to_text: "Choose your answer?\n\n{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
+ - task: arc_challenge_prompt-5
+ task_alias: prompt-5
+ include: _held_in_template_yaml
+ doc_to_text: "Answer the question\n\n{{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
+ - task: arc_challenge_prompt-6
+ task_alias: prompt-6
+ include: _held_in_template_yaml
+ doc_to_text: "{{question}}\n\nPick the answer from these options\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
+ # BoolQ
+ - group: boolq_flan
+ group_alias: BoolQ
+ aggregate_metric_list:
+ - metric: acc
+ weight_by_size: True
+ task:
+ - task: boolq_prompt-0
+ task_alias: prompt-0
+ include: _held_in_template_yaml
+ doc_to_text: "{{passage}}\n\nCan we conclude that {{question}}?\n\nOPTIONS:\n- no\n- yes"
+ doc_to_target: "{{['no', 'yes'][label]}}"
+ - task: boolq_prompt-1
+ task_alias: prompt-1
+ include: _held_in_template_yaml
+ doc_to_text: "{{passage}}\n\nIs it true that {{question}}?\n\nOPTIONS:\n- no\n- yes"
+ doc_to_target: "{{['no', 'yes'][label]}}"
+ - task: boolq_prompt-2
+ task_alias: prompt-2
+ include: _held_in_template_yaml
+ doc_to_text: "{{passage}}\n\n{{question}}?\n\nOPTIONS:\n- no\n- yes"
+ doc_to_target: "{{['no', 'yes'][label]}}"
+ - task: boolq_prompt-3
+ task_alias: prompt-3
+ include: _held_in_template_yaml
+ doc_to_text: "Text: {{passage}}\n\nQuestion: {{question}}?\n\nOPTIONS:\n- no\n- yes"
+ doc_to_target: "{{['no', 'yes'][label]}}"
+ - task: boolq_prompt-4
+ task_alias: prompt-4
+ include: _held_in_template_yaml
+ doc_to_text: "{{passage}}\n\nWhat's the best answer to this question: {{question}}?\n\nOPTIONS:\n- no\n- yes"
+ doc_to_target: "{{['no', 'yes'][label]}}"
+ - task: boolq_prompt-5
+ task_alias: prompt-5
+ include: _held_in_template_yaml
+ doc_to_text: "{{passage}}\nBased on the above text what's the best answer to this question: {{question}}?\n\nOPTIONS:\n- no\n- yes"
+ doc_to_target: "{{['no', 'yes'][label]}}"
+ - task: boolq_prompt-6
+ task_alias: prompt-6
+ include: _held_in_template_yaml
+ doc_to_text: "{{passage}}\nAnswer this question making sure that the answer is supposed by the text: {{question}}?\n\nOPTIONS:\n- no\n- yes"
+ doc_to_target: "{{['no', 'yes'][label]}}"
+ - task: boolq_prompt-7
+ task_alias: prompt-7
+ include: _held_in_template_yaml
+ doc_to_text: "{{passage}}\n\nIs the following statement correct based on the text\n\n{{question}}\n\nOPTIONS:\n- no\n- yes"
+ doc_to_target: "{{['no', 'yes'][label]}}"
+ - task: boolq_prompt-8
+ task_alias: prompt-8
+ include: _held_in_template_yaml
+ doc_to_text: "{{passage}}\n\nIs this statement correct \"{{question}}\"?\n\nOPTIONS:\n- no\n- yes"
+ doc_to_target: "{{['no', 'yes'][label]}}"
+ - task: boolq_prompt-9
+ task_alias: prompt-9
+ include: _held_in_template_yaml
+ doc_to_text: "Is it true that {{question}} based on the following text?\n\n{{passage}}\n\nOPTIONS:\n- no\n- yes"
+ doc_to_target: "{{['no', 'yes'][label]}}"
+ # RTE
+ - group: rte_flan
+ group_alias: RTE
+ aggregate_metric_list:
+ - metric: acc
+ weight_by_size: True
+ task:
+ - task: rte_prompt-0
+ task_alias: prompt-0
+ include: _held_in_template_yaml
+ doc_to_text: "{{sentence1}}\n\nQuestion with options: Based on the paragraph above can we conclude that \"{{sentence2}}\"?\n\nOPTIONS:\n- yes\n- no"
+ doc_to_target: "{{['yes', 'no'][label]}}"
+ - task: rte_prompt-1
+ task_alias: prompt-1
+ include: _held_in_template_yaml
+ doc_to_text: "{{sentence1}}\n\nBased on that paragraph can we conclude that the sentence below is true?\n{{sentence2}}\n\nOPTIONS:\n- yes\n- no"
+ doc_to_target: "{{['yes', 'no'][label]}}"
+ - task: rte_prompt-2
+ task_alias: prompt-2
+ include: _held_in_template_yaml
+ doc_to_text: "{{sentence1}}\n\nQ with options: Can we draw the following conclusion?\n{{sentence2}}\n\nOPTIONS:\n- yes\n- no"
+ doc_to_target: "{{['yes', 'no'][label]}}"
+ - task: rte_prompt-3
+ task_alias: prompt-3
+ include: _held_in_template_yaml
+ doc_to_text: "{{sentence1}}\nDoes this next sentence follow, given the preceding text?\n{{sentence2}}\n\nOPTIONS:\n- yes\n- no"
+ doc_to_target: "{{['yes', 'no'][label]}}"
+ - task: rte_prompt-4
+ task_alias: prompt-4
+ include: _held_in_template_yaml
+ doc_to_text: "{{sentence1}}\nOPTIONS:\n- yes\n- no\nQuestion: Can we infer the following?\n{{sentence2}}"
+ doc_to_target: "{{['yes', 'no'][label]}}"
+ - task: rte_prompt-5
+ task_alias: prompt-5
+ include: _held_in_template_yaml
+ doc_to_text: "Read the following paragraph and determine if the hypothesis is true. Select from options at the end:\n\n{{sentence1}}\n\nHypothesis: {{sentence2}}\nOPTIONS:\n- yes\n- no\nThe answer is"
+ doc_to_target: "{{['yes', 'no'][label]}}"
+ - task: rte_prompt-6
+ task_alias: prompt-6
+ include: _held_in_template_yaml
+ doc_to_text: "Read the text and determine if the sentence is true:\n\n{{sentence1}}\n\nSentence: {{sentence2}}\nOPTIONS:\n- yes\n- no\nA:"
+ doc_to_target: "{{['yes', 'no'][label]}}"
+ - task: rte_prompt-7
+ task_alias: prompt-7
+ include: _held_in_template_yaml
+ doc_to_text: "Question with options: can we draw the following hypothesis from the context? \n\nContext:\n\n{{sentence1}}\n\nHypothesis: {{sentence2}}\nOPTIONS:\n- yes\n- no\nA:"
+ doc_to_target: "{{['yes', 'no'][label]}}"
+ - task: rte_prompt-8
+ task_alias: prompt-8
+ include: _held_in_template_yaml
+ doc_to_text: "Determine if the sentence is true based on the text below. Choose from options.\n{{sentence2}}\n\n{{sentence1}}\nOPTIONS:\n- yes\n- no"
+ doc_to_target: "{{['yes', 'no'][label]}}"
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/flan/flan_held_out.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/flan/flan_held_out.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cf806b882167dacc83e3baab67fe69d293de6ddc
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/flan/flan_held_out.yaml
@@ -0,0 +1,13 @@
+group: flan_held_out
+task:
+ # BBH
+ - bbh_zeroshot
+ - bbh_fewshot
+ - bbh_cot_fewshot
+ - bbh_cot_zeroshot
+ # MMLU
+ - mmlu
+ - mmlu_flan_n_shot_generative
+ - mmlu_flan_n_shot_loglikelihood
+ - mmlu_flan_cot_zeroshot
+ - mmlu_flan_cot_fewshot
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/minerva_math.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/minerva_math.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f1ec09f7dcc41bc9a95c1958fc99fff62e6128db
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/minerva_math.yaml
@@ -0,0 +1,15 @@
+group: minerva_math
+task:
+ - minerva_math_algebra
+ - minerva_math_counting_and_prob
+ - minerva_math_geometry
+ - minerva_math_intermediate_algebra
+ - minerva_math_num_theory
+ - minerva_math_prealgebra
+ - minerva_math_precalc
+aggregate_metric_list:
+ - metric: exact_match
+ aggregation: mean
+ weight_by_size: true
+metadata:
+ version: 1.0
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/multimedqa/README.md b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/multimedqa/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..de694e47ebeecf52c6d95038019a7ea17a623e52
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/multimedqa/README.md
@@ -0,0 +1,43 @@
+# MultiMedQA (multiple-choice subset)
+
+### Paper
+
+Title: Large Language Models Encode Clinical Knowledge
+
+Abstract: https://arxiv.org/abs/2212.13138
+
+A benchmark combining four existing multiple-choice question answering datasets spanning professional medical exams and research queries.
+
+### Citation
+
+```
+@Article{Singhal2023,
+author={Singhal, Karan and Azizi, Shekoofeh and Tu, Tao and Mahdavi, S. Sara and Wei, Jason and Chung, Hyung Won and Scales, Nathan and Tanwani, Ajay and Cole-Lewis, Heather and Pfohl, Stephen and Payne, Perry and Seneviratne, Martin and Gamble, Paul and Kelly, Chris and Babiker, Abubakr and Sch{\"a}rli, Nathanael and Chowdhery, Aakanksha and Mansfield, Philip and Demner-Fushman, Dina and Ag{\"u}era y Arcas, Blaise and Webster, Dale and Corrado, Greg S. and Matias, Yossi and Chou, Katherine and Gottweis, Juraj and Tomasev, Nenad and Liu, Yun and Rajkomar, Alvin and Barral, Joelle and Semturs, Christopher and Karthikesalingam, Alan and Natarajan, Vivek},
+title={Large language models encode clinical knowledge},
+journal={Nature},
+year={2023},
+month={Aug},
+day={01},
+volume={620},
+number={7972},
+pages={172-180},
+issn={1476-4687},
+doi={10.1038/s41586-023-06291-2},
+url={https://doi.org/10.1038/s41586-023-06291-2}
+}
+```
+
+### Tasks
+
+* [PubMedQA](https://pubmedqa.github.io/) - 1,000 expert-labeled Q&A pairs where a question and corresponding PubMed abstract as context is given and a yes/maybe/no answer must be produced. Unlike the rest of the tasks in this suite, PubMedQA is a closed-domain Q&A task.
+* [MedQA](https://github.com/jind11/MedQA) - US Medical License Exam (USMLE) questions with 4 or 5 possible answers. Typically, only the 4-option questions are used.
+* [MedMCQA](https://medmcqa.github.io/) - 4-option multiple choice questions from Indian medical entrance examinations, >191k total questions.
+* [MMLU](https://arxiv.org/abs/2009.03300) - 4-option multiple choice exam questions from a variety of domains. The following 6 domains are utilized here:
+ * Anatomy
+ * Clinical Knowledge
+ * College Medicine
+ * Medical Genetics
+ * Professional Medicine
+ * College Biology
+
+Note that MultiMedQA also includes some short-form and long-form Q&A tasks (LiveQA, MedicationQA, HealthSearchQA). Evaluation on these tasks is usually done by experts and is not typically performed automatically, and therefore is ignored here.
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/multimedqa/multimedqa.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/multimedqa/multimedqa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7a8409e47f3dd4e5ee4430fcf56b3616521cd6a9
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/multimedqa/multimedqa.yaml
@@ -0,0 +1,21 @@
+group: multimedqa
+task:
+ - pubmedqa
+ - medmcqa
+ - medqa_4options
+ - task: mmlu_anatomy
+ task_alias: "anatomy (mmlu)"
+ - task: mmlu_clinical_knowledge
+ task_alias: "clinical_knowledge (mmlu)"
+ - task: mmlu_college_medicine
+ task_alias: "college_medicine (mmlu)"
+ - task: mmlu_medical_genetics
+ task_alias: "medical_genetics (mmlu)"
+ - task: mmlu_professional_medicine
+ task_alias: "professional_medicine (mmlu)"
+ - task: mmlu_college_biology
+ task_alias: "college_biology (mmlu)"
+aggregate_metric_list:
+ - metric: acc
+ aggregation: mean
+ weight_by_size: True
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/openllm.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/openllm.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0296a0a548e1206f70627b4176d79aab7438db75
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/openllm.yaml
@@ -0,0 +1,18 @@
+group: openllm
+group_alias: Open LLM Leaderboard
+task:
+ - task: arc_challenge
+ fewshot_split: validation
+ num_fewshot: 25
+ - task: hellaswag
+ fewshot_split: train
+ num_fewshot: 10
+ - task: truthfulqa
+ num_fewshot: 0
+ - task: mmlu
+ num_fewshot: 5
+ - task: winogrande
+ fewshot_split: train
+ num_fewshot: 5
+ - task: gsm8k
+ num_fewshot: 5
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/pythia.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/pythia.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bdeadd3ce995ce3d4d9340082ede3bf424ba276d
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/pythia.yaml
@@ -0,0 +1,12 @@
+group: pythia
+task:
+ - lambada_openai
+ - logiqa
+ - piqa
+ - sciq
+ - wikitext
+ - winogrande
+ - wsc
+ - ai2_arc
+ - blimp
+ - mmlu
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/t0_eval.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/t0_eval.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..27e7adc41bd2eaffa20b3344cfdf83a52b4d65fc
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/benchmarks/t0_eval.yaml
@@ -0,0 +1,127 @@
+group: t0_eval
+task:
+ # Coreference Resolution
+ - dataset_path: super_glue
+ dataset_name: wsc.fixed
+ use_prompt: promptsource:*
+ training_split: train
+ validation_split: validation
+ output_type: generate_until
+ metric_list:
+ - metric: exact_match
+ aggregation: mean
+ higher_is_better: true
+ ignore_case: true
+ ignore_punctuation: true
+ # Coreference Resolution
+ - dataset_path: winogrande
+ dataset_name: winogrande_xl
+ use_prompt: promptsource:*
+ training_split: train
+ validation_split: validation
+ output_type: generate_until
+ metric_list:
+ - metric: exact_match
+ aggregation: mean
+ higher_is_better: true
+ ignore_case: true
+ ignore_punctuation: true
+ # Natural Language Inference
+ - dataset_path: super_glue
+ dataset_name: cb
+ use_prompt: promptsource:*
+ training_split: train
+ validation_split: validation
+ output_type: generate_until
+ metric_list:
+ - metric: exact_match
+ aggregation: mean
+ higher_is_better: true
+ ignore_case: true
+ ignore_punctuation: true
+ - dataset_path: super_glue
+ dataset_name: rte
+ use_prompt: promptsource:*
+ training_split: train
+ validation_split: validation
+ output_type: generate_until
+ metric_list:
+ - metric: exact_match
+ aggregation: mean
+ higher_is_better: true
+ ignore_case: true
+ ignore_punctuation: true
+ - task: anli_r1
+ dataset_path: anli
+ use_prompt: promptsource:*
+ training_split: train_r1
+ validation_split: dev_r1
+ output_type: generate_until
+ metric_list:
+ - metric: exact_match
+ aggregation: mean
+ higher_is_better: true
+ ignore_case: true
+ ignore_punctuation: true
+ - task: anli_r2
+ dataset_path: anli
+ use_prompt: promptsource:*
+ training_split: train_r2
+ validation_split: dev_r2
+ output_type: generate_until
+ metric_list:
+ - metric: exact_match
+ aggregation: mean
+ higher_is_better: true
+ ignore_case: true
+ ignore_punctuation: true
+ - task: anli_r3
+ dataset_path: anli
+ use_prompt: promptsource:*
+ training_split: train_r3
+ validation_split: dev_r3
+ output_type: generate_until
+ metric_list:
+ - metric: exact_match
+ aggregation: mean
+ higher_is_better: true
+ ignore_case: true
+ ignore_punctuation: true
+ # Sentence Completion
+ - dataset_path: super_glue
+ dataset_name: copa
+ use_prompt: promptsource:*
+ training_split: train
+ validation_split: validation
+ output_type: generate_until
+ metric_list:
+ - metric: exact_match
+ aggregation: mean
+ higher_is_better: true
+ ignore_case: true
+ ignore_punctuation: true
+ # Natural Language Inference
+ - dataset_path: hellaswag
+ use_prompt: promptsource:*
+ training_split: train
+ validation_split: validation
+ output_type: generate_until
+ metric_list:
+ - metric: exact_match
+ aggregation: mean
+ higher_is_better: true
+ ignore_case: true
+ ignore_punctuation: true
+ # Word Sense Disambiguation
+ - dataset_path: super_glue
+ dataset_name: wic
+ use_prompt: promptsource:*
+ training_split: train
+ validation_split: validation
+ output_type: generate_until
+ metric_list:
+ - metric: exact_match
+ aggregation: mean
+ higher_is_better: true
+ ignore_case: true
+ ignore_punctuation: true
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/fld/README.md b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/fld/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..06ccf8f606fe9e0175b3e3b48eb3b45193f2c0c3
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/fld/README.md
@@ -0,0 +1,65 @@
+# FLD
+
+### Paper
+
+Title: Learning Deductive Reasoning from Synthetic Corpus based on Formal Logic
+
+Abstract: https://arxiv.org/abs/2308.07336
+
+**FLD** (**F**ormal **L**ogic **D**eduction) is a deductive reasoning benchmark.
+Given a set of facts and a hypothesis, an LLM is required to generate (i) proof steps to (dis-)prove the hypothesis, and (ii) an answer ("proved", "disproved", or "unknown").
+
+Unique features of FLD are:
+* It assesses the model's logical reasoning ability *isolated from knowledge*, as the facts are randomly constructed so that referring to existing knowledge never helps solve the task.
+* It assesses diverse reasoning patterns (i.e., deduction rules), as it is based on formal logic theory.
+* As a result, it is highly challenging. Indeed, even GPT-4 can solve only about half of the problems.
+
+Homepage: https://github.com/hitachi-nlp/FLD
+
+
+### Citation
+
+```
+@InProceedings{pmlr-v202-morishita23a,
+ title = {Learning Deductive Reasoning from Synthetic Corpus based on Formal Logic},
+ author = {Morishita, Terufumi and Morio, Gaku and Yamaguchi, Atsuki and Sogawa, Yasuhiro},
+ booktitle = {Proceedings of the 40th International Conference on Machine Learning},
+ pages = {25254--25274},
+ year = {2023},
+ editor = {Krause, Andreas and Brunskill, Emma and Cho, Kyunghyun and Engelhardt, Barbara and Sabato, Sivan and Scarlett, Jonathan},
+ volume = {202},
+ series = {Proceedings of Machine Learning Research},
+ month = {23--29 Jul},
+ publisher = {PMLR},
+ pdf = {https://proceedings.mlr.press/v202/morishita23a/morishita23a.pdf},
+ url = {https://proceedings.mlr.press/v202/morishita23a.html},
+}
+```
+
+### Groups and Tasks
+
+This release is the simplified version of FLD where a model is required to predict only an answer.
+This setting is described by "answer accuracy" in the original paper.
+
+#### Tasks in Group `fld`
+* `fld_default` is a basic task based on [FLD.v2](https://huggingface.co/datasets/hitachi-nlp/FLD.v2/viewer/default)
+* `fld_star` is a more challenging version based on [FLD.v2-star](https://huggingface.co/datasets/hitachi-nlp/FLD.v2/viewer/star)
+
+#### Tasks in Group `fld_logical_formula`
+Further, we have "logical formula" versions of the benchmarks, which evaluate LLMs' pure logical reasoning capabilities within the domain of logical formulas, rather than natural language:
+* `fld_logical_formula_default`
+* `fld_logical_formula_star`
+
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/fld/fld_default.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/fld/fld_default.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..460a9ec6dbf52be9819891ed27ae59b62162c75d
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/fld/fld_default.yaml
@@ -0,0 +1,19 @@
+task: fld_default
+dataset_path: hitachi-nlp/FLD.v2
+dataset_name: default
+training_split: train
+validation_split: validation
+test_split: test
+doc_to_text: "Based on the provided facts ($context$), either prove or disprove the hypothesis or state that it is unknown. {{prompt_serial}}"
+doc_to_target: world_assump_label
+metric_list:
+ - metric: exact_match
+ aggregation: mean
+ higher_is_better: true
+filter_list:
+ - name: remove_whitespace
+ filter:
+ - function: remove_whitespace
+ - function: take_first
+metadata:
+ version: 2.0
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/fld/fld_logical_formula_default.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/fld/fld_logical_formula_default.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..67ff4acfe80757b7b53a4a5fdd3c7b388f751770
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/fld/fld_logical_formula_default.yaml
@@ -0,0 +1,21 @@
+group:
+ - fld_logical_formula
+task: fld_logical_formula_default
+dataset_path: hitachi-nlp/FLD.v2
+dataset_name: default
+training_split: train
+validation_split: validation
+test_split: test
+doc_to_text: "Based on the provided facts ($context$), either prove or disprove the hypothesis or state that it is unknown. The facts and the hypothesis are written in logical formulas as follows: capital letters such as \"{A}\", \"{B}\", \"{AB}\" are predicates, small letters such as \"{a}\", \"{b}\", \"{ab}\" are constants, \"&\" is logical conjunction, \"v\" is logical disjunction, \"¬\" is negation, \"->\" is implication, \"(x)\" is \"for all x\", and \"(Ex)\" is \"for some x\". $hypothesis$ = {{hypothesis_formula}} ; $context$ = {{context_formula}} ; $proof$ = "
+doc_to_target: world_assump_label
+metric_list:
+ - metric: exact_match
+ aggregation: mean
+ higher_is_better: true
+filter_list:
+ - name: remove_whitespace
+ filter:
+ - function: remove_whitespace
+ - function: take_first
+metadata:
+ version: 2.0
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/fld/fld_logical_formula_star.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/fld/fld_logical_formula_star.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..28aee616f420ef62844febef6626771029f04500
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/fld/fld_logical_formula_star.yaml
@@ -0,0 +1,3 @@
+include: fld_logical_formula_default.yaml
+task: fld_logical_formula_star
+dataset_name: star
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/fld/fld_star.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/fld/fld_star.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..750e808c780001e4659c9def75400f8a2460045e
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/fld/fld_star.yaml
@@ -0,0 +1,3 @@
+include: fld_default.yaml
+task: fld_star
+dataset_name: star
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/README.md b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..dbb72ea8e1f45c9f19741dd6764b0937e59aa69a
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/README.md
@@ -0,0 +1,328 @@
+# Leaderboard evaluations
+Our goal with this group is to create an unchanging through time version of
+evaluations that will power the Open LLM Leaderboard on HuggingFace.
+
+As we want to evaluate models across capabilities, the list currently contains:
+- BBH (3-shots, multichoice)
+- GPQA (0-shot, multichoice)
+- mmlu-pro (5-shots, multichoice)
+- Musr (0-shot, multichoice)
+- ifeval (0-shot, generative)
+- Math-lvl-5 (4-shots, generative, minerva version)
+
+
+Details on the choice of those evals can be found [here](https://huggingface.co/spaces/open-llm-leaderboard/blog) !
+
+## BigBenchHard (BBH)
+
+A suite of 23 challenging BIG-Bench tasks which we call BIG-Bench Hard (BBH).
+These are the tasks for which prior language model evaluations did not
+outperform the average human-rater.
+
+### Paper
+
+Title: Challenging BIG-Bench Tasks and Whether Chain-of-Thought Can Solve Them
+
+BIG-Bench (Srivastava et al., 2022) is a diverse evaluation suite that focuses on tasks believed to be beyond the capabilities of current language models. Language models have already made good progress on this benchmark, with the best model in the BIG-Bench paper outperforming average reported human-rater results on 65% of the BIG-Bench tasks via few-shot prompting. But on what tasks do language models fall short of average human-rater performance, and are those tasks actually unsolvable by current language models?
+In this work, we focus on a suite of 23 challenging BIG-Bench tasks which we call BIG-Bench Hard (BBH). These are the task for which prior language model evaluations did not outperform the average human-rater. We find that applying chain-of-thought (CoT) prompting to BBH tasks enables PaLM to surpass the average human-rater performance on 10 of the 23 tasks, and Codex (code-davinci-002) to surpass the average human-rater performance on 17 of the 23 tasks. Since many tasks in BBH require multi-step reasoning, few-shot prompting without CoT, as done in the BIG-Bench evaluations (Srivastava et al., 2022), substantially underestimates the best performance and capabilities of language models, which is better captured via CoT prompting. As further analysis, we explore the interaction between CoT and model scale on BBH, finding that CoT enables emergent task performance on several BBH tasks with otherwise flat scaling curves.
+
+
+- paper: https://huggingface.co/papers/2210.09261
+- Homepage: https://github.com/suzgunmirac/BIG-Bench-Hard
+
+### Citation
+
+```
+@article{suzgun2022challenging,
+ title={Challenging BIG-Bench Tasks and Whether Chain-of-Thought Can Solve Them},
+  author={Suzgun, Mirac and Scales, Nathan and Sch{\"a}rli, Nathanael and Gehrmann, Sebastian and Tay, Yi and Chung, Hyung Won and Chowdhery, Aakanksha and Le, Quoc V and Chi, Ed H and Zhou, Denny and Wei, Jason},
+ journal={arXiv preprint arXiv:2210.09261},
+ year={2022}
+}
+```
+
+### Groups
+
+- `leaderboard_bbh`
+
+### Tasks
+
+- `leaderboard_bbh_boolean_expressions`
+- `leaderboard_bbh_causal_judgement`
+- `leaderboard_bbh_date_understanding`
+- `leaderboard_bbh_disambiguation_qa`
+- `leaderboard_bbh_dyck_languages`
+- `leaderboard_bbh_formal_fallacies`
+- `leaderboard_bbh_geometric_shapes`
+- `leaderboard_bbh_hyperbaton`
+- `leaderboard_bbh_logical_deduction_five_objects`
+- `leaderboard_bbh_logical_deduction_seven_objects`
+- `leaderboard_bbh_logical_deduction_three_objects`
+- `leaderboard_bbh_movie_recommendation`
+- `leaderboard_bbh_multistep_arithmetic_two`
+- `leaderboard_bbh_navigate`
+- `leaderboard_bbh_object_counting`
+- `leaderboard_bbh_penguins_in_a_table`
+- `leaderboard_bbh_reasoning_about_colored_objects`
+- `leaderboard_bbh_ruin_names`
+- `leaderboard_bbh_salient_translation_error_detection`
+- `leaderboard_bbh_snarks`
+- `leaderboard_bbh_sports_understanding`
+- `leaderboard_bbh_temporal_sequences`
+- `leaderboard_bbh_tracking_shuffled_objects_five_objects`
+- `leaderboard_bbh_tracking_shuffled_objects_seven_objects`
+- `leaderboard_bbh_tracking_shuffled_objects_three_objects`
+- `leaderboard_bbh_web_of_lies`
+- `leaderboard_bbh_word_sorting`
+
+## GPQA
+
+### Paper
+
+Title: GPQA: A Graduate-Level Google-Proof Q&A Benchmark
+
+We present GPQA, a challenging dataset of 448 multiple-choice questions written
+by domain experts in biology, physics, and chemistry. We ensure that the
+questions are high-quality and extremely difficult: experts who have or are
+pursuing PhDs in the corresponding domains reach 65% accuracy (74% when
+discounting clear mistakes the experts identified in retrospect), while highly
+skilled non-expert validators only reach 34% accuracy, despite spending on
+average over 30 minutes with unrestricted access to the web (i.e., the
+questions are “Google-proof”). The questions are also difficult for
+state-of-the-art AI systems, with our strongest GPT-4–based baseline achieving
+39% accuracy. If we are to use future AI systems to help us answer very hard
+questions—for example, when developing new scientific knowledge—we need to
+develop scalable oversight methods that enable humans to supervise their
+outputs, which may be difficult even if the supervisors are themselves skilled
+and knowledgeable. The difficulty of GPQA both for skilled non-experts and
+frontier AI systems should enable realistic scalable oversight experiments,
+which we hope can help devise ways for human experts to reliably get truthful
+information from AI systems that surpass human capabilities.
+
+- Paper: https://huggingface.co/papers/2311.12022
+- Homepage: https://github.com/idavidrein/gpqa/tree/main
+
+### Citation
+
+```
+@misc{rein2023gpqa,
+ title={GPQA: A Graduate-Level Google-Proof Q&A Benchmark},
+ author={David Rein and Betty Li Hou and Asa Cooper Stickland and Jackson Petty and Richard Yuanzhe Pang and Julien Dirani and Julian Michael and Samuel R. Bowman},
+ year={2023},
+ eprint={2311.12022},
+ archivePrefix={arXiv},
+ primaryClass={cs.AI}
+}
+```
+
+### Groups
+
+- `leaderboard_gpqa`
+
+### Tasks
+
+- `leaderboard_gpqa_extended`
+- `leaderboard_gpqa_diamond`
+- `leaderboard_gpqa_main`
+
+## IFEval
+
+### Paper
+
+Title: Instruction-Following Evaluation for Large Language Models
+
+One core capability of Large Language Models (LLMs) is to follow natural
+language instructions. However, the evaluation of such abilities is not
+standardized: Human evaluations are expensive, slow, and not objectively
+reproducible, while LLM-based auto-evaluation is potentially biased or limited
+by the ability of the evaluator LLM. To overcome these issues, we introduce
+Instruction-Following Eval (IFEval) for large language models. IFEval is a
+straightforward and easy-to-reproduce evaluation benchmark. It focuses on a set
+of "verifiable instructions" such as "write in more than 400 words" and
+"mention the keyword of AI at least 3 times". We identified 25 types of those
+verifiable instructions and constructed around 500 prompts, with each prompt
+containing one or more verifiable instructions. We show evaluation results of
+two widely available LLMs on the market.
+
+- Paper: https://huggingface.co/papers/2311.07911
+- Homepage: https://github.com/google-research/google-research/tree/master/instruction_following_eval
+
+### Citation
+
+```
+@article{zhou2023instructionfollowing,
+ title={Instruction-Following Evaluation for Large Language Models},
+ author={Jeffrey Zhou and Tianjian Lu and Swaroop Mishra and Siddhartha Brahma and Sujoy Basu and Yi Luan and Denny Zhou and Le Hou},
+ journal={arXiv preprint arXiv:2311.07911},
+ year={2023},
+}
+```
+
+### Tasks
+
+- `leaderboard_ifeval`
+
+## MATH-hard
+
+This is the 4 shots variant of minerva math but only keeping the level 5 questions.
+
+### Paper
+
+Title: Measuring Mathematical Problem Solving With the MATH Dataset
+
+Many intellectual endeavors require mathematical problem solving, but this
+skill remains beyond the capabilities of computers. To measure this ability in
+machine learning models, we introduce MATH, a new dataset of 12,500 challenging
+competition mathematics problems. Each problem in MATH has a full step-by-step
+solution which can be used to teach models to generate answer derivations and
+explanations.
+
+NOTE: The few-shot and the generated answer extraction is based on the
+[Minerva](https://arxiv.org/abs/2206.14858) and exact match equivalence is
+calculated using the `sympy` library. This requires additional dependencies,
+which can be installed via the `lm-eval[math]` extra.
+
+- Paper: https://huggingface.co/papers/2103.03874
+- Homepage: https://github.com/hendrycks/math
+
+
+### Citation
+
+```
+@article{hendrycksmath2021,
+ title={Measuring Mathematical Problem Solving With the MATH Dataset},
+ author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt},
+ journal={NeurIPS},
+ year={2021}
+}
+@misc{2206.14858,
+Author = {Aitor Lewkowycz and Anders Andreassen and David Dohan and Ethan Dye and Henryk Michalewski and Vinay Ramasesh and Ambrose Slone and Cem Anil and Imanol Schlag and Theo Gutman-Solo and Yuhuai Wu and Behnam Neyshabur and Guy Gur-Ari and Vedant Misra},
+Title = {Solving Quantitative Reasoning Problems with Language Models},
+Year = {2022},
+Eprint = {arXiv:2206.14858},
+}
+```
+
+### Groups
+
+- `leaderboard_math_hard`
+
+### Tasks
+
+- `leaderboard_math_algebra_hard`
+- `leaderboard_math_counting_and_prob_hard`
+- `leaderboard_math_geometry_hard`
+- `leaderboard_math_intermediate_algebra_hard`
+- `leaderboard_math_num_theory_hard`
+- `leaderboard_math_prealgebra_hard`
+- `leaderboard_math_precalc_hard`
+
+
+## MMLU-Pro
+
+### Paper
+
+Title: MMLU-Pro: A More Robust and Challenging Multi-Task Language
+Understanding Benchmark
+
+In the age of large-scale language models, benchmarks like the Massive
+Multitask Language Understanding (MMLU) have been pivotal in pushing the
+boundaries of what AI can achieve in language comprehension and reasoning
+across diverse domains. However, as models continue to improve, their
+performance on these benchmarks has begun to plateau, making it increasingly
+difficult to discern differences in model capabilities. This paper introduces
+MMLU-Pro, an enhanced dataset designed to extend the mostly knowledge-driven
+MMLU benchmark by integrating more challenging, reasoning-focused questions and
+expanding the choice set from four to ten options. Additionally, MMLU-Pro
+eliminates the trivial and noisy questions in MMLU. Our experimental results
+show that MMLU-Pro not only raises the challenge, causing a significant drop in
+accuracy by 16% to 33% compared to MMLU but also demonstrates greater stability
+under varying prompts. With 24 different prompt styles tested, the sensitivity
+of model scores to prompt variations decreased from 4-5% in MMLU to just 2% in
+MMLU-Pro. Additionally, we found that models utilizing Chain of Thought (CoT)
+reasoning achieved better performance on MMLU-Pro compared to direct answering,
+which is in stark contrast to the findings on the original MMLU, indicating
+that MMLU-Pro includes more complex reasoning questions. Our assessments
+confirm that MMLU-Pro is a more discriminative benchmark to better track
+progress in the field.
+
+- Paper: https://huggingface.co/papers/2406.01574
+- Homepage: https://huggingface.co/datasets/TIGER-Lab/MMLU-Pro
+
+### Citation
+
+```
+@misc{wang2024mmluprorobustchallengingmultitask,
+ title={MMLU-Pro: A More Robust and Challenging Multi-Task Language
+ Understanding Benchmark},
+ author={Yubo Wang and Xueguang Ma and Ge Zhang and Yuansheng Ni and Abhranil Chandra and Shiguang Guo and Weiming Ren and Aaran Arulraj and Xuan He and Ziyan Jiang and Tianle Li and Max Ku and Kai Wang and Alex Zhuang and Rongqi Fan and Xiang Yue and Wenhu Chen},
+ year={2024},
+ eprint={2406.01574},
+ archivePrefix={arXiv},
+ primaryClass={cs.CL},
+ url={https://arxiv.org/abs/2406.01574},
+}
+```
+
+### Groups
+
+- `leaderboard_mmlu_pro`
+
+### Tasks
+
+- `leaderboard_mmlu_pro`
+
+
+## Musr
+
+### Paper
+
+Title: MuSR: Testing the Limits of Chain-of-thought with Multistep Soft
+Reasoning
+
+While large language models (LLMs) equipped with techniques like
+chain-of-thought prompting have demonstrated impressive capabilities, they
+still fall short in their ability to reason robustly in complex settings.
+However, evaluating LLM reasoning is challenging because system capabilities
+continue to grow while benchmark datasets for tasks like logical deduction have
+remained static. We introduce MuSR, a dataset for evaluating language models on
+multistep soft reasoning tasks specified in a natural language narrative. This
+dataset has two crucial features. First, it is created through a novel
+neurosymbolic synthetic-to-natural generation algorithm, enabling the
+construction of complex reasoning instances that challenge GPT-4 (e.g., murder
+mysteries roughly 1000 words in length) and which can be scaled further as more
+capable LLMs are released. Second, our dataset instances are free text
+narratives corresponding to real-world domains of reasoning; this makes it
+simultaneously much more challenging than other synthetically-crafted
+benchmarks while remaining realistic and tractable for human annotators to
+solve with high accuracy. We evaluate a range of LLMs and prompting techniques
+on this dataset and characterize the gaps that remain for techniques like
+chain-of-thought to perform robust reasoning.
+
+- Paper: https://huggingface.co/papers/2310.16049
+- Homepage: https://zayne-sprague.github.io/MuSR/
+
+### Citation
+
+```
+@misc{sprague2024musrtestinglimitschainofthought,
+ title={MuSR: Testing the Limits of Chain-of-thought with Multistep Soft
+ Reasoning},
+ author={Zayne Sprague and Xi Ye and Kaj Bostrom and Swarat Chaudhuri and Greg Durrett},
+ year={2024},
+ eprint={2310.16049},
+ archivePrefix={arXiv},
+ primaryClass={cs.CL},
+ url={https://arxiv.org/abs/2310.16049},
+}
+```
+
+### Groups
+
+- `leaderboard_musr`
+
+### Tasks
+
+- `leaderboard_musr_murder_mysteries`
+- `leaderboard_musr_object_placements`
+- `leaderboard_musr_team_allocation`
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/_leaderboard_bbh.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/_leaderboard_bbh.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9cc10d396824d06bb6e58eeadf6d89cb6e507f16
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/_leaderboard_bbh.yaml
@@ -0,0 +1,26 @@
+# Open LLM Leaderboard group file: bundles the 24 multiple-choice BBH
+# (BIG-Bench Hard) subtasks so they run and aggregate as one group.
+group: leaderboard_bbh
+task:
+  - leaderboard_bbh_boolean_expressions
+  - leaderboard_bbh_causal_judgement
+  - leaderboard_bbh_date_understanding
+  - leaderboard_bbh_disambiguation_qa
+  - leaderboard_bbh_formal_fallacies
+  - leaderboard_bbh_geometric_shapes
+  - leaderboard_bbh_hyperbaton
+  - leaderboard_bbh_logical_deduction_five_objects
+  - leaderboard_bbh_logical_deduction_seven_objects
+  - leaderboard_bbh_logical_deduction_three_objects
+  - leaderboard_bbh_movie_recommendation
+  - leaderboard_bbh_navigate
+  - leaderboard_bbh_object_counting
+  - leaderboard_bbh_penguins_in_a_table
+  - leaderboard_bbh_reasoning_about_colored_objects
+  - leaderboard_bbh_ruin_names
+  - leaderboard_bbh_salient_translation_error_detection
+  - leaderboard_bbh_snarks
+  - leaderboard_bbh_sports_understanding
+  - leaderboard_bbh_temporal_sequences
+  - leaderboard_bbh_tracking_shuffled_objects_five_objects
+  - leaderboard_bbh_tracking_shuffled_objects_seven_objects
+  - leaderboard_bbh_tracking_shuffled_objects_three_objects
+  - leaderboard_bbh_web_of_lies
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/boolean_expressions.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/boolean_expressions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0660740a836a2f428478376c73c977ae305b215e
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/boolean_expressions.yaml
@@ -0,0 +1,14 @@
+# BBH boolean_expressions as 2-way multiple choice over the literal strings
+# "False"/"True". Three fixed few-shot examples, taken in order (first_n);
+# shared settings come from _fewshot_template_yaml.
+dataset_name: boolean_expressions
+description: 'Evaluate the result of a random Boolean expression.'
+doc_to_choice: ["False", "True"]
+fewshot_config:
+  sampler: first_n
+  samples:
+  - input: not ( ( not not True ) ) is
+    target: 'False'
+  - input: True and False and not True and True is
+    target: 'False'
+  - input: not not ( not ( False ) ) is
+    target: 'True'
+include: _fewshot_template_yaml
+task: leaderboard_bbh_boolean_expressions
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/formal_fallacies.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/formal_fallacies.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..eebff0a4df84f196cbd35f0a1c7859af4be722ad
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/formal_fallacies.yaml
@@ -0,0 +1,55 @@
+# BBH formal_fallacies as binary multiple choice ("valid"/"invalid"), with
+# three fixed few-shot examples (first_n). Sample texts are dataset content
+# and must stay byte-identical.
+dataset_name: formal_fallacies
+description: 'Distinguish deductively valid arguments from formal fallacies.'
+doc_to_choice: ["valid", "invalid"]
+fewshot_config:
+  sampler: first_n
+  samples:
+  - input: '"It is not always easy to see who is related to whom -- and in which ways.
+    The following argument pertains to this question: To begin with, Lesley is a
+    close friend of Fernando. Moreover, being a close friend of Fernando or a schoolmate
+    of Lowell is sufficient for being a great-grandfather of Leroy. It follows that
+    Lesley is a great-grandfather of Leroy."
+
+    Is the argument, given the explicitly stated premises, deductively valid or
+    invalid?
+
+    Options:
+
+    - valid
+
+    - invalid'
+    target: valid
+  - input: '"It is not always easy to see who is related to whom -- and in which ways.
+    The following argument pertains to this question: Whoever is not a great-grandfather
+    of Clyde is a stepbrother of Brian. Being an ancestor of Dana is sufficient
+    for not being a great-grandfather of Clyde. We may conclude: Everyone who is
+    an ancestor of Dana is a stepbrother of Brian, too."
+
+    Is the argument, given the explicitly stated premises, deductively valid or
+    invalid?
+
+    Options:
+
+    - valid
+
+    - invalid'
+    target: valid
+  - input: '"It is not always easy to grasp who is consuming which products. The following
+    argument pertains to this question: Every infrequent user of Paul Mitchell shampoo
+    is either a rare consumer of Nioxin shampoo or a loyal buyer of Caress soap,
+    or both. No regular consumer of Lush soap is a rare consumer of Nioxin shampoo
+    and, in the same time, a loyal buyer of Caress soap. It follows that whoever
+    is an infrequent user of Paul Mitchell shampoo is not a regular consumer of
+    Lush soap."
+
+    Is the argument, given the explicitly stated premises, deductively valid or
+    invalid?
+
+    Options:
+
+    - valid
+
+    - invalid'
+    target: invalid
+include: _fewshot_template_yaml
+task: leaderboard_bbh_formal_fallacies
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/geometric_shapes.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/geometric_shapes.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a32fcecac132ed2b537d4fcf4f2812e99726dfbc
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/geometric_shapes.yaml
@@ -0,0 +1,84 @@
+# BBH geometric_shapes as multiple choice over options (A)-(K).
+# NOTE(review): doc_to_choice includes (K) although the few-shot examples list
+# only (A)-(J) -- presumably some real questions carry an 11th option; confirm
+# against the upstream dataset before changing.
+dataset_name: geometric_shapes
+description: 'Name geometric shapes from their SVG paths.'
+doc_to_choice: ["(A)","(B)","(C)","(D)","(E)","(F)","(G)","(H)","(I)","(J)","(K)"]
+fewshot_config:
+  sampler: first_n
+  samples:
+  - input: 'This SVG path element
+    draws a
+
+    Options:
+
+    (A) circle
+
+    (B) heptagon
+
+    (C) hexagon
+
+    (D) kite
+
+    (E) line
+
+    (F) octagon
+
+    (G) pentagon
+
+    (H) rectangle
+
+    (I) sector
+
+    (J) triangle'
+    target: (F)
+  - input: 'This SVG path element draws a
+
+    Options:
+
+    (A) circle
+
+    (B) heptagon
+
+    (C) hexagon
+
+    (D) kite
+
+    (E) line
+
+    (F) octagon
+
+    (G) pentagon
+
+    (H) rectangle
+
+    (I) sector
+
+    (J) triangle'
+    target: (G)
+  - input: 'This SVG path element draws a
+
+    Options:
+
+    (A) circle
+
+    (B) heptagon
+
+    (C) hexagon
+
+    (D) kite
+
+    (E) line
+
+    (F) octagon
+
+    (G) pentagon
+
+    (H) rectangle
+
+    (I) sector
+
+    (J) triangle'
+    target: (D)
+include: _fewshot_template_yaml
+task: leaderboard_bbh_geometric_shapes
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/hyperbaton.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/hyperbaton.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3d1eece64c067623487092b845d65246cc78037e
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/hyperbaton.yaml
@@ -0,0 +1,32 @@
+# BBH hyperbaton (English adjective ordering) as binary multiple choice
+# (A)/(B), with three fixed few-shot examples (first_n).
+dataset_name: hyperbaton
+description: 'Order adjectives correctly in English sentences.'
+doc_to_choice: ["(A)", "(B)"]
+fewshot_config:
+  sampler: first_n
+  samples:
+  - input: 'Which sentence has the correct adjective order:
+
+    Options:
+
+    (A) rubber terrible ship
+
+    (B) terrible rubber ship'
+    target: (B)
+  - input: 'Which sentence has the correct adjective order:
+
+    Options:
+
+    (A) repulsive small Brazilian exercise ship
+
+    (B) Brazilian repulsive exercise small ship'
+    target: (A)
+  - input: 'Which sentence has the correct adjective order:
+
+    Options:
+
+    (A) blue gold wonderful square shoe
+
+    (B) wonderful square blue gold shoe'
+    target: (B)
+include: _fewshot_template_yaml
+task: leaderboard_bbh_hyperbaton
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/reasoning_about_colored_objects.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/reasoning_about_colored_objects.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6ca071a9c8562182050b92f1060ae63f3af33777
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/reasoning_about_colored_objects.yaml
@@ -0,0 +1,114 @@
+# BBH reasoning_about_colored_objects as multiple choice over up to 18
+# options (A)-(R); individual questions may present fewer options (the third
+# few-shot example uses only (A)-(G)).
+dataset_name: reasoning_about_colored_objects
+description: 'Answer extremely simple questions about the colors of objects on a surface.'
+doc_to_choice: ["(A)","(B)","(C)","(D)","(E)","(F)","(G)","(H)","(I)","(J)","(K)","(L)","(M)","(N)","(O)","(P)","(Q)","(R)"]
+fewshot_config:
+  sampler: first_n
+  samples:
+  - input: 'On the nightstand, there is a red pencil, a purple mug, a burgundy keychain,
+    a fuchsia teddy bear, a black plate, and a blue stress ball. What color is the
+    stress ball?
+
+    Options:
+
+    (A) red
+
+    (B) orange
+
+    (C) yellow
+
+    (D) green
+
+    (E) blue
+
+    (F) brown
+
+    (G) magenta
+
+    (H) fuchsia
+
+    (I) mauve
+
+    (J) teal
+
+    (K) turquoise
+
+    (L) burgundy
+
+    (M) silver
+
+    (N) gold
+
+    (O) black
+
+    (P) grey
+
+    (Q) purple
+
+    (R) pink'
+    target: (E)
+  - input: 'On the table, you see a bunch of objects arranged in a row: a purple paperclip,
+    a pink stress ball, a brown keychain, a green scrunchiephone charger, a mauve
+    fidget spinner, and a burgundy pen. What is the color of the object directly
+    to the right of the stress ball?
+
+    Options:
+
+    (A) red
+
+    (B) orange
+
+    (C) yellow
+
+    (D) green
+
+    (E) blue
+
+    (F) brown
+
+    (G) magenta
+
+    (H) fuchsia
+
+    (I) mauve
+
+    (J) teal
+
+    (K) turquoise
+
+    (L) burgundy
+
+    (M) silver
+
+    (N) gold
+
+    (O) black
+
+    (P) grey
+
+    (Q) purple
+
+    (R) pink'
+    target: (F)
+  - input: 'On the nightstand, you see the following items arranged in a row: a teal
+    plate, a burgundy keychain, a yellow scrunchiephone charger, an orange mug,
+    a pink notebook, and a grey cup. How many non-orange items do you see to the
+    left of the teal item?
+
+    Options:
+
+    (A) zero
+
+    (B) one
+
+    (C) two
+
+    (D) three
+
+    (E) four
+
+    (F) five
+
+    (G) six'
+    target: (A)
+include: _fewshot_template_yaml
+task: leaderboard_bbh_reasoning_about_colored_objects
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/tracking_shuffled_objects_five_objects.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/tracking_shuffled_objects_five_objects.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..75a23a141198fb33161a0bba0db89292db3e418e
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/bbh_mc/tracking_shuffled_objects_five_objects.yaml
@@ -0,0 +1,57 @@
+# BBH tracking_shuffled_objects_five_objects as multiple choice (A)-(E).
+# NOTE(review): the few-shot examples are three-object questions with options
+# (A)-(C), while real questions use five options -- presumably inherited from
+# the original BBH prompts; confirm against upstream before changing.
+dataset_name: tracking_shuffled_objects_five_objects
+description: 'A task requiring determining the final positions of a set of objects
+  given their initial positions and a description of a sequence of swaps.'
+doc_to_choice: ["(A)","(B)","(C)","(D)","(E)"]
+fewshot_config:
+  sampler: first_n
+  samples:
+  - input: 'Alice, Bob, and Claire are playing a game. At the start of the game, they
+    are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire
+    has a pink ball.
+
+    As the game progresses, pairs of players trade balls. First, Claire and Alice
+    swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls.
+    At the end of the game, Bob has the
+
+    Options:
+
+    (A) yellow ball
+
+    (B) blue ball
+
+    (C) pink ball'
+    target: (A)
+  - input: 'Alice, Bob, and Claire are playing a game. At the start of the game, they
+    are each holding a ball: Alice has a white ball, Bob has a purple ball, and
+    Claire has a pink ball.
+
+    As the game progresses, pairs of players trade balls. First, Bob and Alice swap
+    balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At
+    the end of the game, Alice has the
+
+    Options:
+
+    (A) white ball
+
+    (B) purple ball
+
+    (C) pink ball'
+    target: (C)
+  - input: 'Alice, Bob, and Claire are dancers at a square dance. At the start of
+    a song, they each have a partner: Alice is dancing with Lola, Bob is dancing
+    with Rodrigo, and Claire is dancing with Patrick.
+
+    Throughout the song, the dancers often trade partners. First, Alice and Bob
+    switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice
+    switch partners. At the end of the dance, Alice is dancing with
+
+    Options:
+
+    (A) Lola
+
+    (B) Rodrigo
+
+    (C) Patrick'
+    target: (C)
+include: _fewshot_template_yaml
+task: leaderboard_bbh_tracking_shuffled_objects_five_objects
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/ifeval/_leaderboard_instruction_following.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/ifeval/_leaderboard_instruction_following.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a1203b0069cf3e01003ac7f3b4cc2a351c90214c
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/ifeval/_leaderboard_instruction_following.yaml
@@ -0,0 +1,3 @@
+# Group wrapper: the leaderboard's instruction-following category contains
+# only the IFEval task.
+group: leaderboard_instruction_following
+task:
+  - leaderboard_ifeval
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/ifeval/ifeval.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/ifeval/ifeval.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cab7498bd894dc546ca2da6b0c5257807e5d83b6
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/ifeval/ifeval.yaml
@@ -0,0 +1,31 @@
+# IFEval (instruction-following) leaderboard task: free-form generation
+# scored entirely by utils.process_results against per-prompt instructions.
+task: leaderboard_ifeval
+# HF Hub copy of the IFEval prompts; evaluation runs on its "train" split.
+dataset_path: wis-k/instruction-following-eval
+dataset_name: null
+output_type: generate_until
+test_split: train
+num_fewshot: 0
+doc_to_text: prompt
+# Placeholder target: correctness is decided in process_results, not by
+# comparing against this field.
+doc_to_target: 0
+generation_kwargs:
+  until: []
+  do_sample: false
+  temperature: 0.0
+  max_gen_toks: 1280
+process_results: !function utils.process_results
+metric_list:
+  # Prompt-level metrics are plain means; instruction-level metrics need a
+  # custom aggregation since one prompt yields several instruction results.
+  - metric: prompt_level_strict_acc
+    aggregation: mean
+    higher_is_better: true
+  - metric: inst_level_strict_acc
+    aggregation: !function utils.agg_inst_level_acc
+    higher_is_better: true
+  - metric: prompt_level_loose_acc
+    aggregation: mean
+    higher_is_better: true
+  - metric: inst_level_loose_acc
+    aggregation: !function utils.agg_inst_level_acc
+    higher_is_better: true
+metadata:
+  version: 2.0
+# NOTE(review): a fewshot sampler is configured although num_fewshot is 0 --
+# presumably harmless; confirm it is intentional.
+fewshot_config:
+  sampler: first_n
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/ifeval/instructions.py b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/ifeval/instructions.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c352af3f70e8e25561a755b877f36ce28b13c11
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/ifeval/instructions.py
@@ -0,0 +1,1612 @@
+# Copyright 2023 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Library of instructions."""
+
+import collections
+import json
+import logging
+import random
+import re
+import string
+from typing import Dict, Optional, Sequence, Union
+
+import langdetect
+
+from lm_eval.tasks.ifeval import instructions_util
+
+
+logger = logging.getLogger(__name__)
+
+# Optional kwargs payload accepted by `Instruction.build_description`.
+_InstructionArgsDtype = Optional[Dict[str, Union[int, str, Sequence[str]]]]
+
+# Mapping of language code -> language name (used by ResponseLanguageChecker).
+_LANGUAGES = instructions_util.LANGUAGE_CODES
+
+# The relational operation for comparison.
+_COMPARISON_RELATION = ("less than", "at least")
+
+# The maximum number of sentences.
+_MAX_NUM_SENTENCES = 20
+
+# The number of placeholders.
+_NUM_PLACEHOLDERS = 4
+
+# The number of bullet lists.
+_NUM_BULLETS = 5
+
+# The options of constrained response.
+_CONSTRAINED_RESPONSE_OPTIONS = (
+    "My answer is yes.",
+    "My answer is no.",
+    "My answer is maybe.",
+)
+
+# The options of starter keywords.
+_STARTER_OPTIONS = (
+    "I would say",
+    "My answer is",
+    "I believe",
+    "In my opinion",
+    "I think",
+    "I reckon",
+    "I feel",
+    "From my perspective",
+    "As I see it",
+    "According to me",
+    "As far as I'm concerned",
+    "To my understanding",
+    "In my view",
+    "My take on it is",
+    "As per my perception",
+)
+
+# The options of ending keywords.
+# TODO(jeffreyzhou) add more ending options
+_ENDING_OPTIONS = ("Any other questions?", "Is there anything else I can help with?")
+
+# The number of highlighted sections.
+_NUM_HIGHLIGHTED_SECTIONS = 4
+
+# The section splitter keyword options (constant name keeps upstream's
+# "SPLITER" spelling for compatibility).
+_SECTION_SPLITER = ("Section", "SECTION")
+
+# The number of sections.
+_NUM_SECTIONS = 5
+
+# The number of paragraphs.
+_NUM_PARAGRAPHS = 5
+
+# The postscript markers. NOTE(review): "P.P.S" has no trailing period
+# upstream; kept byte-identical.
+_POSTSCRIPT_MARKER = ("P.S.", "P.P.S")
+
+# The number of keywords.
+_NUM_KEYWORDS = 2
+
+# The occurrences of a single keyword.
+_KEYWORD_FREQUENCY = 3
+
+# The occurrences of a single letter.
+_LETTER_FREQUENCY = 10
+
+# The occurrences of words with all capital letters.
+_ALL_CAPITAL_WORD_FREQUENCY = 20
+
+# The number of words in the response.
+_NUM_WORDS_LOWER_LIMIT = 100
+_NUM_WORDS_UPPER_LIMIT = 500
+
+
+class Instruction:
+    """Base class for a single verifiable instruction template.
+
+    Subclasses implement `build_description` (render the instruction text and
+    store its parameters), `get_instruction_args`/`get_instruction_args_keys`
+    (expose those parameters), and `check_following` (verify a response).
+    """
+
+    def __init__(self, instruction_id):
+        # Identifier for this instruction instance.
+        self.id = instruction_id
+
+    def build_description(self, **kwargs):
+        """Build and return the instruction description text."""
+        raise NotImplementedError("`build_description` not implemented.")
+
+    def get_instruction_args(self):
+        """Return the kwargs used by `build_description`, or None."""
+        raise NotImplementedError("`get_instruction_args` not implemented.")
+
+    def get_instruction_args_keys(self):
+        """Return the list of kwarg names used by `build_description`."""
+        raise NotImplementedError("`get_instruction_args_keys` not implemented.")
+
+    def check_following(self, value):
+        """Return True if response `value` follows this instruction."""
+        raise NotImplementedError("`check_following` not implemented.")
+
+
+class ResponseLanguageChecker(Instruction):
+    """Check the language of the entire response."""
+
+    def build_description(self, *, language=None):
+        """Build the instruction description.
+
+        Args:
+          language: A string representing the expected language of the response. The
+            language has to comply to the 97 types defined in
+            `langid.py` (https://pypi.org/project/langid/1.1.5/), which follows
+            ISO 639-1 codes (https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes);
+            for example, `en` for English, `zh` for Chinese, `fr` for French.
+
+        Returns:
+          A string representing the instruction description.
+        """
+        self._language = language
+        if self._language is None:
+            # No language requested: pick one at random from the supported set.
+            self._language = random.choice(list(_LANGUAGES.keys()))
+        # TODO(tianjianlu): opens the description generation to more choices.
+        self._description_pattern = (
+            "Your ENTIRE response should be in {language} language, no other "
+            + "language is allowed."
+        )
+        return self._description_pattern.format(language=_LANGUAGES[self._language])
+
+    def get_instruction_args(self):
+        """Returns the keyword args of `build_description`."""
+        return {"language": self._language}
+
+    def get_instruction_args_keys(self):
+        """Returns the args keys of `build_description`."""
+        return ["language"]
+
+    def check_following(self, value):
+        """Check if the language of the entire response follows the instruction.
+
+        Args:
+          value: A string representing the response.
+
+        Returns:
+          True if the language of `value` follows instruction; otherwise False.
+        """
+        assert isinstance(value, str)
+
+        try:
+            return langdetect.detect(value) == self._language
+        except langdetect.LangDetectException as e:
+            # Count as instruction is followed.
+            # NOTE(review): calls the root `logging.error` rather than the
+            # module-level `logger` defined above -- kept byte-identical to
+            # upstream; confirm before switching.
+            logging.error(
+                "Unable to detect language for text %s due to %s", value, e
+            )  # refex: disable=pytotw.037
+            return True
+
+
+class NumberOfSentences(Instruction):
+    """Check the number of sentences."""
+
+    def build_description(self, *, num_sentences=None, relation=None):
+        """Build the instruction description.
+
+        Args:
+          num_sentences: An integer specifying the number of sentences as a
+            threshold.
+          relation: A string in (`less than`, `at least`), defining the relational
+            operator for comparison.
+            Two relational comparisons are supported for now:
+            if 'less than', the actual number of sentences < the threshold;
+            if 'at least', the actual number of sentences >= the threshold.
+
+        Returns:
+          A string representing the instruction description.
+        """
+        # The number of sentences as a threshold for comparison.
+        self._num_sentences_threshold = num_sentences
+        if self._num_sentences_threshold is None or self._num_sentences_threshold < 0:
+            # Missing/invalid threshold: draw one in [1, _MAX_NUM_SENTENCES].
+            self._num_sentences_threshold = random.randint(1, _MAX_NUM_SENTENCES)
+
+        if relation is None:
+            self._comparison_relation = random.choice(_COMPARISON_RELATION)
+        elif relation not in _COMPARISON_RELATION:
+            raise ValueError(
+                "The supported relation for comparison must be in "
+                f"{_COMPARISON_RELATION}, but {relation} is given."
+            )
+        else:
+            self._comparison_relation = relation
+
+        self._description_pattern = (
+            "Your response should contain {relation} {num_sentences} sentences."
+        )
+        return self._description_pattern.format(
+            relation=self._comparison_relation,
+            num_sentences=self._num_sentences_threshold,
+        )
+
+    def get_instruction_args(self):
+        """Returns the keyword args of `build_description`."""
+        return {
+            "num_sentences": self._num_sentences_threshold,
+            "relation": self._comparison_relation,
+        }
+
+    def get_instruction_args_keys(self):
+        """Returns the args keys of `build_description`."""
+        return ["num_sentences", "relation"]
+
+    def check_following(self, value):
+        """Check if the number of sentences follows the instruction.
+
+        Args:
+          value: A string representing the response.
+
+        Returns:
+          True if the response follows the instruction.
+
+        Raise:
+          ValueError if the string in `instruction_args` is not in
+          [`less_than`, `at_least`].
+        """
+        num_sentences = instructions_util.count_sentences(value)
+        if self._comparison_relation == _COMPARISON_RELATION[0]:
+            return num_sentences < self._num_sentences_threshold
+        elif self._comparison_relation == _COMPARISON_RELATION[1]:
+            return num_sentences >= self._num_sentences_threshold
+        # Implicit None fall-through is unreachable: build_description only
+        # ever stores a member of _COMPARISON_RELATION.
+
+
+class PlaceholderChecker(Instruction):
+    """Check the placeholders in template writing."""
+
+    def build_description(self, *, num_placeholders=None):
+        """Build the instruction description.
+
+        Args:
+          num_placeholders: An integer denoting the minimum number of
+            placeholders required in the response.
+
+        Returns:
+          A string representing the instruction description.
+        """
+        self._num_placeholders = num_placeholders
+        if self._num_placeholders is None or self._num_placeholders < 0:
+            self._num_placeholders = random.randint(1, _NUM_PLACEHOLDERS)
+        self._description_pattern = (
+            "The response must contain at least {num_placeholders} placeholders "
+            + "represented by square brackets, such as [address]."
+        )
+        return self._description_pattern.format(num_placeholders=self._num_placeholders)
+
+    def get_instruction_args(self):
+        """Returns the keyword args of `build_description`."""
+        return {"num_placeholders": self._num_placeholders}
+
+    def get_instruction_args_keys(self):
+        """Returns the args keys of `build_description`."""
+        return ["num_placeholders"]
+
+    def check_following(self, value):
+        """Check if the number of placeholders follows the instruction.
+
+        Args:
+          value: A string representing the response.
+
+        Returns:
+          True if the actual number of placeholders in the response is greater than
+          or equal to `num_placeholders`; otherwise, False.
+        """
+        # Non-greedy match so "[a] and [b]" counts as two placeholders.
+        placeholders = re.findall(r"\[.*?\]", value)
+        num_placeholders = len(placeholders)
+        return num_placeholders >= self._num_placeholders
+
+
+class BulletListChecker(Instruction):
+    """Checks the bullet list in the prompt."""
+
+    def build_description(self, *, num_bullets=None):
+        """Build the instruction description.
+
+        Args:
+          num_bullets: An integer specifying the exact number of bullet lists
+            that is required to appear in the response.
+
+        Returns:
+          A string representing the instruction description.
+        """
+        self._num_bullets = num_bullets
+        if self._num_bullets is None or self._num_bullets < 0:
+            self._num_bullets = random.randint(1, _NUM_BULLETS)
+        self._description_pattern = (
+            "Your answer must contain exactly {num_bullets} bullet points. "
+            + "Use the markdown bullet points such as:\n"
+            + "* This is point 1. \n"
+            + "* This is point 2"
+        )
+        return self._description_pattern.format(num_bullets=self._num_bullets)
+
+    def get_instruction_args(self):
+        """Returns the keyword args of `build_description`."""
+        return {"num_bullets": self._num_bullets}
+
+    def get_instruction_args_keys(self):
+        """Returns the args keys of `build_description`."""
+        return ["num_bullets"]
+
+    def check_following(self, value):
+        r"""Check if the number of bullet lists meets the requirement.
+
+        Args:
+          value: A string representing the response. The response is expected to
+            contain some bullet lists that start with `\*`.
+
+        Returns:
+          True if the actual number of bullet lists in the response meets the
+          requirement.
+        """
+        # Count lines starting with `*` (excluding `**bold**` starts) and lines
+        # starting with `-` separately, then require an exact total match.
+        bullet_lists = re.findall(r"^\s*\*[^\*].*$", value, flags=re.MULTILINE)
+        bullet_lists_2 = re.findall(r"^\s*-.*$", value, flags=re.MULTILINE)
+        num_bullet_lists = len(bullet_lists) + len(bullet_lists_2)
+        return num_bullet_lists == self._num_bullets
+
+
+class ConstrainedResponseChecker(Instruction):
+    """Checks the constrained response."""
+
+    def build_description(self):
+        """Build the instruction description."""
+        # A sequence of string(s) representing the options of the expected response.
+        self._constrained_responses = _CONSTRAINED_RESPONSE_OPTIONS
+        self._description_pattern = (
+            "Answer with one of the following options: {response_options}"
+        )
+        return self._description_pattern.format(
+            response_options=self._constrained_responses
+        )
+
+    def get_instruction_args(self):
+        """Returns the keyword args of `build_description`."""
+        return None
+
+    def get_instruction_args_keys(self):
+        """Returns the args keys of `build_description`."""
+        return []
+
+    def check_following(self, value):
+        """Checks if the response matches the constrained options.
+
+        Args:
+          value: A string representing the response.
+
+        Returns:
+          True if the actual response contains one of the options in the constrained
+          responses; otherwise False.
+        """
+        value = value.strip()
+        # Substring containment, not exact equality: an option appearing
+        # anywhere in the trimmed response counts as following.
+        for constrained_response in self._constrained_responses:
+            if constrained_response in value:
+                return True
+        return False
+
+
+class ConstrainedStartChecker(Instruction):
+    """Checks the response start."""
+
+    def build_description(self, *, starter=None):
+        """Build the instruction description.
+
+        Args:
+          starter: A string representing the keyword that the response should start
+            with.
+
+        Returns:
+          A string representing the instruction description.
+        """
+        self._starter = starter.strip() if isinstance(starter, str) else starter
+        if self._starter is None:
+            self._starter = random.choice(_STARTER_OPTIONS)
+        self._description_pattern = (
+            "During the conversation, when it is your turn, "
+            + "please always start with {starter}"
+        )
+        return self._description_pattern.format(starter=self._starter)
+
+    def get_instruction_args(self):
+        """Returns the keyword args of `build_description`."""
+        return {"starter": self._starter}
+
+    def get_instruction_args_keys(self):
+        """Returns the args keys of `build_description`."""
+        return ["starter"]
+
+    def check_following(self, value):
+        """Checks if the response starts with the constrained keyword or phrase.
+
+        Args:
+          value: A string representing the response.
+
+        Returns:
+          True if the response starts with the given phrase or keyword that is
+          contained in `instruction_args`; otherwise, False.
+        """
+        # NOTE(review): the starter is interpolated into the pattern without
+        # re.escape, so regex metacharacters in `starter` would alter matching
+        # -- kept as-is to match upstream behavior.
+        response_pattern = r"^\s*" + self._starter + r".*$"
+        response_with_constrained_start = re.search(
+            response_pattern, value, flags=re.MULTILINE
+        )
+        return True if response_with_constrained_start else False
+
+
+class HighlightSectionChecker(Instruction):
+    """Checks the highlighted section."""
+
+    def build_description(self, *, num_highlights=None):
+        """Build the instruction description.
+
+        Args:
+          num_highlights: An integer specifying the minimum number of highlighted
+            sections.
+
+        Returns:
+          A string representing the instruction description.
+        """
+        self._num_highlights = num_highlights
+        if self._num_highlights is None or self._num_highlights < 0:
+            self._num_highlights = random.randint(1, _NUM_HIGHLIGHTED_SECTIONS)
+
+        self._description_pattern = (
+            "Highlight at least {num_highlights} sections in your answer with "
+            + "markdown, i.e. *highlighted section*."
+        )
+
+        return self._description_pattern.format(num_highlights=self._num_highlights)
+
+    def get_instruction_args(self):
+        """Returns the keyword args of `build_description`."""
+        return {"num_highlights": self._num_highlights}
+
+    def get_instruction_args_keys(self):
+        """Returns the args keys of `build_description`."""
+        return ["num_highlights"]
+
+    def check_following(self, value):
+        """Checks if the number of highlighted sections meets the requirement.
+
+        Args:
+          value: a string representing the response. The response is expected to
+            contain highlighted sections in the format of *highlighted*.
+
+        Returns:
+          True if the actual number of highlighted sections in the format of
+          *highlighted sections* meets the minimum requirement; otherwise False.
+        """
+        num_highlights = 0
+        # `\*[^\n\*]*\*` also matches the empty `**` pairs inside `**bold**`;
+        # the strip("*") emptiness check below filters those out.
+        highlights = re.findall(r"\*[^\n\*]*\*", value)
+        double_highlights = re.findall(r"\*\*[^\n\*]*\*\*", value)
+        for highlight in highlights:
+            if highlight.strip("*").strip():
+                num_highlights += 1
+        for highlight in double_highlights:
+            # str.removeprefix/removesuffix require Python 3.9+.
+            if highlight.removeprefix("**").removesuffix("**").strip():
+                num_highlights += 1
+
+        return num_highlights >= self._num_highlights
+
+
class SectionChecker(Instruction):
    """Checks the sections."""

    def build_description(self, *, section_spliter=None, num_sections=None):
        """Build the instruction description.

        Args:
          section_spliter: A string for the keyword that marks a new section,
            e.g. `Section` or `SECTION`.
          num_sections: An integer specifying the number of sections.

        Returns:
          A string representing the instruction description.
        """
        if isinstance(section_spliter, str):
            section_spliter = section_spliter.strip()
        if section_spliter is None:
            section_spliter = random.choice(_SECTION_SPLITER)
        self._section_spliter = section_spliter

        if num_sections is None or num_sections < 0:
            num_sections = random.randint(1, _NUM_SECTIONS)
        self._num_sections = num_sections

        self._description_pattern = (
            "Your response must have {num_sections} sections. Mark the beginning "
            "of each section with {section_spliter} X, such as:\n"
            "{section_spliter} 1\n"
            "[content of section 1]\n"
            "{section_spliter} 2\n"
            "[content of section 2]"
        )

        return self._description_pattern.format(
            num_sections=self._num_sections, section_spliter=self._section_spliter
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "section_spliter": self._section_spliter,
            "num_sections": self._num_sections,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["section_spliter", "num_sections"]

    def check_following(self, value):
        """Checks the response contains multiple sections.

        Args:
          value: A string representing the response. The response is expected
            to contain multiple sections (number of sections is greater than 1).
            A new section starts with `Section 1`, where the number denotes the
            section index.

        Returns:
          True if the number of sections in the response is greater than or equal
          to the minimum number of sections; otherwise, False.
        """
        # NOTE(review): the splitter is inserted into the regex unescaped —
        # assumed to be a plain word like "Section"; confirm upstream callers.
        splitter_pattern = r"\s?" + self._section_spliter + r"\s?\d+\s?"
        pieces = re.split(splitter_pattern, value)
        return len(pieces) - 1 >= self._num_sections
+
+
class ParagraphChecker(Instruction):
    """Checks the paragraphs."""

    def build_description(self, *, num_paragraphs=None):
        """Build the instruction description.

        Args:
          num_paragraphs: An integer specifying the number of paragraphs.

        Returns:
          A string representing the instruction description.
        """
        if num_paragraphs is None or num_paragraphs < 0:
            num_paragraphs = random.randint(1, _NUM_PARAGRAPHS)
        self._num_paragraphs = num_paragraphs

        self._description_pattern = (
            "There should be {num_paragraphs} paragraphs. "
            "Paragraphs are separated with the markdown divider: ***"
        )

        return self._description_pattern.format(num_paragraphs=self._num_paragraphs)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"num_paragraphs": self._num_paragraphs}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_paragraphs"]

    def check_following(self, value):
        """Checks the response contains the required number of paragraphs.

        Args:
          value: A string representing the response. The response may contain
            paragraphs that are separated by the markdown divider: `***`.

        Returns:
          True if the actual number of paragraphs is the same as required;
          otherwise, False.
        """
        chunks = re.split(r"\s?\*\*\*\s?", value)
        count = len(chunks)

        for idx, chunk in enumerate(chunks):
            if not chunk.strip():
                # A blank chunk at either edge is tolerated (leading/trailing
                # divider); a blank interior paragraph fails outright.
                if idx in (0, len(chunks) - 1):
                    count -= 1
                else:
                    return False

        return count == self._num_paragraphs
+
+
class PostscriptChecker(Instruction):
    """Checks the postscript."""

    def build_description(self, *, postscript_marker=None):
        """Build the instruction description.

        Args:
          postscript_marker: A string containing the keyword that marks the start
            of the postscript section.

        Returns:
          A string representing the instruction description.
        """
        if isinstance(postscript_marker, str):
            postscript_marker = postscript_marker.strip()
        if postscript_marker is None:
            postscript_marker = random.choice(_POSTSCRIPT_MARKER)
        self._postscript_marker = postscript_marker

        self._description_pattern = (
            "At the end of your response, please explicitly add a postscript "
            "starting with {postscript}"
        )

        return self._description_pattern.format(postscript=self._postscript_marker)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"postscript_marker": self._postscript_marker}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["postscript_marker"]

    def check_following(self, value):
        """Checks if the response follows the postscript format.

        Args:
          value: a string representing the response. The response is expected to
            contain a postscript section.

        Returns:
          True if the response contains a postscript section starting with
          the keyword contained in the `instruction_args`; otherwise False.
        """
        text = value.lower()
        # The two canonical markers get lenient patterns that allow optional
        # spaces between the letters; any other marker is matched verbatim.
        if self._postscript_marker == "P.P.S":
            pattern = r"\s*p\.\s?p\.\s?s.*$"
        elif self._postscript_marker == "P.S.":
            pattern = r"\s*p\.\s?s\..*$"
        else:
            pattern = r"\s*" + self._postscript_marker.lower() + r".*$"
        return bool(re.findall(pattern, text, flags=re.MULTILINE))
+
+
class RephraseChecker(Instruction):
    """Checks the rephrase."""

    def build_description(self, *, original_message):
        """Build the instruction description.

        Args:
          original_message: A string representing the original message. The
            rephrased response should only change its words/sentences in between
            its two asterisks, for example, *change me*. Both original and
            rephrased messages should contain the changes in the form of
            *change me*.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `original_message` contains no *change me* span.
        """
        if not self.is_change(original_message):
            raise ValueError(
                f"Message {original_message} does not contain changes "
                "in the form of *change me*."
            )

        self._reference_without_change = original_message
        self._description = (
            "Rephrasing: Your rephrased response should only"
            "change the words/sentences in between two asterisks"
            "such as *change me*."
        )
        return self._description

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"original_message": self._reference_without_change}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["original_message"]

    def check_following(self, value):
        r"""Checks if the rephrasing follows the instruction.

        Args:
          value: A string representing the response, which is expected to
            rephrase the string of `instruction_args`.

        Returns:
          True if `value` and `instruction_args` only differ by the
          words/sentences in between two asterisks such as *change me*;
          otherwise, False.

        Raises:
          ValueError: If `value` contains no *change me* span.
        """
        if not self.is_change(value):
            raise ValueError(
                f"value {value} does not contain " "changes in the form of *change me*."
            )

        # Outside the *...* spans, the texts must be identical.
        stripped_response = self.strip_changes(value)
        stripped_reference = self.strip_changes(self._reference_without_change)
        return stripped_response == stripped_reference

    def is_change(self, response):
        """Returns a match object when the response contains a *change me* span."""
        return re.search(r"\*.*\*", response)

    def strip_changes(self, response):
        """Removes the *change me* spans from the response."""
        return re.sub(r"\*.*\*", "", response)
+
+
class KeywordChecker(Instruction):
    """Check the existence of certain keywords."""

    def build_description(self, *, keywords=None):
        """Build the instruction description.

        Args:
          keywords: A sequence of strings representing the keywords that are
            expected in the response.

        Returns:
          A string representing the instruction description.
        """
        if not keywords:
            self._keywords = instructions_util.generate_keywords(
                num_keywords=_NUM_KEYWORDS
            )
        else:
            self._keywords = keywords
        # Sort so the rendered description is deterministic.
        self._keywords = sorted(self._keywords)

        self._description_pattern = "Include keywords {keywords} in the response."

        return self._description_pattern.format(keywords=self._keywords)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"keywords": self._keywords}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["keywords"]

    def check_following(self, value):
        """Check if the response contains every expected keyword (case-insensitive)."""
        for keyword in self._keywords:
            # Escape the keyword so regex metacharacters (e.g. '+', '.', '(')
            # are matched literally instead of raising or matching wrongly.
            if not re.search(re.escape(keyword), value, flags=re.IGNORECASE):
                return False
        return True
+
+
class KeywordFrequencyChecker(Instruction):
    """Check the keyword frequency."""

    def build_description(self, *, keyword=None, frequency=None, relation=None):
        """Build the instruction description.

        Args:
          keyword: A string representing a keyword that is expected in the response.
          frequency: An integer specifying the number of times `keyword` is expected
            to appear in the response.
          relation: A string in (`less than`, `at least`), defining the relational
            operator for comparison.
            Two relational comparisons are supported for now:
            if 'less than', the actual number of occurrences < frequency;
            if 'at least', the actual number of occurrences >= frequency.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `relation` is given but not a supported relation.
        """
        if not keyword:
            self._keyword = instructions_util.generate_keywords(num_keywords=1)[0]
        else:
            self._keyword = keyword.strip()

        self._frequency = frequency
        if self._frequency is None or self._frequency < 0:
            self._frequency = random.randint(1, _KEYWORD_FREQUENCY)

        if relation is None:
            self._comparison_relation = random.choice(_COMPARISON_RELATION)
        elif relation not in _COMPARISON_RELATION:
            raise ValueError(
                "The supported relation for comparison must be in "
                f"{_COMPARISON_RELATION}, but {relation} is given."
            )
        else:
            self._comparison_relation = relation

        self._description_pattern = (
            "In your response, the word {keyword} should appear {relation} "
            "{frequency} times."
        )

        return self._description_pattern.format(
            keyword=self._keyword,
            relation=self._comparison_relation,
            frequency=self._frequency,
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "keyword": self._keyword,
            "frequency": self._frequency,
            "relation": self._comparison_relation,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["keyword", "frequency", "relation"]

    def check_following(self, value):
        """Checks if the response contains the keyword with the required frequency."""
        # Escape the keyword so regex metacharacters in it are counted
        # literally rather than interpreted as a pattern.
        actual_occurrences = len(
            re.findall(re.escape(self._keyword), value, flags=re.IGNORECASE)
        )

        if self._comparison_relation == _COMPARISON_RELATION[0]:
            return actual_occurrences < self._frequency
        elif self._comparison_relation == _COMPARISON_RELATION[1]:
            return actual_occurrences >= self._frequency
+
+
class NumberOfWords(Instruction):
    """Checks the number of words."""

    def build_description(self, *, num_words=None, relation=None):
        """Build the instruction description.

        Args:
          num_words: An integer specifying the number of words contained in the
            response.
          relation: A string in (`less than`, `at least`), defining the relational
            operator for comparison. 'less than' requires the actual word count
            to be < num_words; 'at least' requires it to be >= num_words.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `relation` is given but not a supported relation.
        """
        if num_words is None or num_words < 0:
            num_words = random.randint(_NUM_WORDS_LOWER_LIMIT, _NUM_WORDS_UPPER_LIMIT)
        self._num_words = num_words

        if relation is None:
            self._comparison_relation = random.choice(_COMPARISON_RELATION)
        elif relation not in _COMPARISON_RELATION:
            raise ValueError(
                "The supported relation for comparison must be in "
                f"{_COMPARISON_RELATION}, but {relation} is given."
            )
        else:
            self._comparison_relation = relation

        self._description_pattern = "Answer with {relation} {num_words} words."

        return self._description_pattern.format(
            relation=self._comparison_relation, num_words=self._num_words
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"num_words": self._num_words, "relation": self._comparison_relation}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_words", "relation"]

    def check_following(self, value):
        """Checks if the response contains the expected number of words."""
        word_count = instructions_util.count_words(value)

        if self._comparison_relation == _COMPARISON_RELATION[0]:
            return word_count < self._num_words
        elif self._comparison_relation == _COMPARISON_RELATION[1]:
            return word_count >= self._num_words
+
+
class JsonFormat(Instruction):
    """Check the Json format."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Entire output should be wrapped in JSON format. You can use markdown"
            " ticks such as ```."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Returns True if the response (minus markdown fences) parses as JSON."""
        # Strip a leading code fence (with optional json tag) and a trailing one.
        stripped = value.strip()
        for fence in ("```json", "```Json", "```JSON", "```"):
            stripped = stripped.removeprefix(fence)
        stripped = stripped.removesuffix("```").strip()
        try:
            json.loads(stripped)
        except ValueError:
            # json.JSONDecodeError subclasses ValueError.
            return False
        return True
+
+
class ParagraphFirstWordCheck(Instruction):
    """Check the paragraph count and the first word of the nth paragraph."""

    def build_description(
        self, num_paragraphs=None, nth_paragraph=None, first_word=None
    ):
        r"""Build the instruction description.

        Args:
          num_paragraphs: An integer indicating the number of paragraphs expected
            in the response. A paragraph is a subset of the string that is
            expected to be separated by '\n\n'.
          nth_paragraph: An integer indicating the paragraph number that we look at.
            Note that n starts from 1.
          first_word: A string that represents the first word of the nth paragraph.

        Returns:
          A string representing the instruction description.
        """
        self._num_paragraphs = num_paragraphs
        if self._num_paragraphs is None or self._num_paragraphs < 0:
            self._num_paragraphs = random.randint(1, _NUM_PARAGRAPHS)

        self._nth_paragraph = nth_paragraph
        if (
            self._nth_paragraph is None
            or self._nth_paragraph <= 0
            or self._nth_paragraph > self._num_paragraphs
        ):
            # NOTE(review): randint's upper bound is inclusive, so this may pick
            # num_paragraphs + 1, which check_following then rejects as out of
            # bounds — looks like an off-by-one; confirm intent.
            self._nth_paragraph = random.randint(1, self._num_paragraphs + 1)

        self._first_word = first_word
        if self._first_word is None:
            self._first_word = instructions_util.generate_keywords(num_keywords=1)[0]
        # Comparison in check_following is lowercase-only.
        self._first_word = self._first_word.lower()

        self._description_pattern = (
            "There should be {num_paragraphs} paragraphs. "
            + "Paragraphs and only paragraphs are separated with each other by two "
            + "new lines as if it was '\\n\\n' in python. "
            + "Paragraph {nth_paragraph} must start with word {first_word}."
        )

        return self._description_pattern.format(
            num_paragraphs=self._num_paragraphs,
            nth_paragraph=self._nth_paragraph,
            first_word=self._first_word,
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "num_paragraphs": self._num_paragraphs,
            "nth_paragraph": self._nth_paragraph,
            "first_word": self._first_word,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_paragraphs", "nth_paragraph", "first_word"]

    def check_following(self, value):
        """Checks for required number of paragraphs and correct first word.

        Args:
          value: a string representing the response. The response may contain
            paragraphs that are separated by two new lines and the first word of
            the nth paragraph will have to match a specified word.

        Returns:
          True if the number of paragraphs is the same as required and the first
          word of the specified paragraph is the same as required. Otherwise, false.
        """

        paragraphs = re.split(r"\n\n", value)
        num_paragraphs = len(paragraphs)

        # Blank paragraphs are excluded from the count ...
        for paragraph in paragraphs:
            if not paragraph.strip():
                num_paragraphs -= 1

        # check that index doesn't go out of bounds
        # NOTE(review): ... but indexing below uses the raw (unfiltered) list,
        # so blank paragraphs shift which paragraph is "nth" — confirm intent.
        if self._nth_paragraph <= num_paragraphs:
            paragraph = paragraphs[self._nth_paragraph - 1].strip()
            if not paragraph:
                return False
        else:
            return False

        first_word = ""
        punctuation = {".", ",", "?", "!", "'", '"'}

        # get first word and remove punctuation
        word = paragraph.split()[0].strip()
        # TODO(jeffrey): make more complex?
        # Strip leading quote characters before comparing.
        word = word.lstrip("'")
        word = word.lstrip('"')

        # Accumulate letters (lowercased) up to the first punctuation mark.
        for letter in word:
            if letter in punctuation:
                break
            first_word += letter.lower()

        return num_paragraphs == self._num_paragraphs and first_word == self._first_word
+
+
+# TODO(jeffrey) add relation - at least/at most?
# TODO(jeffrey) add relation - at least/at most?
class KeySentenceChecker(Instruction):
    """Check the existence of certain key sentences."""

    def build_description(self, key_sentences=None, num_sentences=None):
        """Build the instruction description.

        Args:
          key_sentences: A sequence of strings representing the key sentences that
            are expected in the response.
          num_sentences: The number of key sentences that are expected to be seen
            in the response.

        Returns:
          A string representing the instruction description.
        """
        if not key_sentences:
            # TODO(jeffrey) make a generate sentences function? wonderwords package
            self._key_sentences = set(["For now, this is fine."])
        else:
            self._key_sentences = key_sentences

        if not num_sentences:
            self._num_sentences = random.randint(1, len(self._key_sentences))
        else:
            self._num_sentences = num_sentences

        self._description_pattern = (
            "Include {num_sentences} of the following sentences {key_sentences}"
        )

        return self._description_pattern.format(
            num_sentences=self._num_sentences, key_sentences=self._key_sentences
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "num_sentences": self._num_sentences,
            "key_sentences": list(self._key_sentences),
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_sentences", "key_sentences"]

    def check_following(self, value):
        """Checks if the response contains the expected key sentences."""
        sentences = instructions_util.split_into_sentences(value)
        matched = sum(
            1 for key_sentence in self._key_sentences if key_sentence in sentences
        )
        return matched == self._num_sentences
+
+
class ForbiddenWords(Instruction):
    """Checks that specified words are not used in response."""

    def build_description(self, forbidden_words=None):
        """Build the instruction description.

        Args:
          forbidden_words: A sequence of strings representing words that are not
            allowed in the response.

        Returns:
          A string representing the instruction description.
        """
        if not forbidden_words:
            self._forbidden_words = instructions_util.generate_keywords(
                num_keywords=_NUM_KEYWORDS
            )
        else:
            # Deduplicate, then sort for a deterministic description.
            self._forbidden_words = list(set(forbidden_words))
        self._forbidden_words = sorted(self._forbidden_words)
        self._description_pattern = (
            "Do not include keywords {forbidden_words} in the response."
        )

        return self._description_pattern.format(forbidden_words=self._forbidden_words)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"forbidden_words": self._forbidden_words}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["forbidden_words"]

    def check_following(self, value):
        """Check if the response avoids every forbidden word (case-insensitive)."""
        for word in self._forbidden_words:
            # Escape the word so regex metacharacters in it are matched
            # literally; \b keeps the match on whole-word boundaries.
            if re.search(
                r"\b" + re.escape(word) + r"\b", value, flags=re.IGNORECASE
            ):
                return False
        return True
+
+
class RephraseParagraph(Instruction):
    """Checks that the paragraph is rephrased."""

    def build_description(self, *, original_paragraph, low, high):
        """Builds the instruction description.

        Args:
          original_paragraph: A string presenting the original paragraph. The
            rephrased response should have between low-high words in common.
          low: An integer presenting the lower bound of similar words.
          high: An integer representing the upper bound of similar words.

        Returns:
          A string representing the instruction description.
        """
        # TODO(jeffrey) make more encompassing
        self._original_paragraph = original_paragraph
        self._low = low
        self._high = high

        self._description = (
            "Rephrase the following paragraph: "
            "{original_paragraph}\nYour response should have "
            "between {low} and {high} of the same words. "
            "Words are the same if and only if all of the "
            "letters, ignoring cases, are the same. For "
            "example, 'run' is the same as 'Run' but different "
            "to 'ran'."
        )

        return self._description.format(
            original_paragraph=original_paragraph, low=self._low, high=self._high
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "original_paragraph": self._original_paragraph,
            "low": self._low,
            "high": self._high,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["original_paragraph", "low", "high"]

    def check_following(self, value):
        """Checks the count of shared words falls within [low, high]."""
        response_counts = collections.Counter(re.findall(r"\w+", value.lower()))
        original_counts = collections.Counter(
            re.findall(r"\w+", self._original_paragraph.lower())
        )

        # Multiset intersection size: each shared word counts up to the
        # smaller of its two frequencies.
        overlap = sum(
            min(count, response_counts[word])
            for word, count in original_counts.items()
        )

        return self._low <= overlap <= self._high
+
+
class TwoResponsesChecker(Instruction):
    """Check that two responses were given."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Give two different responses. Responses and only responses should"
            " be separated by 6 asterisk symbols: ******."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Checks if the response has two different answers.

        Args:
          value: A string representing the response.

        Returns:
          True if two responses are detected and false otherwise.
        """
        segments = value.split("******")
        kept = []
        for idx, segment in enumerate(segments):
            if segment.strip():
                kept.append(segment)
            elif idx not in (0, len(segments) - 1):
                # An empty segment between two separators means an empty response.
                return False
        return len(kept) == 2 and kept[0].strip() != kept[1].strip()
+
+
class RepeatPromptThenAnswer(Instruction):
    """Checks that the prompt is first repeated then answered."""

    def build_description(self, *, prompt_to_repeat=None):
        """Build the instruction description.

        Args:
          prompt_to_repeat: The prompt that is meant to be repeated.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `prompt_to_repeat` is falsy.
        """
        if not prompt_to_repeat:
            raise ValueError("prompt_to_repeat must be set.")
        else:
            self._prompt_to_repeat = prompt_to_repeat
        self._description_pattern = (
            "First repeat the request word for word without change,"
            " then give your answer (1. do not say any words or characters"
            " before repeating the request; 2. the request you need to repeat"
            " does not include this sentence)"
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"prompt_to_repeat": self._prompt_to_repeat}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["prompt_to_repeat"]

    def check_following(self, value):
        """Returns True if the response starts with the prompt (case-insensitive)."""
        return value.strip().lower().startswith(self._prompt_to_repeat.strip().lower())
+
+
class EndChecker(Instruction):
    """Checks that the response ends with a given phrase."""

    def build_description(self, *, end_phrase=None):
        """Build the instruction description.

        Args:
          end_phrase: A string representing the phrase the response should end with.

        Returns:
          A string representing the instruction description.
        """
        self._end_phrase = (
            end_phrase.strip() if isinstance(end_phrase, str) else end_phrase
        )
        if self._end_phrase is None:
            self._end_phrase = random.choice(_ENDING_OPTIONS)
        self._description_pattern = (
            "Finish your response with this exact phrase {ender}. "
            "No other words should follow this phrase."
        )
        return self._description_pattern.format(ender=self._end_phrase)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"end_phrase": self._end_phrase}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["end_phrase"]

    def check_following(self, value):
        """Checks if the response ends with the expected phrase (case-insensitive)."""
        # Compare via locals instead of mutating self._end_phrase, so
        # get_instruction_args keeps returning the phrase's original casing
        # even after a check has run.
        normalized_value = value.strip().strip('"').lower()
        normalized_phrase = self._end_phrase.strip().lower()
        return normalized_value.endswith(normalized_phrase)
+
+
class TitleChecker(Instruction):
    """Checks the response for a title."""

    def build_description(self):
        """Build the instruction description."""
        # NOTE(review): the example in this prompt text shows "<>" rather than
        # "<<title>>"; kept byte-identical since it is a runtime string.
        self._description_pattern = (
            "Your answer must contain a title, wrapped in double angular brackets,"
            " such as <>."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Returns True if the response contains a non-empty <<title>> span."""
        for candidate in re.findall(r"<<[^\n]+>>", value):
            if candidate.lstrip("<").rstrip(">").strip():
                return True
        return False
+
+
class LetterFrequencyChecker(Instruction):
    """Checks letter frequency."""

    def build_description(self, *, letter=None, let_frequency=None, let_relation=None):
        """Build the instruction description.

        Args:
          letter: A string representing a letter that is expected in the response.
          let_frequency: An integer specifying the number of times `keyword` is
            expected to appear in the response.
          let_relation: A string in (`less than`, `at least`), defining the
            relational operator for comparison. Two relational comparisons are
            supported for now; if 'less than', the actual number of
            occurrences < frequency; if 'at least', the actual number of
            occurrences >= frequency.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `let_relation` is given but not a supported relation.
        """
        # Accept only a single ASCII letter (ord 97..122 after lowercasing);
        # anything else falls back to a random letter.
        if (
            not letter
            or len(letter) > 1
            or ord(letter.lower()) < 97
            or ord(letter.lower()) > 122
        ):
            self._letter = random.choice(list(string.ascii_letters))
        else:
            self._letter = letter.strip()
        # Lowercased unconditionally: check_following compares against a
        # lowercased response.
        self._letter = self._letter.lower()

        self._frequency = let_frequency
        if self._frequency is None or self._frequency < 0:
            self._frequency = random.randint(1, _LETTER_FREQUENCY)

        if let_relation is None:
            self._comparison_relation = random.choice(_COMPARISON_RELATION)
        elif let_relation not in _COMPARISON_RELATION:
            raise ValueError(
                "The supported relation for comparison must be in "
                f"{_COMPARISON_RELATION}, but {let_relation} is given."
            )
        else:
            self._comparison_relation = let_relation

        self._description_pattern = (
            "In your response, the letter {letter} should appear {let_relation}"
            " {let_frequency} times."
        )

        return self._description_pattern.format(
            letter=self._letter,
            let_frequency=self._frequency,
            let_relation=self._comparison_relation,
        )

    def get_instruction_args(self):
        """Returns the keyword args of build description."""
        return {
            "letter": self._letter,
            "let_frequency": self._frequency,
            "let_relation": self._comparison_relation,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["letter", "let_frequency", "let_relation"]

    def check_following(self, value):
        """Checks that the response contains the letter at the right frequency."""
        value = value.lower()
        # Counter over characters: letters[self._letter] is the occurrence count.
        letters = collections.Counter(value)

        if self._comparison_relation == _COMPARISON_RELATION[0]:
            return letters[self._letter] < self._frequency
        else:
            return letters[self._letter] >= self._frequency
+
+
class CapitalLettersEnglishChecker(Instruction):
    """Checks that the response is in english and is in all capital letters."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Your entire response should be in English, and in all capital letters."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Checks that the response is in English and in all capital letters."""
        assert isinstance(value, str)

        # Check casing first so language detection runs only when needed.
        if not value.isupper():
            return False
        try:
            return langdetect.detect(value) == "en"
        except langdetect.LangDetectException as e:
            # Count as instruction is followed.
            logging.error(
                "Unable to detect language for text %s due to %s", value, e
            )  # refex: disable=pytotw.037
            return True
+
+
class LowercaseLettersEnglishChecker(Instruction):
    """Checks that the response is in english and is in all lowercase letters."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Your entire response should be in English, and in all lowercase"
            " letters. No capital letters are allowed."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Checks that the response is in English and in all lowercase letters."""
        assert isinstance(value, str)

        # Check casing first so language detection runs only when needed.
        if not value.islower():
            return False
        try:
            return langdetect.detect(value) == "en"
        except langdetect.LangDetectException as e:
            # Count as instruction is followed.
            logging.error(
                "Unable to detect language for text %s due to %s", value, e
            )  # refex: disable=pytotw.037
            return True
+
+
class CommaChecker(Instruction):
    """Checks the response for no commas."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "In your entire response, refrain from the use of any commas."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Returns True when the response contains no comma characters."""
        return "," not in value
+
+
class CapitalWordFrequencyChecker(Instruction):
    """Checks frequency of words with all capital letters."""

    def build_description(
        self,
        capital_frequency=None,
        capital_relation=None,
    ):
        """Build the instruction description.

        Args:
          capital_frequency: An integer that represents the number of words that
            should be in all capital letters.
          capital_relation: A string that is 'at least' or 'at most' that refers to
            the frequency.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `capital_relation` is given but not a supported relation.
        """
        self._frequency = capital_frequency
        if self._frequency is None:
            self._frequency = random.randint(1, _ALL_CAPITAL_WORD_FREQUENCY)

        self._comparison_relation = capital_relation
        if capital_relation is None:
            self._comparison_relation = random.choice(_COMPARISON_RELATION)
        elif capital_relation not in _COMPARISON_RELATION:
            raise ValueError(
                "The supported relation for comparison must be in "
                f"{_COMPARISON_RELATION}, but {capital_relation} is given."
            )

        self._description_pattern = (
            "In your response, words with all capital letters should appear"
            " {relation} {frequency} times."
        )

        return self._description_pattern.format(
            frequency=self._frequency, relation=self._comparison_relation
        )

    def get_instruction_args(self):
        """Returns the keyword args of build description."""
        return {
            "capital_frequency": self._frequency,
            "capital_relation": self._comparison_relation,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["capital_frequency", "capital_relation"]

    def check_following(self, value):
        """Checks the frequency of words written in all capital letters."""
        # Hyphenated words will count as one word.
        tokens = instructions_util.nltk.word_tokenize(value)
        num_capital = sum(1 for token in tokens if token.isupper())

        if self._comparison_relation == _COMPARISON_RELATION[0]:
            return num_capital < self._frequency
        else:
            return num_capital >= self._frequency
+
+
class QuotationChecker(Instruction):
    """Checks response is wrapped with double quotation marks."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Wrap your entire response with double quotation marks."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of build description."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Returns True if the stripped response starts and ends with a quote."""
        trimmed = value.strip()
        # len > 1 guards against a lone quote character counting as wrapped.
        return len(trimmed) > 1 and trimmed.startswith('"') and trimmed.endswith('"')
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/ifeval/instructions_registry.py b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/ifeval/instructions_registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..00d9a1de1985beacead34215952ecf4642d1ea35
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/ifeval/instructions_registry.py
@@ -0,0 +1,168 @@
+# Copyright 2023 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Registry of all instructions."""
+
+from lm_eval.tasks.ifeval import instructions
+
+
# Category prefixes for instruction ids. A full instruction id is a
# prefix concatenated with an instruction name, e.g. "keywords:existence".
_KEYWORD = "keywords:"

_LANGUAGE = "language:"

_LENGTH = "length_constraints:"

_CONTENT = "detectable_content:"

_FORMAT = "detectable_format:"

_MULTITURN = "multi-turn:"

_COMBINATION = "combination:"

_STARTEND = "startend:"

_CHANGE_CASES = "change_case:"

_PUNCTUATION = "punctuation:"

# Maps every registered instruction id to the Instruction subclass that
# builds its description and verifies responses against it.
INSTRUCTION_DICT = {
    _KEYWORD + "existence": instructions.KeywordChecker,
    _KEYWORD + "frequency": instructions.KeywordFrequencyChecker,
    # TODO(jeffreyzhou): make a proper set of sentences to choose from
    # _KEYWORD + "key_sentences": instructions.KeySentenceChecker,
    _KEYWORD + "forbidden_words": instructions.ForbiddenWords,
    _KEYWORD + "letter_frequency": instructions.LetterFrequencyChecker,
    _LANGUAGE + "response_language": instructions.ResponseLanguageChecker,
    _LENGTH + "number_sentences": instructions.NumberOfSentences,
    _LENGTH + "number_paragraphs": instructions.ParagraphChecker,
    _LENGTH + "number_words": instructions.NumberOfWords,
    _LENGTH + "nth_paragraph_first_word": instructions.ParagraphFirstWordCheck,
    _CONTENT + "number_placeholders": instructions.PlaceholderChecker,
    _CONTENT + "postscript": instructions.PostscriptChecker,
    _FORMAT + "number_bullet_lists": instructions.BulletListChecker,
    # TODO(jeffreyzhou): Pre-create paragraph or use prompt to replace
    # _CONTENT + "rephrase_paragraph": instructions.RephraseParagraph,
    _FORMAT + "constrained_response": instructions.ConstrainedResponseChecker,
    _FORMAT + "number_highlighted_sections": (instructions.HighlightSectionChecker),
    _FORMAT + "multiple_sections": instructions.SectionChecker,
    # TODO(tianjianlu): Re-enable rephrasing with preprocessing the message.
    # _FORMAT + "rephrase": instructions.RephraseChecker,
    _FORMAT + "json_format": instructions.JsonFormat,
    _FORMAT + "title": instructions.TitleChecker,
    # TODO(tianjianlu): Re-enable with specific prompts.
    # _MULTITURN + "constrained_start": instructions.ConstrainedStartChecker,
    _COMBINATION + "two_responses": instructions.TwoResponsesChecker,
    _COMBINATION + "repeat_prompt": instructions.RepeatPromptThenAnswer,
    _STARTEND + "end_checker": instructions.EndChecker,
    _CHANGE_CASES + "capital_word_frequency": instructions.CapitalWordFrequencyChecker,
    _CHANGE_CASES + "english_capital": instructions.CapitalLettersEnglishChecker,
    _CHANGE_CASES + "english_lowercase": instructions.LowercaseLettersEnglishChecker,
    _PUNCTUATION + "no_comma": instructions.CommaChecker,
    _STARTEND + "quotation": instructions.QuotationChecker,
}
+
# Maps each instruction id to the set of instruction ids it must not be
# combined with in the same prompt. The relation is symmetrized by
# conflict_make() before use, so only one direction needs listing here.
# Fix: the "number_paragraphs" entry previously listed
# "nth_paragraph_first_word" twice; the duplicate set element is removed
# (no behavior change — set literals de-duplicate).
INSTRUCTION_CONFLICTS = {
    _KEYWORD + "existence": {_KEYWORD + "existence"},
    _KEYWORD + "frequency": {_KEYWORD + "frequency"},
    # TODO(jeffreyzhou): make a proper set of sentences to choose from
    # _KEYWORD + "key_sentences": instructions.KeySentenceChecker,
    _KEYWORD + "forbidden_words": {_KEYWORD + "forbidden_words"},
    _KEYWORD + "letter_frequency": {_KEYWORD + "letter_frequency"},
    _LANGUAGE + "response_language": {
        _LANGUAGE + "response_language",
        _FORMAT + "multiple_sections",
        _KEYWORD + "existence",
        _KEYWORD + "frequency",
        _KEYWORD + "forbidden_words",
        _STARTEND + "end_checker",
        _CHANGE_CASES + "english_capital",
        _CHANGE_CASES + "english_lowercase",
    },
    _LENGTH + "number_sentences": {_LENGTH + "number_sentences"},
    _LENGTH + "number_paragraphs": {
        _LENGTH + "number_paragraphs",
        _LENGTH + "nth_paragraph_first_word",
        _LENGTH + "number_sentences",
    },
    _LENGTH + "number_words": {_LENGTH + "number_words"},
    _LENGTH + "nth_paragraph_first_word": {
        _LENGTH + "nth_paragraph_first_word",
        _LENGTH + "number_paragraphs",
    },
    _CONTENT + "number_placeholders": {_CONTENT + "number_placeholders"},
    _CONTENT + "postscript": {_CONTENT + "postscript"},
    _FORMAT + "number_bullet_lists": {_FORMAT + "number_bullet_lists"},
    # TODO(jeffreyzhou): Pre-create paragraph or use prompt to replace
    # _CONTENT + "rephrase_paragraph": instructions.RephraseParagraph,
    # A fixed-form response conflicts with every other instruction.
    _FORMAT + "constrained_response": set(INSTRUCTION_DICT.keys()),
    _FORMAT + "number_highlighted_sections": {_FORMAT + "number_highlighted_sections"},
    _FORMAT + "multiple_sections": {
        _FORMAT + "multiple_sections",
        _LANGUAGE + "response_language",
        _FORMAT + "number_highlighted_sections",
    },
    # TODO(tianjianlu): Re-enable rephrasing with preprocessing the message.
    # _FORMAT + "rephrase": instructions.RephraseChecker,
    _FORMAT + "json_format": set(INSTRUCTION_DICT.keys()).difference(
        {_KEYWORD + "forbidden_words", _KEYWORD + "existence"}
    ),
    _FORMAT + "title": {_FORMAT + "title"},
    # TODO(tianjianlu): Re-enable with specific prompts.
    # _MULTITURN + "constrained_start": instructions.ConstrainedStartChecker,
    _COMBINATION + "two_responses": set(INSTRUCTION_DICT.keys()).difference(
        {
            _KEYWORD + "forbidden_words",
            _KEYWORD + "existence",
            _LANGUAGE + "response_language",
            _FORMAT + "title",
            _PUNCTUATION + "no_comma",
        }
    ),
    _COMBINATION + "repeat_prompt": set(INSTRUCTION_DICT.keys()).difference(
        {_KEYWORD + "existence", _FORMAT + "title", _PUNCTUATION + "no_comma"}
    ),
    _STARTEND + "end_checker": {_STARTEND + "end_checker"},
    _CHANGE_CASES + "capital_word_frequency": {
        _CHANGE_CASES + "capital_word_frequency",
        _CHANGE_CASES + "english_lowercase",
        _CHANGE_CASES + "english_capital",
    },
    _CHANGE_CASES + "english_capital": {_CHANGE_CASES + "english_capital"},
    _CHANGE_CASES + "english_lowercase": {
        _CHANGE_CASES + "english_lowercase",
        _CHANGE_CASES + "english_capital",
    },
    _PUNCTUATION + "no_comma": {_PUNCTUATION + "no_comma"},
    _STARTEND + "quotation": {_STARTEND + "quotation", _FORMAT + "title"},
}
+
+
def conflict_make(conflicts):
    """Makes sure if A conflicts with B, B will conflict with A.

    Mutates `conflicts` in place and also returns it. After the call every
    instruction conflicts with itself, and the conflict relation is
    symmetric.

    Args:
      conflicts: Dictionary of potential conflicts where key is instruction id
        and value is set of instruction ids that it conflicts with.

    Returns:
      Revised version of the dictionary. All instructions conflict with
      themselves. If A conflicts with B, B will conflict with A.
    """
    for instruction_id, conflict_set in conflicts.items():
        # Mirror each listed conflict back onto the other instruction.
        for other_id in conflict_set:
            conflicts[other_id].add(instruction_id)
        # Every instruction conflicts with itself.
        conflict_set.add(instruction_id)
    return conflicts
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/ifeval/instructions_util.py b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/ifeval/instructions_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..ccb531f96e6e6c4313d8bf91786a4ce87d6c5926
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/ifeval/instructions_util.py
@@ -0,0 +1,1682 @@
+# Copyright 2023 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utility library of instructions."""
+
+import functools
+import random
+import re
+
+import immutabledict
+import nltk
+
+
def download_nltk_resources():
    """Fetch the NLTK 'punkt' tokenizer models if not already installed."""
    punkt_available = True
    try:
        nltk.data.find("tokenizers/punkt")
    except LookupError:
        punkt_available = False
    if not punkt_available:
        # One-time download into the local NLTK data directory.
        nltk.download("punkt")


# Run at import time so tokenizers below can rely on the model being present.
download_nltk_resources()
+
+WORD_LIST = [
+ "western",
+ "sentence",
+ "signal",
+ "dump",
+ "spot",
+ "opposite",
+ "bottom",
+ "potato",
+ "administration",
+ "working",
+ "welcome",
+ "morning",
+ "good",
+ "agency",
+ "primary",
+ "wish",
+ "responsibility",
+ "press",
+ "problem",
+ "president",
+ "steal",
+ "brush",
+ "read",
+ "type",
+ "beat",
+ "trainer",
+ "growth",
+ "lock",
+ "bone",
+ "case",
+ "equal",
+ "comfortable",
+ "region",
+ "replacement",
+ "performance",
+ "mate",
+ "walk",
+ "medicine",
+ "film",
+ "thing",
+ "rock",
+ "tap",
+ "total",
+ "competition",
+ "ease",
+ "south",
+ "establishment",
+ "gather",
+ "parking",
+ "world",
+ "plenty",
+ "breath",
+ "claim",
+ "alcohol",
+ "trade",
+ "dear",
+ "highlight",
+ "street",
+ "matter",
+ "decision",
+ "mess",
+ "agreement",
+ "studio",
+ "coach",
+ "assist",
+ "brain",
+ "wing",
+ "style",
+ "private",
+ "top",
+ "brown",
+ "leg",
+ "buy",
+ "procedure",
+ "method",
+ "speed",
+ "high",
+ "company",
+ "valuable",
+ "pie",
+ "analyst",
+ "session",
+ "pattern",
+ "district",
+ "pleasure",
+ "dinner",
+ "swimming",
+ "joke",
+ "order",
+ "plate",
+ "department",
+ "motor",
+ "cell",
+ "spend",
+ "cabinet",
+ "difference",
+ "power",
+ "examination",
+ "engine",
+ "horse",
+ "dimension",
+ "pay",
+ "toe",
+ "curve",
+ "literature",
+ "bother",
+ "fire",
+ "possibility",
+ "debate",
+ "activity",
+ "passage",
+ "hello",
+ "cycle",
+ "background",
+ "quiet",
+ "author",
+ "effect",
+ "actor",
+ "page",
+ "bicycle",
+ "error",
+ "throat",
+ "attack",
+ "character",
+ "phone",
+ "tea",
+ "increase",
+ "outcome",
+ "file",
+ "specific",
+ "inspector",
+ "internal",
+ "potential",
+ "staff",
+ "building",
+ "employer",
+ "shoe",
+ "hand",
+ "direction",
+ "garden",
+ "purchase",
+ "interview",
+ "study",
+ "recognition",
+ "member",
+ "spiritual",
+ "oven",
+ "sandwich",
+ "weird",
+ "passenger",
+ "particular",
+ "response",
+ "reaction",
+ "size",
+ "variation",
+ "a",
+ "cancel",
+ "candy",
+ "exit",
+ "guest",
+ "condition",
+ "fly",
+ "price",
+ "weakness",
+ "convert",
+ "hotel",
+ "great",
+ "mouth",
+ "mind",
+ "song",
+ "sugar",
+ "suspect",
+ "telephone",
+ "ear",
+ "roof",
+ "paint",
+ "refrigerator",
+ "organization",
+ "jury",
+ "reward",
+ "engineering",
+ "day",
+ "possession",
+ "crew",
+ "bar",
+ "road",
+ "description",
+ "celebration",
+ "score",
+ "mark",
+ "letter",
+ "shower",
+ "suggestion",
+ "sir",
+ "luck",
+ "national",
+ "progress",
+ "hall",
+ "stroke",
+ "theory",
+ "offer",
+ "story",
+ "tax",
+ "definition",
+ "history",
+ "ride",
+ "medium",
+ "opening",
+ "glass",
+ "elevator",
+ "stomach",
+ "question",
+ "ability",
+ "leading",
+ "village",
+ "computer",
+ "city",
+ "grand",
+ "confidence",
+ "candle",
+ "priest",
+ "recommendation",
+ "point",
+ "necessary",
+ "body",
+ "desk",
+ "secret",
+ "horror",
+ "noise",
+ "culture",
+ "warning",
+ "water",
+ "round",
+ "diet",
+ "flower",
+ "bus",
+ "tough",
+ "permission",
+ "week",
+ "prompt",
+ "connection",
+ "abuse",
+ "height",
+ "save",
+ "corner",
+ "border",
+ "stress",
+ "drive",
+ "stop",
+ "rip",
+ "meal",
+ "listen",
+ "confusion",
+ "girlfriend",
+ "living",
+ "relation",
+ "significance",
+ "plan",
+ "creative",
+ "atmosphere",
+ "blame",
+ "invite",
+ "housing",
+ "paper",
+ "drink",
+ "roll",
+ "silver",
+ "drunk",
+ "age",
+ "damage",
+ "smoke",
+ "environment",
+ "pack",
+ "savings",
+ "influence",
+ "tourist",
+ "rain",
+ "post",
+ "sign",
+ "grandmother",
+ "run",
+ "profit",
+ "push",
+ "clerk",
+ "final",
+ "wine",
+ "swim",
+ "pause",
+ "stuff",
+ "singer",
+ "funeral",
+ "average",
+ "source",
+ "scene",
+ "tradition",
+ "personal",
+ "snow",
+ "nobody",
+ "distance",
+ "sort",
+ "sensitive",
+ "animal",
+ "major",
+ "negotiation",
+ "click",
+ "mood",
+ "period",
+ "arrival",
+ "expression",
+ "holiday",
+ "repeat",
+ "dust",
+ "closet",
+ "gold",
+ "bad",
+ "sail",
+ "combination",
+ "clothes",
+ "emphasis",
+ "duty",
+ "black",
+ "step",
+ "school",
+ "jump",
+ "document",
+ "professional",
+ "lip",
+ "chemical",
+ "front",
+ "wake",
+ "while",
+ "inside",
+ "watch",
+ "row",
+ "subject",
+ "penalty",
+ "balance",
+ "possible",
+ "adult",
+ "aside",
+ "sample",
+ "appeal",
+ "wedding",
+ "depth",
+ "king",
+ "award",
+ "wife",
+ "blow",
+ "site",
+ "camp",
+ "music",
+ "safe",
+ "gift",
+ "fault",
+ "guess",
+ "act",
+ "shame",
+ "drama",
+ "capital",
+ "exam",
+ "stupid",
+ "record",
+ "sound",
+ "swing",
+ "novel",
+ "minimum",
+ "ratio",
+ "machine",
+ "shape",
+ "lead",
+ "operation",
+ "salary",
+ "cloud",
+ "affair",
+ "hit",
+ "chapter",
+ "stage",
+ "quantity",
+ "access",
+ "army",
+ "chain",
+ "traffic",
+ "kick",
+ "analysis",
+ "airport",
+ "time",
+ "vacation",
+ "philosophy",
+ "ball",
+ "chest",
+ "thanks",
+ "place",
+ "mountain",
+ "advertising",
+ "red",
+ "past",
+ "rent",
+ "return",
+ "tour",
+ "house",
+ "construction",
+ "net",
+ "native",
+ "war",
+ "figure",
+ "fee",
+ "spray",
+ "user",
+ "dirt",
+ "shot",
+ "task",
+ "stick",
+ "friend",
+ "software",
+ "promotion",
+ "interaction",
+ "surround",
+ "block",
+ "purpose",
+ "practice",
+ "conflict",
+ "routine",
+ "requirement",
+ "bonus",
+ "hole",
+ "state",
+ "junior",
+ "sweet",
+ "catch",
+ "tear",
+ "fold",
+ "wall",
+ "editor",
+ "life",
+ "position",
+ "pound",
+ "respect",
+ "bathroom",
+ "coat",
+ "script",
+ "job",
+ "teach",
+ "birth",
+ "view",
+ "resolve",
+ "theme",
+ "employee",
+ "doubt",
+ "market",
+ "education",
+ "serve",
+ "recover",
+ "tone",
+ "harm",
+ "miss",
+ "union",
+ "understanding",
+ "cow",
+ "river",
+ "association",
+ "concept",
+ "training",
+ "recipe",
+ "relationship",
+ "reserve",
+ "depression",
+ "proof",
+ "hair",
+ "revenue",
+ "independent",
+ "lift",
+ "assignment",
+ "temporary",
+ "amount",
+ "loss",
+ "edge",
+ "track",
+ "check",
+ "rope",
+ "estimate",
+ "pollution",
+ "stable",
+ "message",
+ "delivery",
+ "perspective",
+ "mirror",
+ "assistant",
+ "representative",
+ "witness",
+ "nature",
+ "judge",
+ "fruit",
+ "tip",
+ "devil",
+ "town",
+ "emergency",
+ "upper",
+ "drop",
+ "stay",
+ "human",
+ "neck",
+ "speaker",
+ "network",
+ "sing",
+ "resist",
+ "league",
+ "trip",
+ "signature",
+ "lawyer",
+ "importance",
+ "gas",
+ "choice",
+ "engineer",
+ "success",
+ "part",
+ "external",
+ "worker",
+ "simple",
+ "quarter",
+ "student",
+ "heart",
+ "pass",
+ "spite",
+ "shift",
+ "rough",
+ "lady",
+ "grass",
+ "community",
+ "garage",
+ "youth",
+ "standard",
+ "skirt",
+ "promise",
+ "blind",
+ "television",
+ "disease",
+ "commission",
+ "positive",
+ "energy",
+ "calm",
+ "presence",
+ "tune",
+ "basis",
+ "preference",
+ "head",
+ "common",
+ "cut",
+ "somewhere",
+ "presentation",
+ "current",
+ "thought",
+ "revolution",
+ "effort",
+ "master",
+ "implement",
+ "republic",
+ "floor",
+ "principle",
+ "stranger",
+ "shoulder",
+ "grade",
+ "button",
+ "tennis",
+ "police",
+ "collection",
+ "account",
+ "register",
+ "glove",
+ "divide",
+ "professor",
+ "chair",
+ "priority",
+ "combine",
+ "peace",
+ "extension",
+ "maybe",
+ "evening",
+ "frame",
+ "sister",
+ "wave",
+ "code",
+ "application",
+ "mouse",
+ "match",
+ "counter",
+ "bottle",
+ "half",
+ "cheek",
+ "resolution",
+ "back",
+ "knowledge",
+ "make",
+ "discussion",
+ "screw",
+ "length",
+ "accident",
+ "battle",
+ "dress",
+ "knee",
+ "log",
+ "package",
+ "it",
+ "turn",
+ "hearing",
+ "newspaper",
+ "layer",
+ "wealth",
+ "profile",
+ "imagination",
+ "answer",
+ "weekend",
+ "teacher",
+ "appearance",
+ "meet",
+ "bike",
+ "rise",
+ "belt",
+ "crash",
+ "bowl",
+ "equivalent",
+ "support",
+ "image",
+ "poem",
+ "risk",
+ "excitement",
+ "remote",
+ "secretary",
+ "public",
+ "produce",
+ "plane",
+ "display",
+ "money",
+ "sand",
+ "situation",
+ "punch",
+ "customer",
+ "title",
+ "shake",
+ "mortgage",
+ "option",
+ "number",
+ "pop",
+ "window",
+ "extent",
+ "nothing",
+ "experience",
+ "opinion",
+ "departure",
+ "dance",
+ "indication",
+ "boy",
+ "material",
+ "band",
+ "leader",
+ "sun",
+ "beautiful",
+ "muscle",
+ "farmer",
+ "variety",
+ "fat",
+ "handle",
+ "director",
+ "opportunity",
+ "calendar",
+ "outside",
+ "pace",
+ "bath",
+ "fish",
+ "consequence",
+ "put",
+ "owner",
+ "go",
+ "doctor",
+ "information",
+ "share",
+ "hurt",
+ "protection",
+ "career",
+ "finance",
+ "force",
+ "golf",
+ "garbage",
+ "aspect",
+ "kid",
+ "food",
+ "boot",
+ "milk",
+ "respond",
+ "objective",
+ "reality",
+ "raw",
+ "ring",
+ "mall",
+ "one",
+ "impact",
+ "area",
+ "news",
+ "international",
+ "series",
+ "impress",
+ "mother",
+ "shelter",
+ "strike",
+ "loan",
+ "month",
+ "seat",
+ "anything",
+ "entertainment",
+ "familiar",
+ "clue",
+ "year",
+ "glad",
+ "supermarket",
+ "natural",
+ "god",
+ "cost",
+ "conversation",
+ "tie",
+ "ruin",
+ "comfort",
+ "earth",
+ "storm",
+ "percentage",
+ "assistance",
+ "budget",
+ "strength",
+ "beginning",
+ "sleep",
+ "other",
+ "young",
+ "unit",
+ "fill",
+ "store",
+ "desire",
+ "hide",
+ "value",
+ "cup",
+ "maintenance",
+ "nurse",
+ "function",
+ "tower",
+ "role",
+ "class",
+ "camera",
+ "database",
+ "panic",
+ "nation",
+ "basket",
+ "ice",
+ "art",
+ "spirit",
+ "chart",
+ "exchange",
+ "feedback",
+ "statement",
+ "reputation",
+ "search",
+ "hunt",
+ "exercise",
+ "nasty",
+ "notice",
+ "male",
+ "yard",
+ "annual",
+ "collar",
+ "date",
+ "platform",
+ "plant",
+ "fortune",
+ "passion",
+ "friendship",
+ "spread",
+ "cancer",
+ "ticket",
+ "attitude",
+ "island",
+ "active",
+ "object",
+ "service",
+ "buyer",
+ "bite",
+ "card",
+ "face",
+ "steak",
+ "proposal",
+ "patient",
+ "heat",
+ "rule",
+ "resident",
+ "broad",
+ "politics",
+ "west",
+ "knife",
+ "expert",
+ "girl",
+ "design",
+ "salt",
+ "baseball",
+ "grab",
+ "inspection",
+ "cousin",
+ "couple",
+ "magazine",
+ "cook",
+ "dependent",
+ "security",
+ "chicken",
+ "version",
+ "currency",
+ "ladder",
+ "scheme",
+ "kitchen",
+ "employment",
+ "local",
+ "attention",
+ "manager",
+ "fact",
+ "cover",
+ "sad",
+ "guard",
+ "relative",
+ "county",
+ "rate",
+ "lunch",
+ "program",
+ "initiative",
+ "gear",
+ "bridge",
+ "breast",
+ "talk",
+ "dish",
+ "guarantee",
+ "beer",
+ "vehicle",
+ "reception",
+ "woman",
+ "substance",
+ "copy",
+ "lecture",
+ "advantage",
+ "park",
+ "cold",
+ "death",
+ "mix",
+ "hold",
+ "scale",
+ "tomorrow",
+ "blood",
+ "request",
+ "green",
+ "cookie",
+ "church",
+ "strip",
+ "forever",
+ "beyond",
+ "debt",
+ "tackle",
+ "wash",
+ "following",
+ "feel",
+ "maximum",
+ "sector",
+ "sea",
+ "property",
+ "economics",
+ "menu",
+ "bench",
+ "try",
+ "language",
+ "start",
+ "call",
+ "solid",
+ "address",
+ "income",
+ "foot",
+ "senior",
+ "honey",
+ "few",
+ "mixture",
+ "cash",
+ "grocery",
+ "link",
+ "map",
+ "form",
+ "factor",
+ "pot",
+ "model",
+ "writer",
+ "farm",
+ "winter",
+ "skill",
+ "anywhere",
+ "birthday",
+ "policy",
+ "release",
+ "husband",
+ "lab",
+ "hurry",
+ "mail",
+ "equipment",
+ "sink",
+ "pair",
+ "driver",
+ "consideration",
+ "leather",
+ "skin",
+ "blue",
+ "boat",
+ "sale",
+ "brick",
+ "two",
+ "feed",
+ "square",
+ "dot",
+ "rush",
+ "dream",
+ "location",
+ "afternoon",
+ "manufacturer",
+ "control",
+ "occasion",
+ "trouble",
+ "introduction",
+ "advice",
+ "bet",
+ "eat",
+ "kill",
+ "category",
+ "manner",
+ "office",
+ "estate",
+ "pride",
+ "awareness",
+ "slip",
+ "crack",
+ "client",
+ "nail",
+ "shoot",
+ "membership",
+ "soft",
+ "anybody",
+ "web",
+ "official",
+ "individual",
+ "pizza",
+ "interest",
+ "bag",
+ "spell",
+ "profession",
+ "queen",
+ "deal",
+ "resource",
+ "ship",
+ "guy",
+ "chocolate",
+ "joint",
+ "formal",
+ "upstairs",
+ "car",
+ "resort",
+ "abroad",
+ "dealer",
+ "associate",
+ "finger",
+ "surgery",
+ "comment",
+ "team",
+ "detail",
+ "crazy",
+ "path",
+ "tale",
+ "initial",
+ "arm",
+ "radio",
+ "demand",
+ "single",
+ "draw",
+ "yellow",
+ "contest",
+ "piece",
+ "quote",
+ "pull",
+ "commercial",
+ "shirt",
+ "contribution",
+ "cream",
+ "channel",
+ "suit",
+ "discipline",
+ "instruction",
+ "concert",
+ "speech",
+ "low",
+ "effective",
+ "hang",
+ "scratch",
+ "industry",
+ "breakfast",
+ "lay",
+ "join",
+ "metal",
+ "bedroom",
+ "minute",
+ "product",
+ "rest",
+ "temperature",
+ "many",
+ "give",
+ "argument",
+ "print",
+ "purple",
+ "laugh",
+ "health",
+ "credit",
+ "investment",
+ "sell",
+ "setting",
+ "lesson",
+ "egg",
+ "middle",
+ "marriage",
+ "level",
+ "evidence",
+ "phrase",
+ "love",
+ "self",
+ "benefit",
+ "guidance",
+ "affect",
+ "you",
+ "dad",
+ "anxiety",
+ "special",
+ "boyfriend",
+ "test",
+ "blank",
+ "payment",
+ "soup",
+ "obligation",
+ "reply",
+ "smile",
+ "deep",
+ "complaint",
+ "addition",
+ "review",
+ "box",
+ "towel",
+ "minor",
+ "fun",
+ "soil",
+ "issue",
+ "cigarette",
+ "internet",
+ "gain",
+ "tell",
+ "entry",
+ "spare",
+ "incident",
+ "family",
+ "refuse",
+ "branch",
+ "can",
+ "pen",
+ "grandfather",
+ "constant",
+ "tank",
+ "uncle",
+ "climate",
+ "ground",
+ "volume",
+ "communication",
+ "kind",
+ "poet",
+ "child",
+ "screen",
+ "mine",
+ "quit",
+ "gene",
+ "lack",
+ "charity",
+ "memory",
+ "tooth",
+ "fear",
+ "mention",
+ "marketing",
+ "reveal",
+ "reason",
+ "court",
+ "season",
+ "freedom",
+ "land",
+ "sport",
+ "audience",
+ "classroom",
+ "law",
+ "hook",
+ "win",
+ "carry",
+ "eye",
+ "smell",
+ "distribution",
+ "research",
+ "country",
+ "dare",
+ "hope",
+ "whereas",
+ "stretch",
+ "library",
+ "if",
+ "delay",
+ "college",
+ "plastic",
+ "book",
+ "present",
+ "use",
+ "worry",
+ "champion",
+ "goal",
+ "economy",
+ "march",
+ "election",
+ "reflection",
+ "midnight",
+ "slide",
+ "inflation",
+ "action",
+ "challenge",
+ "guitar",
+ "coast",
+ "apple",
+ "campaign",
+ "field",
+ "jacket",
+ "sense",
+ "way",
+ "visual",
+ "remove",
+ "weather",
+ "trash",
+ "cable",
+ "regret",
+ "buddy",
+ "beach",
+ "historian",
+ "courage",
+ "sympathy",
+ "truck",
+ "tension",
+ "permit",
+ "nose",
+ "bed",
+ "son",
+ "person",
+ "base",
+ "meat",
+ "usual",
+ "air",
+ "meeting",
+ "worth",
+ "game",
+ "independence",
+ "physical",
+ "brief",
+ "play",
+ "raise",
+ "board",
+ "she",
+ "key",
+ "writing",
+ "pick",
+ "command",
+ "party",
+ "yesterday",
+ "spring",
+ "candidate",
+ "physics",
+ "university",
+ "concern",
+ "development",
+ "change",
+ "string",
+ "target",
+ "instance",
+ "room",
+ "bitter",
+ "bird",
+ "football",
+ "normal",
+ "split",
+ "impression",
+ "wood",
+ "long",
+ "meaning",
+ "stock",
+ "cap",
+ "leadership",
+ "media",
+ "ambition",
+ "fishing",
+ "essay",
+ "salad",
+ "repair",
+ "today",
+ "designer",
+ "night",
+ "bank",
+ "drawing",
+ "inevitable",
+ "phase",
+ "vast",
+ "chip",
+ "anger",
+ "switch",
+ "cry",
+ "twist",
+ "personality",
+ "attempt",
+ "storage",
+ "being",
+ "preparation",
+ "bat",
+ "selection",
+ "white",
+ "technology",
+ "contract",
+ "side",
+ "section",
+ "station",
+ "till",
+ "structure",
+ "tongue",
+ "taste",
+ "truth",
+ "difficulty",
+ "group",
+ "limit",
+ "main",
+ "move",
+ "feeling",
+ "light",
+ "example",
+ "mission",
+ "might",
+ "wait",
+ "wheel",
+ "shop",
+ "host",
+ "classic",
+ "alternative",
+ "cause",
+ "agent",
+ "consist",
+ "table",
+ "airline",
+ "text",
+ "pool",
+ "craft",
+ "range",
+ "fuel",
+ "tool",
+ "partner",
+ "load",
+ "entrance",
+ "deposit",
+ "hate",
+ "article",
+ "video",
+ "summer",
+ "feature",
+ "extreme",
+ "mobile",
+ "hospital",
+ "flight",
+ "fall",
+ "pension",
+ "piano",
+ "fail",
+ "result",
+ "rub",
+ "gap",
+ "system",
+ "report",
+ "suck",
+ "ordinary",
+ "wind",
+ "nerve",
+ "ask",
+ "shine",
+ "note",
+ "line",
+ "mom",
+ "perception",
+ "brother",
+ "reference",
+ "bend",
+ "charge",
+ "treat",
+ "trick",
+ "term",
+ "homework",
+ "bake",
+ "bid",
+ "status",
+ "project",
+ "strategy",
+ "orange",
+ "let",
+ "enthusiasm",
+ "parent",
+ "concentrate",
+ "device",
+ "travel",
+ "poetry",
+ "business",
+ "society",
+ "kiss",
+ "end",
+ "vegetable",
+ "employ",
+ "schedule",
+ "hour",
+ "brave",
+ "focus",
+ "process",
+ "movie",
+ "illegal",
+ "general",
+ "coffee",
+ "ad",
+ "highway",
+ "chemistry",
+ "psychology",
+ "hire",
+ "bell",
+ "conference",
+ "relief",
+ "show",
+ "neat",
+ "funny",
+ "weight",
+ "quality",
+ "club",
+ "daughter",
+ "zone",
+ "touch",
+ "tonight",
+ "shock",
+ "burn",
+ "excuse",
+ "name",
+ "survey",
+ "landscape",
+ "advance",
+ "satisfaction",
+ "bread",
+ "disaster",
+ "item",
+ "hat",
+ "prior",
+ "shopping",
+ "visit",
+ "east",
+ "photo",
+ "home",
+ "idea",
+ "father",
+ "comparison",
+ "cat",
+ "pipe",
+ "winner",
+ "count",
+ "lake",
+ "fight",
+ "prize",
+ "foundation",
+ "dog",
+ "keep",
+ "ideal",
+ "fan",
+ "struggle",
+ "peak",
+ "safety",
+ "solution",
+ "hell",
+ "conclusion",
+ "population",
+ "strain",
+ "alarm",
+ "measurement",
+ "second",
+ "train",
+ "race",
+ "due",
+ "insurance",
+ "boss",
+ "tree",
+ "monitor",
+ "sick",
+ "course",
+ "drag",
+ "appointment",
+ "slice",
+ "still",
+ "care",
+ "patience",
+ "rich",
+ "escape",
+ "emotion",
+ "royal",
+ "female",
+ "childhood",
+ "government",
+ "picture",
+ "will",
+ "sock",
+ "big",
+ "gate",
+ "oil",
+ "cross",
+ "pin",
+ "improvement",
+ "championship",
+ "silly",
+ "help",
+ "sky",
+ "pitch",
+ "man",
+ "diamond",
+ "most",
+ "transition",
+ "work",
+ "science",
+ "committee",
+ "moment",
+ "fix",
+ "teaching",
+ "dig",
+ "specialist",
+ "complex",
+ "guide",
+ "people",
+ "dead",
+ "voice",
+ "original",
+ "break",
+ "topic",
+ "data",
+ "degree",
+ "reading",
+ "recording",
+ "bunch",
+ "reach",
+ "judgment",
+ "lie",
+ "regular",
+ "set",
+ "painting",
+ "mode",
+ "list",
+ "player",
+ "bear",
+ "north",
+ "wonder",
+ "carpet",
+ "heavy",
+ "officer",
+ "negative",
+ "clock",
+ "unique",
+ "baby",
+ "pain",
+ "assumption",
+ "disk",
+ "iron",
+ "bill",
+ "drawer",
+ "look",
+ "double",
+ "mistake",
+ "finish",
+ "future",
+ "brilliant",
+ "contact",
+ "math",
+ "rice",
+ "leave",
+ "restaurant",
+ "discount",
+ "sex",
+ "virus",
+ "bit",
+ "trust",
+ "event",
+ "wear",
+ "juice",
+ "failure",
+ "bug",
+ "context",
+ "mud",
+ "whole",
+ "wrap",
+ "intention",
+ "draft",
+ "pressure",
+ "cake",
+ "dark",
+ "explanation",
+ "space",
+ "angle",
+ "word",
+ "efficiency",
+ "management",
+ "habit",
+ "star",
+ "chance",
+ "finding",
+ "transportation",
+ "stand",
+ "criticism",
+ "flow",
+ "door",
+ "injury",
+ "insect",
+ "surprise",
+ "apartment",
+] # pylint: disable=line-too-long
+
# ISO 639-1 codes to language names.
# Read-only mapping (immutabledict) of two-letter ISO 639-1 codes to the
# English name of each supported response language.
LANGUAGE_CODES = immutabledict.immutabledict(
    {
        "en": "English",
        "es": "Spanish",
        "pt": "Portuguese",
        "ar": "Arabic",
        "hi": "Hindi",
        "fr": "French",
        "ru": "Russian",
        "de": "German",
        "ja": "Japanese",
        "it": "Italian",
        "bn": "Bengali",
        "uk": "Ukrainian",
        "th": "Thai",
        "ur": "Urdu",
        "ta": "Tamil",
        "te": "Telugu",
        "bg": "Bulgarian",
        "ko": "Korean",
        "pl": "Polish",
        "he": "Hebrew",
        "fa": "Persian",
        "vi": "Vietnamese",
        "ne": "Nepali",
        "sw": "Swahili",
        "kn": "Kannada",
        "mr": "Marathi",
        "gu": "Gujarati",
        "pa": "Punjabi",
        "ml": "Malayalam",
        "fi": "Finnish",
    }
)
+
_ALPHABETS = "([A-Za-z])"
_PREFIXES = "(Mr|St|Mrs|Ms|Dr)[.]"
_SUFFIXES = "(Inc|Ltd|Jr|Sr|Co)"
_STARTERS = r"(Mr|Mrs|Ms|Dr|Prof|Capt|Cpt|Lt|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
_ACRONYMS = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
_WEBSITES = "[.](com|net|org|io|gov|edu|me)"
_DIGITS = "([0-9])"
_MULTIPLE_DOTS = r"\.{2,}"

# Sentinel tokens used by split_into_sentences: periods that must NOT end a
# sentence are temporarily rewritten to _PRD, genuine sentence boundaries
# are marked with _STOP, and the text is finally split on _STOP.
_PRD = "<prd>"
_STOP = "<stop>"


def split_into_sentences(text):
    """Split the text into sentences.

    Non-terminal periods (titles like "Dr.", acronyms, decimals, web
    domains, single-letter initials) are protected by rewriting them to a
    placeholder token; real sentence terminators get a stop token; the text
    is then split on the stop token.

    Fix: the previous version had lost the placeholder/stop tokens (e.g.
    ``lambda match: "" * len(...) + ""``, ``text.replace(".", ".")`` and
    ``text.split("")`` — the last of which raises ``ValueError: empty
    separator``), apparently stripped as markup during a copy. The
    sentinel-based logic is restored here.

    Args:
      text: A string that consists of more than or equal to one sentences.

    Returns:
      A list of strings where each string is a sentence.
    """
    text = " " + text + "  "
    text = text.replace("\n", " ")
    # Protect dots that do not terminate a sentence.
    text = re.sub(_PREFIXES, "\\1" + _PRD, text)
    text = re.sub(_WEBSITES, _PRD + "\\1", text)
    text = re.sub(_DIGITS + "[.]" + _DIGITS, "\\1" + _PRD + "\\2", text)
    text = re.sub(
        _MULTIPLE_DOTS,
        lambda match: _PRD * len(match.group(0)) + _STOP,
        text,
    )
    if "Ph.D" in text:
        text = text.replace("Ph.D.", "Ph" + _PRD + "D" + _PRD)
    text = re.sub(r"\s" + _ALPHABETS + "[.] ", " \\1" + _PRD + " ", text)
    text = re.sub(_ACRONYMS + " " + _STARTERS, "\\1" + _STOP + " \\2", text)
    text = re.sub(
        _ALPHABETS + "[.]" + _ALPHABETS + "[.]" + _ALPHABETS + "[.]",
        "\\1" + _PRD + "\\2" + _PRD + "\\3" + _PRD,
        text,
    )
    text = re.sub(
        _ALPHABETS + "[.]" + _ALPHABETS + "[.]",
        "\\1" + _PRD + "\\2" + _PRD,
        text,
    )
    text = re.sub(" " + _SUFFIXES + "[.] " + _STARTERS, " \\1" + _STOP + " \\2", text)
    text = re.sub(" " + _SUFFIXES + "[.]", " \\1" + _PRD, text)
    text = re.sub(" " + _ALPHABETS + "[.]", " \\1" + _PRD, text)
    # Keep closing quotes attached to the sentence they end.
    if "”" in text:
        text = text.replace(".”", "”.")
    if '"' in text:
        text = text.replace('."', '".')
    if "!" in text:
        text = text.replace('!"', '"!')
    if "?" in text:
        text = text.replace('?"', '"?')
    # Mark genuine sentence terminators, restore protected dots, and split.
    text = text.replace(".", "." + _STOP)
    text = text.replace("?", "?" + _STOP)
    text = text.replace("!", "!" + _STOP)
    text = text.replace(_PRD, ".")
    sentences = text.split(_STOP)
    sentences = [s.strip() for s in sentences]
    if sentences and not sentences[-1]:
        sentences = sentences[:-1]
    return sentences
+
+
def count_words(text):
    """Counts the number of words.

    A "word" is a maximal run of word characters (``\\w+``), which is
    exactly what ``nltk.tokenize.RegexpTokenizer(r"\\w+")`` produced; the
    stdlib ``re.findall`` equivalent avoids building a tokenizer object on
    every call.

    Args:
      text: String whose words are counted.

    Returns:
      The number of ``\\w+`` tokens found in `text`.
    """
    return len(re.findall(r"\w+", text))
+
+
@functools.lru_cache(maxsize=None)
def _get_sentence_tokenizer():
    # Load NLTK's English punkt sentence tokenizer once; lru_cache makes
    # every later call (e.g. from count_sentences) reuse the same instance.
    return nltk.data.load("nltk:tokenizers/punkt/english.pickle")
+
+
def count_sentences(text):
    """Count the number of sentences using the cached punkt tokenizer."""
    return len(_get_sentence_tokenizer().tokenize(text))
+
+
def generate_keywords(num_keywords):
    """Randomly generates a few keywords.

    Args:
      num_keywords: Number of distinct words to draw from WORD_LIST.

    Returns:
      A list of `num_keywords` unique words (random.sample does not repeat
      elements; it raises ValueError if num_keywords exceeds the list size).
    """
    return random.sample(WORD_LIST, k=num_keywords)
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/ifeval/utils.py b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/ifeval/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..985e8d5ae578c484267c7c2d90ee7c896028941f
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/ifeval/utils.py
@@ -0,0 +1,134 @@
+import dataclasses
+from typing import Dict, Optional, Union
+
+from lm_eval.tasks.ifeval import instructions_registry
+
+
@dataclasses.dataclass
class InputExample:
    """One IFEval prompt together with the instructions it must satisfy."""

    # Unique id of the example within the dataset.
    key: int
    # Instruction ids understood by instructions_registry.INSTRUCTION_DICT,
    # one entry per instruction attached to this prompt.
    instruction_id_list: list[str]
    # The prompt text shown to the model.
    prompt: str
    # Per-instruction keyword arguments for build_description(); values may
    # be None (callers filter those out before use).
    kwargs: list[Dict[str, Optional[Union[str, int]]]]
+
+
@dataclasses.dataclass
class OutputExample:
    """Result of checking one model response against its instructions."""

    # Instruction ids that were checked, copied from the input example.
    instruction_id_list: list[str]
    # The prompt the response answered.
    prompt: str
    # The raw model response that was evaluated.
    response: str
    # True only if every instruction was followed.
    follow_all_instructions: bool
    # Per-instruction pass/fail, aligned with instruction_id_list.
    follow_instruction_list: list[bool]
+
+
def test_instruction_following_strict(
    inp,
    response,
):
    """Check *response* against each instruction of *inp* (strict variant).

    The response is used verbatim — no relaxation — and an empty/whitespace
    response fails every instruction.

    :param inp: InputExample with instruction ids, prompt, and kwargs.
    :param response: The model's raw response string.
    :return: OutputExample with per-instruction pass/fail flags.
    """
    stripped = response.strip()
    follow_flags = []

    for idx, instruction_id in enumerate(inp.instruction_id_list):
        instruction = instructions_registry.INSTRUCTION_DICT[instruction_id](
            instruction_id
        )

        # Drop falsy kwargs (None, "", 0) so build_description only receives
        # explicitly-set values and doesn't choke on unexpected keywords.
        clean_kwargs = {k: v for k, v in inp.kwargs[idx].items() if v}
        instruction.build_description(**clean_kwargs)

        args = instruction.get_instruction_args()
        if args and "prompt" in args:
            # Some instructions need the original prompt to build their check.
            instruction.build_description(prompt=inp.prompt)

        follow_flags.append(bool(stripped and instruction.check_following(response)))

    return OutputExample(
        instruction_id_list=inp.instruction_id_list,
        prompt=inp.prompt,
        response=response,
        follow_all_instructions=all(follow_flags),
        follow_instruction_list=follow_flags,
    )
+
+
def test_instruction_following_loose(
    inp,
    response,
):
    """Check *response* leniently — an upper bound on instruction following.

    Several relaxed variants of the response are tried (first line removed,
    last line removed, both removed, and each of those with '*' markup
    stripped); an instruction counts as followed if any variant passes.

    :param inp: InputExample with instruction ids, prompt, and kwargs.
    :param response: The model's raw response string.
    :return: OutputExample with per-instruction pass/fail flags.
    """
    lines = response.split("\n")
    no_first = "\n".join(lines[1:]).strip()
    no_last = "\n".join(lines[:-1]).strip()
    no_edges = "\n".join(lines[1:-1]).strip()
    # Candidates are checked in this order; the first one that passes wins.
    all_responses = [
        response,
        response.replace("*", ""),
        no_first,
        no_last,
        no_edges,
        no_first.replace("*", ""),
        no_last.replace("*", ""),
        no_edges.replace("*", ""),
    ]

    follow_flags = []
    for idx, instruction_id in enumerate(inp.instruction_id_list):
        instruction = instructions_registry.INSTRUCTION_DICT[instruction_id](
            instruction_id
        )

        # Drop falsy kwargs (None, "", 0) so build_description only receives
        # explicitly-set values and doesn't choke on unexpected keywords.
        instruction.build_description(
            **{k: v for k, v in inp.kwargs[idx].items() if v}
        )
        args = instruction.get_instruction_args()
        if args and "prompt" in args:
            instruction.build_description(prompt=inp.prompt)

        # any() short-circuits, mirroring the original break-on-first-pass.
        follow_flags.append(
            any(
                candidate.strip() and instruction.check_following(candidate)
                for candidate in all_responses
            )
        )

    return OutputExample(
        instruction_id_list=inp.instruction_id_list,
        prompt=inp.prompt,
        response=response,
        follow_all_instructions=all(follow_flags),
        follow_instruction_list=follow_flags,
    )
+
+
def process_results(doc, results):
    """Score one IFEval doc: run strict and loose checks on the first result.

    :param doc: Dataset row with "key", "instruction_id_list", "prompt",
        and "kwargs" fields.
    :param results: Model outputs; only results[0] is evaluated.
    :return: Dict with prompt-level (bool) and instruction-level (list of
        bool) accuracy entries for both strict and loose evaluation.
    """
    example = InputExample(
        key=doc["key"],
        instruction_id_list=doc["instruction_id_list"],
        prompt=doc["prompt"],
        kwargs=doc["kwargs"],
    )
    response = results[0]

    strict = test_instruction_following_strict(example, response)
    loose = test_instruction_following_loose(example, response)

    return {
        "prompt_level_strict_acc": strict.follow_all_instructions,
        "inst_level_strict_acc": strict.follow_instruction_list,
        "prompt_level_loose_acc": loose.follow_all_instructions,
        "inst_level_loose_acc": loose.follow_instruction_list,
    }
+
+
def agg_inst_level_acc(items):
    """Micro-average instruction-level accuracy.

    *items* is a list of per-example bool lists; the result is the fraction
    of True over all instructions pooled together. Raises ZeroDivisionError
    if there are no instructions at all.
    """
    followed = sum(flag for group in items for flag in group)
    total = sum(len(group) for group in items)
    return followed / total
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/leaderboard.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/leaderboard.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d9c5aaac175df34bf0f5ba91684eac6c4db195fc
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/leaderboard.yaml
@@ -0,0 +1,8 @@
+group: leaderboard
+task:
+ - leaderboard_mmlu_pro
+ - leaderboard_bbh
+ - leaderboard_gpqa
+ - leaderboard_math_hard
+ - leaderboard_ifeval
+ - leaderboard_musr
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/musr/_musr.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/musr/_musr.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..060d231aae9a11c3078999364b23de802caf5638
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/musr/_musr.yaml
@@ -0,0 +1,5 @@
+group: leaderboard_musr
+task:
+ - leaderboard_musr_murder_mysteries
+ - leaderboard_musr_object_placements
+ - leaderboard_musr_team_allocation
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/musr/_template_yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/musr/_template_yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d14081247271fb8f3860e7f636b9265e9ef418b5
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/musr/_template_yaml
@@ -0,0 +1,11 @@
+dataset_path: TAUR-Lab/MuSR
+output_type: multiple_choice
+doc_to_text: !function utils.doc_to_text
+doc_to_target: "{{answer_choice}}"
+doc_to_choice: "{{choices}}"
+metric_list:
+ - metric: acc_norm
+ aggregation: mean
+ higher_is_better: true
+metadata:
+ version: 1.0
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/musr/musr_murder_mysteries.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/musr/musr_murder_mysteries.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..88aac33eef3d9eee2479f8f0b3c391d6ffdda0a2
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/musr/musr_murder_mysteries.yaml
@@ -0,0 +1,3 @@
+include: "_template_yaml"
+task: leaderboard_musr_murder_mysteries
+test_split: murder_mysteries
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/musr/musr_object_placements.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/musr/musr_object_placements.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..89b29fb732422e2ae0bed278307dc7f633d39e34
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/musr/musr_object_placements.yaml
@@ -0,0 +1,3 @@
+include: "_template_yaml"
+task: leaderboard_musr_object_placements
+test_split: object_placements
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/musr/musr_team_allocation.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/musr/musr_team_allocation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8ede2ba9e05bffa93b6f0a5bc82005a842bbce02
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/musr/musr_team_allocation.yaml
@@ -0,0 +1,3 @@
+include: "_template_yaml"
+task: leaderboard_musr_team_allocation
+test_split: team_allocation
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/musr/utils.py b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/musr/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d0a7d1ca0c98a4ae0641b0520e693826090b7b9
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/leaderboard/musr/utils.py
@@ -0,0 +1,26 @@
+import ast
+
+
def doc_to_choice(doc):
    """Parse the stringified Python list stored in ``doc["choices"]``.

    The MuSR dataset stores answer options as a repr'd list; literal_eval
    safely turns it back into a real list.
    """
    raw = doc["choices"]
    return ast.literal_eval(raw)
+
+
+DOC_TO_TEXT = "{narrative}\n\n" "{question}\n\n" "{choices}\n" "Answer:"
+
+
+def doc_to_text(doc):
+ """
+ Convert a doc to text.
+ """
+ choices = ""
+ for i, choice in enumerate(ast.literal_eval(doc["choices"])):
+ choices += f"{i+1} - {choice}\n"
+
+ text = DOC_TO_TEXT.format(
+ narrative=doc["narrative"], question=doc["question"], choices=choices
+ )
+
+ return text
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/README.md b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..fb82edba224d643f68c7317131ecf8a3f96f0f42
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/README.md
@@ -0,0 +1,79 @@
+# PAWS-X
+
+### Paper
+
+Title: `PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification`
+Abstract: https://arxiv.org/abs/1908.11828
+
+The dataset consists of 23,659 human translated PAWS evaluation pairs and
+296,406 machine translated training pairs in 6 typologically distinct languages.
+
+Examples are adapted from PAWS-Wiki
+
+Prompt format (same as in mGPT):
+
+"" + sentence1 + ", right? " + mask + ", " + sentence2 + "",
+
+where mask is the string that matches the label:
+
+Yes, No.
+
+Example:
+
+ The Tabaci River is a tributary of the River Leurda in Romania, right? No, The Leurda River is a tributary of the River Tabaci in Romania.
+
+Language specific prompts are translated word-by-word with Google Translate
+and may differ from the ones used by mGPT and XGLM (they do not provide their prompts).
+
+Homepage: https://github.com/google-research-datasets/paws/tree/master/pawsx
+
+
+### Citation
+
+```
+@inproceedings{yang-etal-2019-paws,
+ title = "{PAWS}-{X}: A Cross-lingual Adversarial Dataset for Paraphrase Identification",
+ author = "Yang, Yinfei and
+ Zhang, Yuan and
+ Tar, Chris and
+ Baldridge, Jason",
+ booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)",
+ month = nov,
+ year = "2019",
+ address = "Hong Kong, China",
+ publisher = "Association for Computational Linguistics",
+ url = "https://aclanthology.org/D19-1382",
+ doi = "10.18653/v1/D19-1382",
+ pages = "3687--3692",
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* `pawsx`
+
+#### Tasks
+
+* `paws_de`: German
+* `paws_en`: English
+* `paws_es`: Spanish
+* `paws_fr`: French
+* `paws_ja`: Japanese
+* `paws_ko`: Korean
+* `paws_zh`: Chinese
+
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+ * [ ] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/_generate_config.py b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/_generate_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1341fec89b52f3b0e9e7e778825b0d774117174
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/_generate_config.py
@@ -0,0 +1,109 @@
+import argparse
+
+import yaml
+
+
# Languages available in the PAWS-X dataset.
# (An earlier copy of this header said "xnli" — a copy-paste leftover; the
# subsets below are the seven PAWS-X languages.)
# These correspond to dataset names (Subsets) on HuggingFace.
# A yaml file is generated by this script for each language.

LANGUAGES = {
    "de": {  # German
        "QUESTION_WORD": "richtig",
        "YES": "Ja",
        "NO": "Nein",
    },
    "en": {  # English
        "QUESTION_WORD": "right",
        "YES": "Yes",
        "NO": "No",
    },
    "es": {  # Spanish
        "QUESTION_WORD": "verdad",
        "YES": "Sí",
        "NO": "No",
    },
    "fr": {  # French
        "QUESTION_WORD": "n'est-ce pas",
        "YES": "Oui",
        # NOTE(review): "No" is not French ("Non" would be) — this matches the
        # committed paws_fr.yaml, so changing it would alter the task; confirm
        # it is intentional before "fixing".
        "NO": "No",
    },
    "ja": {  # Japanese
        "QUESTION_WORD": "ですね",
        "YES": "はい",
        "NO": "いいえ",
    },
    "ko": {  # Korean
        "QUESTION_WORD": "맞죠",
        "YES": "예",
        "NO": "아니요",
    },
    "zh": {  # Chinese
        "QUESTION_WORD": "对吧",
        "YES": "是",
        "NO": "不是",
    },
}
+
+
def gen_lang_yamls(output_dir: str, overwrite: bool) -> None:
    """
    Generate a yaml file for each language.

    :param output_dir: The directory to output the files to.
    :param overwrite: Whether to overwrite files if they already exist.
    :raises FileExistsError: listing every file skipped because it already
        existed (only possible when overwrite is False).
    """
    skipped = []
    # "x" mode makes open() raise FileExistsError instead of clobbering.
    mode = "w" if overwrite else "x"
    for lang, words in LANGUAGES.items():
        file_name = f"paws_{lang}.yaml"
        question_word = words["QUESTION_WORD"]
        yes = words["YES"]
        no = words["NO"]
        # doc_to_choice is a Jinja expression building the two candidate
        # continuations ("..., right? Yes, ..." / "..., right? No, ...").
        config = {
            "include": "pawsx_template_yaml",
            "dataset_name": lang,
            "task": f"paws_{lang}",
            "doc_to_text": "",
            "doc_to_choice": f"{{{{["
            f"""sentence1+\", {question_word}? {yes}, \"+sentence2,"""
            f""" sentence1+\", {question_word}? {no}, \"+sentence2"""
            f"]}}}}",
        }
        try:
            with open(f"{output_dir}/{file_name}", mode, encoding="utf8") as f:
                f.write("# Generated by utils.py\n")
                yaml.dump(config, f, allow_unicode=True)
        except FileExistsError:
            skipped.append(file_name)

    if skipped:
        raise FileExistsError(
            "Files were not created because they already exist (use --overwrite flag):"
            f" {', '.join(skipped)}"
        )
+
+
def main() -> None:
    """Parse CLI args and generate language-specific yaml files."""
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "--overwrite",
        action="store_true",
        default=False,
        help="Overwrite files if they already exist",
    )
    cli.add_argument(
        "--output-dir",
        default=".",
        help="Directory to write yaml files to",
    )
    options = cli.parse_args()

    gen_lang_yamls(output_dir=options.output_dir, overwrite=options.overwrite)


if __name__ == "__main__":
    main()
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/_pawsx.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/_pawsx.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6377e05c9550510d13030f0aba1cb109c207bc56
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/_pawsx.yaml
@@ -0,0 +1,15 @@
+group: pawsx
+task:
+ - paws_en
+ - paws_de
+ - paws_es
+ - paws_fr
+ - paws_ja
+ - paws_ko
+ - paws_zh
+aggregate_metric_list:
+ - metric: acc
+ aggregation: mean
+ weight_by_size: true
+metadata:
+ version: 0.0
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_de.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_de.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0d9ffad3b000727764c69e7eef3596d4d3b0762f
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_de.yaml
@@ -0,0 +1,7 @@
+# Generated by utils.py
+dataset_name: de
+doc_to_choice: '{{[sentence1+", richtig? Ja, "+sentence2, sentence1+", richtig? Nein,
+ "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_de
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_en.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_en.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c667e77a74f66a94efe9e10d6ef0b54bf53645d4
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_en.yaml
@@ -0,0 +1,6 @@
+# Generated by utils.py
+dataset_name: en
+doc_to_choice: '{{[sentence1+", right? Yes, "+sentence2, sentence1+", right? No, "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_en
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_es.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_es.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e58805a9c6d7fcbcd5ada9a277d7fa2283655012
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_es.yaml
@@ -0,0 +1,7 @@
+# Generated by utils.py
+dataset_name: es
+doc_to_choice: '{{[sentence1+", verdad? Sí, "+sentence2, sentence1+", verdad? No,
+ "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_es
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_fr.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_fr.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f6973d998e53624af21ffedef577a040cc467d9d
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_fr.yaml
@@ -0,0 +1,7 @@
+# Generated by utils.py
+dataset_name: fr
+doc_to_choice: '{{[sentence1+", n''est-ce pas? Oui, "+sentence2, sentence1+", n''est-ce
+ pas? No, "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_fr
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_ja.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_ja.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..296885b3e2790bfaa72ecc697ed7e9f3269aec47
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_ja.yaml
@@ -0,0 +1,6 @@
+# Generated by utils.py
+dataset_name: ja
+doc_to_choice: '{{[sentence1+", ですね? はい, "+sentence2, sentence1+", ですね? いいえ, "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_ja
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_ko.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_ko.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fc7034415496efcaffc50e988bd5f5f359c4fb2a
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_ko.yaml
@@ -0,0 +1,6 @@
+# Generated by utils.py
+dataset_name: ko
+doc_to_choice: '{{[sentence1+", 맞죠? 예, "+sentence2, sentence1+", 맞죠? 아니요, "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_ko
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_zh.yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_zh.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6d8d2ac044e71e775eafe89d8df7bc2aa6675390
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_zh.yaml
@@ -0,0 +1,6 @@
+# Generated by utils.py
+dataset_name: zh
+doc_to_choice: '{{[sentence1+", 对吧? 是, "+sentence2, sentence1+", 对吧? 不是, "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_zh
diff --git a/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/pawsx_template_yaml b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/pawsx_template_yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dfdaae274606078923f2e34ae0e1a7d5b794c832
--- /dev/null
+++ b/scripts/yans/lm-evaluation-harness/lm_eval/tasks/paws-x/pawsx_template_yaml
@@ -0,0 +1,19 @@
+# This file will be included in the generated language-specific task configs.
+# It doesn't have a yaml file extension as it is not meant to be imported directly
+# by the harness.
+task: null
+dataset_path: paws-x
+dataset_name: null
+output_type: multiple_choice
+training_split: train
+validation_split: validation
+test_split: test
+doc_to_text: null
+doc_to_target: label
+doc_to_choice: null
+metric_list:
+ - metric: acc
+ aggregation: mean
+ higher_is_better: true
+metadata:
+ version: 0.0