Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/README.md +12 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_da.yaml +3 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_de.yaml +3 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_el.yaml +3 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_es.yaml +3 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_fi.yaml +23 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_hu.yaml +3 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_is.yaml +22 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_it.yaml +3 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_nb.yaml +3 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_pl.yaml +3 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_pt.yaml +3 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_sv.yaml +3 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/asdiv/README.md +56 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/asdiv/default.yaml +16 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/_generate_configs.py +100 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_amh_Ethi.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ary_Arab.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ces_Latn.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_jpn_Jpan.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_lin_Latn.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_mya_Mymr.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_nld_Latn.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_sun_Latn.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tgl_Latn.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tsn_Latn.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tur_Latn.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_uzn_Latn.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_war_Latn.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_zho_Hans.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/auto_debugging.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/codenames.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/empirical_judgments.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/hhh_alignment.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/known_unknowns.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/natural_instructions.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/strange_stories.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/word_sorting.yaml +4 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/fda/README.md +78 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/fda/fda.yaml +2 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/fda/task.py +98 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/hellaswag/README.md +49 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/hellaswag/__pycache__/utils.cpython-310.pyc +0 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/hellaswag/hellaswag.yaml +24 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/hellaswag/utils.py +25 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/ifeval/README.md +45 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/ifeval/ifeval.yaml +29 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/ifeval/instructions.py +1612 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/ifeval/instructions_registry.py +168 -0
- scripts/yans/lm-evaluation-harness/lm_eval/tasks/ifeval/instructions_util.py +1682 -0
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/README.md
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# arc mt
|
| 2 |
+
|
| 3 |
+
arc mt is an implementation of tasks to support machine translated arc
|
| 4 |
+
challenge evals, to improve eval support across a number of additional
|
| 5 |
+
languages.
|
| 6 |
+
|
| 7 |
+
The main page for the effort is
|
| 8 |
+
[here](https://huggingface.co/datasets/LumiOpen/arc_challenge_mt) and we will
|
| 9 |
+
include more data and analysis there.
|
| 10 |
+
|
| 11 |
+
Initial datasets include a number of European languages, and we plan to expand
|
| 12 |
+
more in the future.
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_da.yaml
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
include: arc_challenge_mt_fi.yaml
|
| 2 |
+
task: arc_challenge_mt_da
|
| 3 |
+
dataset_name: da
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_de.yaml
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
include: arc_challenge_mt_fi.yaml
|
| 2 |
+
task: arc_challenge_mt_de
|
| 3 |
+
dataset_name: de
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_el.yaml
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
include: arc_challenge_mt_fi.yaml
|
| 2 |
+
task: arc_challenge_mt_el
|
| 3 |
+
dataset_name: el
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_es.yaml
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
include: arc_challenge_mt_fi.yaml
|
| 2 |
+
task: arc_challenge_mt_es
|
| 3 |
+
dataset_name: es
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_fi.yaml
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Base config for the machine-translated ARC Challenge ("arc_mt") task family.
# The sibling arc_challenge_mt_*.yaml files `include:` this file and override
# only `task` and `dataset_name`.
tag:
  - arc_challenge_mt
task: arc_challenge_mt_fi
dataset_path: LumiOpen/arc_challenge_mt
dataset_name: fi  # HF config name = language code; overridden by including configs
output_type: multiple_choice
training_split: train
validation_split: validation
test_split: test
doc_to_text: "Question: {{question}}\nAnswer:"
# Gold target is the index of answerKey within the per-question choice labels.
doc_to_target: "{{choices.label.index(answerKey)}}"
doc_to_choice: "{{choices.text}}"
should_decontaminate: true
doc_to_decontamination_query: "Question: {{question}}\nAnswer:"
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm  # length-normalized accuracy
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_hu.yaml
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
include: arc_challenge_mt_fi.yaml
|
| 2 |
+
task: arc_challenge_mt_hu
|
| 3 |
+
dataset_name: hu
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_is.yaml
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Icelandic ARC Challenge (mideind translation); mirrors arc_challenge_mt_fi.yaml
# but points at a different dataset repo, so it cannot simply `include:` it.
# Uses `tag` (not the deprecated plain-string `group`) for consistency with the
# rest of the arc_challenge_mt family.
tag:
  - arc_challenge_mt
task: arc_challenge_mt_is
dataset_path: mideind/icelandic-arc-challenge
output_type: multiple_choice
training_split: train
validation_split: validation
test_split: test
doc_to_text: "Question: {{question}}\nAnswer:"
# Gold target is the index of answerKey within the per-question choice labels.
doc_to_target: "{{choices.label.index(answerKey)}}"
doc_to_choice: "{{choices.text}}"
should_decontaminate: true
doc_to_decontamination_query: "Question: {{question}}\nAnswer:"
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm  # length-normalized accuracy
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_it.yaml
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
include: arc_challenge_mt_fi.yaml
|
| 2 |
+
task: arc_challenge_mt_it
|
| 3 |
+
dataset_name: it
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_nb.yaml
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
include: arc_challenge_mt_fi.yaml
|
| 2 |
+
task: arc_challenge_mt_nb
|
| 3 |
+
dataset_name: nb
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_pl.yaml
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
include: arc_challenge_mt_fi.yaml
|
| 2 |
+
task: arc_challenge_mt_pl
|
| 3 |
+
dataset_name: pl
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_pt.yaml
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
include: arc_challenge_mt_fi.yaml
|
| 2 |
+
task: arc_challenge_mt_pt
|
| 3 |
+
dataset_name: pt
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arc_mt/arc_challenge_mt_sv.yaml
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
include: arc_challenge_mt_fi.yaml
|
| 2 |
+
task: arc_challenge_mt_sv
|
| 3 |
+
dataset_name: sv
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/asdiv/README.md
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ASDiv
|
| 2 |
+
|
| 3 |
+
### Paper
|
| 4 |
+
|
| 5 |
+
Title: `ASDiv: A Diverse Corpus for Evaluating and Developing English Math Word Problem Solvers`
|
| 6 |
+
|
| 7 |
+
Abstract: https://arxiv.org/abs/2106.15772
|
| 8 |
+
|
| 9 |
+
ASDiv (Academia Sinica Diverse MWP Dataset) is a diverse (in terms of both language
|
| 10 |
+
patterns and problem types) English math word problem (MWP) corpus for evaluating
|
| 11 |
+
the capability of various MWP solvers. Existing MWP corpora for studying AI progress
|
| 12 |
+
remain limited either in language usage patterns or in problem types. We thus present
|
| 13 |
+
a new English MWP corpus with 2,305 MWPs that cover more text patterns and most problem
|
| 14 |
+
types taught in elementary school. Each MWP is annotated with its problem type and grade
|
| 15 |
+
level (for indicating the level of difficulty).
|
| 16 |
+
|
| 17 |
+
NOTE: We currently ignore formulas for answer generation.
|
| 18 |
+
|
| 19 |
+
Homepage: https://github.com/chaochun/nlu-asdiv-dataset
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
### Citation
|
| 23 |
+
|
| 24 |
+
```
|
| 25 |
+
@misc{miao2021diverse,
|
| 26 |
+
title={A Diverse Corpus for Evaluating and Developing English Math Word Problem Solvers},
|
| 27 |
+
author={Shen-Yun Miao and Chao-Chun Liang and Keh-Yih Su},
|
| 28 |
+
year={2021},
|
| 29 |
+
eprint={2106.15772},
|
| 30 |
+
archivePrefix={arXiv},
|
| 31 |
+
primaryClass={cs.AI}
|
| 32 |
+
}
|
| 33 |
+
```
|
| 34 |
+
|
| 35 |
+
### Groups, Tags, and Tasks
|
| 36 |
+
|
| 37 |
+
#### Groups
|
| 38 |
+
|
| 39 |
+
* Not part of a group yet.
|
| 40 |
+
|
| 41 |
+
#### Tasks
|
| 42 |
+
|
| 43 |
+
* `asdiv`
|
| 44 |
+
|
| 45 |
+
### Checklist
|
| 46 |
+
|
| 47 |
+
For adding novel benchmarks/datasets to the library:
|
| 48 |
+
* [ ] Is the task an existing benchmark in the literature?
|
| 49 |
+
* [ ] Have you referenced the original paper that introduced the task?
|
| 50 |
+
* [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
If other tasks on this dataset are already supported:
|
| 54 |
+
* [ ] Is the "Main" variant of this task clearly denoted?
|
| 55 |
+
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
|
| 56 |
+
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/asdiv/default.yaml
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ASDiv math word problems, scored by loglikelihood of the gold answer string.
task: asdiv
dataset_path: EleutherAI/asdiv
output_type: loglikelihood
validation_split: validation
doc_to_text: "{{body}}\nQuestion:{{question}}\nAnswer:"
# Strip the parenthesized suffix from the raw answer field before scoring,
# e.g. "6 (apples)" -> "6".
doc_to_target: "{{answer.split(' (')[0]}}"
should_decontaminate: true
doc_to_decontamination_query: "{{body}} {{question}}"
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
dataset_kwargs:
  # NOTE(review): runs the dataset's loading script from the Hub; acceptable
  # only because EleutherAI/asdiv is a trusted source — confirm before reuse.
  trust_remote_code: true
scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/_generate_configs.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""
Generate per-language Belebele task YAMLs from a base YAML.

Queries the HF datasets-server API for the list of splits of
``facebook/belebele``, writes one small task YAML per language split that
``include:``s the base YAML, then writes a single group YAML aggregating all
generated tasks.
"""

import argparse
import os

import requests
import yaml
from tqdm import tqdm

from lm_eval.utils import logging


# datasets-server endpoint listing every split of facebook/belebele
API_URL = "https://datasets-server.huggingface.co/splits?dataset=facebook/belebele"


def parse_args():
    """Parse CLI arguments for the config generator."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--base_yaml_path", required=True)
    parser.add_argument("--save_prefix_path", default="belebele")
    parser.add_argument("--cot_prompt_path", default=None)
    parser.add_argument("--task_prefix", default="")
    return parser.parse_args()


def _task_name(task_prefix: str, lang: str) -> str:
    """Return the task name for a language split, honoring an optional prefix."""
    return f"belebele_{task_prefix}_{lang}" if task_prefix else f"belebele_{lang}"


def query_splits():
    """Fetch the split list from the datasets-server API.

    Raises requests.HTTPError on a non-2xx response instead of letting the
    missing "splits" key surface later as a confusing KeyError.
    """
    response = requests.get(API_URL)
    response.raise_for_status()
    return response.json()["splits"]


if __name__ == "__main__":
    args = parse_args()

    # get filename of base_yaml so we can `"include": ` it in our other YAMLs.
    base_yaml_name = os.path.split(args.base_yaml_path)[-1]
    with open(args.base_yaml_path, encoding="utf-8") as f:
        base_yaml = yaml.full_load(f)

    if args.cot_prompt_path is not None:
        import json

        # NOTE(review): cot_file is loaded but not used below — kept for parity
        # with sibling generators; confirm before removing.
        with open(args.cot_prompt_path, encoding="utf-8") as f:
            cot_file = json.load(f)

    # Fetch once and filter once. The original issued the HTTP request twice
    # (a stray debug print plus the list build) and re-filtered "default"
    # in two separate places.
    languages = [
        split["split"]
        for split in query_splits()
        if "default" not in split["split"]
    ]

    for lang in tqdm(languages):
        yaml_dict = {
            "include": base_yaml_name,
            "task": _task_name(args.task_prefix, lang),
            "test_split": lang,
            "fewshot_split": lang,
        }

        file_save_path = args.save_prefix_path + f"_{lang}.yaml"
        logging.info(f"Saving yaml for subset {lang} to {file_save_path}")
        with open(file_save_path, "w", encoding="utf-8") as yaml_file:
            yaml.dump(
                yaml_dict,
                yaml_file,
                width=float("inf"),
                allow_unicode=True,
                default_style='"',
            )

    # write group config out
    group_yaml_dict = {
        "group": f"belebele_{args.task_prefix}" if args.task_prefix else "belebele",
        "task": [_task_name(args.task_prefix, lang) for lang in languages],
        "aggregate_metric_list": [
            {"metric": "acc", "aggregation": "mean", "weight_by_size": False},
            {"metric": "acc_norm", "aggregation": "mean", "weight_by_size": False},
        ],
        "metadata": {"version": 0.0},
    }

    file_save_path = "_" + args.save_prefix_path + f"{args.task_prefix}.yaml"
    with open(file_save_path, "w", encoding="utf-8") as group_yaml_file:
        yaml.dump(
            group_yaml_dict,
            group_yaml_file,
            width=float("inf"),
            allow_unicode=True,
            default_style='"',
        )
scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_amh_Ethi.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"fewshot_split": "amh_Ethi"
|
| 2 |
+
"include": "_default_template_yaml"
|
| 3 |
+
"task": "belebele_amh_Ethi"
|
| 4 |
+
"test_split": "amh_Ethi"
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ary_Arab.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"fewshot_split": "ary_Arab"
|
| 2 |
+
"include": "_default_template_yaml"
|
| 3 |
+
"task": "belebele_ary_Arab"
|
| 4 |
+
"test_split": "ary_Arab"
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ces_Latn.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"fewshot_split": "ces_Latn"
|
| 2 |
+
"include": "_default_template_yaml"
|
| 3 |
+
"task": "belebele_ces_Latn"
|
| 4 |
+
"test_split": "ces_Latn"
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_jpn_Jpan.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"fewshot_split": "jpn_Jpan"
|
| 2 |
+
"include": "_default_template_yaml"
|
| 3 |
+
"task": "belebele_jpn_Jpan"
|
| 4 |
+
"test_split": "jpn_Jpan"
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_lin_Latn.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"fewshot_split": "lin_Latn"
|
| 2 |
+
"include": "_default_template_yaml"
|
| 3 |
+
"task": "belebele_lin_Latn"
|
| 4 |
+
"test_split": "lin_Latn"
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_mya_Mymr.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"fewshot_split": "mya_Mymr"
|
| 2 |
+
"include": "_default_template_yaml"
|
| 3 |
+
"task": "belebele_mya_Mymr"
|
| 4 |
+
"test_split": "mya_Mymr"
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_nld_Latn.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"fewshot_split": "nld_Latn"
|
| 2 |
+
"include": "_default_template_yaml"
|
| 3 |
+
"task": "belebele_nld_Latn"
|
| 4 |
+
"test_split": "nld_Latn"
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_sun_Latn.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"fewshot_split": "sun_Latn"
|
| 2 |
+
"include": "_default_template_yaml"
|
| 3 |
+
"task": "belebele_sun_Latn"
|
| 4 |
+
"test_split": "sun_Latn"
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tgl_Latn.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"fewshot_split": "tgl_Latn"
|
| 2 |
+
"include": "_default_template_yaml"
|
| 3 |
+
"task": "belebele_tgl_Latn"
|
| 4 |
+
"test_split": "tgl_Latn"
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tsn_Latn.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"fewshot_split": "tsn_Latn"
|
| 2 |
+
"include": "_default_template_yaml"
|
| 3 |
+
"task": "belebele_tsn_Latn"
|
| 4 |
+
"test_split": "tsn_Latn"
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tur_Latn.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"fewshot_split": "tur_Latn"
|
| 2 |
+
"include": "_default_template_yaml"
|
| 3 |
+
"task": "belebele_tur_Latn"
|
| 4 |
+
"test_split": "tur_Latn"
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_uzn_Latn.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"fewshot_split": "uzn_Latn"
|
| 2 |
+
"include": "_default_template_yaml"
|
| 3 |
+
"task": "belebele_uzn_Latn"
|
| 4 |
+
"test_split": "uzn_Latn"
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_war_Latn.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"fewshot_split": "war_Latn"
|
| 2 |
+
"include": "_default_template_yaml"
|
| 3 |
+
"task": "belebele_war_Latn"
|
| 4 |
+
"test_split": "war_Latn"
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_zho_Hans.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"fewshot_split": "zho_Hans"
|
| 2 |
+
"include": "_default_template_yaml"
|
| 3 |
+
"task": "belebele_zho_Hans"
|
| 4 |
+
"test_split": "zho_Hans"
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/auto_debugging.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated by utils.py
|
| 2 |
+
dataset_name: auto_debugging_zero_shot
|
| 3 |
+
include: ../generate_until_template_yaml
|
| 4 |
+
task: bigbench_auto_debugging_generate_until
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/codenames.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated by utils.py
|
| 2 |
+
dataset_name: codenames_zero_shot
|
| 3 |
+
include: ../generate_until_template_yaml
|
| 4 |
+
task: bigbench_codenames_generate_until
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/empirical_judgments.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated by utils.py
|
| 2 |
+
dataset_name: empirical_judgments_zero_shot
|
| 3 |
+
include: ../generate_until_template_yaml
|
| 4 |
+
task: bigbench_empirical_judgments_generate_until
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/hhh_alignment.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated by utils.py
|
| 2 |
+
dataset_name: hhh_alignment_zero_shot
|
| 3 |
+
include: ../generate_until_template_yaml
|
| 4 |
+
task: bigbench_hhh_alignment_generate_until
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/known_unknowns.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated by utils.py
|
| 2 |
+
dataset_name: known_unknowns_zero_shot
|
| 3 |
+
include: ../generate_until_template_yaml
|
| 4 |
+
task: bigbench_known_unknowns_generate_until
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/natural_instructions.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated by utils.py
|
| 2 |
+
dataset_name: natural_instructions_zero_shot
|
| 3 |
+
include: ../generate_until_template_yaml
|
| 4 |
+
task: bigbench_natural_instructions_generate_until
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/strange_stories.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated by utils.py
|
| 2 |
+
dataset_name: strange_stories_zero_shot
|
| 3 |
+
include: ../generate_until_template_yaml
|
| 4 |
+
task: bigbench_strange_stories_generate_until
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/word_sorting.yaml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated by utils.py
|
| 2 |
+
dataset_name: word_sorting_zero_shot
|
| 3 |
+
include: ../generate_until_template_yaml
|
| 4 |
+
task: bigbench_word_sorting_generate_until
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/fda/README.md
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# FDA
|
| 2 |
+
|
| 3 |
+
### Paper
|
| 4 |
+
|
| 5 |
+
Title: Language Models Enable Simple Systems For
|
| 6 |
+
Generating Structured Views Of Heterogenous Data
|
| 7 |
+
Lakes
|
| 8 |
+
|
| 9 |
+
Abstract: A long standing goal of the data management community is to develop general, automated systems
|
| 10 |
+
that ingest semi-structured documents and output queryable tables without human effort or domain
|
| 11 |
+
specific customization. Given the sheer variety of potential documents, state-of-the art systems make
|
| 12 |
+
simplifying assumptions and use domain specific training. In this work, we ask whether we can
|
| 13 |
+
maintain generality by using large language models (LLMs). LLMs, which are pretrained on broad
|
| 14 |
+
data, can perform diverse downstream tasks simply conditioned on natural language task descriptions.
|
| 15 |
+
We propose and evaluate EVAPORATE, a simple, prototype system powered by LLMs. We identify
|
| 16 |
+
two fundamentally different strategies for implementing this system: prompt the LLM to directly
|
| 17 |
+
extract values from documents or prompt the LLM to synthesize code that performs the extraction.
|
| 18 |
+
Our evaluations show a cost-quality tradeoff between these two approaches. Code synthesis is cheap,
|
| 19 |
+
but far less accurate than directly processing each document with the LLM. To improve quality while
|
| 20 |
+
maintaining low cost, we propose an extended code synthesis implementation, EVAPORATE-CODE+,
|
| 21 |
+
which achieves better quality than direct extraction. Our key insight is to generate many candidate
|
| 22 |
+
functions and ensemble their extractions using weak supervision. EVAPORATE-CODE+ not only
|
| 23 |
+
outperforms the state-of-the art systems, but does so using a sublinear pass over the documents with
|
| 24 |
+
the LLM. This equates to a 110× reduction in the number of tokens the LLM needs to process,
|
| 25 |
+
averaged across 16 real-world evaluation settings of 10k documents each.
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
A task for LMs to perform Information Extraction, as implemented by Based.
|
| 29 |
+
|
| 30 |
+
Homepage: https://github.com/HazyResearch/based-evaluation-harness
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
Description:
|
| 34 |
+
> FDA (Information Extraction). The task is to extract key-value pairs from a set of PDFs scraped from the FDA website. We use the dataset and labels collected in Arora et al. 2023. We break apart the documents into chunks of 1,920 tokens. For every key-value pair that appears in the chunk, we create a zero-shot prompt using the simple prompt template: {chunk} \n {key}: We allow the model to generate a fixed number of tokens after the prompt and check (with case insensitivity) if the value is contained within the generation. We report accuracy, the fraction of prompts for which the generation contains the value.
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
### Citation
|
| 39 |
+
|
| 40 |
+
```
|
| 41 |
+
@misc{arora2024simple,
|
| 42 |
+
title={Simple linear attention language models balance the recall-throughput tradeoff},
|
| 43 |
+
author={Simran Arora and Sabri Eyuboglu and Michael Zhang and Aman Timalsina and Silas Alberti and Dylan Zinsley and James Zou and Atri Rudra and Christopher Ré},
|
| 44 |
+
year={2024},
|
| 45 |
+
eprint={2402.18668},
|
| 46 |
+
archivePrefix={arXiv},
|
| 47 |
+
primaryClass={cs.CL}
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
@misc{arora2023language,
|
| 51 |
+
title={Language Models Enable Simple Systems for Generating Structured Views of Heterogeneous Data Lakes},
|
| 52 |
+
author={Simran Arora and Brandon Yang and Sabri Eyuboglu and Avanika Narayan and Andrew Hojel and Immanuel Trummer and Christopher Ré},
|
| 53 |
+
year={2023},
|
| 54 |
+
eprint={2304.09433},
|
| 55 |
+
archivePrefix={arXiv},
|
| 56 |
+
primaryClass={cs.CL}
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
```
|
| 60 |
+
|
| 61 |
+
### Groups and Tasks
|
| 62 |
+
|
| 63 |
+
#### Tasks
|
| 64 |
+
|
| 65 |
+
* `fda`: the FDA task as implemented in the paper "Simple linear attention language models balance the recall-throughput tradeoff". Designed for zero-shot evaluation of small LMs.
|
| 66 |
+
|
| 67 |
+
### Checklist
|
| 68 |
+
|
| 69 |
+
For adding novel benchmarks/datasets to the library:
|
| 70 |
+
* [x] Is the task an existing benchmark in the literature?
|
| 71 |
+
* [x] Have you referenced the original paper that introduced the task?
|
| 72 |
+
* [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
If other tasks on this dataset are already supported:
|
| 76 |
+
* [x] Is the "Main" variant of this task clearly denoted?
|
| 77 |
+
* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
|
| 78 |
+
* [x] Have you noted which, if any, published evaluation setups are matched by this variant?
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/fda/fda.yaml
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
task: fda
|
| 2 |
+
class: !function task.FDA
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/fda/task.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
from typing import List
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
from lm_eval.api.instance import Instance
|
| 7 |
+
from lm_eval.api.task import ConfigurableTask
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class FDA(ConfigurableTask):
    """Zero-shot value-extraction task over the `hazyresearch/based-fda` dataset.

    Each document pairs a context (``text``) with a gold value (``value``).
    The model generates a short continuation, which is scored on whether the
    gold value appears (case-insensitively) anywhere in the generation.
    """

    VERSION = 0
    DATASET_PATH = "hazyresearch/based-fda"
    DATASET_NAME = "default"

    def __init__(self, **kwargs):
        # Extra kwargs are accepted for interface compatibility but ignored;
        # the task always runs with its own fixed config.
        super().__init__(config={"metadata": {"version": self.VERSION}})

    def has_training_docs(self):
        return False

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def validation_docs(self):
        return self.dataset["validation"]

    def doc_to_text(self, doc):
        return doc["text"]

    def doc_to_target(self, doc):
        return doc["value"]

    def construct_requests(self, doc, ctx, **kwargs):
        """Build the single generation request for one document.

        :param doc:
            The document as returned from validation_docs.
        :param ctx: str
            The full context string produced by `fewshot_context` (description,
            few-shot examples, and the question part of `doc`).
        :returns: A one-element list with the `generate_until` request.
        """
        gen_args = (ctx, {"until": ["\n"], "max_gen_toks": 48})
        request = Instance(
            request_type="generate_until",
            doc=doc,
            arguments=gen_args,
            idx=0,
            **kwargs,
        )
        return [request]

    def process_results(self, doc, results):
        """Score one document's generation.

        :param doc:
            The document being evaluated.
        :param results:
            The model generations for the requests built in `construct_requests`.
        :returns: ``{"contains": 0 or 1}`` — whether the gold value occurs
            (case-insensitively) in the first generation.
        """
        generation = results[0]
        return {"contains": contains_score(generation, [doc["value"]])}

    def aggregation(self):
        """
        :returns: {str: [float] -> float}
          Submetric names mapped to their corpus-level aggregation functions.
        """
        return {
            # Mean of per-document 0/1 containment scores.
            "contains": np.mean,
        }

    def higher_is_better(self):
        """
        :returns: {str: bool}
          Submetric names mapped to whether larger values are better.
        """
        return {
            "contains": True,
        }
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def contains_score(prediction: str, labels: List[str]) -> int:
    """Return 1 if any gold label occurs (case-insensitively) in *prediction*.

    Each label is matched literally (regex metacharacters are escaped) and
    case-insensitively anywhere inside the prediction.

    :param prediction: The model's generated text.
    :param labels: Gold answer strings to look for.
    :returns: 1 if at least one label is found, else 0. An empty label list
        now scores 0 instead of raising ``ValueError`` from ``max()``.
    """
    return max(
        (
            int(bool(re.search(re.escape(label), prediction, re.IGNORECASE)))
            for label in labels
        ),
        default=0,
    )
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/hellaswag/README.md
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# HellaSwag
|
| 2 |
+
|
| 3 |
+
### Paper
|
| 4 |
+
|
| 5 |
+
Title: `HellaSwag: Can a Machine Really Finish Your Sentence?`
|
| 6 |
+
|
| 7 |
+
Abstract: https://arxiv.org/abs/1905.07830
|
| 8 |
+
|
| 9 |
+
Recent work by Zellers et al. (2018) introduced a new task of commonsense natural language inference: given an event description such as "A woman sits at a piano," a machine must select the most likely followup: "She sets her fingers on the keys." With the introduction of BERT, near human-level performance was reached. Does this mean that machines can perform human level commonsense inference?
|
| 10 |
+
In this paper, we show that commonsense inference still proves difficult for even state-of-the-art models, by presenting HellaSwag, a new challenge dataset. Though its questions are trivial for humans (>95% accuracy), state-of-the-art models struggle (<48%). We achieve this via Adversarial Filtering (AF), a data collection paradigm wherein a series of discriminators iteratively select an adversarial set of machine-generated wrong answers. AF proves to be surprisingly robust. The key insight is to scale up the length and complexity of the dataset examples towards a critical 'Goldilocks' zone wherein generated text is ridiculous to humans, yet often misclassified by state-of-the-art models.
|
| 11 |
+
Our construction of HellaSwag, and its resulting difficulty, sheds light on the inner workings of deep pretrained models. More broadly, it suggests a new path forward for NLP research, in which benchmarks co-evolve with the evolving state-of-the-art in an adversarial way, so as to present ever-harder challenges.
|
| 12 |
+
|
| 13 |
+
Homepage: `https://rowanzellers.com/hellaswag/`
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
### Citation
|
| 17 |
+
|
| 18 |
+
```
|
| 19 |
+
@inproceedings{zellers2019hellaswag,
|
| 20 |
+
title={HellaSwag: Can a Machine Really Finish Your Sentence?},
|
| 21 |
+
author={Zellers, Rowan and Holtzman, Ari and Bisk, Yonatan and Farhadi, Ali and Choi, Yejin},
|
| 22 |
+
booktitle ={Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics},
|
| 23 |
+
year={2019}
|
| 24 |
+
}
|
| 25 |
+
```
|
| 26 |
+
|
| 27 |
+
### Groups and Tasks
|
| 28 |
+
|
| 29 |
+
#### Groups
|
| 30 |
+
|
| 31 |
+
- Not part of a group yet
|
| 32 |
+
|
| 33 |
+
#### Tasks
|
| 34 |
+
|
| 35 |
+
- `hellaswag`
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
### Checklist
|
| 39 |
+
|
| 40 |
+
For adding novel benchmarks/datasets to the library:
|
| 41 |
+
* [x] Is the task an existing benchmark in the literature?
|
| 42 |
+
* [x] Have you referenced the original paper that introduced the task?
|
| 43 |
+
* [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
If other tasks on this dataset are already supported:
|
| 47 |
+
* [ ] Is the "Main" variant of this task clearly denoted?
|
| 48 |
+
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
|
| 49 |
+
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/hellaswag/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (1.08 kB). View file
|
|
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/hellaswag/hellaswag.yaml
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
tag:
|
| 2 |
+
- multiple_choice
|
| 3 |
+
task: hellaswag
|
| 4 |
+
dataset_path: hellaswag
|
| 5 |
+
dataset_name: null
|
| 6 |
+
output_type: multiple_choice
|
| 7 |
+
training_split: train
|
| 8 |
+
validation_split: validation
|
| 9 |
+
test_split: null
|
| 10 |
+
process_docs: !function utils.process_docs
|
| 11 |
+
doc_to_text: "{{query}}"
|
| 12 |
+
doc_to_target: "{{label}}"
|
| 13 |
+
doc_to_choice: "choices"
|
| 14 |
+
metric_list:
|
| 15 |
+
- metric: acc
|
| 16 |
+
aggregation: mean
|
| 17 |
+
higher_is_better: true
|
| 18 |
+
- metric: acc_norm
|
| 19 |
+
aggregation: mean
|
| 20 |
+
higher_is_better: true
|
| 21 |
+
metadata:
|
| 22 |
+
version: 1.0
|
| 23 |
+
dataset_kwargs:
|
| 24 |
+
trust_remote_code: true
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/hellaswag/utils.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
|
| 3 |
+
import datasets
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def preprocess(text):
    """Clean a HellaSwag text fragment for prompting.

    Strips surrounding whitespace and scrubs WikiHow markup artifacts:
    " [title]" markers become sentence breaks, any remaining bracketed
    tags are dropped, and doubled spaces are collapsed once.
    """
    cleaned = text.strip()
    # " [title]" separates WikiHow section headers; turn it into a sentence break.
    cleaned = cleaned.replace(" [title]", ". ")
    # Drop any other leftover bracketed artifacts such as "[step]" or "[header]".
    cleaned = re.sub(r"\[.*?\]", "", cleaned)
    return cleaned.replace("  ", " ")
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    """Map raw HellaSwag rows into the query/choices/gold fields the task uses."""

    def _to_eval_doc(doc):
        # Join the two context halves; ctx_b starts mid-sentence in the raw data.
        context = doc["ctx_a"] + " " + doc["ctx_b"].capitalize()
        return {
            "query": preprocess(doc["activity_label"] + ": " + context),
            "choices": [preprocess(option) for option in doc["endings"]],
            "gold": int(doc["label"]),
        }

    return dataset.map(_to_eval_doc)
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/ifeval/README.md
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# IFEval
|
| 2 |
+
|
| 3 |
+
### Paper
|
| 4 |
+
|
| 5 |
+
Title: Instruction-Following Evaluation for Large Language Models
|
| 6 |
+
Abstract: https://arxiv.org/abs/2311.07911
|
| 7 |
+
|
| 8 |
+
One core capability of Large Language Models (LLMs) is to follow natural language instructions. However, the evaluation of such abilities is not standardized: Human evaluations are expensive, slow, and not objectively reproducible, while LLM-based auto-evaluation is potentially biased or limited by the ability of the evaluator LLM. To overcome these issues, we introduce Instruction-Following Eval (IFEval) for large language models. IFEval is a straightforward and easy-to-reproduce evaluation benchmark. It focuses on a set of "verifiable instructions" such as "write in more than 400 words" and "mention the keyword of AI at least 3 times". We identified 25 types of those verifiable instructions and constructed around 500 prompts, with each prompt containing one or more verifiable instructions. We show evaluation results of two widely available LLMs on the market. Our code and data can be found at https://github.com/google-research/google-research/tree/master/instruction_following_eval
|
| 9 |
+
|
| 10 |
+
Homepage: https://github.com/google-research/google-research/tree/master/instruction_following_eval
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
### Citation
|
| 14 |
+
|
| 15 |
+
```
|
| 16 |
+
@article{zhou2023instructionfollowing,
|
| 17 |
+
title={Instruction-Following Evaluation for Large Language Models},
|
| 18 |
+
author={Jeffrey Zhou and Tianjian Lu and Swaroop Mishra and Siddhartha Brahma and Sujoy Basu and Yi Luan and Denny Zhou and Le Hou},
|
| 19 |
+
journal={arXiv preprint arXiv:2311.07911},
|
| 20 |
+
year={2023},
|
| 21 |
+
}
|
| 22 |
+
```
|
| 23 |
+
|
| 24 |
+
### Groups and Tasks
|
| 25 |
+
|
| 26 |
+
#### Groups
|
| 27 |
+
|
| 28 |
+
* Not part of a group yet
|
| 29 |
+
|
| 30 |
+
#### Tasks
|
| 31 |
+
|
| 32 |
+
* `ifeval`
|
| 33 |
+
|
| 34 |
+
### Checklist
|
| 35 |
+
|
| 36 |
+
For adding novel benchmarks/datasets to the library:
|
| 37 |
+
* [x] Is the task an existing benchmark in the literature?
|
| 38 |
+
* [x] Have you referenced the original paper that introduced the task?
|
| 39 |
+
* [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
If other tasks on this dataset are already supported:
|
| 43 |
+
* [ ] Is the "Main" variant of this task clearly denoted?
|
| 44 |
+
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
|
| 45 |
+
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/ifeval/ifeval.yaml
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
task: ifeval
|
| 2 |
+
dataset_path: wis-k/instruction-following-eval
|
| 3 |
+
dataset_name: null
|
| 4 |
+
output_type: generate_until
|
| 5 |
+
test_split: train
|
| 6 |
+
num_fewshot: 0
|
| 7 |
+
doc_to_text: prompt
|
| 8 |
+
doc_to_target: 0
|
| 9 |
+
generation_kwargs:
|
| 10 |
+
until: []
|
| 11 |
+
do_sample: false
|
| 12 |
+
temperature: 0.0
|
| 13 |
+
max_gen_toks: 1280
|
| 14 |
+
process_results: !function utils.process_results
|
| 15 |
+
metric_list:
|
| 16 |
+
- metric: prompt_level_strict_acc
|
| 17 |
+
aggregation: mean
|
| 18 |
+
higher_is_better: true
|
| 19 |
+
- metric: inst_level_strict_acc
|
| 20 |
+
aggregation: !function utils.agg_inst_level_acc
|
| 21 |
+
higher_is_better: true
|
| 22 |
+
- metric: prompt_level_loose_acc
|
| 23 |
+
aggregation: mean
|
| 24 |
+
higher_is_better: true
|
| 25 |
+
- metric: inst_level_loose_acc
|
| 26 |
+
aggregation: !function utils.agg_inst_level_acc
|
| 27 |
+
higher_is_better: true
|
| 28 |
+
metadata:
|
| 29 |
+
version: 2.0
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/ifeval/instructions.py
ADDED
|
@@ -0,0 +1,1612 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2023 The Google Research Authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
"""Library of instructions."""
|
| 16 |
+
|
| 17 |
+
import collections
|
| 18 |
+
import json
|
| 19 |
+
import logging
|
| 20 |
+
import random
|
| 21 |
+
import re
|
| 22 |
+
import string
|
| 23 |
+
from typing import Dict, Optional, Sequence, Union
|
| 24 |
+
|
| 25 |
+
import langdetect
|
| 26 |
+
|
| 27 |
+
from lm_eval.tasks.ifeval import instructions_util
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
logger = logging.getLogger(__name__)

# Type of the kwargs dict accepted by `Instruction.build_description`.
_InstructionArgsDtype = Optional[Dict[str, Union[int, str, Sequence[str]]]]

# ISO-639-1 language-code -> name mapping shared with instructions_util.
_LANGUAGES = instructions_util.LANGUAGE_CODES

# The relational operation for comparison.
_COMPARISON_RELATION = ("less than", "at least")

# The maximum number of sentences.
_MAX_NUM_SENTENCES = 20

# The number of placeholders.
_NUM_PLACEHOLDERS = 4

# The number of bullet lists.
_NUM_BULLETS = 5

# The options of constrained response.
_CONSTRAINED_RESPONSE_OPTIONS = (
    "My answer is yes.",
    "My answer is no.",
    "My answer is maybe.",
)

# The options of starter keywords.
_STARTER_OPTIONS = (
    "I would say",
    "My answer is",
    "I believe",
    "In my opinion",
    "I think",
    "I reckon",
    "I feel",
    "From my perspective",
    "As I see it",
    "According to me",
    "As far as I'm concerned",
    "To my understanding",
    "In my view",
    "My take on it is",
    "As per my perception",
)

# The options of ending keywords.
# TODO(jeffreyzhou) add more ending options
_ENDING_OPTIONS = ("Any other questions?", "Is there anything else I can help with?")

# The number of highlighted sections.
_NUM_HIGHLIGHTED_SECTIONS = 4

# The section spliter.
_SECTION_SPLITER = ("Section", "SECTION")

# The number of sections.
_NUM_SECTIONS = 5

# The number of paragraphs.
_NUM_PARAGRAPHS = 5

# The postscript marker.
_POSTSCRIPT_MARKER = ("P.S.", "P.P.S")

# The number of keywords.
_NUM_KEYWORDS = 2

# The occurrences of a single keyword.
_KEYWORD_FREQUENCY = 3

# The occurrences of a single letter.
_LETTER_FREQUENCY = 10

# The occurrences of words with all capital letters.
_ALL_CAPITAL_WORD_FREQUENCY = 20

# The number of words in the response.
_NUM_WORDS_LOWER_LIMIT = 100
_NUM_WORDS_UPPER_LIMIT = 500
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class Instruction:
    """Abstract base for a verifiable instruction template.

    Subclasses render a natural-language description of a constraint and
    check whether a given response satisfies it.
    """

    def __init__(self, instruction_id):
        # Stable identifier for this instruction type.
        self.id = instruction_id

    def build_description(self, **kwargs):
        """Render the instruction text; subclasses must override."""
        raise NotImplementedError("`build_description` not implemented.")

    def get_instruction_args(self):
        """Return the kwargs used by `build_description`; subclasses must override."""
        raise NotImplementedError("`get_instruction_args` not implemented.")

    def get_instruction_args_keys(self):
        """Return the kwarg names of `build_description`; subclasses must override."""
        raise NotImplementedError("`get_instruction_args_keys` not implemented.")

    def check_following(self, value):
        """Return True iff `value` satisfies the instruction; subclasses must override."""
        raise NotImplementedError("`check_following` not implemented.")
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
class ResponseLanguageChecker(Instruction):
    """Check the language of the entire response."""

    def build_description(self, *, language=None):
        """Build the instruction description.

        Args:
          language: A string representing the expected language of the response. The
            language has to comply to the 97 types defined in
            `langid.py` (https://pypi.org/project/langid/1.1.5/), which follows
            ISO 639-1 codes (https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes);
            for example, `en` for English, `zh` for Chinese, `fr` for French.
            If None, a language is sampled at random.

        Returns:
          A string representing the instruction description.
        """
        self._language = language
        if self._language is None:
            self._language = random.choice(list(_LANGUAGES.keys()))
        # TODO(tianjianlu): opens the description generation to more choices.
        self._description_pattern = (
            "Your ENTIRE response should be in {language} language, no other "
            + "language is allowed."
        )
        return self._description_pattern.format(language=_LANGUAGES[self._language])

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"language": self._language}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["language"]

    def check_following(self, value):
        """Check if the language of the entire response follows the instruction.

        Args:
          value: A string representing the response.

        Returns:
          True if the language of `value` follows instruction; otherwise False.
        """
        assert isinstance(value, str)

        try:
            return langdetect.detect(value) == self._language
        except langdetect.LangDetectException as e:
            # Detection failed (e.g. empty or ambiguous text): count the
            # instruction as followed rather than penalizing the response.
            # Use the module-level `logger` (not the root `logging` module)
            # for consistency with the rest of this file.
            logger.error(
                "Unable to detect language for text %s due to %s", value, e
            )  # refex: disable=pytotw.037
            return True
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
class NumberOfSentences(Instruction):
|
| 185 |
+
"""Check the number of sentences."""
|
| 186 |
+
|
| 187 |
+
def build_description(self, *, num_sentences=None, relation=None):
|
| 188 |
+
"""Build the instruction description.
|
| 189 |
+
|
| 190 |
+
Args:
|
| 191 |
+
num_sentences: An integer specifying the number of sentences as a
|
| 192 |
+
threshold.
|
| 193 |
+
relation: A string in (`less than`, `at least`), defining the relational
|
| 194 |
+
operator for comparison.
|
| 195 |
+
Two relational comparisons are supported for now:
|
| 196 |
+
if 'less than', the actual number of sentences < the threshold;
|
| 197 |
+
if 'at least', the actual number of sentences >= the threshold.
|
| 198 |
+
|
| 199 |
+
Returns:
|
| 200 |
+
A string representing the instruction description.
|
| 201 |
+
"""
|
| 202 |
+
# The number of sentences as a threshold for comparison.
|
| 203 |
+
self._num_sentences_threshold = num_sentences
|
| 204 |
+
if self._num_sentences_threshold is None or self._num_sentences_threshold < 0:
|
| 205 |
+
self._num_sentences_threshold = random.randint(1, _MAX_NUM_SENTENCES)
|
| 206 |
+
|
| 207 |
+
if relation is None:
|
| 208 |
+
self._comparison_relation = random.choice(_COMPARISON_RELATION)
|
| 209 |
+
elif relation not in _COMPARISON_RELATION:
|
| 210 |
+
raise ValueError(
|
| 211 |
+
"The supported relation for comparison must be in "
|
| 212 |
+
f"{_COMPARISON_RELATION}, but {relation} is given."
|
| 213 |
+
)
|
| 214 |
+
else:
|
| 215 |
+
self._comparison_relation = relation
|
| 216 |
+
|
| 217 |
+
self._description_pattern = (
|
| 218 |
+
"Your response should contain {relation} {num_sentences} sentences."
|
| 219 |
+
)
|
| 220 |
+
return self._description_pattern.format(
|
| 221 |
+
relation=self._comparison_relation,
|
| 222 |
+
num_sentences=self._num_sentences_threshold,
|
| 223 |
+
)
|
| 224 |
+
|
| 225 |
+
def get_instruction_args(self):
|
| 226 |
+
"""Returns the keyward args of `build_description`."""
|
| 227 |
+
return {
|
| 228 |
+
"num_sentences": self._num_sentences_threshold,
|
| 229 |
+
"relation": self._comparison_relation,
|
| 230 |
+
}
|
| 231 |
+
|
| 232 |
+
def get_instruction_args_keys(self):
|
| 233 |
+
"""Returns the args keys of `build_description`."""
|
| 234 |
+
return ["num_sentences", "relation"]
|
| 235 |
+
|
| 236 |
+
def check_following(self, value):
    """Check if the number of sentences follows the instruction.

    Args:
      value: A string representing the response.

    Returns:
      True if the response follows the instruction.

    Raise:
      ValueError if the string in `instruction_args` is not in
      [`less_than`, `at_least`].
    """
    threshold = self._num_sentences_threshold
    num_sentences = instructions_util.count_sentences(value)
    if self._comparison_relation == _COMPARISON_RELATION[0]:
        return num_sentences < threshold
    if self._comparison_relation == _COMPARISON_RELATION[1]:
        return num_sentences >= threshold
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
class PlaceholderChecker(Instruction):
    """Check the placeholders in template writing."""

    def build_description(self, *, num_placeholders=None):
        """Build the instruction description.

        Args:
          num_placeholders: An integer denoting the minimum number of
            placeholders required in the response.

        Returns:
          A string representing the instruction description.
        """
        # Fall back to a random minimum when no (valid) count is supplied.
        if num_placeholders is None or num_placeholders < 0:
            num_placeholders = random.randint(1, _NUM_PLACEHOLDERS)
        self._num_placeholders = num_placeholders
        self._description_pattern = (
            "The response must contain at least {num_placeholders} placeholders "
            "represented by square brackets, such as [address]."
        )
        return self._description_pattern.format(num_placeholders=self._num_placeholders)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"num_placeholders": self._num_placeholders}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_placeholders"]

    def check_following(self, value):
        """Check if the number of placeholders follows the instruction.

        Args:
          value: A string representing the response.

        Returns:
          True if the actual number of square-bracketed placeholders in the
          response is greater than or equal to `num_placeholders`; otherwise,
          False.
        """
        return len(re.findall(r"\[.*?\]", value)) >= self._num_placeholders
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
class BulletListChecker(Instruction):
    """Checks the bullet list in the prompt."""

    def build_description(self, *, num_bullets=None):
        """Build the instruction description.

        Args:
          num_bullets: An integer specifying the exact number of bullet lists
            that is required to appear in the response.

        Returns:
          A string representing the instruction description.
        """
        # Fall back to a random count when no (valid) count is supplied.
        if num_bullets is None or num_bullets < 0:
            num_bullets = random.randint(1, _NUM_BULLETS)
        self._num_bullets = num_bullets
        self._description_pattern = (
            "Your answer must contain exactly {num_bullets} bullet points. "
            "Use the markdown bullet points such as:\n"
            "* This is point 1. \n"
            "* This is point 2"
        )
        return self._description_pattern.format(num_bullets=self._num_bullets)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"num_bullets": self._num_bullets}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_bullets"]

    def check_following(self, value):
        r"""Check if the number of bullet lists meets the requirement.

        Args:
          value: A string representing the response. The response is expected to
            contain some bullet lists that start with `\*` or `-`.

        Returns:
          True if the actual number of bullet lists in the response equals the
          required count.
        """
        # `*` bullets (but not `**bold**` lines) plus `-` bullets.
        star_bullets = re.findall(r"^\s*\*[^\*].*$", value, flags=re.MULTILINE)
        dash_bullets = re.findall(r"^\s*-.*$", value, flags=re.MULTILINE)
        return len(star_bullets) + len(dash_bullets) == self._num_bullets
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
class ConstrainedResponseChecker(Instruction):
    """Checks the constrained response."""

    def build_description(self):
        """Build the instruction description."""
        # A sequence of string(s) representing the options of the expected response.
        self._constrained_responses = _CONSTRAINED_RESPONSE_OPTIONS
        self._description_pattern = (
            "Answer with one of the following options: {response_options}"
        )
        return self._description_pattern.format(
            response_options=self._constrained_responses
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Checks if the response matches the constrained options.

        Args:
          value: A string representing the response.

        Returns:
          True if the actual response contains one of the options in the
          constrained responses; otherwise False.
        """
        stripped = value.strip()
        return any(option in stripped for option in self._constrained_responses)
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
class ConstrainedStartChecker(Instruction):
    """Checks the response start."""

    def build_description(self, *, starter=None):
        """Build the instruction description.

        Args:
          starter: A string representing the keyword that the response should
            start with.

        Returns:
          A string representing the instruction description.
        """
        self._starter = starter.strip() if isinstance(starter, str) else starter
        if self._starter is None:
            self._starter = random.choice(_STARTER_OPTIONS)
        self._description_pattern = (
            "During the conversation, when it is your turn, "
            + "please always start with {starter}"
        )
        return self._description_pattern.format(starter=self._starter)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"starter": self._starter}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["starter"]

    def check_following(self, value):
        """Checks if the response starts with the constrained keyword or phrase.

        Args:
          value: A string representing the response.

        Returns:
          True if some line of the response starts (after optional whitespace)
          with the given phrase or keyword that is contained in
          `instruction_args`; otherwise, False.
        """
        # BUG FIX: the starter was interpolated into the regex unescaped, so a
        # starter containing regex metacharacters (e.g. "P.S.", "(a)") was
        # treated as a pattern and could mismatch or raise re.error.
        response_pattern = r"^\s*" + re.escape(self._starter) + r".*$"
        response_with_constrained_start = re.search(
            response_pattern, value, flags=re.MULTILINE
        )
        return bool(response_with_constrained_start)
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
class HighlightSectionChecker(Instruction):
    """Checks the highlighted section."""

    def build_description(self, *, num_highlights=None):
        """Build the instruction description.

        Args:
          num_highlights: An integer specifying the minimum number of highlighted
            sections.

        Returns:
          A string representing the instruction description.
        """
        # Fall back to a random minimum when no (valid) count is supplied.
        if num_highlights is None or num_highlights < 0:
            num_highlights = random.randint(1, _NUM_HIGHLIGHTED_SECTIONS)
        self._num_highlights = num_highlights

        self._description_pattern = (
            "Highlight at least {num_highlights} sections in your answer with "
            "markdown, i.e. *highlighted section*."
        )
        return self._description_pattern.format(num_highlights=self._num_highlights)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"num_highlights": self._num_highlights}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_highlights"]

    def check_following(self, value):
        """Checks if the number of highlighted sections meets the requirement.

        Args:
          value: a string representing the response. The response is expected to
            contain highlighted sections in the format of *highlighted*.

        Returns:
          True if the actual number of non-empty highlighted sections
          (*single* or **double** asterisks) meets the minimum requirement;
          otherwise False.
        """
        # Empty markers such as "**" or "****" are not counted as highlights.
        single_spans = re.findall(r"\*[^\n\*]*\*", value)
        double_spans = re.findall(r"\*\*[^\n\*]*\*\*", value)
        count = sum(1 for span in single_spans if span.strip("*").strip())
        count += sum(
            1
            for span in double_spans
            if span.removeprefix("**").removesuffix("**").strip()
        )
        return count >= self._num_highlights
|
| 490 |
+
|
| 491 |
+
|
| 492 |
+
class SectionChecker(Instruction):
    """Checks the sections."""

    def build_description(self, *, section_spliter=None, num_sections=None):
        """Build the instruction description.

        Args:
          section_spliter: A string represents the section spliter keyword that
            marks a new section, i.e., `Section` or `SECTION`.
          num_sections: An integer specifying the number of sections.

        Returns:
          A string representing the instruction description.
        """
        self._section_spliter = (
            section_spliter.strip()
            if isinstance(section_spliter, str)
            else section_spliter
        )
        if self._section_spliter is None:
            self._section_spliter = random.choice(_SECTION_SPLITER)

        self._num_sections = num_sections
        if self._num_sections is None or self._num_sections < 0:
            self._num_sections = random.randint(1, _NUM_SECTIONS)

        self._description_pattern = (
            "Your response must have {num_sections} sections. Mark the beginning "
            + "of each section with {section_spliter} X, such as:\n"
            + "{section_spliter} 1\n"
            + "[content of section 1]\n"
            + "{section_spliter} 2\n"
            + "[content of section 2]"
        )

        return self._description_pattern.format(
            num_sections=self._num_sections, section_spliter=self._section_spliter
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "section_spliter": self._section_spliter,
            "num_sections": self._num_sections,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["section_spliter", "num_sections"]

    def check_following(self, value):
        """Checks the response contains multiple sections.

        Args:
          value: A string representing the response. The response is expected
            to contain multiple sections (number of sections is greater than 1).
            A new section starts with `Section 1`, where the number denotes the
            section index.

        Returns:
          True if the number of sections in the response is greater than or
          equal to the minimum number of sections; otherwise, False.
        """
        # BUG FIX: the section spliter was interpolated into the regex
        # unescaped; a marker containing regex metacharacters would be
        # treated as a pattern and could mismatch or raise re.error.
        section_splitter_pattern = (
            r"\s?" + re.escape(self._section_spliter) + r"\s?\d+\s?"
        )
        sections = re.split(section_splitter_pattern, value)
        # The text before the first marker is not a section.
        num_sections = len(sections) - 1
        return num_sections >= self._num_sections
|
| 559 |
+
|
| 560 |
+
|
| 561 |
+
class ParagraphChecker(Instruction):
    """Checks the paragraphs."""

    def build_description(self, *, num_paragraphs=None):
        """Build the instruction description.

        Args:
          num_paragraphs: An integer specifying the number of paragraphs.

        Returns:
          A string representing the instruction description.
        """
        # Fall back to a random count when no (valid) count is supplied.
        if num_paragraphs is None or num_paragraphs < 0:
            num_paragraphs = random.randint(1, _NUM_PARAGRAPHS)
        self._num_paragraphs = num_paragraphs

        self._description_pattern = (
            "There should be {num_paragraphs} paragraphs. "
            "Paragraphs are separated with the markdown divider: ***"
        )
        return self._description_pattern.format(num_paragraphs=self._num_paragraphs)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"num_paragraphs": self._num_paragraphs}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_paragraphs"]

    def check_following(self, value):
        """Checks the response contains required number of paragraphs.

        Args:
          value: A string representing the response. The response may contain
            paragraphs that are separated by the markdown divider: `***`.

        Returns:
          True if the actual number of paragraphs is the same as required;
          otherwise, False.
        """
        chunks = re.split(r"\s?\*\*\*\s?", value)
        count = len(chunks)

        for index, chunk in enumerate(chunks):
            if chunk.strip():
                continue
            # A leading or trailing divider yields one empty chunk, which is
            # tolerated; an empty chunk in the middle means an empty paragraph.
            if index in (0, len(chunks) - 1):
                count -= 1
            else:
                return False

        return count == self._num_paragraphs
|
| 614 |
+
|
| 615 |
+
|
| 616 |
+
class PostscriptChecker(Instruction):
    """Checks the postscript."""

    def build_description(self, *, postscript_marker=None):
        """Build the instruction description.

        Args:
          postscript_marker: A string containing the keyword that marks the start
            of the postscript section.

        Returns:
          A string representing the instruction description.
        """
        self._postscript_marker = (
            postscript_marker.strip()
            if isinstance(postscript_marker, str)
            else postscript_marker
        )
        if self._postscript_marker is None:
            self._postscript_marker = random.choice(_POSTSCRIPT_MARKER)

        self._description_pattern = (
            "At the end of your response, please explicitly add a postscript "
            + "starting with {postscript}"
        )

        return self._description_pattern.format(postscript=self._postscript_marker)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"postscript_marker": self._postscript_marker}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["postscript_marker"]

    def check_following(self, value):
        """Checks if the response follows the postscript format.

        Args:
          value: a string representing the response. The response is expected to
            contain a postscript section.

        Returns:
          True if the response contains a postscript section starting with
          the keyword containing in the `instruction_args`; otherwise False.
        """
        value = value.lower()
        # The two common markers are matched leniently (optional spaces
        # between the letters/periods).
        if self._postscript_marker == "P.P.S":
            postscript_pattern = r"\s*p\.\s?p\.\s?s.*$"
        elif self._postscript_marker == "P.S.":
            postscript_pattern = r"\s*p\.\s?s\..*$"
        else:
            # BUG FIX: an arbitrary marker was interpolated into the regex
            # unescaped; a marker with regex metacharacters would be treated
            # as a pattern and could mismatch or raise re.error.
            postscript_pattern = (
                r"\s*" + re.escape(self._postscript_marker.lower()) + r".*$"
            )
        postscript = re.findall(postscript_pattern, value, flags=re.MULTILINE)
        return True if postscript else False
|
| 672 |
+
|
| 673 |
+
|
| 674 |
+
class RephraseChecker(Instruction):
    """Checks the rephrase."""

    def build_description(self, *, original_message):
        """Build the instruction description.

        Args:
          original_message: A string representing the original message. The
            rephrased response should only change its words/sentences in between
            its two asterisks, for example, *change me*. Both original and
            rephrased messages should contain the changes in the form of
            *change me*.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `original_message` contains no *change me* span.
        """
        if not self.is_change(original_message):
            raise ValueError(
                f"Message {original_message} does not contain changes "
                "in the form of *change me*."
            )

        self._reference_without_change = original_message
        self._description = (
            "Rephrasing: Your rephrased response should only"
            + "change the words/sentences in between two asterisks"
            + "such as *change me*."
        )
        return self._description

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"original_message": self._reference_without_change}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["original_message"]

    def check_following(self, value):
        r"""Checks if the rephrasing follows the instruction.

        Args:
          value: A string representing the response, which is expected to
            rephrase the string of `instruction_args`.

        Returns:
          True if `value` and `instruction_args` only differ by the
          words/sentences in between two asterisks such as *change me*;
          otherwise, False.

        Raises:
          ValueError: If `value` contains no *change me* span.
        """
        if not self.is_change(value):
            raise ValueError(
                f"value {value} does not contain " "changes in the form of *change me*."
            )

        # With all *...* spans removed, the remaining text must be identical.
        return self.strip_changes(value) == self.strip_changes(
            self._reference_without_change
        )

    def is_change(self, response):
        """Check if there is change in the response in the form of *change me*."""
        return re.search(r"\*.*\*", response)

    def strip_changes(self, response):
        """Strips off the changes."""
        return re.sub(r"\*.*\*", "", response)
|
| 740 |
+
|
| 741 |
+
|
| 742 |
+
class KeywordChecker(Instruction):
    """Check the existence of certain keywords."""

    def build_description(self, *, keywords=None):
        """Build the instruction description.

        Args:
          keywords: A sequence of strings representing the keywords that are
            expected in the response.

        Returns:
          A string representing the instruction description.
        """
        if not keywords:
            self._keywords = instructions_util.generate_keywords(
                num_keywords=_NUM_KEYWORDS
            )
        else:
            self._keywords = keywords
        # Sorted for a deterministic description regardless of input order.
        self._keywords = sorted(self._keywords)

        self._description_pattern = "Include keywords {keywords} in the response."

        return self._description_pattern.format(keywords=self._keywords)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"keywords": self._keywords}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["keywords"]

    def check_following(self, value):
        """Check if the response contain the expected keywords."""
        for keyword in self._keywords:
            # BUG FIX: the keyword was used as a raw regex pattern, so a
            # keyword containing metacharacters (e.g. "c++") could raise
            # re.error or mismatch. Escape it for a literal, case-insensitive
            # search; behavior is unchanged for plain-word keywords.
            if not re.search(re.escape(keyword), value, flags=re.IGNORECASE):
                return False
        return True
|
| 782 |
+
|
| 783 |
+
|
| 784 |
+
class KeywordFrequencyChecker(Instruction):
    """Check the keyword frequency."""

    def build_description(self, *, keyword=None, frequency=None, relation=None):
        """Build the instruction description.

        Args:
          keyword: A string representing a keyword that is expected in the response.
          frequency: An integer specifying the number of times `keyword` is expected
            to appear in the response.
          relation: A string in (`less than`, `at least`), defining the relational
            operator for comparison.
            Two relational comparisons are supported for now:
            if 'less than', the actual number of occurrences < frequency;
            if 'at least', the actual number of occurrences >= frequency.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `relation` is given but not a supported option.
        """
        if not keyword:
            self._keyword = instructions_util.generate_keywords(num_keywords=1)[0]
        else:
            self._keyword = keyword.strip()

        self._frequency = frequency
        if self._frequency is None or self._frequency < 0:
            self._frequency = random.randint(1, _KEYWORD_FREQUENCY)

        if relation is None:
            self._comparison_relation = random.choice(_COMPARISON_RELATION)
        elif relation not in _COMPARISON_RELATION:
            raise ValueError(
                "The supported relation for comparison must be in "
                f"{_COMPARISON_RELATION}, but {relation} is given."
            )
        else:
            self._comparison_relation = relation

        self._description_pattern = (
            "In your response, the word {keyword} should appear {relation} "
            + "{frequency} times."
        )

        return self._description_pattern.format(
            keyword=self._keyword,
            relation=self._comparison_relation,
            frequency=self._frequency,
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "keyword": self._keyword,
            "frequency": self._frequency,
            "relation": self._comparison_relation,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["keyword", "frequency", "relation"]

    def check_following(self, value):
        """Checks if the response contain the keyword with required frequency."""
        # BUG FIX: the keyword was used as a raw regex pattern, so a keyword
        # containing metacharacters (e.g. "c++") could raise re.error or give
        # wrong counts. Escape it for a literal, case-insensitive count;
        # behavior is unchanged for plain-word keywords.
        actual_occurrences = len(
            re.findall(re.escape(self._keyword), value, flags=re.IGNORECASE)
        )

        if self._comparison_relation == _COMPARISON_RELATION[0]:
            return actual_occurrences < self._frequency
        elif self._comparison_relation == _COMPARISON_RELATION[1]:
            return actual_occurrences >= self._frequency
|
| 853 |
+
|
| 854 |
+
|
| 855 |
+
class NumberOfWords(Instruction):
    """Checks the number of words."""

    def build_description(self, *, num_words=None, relation=None):
        """Build the instruction description.

        Args:
          num_words: An integer specifying the number of words contained in the
            response.
          relation: A string in (`less than`, `at least`), defining the relational
            operator for comparison.
            Two relational comparisons are supported for now:
            if 'less than', the actual number of words < num_words;
            if 'at least', the actual number of words >= num_words.

        Returns:
          A string representing the instruction description.

        Raises:
          ValueError: If `relation` is given but not a supported option.
        """
        # Fall back to a random word count when no (valid) count is supplied.
        if num_words is None or num_words < 0:
            num_words = random.randint(_NUM_WORDS_LOWER_LIMIT, _NUM_WORDS_UPPER_LIMIT)
        self._num_words = num_words

        if relation is None:
            self._comparison_relation = random.choice(_COMPARISON_RELATION)
        elif relation not in _COMPARISON_RELATION:
            raise ValueError(
                "The supported relation for comparison must be in "
                f"{_COMPARISON_RELATION}, but {relation} is given."
            )
        else:
            self._comparison_relation = relation

        self._description_pattern = "Answer with {relation} {num_words} words."

        return self._description_pattern.format(
            relation=self._comparison_relation, num_words=self._num_words
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"num_words": self._num_words, "relation": self._comparison_relation}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_words", "relation"]

    def check_following(self, value):
        """Checks if the response contains the expected number of words."""
        word_count = instructions_util.count_words(value)

        if self._comparison_relation == _COMPARISON_RELATION[0]:
            return word_count < self._num_words
        if self._comparison_relation == _COMPARISON_RELATION[1]:
            return word_count >= self._num_words
|
| 912 |
+
|
| 913 |
+
|
| 914 |
+
class JsonFormat(Instruction):
    """Check the Json format."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Entire output should be wrapped in JSON format. You can use markdown"
            " ticks such as ```."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Returns True if the (fence-stripped) response parses as JSON."""
        # Strip optional markdown code fences before parsing.
        text = value.strip()
        for prefix in ("```json", "```Json", "```JSON", "```"):
            text = text.removeprefix(prefix)
        text = text.removesuffix("```").strip()
        try:
            # json.JSONDecodeError is a subclass of ValueError.
            json.loads(text)
        except ValueError:
            return False
        return True
|
| 947 |
+
|
| 948 |
+
|
| 949 |
+
class ParagraphFirstWordCheck(Instruction):
|
| 950 |
+
"""Check the paragraph and the first word of the nth paragraph."""
|
| 951 |
+
|
| 952 |
+
def build_description(
|
| 953 |
+
self, num_paragraphs=None, nth_paragraph=None, first_word=None
|
| 954 |
+
):
|
| 955 |
+
r"""Build the instruction description.
|
| 956 |
+
|
| 957 |
+
Args:
|
| 958 |
+
num_paragraphs: An integer indicating the number of paragraphs expected
|
| 959 |
+
in the response. A paragraph is a subset of the string that is
|
| 960 |
+
expected to be separated by '\n\n'.
|
| 961 |
+
nth_paragraph: An integer indicating the paragraph number that we look at.
|
| 962 |
+
Note that n starts from 1.
|
| 963 |
+
first_word: A string that represent the first word of the bth paragraph.
|
| 964 |
+
|
| 965 |
+
Returns:
|
| 966 |
+
A string representing the instruction description.
|
| 967 |
+
"""
|
| 968 |
+
self._num_paragraphs = num_paragraphs
|
| 969 |
+
if self._num_paragraphs is None or self._num_paragraphs < 0:
|
| 970 |
+
self._num_paragraphs = random.randint(1, _NUM_PARAGRAPHS)
|
| 971 |
+
|
| 972 |
+
self._nth_paragraph = nth_paragraph
|
| 973 |
+
if (
|
| 974 |
+
self._nth_paragraph is None
|
| 975 |
+
or self._nth_paragraph <= 0
|
| 976 |
+
or self._nth_paragraph > self._num_paragraphs
|
| 977 |
+
):
|
| 978 |
+
self._nth_paragraph = random.randint(1, self._num_paragraphs + 1)
|
| 979 |
+
|
| 980 |
+
self._first_word = first_word
|
| 981 |
+
if self._first_word is None:
|
| 982 |
+
self._first_word = instructions_util.generate_keywords(num_keywords=1)[0]
|
| 983 |
+
self._first_word = self._first_word.lower()
|
| 984 |
+
|
| 985 |
+
self._description_pattern = (
|
| 986 |
+
"There should be {num_paragraphs} paragraphs. "
|
| 987 |
+
+ "Paragraphs and only paragraphs are separated with each other by two "
|
| 988 |
+
+ "new lines as if it was '\\n\\n' in python. "
|
| 989 |
+
+ "Paragraph {nth_paragraph} must start with word {first_word}."
|
| 990 |
+
)
|
| 991 |
+
|
| 992 |
+
return self._description_pattern.format(
|
| 993 |
+
num_paragraphs=self._num_paragraphs,
|
| 994 |
+
nth_paragraph=self._nth_paragraph,
|
| 995 |
+
first_word=self._first_word,
|
| 996 |
+
)
|
| 997 |
+
|
| 998 |
+
def get_instruction_args(self):
|
| 999 |
+
"""Returns the keyward args of `build_description`."""
|
| 1000 |
+
return {
|
| 1001 |
+
"num_paragraphs": self._num_paragraphs,
|
| 1002 |
+
"nth_paragraph": self._nth_paragraph,
|
| 1003 |
+
"first_word": self._first_word,
|
| 1004 |
+
}
|
| 1005 |
+
|
| 1006 |
+
def get_instruction_args_keys(self):
    """Lists the keyword-argument names accepted by `build_description`."""
    return ["num_paragraphs", "nth_paragraph", "first_word"]
|
| 1009 |
+
|
| 1010 |
+
def check_following(self, value):
    """Checks for required number of paragraphs and correct first word.

    Args:
      value: a string representing the response. The response may contain
        paragraphs that are separated by two new lines and the first word of
        the nth paragraph will have to match a specified word.

    Returns:
      True if the number of paragraphs is the same as required and the first
      word of the specified paragraph is the same as required. Otherwise, false.
    """

    paragraphs = re.split(r"\n\n", value)
    num_paragraphs = len(paragraphs)

    # Blank segments (e.g. produced by leading/trailing separators) do not
    # count toward the paragraph total.
    for paragraph in paragraphs:
        if not paragraph.strip():
            num_paragraphs -= 1

    # check that index doesn't go out of bounds
    # NOTE(review): the index below is taken from the UNFILTERED list while
    # num_paragraphs counts only non-blank segments — when blank segments
    # precede the nth paragraph these can disagree; confirm upstream intent.
    if self._nth_paragraph <= num_paragraphs:
        paragraph = paragraphs[self._nth_paragraph - 1].strip()
        if not paragraph:
            return False
    else:
        return False

    first_word = ""
    punctuation = {".", ",", "?", "!", "'", '"'}

    # get first word and remove punctuation
    word = paragraph.split()[0].strip()
    # TODO(jeffrey): make more complex?
    word = word.lstrip("'")
    word = word.lstrip('"')

    # Accumulate lowercase letters up to the first punctuation character.
    for letter in word:
        if letter in punctuation:
            break
        first_word += letter.lower()

    return num_paragraphs == self._num_paragraphs and first_word == self._first_word
|
| 1053 |
+
|
| 1054 |
+
|
| 1055 |
+
# TODO(jeffrey) add relation - at least/at most?
|
| 1056 |
+
class KeySentenceChecker(Instruction):
    """Check the existence of certain key sentences."""

    def build_description(self, key_sentences=None, num_sentences=None):
        """Build the instruction description.

        Args:
          key_sentences: A sequence of strings representing the key sentences
            that are expected in the response.
          num_sentences: The number of key sentences that are expected to be
            seen in the response.

        Returns:
          A string representing the instruction description.
        """
        if key_sentences:
            self._key_sentences = key_sentences
        else:
            # TODO(jeffrey) make a generate sentences function? wonderwords package
            self._key_sentences = {"For now, this is fine."}

        if num_sentences:
            self._num_sentences = num_sentences
        else:
            self._num_sentences = random.randint(1, len(self._key_sentences))

        self._description_pattern = (
            "Include {num_sentences} of the following sentences {key_sentences}"
        )

        return self._description_pattern.format(
            num_sentences=self._num_sentences, key_sentences=self._key_sentences
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "num_sentences": self._num_sentences,
            "key_sentences": list(self._key_sentences),
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["num_sentences", "key_sentences"]

    def check_following(self, value):
        """Checks if the response contains the expected key sentences."""
        sentences = instructions_util.split_into_sentences(value)
        matches = sum(
            1 for key_sentence in self._key_sentences if key_sentence in sentences
        )
        return matches == self._num_sentences
|
| 1111 |
+
|
| 1112 |
+
|
| 1113 |
+
class ForbiddenWords(Instruction):
    """Checks that specified words are not used in response."""

    def build_description(self, forbidden_words=None):
        """Build the instruction description.

        Args:
          forbidden_words: A sequence of strings representing words that are
            not allowed in the response.

        Returns:
          A string representing the instruction description.
        """
        if not forbidden_words:
            self._forbidden_words = instructions_util.generate_keywords(
                num_keywords=_NUM_KEYWORDS
            )
        else:
            # Deduplicate caller-provided words.
            self._forbidden_words = list(set(forbidden_words))
        # Sorting keeps the rendered description deterministic.
        self._forbidden_words = sorted(self._forbidden_words)
        self._description_pattern = (
            "Do not include keywords {forbidden_words} in the response."
        )

        return self._description_pattern.format(forbidden_words=self._forbidden_words)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"forbidden_words": self._forbidden_words}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["forbidden_words"]

    def check_following(self, value):
        """Check if the response does not contain the forbidden keywords."""
        for word in self._forbidden_words:
            # re.escape prevents regex metacharacters in a forbidden word
            # (e.g. "c++", "what?") from corrupting or crashing the pattern.
            if re.search(
                r"\b" + re.escape(word) + r"\b", value, flags=re.IGNORECASE
            ):
                return False
        return True
|
| 1154 |
+
|
| 1155 |
+
|
| 1156 |
+
class RephraseParagraph(Instruction):
    """Checks that the paragraph is rephrased."""

    def build_description(self, *, original_paragraph, low, high):
        """Builds the instruction description.

        Args:
          original_paragraph: A string presenting the original paragraph. The
            rephrased response should have between low-high words in common.
          low: An integer presenting the lower bound of similar words.
          high: An integer representing the upper bound of similar words.

        Returns:
          A string representing the instruction description.
        """
        # TODO(jeffrey) make more encompassing
        self._original_paragraph = original_paragraph
        self._low = low
        self._high = high

        self._description = (
            "Rephrase the following paragraph: "
            + "{original_paragraph}\nYour response should have "
            + "between {low} and {high} of the same words. "
            + "Words are the same if and only if all of the "
            + "letters, ignoring cases, are the same. For "
            + "example, 'run' is the same as 'Run' but different "
            + "to 'ran'."
        )

        return self._description.format(
            original_paragraph=original_paragraph, low=self._low, high=self._high
        )

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {
            "original_paragraph": self._original_paragraph,
            "low": self._low,
            "high": self._high,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["original_paragraph", "low", "high"]

    def check_following(self, value):
        """Counts case-insensitive word overlap and checks it is within bounds."""
        response_counts = collections.Counter(re.findall(r"\w+", value.lower()))
        original_counts = collections.Counter(
            re.findall(r"\w+", self._original_paragraph.lower())
        )

        # Each word contributes min(occurrences in original, in response).
        overlap = sum(
            min(count, response_counts[word])
            for word, count in original_counts.items()
        )

        return self._low <= overlap <= self._high
|
| 1214 |
+
|
| 1215 |
+
|
| 1216 |
+
class TwoResponsesChecker(Instruction):
    """Check that two responses were given."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Give two different responses. Responses and only responses should"
            " be separated by 6 asterisk symbols: ******."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Checks if the response has two different answers.

        Args:
          value: A string representing the response.

        Returns:
          True if two responses are detected and false otherwise.
        """
        segments = value.split("******")
        valid_responses = []
        for position, segment in enumerate(segments):
            if segment.strip():
                valid_responses.append(segment)
            elif position not in (0, len(segments) - 1):
                # A blank interior segment means consecutive separators.
                return False
        if len(valid_responses) != 2:
            return False
        return valid_responses[0].strip() != valid_responses[1].strip()
|
| 1256 |
+
|
| 1257 |
+
|
| 1258 |
+
class RepeatPromptThenAnswer(Instruction):
    """Checks that Prompt is first repeated then answered."""

    def build_description(self, *, prompt_to_repeat=None):
        """Build the instruction description.

        Args:
          prompt_to_repeat: The prompt that is meant to be repeated.

        Returns:
          A string representing the instruction description.
        """
        if not prompt_to_repeat:
            raise ValueError("prompt_to_repeat must be set.")
        self._prompt_to_repeat = prompt_to_repeat
        self._description_pattern = (
            "First repeat the request word for word without change,"
            " then give your answer (1. do not say any words or characters"
            " before repeating the request; 2. the request you need to repeat"
            " does not include this sentence)"
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"prompt_to_repeat": self._prompt_to_repeat}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["prompt_to_repeat"]

    def check_following(self, value):
        """True iff the response starts with the prompt, case-insensitively."""
        return value.strip().lower().startswith(
            self._prompt_to_repeat.strip().lower()
        )
|
| 1293 |
+
|
| 1294 |
+
|
| 1295 |
+
class EndChecker(Instruction):
    """Checks that the prompt ends with a given phrase."""

    def build_description(self, *, end_phrase=None):
        """Build the instruction description.

        Args:
          end_phrase: A string representing the phrase the response should end
            with.

        Returns:
          A string representing the instruction description.
        """
        if isinstance(end_phrase, str):
            self._end_phrase = end_phrase.strip()
        else:
            self._end_phrase = end_phrase
        if self._end_phrase is None:
            self._end_phrase = random.choice(_ENDING_OPTIONS)
        self._description_pattern = (
            "Finish your response with this exact phrase {ender}. "
            "No other words should follow this phrase."
        )
        return self._description_pattern.format(ender=self._end_phrase)

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return {"end_phrase": self._end_phrase}

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["end_phrase"]

    def check_following(self, value):
        """Checks if the response ends with the expected phrase."""
        # NOTE: the stored phrase is lowercased in place here, matching the
        # original behavior (observable via get_instruction_args afterwards).
        self._end_phrase = self._end_phrase.strip().lower()
        normalized = value.strip().strip('"').lower()
        return normalized.endswith(self._end_phrase)
|
| 1330 |
+
|
| 1331 |
+
|
| 1332 |
+
class TitleChecker(Instruction):
    """Checks the response for a title."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Your answer must contain a title, wrapped in double angular brackets,"
            " such as <<poem of joy>>."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Checks if the response contains a non-empty <<title>>."""
        candidates = re.findall(r"<<[^\n]+>>", value)
        # A title counts only if something remains once the angle brackets
        # and surrounding whitespace are stripped.
        return any(
            candidate.lstrip("<").rstrip(">").strip() for candidate in candidates
        )
|
| 1360 |
+
|
| 1361 |
+
|
| 1362 |
+
class LetterFrequencyChecker(Instruction):
    """Checks letter frequency."""

    def build_description(self, *, letter=None, let_frequency=None, let_relation=None):
        """Build the instruction description.

        Args:
          letter: A string representing a letter that is expected in the
            response.
          let_frequency: An integer specifying the number of times `letter` is
            expected to appear in the response.
          let_relation: A string in (`less than`, `at least`), defining the
            relational operator for comparison; 'less than' means the actual
            number of occurrences < frequency, 'at least' means
            occurrences >= frequency.

        Returns:
          A string representing the instruction description.
        """
        # Accept only a single ASCII letter a-z (after lowercasing);
        # anything else is replaced by a random letter.
        usable = (
            bool(letter)
            and len(letter) == 1
            and 97 <= ord(letter.lower()) <= 122
        )
        if usable:
            self._letter = letter.strip().lower()
        else:
            self._letter = random.choice(list(string.ascii_letters)).lower()

        if let_frequency is None or let_frequency < 0:
            self._frequency = random.randint(1, _LETTER_FREQUENCY)
        else:
            self._frequency = let_frequency

        if let_relation is None:
            self._comparison_relation = random.choice(_COMPARISON_RELATION)
        elif let_relation not in _COMPARISON_RELATION:
            raise ValueError(
                "The supported relation for comparison must be in "
                f"{_COMPARISON_RELATION}, but {let_relation} is given."
            )
        else:
            self._comparison_relation = let_relation

        self._description_pattern = (
            "In your response, the letter {letter} should appear {let_relation}"
            " {let_frequency} times."
        )

        return self._description_pattern.format(
            letter=self._letter,
            let_frequency=self._frequency,
            let_relation=self._comparison_relation,
        )

    def get_instruction_args(self):
        """Returns the keyword args of build description."""
        return {
            "letter": self._letter,
            "let_frequency": self._frequency,
            "let_relation": self._comparison_relation,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["letter", "let_frequency", "let_relation"]

    def check_following(self, value):
        """Checks that the response contains the letter at the right frequency."""
        occurrences = value.lower().count(self._letter)
        if self._comparison_relation == _COMPARISON_RELATION[0]:
            return occurrences < self._frequency
        return occurrences >= self._frequency
|
| 1438 |
+
|
| 1439 |
+
|
| 1440 |
+
class CapitalLettersEnglishChecker(Instruction):
    """Checks that the response is in english and is in all capital letters."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Your entire response should be in English, and in all capital letters."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Checks that the response is in English and in all capital letters."""
        assert isinstance(value, str)

        if not value.isupper():
            return False
        try:
            return langdetect.detect(value) == "en"
        except langdetect.LangDetectException as err:
            # Count as instruction is followed.
            logging.error(
                "Unable to detect language for text %s due to %s", value, err
            )  # refex: disable=pytotw.037
            return True
|
| 1469 |
+
|
| 1470 |
+
|
| 1471 |
+
class LowercaseLettersEnglishChecker(Instruction):
    """Checks that the response is in english and is in all lowercase letters."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Your entire response should be in English, and in all lowercase"
            " letters. No capital letters are allowed."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Checks that the response is in English and in all lowercase letters."""
        assert isinstance(value, str)

        if not value.islower():
            return False
        try:
            return langdetect.detect(value) == "en"
        except langdetect.LangDetectException as err:
            # Count as instruction is followed.
            logging.error(
                "Unable to detect language for text %s due to %s", value, err
            )  # refex: disable=pytotw.037
            return True
|
| 1501 |
+
|
| 1502 |
+
|
| 1503 |
+
class CommaChecker(Instruction):
    """Checks the response for no commas."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "In your entire response, refrain from the use of any commas."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of `build_description`."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Checks that the response does not contain commas."""
        return "," not in value
|
| 1523 |
+
|
| 1524 |
+
|
| 1525 |
+
class CapitalWordFrequencyChecker(Instruction):
    """Checks frequency of words with all capital letters."""

    def build_description(
        self,
        capital_frequency=None,
        capital_relation=None,
    ):
        """Build the instruction description.

        Args:
          capital_frequency: An integer that represents the number of words
            that should be in all capital letters.
          capital_relation: A string that is 'at least' or 'at most' that
            refers to the frequency.

        Returns:
          A string representing the instruction description.
        """
        if capital_frequency is None:
            self._frequency = random.randint(1, _ALL_CAPITAL_WORD_FREQUENCY)
        else:
            self._frequency = capital_frequency

        if capital_relation is None:
            self._comparison_relation = random.choice(_COMPARISON_RELATION)
        elif capital_relation not in _COMPARISON_RELATION:
            raise ValueError(
                "The supported relation for comparison must be in "
                f"{_COMPARISON_RELATION}, but {capital_relation} is given."
            )
        else:
            self._comparison_relation = capital_relation

        self._description_pattern = (
            "In your response, words with all capital letters should appear"
            " {relation} {frequency} times."
        )

        return self._description_pattern.format(
            frequency=self._frequency, relation=self._comparison_relation
        )

    def get_instruction_args(self):
        """Returns the keyword args of build description."""
        return {
            "capital_frequency": self._frequency,
            "capital_relation": self._comparison_relation,
        }

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return ["capital_frequency", "capital_relation"]

    def check_following(self, value):
        """Checks the frequency of words with all capital letters."""
        # Hyphenated words will count as one word
        tokens = instructions_util.nltk.word_tokenize(value)
        num_capital = sum(1 for token in tokens if token.isupper())

        if self._comparison_relation == _COMPARISON_RELATION[0]:
            return num_capital < self._frequency
        return num_capital >= self._frequency
|
| 1589 |
+
|
| 1590 |
+
|
| 1591 |
+
class QuotationChecker(Instruction):
    """Checks response is wrapped with double quotation marks."""

    def build_description(self):
        """Build the instruction description."""
        self._description_pattern = (
            "Wrap your entire response with double quotation marks."
        )
        return self._description_pattern

    def get_instruction_args(self):
        """Returns the keyword args of build description."""
        return None

    def get_instruction_args_keys(self):
        """Returns the args keys of `build_description`."""
        return []

    def check_following(self, value):
        """Checks if the response is wrapped with double quotation marks."""
        trimmed = value.strip()
        return (
            len(trimmed) > 1
            and trimmed.startswith('"')
            and trimmed.endswith('"')
        )
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/ifeval/instructions_registry.py
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2023 The Google Research Authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
"""Registry of all instructions."""
|
| 16 |
+
|
| 17 |
+
from lm_eval.tasks.ifeval import instructions
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Category prefixes; a full instruction id is "<prefix><name>",
# e.g. "keywords:existence".
_KEYWORD = "keywords:"

_LANGUAGE = "language:"

_LENGTH = "length_constraints:"

_CONTENT = "detectable_content:"

_FORMAT = "detectable_format:"

_MULTITURN = "multi-turn:"

_COMBINATION = "combination:"

_STARTEND = "startend:"

_CHANGE_CASES = "change_case:"

_PUNCTUATION = "punctuation:"

# Maps each instruction id to the Instruction subclass implementing it.
INSTRUCTION_DICT = {
    _KEYWORD + "existence": instructions.KeywordChecker,
    _KEYWORD + "frequency": instructions.KeywordFrequencyChecker,
    # TODO(jeffreyzhou): make a proper set of sentences to choose from
    # _KEYWORD + "key_sentences": instructions.KeySentenceChecker,
    _KEYWORD + "forbidden_words": instructions.ForbiddenWords,
    _KEYWORD + "letter_frequency": instructions.LetterFrequencyChecker,
    _LANGUAGE + "response_language": instructions.ResponseLanguageChecker,
    _LENGTH + "number_sentences": instructions.NumberOfSentences,
    _LENGTH + "number_paragraphs": instructions.ParagraphChecker,
    _LENGTH + "number_words": instructions.NumberOfWords,
    _LENGTH + "nth_paragraph_first_word": instructions.ParagraphFirstWordCheck,
    _CONTENT + "number_placeholders": instructions.PlaceholderChecker,
    _CONTENT + "postscript": instructions.PostscriptChecker,
    _FORMAT + "number_bullet_lists": instructions.BulletListChecker,
    # TODO(jeffreyzhou): Pre-create paragraph or use prompt to replace
    # _CONTENT + "rephrase_paragraph": instructions.RephraseParagraph,
    _FORMAT + "constrained_response": instructions.ConstrainedResponseChecker,
    _FORMAT + "number_highlighted_sections": (instructions.HighlightSectionChecker),
    _FORMAT + "multiple_sections": instructions.SectionChecker,
    # TODO(tianjianlu): Re-enable rephrasing with preprocessing the message.
    # _FORMAT + "rephrase": instructions.RephraseChecker,
    _FORMAT + "json_format": instructions.JsonFormat,
    _FORMAT + "title": instructions.TitleChecker,
    # TODO(tianjianlu): Re-enable with specific prompts.
    # _MULTITURN + "constrained_start": instructions.ConstrainedStartChecker,
    _COMBINATION + "two_responses": instructions.TwoResponsesChecker,
    _COMBINATION + "repeat_prompt": instructions.RepeatPromptThenAnswer,
    _STARTEND + "end_checker": instructions.EndChecker,
    _CHANGE_CASES + "capital_word_frequency": instructions.CapitalWordFrequencyChecker,
    _CHANGE_CASES + "english_capital": instructions.CapitalLettersEnglishChecker,
    _CHANGE_CASES + "english_lowercase": instructions.LowercaseLettersEnglishChecker,
    _PUNCTUATION + "no_comma": instructions.CommaChecker,
    _STARTEND + "quotation": instructions.QuotationChecker,
}

# For each instruction id, the set of instruction ids it cannot be combined
# with in the same prompt. Symmetry is enforced later by conflict_make().
INSTRUCTION_CONFLICTS = {
    _KEYWORD + "existence": {_KEYWORD + "existence"},
    _KEYWORD + "frequency": {_KEYWORD + "frequency"},
    # TODO(jeffreyzhou): make a proper set of sentences to choose from
    # _KEYWORD + "key_sentences": instructions.KeySentenceChecker,
    _KEYWORD + "forbidden_words": {_KEYWORD + "forbidden_words"},
    _KEYWORD + "letter_frequency": {_KEYWORD + "letter_frequency"},
    _LANGUAGE + "response_language": {
        _LANGUAGE + "response_language",
        _FORMAT + "multiple_sections",
        _KEYWORD + "existence",
        _KEYWORD + "frequency",
        _KEYWORD + "forbidden_words",
        _STARTEND + "end_checker",
        _CHANGE_CASES + "english_capital",
        _CHANGE_CASES + "english_lowercase",
    },
    _LENGTH + "number_sentences": {_LENGTH + "number_sentences"},
    _LENGTH + "number_paragraphs": {
        _LENGTH + "number_paragraphs",
        _LENGTH + "nth_paragraph_first_word",
        _LENGTH + "number_sentences",
        # NOTE(review): duplicate entry below is redundant in a set literal.
        _LENGTH + "nth_paragraph_first_word",
    },
    _LENGTH + "number_words": {_LENGTH + "number_words"},
    _LENGTH + "nth_paragraph_first_word": {
        _LENGTH + "nth_paragraph_first_word",
        _LENGTH + "number_paragraphs",
    },
    _CONTENT + "number_placeholders": {_CONTENT + "number_placeholders"},
    _CONTENT + "postscript": {_CONTENT + "postscript"},
    _FORMAT + "number_bullet_lists": {_FORMAT + "number_bullet_lists"},
    # TODO(jeffreyzhou): Pre-create paragraph or use prompt to replace
    # _CONTENT + "rephrase_paragraph": instructions.RephraseParagraph,
    _FORMAT + "constrained_response": set(INSTRUCTION_DICT.keys()),
    _FORMAT + "number_highlighted_sections": {_FORMAT + "number_highlighted_sections"},
    _FORMAT + "multiple_sections": {
        _FORMAT + "multiple_sections",
        _LANGUAGE + "response_language",
        _FORMAT + "number_highlighted_sections",
    },
    # TODO(tianjianlu): Re-enable rephrasing with preprocessing the message.
    # _FORMAT + "rephrase": instructions.RephraseChecker,
    _FORMAT + "json_format": set(INSTRUCTION_DICT.keys()).difference(
        {_KEYWORD + "forbidden_words", _KEYWORD + "existence"}
    ),
    _FORMAT + "title": {_FORMAT + "title"},
    # TODO(tianjianlu): Re-enable with specific prompts.
    # _MULTITURN + "constrained_start": instructions.ConstrainedStartChecker,
    _COMBINATION + "two_responses": set(INSTRUCTION_DICT.keys()).difference(
        {
            _KEYWORD + "forbidden_words",
            _KEYWORD + "existence",
            _LANGUAGE + "response_language",
            _FORMAT + "title",
            _PUNCTUATION + "no_comma",
        }
    ),
    _COMBINATION + "repeat_prompt": set(INSTRUCTION_DICT.keys()).difference(
        {_KEYWORD + "existence", _FORMAT + "title", _PUNCTUATION + "no_comma"}
    ),
    _STARTEND + "end_checker": {_STARTEND + "end_checker"},
    _CHANGE_CASES + "capital_word_frequency": {
        _CHANGE_CASES + "capital_word_frequency",
        _CHANGE_CASES + "english_lowercase",
        _CHANGE_CASES + "english_capital",
    },
    _CHANGE_CASES + "english_capital": {_CHANGE_CASES + "english_capital"},
    _CHANGE_CASES + "english_lowercase": {
        _CHANGE_CASES + "english_lowercase",
        _CHANGE_CASES + "english_capital",
    },
    _PUNCTUATION + "no_comma": {_PUNCTUATION + "no_comma"},
    _STARTEND + "quotation": {_STARTEND + "quotation", _FORMAT + "title"},
}
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def conflict_make(conflicts):
    """Symmetrize and reflexively close a conflict mapping in place.

    Args:
      conflicts: Dictionary mapping an instruction id to the set of
        instruction ids it conflicts with.

    Returns:
      The same dictionary, revised so that every instruction conflicts with
      itself and, whenever A conflicts with B, B also conflicts with A.
    """
    for instruction_id, conflict_set in conflicts.items():
        # Snapshot the set before mutating peers, so adding the reflexive
        # entry below can never disturb an in-progress iteration.
        for other_id in tuple(conflict_set):
            conflicts[other_id].add(instruction_id)
        conflict_set.add(instruction_id)
    return conflicts
|
scripts/yans/lm-evaluation-harness/lm_eval/tasks/ifeval/instructions_util.py
ADDED
|
@@ -0,0 +1,1682 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2023 The Google Research Authors.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
"""Utility library of instructions."""
|
| 16 |
+
|
| 17 |
+
import functools
|
| 18 |
+
import random
|
| 19 |
+
import re
|
| 20 |
+
|
| 21 |
+
import immutabledict
|
| 22 |
+
import nltk
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def download_nltk_resources():
    """Fetch the NLTK 'punkt' tokenizer data unless it is already installed."""
    resource_path, package_name = "tokenizers/punkt", "punkt"
    try:
        nltk.data.find(resource_path)
    except LookupError:
        # Not present locally — pull it from the NLTK data servers.
        nltk.download(package_name)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
# NOTE: runs at import time — may trigger a one-off network download of the
# 'punkt' tokenizer data on first use.
download_nltk_resources()
|
| 34 |
+
|
| 35 |
+
WORD_LIST = [
|
| 36 |
+
"western",
|
| 37 |
+
"sentence",
|
| 38 |
+
"signal",
|
| 39 |
+
"dump",
|
| 40 |
+
"spot",
|
| 41 |
+
"opposite",
|
| 42 |
+
"bottom",
|
| 43 |
+
"potato",
|
| 44 |
+
"administration",
|
| 45 |
+
"working",
|
| 46 |
+
"welcome",
|
| 47 |
+
"morning",
|
| 48 |
+
"good",
|
| 49 |
+
"agency",
|
| 50 |
+
"primary",
|
| 51 |
+
"wish",
|
| 52 |
+
"responsibility",
|
| 53 |
+
"press",
|
| 54 |
+
"problem",
|
| 55 |
+
"president",
|
| 56 |
+
"steal",
|
| 57 |
+
"brush",
|
| 58 |
+
"read",
|
| 59 |
+
"type",
|
| 60 |
+
"beat",
|
| 61 |
+
"trainer",
|
| 62 |
+
"growth",
|
| 63 |
+
"lock",
|
| 64 |
+
"bone",
|
| 65 |
+
"case",
|
| 66 |
+
"equal",
|
| 67 |
+
"comfortable",
|
| 68 |
+
"region",
|
| 69 |
+
"replacement",
|
| 70 |
+
"performance",
|
| 71 |
+
"mate",
|
| 72 |
+
"walk",
|
| 73 |
+
"medicine",
|
| 74 |
+
"film",
|
| 75 |
+
"thing",
|
| 76 |
+
"rock",
|
| 77 |
+
"tap",
|
| 78 |
+
"total",
|
| 79 |
+
"competition",
|
| 80 |
+
"ease",
|
| 81 |
+
"south",
|
| 82 |
+
"establishment",
|
| 83 |
+
"gather",
|
| 84 |
+
"parking",
|
| 85 |
+
"world",
|
| 86 |
+
"plenty",
|
| 87 |
+
"breath",
|
| 88 |
+
"claim",
|
| 89 |
+
"alcohol",
|
| 90 |
+
"trade",
|
| 91 |
+
"dear",
|
| 92 |
+
"highlight",
|
| 93 |
+
"street",
|
| 94 |
+
"matter",
|
| 95 |
+
"decision",
|
| 96 |
+
"mess",
|
| 97 |
+
"agreement",
|
| 98 |
+
"studio",
|
| 99 |
+
"coach",
|
| 100 |
+
"assist",
|
| 101 |
+
"brain",
|
| 102 |
+
"wing",
|
| 103 |
+
"style",
|
| 104 |
+
"private",
|
| 105 |
+
"top",
|
| 106 |
+
"brown",
|
| 107 |
+
"leg",
|
| 108 |
+
"buy",
|
| 109 |
+
"procedure",
|
| 110 |
+
"method",
|
| 111 |
+
"speed",
|
| 112 |
+
"high",
|
| 113 |
+
"company",
|
| 114 |
+
"valuable",
|
| 115 |
+
"pie",
|
| 116 |
+
"analyst",
|
| 117 |
+
"session",
|
| 118 |
+
"pattern",
|
| 119 |
+
"district",
|
| 120 |
+
"pleasure",
|
| 121 |
+
"dinner",
|
| 122 |
+
"swimming",
|
| 123 |
+
"joke",
|
| 124 |
+
"order",
|
| 125 |
+
"plate",
|
| 126 |
+
"department",
|
| 127 |
+
"motor",
|
| 128 |
+
"cell",
|
| 129 |
+
"spend",
|
| 130 |
+
"cabinet",
|
| 131 |
+
"difference",
|
| 132 |
+
"power",
|
| 133 |
+
"examination",
|
| 134 |
+
"engine",
|
| 135 |
+
"horse",
|
| 136 |
+
"dimension",
|
| 137 |
+
"pay",
|
| 138 |
+
"toe",
|
| 139 |
+
"curve",
|
| 140 |
+
"literature",
|
| 141 |
+
"bother",
|
| 142 |
+
"fire",
|
| 143 |
+
"possibility",
|
| 144 |
+
"debate",
|
| 145 |
+
"activity",
|
| 146 |
+
"passage",
|
| 147 |
+
"hello",
|
| 148 |
+
"cycle",
|
| 149 |
+
"background",
|
| 150 |
+
"quiet",
|
| 151 |
+
"author",
|
| 152 |
+
"effect",
|
| 153 |
+
"actor",
|
| 154 |
+
"page",
|
| 155 |
+
"bicycle",
|
| 156 |
+
"error",
|
| 157 |
+
"throat",
|
| 158 |
+
"attack",
|
| 159 |
+
"character",
|
| 160 |
+
"phone",
|
| 161 |
+
"tea",
|
| 162 |
+
"increase",
|
| 163 |
+
"outcome",
|
| 164 |
+
"file",
|
| 165 |
+
"specific",
|
| 166 |
+
"inspector",
|
| 167 |
+
"internal",
|
| 168 |
+
"potential",
|
| 169 |
+
"staff",
|
| 170 |
+
"building",
|
| 171 |
+
"employer",
|
| 172 |
+
"shoe",
|
| 173 |
+
"hand",
|
| 174 |
+
"direction",
|
| 175 |
+
"garden",
|
| 176 |
+
"purchase",
|
| 177 |
+
"interview",
|
| 178 |
+
"study",
|
| 179 |
+
"recognition",
|
| 180 |
+
"member",
|
| 181 |
+
"spiritual",
|
| 182 |
+
"oven",
|
| 183 |
+
"sandwich",
|
| 184 |
+
"weird",
|
| 185 |
+
"passenger",
|
| 186 |
+
"particular",
|
| 187 |
+
"response",
|
| 188 |
+
"reaction",
|
| 189 |
+
"size",
|
| 190 |
+
"variation",
|
| 191 |
+
"a",
|
| 192 |
+
"cancel",
|
| 193 |
+
"candy",
|
| 194 |
+
"exit",
|
| 195 |
+
"guest",
|
| 196 |
+
"condition",
|
| 197 |
+
"fly",
|
| 198 |
+
"price",
|
| 199 |
+
"weakness",
|
| 200 |
+
"convert",
|
| 201 |
+
"hotel",
|
| 202 |
+
"great",
|
| 203 |
+
"mouth",
|
| 204 |
+
"mind",
|
| 205 |
+
"song",
|
| 206 |
+
"sugar",
|
| 207 |
+
"suspect",
|
| 208 |
+
"telephone",
|
| 209 |
+
"ear",
|
| 210 |
+
"roof",
|
| 211 |
+
"paint",
|
| 212 |
+
"refrigerator",
|
| 213 |
+
"organization",
|
| 214 |
+
"jury",
|
| 215 |
+
"reward",
|
| 216 |
+
"engineering",
|
| 217 |
+
"day",
|
| 218 |
+
"possession",
|
| 219 |
+
"crew",
|
| 220 |
+
"bar",
|
| 221 |
+
"road",
|
| 222 |
+
"description",
|
| 223 |
+
"celebration",
|
| 224 |
+
"score",
|
| 225 |
+
"mark",
|
| 226 |
+
"letter",
|
| 227 |
+
"shower",
|
| 228 |
+
"suggestion",
|
| 229 |
+
"sir",
|
| 230 |
+
"luck",
|
| 231 |
+
"national",
|
| 232 |
+
"progress",
|
| 233 |
+
"hall",
|
| 234 |
+
"stroke",
|
| 235 |
+
"theory",
|
| 236 |
+
"offer",
|
| 237 |
+
"story",
|
| 238 |
+
"tax",
|
| 239 |
+
"definition",
|
| 240 |
+
"history",
|
| 241 |
+
"ride",
|
| 242 |
+
"medium",
|
| 243 |
+
"opening",
|
| 244 |
+
"glass",
|
| 245 |
+
"elevator",
|
| 246 |
+
"stomach",
|
| 247 |
+
"question",
|
| 248 |
+
"ability",
|
| 249 |
+
"leading",
|
| 250 |
+
"village",
|
| 251 |
+
"computer",
|
| 252 |
+
"city",
|
| 253 |
+
"grand",
|
| 254 |
+
"confidence",
|
| 255 |
+
"candle",
|
| 256 |
+
"priest",
|
| 257 |
+
"recommendation",
|
| 258 |
+
"point",
|
| 259 |
+
"necessary",
|
| 260 |
+
"body",
|
| 261 |
+
"desk",
|
| 262 |
+
"secret",
|
| 263 |
+
"horror",
|
| 264 |
+
"noise",
|
| 265 |
+
"culture",
|
| 266 |
+
"warning",
|
| 267 |
+
"water",
|
| 268 |
+
"round",
|
| 269 |
+
"diet",
|
| 270 |
+
"flower",
|
| 271 |
+
"bus",
|
| 272 |
+
"tough",
|
| 273 |
+
"permission",
|
| 274 |
+
"week",
|
| 275 |
+
"prompt",
|
| 276 |
+
"connection",
|
| 277 |
+
"abuse",
|
| 278 |
+
"height",
|
| 279 |
+
"save",
|
| 280 |
+
"corner",
|
| 281 |
+
"border",
|
| 282 |
+
"stress",
|
| 283 |
+
"drive",
|
| 284 |
+
"stop",
|
| 285 |
+
"rip",
|
| 286 |
+
"meal",
|
| 287 |
+
"listen",
|
| 288 |
+
"confusion",
|
| 289 |
+
"girlfriend",
|
| 290 |
+
"living",
|
| 291 |
+
"relation",
|
| 292 |
+
"significance",
|
| 293 |
+
"plan",
|
| 294 |
+
"creative",
|
| 295 |
+
"atmosphere",
|
| 296 |
+
"blame",
|
| 297 |
+
"invite",
|
| 298 |
+
"housing",
|
| 299 |
+
"paper",
|
| 300 |
+
"drink",
|
| 301 |
+
"roll",
|
| 302 |
+
"silver",
|
| 303 |
+
"drunk",
|
| 304 |
+
"age",
|
| 305 |
+
"damage",
|
| 306 |
+
"smoke",
|
| 307 |
+
"environment",
|
| 308 |
+
"pack",
|
| 309 |
+
"savings",
|
| 310 |
+
"influence",
|
| 311 |
+
"tourist",
|
| 312 |
+
"rain",
|
| 313 |
+
"post",
|
| 314 |
+
"sign",
|
| 315 |
+
"grandmother",
|
| 316 |
+
"run",
|
| 317 |
+
"profit",
|
| 318 |
+
"push",
|
| 319 |
+
"clerk",
|
| 320 |
+
"final",
|
| 321 |
+
"wine",
|
| 322 |
+
"swim",
|
| 323 |
+
"pause",
|
| 324 |
+
"stuff",
|
| 325 |
+
"singer",
|
| 326 |
+
"funeral",
|
| 327 |
+
"average",
|
| 328 |
+
"source",
|
| 329 |
+
"scene",
|
| 330 |
+
"tradition",
|
| 331 |
+
"personal",
|
| 332 |
+
"snow",
|
| 333 |
+
"nobody",
|
| 334 |
+
"distance",
|
| 335 |
+
"sort",
|
| 336 |
+
"sensitive",
|
| 337 |
+
"animal",
|
| 338 |
+
"major",
|
| 339 |
+
"negotiation",
|
| 340 |
+
"click",
|
| 341 |
+
"mood",
|
| 342 |
+
"period",
|
| 343 |
+
"arrival",
|
| 344 |
+
"expression",
|
| 345 |
+
"holiday",
|
| 346 |
+
"repeat",
|
| 347 |
+
"dust",
|
| 348 |
+
"closet",
|
| 349 |
+
"gold",
|
| 350 |
+
"bad",
|
| 351 |
+
"sail",
|
| 352 |
+
"combination",
|
| 353 |
+
"clothes",
|
| 354 |
+
"emphasis",
|
| 355 |
+
"duty",
|
| 356 |
+
"black",
|
| 357 |
+
"step",
|
| 358 |
+
"school",
|
| 359 |
+
"jump",
|
| 360 |
+
"document",
|
| 361 |
+
"professional",
|
| 362 |
+
"lip",
|
| 363 |
+
"chemical",
|
| 364 |
+
"front",
|
| 365 |
+
"wake",
|
| 366 |
+
"while",
|
| 367 |
+
"inside",
|
| 368 |
+
"watch",
|
| 369 |
+
"row",
|
| 370 |
+
"subject",
|
| 371 |
+
"penalty",
|
| 372 |
+
"balance",
|
| 373 |
+
"possible",
|
| 374 |
+
"adult",
|
| 375 |
+
"aside",
|
| 376 |
+
"sample",
|
| 377 |
+
"appeal",
|
| 378 |
+
"wedding",
|
| 379 |
+
"depth",
|
| 380 |
+
"king",
|
| 381 |
+
"award",
|
| 382 |
+
"wife",
|
| 383 |
+
"blow",
|
| 384 |
+
"site",
|
| 385 |
+
"camp",
|
| 386 |
+
"music",
|
| 387 |
+
"safe",
|
| 388 |
+
"gift",
|
| 389 |
+
"fault",
|
| 390 |
+
"guess",
|
| 391 |
+
"act",
|
| 392 |
+
"shame",
|
| 393 |
+
"drama",
|
| 394 |
+
"capital",
|
| 395 |
+
"exam",
|
| 396 |
+
"stupid",
|
| 397 |
+
"record",
|
| 398 |
+
"sound",
|
| 399 |
+
"swing",
|
| 400 |
+
"novel",
|
| 401 |
+
"minimum",
|
| 402 |
+
"ratio",
|
| 403 |
+
"machine",
|
| 404 |
+
"shape",
|
| 405 |
+
"lead",
|
| 406 |
+
"operation",
|
| 407 |
+
"salary",
|
| 408 |
+
"cloud",
|
| 409 |
+
"affair",
|
| 410 |
+
"hit",
|
| 411 |
+
"chapter",
|
| 412 |
+
"stage",
|
| 413 |
+
"quantity",
|
| 414 |
+
"access",
|
| 415 |
+
"army",
|
| 416 |
+
"chain",
|
| 417 |
+
"traffic",
|
| 418 |
+
"kick",
|
| 419 |
+
"analysis",
|
| 420 |
+
"airport",
|
| 421 |
+
"time",
|
| 422 |
+
"vacation",
|
| 423 |
+
"philosophy",
|
| 424 |
+
"ball",
|
| 425 |
+
"chest",
|
| 426 |
+
"thanks",
|
| 427 |
+
"place",
|
| 428 |
+
"mountain",
|
| 429 |
+
"advertising",
|
| 430 |
+
"red",
|
| 431 |
+
"past",
|
| 432 |
+
"rent",
|
| 433 |
+
"return",
|
| 434 |
+
"tour",
|
| 435 |
+
"house",
|
| 436 |
+
"construction",
|
| 437 |
+
"net",
|
| 438 |
+
"native",
|
| 439 |
+
"war",
|
| 440 |
+
"figure",
|
| 441 |
+
"fee",
|
| 442 |
+
"spray",
|
| 443 |
+
"user",
|
| 444 |
+
"dirt",
|
| 445 |
+
"shot",
|
| 446 |
+
"task",
|
| 447 |
+
"stick",
|
| 448 |
+
"friend",
|
| 449 |
+
"software",
|
| 450 |
+
"promotion",
|
| 451 |
+
"interaction",
|
| 452 |
+
"surround",
|
| 453 |
+
"block",
|
| 454 |
+
"purpose",
|
| 455 |
+
"practice",
|
| 456 |
+
"conflict",
|
| 457 |
+
"routine",
|
| 458 |
+
"requirement",
|
| 459 |
+
"bonus",
|
| 460 |
+
"hole",
|
| 461 |
+
"state",
|
| 462 |
+
"junior",
|
| 463 |
+
"sweet",
|
| 464 |
+
"catch",
|
| 465 |
+
"tear",
|
| 466 |
+
"fold",
|
| 467 |
+
"wall",
|
| 468 |
+
"editor",
|
| 469 |
+
"life",
|
| 470 |
+
"position",
|
| 471 |
+
"pound",
|
| 472 |
+
"respect",
|
| 473 |
+
"bathroom",
|
| 474 |
+
"coat",
|
| 475 |
+
"script",
|
| 476 |
+
"job",
|
| 477 |
+
"teach",
|
| 478 |
+
"birth",
|
| 479 |
+
"view",
|
| 480 |
+
"resolve",
|
| 481 |
+
"theme",
|
| 482 |
+
"employee",
|
| 483 |
+
"doubt",
|
| 484 |
+
"market",
|
| 485 |
+
"education",
|
| 486 |
+
"serve",
|
| 487 |
+
"recover",
|
| 488 |
+
"tone",
|
| 489 |
+
"harm",
|
| 490 |
+
"miss",
|
| 491 |
+
"union",
|
| 492 |
+
"understanding",
|
| 493 |
+
"cow",
|
| 494 |
+
"river",
|
| 495 |
+
"association",
|
| 496 |
+
"concept",
|
| 497 |
+
"training",
|
| 498 |
+
"recipe",
|
| 499 |
+
"relationship",
|
| 500 |
+
"reserve",
|
| 501 |
+
"depression",
|
| 502 |
+
"proof",
|
| 503 |
+
"hair",
|
| 504 |
+
"revenue",
|
| 505 |
+
"independent",
|
| 506 |
+
"lift",
|
| 507 |
+
"assignment",
|
| 508 |
+
"temporary",
|
| 509 |
+
"amount",
|
| 510 |
+
"loss",
|
| 511 |
+
"edge",
|
| 512 |
+
"track",
|
| 513 |
+
"check",
|
| 514 |
+
"rope",
|
| 515 |
+
"estimate",
|
| 516 |
+
"pollution",
|
| 517 |
+
"stable",
|
| 518 |
+
"message",
|
| 519 |
+
"delivery",
|
| 520 |
+
"perspective",
|
| 521 |
+
"mirror",
|
| 522 |
+
"assistant",
|
| 523 |
+
"representative",
|
| 524 |
+
"witness",
|
| 525 |
+
"nature",
|
| 526 |
+
"judge",
|
| 527 |
+
"fruit",
|
| 528 |
+
"tip",
|
| 529 |
+
"devil",
|
| 530 |
+
"town",
|
| 531 |
+
"emergency",
|
| 532 |
+
"upper",
|
| 533 |
+
"drop",
|
| 534 |
+
"stay",
|
| 535 |
+
"human",
|
| 536 |
+
"neck",
|
| 537 |
+
"speaker",
|
| 538 |
+
"network",
|
| 539 |
+
"sing",
|
| 540 |
+
"resist",
|
| 541 |
+
"league",
|
| 542 |
+
"trip",
|
| 543 |
+
"signature",
|
| 544 |
+
"lawyer",
|
| 545 |
+
"importance",
|
| 546 |
+
"gas",
|
| 547 |
+
"choice",
|
| 548 |
+
"engineer",
|
| 549 |
+
"success",
|
| 550 |
+
"part",
|
| 551 |
+
"external",
|
| 552 |
+
"worker",
|
| 553 |
+
"simple",
|
| 554 |
+
"quarter",
|
| 555 |
+
"student",
|
| 556 |
+
"heart",
|
| 557 |
+
"pass",
|
| 558 |
+
"spite",
|
| 559 |
+
"shift",
|
| 560 |
+
"rough",
|
| 561 |
+
"lady",
|
| 562 |
+
"grass",
|
| 563 |
+
"community",
|
| 564 |
+
"garage",
|
| 565 |
+
"youth",
|
| 566 |
+
"standard",
|
| 567 |
+
"skirt",
|
| 568 |
+
"promise",
|
| 569 |
+
"blind",
|
| 570 |
+
"television",
|
| 571 |
+
"disease",
|
| 572 |
+
"commission",
|
| 573 |
+
"positive",
|
| 574 |
+
"energy",
|
| 575 |
+
"calm",
|
| 576 |
+
"presence",
|
| 577 |
+
"tune",
|
| 578 |
+
"basis",
|
| 579 |
+
"preference",
|
| 580 |
+
"head",
|
| 581 |
+
"common",
|
| 582 |
+
"cut",
|
| 583 |
+
"somewhere",
|
| 584 |
+
"presentation",
|
| 585 |
+
"current",
|
| 586 |
+
"thought",
|
| 587 |
+
"revolution",
|
| 588 |
+
"effort",
|
| 589 |
+
"master",
|
| 590 |
+
"implement",
|
| 591 |
+
"republic",
|
| 592 |
+
"floor",
|
| 593 |
+
"principle",
|
| 594 |
+
"stranger",
|
| 595 |
+
"shoulder",
|
| 596 |
+
"grade",
|
| 597 |
+
"button",
|
| 598 |
+
"tennis",
|
| 599 |
+
"police",
|
| 600 |
+
"collection",
|
| 601 |
+
"account",
|
| 602 |
+
"register",
|
| 603 |
+
"glove",
|
| 604 |
+
"divide",
|
| 605 |
+
"professor",
|
| 606 |
+
"chair",
|
| 607 |
+
"priority",
|
| 608 |
+
"combine",
|
| 609 |
+
"peace",
|
| 610 |
+
"extension",
|
| 611 |
+
"maybe",
|
| 612 |
+
"evening",
|
| 613 |
+
"frame",
|
| 614 |
+
"sister",
|
| 615 |
+
"wave",
|
| 616 |
+
"code",
|
| 617 |
+
"application",
|
| 618 |
+
"mouse",
|
| 619 |
+
"match",
|
| 620 |
+
"counter",
|
| 621 |
+
"bottle",
|
| 622 |
+
"half",
|
| 623 |
+
"cheek",
|
| 624 |
+
"resolution",
|
| 625 |
+
"back",
|
| 626 |
+
"knowledge",
|
| 627 |
+
"make",
|
| 628 |
+
"discussion",
|
| 629 |
+
"screw",
|
| 630 |
+
"length",
|
| 631 |
+
"accident",
|
| 632 |
+
"battle",
|
| 633 |
+
"dress",
|
| 634 |
+
"knee",
|
| 635 |
+
"log",
|
| 636 |
+
"package",
|
| 637 |
+
"it",
|
| 638 |
+
"turn",
|
| 639 |
+
"hearing",
|
| 640 |
+
"newspaper",
|
| 641 |
+
"layer",
|
| 642 |
+
"wealth",
|
| 643 |
+
"profile",
|
| 644 |
+
"imagination",
|
| 645 |
+
"answer",
|
| 646 |
+
"weekend",
|
| 647 |
+
"teacher",
|
| 648 |
+
"appearance",
|
| 649 |
+
"meet",
|
| 650 |
+
"bike",
|
| 651 |
+
"rise",
|
| 652 |
+
"belt",
|
| 653 |
+
"crash",
|
| 654 |
+
"bowl",
|
| 655 |
+
"equivalent",
|
| 656 |
+
"support",
|
| 657 |
+
"image",
|
| 658 |
+
"poem",
|
| 659 |
+
"risk",
|
| 660 |
+
"excitement",
|
| 661 |
+
"remote",
|
| 662 |
+
"secretary",
|
| 663 |
+
"public",
|
| 664 |
+
"produce",
|
| 665 |
+
"plane",
|
| 666 |
+
"display",
|
| 667 |
+
"money",
|
| 668 |
+
"sand",
|
| 669 |
+
"situation",
|
| 670 |
+
"punch",
|
| 671 |
+
"customer",
|
| 672 |
+
"title",
|
| 673 |
+
"shake",
|
| 674 |
+
"mortgage",
|
| 675 |
+
"option",
|
| 676 |
+
"number",
|
| 677 |
+
"pop",
|
| 678 |
+
"window",
|
| 679 |
+
"extent",
|
| 680 |
+
"nothing",
|
| 681 |
+
"experience",
|
| 682 |
+
"opinion",
|
| 683 |
+
"departure",
|
| 684 |
+
"dance",
|
| 685 |
+
"indication",
|
| 686 |
+
"boy",
|
| 687 |
+
"material",
|
| 688 |
+
"band",
|
| 689 |
+
"leader",
|
| 690 |
+
"sun",
|
| 691 |
+
"beautiful",
|
| 692 |
+
"muscle",
|
| 693 |
+
"farmer",
|
| 694 |
+
"variety",
|
| 695 |
+
"fat",
|
| 696 |
+
"handle",
|
| 697 |
+
"director",
|
| 698 |
+
"opportunity",
|
| 699 |
+
"calendar",
|
| 700 |
+
"outside",
|
| 701 |
+
"pace",
|
| 702 |
+
"bath",
|
| 703 |
+
"fish",
|
| 704 |
+
"consequence",
|
| 705 |
+
"put",
|
| 706 |
+
"owner",
|
| 707 |
+
"go",
|
| 708 |
+
"doctor",
|
| 709 |
+
"information",
|
| 710 |
+
"share",
|
| 711 |
+
"hurt",
|
| 712 |
+
"protection",
|
| 713 |
+
"career",
|
| 714 |
+
"finance",
|
| 715 |
+
"force",
|
| 716 |
+
"golf",
|
| 717 |
+
"garbage",
|
| 718 |
+
"aspect",
|
| 719 |
+
"kid",
|
| 720 |
+
"food",
|
| 721 |
+
"boot",
|
| 722 |
+
"milk",
|
| 723 |
+
"respond",
|
| 724 |
+
"objective",
|
| 725 |
+
"reality",
|
| 726 |
+
"raw",
|
| 727 |
+
"ring",
|
| 728 |
+
"mall",
|
| 729 |
+
"one",
|
| 730 |
+
"impact",
|
| 731 |
+
"area",
|
| 732 |
+
"news",
|
| 733 |
+
"international",
|
| 734 |
+
"series",
|
| 735 |
+
"impress",
|
| 736 |
+
"mother",
|
| 737 |
+
"shelter",
|
| 738 |
+
"strike",
|
| 739 |
+
"loan",
|
| 740 |
+
"month",
|
| 741 |
+
"seat",
|
| 742 |
+
"anything",
|
| 743 |
+
"entertainment",
|
| 744 |
+
"familiar",
|
| 745 |
+
"clue",
|
| 746 |
+
"year",
|
| 747 |
+
"glad",
|
| 748 |
+
"supermarket",
|
| 749 |
+
"natural",
|
| 750 |
+
"god",
|
| 751 |
+
"cost",
|
| 752 |
+
"conversation",
|
| 753 |
+
"tie",
|
| 754 |
+
"ruin",
|
| 755 |
+
"comfort",
|
| 756 |
+
"earth",
|
| 757 |
+
"storm",
|
| 758 |
+
"percentage",
|
| 759 |
+
"assistance",
|
| 760 |
+
"budget",
|
| 761 |
+
"strength",
|
| 762 |
+
"beginning",
|
| 763 |
+
"sleep",
|
| 764 |
+
"other",
|
| 765 |
+
"young",
|
| 766 |
+
"unit",
|
| 767 |
+
"fill",
|
| 768 |
+
"store",
|
| 769 |
+
"desire",
|
| 770 |
+
"hide",
|
| 771 |
+
"value",
|
| 772 |
+
"cup",
|
| 773 |
+
"maintenance",
|
| 774 |
+
"nurse",
|
| 775 |
+
"function",
|
| 776 |
+
"tower",
|
| 777 |
+
"role",
|
| 778 |
+
"class",
|
| 779 |
+
"camera",
|
| 780 |
+
"database",
|
| 781 |
+
"panic",
|
| 782 |
+
"nation",
|
| 783 |
+
"basket",
|
| 784 |
+
"ice",
|
| 785 |
+
"art",
|
| 786 |
+
"spirit",
|
| 787 |
+
"chart",
|
| 788 |
+
"exchange",
|
| 789 |
+
"feedback",
|
| 790 |
+
"statement",
|
| 791 |
+
"reputation",
|
| 792 |
+
"search",
|
| 793 |
+
"hunt",
|
| 794 |
+
"exercise",
|
| 795 |
+
"nasty",
|
| 796 |
+
"notice",
|
| 797 |
+
"male",
|
| 798 |
+
"yard",
|
| 799 |
+
"annual",
|
| 800 |
+
"collar",
|
| 801 |
+
"date",
|
| 802 |
+
"platform",
|
| 803 |
+
"plant",
|
| 804 |
+
"fortune",
|
| 805 |
+
"passion",
|
| 806 |
+
"friendship",
|
| 807 |
+
"spread",
|
| 808 |
+
"cancer",
|
| 809 |
+
"ticket",
|
| 810 |
+
"attitude",
|
| 811 |
+
"island",
|
| 812 |
+
"active",
|
| 813 |
+
"object",
|
| 814 |
+
"service",
|
| 815 |
+
"buyer",
|
| 816 |
+
"bite",
|
| 817 |
+
"card",
|
| 818 |
+
"face",
|
| 819 |
+
"steak",
|
| 820 |
+
"proposal",
|
| 821 |
+
"patient",
|
| 822 |
+
"heat",
|
| 823 |
+
"rule",
|
| 824 |
+
"resident",
|
| 825 |
+
"broad",
|
| 826 |
+
"politics",
|
| 827 |
+
"west",
|
| 828 |
+
"knife",
|
| 829 |
+
"expert",
|
| 830 |
+
"girl",
|
| 831 |
+
"design",
|
| 832 |
+
"salt",
|
| 833 |
+
"baseball",
|
| 834 |
+
"grab",
|
| 835 |
+
"inspection",
|
| 836 |
+
"cousin",
|
| 837 |
+
"couple",
|
| 838 |
+
"magazine",
|
| 839 |
+
"cook",
|
| 840 |
+
"dependent",
|
| 841 |
+
"security",
|
| 842 |
+
"chicken",
|
| 843 |
+
"version",
|
| 844 |
+
"currency",
|
| 845 |
+
"ladder",
|
| 846 |
+
"scheme",
|
| 847 |
+
"kitchen",
|
| 848 |
+
"employment",
|
| 849 |
+
"local",
|
| 850 |
+
"attention",
|
| 851 |
+
"manager",
|
| 852 |
+
"fact",
|
| 853 |
+
"cover",
|
| 854 |
+
"sad",
|
| 855 |
+
"guard",
|
| 856 |
+
"relative",
|
| 857 |
+
"county",
|
| 858 |
+
"rate",
|
| 859 |
+
"lunch",
|
| 860 |
+
"program",
|
| 861 |
+
"initiative",
|
| 862 |
+
"gear",
|
| 863 |
+
"bridge",
|
| 864 |
+
"breast",
|
| 865 |
+
"talk",
|
| 866 |
+
"dish",
|
| 867 |
+
"guarantee",
|
| 868 |
+
"beer",
|
| 869 |
+
"vehicle",
|
| 870 |
+
"reception",
|
| 871 |
+
"woman",
|
| 872 |
+
"substance",
|
| 873 |
+
"copy",
|
| 874 |
+
"lecture",
|
| 875 |
+
"advantage",
|
| 876 |
+
"park",
|
| 877 |
+
"cold",
|
| 878 |
+
"death",
|
| 879 |
+
"mix",
|
| 880 |
+
"hold",
|
| 881 |
+
"scale",
|
| 882 |
+
"tomorrow",
|
| 883 |
+
"blood",
|
| 884 |
+
"request",
|
| 885 |
+
"green",
|
| 886 |
+
"cookie",
|
| 887 |
+
"church",
|
| 888 |
+
"strip",
|
| 889 |
+
"forever",
|
| 890 |
+
"beyond",
|
| 891 |
+
"debt",
|
| 892 |
+
"tackle",
|
| 893 |
+
"wash",
|
| 894 |
+
"following",
|
| 895 |
+
"feel",
|
| 896 |
+
"maximum",
|
| 897 |
+
"sector",
|
| 898 |
+
"sea",
|
| 899 |
+
"property",
|
| 900 |
+
"economics",
|
| 901 |
+
"menu",
|
| 902 |
+
"bench",
|
| 903 |
+
"try",
|
| 904 |
+
"language",
|
| 905 |
+
"start",
|
| 906 |
+
"call",
|
| 907 |
+
"solid",
|
| 908 |
+
"address",
|
| 909 |
+
"income",
|
| 910 |
+
"foot",
|
| 911 |
+
"senior",
|
| 912 |
+
"honey",
|
| 913 |
+
"few",
|
| 914 |
+
"mixture",
|
| 915 |
+
"cash",
|
| 916 |
+
"grocery",
|
| 917 |
+
"link",
|
| 918 |
+
"map",
|
| 919 |
+
"form",
|
| 920 |
+
"factor",
|
| 921 |
+
"pot",
|
| 922 |
+
"model",
|
| 923 |
+
"writer",
|
| 924 |
+
"farm",
|
| 925 |
+
"winter",
|
| 926 |
+
"skill",
|
| 927 |
+
"anywhere",
|
| 928 |
+
"birthday",
|
| 929 |
+
"policy",
|
| 930 |
+
"release",
|
| 931 |
+
"husband",
|
| 932 |
+
"lab",
|
| 933 |
+
"hurry",
|
| 934 |
+
"mail",
|
| 935 |
+
"equipment",
|
| 936 |
+
"sink",
|
| 937 |
+
"pair",
|
| 938 |
+
"driver",
|
| 939 |
+
"consideration",
|
| 940 |
+
"leather",
|
| 941 |
+
"skin",
|
| 942 |
+
"blue",
|
| 943 |
+
"boat",
|
| 944 |
+
"sale",
|
| 945 |
+
"brick",
|
| 946 |
+
"two",
|
| 947 |
+
"feed",
|
| 948 |
+
"square",
|
| 949 |
+
"dot",
|
| 950 |
+
"rush",
|
| 951 |
+
"dream",
|
| 952 |
+
"location",
|
| 953 |
+
"afternoon",
|
| 954 |
+
"manufacturer",
|
| 955 |
+
"control",
|
| 956 |
+
"occasion",
|
| 957 |
+
"trouble",
|
| 958 |
+
"introduction",
|
| 959 |
+
"advice",
|
| 960 |
+
"bet",
|
| 961 |
+
"eat",
|
| 962 |
+
"kill",
|
| 963 |
+
"category",
|
| 964 |
+
"manner",
|
| 965 |
+
"office",
|
| 966 |
+
"estate",
|
| 967 |
+
"pride",
|
| 968 |
+
"awareness",
|
| 969 |
+
"slip",
|
| 970 |
+
"crack",
|
| 971 |
+
"client",
|
| 972 |
+
"nail",
|
| 973 |
+
"shoot",
|
| 974 |
+
"membership",
|
| 975 |
+
"soft",
|
| 976 |
+
"anybody",
|
| 977 |
+
"web",
|
| 978 |
+
"official",
|
| 979 |
+
"individual",
|
| 980 |
+
"pizza",
|
| 981 |
+
"interest",
|
| 982 |
+
"bag",
|
| 983 |
+
"spell",
|
| 984 |
+
"profession",
|
| 985 |
+
"queen",
|
| 986 |
+
"deal",
|
| 987 |
+
"resource",
|
| 988 |
+
"ship",
|
| 989 |
+
"guy",
|
| 990 |
+
"chocolate",
|
| 991 |
+
"joint",
|
| 992 |
+
"formal",
|
| 993 |
+
"upstairs",
|
| 994 |
+
"car",
|
| 995 |
+
"resort",
|
| 996 |
+
"abroad",
|
| 997 |
+
"dealer",
|
| 998 |
+
"associate",
|
| 999 |
+
"finger",
|
| 1000 |
+
"surgery",
|
| 1001 |
+
"comment",
|
| 1002 |
+
"team",
|
| 1003 |
+
"detail",
|
| 1004 |
+
"crazy",
|
| 1005 |
+
"path",
|
| 1006 |
+
"tale",
|
| 1007 |
+
"initial",
|
| 1008 |
+
"arm",
|
| 1009 |
+
"radio",
|
| 1010 |
+
"demand",
|
| 1011 |
+
"single",
|
| 1012 |
+
"draw",
|
| 1013 |
+
"yellow",
|
| 1014 |
+
"contest",
|
| 1015 |
+
"piece",
|
| 1016 |
+
"quote",
|
| 1017 |
+
"pull",
|
| 1018 |
+
"commercial",
|
| 1019 |
+
"shirt",
|
| 1020 |
+
"contribution",
|
| 1021 |
+
"cream",
|
| 1022 |
+
"channel",
|
| 1023 |
+
"suit",
|
| 1024 |
+
"discipline",
|
| 1025 |
+
"instruction",
|
| 1026 |
+
"concert",
|
| 1027 |
+
"speech",
|
| 1028 |
+
"low",
|
| 1029 |
+
"effective",
|
| 1030 |
+
"hang",
|
| 1031 |
+
"scratch",
|
| 1032 |
+
"industry",
|
| 1033 |
+
"breakfast",
|
| 1034 |
+
"lay",
|
| 1035 |
+
"join",
|
| 1036 |
+
"metal",
|
| 1037 |
+
"bedroom",
|
| 1038 |
+
"minute",
|
| 1039 |
+
"product",
|
| 1040 |
+
"rest",
|
| 1041 |
+
"temperature",
|
| 1042 |
+
"many",
|
| 1043 |
+
"give",
|
| 1044 |
+
"argument",
|
| 1045 |
+
"print",
|
| 1046 |
+
"purple",
|
| 1047 |
+
"laugh",
|
| 1048 |
+
"health",
|
| 1049 |
+
"credit",
|
| 1050 |
+
"investment",
|
| 1051 |
+
"sell",
|
| 1052 |
+
"setting",
|
| 1053 |
+
"lesson",
|
| 1054 |
+
"egg",
|
| 1055 |
+
"middle",
|
| 1056 |
+
"marriage",
|
| 1057 |
+
"level",
|
| 1058 |
+
"evidence",
|
| 1059 |
+
"phrase",
|
| 1060 |
+
"love",
|
| 1061 |
+
"self",
|
| 1062 |
+
"benefit",
|
| 1063 |
+
"guidance",
|
| 1064 |
+
"affect",
|
| 1065 |
+
"you",
|
| 1066 |
+
"dad",
|
| 1067 |
+
"anxiety",
|
| 1068 |
+
"special",
|
| 1069 |
+
"boyfriend",
|
| 1070 |
+
"test",
|
| 1071 |
+
"blank",
|
| 1072 |
+
"payment",
|
| 1073 |
+
"soup",
|
| 1074 |
+
"obligation",
|
| 1075 |
+
"reply",
|
| 1076 |
+
"smile",
|
| 1077 |
+
"deep",
|
| 1078 |
+
"complaint",
|
| 1079 |
+
"addition",
|
| 1080 |
+
"review",
|
| 1081 |
+
"box",
|
| 1082 |
+
"towel",
|
| 1083 |
+
"minor",
|
| 1084 |
+
"fun",
|
| 1085 |
+
"soil",
|
| 1086 |
+
"issue",
|
| 1087 |
+
"cigarette",
|
| 1088 |
+
"internet",
|
| 1089 |
+
"gain",
|
| 1090 |
+
"tell",
|
| 1091 |
+
"entry",
|
| 1092 |
+
"spare",
|
| 1093 |
+
"incident",
|
| 1094 |
+
"family",
|
| 1095 |
+
"refuse",
|
| 1096 |
+
"branch",
|
| 1097 |
+
"can",
|
| 1098 |
+
"pen",
|
| 1099 |
+
"grandfather",
|
| 1100 |
+
"constant",
|
| 1101 |
+
"tank",
|
| 1102 |
+
"uncle",
|
| 1103 |
+
"climate",
|
| 1104 |
+
"ground",
|
| 1105 |
+
"volume",
|
| 1106 |
+
"communication",
|
| 1107 |
+
"kind",
|
| 1108 |
+
"poet",
|
| 1109 |
+
"child",
|
| 1110 |
+
"screen",
|
| 1111 |
+
"mine",
|
| 1112 |
+
"quit",
|
| 1113 |
+
"gene",
|
| 1114 |
+
"lack",
|
| 1115 |
+
"charity",
|
| 1116 |
+
"memory",
|
| 1117 |
+
"tooth",
|
| 1118 |
+
"fear",
|
| 1119 |
+
"mention",
|
| 1120 |
+
"marketing",
|
| 1121 |
+
"reveal",
|
| 1122 |
+
"reason",
|
| 1123 |
+
"court",
|
| 1124 |
+
"season",
|
| 1125 |
+
"freedom",
|
| 1126 |
+
"land",
|
| 1127 |
+
"sport",
|
| 1128 |
+
"audience",
|
| 1129 |
+
"classroom",
|
| 1130 |
+
"law",
|
| 1131 |
+
"hook",
|
| 1132 |
+
"win",
|
| 1133 |
+
"carry",
|
| 1134 |
+
"eye",
|
| 1135 |
+
"smell",
|
| 1136 |
+
"distribution",
|
| 1137 |
+
"research",
|
| 1138 |
+
"country",
|
| 1139 |
+
"dare",
|
| 1140 |
+
"hope",
|
| 1141 |
+
"whereas",
|
| 1142 |
+
"stretch",
|
| 1143 |
+
"library",
|
| 1144 |
+
"if",
|
| 1145 |
+
"delay",
|
| 1146 |
+
"college",
|
| 1147 |
+
"plastic",
|
| 1148 |
+
"book",
|
| 1149 |
+
"present",
|
| 1150 |
+
"use",
|
| 1151 |
+
"worry",
|
| 1152 |
+
"champion",
|
| 1153 |
+
"goal",
|
| 1154 |
+
"economy",
|
| 1155 |
+
"march",
|
| 1156 |
+
"election",
|
| 1157 |
+
"reflection",
|
| 1158 |
+
"midnight",
|
| 1159 |
+
"slide",
|
| 1160 |
+
"inflation",
|
| 1161 |
+
"action",
|
| 1162 |
+
"challenge",
|
| 1163 |
+
"guitar",
|
| 1164 |
+
"coast",
|
| 1165 |
+
"apple",
|
| 1166 |
+
"campaign",
|
| 1167 |
+
"field",
|
| 1168 |
+
"jacket",
|
| 1169 |
+
"sense",
|
| 1170 |
+
"way",
|
| 1171 |
+
"visual",
|
| 1172 |
+
"remove",
|
| 1173 |
+
"weather",
|
| 1174 |
+
"trash",
|
| 1175 |
+
"cable",
|
| 1176 |
+
"regret",
|
| 1177 |
+
"buddy",
|
| 1178 |
+
"beach",
|
| 1179 |
+
"historian",
|
| 1180 |
+
"courage",
|
| 1181 |
+
"sympathy",
|
| 1182 |
+
"truck",
|
| 1183 |
+
"tension",
|
| 1184 |
+
"permit",
|
| 1185 |
+
"nose",
|
| 1186 |
+
"bed",
|
| 1187 |
+
"son",
|
| 1188 |
+
"person",
|
| 1189 |
+
"base",
|
| 1190 |
+
"meat",
|
| 1191 |
+
"usual",
|
| 1192 |
+
"air",
|
| 1193 |
+
"meeting",
|
| 1194 |
+
"worth",
|
| 1195 |
+
"game",
|
| 1196 |
+
"independence",
|
| 1197 |
+
"physical",
|
| 1198 |
+
"brief",
|
| 1199 |
+
"play",
|
| 1200 |
+
"raise",
|
| 1201 |
+
"board",
|
| 1202 |
+
"she",
|
| 1203 |
+
"key",
|
| 1204 |
+
"writing",
|
| 1205 |
+
"pick",
|
| 1206 |
+
"command",
|
| 1207 |
+
"party",
|
| 1208 |
+
"yesterday",
|
| 1209 |
+
"spring",
|
| 1210 |
+
"candidate",
|
| 1211 |
+
"physics",
|
| 1212 |
+
"university",
|
| 1213 |
+
"concern",
|
| 1214 |
+
"development",
|
| 1215 |
+
"change",
|
| 1216 |
+
"string",
|
| 1217 |
+
"target",
|
| 1218 |
+
"instance",
|
| 1219 |
+
"room",
|
| 1220 |
+
"bitter",
|
| 1221 |
+
"bird",
|
| 1222 |
+
"football",
|
| 1223 |
+
"normal",
|
| 1224 |
+
"split",
|
| 1225 |
+
"impression",
|
| 1226 |
+
"wood",
|
| 1227 |
+
"long",
|
| 1228 |
+
"meaning",
|
| 1229 |
+
"stock",
|
| 1230 |
+
"cap",
|
| 1231 |
+
"leadership",
|
| 1232 |
+
"media",
|
| 1233 |
+
"ambition",
|
| 1234 |
+
"fishing",
|
| 1235 |
+
"essay",
|
| 1236 |
+
"salad",
|
| 1237 |
+
"repair",
|
| 1238 |
+
"today",
|
| 1239 |
+
"designer",
|
| 1240 |
+
"night",
|
| 1241 |
+
"bank",
|
| 1242 |
+
"drawing",
|
| 1243 |
+
"inevitable",
|
| 1244 |
+
"phase",
|
| 1245 |
+
"vast",
|
| 1246 |
+
"chip",
|
| 1247 |
+
"anger",
|
| 1248 |
+
"switch",
|
| 1249 |
+
"cry",
|
| 1250 |
+
"twist",
|
| 1251 |
+
"personality",
|
| 1252 |
+
"attempt",
|
| 1253 |
+
"storage",
|
| 1254 |
+
"being",
|
| 1255 |
+
"preparation",
|
| 1256 |
+
"bat",
|
| 1257 |
+
"selection",
|
| 1258 |
+
"white",
|
| 1259 |
+
"technology",
|
| 1260 |
+
"contract",
|
| 1261 |
+
"side",
|
| 1262 |
+
"section",
|
| 1263 |
+
"station",
|
| 1264 |
+
"till",
|
| 1265 |
+
"structure",
|
| 1266 |
+
"tongue",
|
| 1267 |
+
"taste",
|
| 1268 |
+
"truth",
|
| 1269 |
+
"difficulty",
|
| 1270 |
+
"group",
|
| 1271 |
+
"limit",
|
| 1272 |
+
"main",
|
| 1273 |
+
"move",
|
| 1274 |
+
"feeling",
|
| 1275 |
+
"light",
|
| 1276 |
+
"example",
|
| 1277 |
+
"mission",
|
| 1278 |
+
"might",
|
| 1279 |
+
"wait",
|
| 1280 |
+
"wheel",
|
| 1281 |
+
"shop",
|
| 1282 |
+
"host",
|
| 1283 |
+
"classic",
|
| 1284 |
+
"alternative",
|
| 1285 |
+
"cause",
|
| 1286 |
+
"agent",
|
| 1287 |
+
"consist",
|
| 1288 |
+
"table",
|
| 1289 |
+
"airline",
|
| 1290 |
+
"text",
|
| 1291 |
+
"pool",
|
| 1292 |
+
"craft",
|
| 1293 |
+
"range",
|
| 1294 |
+
"fuel",
|
| 1295 |
+
"tool",
|
| 1296 |
+
"partner",
|
| 1297 |
+
"load",
|
| 1298 |
+
"entrance",
|
| 1299 |
+
"deposit",
|
| 1300 |
+
"hate",
|
| 1301 |
+
"article",
|
| 1302 |
+
"video",
|
| 1303 |
+
"summer",
|
| 1304 |
+
"feature",
|
| 1305 |
+
"extreme",
|
| 1306 |
+
"mobile",
|
| 1307 |
+
"hospital",
|
| 1308 |
+
"flight",
|
| 1309 |
+
"fall",
|
| 1310 |
+
"pension",
|
| 1311 |
+
"piano",
|
| 1312 |
+
"fail",
|
| 1313 |
+
"result",
|
| 1314 |
+
"rub",
|
| 1315 |
+
"gap",
|
| 1316 |
+
"system",
|
| 1317 |
+
"report",
|
| 1318 |
+
"suck",
|
| 1319 |
+
"ordinary",
|
| 1320 |
+
"wind",
|
| 1321 |
+
"nerve",
|
| 1322 |
+
"ask",
|
| 1323 |
+
"shine",
|
| 1324 |
+
"note",
|
| 1325 |
+
"line",
|
| 1326 |
+
"mom",
|
| 1327 |
+
"perception",
|
| 1328 |
+
"brother",
|
| 1329 |
+
"reference",
|
| 1330 |
+
"bend",
|
| 1331 |
+
"charge",
|
| 1332 |
+
"treat",
|
| 1333 |
+
"trick",
|
| 1334 |
+
"term",
|
| 1335 |
+
"homework",
|
| 1336 |
+
"bake",
|
| 1337 |
+
"bid",
|
| 1338 |
+
"status",
|
| 1339 |
+
"project",
|
| 1340 |
+
"strategy",
|
| 1341 |
+
"orange",
|
| 1342 |
+
"let",
|
| 1343 |
+
"enthusiasm",
|
| 1344 |
+
"parent",
|
| 1345 |
+
"concentrate",
|
| 1346 |
+
"device",
|
| 1347 |
+
"travel",
|
| 1348 |
+
"poetry",
|
| 1349 |
+
"business",
|
| 1350 |
+
"society",
|
| 1351 |
+
"kiss",
|
| 1352 |
+
"end",
|
| 1353 |
+
"vegetable",
|
| 1354 |
+
"employ",
|
| 1355 |
+
"schedule",
|
| 1356 |
+
"hour",
|
| 1357 |
+
"brave",
|
| 1358 |
+
"focus",
|
| 1359 |
+
"process",
|
| 1360 |
+
"movie",
|
| 1361 |
+
"illegal",
|
| 1362 |
+
"general",
|
| 1363 |
+
"coffee",
|
| 1364 |
+
"ad",
|
| 1365 |
+
"highway",
|
| 1366 |
+
"chemistry",
|
| 1367 |
+
"psychology",
|
| 1368 |
+
"hire",
|
| 1369 |
+
"bell",
|
| 1370 |
+
"conference",
|
| 1371 |
+
"relief",
|
| 1372 |
+
"show",
|
| 1373 |
+
"neat",
|
| 1374 |
+
"funny",
|
| 1375 |
+
"weight",
|
| 1376 |
+
"quality",
|
| 1377 |
+
"club",
|
| 1378 |
+
"daughter",
|
| 1379 |
+
"zone",
|
| 1380 |
+
"touch",
|
| 1381 |
+
"tonight",
|
| 1382 |
+
"shock",
|
| 1383 |
+
"burn",
|
| 1384 |
+
"excuse",
|
| 1385 |
+
"name",
|
| 1386 |
+
"survey",
|
| 1387 |
+
"landscape",
|
| 1388 |
+
"advance",
|
| 1389 |
+
"satisfaction",
|
| 1390 |
+
"bread",
|
| 1391 |
+
"disaster",
|
| 1392 |
+
"item",
|
| 1393 |
+
"hat",
|
| 1394 |
+
"prior",
|
| 1395 |
+
"shopping",
|
| 1396 |
+
"visit",
|
| 1397 |
+
"east",
|
| 1398 |
+
"photo",
|
| 1399 |
+
"home",
|
| 1400 |
+
"idea",
|
| 1401 |
+
"father",
|
| 1402 |
+
"comparison",
|
| 1403 |
+
"cat",
|
| 1404 |
+
"pipe",
|
| 1405 |
+
"winner",
|
| 1406 |
+
"count",
|
| 1407 |
+
"lake",
|
| 1408 |
+
"fight",
|
| 1409 |
+
"prize",
|
| 1410 |
+
"foundation",
|
| 1411 |
+
"dog",
|
| 1412 |
+
"keep",
|
| 1413 |
+
"ideal",
|
| 1414 |
+
"fan",
|
| 1415 |
+
"struggle",
|
| 1416 |
+
"peak",
|
| 1417 |
+
"safety",
|
| 1418 |
+
"solution",
|
| 1419 |
+
"hell",
|
| 1420 |
+
"conclusion",
|
| 1421 |
+
"population",
|
| 1422 |
+
"strain",
|
| 1423 |
+
"alarm",
|
| 1424 |
+
"measurement",
|
| 1425 |
+
"second",
|
| 1426 |
+
"train",
|
| 1427 |
+
"race",
|
| 1428 |
+
"due",
|
| 1429 |
+
"insurance",
|
| 1430 |
+
"boss",
|
| 1431 |
+
"tree",
|
| 1432 |
+
"monitor",
|
| 1433 |
+
"sick",
|
| 1434 |
+
"course",
|
| 1435 |
+
"drag",
|
| 1436 |
+
"appointment",
|
| 1437 |
+
"slice",
|
| 1438 |
+
"still",
|
| 1439 |
+
"care",
|
| 1440 |
+
"patience",
|
| 1441 |
+
"rich",
|
| 1442 |
+
"escape",
|
| 1443 |
+
"emotion",
|
| 1444 |
+
"royal",
|
| 1445 |
+
"female",
|
| 1446 |
+
"childhood",
|
| 1447 |
+
"government",
|
| 1448 |
+
"picture",
|
| 1449 |
+
"will",
|
| 1450 |
+
"sock",
|
| 1451 |
+
"big",
|
| 1452 |
+
"gate",
|
| 1453 |
+
"oil",
|
| 1454 |
+
"cross",
|
| 1455 |
+
"pin",
|
| 1456 |
+
"improvement",
|
| 1457 |
+
"championship",
|
| 1458 |
+
"silly",
|
| 1459 |
+
"help",
|
| 1460 |
+
"sky",
|
| 1461 |
+
"pitch",
|
| 1462 |
+
"man",
|
| 1463 |
+
"diamond",
|
| 1464 |
+
"most",
|
| 1465 |
+
"transition",
|
| 1466 |
+
"work",
|
| 1467 |
+
"science",
|
| 1468 |
+
"committee",
|
| 1469 |
+
"moment",
|
| 1470 |
+
"fix",
|
| 1471 |
+
"teaching",
|
| 1472 |
+
"dig",
|
| 1473 |
+
"specialist",
|
| 1474 |
+
"complex",
|
| 1475 |
+
"guide",
|
| 1476 |
+
"people",
|
| 1477 |
+
"dead",
|
| 1478 |
+
"voice",
|
| 1479 |
+
"original",
|
| 1480 |
+
"break",
|
| 1481 |
+
"topic",
|
| 1482 |
+
"data",
|
| 1483 |
+
"degree",
|
| 1484 |
+
"reading",
|
| 1485 |
+
"recording",
|
| 1486 |
+
"bunch",
|
| 1487 |
+
"reach",
|
| 1488 |
+
"judgment",
|
| 1489 |
+
"lie",
|
| 1490 |
+
"regular",
|
| 1491 |
+
"set",
|
| 1492 |
+
"painting",
|
| 1493 |
+
"mode",
|
| 1494 |
+
"list",
|
| 1495 |
+
"player",
|
| 1496 |
+
"bear",
|
| 1497 |
+
"north",
|
| 1498 |
+
"wonder",
|
| 1499 |
+
"carpet",
|
| 1500 |
+
"heavy",
|
| 1501 |
+
"officer",
|
| 1502 |
+
"negative",
|
| 1503 |
+
"clock",
|
| 1504 |
+
"unique",
|
| 1505 |
+
"baby",
|
| 1506 |
+
"pain",
|
| 1507 |
+
"assumption",
|
| 1508 |
+
"disk",
|
| 1509 |
+
"iron",
|
| 1510 |
+
"bill",
|
| 1511 |
+
"drawer",
|
| 1512 |
+
"look",
|
| 1513 |
+
"double",
|
| 1514 |
+
"mistake",
|
| 1515 |
+
"finish",
|
| 1516 |
+
"future",
|
| 1517 |
+
"brilliant",
|
| 1518 |
+
"contact",
|
| 1519 |
+
"math",
|
| 1520 |
+
"rice",
|
| 1521 |
+
"leave",
|
| 1522 |
+
"restaurant",
|
| 1523 |
+
"discount",
|
| 1524 |
+
"sex",
|
| 1525 |
+
"virus",
|
| 1526 |
+
"bit",
|
| 1527 |
+
"trust",
|
| 1528 |
+
"event",
|
| 1529 |
+
"wear",
|
| 1530 |
+
"juice",
|
| 1531 |
+
"failure",
|
| 1532 |
+
"bug",
|
| 1533 |
+
"context",
|
| 1534 |
+
"mud",
|
| 1535 |
+
"whole",
|
| 1536 |
+
"wrap",
|
| 1537 |
+
"intention",
|
| 1538 |
+
"draft",
|
| 1539 |
+
"pressure",
|
| 1540 |
+
"cake",
|
| 1541 |
+
"dark",
|
| 1542 |
+
"explanation",
|
| 1543 |
+
"space",
|
| 1544 |
+
"angle",
|
| 1545 |
+
"word",
|
| 1546 |
+
"efficiency",
|
| 1547 |
+
"management",
|
| 1548 |
+
"habit",
|
| 1549 |
+
"star",
|
| 1550 |
+
"chance",
|
| 1551 |
+
"finding",
|
| 1552 |
+
"transportation",
|
| 1553 |
+
"stand",
|
| 1554 |
+
"criticism",
|
| 1555 |
+
"flow",
|
| 1556 |
+
"door",
|
| 1557 |
+
"injury",
|
| 1558 |
+
"insect",
|
| 1559 |
+
"surprise",
|
| 1560 |
+
"apartment",
|
| 1561 |
+
] # pylint: disable=line-too-long
|
| 1562 |
+
|
| 1563 |
+
# ISO 639-1 codes to language names.
# Immutable mapping from two-letter ISO 639-1 language codes to English
# language names. Wrapped in immutabledict so callers cannot mutate it.
LANGUAGE_CODES = immutabledict.immutabledict(
    {
        "en": "English",
        "es": "Spanish",
        "pt": "Portuguese",
        "ar": "Arabic",
        "hi": "Hindi",
        "fr": "French",
        "ru": "Russian",
        "de": "German",
        "ja": "Japanese",
        "it": "Italian",
        "bn": "Bengali",
        "uk": "Ukrainian",
        "th": "Thai",
        "ur": "Urdu",
        "ta": "Tamil",
        "te": "Telugu",
        "bg": "Bulgarian",
        "ko": "Korean",
        "pl": "Polish",
        "he": "Hebrew",
        "fa": "Persian",
        "vi": "Vietnamese",
        "ne": "Nepali",
        "sw": "Swahili",
        "kn": "Kannada",
        "mr": "Marathi",
        "gu": "Gujarati",
        "pa": "Punjabi",
        "ml": "Malayalam",
        "fi": "Finnish",
    }
)
|
| 1598 |
+
|
| 1599 |
+
# Regex fragments used by split_into_sentences() to recognize periods that
# do NOT terminate a sentence (titles, acronyms, domains, decimals, ...).
_ALPHABETS = "([A-Za-z])"
_PREFIXES = "(Mr|St|Mrs|Ms|Dr)[.]"
_SUFFIXES = "(Inc|Ltd|Jr|Sr|Co)"
_STARTERS = r"(Mr|Mrs|Ms|Dr|Prof|Capt|Cpt|Lt|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
_ACRONYMS = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
_WEBSITES = "[.](com|net|org|io|gov|edu|me)"
_DIGITS = "([0-9])"
_MULTIPLE_DOTS = r"\.{2,}"


def split_into_sentences(text):
    """Split the text into sentences.

    Periods that do not end a sentence (abbreviations, acronyms, decimal
    numbers, web domains, ellipses) are first masked with a ``<prd>``
    marker, sentence boundaries are tagged with ``<stop>``, and the text
    is then split on those tags.

    Args:
      text: A string that consists of more than or equal to one sentences.

    Returns:
      A list of strings where each string is a sentence.
    """
    text = " " + text + " "
    text = text.replace("\n", " ")
    # Mask periods belonging to titles, web domains and decimal numbers.
    text = re.sub(_PREFIXES, r"\1<prd>", text)
    text = re.sub(_WEBSITES, r"<prd>\1", text)
    text = re.sub(_DIGITS + "[.]" + _DIGITS, r"\1<prd>\2", text)
    # An ellipsis keeps its dot count but still ends the sentence.
    text = re.sub(
        _MULTIPLE_DOTS,
        lambda dots: "<prd>" * len(dots.group(0)) + "<stop>",
        text,
    )
    if "Ph.D" in text:
        text = text.replace("Ph.D.", "Ph<prd>D<prd>")
    # Mask periods in single-letter initials, acronyms, and suffixes;
    # an acronym/suffix followed by a sentence starter ends the sentence.
    text = re.sub(r"\s" + _ALPHABETS + "[.] ", r" \1<prd> ", text)
    text = re.sub(_ACRONYMS + " " + _STARTERS, r"\1<stop> \2", text)
    text = re.sub(
        _ALPHABETS + "[.]" + _ALPHABETS + "[.]" + _ALPHABETS + "[.]",
        r"\1<prd>\2<prd>\3<prd>",
        text,
    )
    text = re.sub(_ALPHABETS + "[.]" + _ALPHABETS + "[.]", r"\1<prd>\2<prd>", text)
    text = re.sub(" " + _SUFFIXES + "[.] " + _STARTERS, r" \1<stop> \2", text)
    text = re.sub(" " + _SUFFIXES + "[.]", r" \1<prd>", text)
    text = re.sub(" " + _ALPHABETS + "[.]", r" \1<prd>", text)
    # Move terminal punctuation outside a closing quote so the quote stays
    # attached to its sentence.  (str.replace is a no-op when absent.)
    for inside, outside in ((".”", "”."), ('."', '".'), ('!"', '"!'), ('?"', '"?')):
        text = text.replace(inside, outside)
    # Tag sentence boundaries, restore the masked periods, then split.
    for punct in (".", "?", "!"):
        text = text.replace(punct, punct + "<stop>")
    text = text.replace("<prd>", ".")
    sentences = [chunk.strip() for chunk in text.split("<stop>")]
    if sentences and not sentences[-1]:
        sentences.pop()
    return sentences
|
| 1658 |
+
|
| 1659 |
+
|
| 1660 |
+
def count_words(text):
|
| 1661 |
+
"""Counts the number of words."""
|
| 1662 |
+
tokenizer = nltk.tokenize.RegexpTokenizer(r"\w+")
|
| 1663 |
+
tokens = tokenizer.tokenize(text)
|
| 1664 |
+
num_words = len(tokens)
|
| 1665 |
+
return num_words
|
| 1666 |
+
|
| 1667 |
+
|
| 1668 |
+
@functools.lru_cache(maxsize=None)
def _get_sentence_tokenizer():
    # Loading the punkt pickle is comparatively expensive, so the tokenizer
    # is cached for the lifetime of the process (the cache holds exactly one
    # entry since the function takes no arguments).
    """Load and cache the NLTK English punkt sentence tokenizer."""
    return nltk.data.load("nltk:tokenizers/punkt/english.pickle")
|
| 1671 |
+
|
| 1672 |
+
|
| 1673 |
+
def count_sentences(text):
    """Count the number of sentences.

    Sentence boundaries are determined by the cached NLTK punkt tokenizer.

    Args:
      text: The string whose sentences are counted.

    Returns:
      The number of sentences found in ``text``.
    """
    return len(_get_sentence_tokenizer().tokenize(text))
|
| 1678 |
+
|
| 1679 |
+
|
| 1680 |
+
def generate_keywords(num_keywords):
    """Randomly generates a few keywords.

    Args:
      num_keywords: How many distinct keywords to draw.

    Returns:
      A list of ``num_keywords`` unique words sampled uniformly at random
      (without replacement) from ``WORD_LIST``.
    """
    return random.sample(population=WORD_LIST, k=num_keywords)
|