hc99 committed
Commit ceaa2e7 · verified · 1 Parent(s): d93da99

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full set.

Files changed (50):
  1. testbed/EleutherAI__lm-evaluation-harness/.coveragerc +28 -0
  2. testbed/EleutherAI__lm-evaluation-harness/.flake8 +5 -0
  3. testbed/EleutherAI__lm-evaluation-harness/.gitignore +24 -0
  4. testbed/EleutherAI__lm-evaluation-harness/.pre-commit-config.yaml +54 -0
  5. testbed/EleutherAI__lm-evaluation-harness/CITATION.bib +10 -0
  6. testbed/EleutherAI__lm-evaluation-harness/LICENSE.md +21 -0
  7. testbed/EleutherAI__lm-evaluation-harness/README.md +497 -0
  8. testbed/EleutherAI__lm-evaluation-harness/ignore.txt +8 -0
  9. testbed/EleutherAI__lm-evaluation-harness/mypy.ini +29 -0
  10. testbed/EleutherAI__lm-evaluation-harness/pile_statistics.json +37 -0
  11. testbed/EleutherAI__lm-evaluation-harness/requirements.txt +1 -0
  12. testbed/EleutherAI__lm-evaluation-harness/setup.py +5 -0
  13. testbed/EleutherAI__lm-evaluation-harness/tests/__init__.py +0 -0
  14. testbed/EleutherAI__lm-evaluation-harness/tests/test_cli.py +43 -0
  15. testbed/EleutherAI__lm-evaluation-harness/tests/test_evaluator.py +151 -0
  16. testbed/EleutherAI__lm-evaluation-harness/tests/test_include_path.py +93 -0
  17. testbed/EleutherAI__lm-evaluation-harness/tests/test_janitor.py +446 -0
  18. testbed/EleutherAI__lm-evaluation-harness/tests/test_misc.py +14 -0
  19. testbed/EleutherAI__lm-evaluation-harness/tests/test_prompt.py +119 -0
  20. testbed/EleutherAI__lm-evaluation-harness/tests/test_requests_caching.py +122 -0
  21. testbed/EleutherAI__lm-evaluation-harness/tests/test_tasks.py +119 -0
  22. testbed/EleutherAI__lm-evaluation-harness/tests/testconfigs/arc_test.yaml +21 -0
  23. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/ai2_arc_10_hf_pretrained-EleutherAI-pythia-14m-dtype-float32-device-cpu.txt +6 -0
  24. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/lambada_openai_10_hf_pretrained-EleutherAI-pythia-14m-dtype-float32-device-cpu.txt +4 -0
  25. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/mmlu_stem_10_hf_pretrained-EleutherAI-pythia-14m-dtype-float32-device-cpu.txt +22 -0
  26. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wikitext_10_hf_pretrained-EleutherAI-pythia-14m-dtype-float32-device-cpu.txt +5 -0
  27. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-de-v0-res.json +1 -0
  28. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-ja-v0-greedy_until +1 -0
  29. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-ja-v0-res.json +1 -0
  30. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-ps-v0-greedy_until +1 -0
  31. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-ru-v0-res.json +1 -0
  32. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-ta-v0-greedy_until +1 -0
  33. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-ta-v0-res.json +1 -0
  34. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-zh-v0-greedy_until +1 -0
  35. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-zh-v0-res.json +1 -0
  36. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-zh-v1-greedy_until +1 -0
  37. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-zh-v1-res.json +1 -0
  38. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-fr-de-v0-res.json +1 -0
  39. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-iu-en-v0-greedy_until +1 -0
  40. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-ja-en-v0-greedy_until +1 -0
  41. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-ja-en-v0-res.json +1 -0
  42. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-km-en-v0-greedy_until +1 -0
  43. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-pl-en-v0-greedy_until +1 -0
  44. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-pl-en-v0-res.json +1 -0
  45. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-ps-en-v0-res.json +1 -0
  46. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-ru-en-v0-greedy_until +1 -0
  47. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-ru-en-v0-res.json +1 -0
  48. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-ta-en-v0-greedy_until +1 -0
  49. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-ta-en-v0-res.json +1 -0
  50. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-zh-en-v0-greedy_until +1 -0
testbed/EleutherAI__lm-evaluation-harness/.coveragerc ADDED
@@ -0,0 +1,28 @@
+ [run]
+
+ # tasks that aren't wired up.
+ omit =
+     lm_eval/tasks/quac.py
+     lm_eval/tasks/storycloze.py
+     lm_eval/tasks/cbt.py
+     lm_eval/tasks/sat.py
+     lm_eval/tasks/triviaqa.py
+     lm_eval/tasks/naturalqs.py
+     lm_eval/models/dummy.py
+
+ [report]
+ exclude_lines =
+     # Skip any pass lines such as may be used for @abstractmethod
+     pass
+
+     # Have to re-enable the standard pragma
+     pragma: no cover
+
+     # Don't complain about missing debug-only code:
+     def __repr__
+     if self\.debug
+
+     # Don't complain if tests don't hit defensive assertion code:
+     raise AssertionError
+     raise NotImplementedError
+     return NotImplemented
testbed/EleutherAI__lm-evaluation-harness/.flake8 ADDED
@@ -0,0 +1,5 @@
+ [flake8]
+ ignore = E203, E266, E501, W503, F403, F401, C901
+ max-line-length = 127
+ max-complexity = 10
+ select = B,C,E,F,W,T4,B9
testbed/EleutherAI__lm-evaluation-harness/.gitignore ADDED
@@ -0,0 +1,24 @@
+ env
+ *.pyc
+ output/
+ data/
+ lm_cache
+ .idea
+ build
+ dist
+ *.egg-info
+ venv
+ .vscode/
+ temp
+ __pycache__
+ .ipynb_checkpoints
+ temp
+ test_logs/
+ # IPython
+ profile_default/
+ ipython_config.py
+ # don't track (the default location of) the cached requests
+ lm_eval/caching/.cache
+ # don't track files created by wandb
+ wandb
+ examples/wandb
testbed/EleutherAI__lm-evaluation-harness/.pre-commit-config.yaml ADDED
@@ -0,0 +1,54 @@
+ # Ignore test linting to avoid conflicting changes to version stability.
+ exclude: ^tests/testdata/
+ repos:
+   - repo: https://github.com/pre-commit/pre-commit-hooks
+     rev: v4.5.0
+     hooks:
+       - id: check-added-large-files
+       - id: check-ast
+       - id: check-byte-order-marker
+       - id: check-case-conflict
+       - id: check-json
+       - id: check-merge-conflict
+         args: [--assume-in-merge]
+       - id: check-symlinks
+       - id: check-yaml
+         args: ["--unsafe"]
+       - id: destroyed-symlinks
+       - id: detect-private-key
+       - id: end-of-file-fixer
+       - id: no-commit-to-branch
+         always_run: false
+       - id: requirements-txt-fixer
+       - id: trailing-whitespace
+         args: [--markdown-linebreak-ext=md]
+       - id: fix-byte-order-marker
+         exclude: docs/CNAME
+       - id: fix-encoding-pragma
+         args: [--remove]
+       - id: mixed-line-ending
+         args: [--fix=lf]
+   - repo: https://github.com/astral-sh/ruff-pre-commit
+     rev: v0.4.8
+     hooks:
+       # Run the linter.
+       - id: ruff
+         args:
+           - --fix
+       # Run the formatter.
+       - id: ruff-format
+   - repo: https://github.com/codespell-project/codespell
+     rev: v2.3.0
+     hooks:
+       - id: codespell
+         exclude: >
+           (?x)^(
+               .*\.json|ignore.txt|lm_eval/tasks/.*|.*yaml|.*\.ipynb
+           )$
+         args: [--check-filenames, --check-hidden, --ignore-words=ignore.txt]
+ # - repo: https://github.com/pre-commit/mirrors-mypy
+ #   rev: v1.5.1
+ #   hooks:
+ #     - id: mypy
+ #       additional_dependencies: [".[sentencepiece,multilingual,promptsource,gptq]", "types-PyYAML", "types-requests"]
+ #       exclude: ^tests/.*$
testbed/EleutherAI__lm-evaluation-harness/CITATION.bib ADDED
@@ -0,0 +1,10 @@
+ @misc{eval-harness,
+   author = {Gao, Leo and Tow, Jonathan and Abbasi, Baber and Biderman, Stella and Black, Sid and DiPofi, Anthony and Foster, Charles and Golding, Laurence and Hsu, Jeffrey and Le Noac'h, Alain and Li, Haonan and McDonell, Kyle and Muennighoff, Niklas and Ociepa, Chris and Phang, Jason and Reynolds, Laria and Schoelkopf, Hailey and Skowron, Aviya and Sutawika, Lintang and Tang, Eric and Thite, Anish and Wang, Ben and Wang, Kevin and Zou, Andy},
+   title = {A framework for few-shot language model evaluation},
+   month = 12,
+   year = 2023,
+   publisher = {Zenodo},
+   version = {v0.4.0},
+   doi = {10.5281/zenodo.10256836},
+   url = {https://zenodo.org/records/10256836}
+ }
testbed/EleutherAI__lm-evaluation-harness/LICENSE.md ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2020 EleutherAI
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
testbed/EleutherAI__lm-evaluation-harness/README.md ADDED
@@ -0,0 +1,497 @@
+ # Language Model Evaluation Harness
+
+ [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.10256836.svg)](https://doi.org/10.5281/zenodo.10256836)
+
+ ---
+
+ *Latest News 📣*
+
+ - [2024/07] [API model](docs/API_guide.md) support has been updated and refactored, introducing support for batched and async requests, and making it significantly easier to customize and use for your own purposes. **To run Llama 405B, we recommend using vLLM's OpenAI-compliant API to host the model, and using the `local-completions` model type to evaluate it.**
+ - [2024/07] New Open LLM Leaderboard tasks have been added! You can find them under the [leaderboard](lm_eval/tasks/leaderboard/README.md) task group.
+
+ ---
+
+ ## Announcement
+ **A new v0.4.0 release of lm-evaluation-harness is available!**
+
+ New updates and features include:
+
+ - **New Open LLM Leaderboard tasks have been added! You can find them under the [leaderboard](lm_eval/tasks/leaderboard/README.md) task group.**
+ - Internal refactoring
+ - Config-based task creation and configuration
+ - Easier import and sharing of externally-defined task config YAMLs
+ - Support for Jinja2 prompt design, easy modification of prompts + prompt imports from Promptsource
+ - More advanced configuration options, including output post-processing, answer extraction, multiple LM generations per document, configurable fewshot settings, and more
+ - Speedups and new modeling libraries supported, including: faster data-parallel HF model usage, vLLM support, MPS support with Hugging Face, and more
+ - Logging and usability changes
+ - New tasks including CoT BIG-Bench-Hard, Belebele, user-defined task groupings, and more
+
+ Please see our updated documentation pages in `docs/` for more details.
+
+ Development will be continuing on the `main` branch, and we encourage you to give us feedback on what features are desired and how to improve the library further, or ask questions, either in issues or PRs on GitHub, or in the [EleutherAI discord](https://discord.gg/eleutherai)!
+
+ ---
+
+ ## Overview
+
+ This project provides a unified framework to test generative language models on a large number of different evaluation tasks.
+
+ **Features:**
+ - Over 60 standard academic benchmarks for LLMs, with hundreds of subtasks and variants implemented.
+ - Support for models loaded via [transformers](https://github.com/huggingface/transformers/) (including quantization via [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ)), [GPT-NeoX](https://github.com/EleutherAI/gpt-neox), and [Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed/), with a flexible tokenization-agnostic interface.
+ - Support for fast and memory-efficient inference with [vLLM](https://github.com/vllm-project/vllm).
+ - Support for commercial APIs including [OpenAI](https://openai.com) and [TextSynth](https://textsynth.com/).
+ - Support for evaluation on adapters (e.g. LoRA) supported in [HuggingFace's PEFT library](https://github.com/huggingface/peft).
+ - Support for local models and benchmarks.
+ - Evaluation with publicly available prompts ensures reproducibility and comparability between papers.
+ - Easy support for custom prompts and evaluation metrics.
+
+ The Language Model Evaluation Harness is the backend for 🤗 Hugging Face's popular [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard), has been used in [hundreds of papers](https://scholar.google.com/scholar?oi=bibs&hl=en&authuser=2&cites=15052937328817631261,4097184744846514103,1520777361382155671,17476825572045927382,18443729326628441434,14801318227356878622,7890865700763267262,12854182577605049984,15641002901115500560,5104500764547628290), and is used internally by dozens of organizations including NVIDIA, Cohere, BigScience, BigCode, Nous Research, and Mosaic ML.
+
+ ## Install
+
+ To install the `lm-eval` package from the GitHub repository, run:
+
+ ```bash
+ git clone https://github.com/EleutherAI/lm-evaluation-harness
+ cd lm-evaluation-harness
+ pip install -e .
+ ```
+
+ We also provide a number of optional dependencies for extended functionality. A detailed table is available at the end of this document.
+
+ ## Basic Usage
+ ### User Guide
+
+ A user guide detailing the full list of supported arguments is provided [here](./docs/interface.md), and on the terminal by calling `lm_eval -h`. Alternatively, you can use `lm-eval` instead of `lm_eval`.
+
+ A list of supported tasks (or groupings of tasks) can be viewed with `lm-eval --tasks list`. Task descriptions and links to corresponding subfolders are provided [here](./lm_eval/tasks/README.md).
+
+ ### Hugging Face `transformers`
+
+ To evaluate a model hosted on the [HuggingFace Hub](https://huggingface.co/models) (e.g. GPT-J-6B) on `hellaswag`, you can use the following command (this assumes you are using a CUDA-compatible GPU):
+
+ ```bash
+ lm_eval --model hf \
+     --model_args pretrained=EleutherAI/gpt-j-6B \
+     --tasks hellaswag \
+     --device cuda:0 \
+     --batch_size 8
+ ```
+
+ Additional arguments can be provided to the model constructor using the `--model_args` flag. Most notably, this supports the common practice of using the `revisions` feature on the Hub to store partially trained checkpoints, or to specify the datatype for running a model:
+
+ ```bash
+ lm_eval --model hf \
+     --model_args pretrained=EleutherAI/pythia-160m,revision=step100000,dtype="float" \
+     --tasks lambada_openai,hellaswag \
+     --device cuda:0 \
+     --batch_size 8
+ ```
+
+ Models that are loaded via both `transformers.AutoModelForCausalLM` (autoregressive, decoder-only GPT-style models) and `transformers.AutoModelForSeq2SeqLM` (such as encoder-decoder models like T5) in Hugging Face are supported.
+
+ Batch size selection can be automated by setting the ```--batch_size``` flag to ```auto```. This will perform automatic detection of the largest batch size that will fit on your device. On tasks where there is a large difference between the longest and shortest example, it can be helpful to periodically recompute the largest batch size, to gain a further speedup. To do this, append ```:N``` to the above flag to automatically recompute the largest batch size ```N``` times. For example, to recompute the batch size 4 times, the command would be:
+
+ ```bash
+ lm_eval --model hf \
+     --model_args pretrained=EleutherAI/pythia-160m,revision=step100000,dtype="float" \
+     --tasks lambada_openai,hellaswag \
+     --device cuda:0 \
+     --batch_size auto:4
+ ```
+
+ > [!NOTE]
+ > Just like you can provide a local path to `transformers.AutoModel`, you can also provide a local path to `lm_eval` via `--model_args pretrained=/path/to/model`.
+
+ #### Multi-GPU Evaluation with Hugging Face `accelerate`
+
+ We support three main ways of using Hugging Face's [accelerate 🚀](https://github.com/huggingface/accelerate) library for multi-GPU evaluation.
+
+ To perform *data-parallel evaluation* (where each GPU loads a **separate full copy** of the model), we leverage the `accelerate` launcher as follows:
+
+ ```
+ accelerate launch -m lm_eval --model hf \
+     --tasks lambada_openai,arc_easy \
+     --batch_size 16
+ ```
+ (or via `accelerate launch --no-python lm_eval`).
+
+ For cases where your model can fit on a single GPU, this allows you to evaluate on K GPUs K times faster than on one.
+
+ **WARNING**: This setup does not work with FSDP model sharding, so in `accelerate config` FSDP must be disabled, or the NO_SHARD FSDP option must be used.
+
+ The second way of using `accelerate` for multi-GPU evaluation is when your model is *too large to fit on a single GPU.*
+
+ In this setting, run the library *outside the `accelerate` launcher*, but pass `parallelize=True` to `--model_args` as follows:
+
+ ```
+ lm_eval --model hf \
+     --tasks lambada_openai,arc_easy \
+     --model_args parallelize=True \
+     --batch_size 16
+ ```
+
+ This means that your model's weights will be split across all available GPUs.
+
+ For more advanced users or even larger models, we allow for the following arguments when `parallelize=True` as well (an illustrative command follows this list):
+ - `device_map_option`: How to split model weights across available GPUs. Defaults to `"auto"`.
+ - `max_memory_per_gpu`: The max GPU memory to use per GPU in loading the model.
+ - `max_cpu_memory`: The max amount of CPU memory to use when offloading the model weights to RAM.
+ - `offload_folder`: A folder where model weights will be offloaded to disk if needed.
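+
+ A sketch of a sharded run that combines these options (the memory cap and offload path here are illustrative values, not recommendations):
+
+ ```
+ lm_eval --model hf \
+     --tasks lambada_openai,arc_easy \
+     --model_args parallelize=True,device_map_option=auto,max_memory_per_gpu=40GB,offload_folder=./offload \
+     --batch_size 16
+ ```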
+
+ The third option is to use both at the same time. This will allow you to take advantage of both data parallelism and model sharding, and is especially useful for models that are too large to fit on a single GPU.
+
+ ```
+ accelerate launch --multi_gpu --num_processes {nb_of_copies_of_your_model} \
+     -m lm_eval --model hf \
+     --tasks lambada_openai,arc_easy \
+     --model_args parallelize=True \
+     --batch_size 16
+ ```
+
+ To learn more about model parallelism and how to use it with the `accelerate` library, see the [accelerate documentation](https://huggingface.co/docs/transformers/v4.15.0/en/parallelism).
+
+ **Warning: We do not natively support multi-node evaluation using the `hf` model type! Please reference [our GPT-NeoX library integration](https://github.com/EleutherAI/gpt-neox/blob/main/eval.py) for an example of code in which a custom multi-machine evaluation script is written.**
+
+ **Note: we do not currently support multi-node evaluations natively, and advise using either an externally hosted server to run inference requests against, or creating a custom integration with your distributed framework [as is done for the GPT-NeoX library](https://github.com/EleutherAI/gpt-neox/blob/main/eval_tasks/eval_adapter.py).**
+
+ ### NVIDIA `nemo` models
+
+ [NVIDIA NeMo Framework](https://github.com/NVIDIA/NeMo) is a generative AI framework built for researchers and PyTorch developers working on language models.
+
+ To evaluate a `nemo` model, start by installing NeMo following [the documentation](https://github.com/NVIDIA/NeMo?tab=readme-ov-file#installation). We highly recommend using the NVIDIA PyTorch or NeMo container, especially if you have issues installing Apex or any other dependencies (see [latest released containers](https://github.com/NVIDIA/NeMo/releases)). Please also install the LM Evaluation Harness library following the instructions in [the Install section](https://github.com/EleutherAI/lm-evaluation-harness/tree/main?tab=readme-ov-file#install).
+
+ NeMo models can be obtained through the [NVIDIA NGC Catalog](https://catalog.ngc.nvidia.com/models) or on [NVIDIA's Hugging Face page](https://huggingface.co/nvidia). The [NVIDIA NeMo Framework](https://github.com/NVIDIA/NeMo/tree/main/scripts/nlp_language_modeling) provides conversion scripts to convert the `hf` checkpoints of popular models like Llama, Falcon, Mixtral, or MPT to `nemo`.
+
+ Run a `nemo` model on one GPU:
+ ```bash
+ lm_eval --model nemo_lm \
+     --model_args path=<path_to_nemo_model> \
+     --tasks hellaswag \
+     --batch_size 32
+ ```
+
+ It is recommended to unpack the `nemo` model in advance rather than inside the Docker container, where it may overflow disk space. To do so, you can run:
+
+ ```
+ mkdir MY_MODEL
+ tar -xvf MY_MODEL.nemo -C MY_MODEL
+ ```
+
+ #### Multi-GPU evaluation with NVIDIA `nemo` models
+
+ By default, only one GPU is used, but we also support either data replication or tensor/pipeline parallelism during evaluation, on one node.
+
+ 1) To enable data replication, set `devices` in `model_args` to the number of data replicas to run. For example, the command to run 8 data replicas over 8 GPUs is:
+ ```bash
+ torchrun --nproc-per-node=8 --no-python lm_eval \
+     --model nemo_lm \
+     --model_args path=<path_to_nemo_model>,devices=8 \
+     --tasks hellaswag \
+     --batch_size 32
+ ```
+
+ 2) To enable tensor and/or pipeline parallelism, set `tensor_model_parallel_size` and/or `pipeline_model_parallel_size` in `model_args`. In addition, set `devices` equal to the product of `tensor_model_parallel_size` and `pipeline_model_parallel_size`. For example, the command to use one node of 4 GPUs with tensor parallelism of 2 and pipeline parallelism of 2 is:
+ ```bash
+ torchrun --nproc-per-node=4 --no-python lm_eval \
+     --model nemo_lm \
+     --model_args path=<path_to_nemo_model>,devices=4,tensor_model_parallel_size=2,pipeline_model_parallel_size=2 \
+     --tasks hellaswag \
+     --batch_size 32
+ ```
+ Note that it is recommended to replace the `python` command with `torchrun --nproc-per-node=<number of devices> --no-python` to facilitate loading the model into the GPUs. This is especially important for large checkpoints loaded into multiple GPUs.
+
+ Not supported yet: multi-node evaluation and combinations of data replication with tensor or pipeline parallelism.
+
+ ### Tensor + Data Parallel and Optimized Inference with `vLLM`
+
+ We also support vLLM for faster inference on [supported model types](https://docs.vllm.ai/en/latest/models/supported_models.html), with speedups especially pronounced when splitting a model across multiple GPUs. For single-GPU or multi-GPU (tensor parallel, data parallel, or a combination of both) inference, for example:
+
+ ```bash
+ lm_eval --model vllm \
+     --model_args pretrained={model_name},tensor_parallel_size={GPUs_per_model},dtype=auto,gpu_memory_utilization=0.8,data_parallel_size={model_replicas} \
+     --tasks lambada_openai \
+     --batch_size auto
+ ```
+ To use vLLM, run `pip install lm_eval[vllm]`. For a full list of supported vLLM configurations, please reference our [vLLM integration](https://github.com/EleutherAI/lm-evaluation-harness/blob/e74ec966556253fbe3d8ecba9de675c77c075bce/lm_eval/models/vllm_causallms.py) and the vLLM documentation.
+
+ vLLM occasionally differs in output from Hugging Face. We treat Hugging Face as the reference implementation, and provide a [script](./scripts/model_comparator.py) for checking the validity of vLLM results against HF.
+
+ > [!TIP]
+ > For fastest performance, we recommend using `--batch_size auto` for vLLM whenever possible, to leverage its continuous batching functionality!
+
+ > [!TIP]
+ > Passing `max_model_len=4096` or some other reasonable default to vLLM through model args may cause speedups or prevent out-of-memory errors when trying to use auto batch size, such as for Mistral-7B-v0.1 which defaults to a maximum length of 32k.
+
+ ### Model APIs and Inference Servers
+
+ Our library also supports the evaluation of models served via several commercial APIs, and we hope to implement support for the most commonly used performant local/self-hosted inference servers.
+
+ To call a hosted model, use:
+
+ ```bash
+ export OPENAI_API_KEY=YOUR_KEY_HERE
+ lm_eval --model openai-completions \
+     --model_args model=davinci \
+     --tasks lambada_openai,hellaswag
+ ```
+
+ We also support using your own local inference server with servers that mirror the OpenAI Completions and ChatCompletions APIs.
+
+ ```bash
+ lm_eval --model local-completions --tasks gsm8k --model_args model=facebook/opt-125m,base_url=http://{yourip}:8000/v1/completions,num_concurrent=1,max_retries=3,tokenized_requests=False,batch_size=16
+ ```
+ Note that for externally hosted models, configs such as `--device`, which relate to where to place a local model, should not be used and do not function. Just like you can use `--model_args` to pass arbitrary arguments to the model constructor for local models, you can use it to pass arbitrary arguments to the model API for hosted models. See the documentation of the hosting service for information on what arguments they support.
+
+ | API or Inference Server | Implemented? | `--model <xxx>` name | Models supported | Request types |
+ |---|---|---|---|---|
+ | OpenAI Completions | :heavy_check_mark: | `openai-completions`, `local-completions` | All OpenAI Completions API models | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
+ | OpenAI ChatCompletions | :heavy_check_mark: | `openai-chat-completions`, `local-chat-completions` | [All ChatCompletions API models](https://platform.openai.com/docs/guides/gpt) | `generate_until` (no logprobs) |
+ | Anthropic | :heavy_check_mark: | `anthropic` | [Supported Anthropic Engines](https://docs.anthropic.com/claude/reference/selecting-a-model) | `generate_until` (no logprobs) |
+ | Anthropic Chat | :heavy_check_mark: | `anthropic-chat`, `anthropic-chat-completions` | [Supported Anthropic Engines](https://docs.anthropic.com/claude/docs/models-overview) | `generate_until` (no logprobs) |
+ | Textsynth | :heavy_check_mark: | `textsynth` | [All supported engines](https://textsynth.com/documentation.html#engines) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
+ | Cohere | [:hourglass: - blocked on Cohere API bug](https://github.com/EleutherAI/lm-evaluation-harness/pull/395) | N/A | [All `cohere.generate()` engines](https://docs.cohere.com/docs/models) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
+ | [Llama.cpp](https://github.com/ggerganov/llama.cpp) (via [llama-cpp-python](https://github.com/abetlen/llama-cpp-python)) | :heavy_check_mark: | `gguf`, `ggml` | [All models supported by llama.cpp](https://github.com/ggerganov/llama.cpp) | `generate_until`, `loglikelihood` (perplexity evaluation not yet implemented) |
+ | vLLM | :heavy_check_mark: | `vllm` | [Most HF Causal Language Models](https://docs.vllm.ai/en/latest/models/supported_models.html) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
+ | Mamba | :heavy_check_mark: | `mamba_ssm` | [Mamba architecture Language Models via the `mamba_ssm` package](https://huggingface.co/state-spaces) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
+ | Huggingface Optimum (Causal LMs) | ✔️ | `openvino` | Any decoder-only AutoModelForCausalLM converted with Huggingface Optimum into OpenVINO™ Intermediate Representation (IR) format | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
+ | Neuron via AWS Inf2 (Causal LMs) | ✔️ | `neuronx` | Any decoder-only AutoModelForCausalLM supported to run on [huggingface-ami image for inferentia2](https://aws.amazon.com/marketplace/pp/prodview-gr3e6yiscria2) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
+ | [Neural Magic DeepSparse](https://github.com/neuralmagic/deepsparse) | ✔️ | `deepsparse` | Any LM from [SparseZoo](https://sparsezoo.neuralmagic.com/) or on [HF Hub with the "deepsparse" tag](https://huggingface.co/models?other=deepsparse) | `generate_until`, `loglikelihood` |
+ | [Neural Magic SparseML](https://github.com/neuralmagic/sparseml) | ✔️ | `sparseml` | Any decoder-only AutoModelForCausalLM from [SparseZoo](https://sparsezoo.neuralmagic.com/) or on [HF Hub](https://huggingface.co/neuralmagic). Especially useful for models with quantization like [`zoo:llama2-7b-gsm8k_llama2_pretrain-pruned60_quantized`](https://sparsezoo.neuralmagic.com/models/llama2-7b-gsm8k_llama2_pretrain-pruned60_quantized) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
+ | Your local inference server! | :heavy_check_mark: | `local-completions` or `local-chat-completions` | Support for OpenAI API-compatible servers, with easy customization for other APIs. | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
+
+ Models which do not supply logits or logprobs can be used with tasks of type `generate_until` only, while local models, or APIs that supply logprobs/logits of their prompts, can be run on all task types: `generate_until`, `loglikelihood`, `loglikelihood_rolling`, and `multiple_choice`.
+
+ For more information on the different task `output_types` and model request types, see [our documentation](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/model_guide.md#interface).
+
+ > [!NOTE]
+ > For best performance with closed chat model APIs such as Anthropic Claude 3 and GPT-4, we recommend carefully looking at a few sample outputs using `--limit 10` first to confirm answer extraction and scoring on generative tasks is performing as expected. Providing `system="<some system prompt here>"` within `--model_args` for `anthropic-chat-completions`, to instruct the model what format to respond in, may be useful.
+
+
+ ### Other Frameworks
+
+ A number of other libraries contain scripts for calling the eval harness through their library. These include [GPT-NeoX](https://github.com/EleutherAI/gpt-neox/blob/main/eval_tasks/eval_adapter.py), [Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed/blob/main/examples/MoE/readme_evalharness.md), and [mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/blob/master/eval_harness.py).
+
+ To create your own custom integration you can follow instructions from [this tutorial](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/interface.md#external-library-usage).
+
+ ### Additional Features
+ > [!NOTE]
+ > For tasks unsuitable for direct evaluation (either due to risks associated with executing untrusted code or complexities in the evaluation process), the `--predict_only` flag is available to obtain decoded generations for post-hoc evaluation.
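+
+ A minimal sketch of collecting generations without scoring (the model and task shown here are placeholders):
+
+ ```bash
+ lm_eval --model hf \
+     --model_args pretrained=EleutherAI/pythia-160m \
+     --tasks gsm8k \
+     --predict_only \
+     --log_samples \
+     --output_path output/predictions
+ ```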
+
+ If you have a Metal-compatible Mac, you can run the eval harness using the MPS back-end by replacing `--device cuda:0` with `--device mps` (requires PyTorch version 2.1 or higher). **Note that the PyTorch MPS backend is still in early stages of development, so correctness issues or unsupported operations may exist. If you observe oddities in model performance on the MPS back-end, we recommend first checking that a forward pass of your model on `--device cpu` and `--device mps` match.**
+
+ > [!NOTE]
+ > You can inspect what the LM inputs look like by running the following command:
+ > ```bash
+ > python write_out.py \
+ >     --tasks <task1,task2,...> \
+ >     --num_fewshot 5 \
+ >     --num_examples 10 \
+ >     --output_base_path /path/to/output/folder
+ > ```
+ > This will write out one text file for each task.
+
+ To verify the data integrity of the tasks you're performing in addition to running the tasks themselves, you can use the `--check_integrity` flag:
+
+ ```bash
+ lm_eval --model openai \
+     --model_args engine=davinci \
+     --tasks lambada_openai,hellaswag \
+     --check_integrity
+ ```
+
+ ## Advanced Usage Tips
+
+ For models loaded with the HuggingFace `transformers` library, any arguments provided via `--model_args` get passed to the relevant constructor directly. This means that anything you can do with `AutoModel` can be done with our library. For example, you can pass a local path via `pretrained=` or use models finetuned with [PEFT](https://github.com/huggingface/peft) by taking the call you would run to evaluate the base model and adding `,peft=PATH` to the `model_args` argument:
+ ```bash
+ lm_eval --model hf \
+     --model_args pretrained=EleutherAI/gpt-j-6b,parallelize=True,load_in_4bit=True,peft=nomic-ai/gpt4all-j-lora \
+     --tasks openbookqa,arc_easy,winogrande,hellaswag,arc_challenge,piqa,boolq \
+     --device cuda:0
+ ```
+
+ Models provided as delta weights can be easily loaded using the Hugging Face transformers library. Within `--model_args`, set the `delta` argument to specify the delta weights, and use the `pretrained` argument to designate the base model to which they will be applied:
+ ```bash
+ lm_eval --model hf \
+     --model_args pretrained=Ejafa/llama_7B,delta=lmsys/vicuna-7b-delta-v1.1 \
+     --tasks hellaswag
+ ```
+
+ [GPTQ](https://github.com/PanQiWei/AutoGPTQ) quantized models can be loaded by specifying their file names in `,autogptq=NAME` (or `,autogptq=True` for default names) in the `model_args` argument:
+
+ ```bash
+ lm_eval --model hf \
+     --model_args pretrained=model-name-or-path,autogptq=model.safetensors,gptq_use_triton=True \
+     --tasks hellaswag
+ ```
+
+ We support wildcards in task names; for example, you can run all of the machine-translated LAMBADA tasks via `--tasks lambada_openai_mt_*`.
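+
+ A sketch of such a run (the model and remaining flags are illustrative, reusing options from the examples above):
+
+ ```bash
+ lm_eval --model hf \
+     --model_args pretrained=EleutherAI/pythia-160m \
+     --tasks lambada_openai_mt_* \
+     --device cuda:0 \
+     --batch_size 8
+ ```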
+
+ ## Saving Results
+
+ To save evaluation results, provide an `--output_path`. We also support logging model responses with the `--log_samples` flag for post-hoc analysis.
+
+ Additionally, one can provide a directory with `--use_cache` to cache the results of prior runs. This allows you to avoid repeated execution of the same (model, task) pairs for re-scoring.
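+
+ A sketch combining these flags (the cache directory and output path here are illustrative):
+
+ ```bash
+ lm_eval --model hf \
+     --model_args pretrained=EleutherAI/pythia-160m \
+     --tasks hellaswag \
+     --output_path results \
+     --log_samples \
+     --use_cache ./lm_cache
+ ```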
+
+ To push results and samples to the Hugging Face Hub, first ensure an access token with write access is set in the `HF_TOKEN` environment variable. Then, use the `--hf_hub_log_args` flag to specify the organization, repository name, repository visibility, and whether to push results and samples to the Hub - [example dataset on the HF Hub](https://huggingface.co/datasets/KonradSzafer/lm-eval-results-demo). For instance:
+
+ ```bash
+ lm_eval --model hf \
+     --model_args pretrained=model-name-or-path,autogptq=model.safetensors,gptq_use_triton=True \
+     --tasks hellaswag \
+     --log_samples \
+     --output_path results \
+     --hf_hub_log_args hub_results_org=EleutherAI,hub_repo_name=lm-eval-results,push_results_to_hub=True,push_samples_to_hub=True,public_repo=False
+ ```
+
+ This allows you to easily download the results and samples from the Hub, using:
+ ```python
+ from datasets import load_dataset
+
+ load_dataset("EleutherAI/lm-eval-results-private", "hellaswag", "latest")
+ ```
+
+ For a full list of supported arguments, check out the [interface](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/interface.md) guide in our documentation!
+
+ ## Visualizing Results
+
+ You can seamlessly visualize and analyze the results of your evaluation harness runs using both Weights & Biases (W&B) and Zeno.
+
+ ### Zeno
+
+ You can use [Zeno](https://zenoml.com) to visualize the results of your eval harness runs.
+
+ First, head to [hub.zenoml.com](https://hub.zenoml.com) to create an account and get an API key [on your account page](https://hub.zenoml.com/account).
+ Add this key as an environment variable:
+
+ ```bash
+ export ZENO_API_KEY=[your api key]
+ ```
+
+ You'll also need to install the `lm_eval[zeno]` package extra:
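+
+ ```bash
+ pip install lm_eval[zeno]
+ ```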
+
+ To visualize the results, run the eval harness with the `--log_samples` and `--output_path` flags.
+ We expect `output_path` to contain multiple folders that represent individual model names.
+ You can thus run your evaluation on any number of tasks and models and upload all of the results as projects on Zeno.
+
+ ```bash
+ lm_eval \
+     --model hf \
+     --model_args pretrained=EleutherAI/gpt-j-6B \
+     --tasks hellaswag \
+     --device cuda:0 \
+     --batch_size 8 \
+     --log_samples \
+     --output_path output/gpt-j-6B
+ ```
+
+ Then, you can upload the resulting data using the `zeno_visualize` script:
+
+ ```bash
+ python scripts/zeno_visualize.py \
+     --data_path output \
+     --project_name "Eleuther Project"
+ ```
+
+ This will use all subfolders in `data_path` as different models and upload all tasks within these model folders to Zeno.
+ If you run the eval harness on multiple tasks, the `project_name` will be used as a prefix and one project will be created per task.
+
+ You can find an example of this workflow in [examples/visualize-zeno.ipynb](examples/visualize-zeno.ipynb).
+
+ ### Weights and Biases
+
+ With the [Weights and Biases](https://wandb.ai/site) integration, you can now spend more time extracting deeper insights into your evaluation results. The integration is designed to streamline the process of logging and visualizing experiment results using the Weights & Biases (W&B) platform.
+
+ The integration provides functionality
+
+ - to automatically log the evaluation results,
+ - log the samples as W&B Tables for easy visualization,
+ - log the `results.json` file as an artifact for version control,
+ - log the `<task_name>_eval_samples.json` file if the samples are logged,
+ - generate a comprehensive report for analysis and visualization with all the important metrics,
+ - log task and CLI-specific configs,
+ - and more out of the box, such as the command used to run the evaluation, GPU/CPU counts, timestamp, etc.
+
+ First, install the `lm_eval[wandb]` package extra: `pip install lm_eval[wandb]`.
+
+ Authenticate your machine with your unique W&B token (visit https://wandb.ai/authorize to get one), then run `wandb login` in your command line terminal.
+
+ Run the eval harness as usual, adding the `--wandb_args` flag. Use this flag to provide arguments for initializing a wandb run ([wandb.init](https://docs.wandb.ai/ref/python/init)) as comma-separated string arguments.
+
+ ```bash
+ lm_eval \
+     --model hf \
+     --model_args pretrained=microsoft/phi-2,trust_remote_code=True \
+     --tasks hellaswag,mmlu_abstract_algebra \
+     --device cuda:0 \
+     --batch_size 8 \
+     --output_path output/phi-2 \
+     --limit 10 \
+     --wandb_args project=lm-eval-harness-integration \
+     --log_samples
+ ```
+
+ In the stdout, you will find a link to the W&B run page as well as a link to the generated report. You can find an example of this workflow in [examples/visualize-wandb.ipynb](examples/visualize-wandb.ipynb), and an example of how to integrate it beyond the CLI.
+
+ ## How to Contribute or Learn More?
+
+ For more information on the library and how everything fits together, check out all of our [documentation pages](https://github.com/EleutherAI/lm-evaluation-harness/tree/main/docs)! We plan to post a larger roadmap of desired + planned library improvements soon, with more information on how contributors can help.
+
+ ### Implementing new tasks
+
+ To implement a new task in the eval harness, see [this guide](./docs/new_task_guide.md).
+
+ In general, we follow this priority list for addressing concerns about prompting and other eval details:
+ 1. If there is widespread agreement among people who train LLMs, use the agreed upon procedure.
+ 2. If there is a clear and unambiguous official implementation, use that procedure.
+ 3. If there is widespread agreement among people who evaluate LLMs, use the agreed upon procedure.
+ 4. If there are multiple common implementations but not universal or widespread agreement, use our preferred option among the common implementations. As before, prioritize choosing from among the implementations found in LLM training papers.
+
+ These are guidelines and not rules, and can be overruled in special circumstances.
+
+ We try to prioritize agreement with the procedures used by other groups to decrease the harm when people inevitably compare runs across different papers despite our discouragement of the practice. Historically, we also prioritized the implementation from [Language Models are Few Shot Learners](https://arxiv.org/abs/2005.14165) as our original goal was specifically to compare results with that paper.
+
+ ### Support
+
+ The best way to get support is to open an issue on this repo or join the [EleutherAI Discord server](https://discord.gg/eleutherai). The `#lm-thunderdome` channel is dedicated to developing this project and the `#release-discussion` channel is for receiving support for our releases. If you've used the library and have had a positive (or negative) experience, we'd love to hear from you!
+
+ ## Optional Extras
+ Extra dependencies can be installed via `pip install -e ".[NAME]"`.
+
+ | Name | Use |
+ |-----------------|----------------------------------------------|
+ | api | For using api models (Anthropic, OpenAI API) |
+ | deepsparse | For running NM's DeepSparse models |
+ | dev | For linting PRs and contributions |
+ | gptq | For loading models with GPTQ |
+ | hf_transfer | For speeding up HF Hub file downloads |
+ | ifeval | For running the IFEval task |
+ | neuronx | For running on AWS inf2 instances |
+ | mamba | For loading Mamba SSM models |
+ | math | For running math task answer checking |
+ | multilingual | For multilingual tokenizers |
+ | optimum | For running Intel OpenVINO models |
+ | promptsource | For using PromptSource prompts |
+ | sentencepiece | For using the sentencepiece tokenizer |
+ | sparseml | For using NM's SparseML models |
+ | testing | For running library test suite |
+ | vllm | For loading models with vLLM |
+ | zeno | For visualizing results with Zeno |
+ | all | Loads all extras (not recommended) |
+
+ ## Cite as
+
+ ```
+ @misc{eval-harness,
+   author = {Gao, Leo and Tow, Jonathan and Abbasi, Baber and Biderman, Stella and Black, Sid and DiPofi, Anthony and Foster, Charles and Golding, Laurence and Hsu, Jeffrey and Le Noac'h, Alain and Li, Haonan and McDonell, Kyle and Muennighoff, Niklas and Ociepa, Chris and Phang, Jason and Reynolds, Laria and Schoelkopf, Hailey and Skowron, Aviya and Sutawika, Lintang and Tang, Eric and Thite, Anish and Wang, Ben and Wang, Kevin and Zou, Andy},
+   title = {A framework for few-shot language model evaluation},
+   month = 07,
+   year = 2024,
+   publisher = {Zenodo},
+   version = {v0.4.3},
+   doi = {10.5281/zenodo.12608602},
+   url = {https://zenodo.org/records/12608602}
+ }
+ ```
testbed/EleutherAI__lm-evaluation-harness/ignore.txt ADDED
@@ -0,0 +1,8 @@
+ ROUGE
+ rouge
+ nin
+ maka
+ mor
+ te
+ ond
+ extraversion
testbed/EleutherAI__lm-evaluation-harness/mypy.ini ADDED
@@ -0,0 +1,29 @@
+ [mypy]
+ python_version = 3.8
+ show_traceback = True
+ check_untyped_defs = True
+ no_implicit_reexport = True
+ warn_unreachable = True
+ warn_unused_configs = True
+ warn_unused_ignores = True
+ warn_redundant_casts = True
+
+ # We ignore errors everywhere to gradually add type annotations
+
+ [mypy-lm_eval.*]
+ ignore_errors = True
+
+ [mypy-lm_eval.api.*]
+ ignore_errors = True
+
+ [mypy-lm_eval.prompts.*]
+ ignore_errors = True
+
+ [mypy-lm_eval.models.*]
+ ignore_errors = True
+
+ [mypy-scripts.*]
+ ignore_errors = True
+
+ [mypy-main]
+ ignore_errors = True
testbed/EleutherAI__lm-evaluation-harness/pile_statistics.json ADDED
@@ -0,0 +1,37 @@
+ {
+     "Data": "Pile statistics",
+     "Document Count": 210607728,
+     "Total Pile Characters": 421215456,
+     "File Start Offsets": [
+         0,
+         7021438,
+         14042822,
+         21066113,
+         28086515,
+         35106072,
+         42123306,
+         49145091,
+         56165817,
+         63185587,
+         70211208,
+         77234322,
+         84249267,
+         91267634,
+         98285983,
+         105305110,
+         112322489,
+         119342491,
+         126367373,
+         133389153,
+         140412039,
+         147432373,
+         154452516,
+         161470190,
+         168492733,
+         175512521,
+         182526939,
+         189547478,
+         196565318,
+         203583306
+     ]
+ }
testbed/EleutherAI__lm-evaluation-harness/requirements.txt ADDED
@@ -0,0 +1 @@
+ -e .
testbed/EleutherAI__lm-evaluation-harness/setup.py ADDED
@@ -0,0 +1,5 @@
+ import setuptools
+
+
+ # This is to make sure that the package supports editable installs
+ setuptools.setup()
testbed/EleutherAI__lm-evaluation-harness/tests/__init__.py ADDED
File without changes
testbed/EleutherAI__lm-evaluation-harness/tests/test_cli.py ADDED
@@ -0,0 +1,43 @@
+ import argparse
+
+ import pytest
+
+ import lm_eval.__main__
+
+
+ def test_cli_parse_error():
+     """
+     Assert error raised if cli args argument doesn't have type
+     """
+     with pytest.raises(ValueError):
+         parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
+         parser.add_argument(
+             "--model", "-m", type=str, default="hf", help="Name of model e.g. `hf`"
+         )
+         parser.add_argument(
+             "--tasks",
+             "-t",
+             default=None,
+             metavar="task1,task2",
+             help="To get full list of tasks, use the command lm-eval --tasks list",
+         )
+         lm_eval.__main__.check_argument_types(parser)
+
+
+ def test_cli_parse_no_error():
+     """
+     Assert typed arguments are parsed correctly
+     """
+     parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
+     parser.add_argument(
+         "--model", "-m", type=str, default="hf", help="Name of model e.g. `hf`"
+     )
+     parser.add_argument(
+         "--tasks",
+         "-t",
+         type=str,
+         default=None,
+         metavar="task1,task2",
+         help="To get full list of tasks, use the command lm-eval --tasks list",
+     )
+     lm_eval.__main__.check_argument_types(parser)
testbed/EleutherAI__lm-evaluation-harness/tests/test_evaluator.py ADDED
@@ -0,0 +1,151 @@
+ import os
+ import re
+ from typing import List
+
+ import pytest
+
+ import lm_eval.api as api
+ import lm_eval.evaluator as evaluator
+ from lm_eval import tasks
+ from lm_eval.utils import make_table
+
+
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
+ # TODO: more fine grained unit tests rather than this big honking integration
+ # test once we break evaluator into smaller, more manageable pieces
+
+
+ @pytest.mark.parametrize(
+     "task_name,limit,model,model_args,bootstrap_iters",
+     [
+         (
+             ["arc_easy"],
+             10,
+             "hf",
+             "pretrained=EleutherAI/pythia-160m,dtype=float32,device=cpu",
+             0,
+         ),
+         (
+             ["mmlu_abstract_algebra"],
+             None,
+             "hf",
+             "pretrained=EleutherAI/pythia-160m,dtype=float32,device=cpu",
+             10000,
+         ),
+     ],
+     ids=lambda d: f"{d}",
+ )
+ def test_evaluator(
+     task_name: List[str], limit: int, model: str, model_args: str, bootstrap_iters: int
+ ):
+     e1 = evaluator.simple_evaluate(
+         model=model,
+         tasks=task_name,
+         limit=limit,
+         model_args=model_args,
+         bootstrap_iters=bootstrap_iters,
+     )
+     assert e1 is not None
+
+     lm = api.registry.get_model(model).create_from_arg_string(
+         model_args,
+         {
+             "batch_size": None,
+             "max_batch_size": None,
+             "device": None,
+         },
+     )
+     task_manager = tasks.TaskManager()
+     task_dict = tasks.get_task_dict(task_name, task_manager)
+
+     e2 = evaluator.evaluate(
+         lm=lm,
+         task_dict=task_dict,
+         limit=limit,
+         bootstrap_iters=bootstrap_iters,
+     )
+
+     assert e2 is not None
+     # check that caching is working
+
+     def r(x):
+         if "arc_easy" in x["results"]:
+             return x["results"]["arc_easy"]
+         else:
+             return x["results"]["mmlu_abstract_algebra"]
+
+     assert all(
+         x == y
+         for x, y in zip([y for _, y in r(e1).items()], [y for _, y in r(e2).items()])
+     )
+
+
+ @pytest.mark.parametrize(
+     "task_name,limit,model,model_args",
+     [
+         (
+             ["ai2_arc"],
+             10,
+             "hf",
+             "pretrained=EleutherAI/pythia-14m,dtype=float32,device=cpu",
+         ),
+         (
+             ["mmlu_stem"],
+             10,
+             "hf",
+             "pretrained=EleutherAI/pythia-14m,dtype=float32,device=cpu",
+         ),
+         (
+             ["lambada_openai"],
+             10,
+             "hf",
+             "pretrained=EleutherAI/pythia-14m,dtype=float32,device=cpu",
+         ),
+         (
+             ["wikitext"],
+             10,
+             "hf",
+             "pretrained=EleutherAI/pythia-14m,dtype=float32,device=cpu",
+         ),
+     ],
+     ids=lambda d: f"{d}",
+ )
+ def test_printed_results(task_name: List[str], limit: int, model: str, model_args: str):
+     results = evaluator.simple_evaluate(
+         model=model,
+         tasks=task_name,
+         limit=limit,
+         model_args=model_args,
+         bootstrap_iters=0,
+         random_seed=0,
+         numpy_random_seed=0,
+         torch_random_seed=0,
+         fewshot_random_seed=0,
+     )
+
+     filename = "_".join(
+         (
+             "-".join(task_name),
+             str(limit),
+             str(model),
+             re.sub(r"[^a-zA-Z0-9_\-\.]", "-", model_args),
+         )
+     )
+     filepath = f"./tests/testdata/{filename}.txt"
+     with open(filepath, "r") as f:
+         t1 = f.read().strip()
+
+     t2 = make_table(results).strip()
+
+     t1_lines, t2_lines = t1.splitlines(), t2.splitlines()
+     assert len(t1_lines) == len(t2_lines)
+     for t1_line, t2_line in zip(t1_lines, t2_lines):
+         t1_items, t2_items = t1_line.split("|"), t2_line.split("|")
+         assert len(t1_items) == len(t2_items)
+         for t1_item, t2_item in zip(t1_items, t2_items):
+             try:
+                 t1_item = float(t1_item)
+                 t2_item = float(t2_item)
+                 assert abs(t1_item - t2_item) < 0.3
+             except ValueError:
+                 assert t1_item == t2_item
testbed/EleutherAI__lm-evaluation-harness/tests/test_include_path.py ADDED
@@ -0,0 +1,93 @@
+ import os
+
+ import pytest
+
+ import lm_eval.api as api
+ import lm_eval.evaluator as evaluator
+ from lm_eval import tasks
+
+
+ @pytest.mark.parametrize(
+     "limit,model,model_args",
+     [
+         (
+             10,
+             "hf",
+             "pretrained=EleutherAI/pythia-160m,dtype=float32,device=cpu",
+         ),
+     ],
+ )
+ def test_include_correctness(limit: int, model: str, model_args: str):
+     task_name = ["arc_easy"]
+
+     task_manager = tasks.TaskManager()
+     task_dict = tasks.get_task_dict(task_name, task_manager)
+
+     e1 = evaluator.simple_evaluate(
+         model=model,
+         tasks=task_name,
+         limit=limit,
+         model_args=model_args,
+     )
+     assert e1 is not None
+
+     # run with evaluate() and "arc_easy" test config (included from ./testconfigs path)
+     lm = api.registry.get_model(model).create_from_arg_string(
+         model_args,
+         {
+             "batch_size": None,
+             "max_batch_size": None,
+             "device": None,
+         },
+     )
+
+     task_name = ["arc_easy"]
+
+     task_manager = tasks.TaskManager(
+         include_path=os.path.dirname(os.path.abspath(__file__)) + "/testconfigs",
+         include_defaults=False,
+     )
+     task_dict = tasks.get_task_dict(task_name, task_manager)
+
+     e2 = evaluator.evaluate(
+         lm=lm,
+         task_dict=task_dict,
+         limit=limit,
+     )
+
+     assert e2 is not None
+     # check that caching is working
+
+     def r(x):
+         return x["results"]["arc_easy"]
+
+     assert all(
+         x == y
+         for x, y in zip([y for _, y in r(e1).items()], [y for _, y in r(e2).items()])
+     )
+
+
+ # test that setting include_defaults = False works as expected and that include_path works
+ def test_no_include_defaults():
+     task_name = ["arc_easy"]
+
+     task_manager = tasks.TaskManager(
+         include_path=os.path.dirname(os.path.abspath(__file__)) + "/testconfigs",
+         include_defaults=False,
+     )
+     # should succeed, because we've included an 'arc_easy' task from this dir
+     task_dict = tasks.get_task_dict(task_name, task_manager)
+
+     # should fail, since ./testconfigs has no arc_challenge task
+     task_name = ["arc_challenge"]
+     with pytest.raises(KeyError):
+         task_dict = tasks.get_task_dict(task_name, task_manager)  # noqa: F841
+
+
+ # test that include_path containing a task shadowing another task's name fails
+ # def test_shadowed_name_fails():
+
+ #     task_name = ["arc_easy"]
+
+ #     task_manager = tasks.TaskManager(include_path=os.path.dirname(os.path.abspath(__file__)) + "/testconfigs")
+ #     task_dict = tasks.get_task_dict(task_name, task_manager)
testbed/EleutherAI__lm-evaluation-harness/tests/test_janitor.py ADDED
@@ -0,0 +1,446 @@
+import os
+from collections import defaultdict
+
+from lm_eval.decontamination.janitor import (
+    Janitor,
+    form_ngrams,
+    split_indices,
+    word_ngrams,
+    word_ngrams_indices,
+)
+
+
+os.environ["TOKENIZERS_PARALLELISM"] = "false"
+TEST_SEQUENCE = (
+    "Hello my name is Bob, I like eating pizza, chicken, chips and ice cream. Maybe I should eat some"
+    " more salad but it's so booooring. I just... like eating pizza, chicken, chips and ice cream so much."
+)
+
+JANITOR_EXPECTED = (
+    "This is a @line #containing a certain number of characters, 76 to be exact. "
+    "This is a @line #containing a certain number of characters, 76 to be exact. "
+    "This is a @line #containing a certain number of characters, 76 to be exact. "
+    "This is a @line #containing "
+    " characters, 76 to be exact. "
+    "This is a @line #containing a certain number of characters, 76 to be exact. "
+    "This is a @line #containing a certain number of characters, 76 to be exact. "
+    "This is a @line #containing a certain number of characters, 76 to be exact. "
+)
+
+JANITOR_FILTH1 = "filth lots of dirty filthy filth"
+JANITOR_FILTH2 = "filth lots of filthy dirty filth"
+
+
+def simple_ngram(sequence, n):
+    # reference implementation: slide a window of width n across the sequence
+    ngrams = []
+    ngram = []
+    for x in sequence:
+        ngram.append(x)
+        if len(ngram) == n:
+            ngrams.append(tuple(ngram))
+            ngram = ngram[1:]
+
+    return ngrams
+
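+# e.g. simple_ngram("abcd", 2) == [("a", "b"), ("b", "c"), ("c", "d")]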
+
+def test_form_ngrams():
+    sequence = TEST_SEQUENCE
+
+    n_values = [1, 2, 3, 5, 13]
+    for n in n_values:
+        comparison = simple_ngram(sequence, n)
+        result_to_test = list(form_ngrams(iter(sequence), n))
+        assert len(comparison) == len(result_to_test)
+        assert comparison == result_to_test
+
+
+def test_word_ngrams():
+    sequence = TEST_SEQUENCE
+
+    words = sequence.split()
+
+    n_values = [1, 2, 3, 5, 13]
+    for n in n_values:
+        comparison = simple_ngram(words, n)
+        comparison = [" ".join(ngram) for ngram in comparison]
+        result_to_test = list(word_ngrams(sequence, n))
+        assert len(comparison) == len(result_to_test)
+        assert result_to_test == comparison
+
+
+def test_split_indices():
+    sequence = TEST_SEQUENCE
+
+    comparison = []
+    current_word = ""
+    for i, c in enumerate(sequence):
+        if c != " ":
+            current_word += c
+        else:
+            if current_word:
+                comparison.append((current_word, (i - len(current_word), i - 1)))
+            current_word = ""
+
+    if current_word:
+        len_sequence = len(sequence)
+        comparison.append(
+            (
+                current_word,
+                (len_sequence - len(current_word), len_sequence - 1),
+            )
+        )
+        current_word = ""
+
+    result_to_test = list(split_indices(sequence))
+    assert len(comparison) == len(result_to_test)
+    assert comparison == result_to_test
+
+
+def test_word_ngrams_indices():
+    sequence = TEST_SEQUENCE
+
+    n_values = [1, 2, 3, 5, 13]
+
+    for n in n_values:
+        ngrams = [" ".join(ngram) for ngram in simple_ngram(sequence.split(), n)]
+        tracker = defaultdict(int)
+        comparison = []
+        for ngram in ngrams:
+            while True:
+                start = sequence.find(ngram, tracker[ngram])
+                assert start != -1  # testing the test
+
+                end = start + len(ngram) - 1
+                tracker[ngram] = end + 1
+
+                # ignore partial word matches
+                if not (
+                    (start != 0 and sequence[start - 1] != " ")
+                    or (end != len(sequence) - 1 and sequence[end + 1] != " ")
+                ):
+                    break
+
+            comparison.append((ngram, (start, end)))
+
+        result_to_test = list(word_ngrams_indices(sequence, n))
+        assert len(result_to_test) == len(comparison)
+        assert result_to_test == comparison
+
+
+# Assumptions from the GPT-3 paper:
+# the 200 characters to remove include punctuation, and the removal window is really a
+# half-window: 200 characters are removed on each side of a contaminated match
+
+
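+# Every Janitor below is constructed with the same settings (an informal gloss;
+# see lm_eval.decontamination.janitor for the authoritative semantics):
+#   ngram_n              -- size of the contaminant n-grams being matched (1 or 6 here)
+#   window_to_remove     -- characters removed on each side of a match (200)
+#   too_dirty_cutoff     -- with more matches than this, the whole document is dropped (10)
+#   minimum_slice_length -- surviving slices shorter than this are discarded (200)
+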
+# All tests below first run without any registered contaminants, expecting the same sequence back.
+def test_janitor1():
+    # First test, using a 1-gram: we expect the first block before the filth to keep some
+    # characters, while the second block should be removed completely.
+
+    sequence = (
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "FILTH. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+    )
+
+    filth = "filth"
+
+    expected_result = (
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing "
+    )
+
+    janitor = Janitor(
+        ngram_n=1, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
+    )
+    result = janitor.clean_python(sequence)
+    result = "".join(result)
+    assert result == sequence
+
+    janitor.register_contaminant(filth)
+    assert janitor.dirt_ngrams == {filth}
+
+    result = janitor.clean_python(sequence)
+    result = "".join(result)
+    assert result == expected_result
+
+
+def test_janitor2():
+    # Second test, using a 1-gram: we expect the first block before the filth to keep some
+    # characters, and the second block is longer than 200 characters, so it should too.
+
+    sequence = (
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "FILTH. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+    )
+
+    filth = "filth"
+
+    janitor = Janitor(
+        ngram_n=1, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
+    )
+    result = janitor.clean_python(sequence)
+    result = "".join(result)
+    assert result == sequence
+
+    janitor.register_contaminant(filth)
+    assert janitor.dirt_ngrams == {filth}
+
+    result = janitor.clean_python(sequence)
+    result = "".join(result)
+    assert result == JANITOR_EXPECTED
+
+
+def test_janitor3():
+    # Same test as above but with a 6-gram.
+
+    sequence = (
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+    )
+
+    janitor = Janitor(
+        ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
+    )
+    result = janitor.clean_python(sequence)
+    result = "".join(result)
+    assert result == sequence
+
+    janitor.register_contaminant(JANITOR_FILTH1)
+    assert janitor.dirt_ngrams == {JANITOR_FILTH1}
+
+    result = janitor.clean_python(sequence)
+    result = "".join(result)
+    assert result == JANITOR_EXPECTED
+
+
+def test_janitor4():
+    # This test adds another filthy block to the previous sequence. The middle block should
+    # be entirely removed, as the 200 characters are removed from each side of it.
+
+    sequence = (
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+    )
+
+    janitor = Janitor(
+        ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
+    )
+    result = janitor.clean_python(sequence)
+    result = "".join(result)
+    assert result == sequence
+
+    janitor.register_contaminant(JANITOR_FILTH1)
+    assert janitor.dirt_ngrams == {JANITOR_FILTH1}
+
+    result = janitor.clean_python(sequence)
+    result = "".join(result)
+    assert result == JANITOR_EXPECTED
+
+
+def test_janitor5():
+    # Same as above but using multiple different filth 6-grams.
+
+    sequence = (
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "FILTH. lots of filtHy dirty FIlTh "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+    )
+
+    filths = [JANITOR_FILTH1, JANITOR_FILTH2]
+
+    janitor = Janitor(
+        ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
+    )
+    result = janitor.clean_python(sequence)
+    result = "".join(result)
+    assert result == sequence
+
+    for filth in filths:
+        janitor.register_contaminant(filth)
+    assert janitor.dirt_ngrams == set(filths)
+
+    result = janitor.clean_python(sequence)
+    result = "".join(result)
+    assert result == JANITOR_EXPECTED
+
+
+def test_janitor6():
+    # Same as above but now with 10 filth occurrences, which is still within the
+    # too_dirty_cutoff, so we expect the same result; the following test uses 11.
+
+    sequence = (
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "FILTH. lots of filtHy dirty FIlTh "
+        "FILTH. lots of filtHy dirty FIlTh "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+    )
+
+    filths = [JANITOR_FILTH1, JANITOR_FILTH2]
+
+    janitor = Janitor(
+        ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
+    )
+    result = janitor.clean_python(sequence)
+    result = "".join(result)
+    assert result == sequence
+
+    for filth in filths:
+        janitor.register_contaminant(filth)
+    assert janitor.dirt_ngrams == set(filths)
+
+    result = janitor.clean_python(sequence)
+    result = "".join(result)
+    assert result == JANITOR_EXPECTED
+
+
+def test_janitor7():
+    # Same as above but now with 11 filth occurrences, which exceeds the too_dirty_cutoff
+    # of 10, so the entire document is expected to be removed.
+
+    sequence = (
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "FILTH. lots of dirty filtHy FIlTh "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "FILTH. lots of filtHy dirty FIlTh "
+        "FILTH. lots of filtHy dirty FIlTh "
+        "FILTH. lots of filtHy dirty FIlTh "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+        "This is a @line #containing a certain number of characters, 76 to be exact. "
+    )
+
+    filths = [JANITOR_FILTH1, JANITOR_FILTH2]
+
+    expected_result = ""
+
+    janitor = Janitor(
+        ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
+    )
+    result = janitor.clean_python(sequence)
+    result = "".join(result)
+    assert result == sequence
+
+    for filth in filths:
+        janitor.register_contaminant(filth)
+    assert janitor.dirt_ngrams == set(filths)
+
+    result = janitor.clean_python(sequence)
+    result = "".join(result)
+    assert result == expected_result
+
+
+def test_janitor8():
+    # TODO: test saving and loading of contaminants
+    pass
testbed/EleutherAI__lm-evaluation-harness/tests/test_misc.py ADDED
@@ -0,0 +1,14 @@
+import random
+
+import pytest
+
+import lm_eval.api.metrics as metrics
+
+
+def test_bootstrapping():
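+    # sanity check: for the sample mean, the bootstrap estimate of the standard
+    # error should converge to the analytic value (sample std / sqrt(n)) as the
+    # number of bootstrap iterations grows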
+    random.seed(42)
+    arr = [random.random() for _ in range(1000)]
+    expected = metrics.mean_stderr(arr)
+    bootstrapped = metrics.bootstrap_stderr(metrics.mean, arr, iters=100000)
+
+    assert bootstrapped == pytest.approx(expected, abs=1e-4)
testbed/EleutherAI__lm-evaluation-harness/tests/test_prompt.py ADDED
@@ -0,0 +1,119 @@
+import random
+from typing import List
+
+import numpy as np
+import pytest
+
+from lm_eval import tasks
+from lm_eval.tasks import TaskManager
+from lm_eval.utils import join_iters
+
+
+MMLU_ANATOMY_ZERO_SHOT = """The following are multiple choice questions (with answers) about anatomy.
+
+A lesion causing compression of the facial nerve at the stylomastoid foramen will cause ipsilateral
+A. paralysis of the facial muscles.
+B. paralysis of the facial muscles and loss of taste.
+C. paralysis of the facial muscles, loss of taste and lacrimation.
+D. paralysis of the facial muscles, loss of taste, lacrimation and decreased salivation.
+Answer:"""
+
+MMLU_ANATOMY_FIVE_SHOT = """The following are multiple choice questions (with answers) about anatomy.
+
+What is the embryological origin of the hyoid bone?
+A. The first pharyngeal arch
+B. The first and second pharyngeal arches
+C. The second pharyngeal arch
+D. The second and third pharyngeal arches
+Answer: D
+
+Which of these branches of the trigeminal nerve contain somatic motor processes?
+A. The supraorbital nerve
+B. The infraorbital nerve
+C. The mental nerve
+D. None of the above
+Answer: D
+
+The pleura
+A. have no sensory innervation.
+B. are separated by a 2 mm space.
+C. extend into the neck.
+D. are composed of respiratory epithelium.
+Answer: C
+
+In Angle's Class II Div 2 occlusion there is
+A. excess overbite of the upper lateral incisors.
+B. negative overjet of the upper central incisors.
+C. excess overjet of the upper lateral incisors.
+D. excess overjet of the upper central incisors.
+Answer: C
+
+Which of the following is the body cavity that contains the pituitary gland?
+A. Abdominal
+B. Cranial
+C. Pleural
+D. Spinal
+Answer: B
+
+A lesion causing compression of the facial nerve at the stylomastoid foramen will cause ipsilateral
+A. paralysis of the facial muscles.
+B. paralysis of the facial muscles and loss of taste.
+C. paralysis of the facial muscles, loss of taste and lacrimation.
+D. paralysis of the facial muscles, loss of taste, lacrimation and decreased salivation.
+Answer:"""
+
+
+@pytest.mark.parametrize(
+    "task_names,sets,num_fewshot,seed,num_examples,expected_prompt",
+    [
+        (["mmlu_anatomy"], "test", 0, 42, 1, MMLU_ANATOMY_ZERO_SHOT),
+        (["mmlu_anatomy"], "test", 5, 42, 1, MMLU_ANATOMY_FIVE_SHOT),
+    ],
+)
+def test_mmlu_prompt_rendering(
+    task_names: List[str],
+    sets: str,
+    num_fewshot: int,
+    seed: int,
+    num_examples: int,
+    expected_prompt: str,
+):
+    np.random.seed(seed)
+
+    task_manager = TaskManager()
+    task_dict = tasks.get_task_dict(task_names, task_manager)
+
+    for task_name, task in task_dict.items():
+        if isinstance(task, tuple):
+            _, task = task
+
+        rnd = random.Random()
+        rnd.seed(seed)
+
+        iters = []
+
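+        # `sets` can name several splits as a comma-separated string (e.g. "train,test");
+        # collect a doc iterator for each requested split that this task actually provides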
+        for split_name in sets.split(","):
+            docs = None
+            if split_name == "train" and task.has_training_docs():
+                docs = task.training_docs()
+            if split_name == "val" and task.has_validation_docs():
+                docs = task.validation_docs()
+            if split_name == "test" and task.has_test_docs():
+                docs = task.test_docs()
+            if docs is not None:
+                iters.append(docs)
+
+        if len(iters) == 0:
+            raise ValueError("none of the requested splits are available for this task")
+
+        docs = join_iters(iters)
+
+        for i, doc in (
+            zip(range(num_examples), docs) if num_examples > 0 else enumerate(docs)
+        ):
+            ctx = task.fewshot_context(
+                doc=doc,
+                num_fewshot=num_fewshot,
+            )
+
+            assert ctx == expected_prompt
testbed/EleutherAI__lm-evaluation-harness/tests/test_requests_caching.py ADDED
@@ -0,0 +1,122 @@
+import importlib
+import os
+import sys
+from datetime import datetime
+from typing import List, Optional, Tuple
+
+import pytest
+import torch
+
+from lm_eval.caching.cache import PATH
+
+
+MODULE_DIR = os.path.dirname(os.path.realpath(__file__))
+
+# NOTE the script this loads uses simple_evaluate
+# TODO potentially test both the helper script and the normal script
+sys.path.append(f"{MODULE_DIR}/../scripts")
+model_loader = importlib.import_module("requests_caching")
+run_model_for_task_caching = model_loader.run_model_for_task_caching
+
+os.environ["HF_DATASETS_TRUST_REMOTE_CODE"] = "1"
+DEFAULT_TASKS = ["lambada_openai", "sciq"]
+
+
+@pytest.fixture(autouse=True)
+def setup_and_teardown():
+    # Setup
+    torch.use_deterministic_algorithms(False)
+    clear_cache()
+    # Yields control back to the test function
+    yield
+    # Cleanup here
+
+
+def clear_cache():
+    if os.path.exists(PATH):
+        cache_files = os.listdir(PATH)
+        for file in cache_files:
+            file_path = f"{PATH}/{file}"
+            os.unlink(file_path)
+
+
+# leaving tasks here to allow for the option to select specific task files
+def get_cache_files(tasks: Optional[List[str]] = None) -> Tuple[List[str], List[str]]:
+    cache_files = os.listdir(PATH)
+
+    file_task_names = []
+
+    for file in cache_files:
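+        # cache file names are assumed to look like "<prefix>-<task>.<ext>"
+        # (e.g. a hypothetical "requests-sciq.pkl"), so the task name is the piece
+        # between the first "-" and the first "."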
+        file_without_prefix = file.split("-")[1]
+        file_without_prefix_and_suffix = file_without_prefix.split(".")[0]
+        file_task_names.append(file_without_prefix_and_suffix)
+
+    return cache_files, file_task_names
+
+
+def assert_created(tasks: List[str], file_task_names: List[str]):
+    tasks.sort()
+    file_task_names.sort()
+
+    assert tasks == file_task_names
+
+
+@pytest.mark.parametrize("tasks", [DEFAULT_TASKS])
+def requests_caching_true(tasks: List[str]):
+    run_model_for_task_caching(tasks=tasks, cache_requests="true")
+
+    cache_files, file_task_names = get_cache_files()
+    print(file_task_names)
+    assert_created(tasks=tasks, file_task_names=file_task_names)
+
+
+@pytest.mark.parametrize("tasks", [DEFAULT_TASKS])
+def requests_caching_refresh(tasks: List[str]):
+    run_model_for_task_caching(tasks=tasks, cache_requests="true")
+
+    timestamp_before_test = datetime.now().timestamp()
+
+    run_model_for_task_caching(tasks=tasks, cache_requests="refresh")
+
+    cache_files, file_task_names = get_cache_files()
+
+    for file in cache_files:
+        modification_time = os.path.getmtime(f"{PATH}/{file}")
+        assert modification_time > timestamp_before_test
+
+    tasks.sort()
+    file_task_names.sort()
+
+    assert tasks == file_task_names
+
+
+@pytest.mark.parametrize("tasks", [DEFAULT_TASKS])
+def requests_caching_delete(tasks: List[str]):
+    # populate the data first, rerun this test within this test for additional confidence
+    # test_requests_caching_true(tasks=tasks)
+
+    run_model_for_task_caching(tasks=tasks, cache_requests="delete")
+
+    cache_files, file_task_names = get_cache_files()
+
+    assert len(cache_files) == 0
+
+
+# useful for locally running tests through the debugger
+if __name__ == "__main__":
+
+    def run_tests():
+        tests = [
+            # test_requests_caching_true,
+            # test_requests_caching_refresh,
+            # test_requests_caching_delete,
+        ]
+        # Looking up global names inside a loop is inefficient, so copy to a local variable first
+        default_tasks = DEFAULT_TASKS
+        for test_func in tests:
+            clear_cache()
+            test_func(tasks=default_tasks)
+
+        print("Tests pass")
+
+    run_tests()
testbed/EleutherAI__lm-evaluation-harness/tests/test_tasks.py ADDED
@@ -0,0 +1,119 @@
+import os
+from itertools import islice
+
+import pytest
+
+import lm_eval.tasks as tasks
+from lm_eval.api.task import ConfigurableTask
+
+from .utils import new_tasks
+
+
+os.environ["TOKENIZERS_PARALLELISM"] = "false"
+task_manager = tasks.TaskManager()
+# Default Task
+TASKS = ["arc_easy"]
+
+
+def task_class():
+    global TASKS
+    # CI: new_tasks checks if any modifications have been made
+    task_classes = new_tasks()
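+    # (new_tasks comes from tests/utils.py and is assumed to return the names of
+    # tasks touched by the current change; when nothing changed, fall back to TASKS)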
+    # Check if task_classes is empty
+    if task_classes:
+        return list(task_manager.load_task_or_group(task_classes).values())
+    else:
+        return list(task_manager.load_task_or_group(TASKS).values())
+
+
+@pytest.fixture()
+def limit() -> int:
+    return 10
+
+
+# Tests
+@pytest.mark.parametrize("task_class", task_class(), ids=lambda x: f"{x.config.task}")
+class TestNewTasks:
+    def test_download(self, task_class: ConfigurableTask):
+        task_class.download()
+        assert task_class.dataset is not None
+
+    def test_has_training_docs(self, task_class: ConfigurableTask):
+        assert task_class.has_training_docs() in [True, False]
+
+    def test_check_training_docs(self, task_class: ConfigurableTask):
+        if task_class.has_training_docs():
+            assert task_class._config["training_split"] is not None
+
+    def test_has_validation_docs(self, task_class):
+        assert task_class.has_validation_docs() in [True, False]
+
+    def test_check_validation_docs(self, task_class):
+        if task_class.has_validation_docs():
+            assert task_class._config["validation_split"] is not None
+
+    def test_has_test_docs(self, task_class):
+        assert task_class.has_test_docs() in [True, False]
+
+    def test_check_test_docs(self, task_class):
+        task = task_class
+        if task.has_test_docs():
+            assert task._config["test_split"] is not None
+
+    def test_should_decontaminate(self, task_class):
+        task = task_class
+        assert task.should_decontaminate() in [True, False]
+        if task.should_decontaminate():
+            assert task._config["doc_to_decontamination_query"] is not None
+
+    def test_doc_to_text(self, task_class, limit):
+        task = task_class
+        arr = (
+            list(islice(task.test_docs(), limit))
+            if task.has_test_docs()
+            else list(islice(task.validation_docs(), limit))
+        )
+        _array = [task.doc_to_text(doc) for doc in arr]
+        # space convention: text should not end with a space; allow empty text for
+        # perplexity-like tasks, since the model tacks an <|endoftext|> on
+        assert all(
+            isinstance(x, str) and (x[-1] != " " if len(x) != 0 else True)
+            for x in _array
+        )
+
+    def test_create_choices(self, task_class, limit):
+        task = task_class
+        arr = (
+            list(islice(task.test_docs(), limit))
+            if task.has_test_docs()
+            else list(islice(task.validation_docs(), limit))
+        )
+        if "multiple_choice" in task._config.output_type:
+            _array = [task.doc_to_choice(doc) for doc in arr]
+            assert all(isinstance(x, list) for x in _array)
+            assert all(isinstance(x[0], str) for x in _array)
+
+    def test_doc_to_target(self, task_class, limit):
+        task = task_class
+        arr = (
+            list(islice(task.test_docs(), limit))
+            if task.has_test_docs()
+            else list(islice(task.validation_docs(), limit))
+        )
+        _array_target = [task.doc_to_target(doc) for doc in arr]
+        if task._config.output_type == "multiple_choice":
+            assert all(isinstance(label, int) for label in _array_target)
+
+    def test_build_all_requests(self, task_class, limit):
+        task_class.build_all_requests(rank=1, limit=limit, world_size=1)
+        assert task_class.instances is not None
+
+    # TODO: add proper testing
+    def test_construct_requests(self, task_class, limit):
+        task = task_class
+        arr = (
+            list(islice(task.test_docs(), limit))
+            if task.has_test_docs()
+            else list(islice(task.validation_docs(), limit))
+        )
+        requests = [task.construct_requests(doc, task.doc_to_text(doc)) for doc in arr]
+        assert len(requests) == limit if limit else True
testbed/EleutherAI__lm-evaluation-harness/tests/testconfigs/arc_test.yaml ADDED
@@ -0,0 +1,21 @@
+task: arc_easy
+dataset_path: allenai/ai2_arc
+dataset_name: ARC-Easy
+output_type: multiple_choice
+training_split: train
+validation_split: validation
+test_split: test
+doc_to_text: "Question: {{question}}\nAnswer:"
+doc_to_target: "{{choices.label.index(answerKey)}}"
+doc_to_choice: "{{choices.text}}"
+should_decontaminate: true
+doc_to_decontamination_query: "Question: {{question}}\nAnswer:"
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+  - metric: acc_norm
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 1.0
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/ai2_arc_10_hf_pretrained-EleutherAI-pythia-14m-dtype-float32-device-cpu.txt ADDED
@@ -0,0 +1,6 @@
+| Tasks |Version|Filter|n-shot| Metric | |Value| |Stderr|
+|-------------|------:|------|-----:|--------|---|----:|---|------|
+|arc_challenge| 1|none | 0|acc |↑ | 0.0|± | N/A|
+| | |none | 0|acc_norm|↑ | 0.0|± | N/A|
+|arc_easy | 1|none | 0|acc |↑ | 0.3|± | N/A|
+| | |none | 0|acc_norm|↑ | 0.1|± | N/A|
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/lambada_openai_10_hf_pretrained-EleutherAI-pythia-14m-dtype-float32-device-cpu.txt ADDED
@@ -0,0 +1,4 @@
+| Tasks |Version|Filter|n-shot| Metric | | Value | |Stderr|
+|--------------|------:|------|-----:|----------|---|-------:|---|------|
+|lambada_openai| 1|none | 0|acc |↑ | 0.1000|± | N/A|
+| | |none | 0|perplexity|↓ |605.3866|± | N/A|
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/mmlu_stem_10_hf_pretrained-EleutherAI-pythia-14m-dtype-float32-device-cpu.txt ADDED
@@ -0,0 +1,22 @@
+| Tasks |Version|Filter|n-shot|Metric| |Value | |Stderr|
+|-------------------------------|------:|------|-----:|------|---|-----:|---|------|
+|stem | 2|none | |acc |↑ |0.2474|± | N/A|
+| - abstract_algebra | 1|none | 0|acc |↑ |0.2000|± | N/A|
+| - anatomy | 1|none | 0|acc |↑ |0.3000|± | N/A|
+| - astronomy | 1|none | 0|acc |↑ |0.1000|± | N/A|
+| - college_biology | 1|none | 0|acc |↑ |0.3000|± | N/A|
+| - college_chemistry | 1|none | 0|acc |↑ |0.1000|± | N/A|
+| - college_computer_science | 1|none | 0|acc |↑ |0.2000|± | N/A|
+| - college_mathematics | 1|none | 0|acc |↑ |0.2000|± | N/A|
+| - college_physics | 1|none | 0|acc |↑ |0.3000|± | N/A|
+| - computer_security | 1|none | 0|acc |↑ |0.5000|± | N/A|
+| - conceptual_physics | 1|none | 0|acc |↑ |0.3000|± | N/A|
+| - electrical_engineering | 1|none | 0|acc |↑ |0.4000|± | N/A|
+| - elementary_mathematics | 1|none | 0|acc |↑ |0.0000|± | N/A|
+| - high_school_biology | 1|none | 0|acc |↑ |0.3000|± | N/A|
+| - high_school_chemistry | 1|none | 0|acc |↑ |0.4000|± | N/A|
+| - high_school_computer_science| 1|none | 0|acc |↑ |0.3000|± | N/A|
+| - high_school_mathematics | 1|none | 0|acc |↑ |0.2000|± | N/A|
+| - high_school_physics | 1|none | 0|acc |↑ |0.3000|± | N/A|
+| - high_school_statistics | 1|none | 0|acc |↑ |0.0000|± | N/A|
+| - machine_learning | 1|none | 0|acc |↑ |0.3000|± | N/A|
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wikitext_10_hf_pretrained-EleutherAI-pythia-14m-dtype-float32-device-cpu.txt ADDED
@@ -0,0 +1,5 @@
+| Tasks |Version|Filter|n-shot| Metric | | Value | |Stderr|
+|--------|------:|------|-----:|---------------|---|-------:|---|------|
+|wikitext| 2|none | 0|bits_per_byte |↓ | 1.3394|± | N/A|
+| | |none | 0|byte_perplexity|↓ | 2.5304|± | N/A|
+| | |none | 0|word_perplexity|↓ |130.4801|± | N/A|
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-de-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"wmt20-en-de": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.007148103038872972, "chrf_stderr": 9.594096858911254e-05, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-en-de": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-ja-v0-greedy_until ADDED
@@ -0,0 +1 @@
+7fe61f5847a51e93e97c84b39f4420978727754e4b6cf636a27851c615857530
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-ja-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"wmt20-en-ja": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 4.1308658294778584e-05, "chrf_stderr": 2.0456539027807417e-05, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-en-ja": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-ps-v0-greedy_until ADDED
@@ -0,0 +1 @@
+8411c2cb73114cbd0c6e0f17eab2625d486cc3a601105deb0ea1338a401df689
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-ru-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"wmt20-en-ru": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.0007327811114614671, "chrf_stderr": 4.43155903515048e-05, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-en-ru": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-ta-v0-greedy_until ADDED
@@ -0,0 +1 @@
+5fc556fa90bca7f1b1396e97e392eac8080b0ad53488358799b8fc0b21a94cb1
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-ta-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"wmt20-en-ta": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.0, "chrf_stderr": 0.0, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-en-ta": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-zh-v0-greedy_until ADDED
@@ -0,0 +1 @@
+67f0333ddbcb07d7a9ac12919129a18fe4fea24e4826a11bbdde4fd5ed5ed83f
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-zh-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"wmt20-en-zh": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.00014170297316825535, "chrf_stderr": 6.590669847391838e-05, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-en-zh": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-zh-v1-greedy_until ADDED
@@ -0,0 +1 @@
+67f0333ddbcb07d7a9ac12919129a18fe4fea24e4826a11bbdde4fd5ed5ed83f
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-en-zh-v1-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"wmt20-en-zh": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.00014170297316825535, "chrf_stderr": 6.590669847391838e-05, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-en-zh": 1}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-fr-de-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"wmt20-fr-de": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.01143193767396364, "chrf_stderr": 0.00012555271954563658, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-fr-de": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-iu-en-v0-greedy_until ADDED
@@ -0,0 +1 @@
+97bf664a8efa54b5366b8341f77b418106dd0cb26169d5b2d0144e4d3d2bc5c9
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-ja-en-v0-greedy_until ADDED
@@ -0,0 +1 @@
+1fd846f3c0104e794eb380dae7f648592092ab8bf59234c26d0a671bbbc28df1
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-ja-en-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"wmt20-ja-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.010703148854351403, "chrf_stderr": 0.00022242113108130186, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-ja-en": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-km-en-v0-greedy_until ADDED
@@ -0,0 +1 @@
+fb4ec81bb89c70df7e21b43e0e882915b7b71a2a85bb8d4b59e0c7938baaa4c2
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-pl-en-v0-greedy_until ADDED
@@ -0,0 +1 @@
+89274499d84176b1ffe4eaec06f2c89ca807342384dc946c2e348d00116aaade
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-pl-en-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"wmt20-pl-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.01353367757716276, "chrf_stderr": 0.00018386199249976465, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-pl-en": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-ps-en-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"wmt20-ps-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.015192865365105723, "chrf_stderr": 0.00011334541381539086, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-ps-en": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-ru-en-v0-greedy_until ADDED
@@ -0,0 +1 @@
+1477ab6542c26bd0222cc1aded174f33bf8d04d1cf6a1c0959aeca4ff3779adc
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-ru-en-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"wmt20-ru-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.013344639906399232, "chrf_stderr": 7.583552652374546e-05, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-ru-en": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-ta-en-v0-greedy_until ADDED
@@ -0,0 +1 @@
+111ea3efdc08f1cf536631b9426c3a20e482c575d009d2a8c71f59c027578eec
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-ta-en-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"wmt20-ta-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.013841110664859798, "chrf_stderr": 0.00018476696850880766, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-ta-en": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/wmt20-zh-en-v0-greedy_until ADDED
@@ -0,0 +1 @@
+07dbadfd6f2b2b9462ab6187dbfaabae6e5192ab89a8e4ede9237834b9364dd1