hc99 commited on
Commit
5ccd75a
·
verified ·
1 Parent(s): ceaa2e7

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. testbed/EleutherAI__lm-evaluation-harness/CODEOWNERS +1 -0
  2. testbed/EleutherAI__lm-evaluation-harness/pyproject.toml +107 -0
  3. testbed/EleutherAI__lm-evaluation-harness/tests/test_utils.py +398 -0
  4. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_only_npi_scope-v0-loglikelihood +1 -0
  5. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_passive_2-v0-res.json +1 -0
  6. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_principle_A_case_1-v0-res.json +1 -0
  7. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_1-v0-loglikelihood +1 -0
  8. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_1-v0-res.json +1 -0
  9. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_2-v0-res.json +1 -0
  10. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_3-v0-loglikelihood +1 -0
  11. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_3-v0-res.json +1 -0
  12. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_principle_A_reconstruction-v0-loglikelihood +1 -0
  13. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_principle_A_reconstruction-v0-res.json +1 -0
  14. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_regular_plural_subject_verb_agreement_1-v0-res.json +1 -0
  15. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_regular_plural_subject_verb_agreement_2-v0-loglikelihood +1 -0
  16. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_regular_plural_subject_verb_agreement_2-v0-res.json +1 -0
  17. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_sentential_negation_npi_licensor_present-v0-res.json +1 -0
  18. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_sentential_negation_npi_scope-v0-res.json +1 -0
  19. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_sentential_subject_island-v0-loglikelihood +1 -0
  20. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_superlative_quantifiers_1-v0-loglikelihood +1 -0
  21. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_superlative_quantifiers_2-v0-loglikelihood +1 -0
  22. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_2-v0-loglikelihood +1 -0
  23. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_no_gap-v0-loglikelihood +1 -0
  24. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/drop-v1-greedy_until +1 -0
  25. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/ethics_deontology-v0-res.json +1 -0
  26. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/ethics_justice-v0-loglikelihood +1 -0
  27. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/ethics_justice-v0-res.json +1 -0
  28. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/ethics_utilitarianism-v0-res.json +1 -0
  29. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/ethics_utilitarianism_original-v0-loglikelihood +1 -0
  30. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/ethics_virtue-v0-loglikelihood +1 -0
  31. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/gsm8k-v0-res.json +1 -0
  32. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/headqa-v0-loglikelihood +1 -0
  33. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/headqa-v0-res.json +1 -0
  34. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/headqa_en-v0-loglikelihood +1 -0
  35. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/headqa_en-v0-res.json +1 -0
  36. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/headqa_es-v0-loglikelihood +1 -0
  37. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/headqa_es-v0-res.json +1 -0
  38. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hellaswag-v0-loglikelihood +1 -0
  39. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hellaswag-v0-res.json +1 -0
  40. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-abstract_algebra-v0-loglikelihood +1 -0
  41. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-abstract_algebra-v0-res.json +1 -0
  42. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-anatomy-v0-loglikelihood +1 -0
  43. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-anatomy-v0-res.json +1 -0
  44. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-astronomy-v0-loglikelihood +1 -0
  45. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-astronomy-v0-res.json +1 -0
  46. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-business_ethics-v0-loglikelihood +1 -0
  47. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-business_ethics-v0-res.json +1 -0
  48. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-clinical_knowledge-v0-loglikelihood +1 -0
  49. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-clinical_knowledge-v0-res.json +1 -0
  50. testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-college_biology-v0-loglikelihood +1 -0
testbed/EleutherAI__lm-evaluation-harness/CODEOWNERS ADDED
@@ -0,0 +1 @@
 
 
1
+ * @haileyschoelkopf @lintangsutawika @baberabb
testbed/EleutherAI__lm-evaluation-harness/pyproject.toml ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
[build-system]
requires = ["setuptools>=40.8.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "lm_eval"
version = "0.4.4"
authors = [
    {name="EleutherAI", email="contact@eleuther.ai"}
]
description = "A framework for evaluating language models"
readme = "README.md"
classifiers = [
    "Development Status :: 3 - Alpha",
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
]
requires-python = ">=3.8"
license = { "text" = "MIT" }
dependencies = [
    "accelerate>=0.26.0",
    "datasets>=2.16.0",
    # "evaluate" was previously listed twice (once unpinned, once >=0.4.0);
    # the pinned entry subsumes the unpinned one.
    "evaluate>=0.4.0",
    "jsonlines",
    "numexpr",
    "peft>=0.2.0",
    "pybind11>=2.6.2",
    "pytablewriter",
    "rouge-score>=0.0.4",
    "sacrebleu>=1.5.0",
    "scikit-learn>=0.24.1",
    "sqlitedict",
    "torch>=1.8",
    "tqdm-multiprocess",
    "transformers>=4.1",
    "zstandard",
    "dill",
    "word2number",
    "more_itertools",
]

[tool.setuptools.packages.find]
include = ["lm_eval*"]

# required to include yaml files in pip installation
[tool.setuptools.package-data]
lm_eval = ["**/*.yaml", "tasks/**/*"]

[project.scripts]
lm-eval = "lm_eval.__main__:cli_evaluate"
lm_eval = "lm_eval.__main__:cli_evaluate"

[project.urls]
Homepage = "https://github.com/EleutherAI/lm-evaluation-harness"
Repository = "https://github.com/EleutherAI/lm-evaluation-harness"

[project.optional-dependencies]
api = ["requests", "aiohttp", "tenacity", "tqdm", "tiktoken"]
dev = ["pytest", "pytest-cov", "pytest-xdist", "pre-commit", "mypy"]
deepsparse = ["deepsparse-nightly[llm]>=1.8.0.20240404"]
gptq = ["auto-gptq[triton]>=0.6.0"]
hf_transfer = ["hf_transfer"]
ifeval = ["langdetect", "immutabledict", "nltk>=3.9.1"]
neuronx = ["optimum[neuronx]"]
mamba = ["mamba_ssm", "causal-conv1d==1.0.2"]
math = ["sympy>=1.12", "antlr4-python3-runtime==4.11"]
multilingual = ["nagisa>=0.2.7", "jieba>=0.42.1", "pycountry"]
optimum = ["optimum[openvino]"]
promptsource = ["promptsource>=0.2.3"]
sentencepiece = ["sentencepiece>=0.1.98"]
sparseml = ["sparseml-nightly[llm]>=1.8.0.20240404"]
testing = ["pytest", "pytest-cov", "pytest-xdist"]
vllm = ["vllm>=0.4.2"]
zeno = ["pandas", "zeno-client"]
wandb = ["wandb>=0.16.3", "pandas", "numpy"]
# NOTE(review): "lm_eval[anthropic]" and "lm_eval[openai]" were referenced here
# but no "anthropic" / "openai" extras are defined above — self-referencing
# undefined extras breaks `pip install lm_eval[all]`. Removed until those
# extras are (re)introduced.
all = [
    "lm_eval[dev]",
    "lm_eval[deepsparse]",
    "lm_eval[gptq]",
    "lm_eval[hf_transfer]",
    "lm_eval[ifeval]",
    "lm_eval[mamba]",
    "lm_eval[math]",
    "lm_eval[multilingual]",
    "lm_eval[promptsource]",
    "lm_eval[sentencepiece]",
    "lm_eval[sparseml]",
    "lm_eval[testing]",
    "lm_eval[vllm]",
    "lm_eval[zeno]",
    "lm_eval[wandb]",
]

[tool.ruff.lint]
extend-select = ["I"]

[tool.ruff.lint.isort]
lines-after-imports = 2
known-first-party = ["lm_eval"]

[tool.ruff.lint.extend-per-file-ignores]
"__init__.py" = ["F401","F402","F403"]
"utils.py" = ["F401"]
testbed/EleutherAI__lm-evaluation-harness/tests/test_utils.py ADDED
@@ -0,0 +1,398 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+
3
+ import numpy as np
4
+ import pytest
5
+ import torch
6
+
7
+ from lm_eval.api.metrics import (
8
+ aggregate_subtask_metrics,
9
+ mean,
10
+ pooled_sample_stderr,
11
+ stderr_for_metric,
12
+ )
13
+ from lm_eval.models.utils import Collator
14
+ from lm_eval.utils import (
15
+ get_rolling_token_windows,
16
+ make_disjoint_window,
17
+ )
18
+
19
+
20
# noinspection DuplicatedCode
def test_get_rolling_token_windows_v1():
    """Rolling windows over 34 tokens with max_seq_len=10, context_len=1.

    Windows advance by 9 tokens each step; the final window is right-aligned
    so that every token is predicted exactly once.
    """
    gold = [
        ([-100, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
        (
            [9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
            [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
        ),
        (
            [19, 20, 21, 22, 23, 24, 25, 26, 27, 28],
            [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
        ),
        ([23, 24, 25, 26, 27, 28, 29, 30, 31, 32], [30, 31, 32, 33]),
    ]
    tokens = list(range(34))
    windows = list(
        get_rolling_token_windows(
            token_list=tokens,
            prefix_token=-100,
            max_seq_len=10,
            context_len=1,
        )
    )
    # Every input token must be predicted exactly once across all windows.
    assert sum(len(pred) for _, pred in windows) == len(tokens)
    assert windows == gold
48
+
49
+
50
# noinspection DuplicatedCode
def test_get_rolling_token_windows_v2():
    """Rolling windows over 34 tokens with max_seq_len=10, context_len=8.

    After the full first window, each window keeps 7 tokens of overlap and
    predicts 3 new tokens (stride = max_seq_len - context_len + 1 = 3).
    """
    # First window: prefix token + tokens 0..8, predicting tokens 0..9.
    gold = [([-100] + list(range(9)), list(range(10)))]
    # Subsequent windows slide by 3: context [2+3k, 12+3k), preds [10+3k, 13+3k).
    gold += [
        (list(range(2 + 3 * k, 12 + 3 * k)), list(range(10 + 3 * k, 13 + 3 * k)))
        for k in range(8)
    ]
    tokens = list(range(34))
    windows = list(
        get_rolling_token_windows(
            token_list=tokens,
            prefix_token=-100,
            max_seq_len=10,
            context_len=8,
        )
    )
    # Every input token must be predicted exactly once across all windows.
    assert sum(len(pred) for _, pred in windows) == len(tokens)
    assert windows == gold
77
+
78
+
79
# noinspection DuplicatedCode
def test_get_rolling_token_windows_v3():
    """Rolling windows over 34 tokens with max_seq_len == context_len == 10.

    With a fully overlapping context, every window after the first slides by
    one token and predicts exactly one new token.
    """
    # First window: prefix token + tokens 0..8, predicting tokens 0..9.
    gold = [([-100] + list(range(9)), list(range(10)))]
    # Then 24 single-prediction windows: context [i, i+10), predicting i+10.
    gold += [(list(range(i, i + 10)), [i + 10]) for i in range(24)]
    tokens = list(range(34))
    windows = list(
        get_rolling_token_windows(
            token_list=tokens,
            prefix_token=-100,
            max_seq_len=10,
            context_len=10,
        )
    )
    # Every input token must be predicted exactly once across all windows.
    assert sum(len(pred) for _, pred in windows) == len(tokens)
    assert windows == gold
122
+
123
+
124
# noinspection DuplicatedCode
def test_get_rolling_token_windows_v4():
    """Same as v3 (max_seq_len == context_len == 10) but over 30 tokens,
    checking the generator stops cleanly at the sequence end."""
    # First window: prefix token + tokens 0..8, predicting tokens 0..9.
    gold = [([-100] + list(range(9)), list(range(10)))]
    # Then 20 single-prediction windows: context [i, i+10), predicting i+10.
    gold += [(list(range(i, i + 10)), [i + 10]) for i in range(20)]
    tokens = list(range(30))
    windows = list(
        get_rolling_token_windows(
            token_list=tokens,
            prefix_token=-100,
            max_seq_len=10,
            context_len=10,
        )
    )
    # Every input token must be predicted exactly once across all windows.
    assert sum(len(pred) for _, pred in windows) == len(tokens)
    assert windows == gold
163
+
164
+
165
# noinspection DuplicatedCode
def test_get_rolling_token_windows_v5():
    """30 tokens with max_seq_len=10, context_len=1: the sequence length is an
    exact multiple of the stride, so no right-aligned tail window is needed."""
    gold = [
        ([-100, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
        (
            [9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
            [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
        ),
        (
            [19, 20, 21, 22, 23, 24, 25, 26, 27, 28],
            [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
        ),
    ]
    tokens = list(range(30))
    windows = list(
        get_rolling_token_windows(
            token_list=tokens,
            prefix_token=-100,
            max_seq_len=10,
            context_len=1,
        )
    )
    # Every input token must be predicted exactly once across all windows.
    assert sum(len(pred) for _, pred in windows) == len(tokens)
    assert windows == gold
192
+
193
+
194
# noinspection DuplicatedCode
def test_get_rolling_token_windows_v6():
    """Tiny windows (max_seq_len=2, context_len=1) over 9 tokens; the last
    window is right-aligned and predicts only the single remaining token."""
    gold = [
        ([-100, 0], [0, 1]),
        ([1, 2], [2, 3]),
        ([3, 4], [4, 5]),
        ([5, 6], [6, 7]),
        ([6, 7], [8]),
    ]
    tokens = list(range(9))
    windows = list(
        get_rolling_token_windows(
            token_list=tokens,
            prefix_token=-100,
            max_seq_len=2,
            context_len=1,
        )
    )
    # Every input token must be predicted exactly once across all windows.
    assert sum(len(pred) for _, pred in windows) == len(tokens)
    assert windows == gold
217
+
218
+
219
def test_get_rolling_token_windows_empty():
    """An empty token list must yield no windows at all."""
    windows = get_rolling_token_windows(
        token_list=[],
        prefix_token=-100,
        max_seq_len=2,
        context_len=1,
    )
    assert sum(1 for _ in windows) == 0
230
+
231
+
232
def test_make_disjoint_window():
    """make_disjoint_window must trim the context so it stops exactly where
    the continuation begins, leaving the continuation untouched."""
    cases = [
        # (context, continuation) -> (trimmed context, continuation)
        (([1, 2, 3, 4, 5], [2, 3, 4, 5, 6]), ([1], [2, 3, 4, 5, 6])),
        (([1, 2, 3, 4, 5], [4, 5, 6]), ([1, 2, 3], [4, 5, 6])),
        (([1, 2, 3, 4, 5], [6]), ([1, 2, 3, 4, 5], [6])),
    ]
    for given, expected in cases:
        assert make_disjoint_window(given) == expected
239
+
240
+
241
class TestCollator:
    """Tests for ``lm_eval.models.utils.Collator``: batching, length-sorted
    reordering, grouping (by gen_kwargs / by context), and restoring the
    original request order via ``get_original``."""

    def make_generate_sample(self, end=10):
        """Build ``end`` (string, gen_kwargs) generation requests.

        The first half of the requests share one gen_kwargs dict and the
        second half another, so ``group_by="gen_kwargs"`` has two groups.
        """
        strings = ["x" * i for i in range(1, end + 1)]
        gen_kwargs1, gen_kwargs2 = (
            {"temperature": 0},
            {"temperature": 0, "until": ["nn", "\n\n"]},
        )
        args = [
            (string, gen_kwargs1 if i < len(strings) // 2 else gen_kwargs2)
            for i, string in enumerate(strings)
        ]

        return args

    def make_loglikelihood_sample(self, end=11):
        """Build ``end`` loglikelihood requests whose token lists have strictly
        increasing lengths 1..end (to exercise length-based sorting)."""
        samples = [
            (("x", "x"), list(range(1, total_length + 1)))
            for total_length in range(1, end + 1)
        ]
        return samples

    def make_loglikelihood_sample_group(self, end=11):
        """Build requests sharing one context ([1..8]) with varying
        continuations: 9 single-token and 9 three-token continuations.
        NOTE: the ``end`` parameter is currently unused.
        """
        a = [(("x", "x"), [1, 2, 3, 4, 5, 6, 7, 8], [x]) for x in range(9)]
        b = [
            (("x", "x"), [1, 2, 3, 4, 5, 6, 7, 8], [x, y, z])
            for x, y, z in zip(range(9), range(9, 18), range(18, 27))
        ]
        return a + b

    @pytest.mark.parametrize("batch_size, end", [(17, 30), (8, 61), (12, 48), (0, 9)])
    def test_generations(self, batch_size, end):
        """Batches respect batch_size (or group sizes when batch_size == 0),
        are sorted longest-first, grouped by identical gen_kwargs, and
        get_original restores the input order."""
        # Sort key: longer strings first (negative length), tie-break on string.
        _collate_gen = lambda x: (-len(x[0]), x[0])  # noqa: E731

        generation_samples = self.make_generate_sample(int(end))
        gens = Collator(generation_samples, _collate_gen, group_by="gen_kwargs")
        chunks_gen = gens.get_batched(n=int(batch_size), batch_fn=None)
        output = []
        # Expected group sizes when batch_size == 0 (one batch per group).
        group_one = end // 2
        group_two = end - end // 2
        is_batch = batch_size != 0
        for chunks in chunks_gen:
            # check batching
            assert (
                len(chunks) <= batch_size
                if is_batch
                else len(chunks) in [group_one, group_two]
            )
            # check if reorder-er is working correctly
            chunk_lengths = [len(chunk[0]) for chunk in chunks]
            assert chunk_lengths == sorted(chunk_lengths, reverse=True)
            # check if grouping correctly: all requests in a batch share gen_kwargs
            chunk_to_compare = chunks[0][1]
            assert all(x[1] == chunk_to_compare for x in chunks)
            for x in chunks:
                output.extend([x])
        reordered_output = gens.get_original(output)
        # check get original
        assert reordered_output == generation_samples

    @pytest.mark.parametrize("batch_size, end", [(17, 30), (8, 61), (12, 48), (0, 3)])
    def test_loglikelihood(self, batch_size, end):
        """Same batching/sorting/reordering checks for ungrouped
        loglikelihood requests (sorted by token-list length)."""
        # Sort key: longer token lists first; tuple of tokens as tie-break.
        _collate_log = lambda x: (-len(x[1]), tuple(x[1]))  # noqa: E731
        loglikelihood_samples = self.make_loglikelihood_sample(int(end))
        loglikelihoods = Collator(
            loglikelihood_samples,
            _collate_log,
        )
        chunks_gen = loglikelihoods.get_batched(n=int(batch_size), batch_fn=None)
        output = []
        is_batch = batch_size != 0
        for chunks in chunks_gen:
            # check batching: batch_size == 0 means everything in one batch
            assert len(chunks) <= batch_size if is_batch else len(chunks) == end
            # check reorder
            chunk_lengths = [len(chunk[1]) for chunk in chunks]
            assert chunk_lengths == sorted(chunk_lengths, reverse=True)
            for x in chunks:
                output.extend([x[1]])
        # check indices
        reordered_output = loglikelihoods.get_original(output)
        assert reordered_output == [x[1] for x in loglikelihood_samples]

    @pytest.mark.parametrize("batch_size", [17, 8, 12, 0])
    def test_context_grouping(self, batch_size):
        """With group_by="contexts", requests sharing a context are deduplicated
        and get_cache re-expands each batch item back to its member requests."""

        def _collate(x):
            # Sort key over context + continuation tokens, longest first.
            toks = x[1] + x[2]
            return -len(toks), tuple(toks)

        _collate_log = _collate  # noqa: E731
        loglikelihood_samples = self.make_loglikelihood_sample_group()
        loglikelihoods = Collator(
            loglikelihood_samples,
            _collate_log,
            # Group key: context tokens + all but the last continuation token.
            group_fn=lambda a: a[-2] + a[-1][:-1],
            group_by="contexts",
        )
        chunks_gen = loglikelihoods.get_batched(n=int(batch_size), batch_fn=None)
        output = []
        outputs_ = []
        is_batch = batch_size != 0
        for chunks in chunks_gen:
            # check batching
            if is_batch:
                assert len(chunks) <= batch_size
            # check reorder
            chunk_lengths = [len(chunk[1]) for chunk in chunks]
            assert chunk_lengths == sorted(chunk_lengths, reverse=True)
            for x in chunks:
                # get_cache yields one (request_str, cont_toks, logits) triple
                # per original request that was merged into this cache entry.
                for request_str, cont_toks, logits in loglikelihoods.get_cache(
                    req_str="".join(x[0]),
                    cxt_toks=x[1],
                    cont_toks=x[2],
                    # Dummy (1, 1, 8) logits tensor; contents are irrelevant here.
                    logits=torch.tensor([1, 2, 3, 4, 5, 6, 7, 8])
                    .unsqueeze(0)
                    .unsqueeze(0),
                ):
                    output.extend([x[1]])
                    outputs_.extend([cont_toks])
        assert len(output) == len(outputs_)
        # check indices
        reordered_output = loglikelihoods.get_original(output)
        assert reordered_output == [x[1] for x in loglikelihood_samples]
363
+
364
+
365
def test_aggregate_mean():
    """aggregate_subtask_metrics must respect the weight_by_size flag."""
    scores = [0.3, 0.2, 0.4]
    sizes = [20, 40, 100]
    # Unweighted: plain mean of the three subtask scores.
    assert aggregate_subtask_metrics(scores, sizes, weight_by_size=False) == 0.3
    # Weighted: (0.3*20 + 0.2*40 + 0.4*100) / 160 == 0.3375.
    assert aggregate_subtask_metrics(scores, sizes, weight_by_size=True) == 0.3375
375
+
376
+
377
@pytest.mark.parametrize(
    "samples",
    [
        [40 * [1.0] + 60 * [0.0], 30 * [1.0] + 30 * [0.0], 20 * [1.0] + 60 * [0.0]],
        [35 * [1.0] + 65 * [0.0], 20 * [1.0] + 20 * [0.0]],
    ],
)
def test_aggregate_stderrs(samples):
    """Pooling per-subtask bootstrap stderrs with pooled_sample_stderr
    (size-weighted) should be ~equivalent to taking the bootstrap stderr
    of all samples lumped together."""
    mean_stderr = stderr_for_metric(metric=mean, bootstrap_iters=100000)

    per_task_stderrs = [mean_stderr(task_samples) for task_samples in samples]
    per_task_sizes = [len(task_samples) for task_samples in samples]

    pooled = pooled_sample_stderr(per_task_stderrs, per_task_sizes)
    direct = mean_stderr(list(itertools.chain.from_iterable(samples)))
    assert np.allclose(pooled, direct, atol=1.0e-3)
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_only_npi_scope-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ fc0be817478c212327050fa297ef61ad214f4847dbff61d4e0fe7914c06a1691
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_passive_2-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"blimp_passive_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_passive_2": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_principle_A_case_1-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"blimp_principle_A_case_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_case_1": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 290e7eddacea4ec16989af697f2ee3373fdd9aef4b452bf887184c6e2f6e7d9d
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_1-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"blimp_principle_A_domain_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_domain_1": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_2-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"blimp_principle_A_domain_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_domain_2": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_3-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 38454befedcf1f3f6ef27d3bef9ccfdfb3e94a7ab32d86a63493a920d2d50093
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_3-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"blimp_principle_A_domain_3": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_domain_3": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_principle_A_reconstruction-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 894efedfd8750d5b8de6157f9b2ed2b51b5290d3a78ea9b041fc62d34e96efbc
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_principle_A_reconstruction-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"blimp_principle_A_reconstruction": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_reconstruction": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_regular_plural_subject_verb_agreement_1-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"blimp_regular_plural_subject_verb_agreement_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_regular_plural_subject_verb_agreement_1": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_regular_plural_subject_verb_agreement_2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ f69d9891f59872538962221fccc425b07df7cfbd83cdc546ce83e6b0e9a93f7c
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_regular_plural_subject_verb_agreement_2-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"blimp_regular_plural_subject_verb_agreement_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_regular_plural_subject_verb_agreement_2": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_sentential_negation_npi_licensor_present-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"blimp_sentential_negation_npi_licensor_present": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_sentential_negation_npi_licensor_present": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_sentential_negation_npi_scope-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"blimp_sentential_negation_npi_scope": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_sentential_negation_npi_scope": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_sentential_subject_island-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 80f5f98fad26240de2767fe58c4b18d864df41cbfa76f06c84c3fce9f14f4833
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_superlative_quantifiers_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 8a01f6a5ea87a01c0c9b0c7b3bc4de4711bf0ff050976976651182b9ed34a0d4
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_superlative_quantifiers_2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 59c20ff0f632cf42afc74ecc682cf92e5e740417b01e6cf9a610a3bc544d2ea5
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ d255a10a34f14d77d9526604a17b0f6747d32f62fc2e3a09e9ab10054535fd45
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_no_gap-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ d1d3e439b2020ef5ed232bfebbcc9634adc5117e9eb61e38fdbbe2c8ea128d54
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/drop-v1-greedy_until ADDED
@@ -0,0 +1 @@
 
 
1
+ a670f911ab2999d72db15f534b22703d19e7837edbda4f9f199ad587f7aae6b2
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/ethics_deontology-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"ethics_deontology": {"acc": 0.503615127919911, "acc_stderr": 0.008338908432085105, "em": 0.07119021134593993}}, "versions": {"ethics_deontology": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/ethics_justice-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ d7dfc44fea507b5c5c3a8218f79ed8197da8599ebb396d85feb91c25512126b6
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/ethics_justice-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"ethics_justice": {"acc": 0.49556213017751477, "acc_stderr": 0.009616784279885177, "em": 0.057692307692307696}}, "versions": {"ethics_justice": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/ethics_utilitarianism-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"ethics_utilitarianism": {"acc": 0.49771214642262895, "acc_stderr": 0.007211546310787838}}, "versions": {"ethics_utilitarianism": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/ethics_utilitarianism_original-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 5b42ba1faf5ece6a6ec9a3976ce79c1fac8df5b98272aab85457188c2142693c
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/ethics_virtue-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 8021db8de46850090ddae6e6ec2d382029c3027b7c69884607503f916d09b709
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/gsm8k-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"gsm8k": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"gsm8k": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/headqa-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 767ca34d9714edd9fb030ddbcc35a64e5180d1e247b0cb557fbb22fdf971ad1f
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/headqa-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"headqa": {"acc": 0.23559445660102116, "acc_norm": 0.25018234865062, "acc_norm_stderr": 0.008272783230806014, "acc_stderr": 0.008105688874297972}}, "versions": {"headqa": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/headqa_en-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 09da45119b12a0144e3081f8fb790c2a22af7b9c3aac42f54423d348a711fbf5
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/headqa_en-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"headqa_en": {"acc": 0.23559445660102116, "acc_norm": 0.2447118891320204, "acc_norm_stderr": 0.008211629406841468, "acc_stderr": 0.008105688874297972}}, "versions": {"headqa_en": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/headqa_es-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 767ca34d9714edd9fb030ddbcc35a64e5180d1e247b0cb557fbb22fdf971ad1f
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/headqa_es-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"headqa_es": {"acc": 0.23559445660102116, "acc_norm": 0.25018234865062, "acc_norm_stderr": 0.008272783230806014, "acc_stderr": 0.008105688874297972}}, "versions": {"headqa_es": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hellaswag-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ abb808c97d6529eda6c11067837a132c62d25cba0394d720f80cca6df9f7196e
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hellaswag-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"hellaswag": {"acc": 0.24965146385182235, "acc_norm": 0.24756024696275641, "acc_norm_stderr": 0.004307128573285236, "acc_stderr": 0.004319267432460666}}, "versions": {"hellaswag": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-abstract_algebra-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ e35d1eeb356ac1084d4e9773f028cb3c81ba1c6e5574d598ac4a78aa467cd797
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-abstract_algebra-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"hendrycksTest-abstract_algebra": {"acc": 0.32, "acc_norm": 0.34, "acc_norm_stderr": 0.04760952285695235, "acc_stderr": 0.04688261722621504}}, "versions": {"hendrycksTest-abstract_algebra": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-anatomy-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ bf05e04ed8cf61cf3aad294ed3f5a16137775ffdd20f1b129022ddffc1251768
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-anatomy-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"hendrycksTest-anatomy": {"acc": 0.2222222222222222, "acc_norm": 0.23703703703703705, "acc_norm_stderr": 0.03673731683969506, "acc_stderr": 0.0359144408419697}}, "versions": {"hendrycksTest-anatomy": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-astronomy-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ bed1e47127cc2893c6aef63b9a0909cca31aa351a703da2a166b01cae03c3311
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-astronomy-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"hendrycksTest-astronomy": {"acc": 0.2565789473684211, "acc_norm": 0.29605263157894735, "acc_norm_stderr": 0.03715062154998904, "acc_stderr": 0.0355418036802569}}, "versions": {"hendrycksTest-astronomy": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-business_ethics-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ b3b27e9dbad587377d3c8cab1072782de883e245da93a563bd8b3099017b1fc0
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-business_ethics-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"hendrycksTest-business_ethics": {"acc": 0.29, "acc_norm": 0.27, "acc_norm_stderr": 0.044619604333847394, "acc_stderr": 0.045604802157206845}}, "versions": {"hendrycksTest-business_ethics": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-clinical_knowledge-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ fbcb7ce507e0675d811e71e10a67c8d05a6605e29036f46776e04a6588cefbda
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-clinical_knowledge-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"hendrycksTest-clinical_knowledge": {"acc": 0.23773584905660378, "acc_norm": 0.27169811320754716, "acc_norm_stderr": 0.027377706624670713, "acc_stderr": 0.02619980880756191}}, "versions": {"hendrycksTest-clinical_knowledge": 0}}
testbed/EleutherAI__lm-evaluation-harness/tests/testdata/hendrycksTest-college_biology-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ c29e4e67ff91af29b9434884874414d1b1b32ccc32903c6b1639469b19907419