Add files using upload-large-folder tool
Browse files- tests/dataset/test_humaneval.py +110 -0
- tests/dataset/test_local_datasets.py +230 -0
- tests/dataset/test_ms_datasets.py +226 -0
- tests/openicl/test_prompt_template.py +249 -0
- tests/prompt/test_api_template_parser.py +231 -0
- tests/prompt/test_lm_template_parser.py +235 -0
- tests/prompt/test_prompt_list.py +76 -0
tests/dataset/test_humaneval.py
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import unittest
|
| 2 |
+
|
| 3 |
+
from opencompass.datasets.humaneval import humaneval_postprocess
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def run_humaneval_check(completion):
    """Wrap a post-processed completion in a tiny program and execute it.

    The post-processed text is spliced in as the body of ``get_fraction``;
    the appended asserts then verify the resulting function actually works.
    Raises (via ``exec``) if the post-processed code is malformed or wrong.
    """
    header = 'def get_fraction(x: float) -> float:'
    body = humaneval_postprocess(completion)
    checks = [
        '',
        'assert get_fraction(1.28) == 0.28',
        'assert get_fraction(1.0) == 0.0',
    ]
    program = '\n'.join([header, body, *checks])
    exec(program)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class TestHumaneval(unittest.TestCase):
    """Smoke tests for ``humaneval_postprocess``.

    Each case feeds one raw model completion through the post-processor and
    checks (via ``run_humaneval_check``) that the cleaned code still executes
    correctly as a function body.
    """

    def test_vanilla(self):
        # Completion that is already a bare, correctly indented body.
        raw = '    return x - int(x)'
        run_humaneval_check(raw)

    def test_python_quote(self):
        # Completion wrapped in a ```python fenced code block.
        lines = [
            '```python',
            '    return x - int(x)',
            '```',
        ]
        raw = '\n'.join(lines)
        run_humaneval_check(raw)

    def test_bare_quote(self):
        # Fenced code block without a language tag.
        lines = [
            '```',
            '    return x - int(x)',
            '```',
        ]
        raw = '\n'.join(lines)
        run_humaneval_check(raw)

    def test_error_space_quote(self):
        # Fenced block whose body is mis-indented; the post-processor is
        # expected to repair the indentation.
        lines = [
            '```',
            '  return x - int(x)',
            '```',
        ]
        raw = '\n'.join(lines)
        run_humaneval_check(raw)

    def test_import_1(self):
        # Completion that re-states imports and a full function definition.
        lines = [
            'import numpy as np',
            'import math',
            'from typing import List',
            '',
            'def func(x):',
            '    return x - int(x)',
        ]
        raw = '\n'.join(lines)
        run_humaneval_check(raw)

    def test_import_2(self):
        # Imports directly followed by the def, with no separating blank line.
        lines = [
            'from typing import List',
            'import numpy as np',
            'import math',
            'def func(x):',
            '    return x - int(x)',
        ]
        raw = '\n'.join(lines)
        run_humaneval_check(raw)

    def test_import_3(self):
        # Single import separated from the def by blank lines.
        lines = [
            'import math',
            '',
            '',
            'def func(x):',
            '    return x - int(x)',
        ]
        raw = '\n'.join(lines)
        run_humaneval_check(raw)

    def test_comment(self):
        # Full function definition including a docstring.
        lines = [
            'def func(x: float) -> float:',
            "    '''",
            '    blah blah blah',
            '    blah blah blah',
            "    '''",
            '    return x - int(x)',
        ]
        raw = '\n'.join(lines)
        run_humaneval_check(raw)

    def test_additional(self):
        # Bare body followed by a redundant full re-definition; the
        # post-processor should keep only a working implementation.
        lines = [
            '    return x - int(x)',
            '',
            '',
            'def func(x: float) -> float:',
            "    '''",
            '    blah blah blah',
            '    blah blah blah',
            "    '''",
            '    return x - int(x)',
        ]
        raw = '\n'.join(lines)
        run_humaneval_check(raw)
|
tests/dataset/test_local_datasets.py
ADDED
|
@@ -0,0 +1,230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
import sys
|
| 3 |
+
import unittest
|
| 4 |
+
import warnings
|
| 5 |
+
from os import environ
|
| 6 |
+
|
| 7 |
+
from datasets import Dataset, DatasetDict
|
| 8 |
+
from mmengine.config import read_base
|
| 9 |
+
from tqdm import tqdm
|
| 10 |
+
|
| 11 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 12 |
+
|
| 13 |
+
warnings.filterwarnings('ignore', category=DeprecationWarning)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def reload_datasets():
    """Freshly re-import every dataset config under ``configs.datasets`` and
    return the concatenated list of all ``*_datasets`` config entries.

    The config modules are first evicted from ``sys.modules`` so that the
    re-import actually re-executes them — presumably they consult the
    DATASET_SOURCE environment variable at import time (set by the caller,
    ``load_datasets_conf``); TODO confirm.
    """
    # Evict all previously imported dataset-config modules.
    modules_to_remove = [
        module_name for module_name in sys.modules
        if module_name.startswith('configs.datasets')
    ]

    for module_name in modules_to_remove:
        del sys.modules[module_name]

    # Re-import the configs; read_base() allows plain imports of
    # mmengine-style config files.
    with read_base():
        from configs.datasets.ceval.ceval_gen import ceval_datasets
        from configs.datasets.gsm8k.gsm8k_gen import gsm8k_datasets
        from configs.datasets.cmmlu.cmmlu_gen import cmmlu_datasets
        from configs.datasets.ARC_c.ARC_c_gen import ARC_c_datasets
        from configs.datasets.ARC_e.ARC_e_gen import ARC_e_datasets
        from configs.datasets.humaneval.humaneval_gen import humaneval_datasets
        from configs.datasets.humaneval.humaneval_repeat10_gen_8e312c import humaneval_datasets as humaneval_repeat10_datasets
        from configs.datasets.race.race_ppl import race_datasets
        from configs.datasets.commonsenseqa.commonsenseqa_gen import commonsenseqa_datasets

        from configs.datasets.mmlu.mmlu_gen import mmlu_datasets
        from configs.datasets.strategyqa.strategyqa_gen import strategyqa_datasets
        from configs.datasets.bbh.bbh_gen import bbh_datasets
        from configs.datasets.Xsum.Xsum_gen import Xsum_datasets
        from configs.datasets.winogrande.winogrande_gen import winogrande_datasets
        from configs.datasets.winogrande.winogrande_ll import winogrande_datasets as winogrande_ll_datasets
        from configs.datasets.winogrande.winogrande_5shot_ll_252f01 import winogrande_datasets as winogrande_5shot_ll_datasets
        from configs.datasets.obqa.obqa_gen import obqa_datasets
        from configs.datasets.obqa.obqa_ppl_6aac9e import obqa_datasets as obqa_ppl_datasets
        from configs.datasets.agieval.agieval_gen import agieval_datasets as agieval_v2_datasets
        # from configs.datasets.agieval.agieval_gen_a0c741 import agieval_datasets as agieval_v1_datasets
        from configs.datasets.siqa.siqa_gen import siqa_datasets as siqa_v2_datasets
        from configs.datasets.siqa.siqa_gen_18632c import siqa_datasets as siqa_v3_datasets
        from configs.datasets.siqa.siqa_ppl_42bc6e import siqa_datasets as siqa_ppl_datasets
        from configs.datasets.storycloze.storycloze_gen import storycloze_datasets
        from configs.datasets.storycloze.storycloze_ppl import storycloze_datasets as storycloze_ppl_datasets
        from configs.datasets.summedits.summedits_gen import summedits_datasets as summedits_v2_datasets

        from configs.datasets.hellaswag.hellaswag_gen import hellaswag_datasets as hellaswag_v2_datasets
        from configs.datasets.hellaswag.hellaswag_10shot_gen_e42710 import hellaswag_datasets as hellaswag_ice_datasets
        from configs.datasets.hellaswag.hellaswag_ppl_9dbb12 import hellaswag_datasets as hellaswag_v1_datasets
        from configs.datasets.hellaswag.hellaswag_ppl_a6e128 import hellaswag_datasets as hellaswag_v3_datasets
        from configs.datasets.mbpp.mbpp_gen import mbpp_datasets as mbpp_v1_datasets
        from configs.datasets.mbpp.mbpp_passk_gen_830460 import mbpp_datasets as mbpp_v2_datasets
        from configs.datasets.mbpp.sanitized_mbpp_gen_830460 import sanitized_mbpp_datasets
        from configs.datasets.nq.nq_gen import nq_datasets
        from configs.datasets.lcsts.lcsts_gen import lcsts_datasets
        from configs.datasets.math.math_gen import math_datasets
        from configs.datasets.piqa.piqa_gen import piqa_datasets as piqa_v2_datasets
        from configs.datasets.piqa.piqa_ppl import piqa_datasets as piqa_v1_datasets
        from configs.datasets.piqa.piqa_ppl_0cfff2 import piqa_datasets as piqa_v3_datasets
        from configs.datasets.lambada.lambada_gen import lambada_datasets
        from configs.datasets.tydiqa.tydiqa_gen import tydiqa_datasets
        from configs.datasets.GaokaoBench.GaokaoBench_gen import GaokaoBench_datasets
        from configs.datasets.GaokaoBench.GaokaoBench_mixed import GaokaoBench_datasets as GaokaoBench_mixed_datasets
        from configs.datasets.GaokaoBench.GaokaoBench_no_subjective_gen_4c31db import GaokaoBench_datasets as GaokaoBench_no_subjective_datasets
        from configs.datasets.triviaqa.triviaqa_gen import triviaqa_datasets
        from configs.datasets.triviaqa.triviaqa_wiki_1shot_gen_20a989 import triviaqa_datasets as triviaqa_wiki_1shot_datasets

        from configs.datasets.CLUE_cmnli.CLUE_cmnli_gen import cmnli_datasets
        from configs.datasets.CLUE_cmnli.CLUE_cmnli_ppl import cmnli_datasets as cmnli_ppl_datasets
        from configs.datasets.CLUE_ocnli.CLUE_ocnli_gen import ocnli_datasets

        from configs.datasets.ceval.ceval_clean_ppl import ceval_datasets as ceval_clean_datasets
        from configs.datasets.ARC_c.ARC_c_clean_ppl import ARC_c_datasets as ARC_c_clean_datasets
        from configs.datasets.mmlu.mmlu_clean_ppl import mmlu_datasets as mmlu_clean_datasets
        from configs.datasets.hellaswag.hellaswag_clean_ppl import hellaswag_datasets as hellaswag_clean_datasets
        from configs.datasets.FewCLUE_ocnli_fc.FewCLUE_ocnli_fc_gen import ocnli_fc_datasets

    # Flatten every local variable named *_datasets into one list.
    # NOTE: the outermost iterable of the genexp (locals().items()) is
    # evaluated eagerly in this function's scope, so the result follows the
    # import order above — do not reorder the imports.
    return sum((v for k, v in locals().items() if k.endswith('_datasets')), [])
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def load_datasets_conf(source):
    """Point DATASET_SOURCE at *source* and return the freshly re-imported
    list of dataset config dicts."""
    environ['DATASET_SOURCE'] = source
    return reload_datasets()
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def load_datasets(source, conf):
    """Load the single dataset described by config dict *conf*.

    Sets the DATASET_SOURCE environment variable to *source* ('Local' or
    'ModelScope') so the loader reads from the right backend, then dispatches
    on which optional keys the config carries ('lang', 'setting_name',
    'name', 'local_mode'). Falls back to a plain path load and, failing
    that, to passing the whole config through as keyword arguments.

    Returns whatever ``conf['type'].load(...)`` returns; propagates the
    loader's exception if the final fallback also fails.
    """
    environ['DATASET_SOURCE'] = source
    if 'lang' in conf:
        dataset = conf['type'].load(path=conf['path'], lang=conf['lang'])
        return dataset
    if 'setting_name' in conf:
        dataset = conf['type'].load(path=conf['path'],
                                    name=conf['name'],
                                    setting_name=conf['setting_name'])
        return dataset
    if 'name' in conf:
        dataset = conf['type'].load(path=conf['path'], name=conf['name'])
        return dataset

    if 'local_mode' in conf:
        dataset = conf['type'].load(path=conf['path'], local_mode=conf['local_mode'])
        return dataset
    try:
        dataset = conf['type'].load(path=conf['path'])
    except Exception as e:
        # Surface the first failure before retrying with the raw config,
        # consistent with tests/dataset/test_ms_datasets.py (the original
        # silently swallowed it, hiding the real cause of a bad load).
        print(e)
        dataset = conf['type'].load(**conf)
    return dataset
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def clean_string(value):
    """Normalize a value for comparison.

    Strings have leading/trailing whitespace stripped and every internal run
    of whitespace collapsed to a single space; any other type is returned
    unchanged.
    """
    if not isinstance(value, str):
        return value
    return ' '.join(value.split())
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
class TestingLocalDatasets(unittest.TestCase):
    """Check that every dataset config under ``configs.datasets`` can be
    loaded from local storage (DATASET_SOURCE='Local')."""

    def test_datasets(self):
        # Load the Local dataset configs (the ModelScope comparison lives in
        # tests/dataset/test_ms_datasets.py).
        local_datasets_conf = load_datasets_conf('Local')

        # Track which configs loaded and which failed.
        successful_comparisons = []
        failed_comparisons = []

        def compare_datasets(local_conf):
            local_path_name = f"{local_conf.get('path')}/{local_conf.get('name', '')}\t{local_conf.get('lang', '')}"
            try:
                # Fix: the original also called load_datasets once *outside*
                # this try block, so a loader exception escaped the
                # failure accounting and surfaced via future.result().
                load_datasets('Local', local_conf)
                return 'success', f'{local_path_name}'
            except Exception:
                return 'failure', f'can\'t load {local_path_name}'

        with ThreadPoolExecutor(16) as executor:
            futures = {
                executor.submit(compare_datasets, local_conf): local_conf
                for local_conf in local_datasets_conf
            }

            for future in tqdm(as_completed(futures), total=len(futures)):
                result, message = future.result()
                if result == 'success':
                    successful_comparisons.append(message)
                else:
                    failed_comparisons.append(message)

        # Print a summary of the run.
        total_datasets = len(local_datasets_conf)
        print(f"All {total_datasets} datasets")
        print(f"OK {len(successful_comparisons)} datasets")
        for success in successful_comparisons:
            print(f"  {success}")
        print(f"Fail {len(failed_comparisons)} datasets")
        for failure in failed_comparisons:
            print(f"  {failure}")
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
def _check_data(ms_dataset: Dataset | DatasetDict,
                oc_dataset: Dataset | DatasetDict,
                sample_size):
    """Recursively assert that two loaded datasets are equivalent.

    Checks: identical container type; for DatasetDict, identical split names
    (then recurses per split); for Dataset, identical column sets, identical
    row counts, and whitespace-normalized string equality on *sample_size*
    randomly sampled rows. Raises AssertionError on the first mismatch and
    ValueError for unsupported container types.
    """
    assert type(ms_dataset) == type(
        oc_dataset
    ), f'Dataset type not match: {type(ms_dataset)} != {type(oc_dataset)}'

    # match DatasetDict: same split names, then compare split by split.
    if isinstance(oc_dataset, DatasetDict):
        assert ms_dataset.keys() == oc_dataset.keys(
        ), f'DatasetDict not match: {ms_dataset.keys()} != {oc_dataset.keys()}'

        for key in ms_dataset.keys():
            _check_data(ms_dataset[key], oc_dataset[key], sample_size=sample_size)

    elif isinstance(oc_dataset, Dataset):
        # match by cols
        assert set(ms_dataset.column_names) == set(
            oc_dataset.column_names
        ), f'Column names do not match: {ms_dataset.column_names} != {oc_dataset.column_names}'

        # Check that the number of rows is the same
        assert len(ms_dataset) == len(
            oc_dataset
        ), f'Number of rows do not match: {len(ms_dataset)} != {len(oc_dataset)}'

        # Randomly sample indices; never more rows than the dataset has.
        # NOTE(review): unseeded, so each run checks a different subset.
        sample_indices = random.sample(range(len(ms_dataset)),
                                       min(sample_size, len(ms_dataset)))

        for i, idx in enumerate(sample_indices):
            for col in ms_dataset.column_names:
                # Values are stringified then whitespace-normalized so that
                # formatting differences between backends don't fail the test.
                # NOTE(review): ms_dataset[col] appears to fetch the whole
                # column on each access — confirm this is acceptable here.
                ms_value = clean_string(str(ms_dataset[col][idx]))
                oc_value = clean_string(str(oc_dataset[col][idx]))
                try:
                    assert ms_value == oc_value, f"Value mismatch in column '{col}', index {idx}: {ms_value} != {oc_value}"
                except AssertionError as e:
                    # Dump the full rows and both values before re-raising so
                    # the failing sample is easy to inspect in the log.
                    print(f"Assertion failed for column '{col}', index {idx}")
                    print(f"ms_data: {ms_dataset[idx]}")
                    print(f'oc_data: {oc_dataset[idx]}')
                    print(f'ms_value: {ms_value} ({type(ms_value)})')
                    print(f'oc_value: {oc_value} ({type(oc_value)})')
                    raise e
    else:
        raise ValueError(f'Datasets type not supported {type(ms_dataset)}')
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
if __name__ == '__main__':
    # Rows sampled per dataset by _check_data.
    # NOTE(review): the comparison call that consumes this is currently
    # commented out in TestingLocalDatasets — confirm whether it is still
    # needed.
    sample_size = 100
    unittest.main()
|
tests/dataset/test_ms_datasets.py
ADDED
|
@@ -0,0 +1,226 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
import sys
|
| 3 |
+
import unittest
|
| 4 |
+
import warnings
|
| 5 |
+
from os import environ
|
| 6 |
+
|
| 7 |
+
from datasets import Dataset, DatasetDict
|
| 8 |
+
from mmengine.config import read_base
|
| 9 |
+
from tqdm import tqdm
|
| 10 |
+
|
| 11 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 12 |
+
|
| 13 |
+
warnings.filterwarnings('ignore', category=DeprecationWarning)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def reload_datasets():
    """Freshly re-import every dataset config under ``configs.datasets`` and
    return the concatenated list of all ``*_datasets`` config entries.

    The config modules are first evicted from ``sys.modules`` so that the
    re-import actually re-executes them — presumably they consult the
    DATASET_SOURCE environment variable at import time (set by the caller,
    ``load_datasets_conf``); TODO confirm.
    """
    # Evict all previously imported dataset-config modules.
    modules_to_remove = [
        module_name for module_name in sys.modules
        if module_name.startswith('configs.datasets')
    ]

    for module_name in modules_to_remove:
        del sys.modules[module_name]

    # Re-import the configs; read_base() allows plain imports of
    # mmengine-style config files.
    with read_base():
        from configs.datasets.ceval.ceval_gen import ceval_datasets
        from configs.datasets.gsm8k.gsm8k_gen import gsm8k_datasets
        from configs.datasets.cmmlu.cmmlu_gen import cmmlu_datasets
        from configs.datasets.ARC_c.ARC_c_gen import ARC_c_datasets
        from configs.datasets.ARC_e.ARC_e_gen import ARC_e_datasets
        from configs.datasets.humaneval.humaneval_gen import humaneval_datasets
        from configs.datasets.humaneval.humaneval_repeat10_gen_8e312c import humaneval_datasets as humaneval_repeat10_datasets
        from configs.datasets.race.race_ppl import race_datasets
        from configs.datasets.commonsenseqa.commonsenseqa_gen import commonsenseqa_datasets

        from configs.datasets.mmlu.mmlu_gen import mmlu_datasets
        from configs.datasets.bbh.bbh_gen import bbh_datasets
        from configs.datasets.Xsum.Xsum_gen import Xsum_datasets
        from configs.datasets.winogrande.winogrande_gen import winogrande_datasets
        from configs.datasets.winogrande.winogrande_ll import winogrande_datasets as winogrande_ll_datasets
        from configs.datasets.winogrande.winogrande_5shot_ll_252f01 import winogrande_datasets as winogrande_5shot_ll_datasets
        from configs.datasets.obqa.obqa_gen import obqa_datasets
        from configs.datasets.obqa.obqa_ppl_6aac9e import obqa_datasets as obqa_ppl_datasets
        from configs.datasets.agieval.agieval_gen import agieval_datasets as agieval_v2_datasets
        from configs.datasets.agieval.agieval_gen_a0c741 import agieval_datasets as agieval_v1_datasets
        from configs.datasets.siqa.siqa_gen import siqa_datasets as siqa_v2_datasets
        from configs.datasets.siqa.siqa_gen_18632c import siqa_datasets as siqa_v3_datasets
        from configs.datasets.siqa.siqa_ppl_42bc6e import siqa_datasets as siqa_ppl_datasets
        from configs.datasets.storycloze.storycloze_gen import storycloze_datasets
        from configs.datasets.storycloze.storycloze_ppl import storycloze_datasets as storycloze_ppl_datasets
        from configs.datasets.summedits.summedits_gen import summedits_datasets as summedits_v2_datasets

        from configs.datasets.strategyqa.strategyqa_gen import strategyqa_datasets
        from configs.datasets.mbpp.mbpp_gen import mbpp_datasets as mbpp_v1_datasets
        from configs.datasets.lcsts.lcsts_gen import lcsts_datasets

        from configs.datasets.hellaswag.hellaswag_gen import hellaswag_datasets as hellaswag_v2_datasets
        from configs.datasets.hellaswag.hellaswag_10shot_gen_e42710 import hellaswag_datasets as hellaswag_ice_datasets
        from configs.datasets.hellaswag.hellaswag_ppl_9dbb12 import hellaswag_datasets as hellaswag_v1_datasets
        from configs.datasets.hellaswag.hellaswag_ppl_a6e128 import hellaswag_datasets as hellaswag_v3_datasets
        from configs.datasets.mbpp.mbpp_passk_gen_830460 import mbpp_datasets as mbpp_v2_datasets
        from configs.datasets.mbpp.sanitized_mbpp_gen_830460 import sanitized_mbpp_datasets
        from configs.datasets.nq.nq_gen import nq_datasets
        from configs.datasets.math.math_gen import math_datasets
        from configs.datasets.piqa.piqa_gen import piqa_datasets as piqa_v2_datasets
        from configs.datasets.piqa.piqa_ppl import piqa_datasets as piqa_v1_datasets
        from configs.datasets.piqa.piqa_ppl_0cfff2 import piqa_datasets as piqa_v3_datasets
        from configs.datasets.lambada.lambada_gen import lambada_datasets
        from configs.datasets.tydiqa.tydiqa_gen import tydiqa_datasets
        from configs.datasets.GaokaoBench.GaokaoBench_gen import GaokaoBench_datasets
        from configs.datasets.GaokaoBench.GaokaoBench_mixed import GaokaoBench_datasets as GaokaoBench_mixed_datasets
        from configs.datasets.GaokaoBench.GaokaoBench_no_subjective_gen_4c31db import GaokaoBench_datasets as GaokaoBench_no_subjective_datasets
        from configs.datasets.triviaqa.triviaqa_gen import triviaqa_datasets
        from configs.datasets.triviaqa.triviaqa_wiki_1shot_gen_20a989 import triviaqa_datasets as triviaqa_wiki_1shot_datasets

        from configs.datasets.CLUE_cmnli.CLUE_cmnli_gen import cmnli_datasets
        from configs.datasets.CLUE_cmnli.CLUE_cmnli_ppl import cmnli_datasets as cmnli_ppl_datasets
        from configs.datasets.CLUE_ocnli.CLUE_ocnli_gen import ocnli_datasets

        from configs.datasets.ceval.ceval_clean_ppl import ceval_datasets as ceval_clean_datasets
        from configs.datasets.ARC_c.ARC_c_clean_ppl import ARC_c_datasets as ARC_c_clean_datasets
        from configs.datasets.mmlu.mmlu_clean_ppl import mmlu_datasets as mmlu_clean_datasets
        from configs.datasets.hellaswag.hellaswag_clean_ppl import hellaswag_datasets as hellaswag_clean_datasets

    # Flatten every local variable named *_datasets into one list.
    # NOTE: the outermost iterable of the genexp (locals().items()) is
    # evaluated eagerly in this function's scope, so the result follows the
    # import order above — do not reorder the imports.
    return sum((v for k, v in locals().items() if k.endswith('_datasets')), [])
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def load_datasets_conf(source):
    """Point DATASET_SOURCE at *source* and return the freshly re-imported
    list of dataset config dicts."""
    environ['DATASET_SOURCE'] = source
    return reload_datasets()
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def load_datasets(source, conf):
    """Load the single dataset described by config dict *conf*.

    Sets the DATASET_SOURCE environment variable to *source* ('ModelScope'
    or 'Local') so the loader reads from the right backend, then dispatches
    on which optional keys the config carries ('lang', 'setting_name',
    'name', 'local_mode'). Falls back to a plain path load and, failing
    that, to passing the whole config through as keyword arguments.

    Returns whatever ``conf['type'].load(...)`` returns; propagates the
    loader's exception if the final fallback also fails.
    """
    environ['DATASET_SOURCE'] = source
    if 'lang' in conf:
        dataset = conf['type'].load(path=conf['path'], lang=conf['lang'])
        return dataset
    if 'setting_name' in conf:
        dataset = conf['type'].load(path=conf['path'],
                                    name=conf['name'],
                                    setting_name=conf['setting_name'])
        return dataset
    if 'name' in conf:
        dataset = conf['type'].load(path=conf['path'], name=conf['name'])
        return dataset
    if 'local_mode' in conf:
        # Added for consistency with tests/dataset/test_local_datasets.py,
        # which dispatches explicitly on 'local_mode' instead of relying on
        # the exception fallback below.
        dataset = conf['type'].load(path=conf['path'], local_mode=conf['local_mode'])
        return dataset
    try:
        dataset = conf['type'].load(path=conf['path'])
    except Exception as e:
        # Surface the first failure before retrying with the raw config.
        print(e)
        dataset = conf['type'].load(**conf)
    return dataset
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def clean_string(value):
    """Normalize a value for comparison.

    Strings have leading/trailing whitespace stripped and every internal run
    of whitespace collapsed to a single space; any other type is returned
    unchanged.
    """
    if not isinstance(value, str):
        return value
    return ' '.join(value.split())
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
class TestingMsDatasets(unittest.TestCase):
    """Compare every dataset config loaded from ModelScope against the same
    config loaded from local storage, pairwise."""

    def test_datasets(self):
        # 'thread' and 'sample_size' are module globals set only in the
        # __main__ block; fall back to defaults so the test also works under
        # a runner (e.g. pytest) that never executes that block (the
        # original raised NameError there).
        num_threads = globals().get('thread', 1)
        size = globals().get('sample_size', 100)

        # Load the ModelScope and Local dataset configs.
        ms_datasets_conf = load_datasets_conf('ModelScope')
        local_datasets_conf = load_datasets_conf('Local')

        # Track which dataset pairs matched and which did not.
        successful_comparisons = []
        failed_comparisons = []

        def compare_datasets(ms_conf, local_conf):
            modelscope_path_name = f"{ms_conf.get('path')}/{ms_conf.get('name', '')}\t{ms_conf.get('lang', '')}"
            local_path_name = f"{local_conf.get('path')}/{local_conf.get('name', '')}\t{local_conf.get('lang', '')}"
            # Configs are zipped pairwise, so their dataset types must agree.
            assert ms_conf['type'] == local_conf['type'], "Data types do not match"
            print(modelscope_path_name, local_path_name)
            try:
                ms_dataset = load_datasets('ModelScope', ms_conf)
                local_dataset = load_datasets('Local', local_conf)
                _check_data(ms_dataset, local_dataset, sample_size=size)
                return 'success', f'{modelscope_path_name} | {local_path_name}'
            except Exception as exception:
                print(exception)
                return 'failure', f'{modelscope_path_name} is not the same as {local_path_name}'

        with ThreadPoolExecutor(num_threads) as executor:
            futures = {
                executor.submit(compare_datasets, ms_conf, local_conf): (ms_conf, local_conf)
                for ms_conf, local_conf in zip(ms_datasets_conf, local_datasets_conf)
            }

            for future in tqdm(as_completed(futures), total=len(futures)):
                result, message = future.result()
                if result == 'success':
                    successful_comparisons.append(message)
                else:
                    failed_comparisons.append(message)

        # Print the comparison summary.
        total_datasets = len(ms_datasets_conf)
        print(f"All {total_datasets} datasets")
        print(f"OK {len(successful_comparisons)} datasets")
        for success in successful_comparisons:
            print(f"  {success}")
        print(f"Fail {len(failed_comparisons)} datasets")
        for failure in failed_comparisons:
            print(f"  {failure}")
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def _check_data(ms_dataset: Dataset | DatasetDict,
                oc_dataset: Dataset | DatasetDict,
                sample_size):
    """Recursively assert that two loaded datasets are equivalent.

    Checks: identical container type; for DatasetDict, identical split names
    (then recurses per split); for Dataset, identical column sets, identical
    row counts, and whitespace-normalized string equality on *sample_size*
    randomly sampled rows. Raises AssertionError on the first mismatch and
    ValueError for unsupported container types.
    """
    assert type(ms_dataset) == type(
        oc_dataset
    ), f'Dataset type not match: {type(ms_dataset)} != {type(oc_dataset)}'

    # match DatasetDict: same split names, then compare split by split.
    if isinstance(oc_dataset, DatasetDict):
        assert ms_dataset.keys() == oc_dataset.keys(
        ), f'DatasetDict not match: {ms_dataset.keys()} != {oc_dataset.keys()}'

        for key in ms_dataset.keys():
            _check_data(ms_dataset[key], oc_dataset[key], sample_size=sample_size)

    elif isinstance(oc_dataset, Dataset):
        # match by cols
        assert set(ms_dataset.column_names) == set(
            oc_dataset.column_names
        ), f'Column names do not match: {ms_dataset.column_names} != {oc_dataset.column_names}'

        # Check that the number of rows is the same
        assert len(ms_dataset) == len(
            oc_dataset
        ), f'Number of rows do not match: {len(ms_dataset)} != {len(oc_dataset)}'

        # Randomly sample indices; never more rows than the dataset has.
        # NOTE(review): unseeded, so each run checks a different subset.
        sample_indices = random.sample(range(len(ms_dataset)),
                                       min(sample_size, len(ms_dataset)))

        for i, idx in enumerate(sample_indices):
            for col in ms_dataset.column_names:
                # Values are stringified then whitespace-normalized so that
                # formatting differences between backends don't fail the test.
                # NOTE(review): ms_dataset[col] appears to fetch the whole
                # column on each access — confirm this is acceptable here.
                ms_value = clean_string(str(ms_dataset[col][idx]))
                oc_value = clean_string(str(oc_dataset[col][idx]))
                try:
                    assert ms_value == oc_value, f"Value mismatch in column '{col}', index {idx}: {ms_value} != {oc_value}"
                except AssertionError as e:
                    # Dump the full rows and both values before re-raising so
                    # the failing sample is easy to inspect in the log.
                    print(f"Assertion failed for column '{col}', index {idx}")
                    print(f"ms_data: {ms_dataset[idx]}")
                    print(f'oc_data: {oc_dataset[idx]}')
                    print(f'ms_value: {ms_value} ({type(ms_value)})')
                    print(f'oc_value: {oc_value} ({type(oc_value)})')
                    raise e
    else:
        raise ValueError(f'Datasets type not supported {type(ms_dataset)}')
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
if __name__ == '__main__':
    # Module globals read inside TestingMsDatasets.test_datasets:
    # sample_size — rows sampled per dataset by _check_data;
    # thread — worker count for the ThreadPoolExecutor.
    # NOTE(review): a test runner such as pytest skips this block — verify
    # the test still has values for these names in that case.
    sample_size = 100
    thread = 1
    unittest.main()
|
tests/openicl/test_prompt_template.py
ADDED
|
@@ -0,0 +1,249 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import unittest
|
| 2 |
+
|
| 3 |
+
from opencompass.openicl.icl_prompt_template import PromptTemplate
|
| 4 |
+
from opencompass.utils.prompt import PromptList
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class TestPromptTemplate(unittest.TestCase):
    """Unit tests for ``opencompass.openicl.icl_prompt_template.PromptTemplate``.

    Covers three entry points: ``generate_ice_item`` (in-context example
    rendering), ``generate_label_prompt_item`` (prompt with in-context
    examples spliced at the ice token), and ``generate_item`` (full prompt
    generation). Both plain-string templates and dict-style (meta prompt)
    templates are exercised.
    """

    def setUp(self) -> None:
        # Dialogue-style template with a begin section holding a SYSTEM
        # instruction and the ice token '</E>', plus one QA round.
        self.qa_template = dict(begin=[
            dict(role='SYSTEM', fallback_role='HUMAN', prompt='instruct'),
            '</E>',
        ],
                                round=[
                                    dict(role='HUMAN', prompt='{input}'),
                                    dict(role='BOT', prompt='Answer: {answer}')
                                ])
        # Multi-round template; some entries carry explicit per-message
        # 'begin'/'end' markers that must survive rendering unchanged.
        self.multiround_qa_template = dict(round=[
            dict(role='HUMAN', prompt='{input}'),
            dict(role='BOT', prompt='A1', end='\n'),
            dict(role='HUMAN', prompt='Q1'),
            dict(role='BOT', prompt='A2', end='\n\n'),
            dict(role='HUMAN', prompt='Q2', begin='HUMAN:'),
            dict(role='BOT', prompt='Answer: {answer}')
        ])
        # Single data entry used to fill the {input}/{answer} placeholders.
        self.entry = {'input': 'Hello, how are you?', 'answer': 'Good.'}

    def test_init(self):
        """Constructor stores the template verbatim."""
        template = 'Translate the following English text to French: {input}.'
        pt = PromptTemplate(template)

        self.assertEqual(pt.template, template)

    def test_generate_ice_item(self):
        """In-context example rendering for string and dict templates."""
        # Test simple prompt: plain-string template is filled in place.
        template = 'Translate the following English text to French: {input}.'
        pt = PromptTemplate(template)
        label = None
        ice = pt.generate_ice_item(self.entry, label)

        self.assertEqual(ice,
                         ('Translate the following English text to French: '
                          'Hello, how are you?.'))

        # test meta prompt style: only the round is rendered, wrapped in
        # 'ice' section markers; the begin section is NOT part of an ice item.
        pt = PromptTemplate(self.qa_template, ice_token='</E>')
        label = None
        ice = pt.generate_ice_item(self.entry, label)

        ice_target = PromptList([
            {
                'section': 'ice',
                'pos': 'begin'
            },
            dict(role='HUMAN', prompt='Hello, how are you?'),
            dict(role='BOT', prompt='Answer: Good.'),
            {
                'section': 'ice',
                'pos': 'end'
            },
        ])
        self.assertEqual(ice, ice_target)

        # test_multiround: every round message appears, with its
        # per-message begin/end markers preserved.
        pt = PromptTemplate(self.multiround_qa_template, ice_token='</E>')
        label = None
        ice = pt.generate_ice_item(self.entry, label)

        ice_target = PromptList([
            {
                'section': 'ice',
                'pos': 'begin'
            },
            dict(role='HUMAN', prompt='Hello, how are you?'),
            dict(role='BOT', prompt='A1', end='\n'),
            dict(role='HUMAN', prompt='Q1'),
            dict(role='BOT', prompt='A2', end='\n\n'),
            dict(role='HUMAN', prompt='Q2', begin='HUMAN:'),
            dict(role='BOT', prompt='Answer: Good.'),
            {
                'section': 'ice',
                'pos': 'end'
            },
        ])
        self.assertEqual(ice, ice_target)

    def test_generate_label_prompt_item(self):
        """Prompt rendering with in-context examples spliced at '</E>'."""
        # Test simple prompt: the ice string replaces the ice token.
        template = ('</E> Translate the following English text to French: '
                    '{input}.')
        pt = PromptTemplate(template, ice_token='</E>')
        ice = 'ICE'
        label = None
        prompt = pt.generate_label_prompt_item(self.entry, ice, label)

        self.assertEqual(
            prompt, ('ICE Translate the following English text to French: '
                     'Hello, how are you?.'))

        # A PromptList-valued ice block, reused by the dict-template cases.
        ice = PromptList([
            {
                'section': 'ice',
                'pos': 'begin'
            },
            dict(role='HUMAN', prompt='h1'),
            dict(role='BOT', prompt='b1'),
            {
                'section': 'ice',
                'pos': 'end'
            },
        ])

        # test meta prompt style: begin section wraps SYSTEM + ice,
        # followed by the rendered round section.
        pt = PromptTemplate(self.qa_template, ice_token='</E>')
        label = None
        prompt = pt.generate_label_prompt_item(self.entry, ice, label)
        target = PromptList([
            {
                'section': 'begin',
                'pos': 'begin'
            },
            dict(role='SYSTEM', fallback_role='HUMAN', prompt='instruct'),
            {
                'section': 'ice',
                'pos': 'begin'
            },
            dict(role='HUMAN', prompt='h1'),
            dict(role='BOT', prompt='b1'),
            {
                'section': 'ice',
                'pos': 'end'
            },
            {
                'section': 'begin',
                'pos': 'end'
            },
            {
                'section': 'round',
                'pos': 'begin'
            },
            dict(role='HUMAN', prompt='Hello, how are you?'),
            dict(role='BOT', prompt='Answer: Good.'),
            {
                'section': 'round',
                'pos': 'end'
            },
        ])
        self.assertEqual(prompt, target)

        # test_multiround: template has no begin section, so the ice
        # block is dropped and only the round section is emitted.
        pt = PromptTemplate(self.multiround_qa_template, ice_token='</E>')
        label = None
        prompt = pt.generate_label_prompt_item(self.entry, ice, label)
        target = PromptList([
            {
                'section': 'round',
                'pos': 'begin'
            },
            dict(role='HUMAN', prompt='Hello, how are you?'),
            dict(role='BOT', prompt='A1', end='\n'),
            dict(role='HUMAN', prompt='Q1'),
            dict(role='BOT', prompt='A2', end='\n\n'),
            dict(role='HUMAN', prompt='Q2', begin='HUMAN:'),
            dict(role='BOT', prompt='Answer: Good.'),
            {
                'section': 'round',
                'pos': 'end'
            },
        ])
        self.assertEqual(prompt, target)

    def test_generate_item(self):
        """Full prompt generation via ``generate_item``."""
        # Test simple prompt
        template = 'Translate the following English text to French: {input}.'
        pt = PromptTemplate(template)
        item = pt.generate_item(self.entry)

        self.assertEqual(item,
                         ('Translate the following English text to French: '
                          'Hello, how are you?.'))

        ice = PromptList([
            {
                'section': 'ice',
                'pos': 'begin'
            },
            dict(role='HUMAN', prompt='h1'),
            dict(role='BOT', prompt='b1'),
            {
                'section': 'ice',
                'pos': 'end'
            },
        ])

        # test meta prompt (without system role): structure matches
        # generate_label_prompt_item with the ice replacement token.
        pt = PromptTemplate(self.qa_template, ice_token='</E>')
        prompt = pt.generate_item(self.entry, ice_field_replace_token=ice)
        target = PromptList([
            {
                'section': 'begin',
                'pos': 'begin'
            },
            dict(role='SYSTEM', fallback_role='HUMAN', prompt='instruct'),
            {
                'section': 'ice',
                'pos': 'begin'
            },
            dict(role='HUMAN', prompt='h1'),
            dict(role='BOT', prompt='b1'),
            {
                'section': 'ice',
                'pos': 'end'
            },
            {
                'section': 'begin',
                'pos': 'end'
            },
            {
                'section': 'round',
                'pos': 'begin'
            },
            dict(role='HUMAN', prompt='Hello, how are you?'),
            dict(role='BOT', prompt='Answer: Good.'),
            {
                'section': 'round',
                'pos': 'end'
            },
        ])
        self.assertEqual(prompt, target)

        # Multi-round template: no begin section, so the ice block is
        # absent from the generated prompt.
        pt = PromptTemplate(self.multiround_qa_template, ice_token='</E>')
        prompt = pt.generate_item(self.entry, ice_field_replace_token=ice)
        target = PromptList([
            {
                'section': 'round',
                'pos': 'begin'
            },
            dict(role='HUMAN', prompt='Hello, how are you?'),
            dict(role='BOT', prompt='A1', end='\n'),
            dict(role='HUMAN', prompt='Q1'),
            dict(role='BOT', prompt='A2', end='\n\n'),
            dict(role='HUMAN', prompt='Q2', begin='HUMAN:'),
            dict(role='BOT', prompt='Answer: Good.'),
            {
                'section': 'round',
                'pos': 'end'
            },
        ])
        self.assertEqual(prompt, target)
|
tests/prompt/test_api_template_parser.py
ADDED
|
@@ -0,0 +1,231 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import unittest
|
| 2 |
+
|
| 3 |
+
from opencompass.models.base_api import APITemplateParser
|
| 4 |
+
from opencompass.utils.prompt import PromptList
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class TestAPITemplateParser(unittest.TestCase):
    """Unit tests for ``opencompass.models.base_api.APITemplateParser``.

    ``parse_template`` is exercised with three input shapes (str, list,
    PromptList) in both 'gen' and 'ppl' modes, with and without a
    meta template. For API models, a meta template maps internal roles
    to API roles and drops section markers.
    """

    def setUp(self):
        self.parser = APITemplateParser()
        # A fully section-annotated prompt: begin (with SYSTEM + ice),
        # round (two QA turns), and end sections.
        self.prompt = PromptList([
            {
                'section': 'begin',
                'pos': 'begin'
            },
            'begin',
            {
                'role': 'SYSTEM',
                'fallback_role': 'HUMAN',
                'prompt': 'system msg'
            },
            {
                'section': 'ice',
                'pos': 'begin'
            },
            {
                'role': 'HUMAN',
                'prompt': 'U0'
            },
            {
                'role': 'BOT',
                'prompt': 'B0'
            },
            {
                'section': 'ice',
                'pos': 'end'
            },
            {
                'section': 'begin',
                'pos': 'end'
            },
            {
                'section': 'round',
                'pos': 'begin'
            },
            {
                'role': 'HUMAN',
                'prompt': 'U1'
            },
            {
                'role': 'BOT',
                'prompt': 'B1'
            },
            {
                'role': 'HUMAN',
                'prompt': 'U2'
            },
            {
                'role': 'BOT',
                'prompt': 'B2'
            },
            {
                'section': 'round',
                'pos': 'end'
            },
            {
                'section': 'end',
                'pos': 'begin'
            },
            'end',
            {
                'section': 'end',
                'pos': 'end'
            },
        ])

    def test_parse_template_str_input(self):
        """Plain strings pass through untouched in both modes."""
        prompt = self.parser.parse_template('Hello, world!', mode='gen')
        self.assertEqual(prompt, 'Hello, world!')
        prompt = self.parser.parse_template('Hello, world!', mode='ppl')
        self.assertEqual(prompt, 'Hello, world!')

    def test_parse_template_list_input(self):
        """Plain lists are parsed element-wise and returned as lists."""
        prompt = self.parser.parse_template(['Hello', 'world'], mode='gen')
        self.assertEqual(prompt, ['Hello', 'world'])
        prompt = self.parser.parse_template(['Hello', 'world'], mode='ppl')
        self.assertEqual(prompt, ['Hello', 'world'])

    def test_parse_template_PromptList_input_no_meta_template(self):
        """Without a meta template the prompt flattens to newline-joined text."""
        prompt = self.parser.parse_template(self.prompt, mode='gen')
        self.assertEqual(prompt,
                         'begin\nsystem msg\nU0\nB0\nU1\nB1\nU2\nB2\nend')
        prompt = self.parser.parse_template(self.prompt, mode='ppl')
        self.assertEqual(prompt,
                         'begin\nsystem msg\nU0\nB0\nU1\nB1\nU2\nB2\nend')

    def test_parse_template_PromptList_input_with_meta_template(self):
        """Meta templates map roles to API turns.

        In 'gen' mode the final BOT turn is stripped (it is what the
        model generates); in 'ppl' mode it is kept. A warning is
        expected because the prompt contains a SYSTEM role the first
        meta template cannot express (it falls back to HUMAN and is
        merged into the next HUMAN turn).
        """
        parser = APITemplateParser(meta_template=dict(round=[
            dict(role='HUMAN', api_role='HUMAN'),
            dict(role='BOT', api_role='BOT', generate=True)
        ], ))
        with self.assertWarns(Warning):
            prompt = parser.parse_template(self.prompt, mode='gen')
        self.assertEqual(
            prompt,
            PromptList([
                {
                    'role': 'HUMAN',
                    'prompt': 'system msg\nU0'
                },
                {
                    'role': 'BOT',
                    'prompt': 'B0'
                },
                {
                    'role': 'HUMAN',
                    'prompt': 'U1'
                },
                {
                    'role': 'BOT',
                    'prompt': 'B1'
                },
                {
                    'role': 'HUMAN',
                    'prompt': 'U2'
                },
            ]))
        with self.assertWarns(Warning):
            prompt = parser.parse_template(self.prompt, mode='ppl')
        self.assertEqual(
            prompt,
            PromptList([
                {
                    'role': 'HUMAN',
                    'prompt': 'system msg\nU0'
                },
                {
                    'role': 'BOT',
                    'prompt': 'B0'
                },
                {
                    'role': 'HUMAN',
                    'prompt': 'U1'
                },
                {
                    'role': 'BOT',
                    'prompt': 'B1'
                },
                {
                    'role': 'HUMAN',
                    'prompt': 'U2'
                },
                {
                    'role': 'BOT',
                    'prompt': 'B2'
                },
            ]))

        # With a reserved SYSTEM role, the system message becomes its own
        # API turn instead of being folded into the first HUMAN turn.
        parser = APITemplateParser(meta_template=dict(
            round=[
                dict(role='HUMAN', api_role='HUMAN'),
                dict(role='BOT', api_role='BOT', generate=True)
            ],
            reserved_roles=[
                dict(role='SYSTEM', api_role='SYSTEM'),
            ],
        ))
        with self.assertWarns(Warning):
            prompt = parser.parse_template(self.prompt, mode='gen')
        self.assertEqual(
            prompt,
            PromptList([
                {
                    'role': 'SYSTEM',
                    'prompt': 'system msg'
                },
                {
                    'role': 'HUMAN',
                    'prompt': 'U0'
                },
                {
                    'role': 'BOT',
                    'prompt': 'B0'
                },
                {
                    'role': 'HUMAN',
                    'prompt': 'U1'
                },
                {
                    'role': 'BOT',
                    'prompt': 'B1'
                },
                {
                    'role': 'HUMAN',
                    'prompt': 'U2'
                },
            ]))
        with self.assertWarns(Warning):
            prompt = parser.parse_template(self.prompt, mode='ppl')
        self.assertEqual(
            prompt,
            PromptList([
                {
                    'role': 'SYSTEM',
                    'prompt': 'system msg'
                },
                {
                    'role': 'HUMAN',
                    'prompt': 'U0'
                },
                {
                    'role': 'BOT',
                    'prompt': 'B0'
                },
                {
                    'role': 'HUMAN',
                    'prompt': 'U1'
                },
                {
                    'role': 'BOT',
                    'prompt': 'B1'
                },
                {
                    'role': 'HUMAN',
                    'prompt': 'U2'
                },
                {
                    'role': 'BOT',
                    'prompt': 'B2'
                },
            ]))
|
tests/prompt/test_lm_template_parser.py
ADDED
|
@@ -0,0 +1,235 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import unittest
|
| 2 |
+
|
| 3 |
+
from opencompass.models.base import LMTemplateParser
|
| 4 |
+
from opencompass.utils.prompt import PromptList
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class TestLMTemplateParser(unittest.TestCase):
    """Unit tests for ``opencompass.models.base.LMTemplateParser``.

    For local language models, a meta template wraps each role's
    message in begin/end markers and flattens the whole conversation
    to a single string. In 'gen' mode the output is truncated right
    after the begin marker of the role flagged ``generate=True``; in
    'ppl' mode the full conversation (plus the meta end) is rendered.
    """

    def setUp(self):
        self.parser = LMTemplateParser()
        # Section-annotated prompt; note the U1 HUMAN entry carries its
        # own 'end': '\n', which overrides the meta template's role end.
        self.prompt = PromptList([
            {
                'section': 'begin',
                'pos': 'begin'
            },
            'begin',
            {
                'role': 'SYSTEM',
                'fallback_role': 'HUMAN',
                'prompt': 'system msg'
            },
            {
                'section': 'ice',
                'pos': 'begin'
            },
            {
                'role': 'HUMAN',
                'prompt': 'U0'
            },
            {
                'role': 'BOT',
                'prompt': 'B0'
            },
            {
                'section': 'ice',
                'pos': 'end'
            },
            {
                'section': 'begin',
                'pos': 'end'
            },
            {
                'section': 'round',
                'pos': 'begin'
            },
            {
                'role': 'HUMAN',
                'prompt': 'U1',
                'end': '\n'
            },
            {
                'role': 'BOT',
                'prompt': 'B1'
            },
            {
                'role': 'HUMAN',
                'prompt': 'U2'
            },
            {
                'role': 'BOT',
                'prompt': 'B2'
            },
            {
                'section': 'round',
                'pos': 'end'
            },
            {
                'section': 'end',
                'pos': 'begin'
            },
            'end',
            {
                'section': 'end',
                'pos': 'end'
            },
        ])

    def test_parse_template_str_input(self):
        """Plain strings pass through untouched in both modes."""
        prompt = self.parser.parse_template('Hello, world!', mode='gen')
        self.assertEqual(prompt, 'Hello, world!')
        prompt = self.parser.parse_template('Hello, world!', mode='ppl')
        self.assertEqual(prompt, 'Hello, world!')

    def test_parse_template_list_input(self):
        """Plain lists are parsed element-wise and returned as lists."""
        prompt = self.parser.parse_template(['Hello', 'world'], mode='gen')
        self.assertEqual(prompt, ['Hello', 'world'])
        prompt = self.parser.parse_template(['Hello', 'world'], mode='ppl')
        self.assertEqual(prompt, ['Hello', 'world'])

    def test_parse_template_PromptList_input_no_meta_template(self):
        """Without a meta template the prompt flattens to newline-joined text."""
        prompt = self.parser.parse_template(self.prompt, mode='gen')
        self.assertEqual(prompt,
                         'begin\nsystem msg\nU0\nB0\nU1\nB1\nU2\nB2\nend')
        prompt = self.parser.parse_template(self.prompt, mode='ppl')
        self.assertEqual(prompt,
                         'begin\nsystem msg\nU0\nB0\nU1\nB1\nU2\nB2\nend')

    def test_parse_template_PromptList_input_with_meta_template(self):
        # no SYSTEM role, early generation in THOUGHTS: the THOUGHTS role
        # has a fixed prompt 'None' and generate=True, so 'gen' mode stops
        # right after '<|Inner Thoughts|>:'.
        parser = LMTemplateParser(meta_template=dict(
            begin='meta instruction\n',
            round=[
                dict(role='HUMAN', begin='<|HUMAN|>:', end='<eoh>\n'),
                dict(role='THOUGHTS',
                     begin='<|Inner Thoughts|>:',
                     generate=True,
                     end='<eot>\n',
                     prompt='None'),
                dict(role='BOT', begin='<|BOT|>:', end='<eob>\n'),
            ],
            end='meta end',
        ))
        prompt = parser.parse_template(self.prompt, mode='gen')
        # SYSTEM falls back to HUMAN; U1's own 'end': '\n' replaces
        # the role-level '<eoh>\n'.
        target = ('meta instruction\n'
                  'begin'
                  '<|HUMAN|>:system msg<eoh>\n'
                  '<|HUMAN|>:U0<eoh>\n'
                  '<|Inner Thoughts|>:None<eot>\n'
                  '<|BOT|>:B0<eob>\n'
                  '<|HUMAN|>:U1\n'
                  '<|Inner Thoughts|>:None<eot>\n'
                  '<|BOT|>:B1<eob>\n'
                  '<|HUMAN|>:U2<eoh>\n'
                  '<|Inner Thoughts|>:')
        self.assertEqual(prompt, target)
        prompt = parser.parse_template(self.prompt, mode='ppl')
        target = ('meta instruction\n'
                  'begin'
                  '<|HUMAN|>:system msg<eoh>\n'
                  '<|HUMAN|>:U0<eoh>\n'
                  '<|Inner Thoughts|>:None<eot>\n'
                  '<|BOT|>:B0<eob>\n'
                  '<|HUMAN|>:U1\n'
                  '<|Inner Thoughts|>:None<eot>\n'
                  '<|BOT|>:B1<eob>\n'
                  '<|HUMAN|>:U2<eoh>\n'
                  '<|Inner Thoughts|>:None<eot>\n'
                  '<|BOT|>:B2<eob>\n'
                  'end'
                  'meta end')
        self.assertEqual(prompt, target)

        # no SYSTEM role, generation in BOT: 'gen' mode now stops right
        # after the final '<|BOT|>:' marker instead.
        parser = LMTemplateParser(meta_template=dict(
            begin='meta instruction\n',
            round=[
                dict(role='HUMAN', begin='<|HUMAN|>:', end='<eoh>\n'),
                dict(role='THOUGHTS',
                     begin='<|Inner Thoughts|>:',
                     end='<eot>\n',
                     prompt='None'),
                dict(
                    role='BOT', begin='<|BOT|>:', end='<eob>\n',
                    generate=True),
            ],
            end='meta end',
        ))
        prompt = parser.parse_template(self.prompt, mode='gen')
        target = ('meta instruction\n'
                  'begin'
                  '<|HUMAN|>:system msg<eoh>\n'
                  '<|HUMAN|>:U0<eoh>\n'
                  '<|Inner Thoughts|>:None<eot>\n'
                  '<|BOT|>:B0<eob>\n'
                  '<|HUMAN|>:U1\n'
                  '<|Inner Thoughts|>:None<eot>\n'
                  '<|BOT|>:B1<eob>\n'
                  '<|HUMAN|>:U2<eoh>\n'
                  '<|Inner Thoughts|>:None<eot>\n'
                  '<|BOT|>:')
        self.assertEqual(prompt, target)
        prompt = parser.parse_template(self.prompt, mode='ppl')
        target = ('meta instruction\n'
                  'begin'
                  '<|HUMAN|>:system msg<eoh>\n'
                  '<|HUMAN|>:U0<eoh>\n'
                  '<|Inner Thoughts|>:None<eot>\n'
                  '<|BOT|>:B0<eob>\n'
                  '<|HUMAN|>:U1\n'
                  '<|Inner Thoughts|>:None<eot>\n'
                  '<|BOT|>:B1<eob>\n'
                  '<|HUMAN|>:U2<eoh>\n'
                  '<|Inner Thoughts|>:None<eot>\n'
                  '<|BOT|>:B2<eob>\n'
                  'end'
                  'meta end')
        self.assertEqual(prompt, target)

        # with SYSTEM role, generation in BOT: the reserved SYSTEM role
        # renders with its own markers instead of falling back to HUMAN.
        parser = LMTemplateParser(meta_template=dict(
            begin='meta instruction\n',
            round=[
                dict(role='HUMAN', begin='<|HUMAN|>:', end='<eoh>\n'),
                dict(role='THOUGHTS',
                     begin='<|Inner Thoughts|>:',
                     end='<eot>\n',
                     prompt='None'),
                dict(
                    role='BOT', begin='<|BOT|>:', end='<eob>\n',
                    generate=True),
            ],
            end='meta end',
            reserved_roles=[
                dict(role='SYSTEM', begin='<|SYSTEM|>:', end='<eos>\n')
            ]))
        prompt = parser.parse_template(self.prompt, mode='gen')
        target = ('meta instruction\n'
                  'begin'
                  '<|SYSTEM|>:system msg<eos>\n'
                  '<|HUMAN|>:U0<eoh>\n'
                  '<|Inner Thoughts|>:None<eot>\n'
                  '<|BOT|>:B0<eob>\n'
                  '<|HUMAN|>:U1\n'
                  '<|Inner Thoughts|>:None<eot>\n'
                  '<|BOT|>:B1<eob>\n'
                  '<|HUMAN|>:U2<eoh>\n'
                  '<|Inner Thoughts|>:None<eot>\n'
                  '<|BOT|>:')
        self.assertEqual(prompt, target)
        prompt = parser.parse_template(self.prompt, mode='ppl')
        target = ('meta instruction\n'
                  'begin'
                  '<|SYSTEM|>:system msg<eos>\n'
                  '<|HUMAN|>:U0<eoh>\n'
                  '<|Inner Thoughts|>:None<eot>\n'
                  '<|BOT|>:B0<eob>\n'
                  '<|HUMAN|>:U1\n'
                  '<|Inner Thoughts|>:None<eot>\n'
                  '<|BOT|>:B1<eob>\n'
                  '<|HUMAN|>:U2<eoh>\n'
                  '<|Inner Thoughts|>:None<eot>\n'
                  '<|BOT|>:B2<eob>\n'
                  'end'
                  'meta end')
        self.assertEqual(prompt, target)
|
tests/prompt/test_prompt_list.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import unittest
|
| 2 |
+
|
| 3 |
+
from opencompass.utils.prompt import PromptList
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class TestPromptList(unittest.TestCase):
    """Unit tests for ``opencompass.utils.prompt.PromptList``.

    PromptList is a list subclass mixing plain strings with dict
    entries; ``format``/``replace`` apply to both string items and the
    'prompt' values of dict items, ``+`` concatenates without flattening
    strings, and ``str()`` joins string items and dict prompts.
    """

    def test_initialization(self):
        # Empty and seeded construction behave like plain lists.
        pl = PromptList()
        self.assertEqual(pl, [])

        pl = PromptList(['test', '123'])
        self.assertEqual(pl, ['test', '123'])

    def test_format(self):
        pl = PromptList(['hi {a}{b}', {'prompt': 'hey {a}!'}, '123'])
        # Extra keyword 'c' is ignored; both string and dict prompts fill.
        new_pl = pl.format(a=1, b=2, c=3)
        self.assertEqual(new_pl, ['hi 12', {'prompt': 'hey 1!'}, '123'])

        # Partial formatting leaves unfilled placeholders intact.
        new_pl = pl.format(b=2)
        self.assertEqual(new_pl, ['hi {a}2', {'prompt': 'hey {a}!'}, '123'])

        # Unknown-only keywords leave everything unchanged.
        new_pl = pl.format(d=1)
        self.assertEqual(new_pl, ['hi {a}{b}', {'prompt': 'hey {a}!'}, '123'])

    def test_replace(self):
        pl = PromptList(['hello world', {'prompt': 'hello world'}, '123'])
        # str -> str replacement touches strings and dict prompts alike.
        new_pl = pl.replace('world', 'there')
        self.assertEqual(new_pl,
                         ['hello there', {
                             'prompt': 'hello there'
                         }, '123'])

        # str -> PromptList replacement splices the list in place of a
        # whole string item...
        new_pl = pl.replace('123', PromptList(['p', {'role': 'BOT'}]))
        self.assertEqual(
            new_pl,
            ['hello world', {
                'prompt': 'hello world'
            }, 'p', {
                'role': 'BOT'
            }])

        # ...or splits a string item around a partial match.
        new_pl = pl.replace('2', PromptList(['p', {'role': 'BOT'}]))
        self.assertEqual(new_pl, [
            'hello world', {
                'prompt': 'hello world'
            }, '1', 'p', {
                'role': 'BOT'
            }, '3'
        ])

        # Splicing a PromptList into a dict's 'prompt' value is invalid.
        with self.assertRaises(TypeError):
            new_pl = pl.replace('world', PromptList(['new', 'world']))

    def test_add(self):
        pl = PromptList(['hello'])
        # Adding a string appends it as a new item (no concatenation).
        new_pl = pl + ' world'
        self.assertEqual(new_pl, ['hello', ' world'])

        pl2 = PromptList([' world'])
        new_pl = pl + pl2
        self.assertEqual(new_pl, ['hello', ' world'])

        # Right-addition prepends the string item.
        new_pl = 'hi, ' + pl
        self.assertEqual(new_pl, ['hi, ', 'hello'])

        pl += '!'
        self.assertEqual(pl, ['hello', '!'])

    def test_str(self):
        pl = PromptList(['hello', ' world', {'prompt': '!'}])
        self.assertEqual(str(pl), 'hello world!')

        # Non-string, non-dict items cannot be stringified.
        with self.assertRaises(TypeError):
            pl = PromptList(['hello', ' world', 123])
            str(pl)
|