"""
Benchmark Problem Loader
AZR ๊ธฐ๋ฐ TestTime RLVR์ ์ํ ๋ฒค์น๋งํฌ ๋ฌธ์ ๋ก๋ฉ ์์คํ
๊ธฐ์กด Test-Time-RLVR์ load_humaneval_problem ํจ์๋ฅผ ํ์ฅ
"""
import json
import os
import re
from typing import Dict, List, Any, Tuple, Optional
from pathlib import Path

from .config import BenchmarkConfig, TestTimeConfig
from .logger import TestTimeLogger

class BenchmarkProblemLoader:
    """Loads and manages benchmark problems (prefers the standard EvalPlus pipeline)."""

    def __init__(self, config: TestTimeConfig, logger: Optional[TestTimeLogger] = None):
        self.config = config
        self.logger = logger or TestTimeLogger()
        self.loaded_problems = {}  # per-problem cache
        self.evalplus_cache = {}   # EvalPlus dataset cache

    def _load_evalplus_data(self, benchmark_name: str) -> Dict[str, Dict[str, Any]]:
        """Load and cache the EvalPlus dataset for a benchmark."""
        if benchmark_name in self.evalplus_cache:
            return self.evalplus_cache[benchmark_name]
        try:
            if benchmark_name == 'mbpp':
                from evalplus.data.mbpp import get_mbpp_plus
                problems = get_mbpp_plus()  # mbpp_deserialize_inputs is applied automatically
                self.logger.log_info(f"✅ MBPP+ EvalPlus data loaded: {len(problems)} problems")
            elif benchmark_name == 'humaneval':
                from evalplus.data.humaneval import get_human_eval_plus
                problems = get_human_eval_plus()  # standard EvalPlus loader
                self.logger.log_info(f"✅ HumanEval+ EvalPlus data loaded: {len(problems)} problems")
            else:
                raise ValueError(f"Unsupported benchmark for EvalPlus: {benchmark_name}")
            self.evalplus_cache[benchmark_name] = problems
            return problems
        except Exception as e:
            self.logger.log_error(f"❌ {benchmark_name.upper()}+ EvalPlus loading failed: {e}")
            return {}
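
    # Note: get_mbpp_plus() / get_human_eval_plus() return dicts keyed by task_id
    # (e.g. 'Mbpp/2', 'HumanEval/0'), each value holding the problem fields such as
    # 'prompt', 'entry_point', and 'canonical_solution'. The exact field set may
    # vary across EvalPlus versions.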

    def load_problem(self, benchmark_config: BenchmarkConfig, problem_id: str) -> Dict[str, Any]:
        """Load a single benchmark problem (tries the standard EvalPlus data first)."""
        cache_key = f"{benchmark_config.name}_{problem_id}"
        if cache_key in self.loaded_problems:
            return self.loaded_problems[cache_key]

        # try the EvalPlus data first
        if benchmark_config.name in ['mbpp', 'humaneval']:
            evalplus_problems = self._load_evalplus_data(benchmark_config.name)
            if problem_id in evalplus_problems:
                problem = evalplus_problems[problem_id].copy()
                # attach extra metadata
                problem['benchmark_name'] = benchmark_config.name
                problem['benchmark_config'] = benchmark_config
                # store in the cache
                self.loaded_problems[cache_key] = problem
                self.logger.log_info(f"✅ Problem loaded: {problem_id} from {benchmark_config.name} (EvalPlus)")
                return problem

        # fallback: original JSONL loading
        self.logger.log_info(f"⚠️ EvalPlus lookup failed for {problem_id}, using the original loader")
        problem_file = benchmark_config.data_path

        # make sure the file exists
        if not os.path.exists(problem_file):
            raise FileNotFoundError(f"Benchmark file not found: {problem_file}")

        # load the JSONL file (same as the original implementation)
        with open(problem_file, 'r', encoding='utf-8') as f:
            problems = [json.loads(line) for line in f]

        # look the problem up by ID
        for problem in problems:
            if problem['task_id'] == problem_id:
                # attach extra metadata
                problem['benchmark_name'] = benchmark_config.name
                problem['benchmark_config'] = benchmark_config
                # store in the cache
                self.loaded_problems[cache_key] = problem
                self.logger.log_info(f"✅ Problem loaded: {problem_id} from {benchmark_config.name} (Original)")
                return problem

        raise ValueError(f"Problem {problem_id} not found in {problem_file}")

    def load_problem_batch(self, benchmark_config: BenchmarkConfig,
                           problem_ids: List[str]) -> List[Dict[str, Any]]:
        """Load a batch of problems; failures are logged and skipped."""
        problems = []
        for problem_id in problem_ids:
            try:
                problem = self.load_problem(benchmark_config, problem_id)
                problems.append(problem)
            except Exception as e:
                self.logger.log_error(f"Failed to load {problem_id}: {e}")
        return problems

    def get_test_cases(self, problem: Dict[str, Any]) -> List[Tuple[str, str]]:
        """Extract test cases from a problem."""
        test_cases = []

        # base test cases (the 'test' field)
        if 'test' in problem:
            test_code = problem['test']
            # extract input-output pairs from assert statements
            test_cases.extend(self._parse_assert_statements(test_code))

        # Plus test cases (plus_input / plus_output)
        if 'plus_input' in problem and 'plus_output' in problem:
            plus_inputs = problem['plus_input']
            plus_outputs = problem['plus_output']
            if isinstance(plus_inputs, str):
                plus_inputs = json.loads(plus_inputs)
            if isinstance(plus_outputs, str):
                plus_outputs = json.loads(plus_outputs)
            for inp, out in zip(plus_inputs, plus_outputs):
                test_cases.append((str(inp), str(out)))

        return test_cases
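
    # Illustrative example: for a problem whose 'test' field contains
    # "assert add(1, 2) == 3", get_test_cases returns [('1, 2', '3')];
    # plus_input/plus_output pairs are appended in the same (input, output)
    # string form.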

    def _parse_assert_statements(self, test_code: str) -> List[Tuple[str, str]]:
        """Extract input-output pairs from assert statements."""
        test_cases = []
        lines = test_code.strip().split('\n')
        for line in lines:
            line = line.strip()
            if line.startswith('assert '):
                # parse the form: assert function(args) == expected
                match = re.match(r'assert\s+(\w+)\(([^)]*)\)\s*==\s*(.+)', line)
                if match:
                    _func_name, args, expected = match.groups()
                    test_cases.append((args.strip(), expected.strip()))
        return test_cases
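
    # Parsing example: "assert add(1, 2) == 3" matches the regex above and yields
    # ('1, 2', '3'). Asserts with nested parentheses in the arguments, multi-line
    # asserts, or asserts without an equality comparison do not match and are
    # silently skipped.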

    def validate_solution(self, problem: Dict[str, Any], solution: str) -> Dict[str, Any]:
        """Validate a solution (AZR Python Executor integration planned)."""
        validation_result = {
            'problem_id': problem['task_id'],
            'solution': solution,
            'syntax_valid': False,
            'test_results': [],
            'overall_success': False,
            'error_message': None
        }
        try:
            # 1. syntax check
            compile(solution, '<string>', 'exec')
            validation_result['syntax_valid'] = True

            # 2. test case execution (to be wired to the AZR Python Executor)
            test_cases = self.get_test_cases(problem)
            validation_result['test_results'] = [
                {'input': inp, 'expected': out, 'passed': False}
                for inp, out in test_cases
            ]

            # temporary: a solution that merely compiles is treated as a success
            validation_result['overall_success'] = True
        except SyntaxError as e:
            validation_result['error_message'] = f"Syntax Error: {e}"
        except Exception as e:
            validation_result['error_message'] = f"Validation Error: {e}"
        return validation_result

    def get_sequential_problems(self, benchmark_config: BenchmarkConfig,
                                num_problems: int) -> List[Dict[str, Any]]:
        """Load N problems sequentially, starting from the configured index."""
        problems = []
        for i in range(num_problems):
            problem_index = benchmark_config.start_index + i
            problem_id = f"{benchmark_config.problem_prefix}/{problem_index}"
            try:
                problem = self.load_problem(benchmark_config, problem_id)
                problems.append(problem)
            except Exception as e:
                self.logger.log_error(f"Failed to load {problem_id}: {e}")
                continue
        return problems

    def get_problem_statistics(self, benchmark_config: BenchmarkConfig) -> Dict[str, Any]:
        """Summary statistics for a benchmark data file."""
        problem_file = benchmark_config.data_path
        if not os.path.exists(problem_file):
            return {"error": f"File not found: {problem_file}"}
        with open(problem_file, 'r', encoding='utf-8') as f:
            problems = [json.loads(line) for line in f]
        stats = {
            'total_problems': len(problems),
            'benchmark_name': benchmark_config.name,
            'data_file': problem_file,
            'sample_problem_ids': [p['task_id'] for p in problems[:5]]
        }
        return stats
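
# Minimal usage sketch (illustrative only; the BenchmarkConfig/TestTimeConfig
# constructor arguments below are assumptions, not confirmed by this file --
# consult .config for the actual fields):
#
#   config = TestTimeConfig(...)                    # project-specific settings
#   bench = BenchmarkConfig(name='humaneval', ...)  # plus data_path, start_index, ...
#   loader = BenchmarkProblemLoader(config)
#   problem = loader.load_problem(bench, 'HumanEval/0')
#   cases = loader.get_test_cases(problem)
#   result = loader.validate_solution(problem, 'def add(a, b):\n    return a + b')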