zeju-0727 committed on
Commit
e496ccf
·
verified ·
1 Parent(s): bfb2b39

Upload dyve_tts/eval/new_eval_efficient.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. dyve_tts/eval/new_eval_efficient.py +178 -0
dyve_tts/eval/new_eval_efficient.py ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import asyncio
import json
import os
from typing import Optional

import aiofiles
from evaluate import load
from math_verify import parse
from openai import AsyncOpenAI
from tqdm import tqdm
10
+
11
# Hugging Face `evaluate` metric used to grade predicted answers against references.
# NOTE: this name shadows the stdlib `math` module for the rest of this file.
math = load("competition_math")
12
+
13
+
14
async def generate_single_answer(client, question: str, model_name: str) -> Optional[str]:
    """Generate a single step-by-step solution for one math problem.

    Args:
        client: An ``AsyncOpenAI``-compatible client exposing
            ``chat.completions.create``.
        question: The math problem statement to solve.
        model_name: Model identifier to pass to the completions endpoint.

    Returns:
        The stripped completion text, or ``None`` if the request failed.
        (The annotation was previously ``str``, but the error path returns
        ``None`` — callers already check for that.)
    """
    problem = question

    # The instruction asks the model to stop after its first complete solution,
    # which keeps generations short for this pass@1 evaluation.
    # Fixed typo: "neec" -> "need".
    prompt = f"""
The following is a math problem:

[Math Problem]

{problem}

Your task is to solve it step by step.

In your thinking, when you think you just get the result of one complete solution,
just stop (do not need to self-check or rethink).
"""

    # Please put your final answer (i.e., the index) in \\boxed{{}}.
    try:
        response = await client.chat.completions.create(
            model=model_name,
            messages=[
                {"role": "user", "content": prompt}
            ],
            max_tokens=8192,
            temperature=0.6,
            top_p=0.95,
            n=1,
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        # Best-effort: report and signal failure instead of raising, so one bad
        # request does not abort the whole evaluation run.
        print(f"Error in generate_single_answer: {str(e)}")
        return None
48
+
49
+
50
async def evaluate_single_problem(
    prob: dict,
    client: AsyncOpenAI,
    model_name: str,
    sem: asyncio.Semaphore
) -> dict:
    """Run one problem through the model and grade it.

    Acquires *sem* to bound concurrency, generates a single answer, parses it,
    and scores it with the competition_math metric (accuracy > 0.99 counts as
    a pass). Returns a result record, or ``None`` if generation or grading
    failed.
    """
    async with sem:
        try:
            question = prob["question"]
            expected = prob["expected_answer"]
            print("Evaluating problem: {}".format(question))

            # One completion per problem (pass@1).
            answer = await generate_single_answer(client, question, model_name)
            if answer is None:
                return None

            # Parse the model output and grade it against the reference.
            extracted_ans = parse(answer)
            score = math.compute(references=[expected], predictions=[extracted_ans])["accuracy"]
            pass_at_1 = 1 if score > 0.99 else 0

            print("------------------------------------------------------------")
            print("Question:", question)
            print("Expected answer:", expected)
            print("Generated answer:", answer)
            print("Pass@1:", pass_at_1)

            return {
                "question": question,
                "expected_answer": expected,
                "generated_answer": answer,
                "pass@1": pass_at_1,
            }
        except Exception as e:
            print(f"Error in evaluate_single_problem: {str(e)}")
            return None
85
+
86
+
87
async def save_results_async(output_file: str, data: dict):
    """Append *data* as a single JSON line to *output_file* (JSONL format)."""
    serialized = json.dumps(data) + '\n'
    async with aiofiles.open(output_file, 'a') as out:
        await out.write(serialized)
90
+
91
+
92
def _load_problems(path: str) -> list:
    """Load a JSONL problem file into {'question', 'expected_answer'} records.

    Explicit UTF-8 encoding avoids platform-dependent default codecs.
    """
    problems = []
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            record = json.loads(line)
            problems.append({
                'question': record['problem'],
                'expected_answer': record['answer']
            })
    return problems


def _dedup_results(output_file: str) -> set:
    """Deduplicate an existing results file in place (last record per question wins).

    Corrupt or partially written lines (e.g. from an interrupted run) are
    skipped. Returns the set of question strings that already have a result.
    """
    dedup = {}
    with open(output_file, 'r', encoding='utf-8') as res_file:
        for line in res_file:
            if not line.strip():
                continue
            try:
                rec = json.loads(line)
            except Exception:
                # Unparseable line — skip it rather than abort the resume.
                continue
            question = rec.get("question")
            if question is not None:
                dedup[question] = rec

    # Write deduplicated results back so subsequent appends stay clean.
    with open(output_file, 'w', encoding='utf-8') as res_file:
        for rec in dedup.values():
            res_file.write(json.dumps(rec) + "\n")

    return set(dedup.keys())


async def main(debug: bool = False, resume: bool = False):
    """Evaluate pass@1 over test500.jsonl against a local OpenAI-compatible server.

    Args:
        debug: If True, only the first 50 problems are evaluated.
        resume: If True, previously saved results are deduplicated and their
            questions are skipped.

    Results are appended incrementally to ``pass_at_1_simple_results.jsonl``
    and a final pass@1 rate is printed.
    """
    # Initialize the AsyncOpenAI client against the local vLLM-style endpoint.
    client = AsyncOpenAI(
        base_url="http://localhost:8014/v1",
        api_key="token-abc123"
    )

    model_name = "DeepSeek-R1-Distill-Qwen-14B"

    # Load problems from test500.jsonl
    problems = _load_problems('./test500.jsonl')

    # If debug flag is active, only evaluate the first 50 problems
    if debug:
        problems = problems[:50]
        print("DEBUG MODE: processing only the first 50 problems.")

    # If resume flag is active, skip already evaluated problems
    output_file = "pass_at_1_simple_results.jsonl"
    if resume:
        if os.path.exists(output_file):
            evaluated_questions = _dedup_results(output_file)
            original_count = len(problems)
            problems = [p for p in problems if p["question"] not in evaluated_questions]
            skipped = original_count - len(problems)
            print(f"Resuming evaluation: Skipping {skipped} already evaluated problems.")
        else:
            print("No previous evaluation results found. Starting from scratch.")

    # Bound the number of in-flight requests to the model server.
    sem = asyncio.Semaphore(30)  # Adjust the number based on your needs

    tasks = [
        asyncio.create_task(evaluate_single_problem(prob, client, model_name, sem))
        for prob in problems
    ]

    results = []
    # Use as_completed to update progress with tqdm
    for future in tqdm(asyncio.as_completed(tasks), total=len(tasks), desc='Processing problems'):
        result = await future
        if result is not None:
            results.append(result)
            # Save result immediately so progress survives interruption.
            await save_results_async(output_file, result)

    if results:
        total_pass_at_1 = sum(result["pass@1"] for result in results)
        pass_at_1_rate = total_pass_at_1 / len(results) * 100
        print(f"\nFinal Pass@1 Rate: {pass_at_1_rate:.2f}%")

    print(f"Evaluation complete. Processed {len(results)} problems successfully.")
    print(f"Results saved to {output_file}")
171
+
172
+
173
if __name__ == "__main__":
    # CLI entry point: parse the two boolean flags, then run the async driver.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--debug",
        action="store_true",
        help="Run in debug mode (only evaluate the first 50 problems)",
    )
    arg_parser.add_argument(
        "--resume",
        action="store_true",
        help="Resume evaluation by skipping already evaluated problems",
    )
    cli_args = arg_parser.parse_args()
    asyncio.run(main(debug=cli_args.debug, resume=cli_args.resume))