zeju-0727 committed on
Commit
1865bbd
·
verified ·
1 Parent(s): e496ccf

Upload dyve_tts/eval/evaluate_majority_vote.py with huggingface_hub

Browse files
dyve_tts/eval/evaluate_majority_vote.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import asyncio
3
+ import aiofiles
4
+ from tqdm import tqdm
5
+ import os
6
+ import argparse
7
+ from openai import AsyncOpenAI
8
+ from math_verify import parse
9
+ from evaluate import load
10
+ from collections import Counter
11
+
12
+ math = load("competition_math")
13
+
14
+
15
async def generate_k_answers(client, question: str, model_name: str, k: int = 5) -> list:
    """Generate k candidate step-by-step solutions for one math problem.

    Args:
        client: AsyncOpenAI-compatible client exposing ``chat.completions.create``.
        question: problem statement, interpolated verbatim into the prompt.
        model_name: model identifier forwarded unchanged to the API.
        k: number of completions requested in a single call (``n=k``).

    Returns:
        List of whitespace-stripped completion strings. On any API failure the
        error is printed and an empty list is returned — previously this path
        returned ``None``, contradicting the ``-> list`` annotation; callers
        already treat ``None`` and empty results identically, so ``[]`` is a
        backward-compatible fix.
    """
    prompt = f"""
The following is a math problem:

[Math Problem]

{question}

Your task is to solve it step by step.
"""

    try:
        response = await client.chat.completions.create(
            model=model_name,
            messages=[
                {"role": "user", "content": prompt}
            ],
            max_tokens=8192,
            temperature=0.6,
            top_p=0.95,
            n=k  # request all k samples in one round trip
        )
    except Exception as e:
        # API/transport errors degrade to "no answers"; caller skips the problem.
        print(f"Error in generate_k_answers: {str(e)}")
        return []
    return [choice.message.content.strip() for choice in response.choices]
46
+
47
+
48
def majority_vote(answers: list, prob: dict) -> tuple:
    """Extract an answer from each completion and majority-vote the result.

    Args:
        answers: raw model completions (strings).
        prob: problem record; only ``prob["expected_answer"]`` is read here.

    Returns:
        ``(final_answer, is_correct, all_extracted_answers)`` where
        ``is_correct`` is 1/0, or ``(None, 0, [])`` when no completion yields
        a parseable answer.
    """
    extracted_answers = []
    for ans in answers:
        try:
            # parse() returns a list of candidate parses; keep the last one.
            extracted = parse(ans)[-1]
            if extracted is not None:  # Only include valid parsed answers
                extracted_answers.append(extracted)
        except Exception:
            # Was a bare ``except:``, which also swallowed KeyboardInterrupt/
            # SystemExit; narrowed so Ctrl-C still aborts the run.
            continue

    if not extracted_answers:
        return None, 0, []

    # Get the most common answer
    answer_counts = Counter(extracted_answers)
    final_answer = answer_counts.most_common(1)[0][0]

    # Check the majority answer against the reference; the competition_math
    # metric returns accuracy in [0, 1], so > 0.99 means "matched".
    is_correct = 1 if math.compute(references=[prob["expected_answer"]], predictions=[final_answer])["accuracy"] > 0.99 else 0

    return final_answer, is_correct, extracted_answers
73
+
74
+
75
async def evaluate_single_problem(
    prob: dict,
    client: AsyncOpenAI,
    model_name: str,
    k: int,
    sem: asyncio.Semaphore
) -> dict:
    """Evaluate one problem end to end while holding the concurrency semaphore.

    Samples k completions, majority-votes the extracted answers, prints a
    per-problem report, and returns the result record; returns None when
    generation or extraction fails (any unexpected error is printed).
    """
    async with sem:
        try:
            print("Evaluating problem: {}".format(prob["question"]))

            # Sample k completions for this question.
            completions = await generate_k_answers(client, prob["question"], model_name, k)
            if not completions:
                return None

            # Aggregate via majority vote and score against the reference.
            voted_answer, correct, extracted = majority_vote(completions, prob)
            if voted_answer is None:
                return None

            print("------------------------------------------------------------")
            print("Question:", prob["question"])
            print("Expected answer:", prob["expected_answer"])
            print(f"Generated {len(completions)} answers")
            print("Extracted answers:", extracted)
            print("Majority vote answer:", voted_answer)
            print("Is correct:", correct)

            return {
                "question": prob["question"],
                "expected_answer": prob["expected_answer"],
                "generated_answers": completions,
                "extracted_answers": extracted,
                "majority_vote_answer": voted_answer,
                "is_correct": correct,
            }
        except Exception as e:
            print(f"Error in evaluate_single_problem: {str(e)}")
            return None
116
+
117
+
118
async def save_results_async(output_file: str, data: dict):
    """Append one result record to *output_file* as a single JSON line."""
    record_line = json.dumps(data) + '\n'
    async with aiofiles.open(output_file, 'a') as f:
        await f.write(record_line)
121
+
122
+
123
def _load_problems(path: str) -> list:
    """Read a JSONL problem file into ``{'question', 'expected_answer'}`` dicts."""
    problems = []
    with open(path, 'r') as f:
        for line in f:
            problem = json.loads(line)
            problems.append({
                'question': problem['problem'],
                'expected_answer': problem['answer']
            })
    return problems


def _dedup_results_file(output_file: str) -> set:
    """Deduplicate an existing results file in place and return its questions.

    Keeps the last record per question (matching the original dict-overwrite
    behavior), rewrites the file, and returns the set of evaluated questions.
    """
    dedup = {}
    with open(output_file, 'r') as res_file:
        for line in res_file:
            if not line.strip():
                continue
            try:
                rec = json.loads(line)
                question = rec.get("question")
            except (json.JSONDecodeError, AttributeError):
                # Was ``except Exception as e`` with *e* unused; narrowed to the
                # two failure modes this path can hit: corrupt/truncated JSON,
                # or a valid JSON scalar that has no ``.get``.
                continue
            if question is not None:
                dedup[question] = rec

    with open(output_file, 'w') as res_file:
        for rec in dedup.values():
            res_file.write(json.dumps(rec) + "\n")

    return set(dedup.keys())


async def main(k: int = 5, debug: bool = False, resume: bool = False):
    """Run k-sample majority-vote evaluation over ./test500.jsonl.

    Args:
        k: completions sampled per problem.
        debug: evaluate only the first 50 problems.
        resume: skip problems already present in the results file.
    """
    # Local OpenAI-compatible endpoint (e.g. vLLM); the key is a placeholder.
    client = AsyncOpenAI(
        base_url="http://localhost:8015/v1",
        api_key="token-abc123"
    )

    model_name = "DeepSeek-R1-Distill-Qwen-14B"

    # Load problems from test500.jsonl
    problems = _load_problems('./test500.jsonl')

    # If debug flag is active, only evaluate the first 50 problems
    if debug:
        problems = problems[:50]
        print("DEBUG MODE: processing only the first 50 problems.")

    # If resume flag is active, skip already evaluated problems
    output_file = f"majority_vote_k{k}_results.jsonl"
    if resume:
        if os.path.exists(output_file):
            evaluated_questions = _dedup_results_file(output_file)
            original_count = len(problems)
            problems = [p for p in problems if p["question"] not in evaluated_questions]
            skipped = original_count - len(problems)
            print(f"Resuming evaluation: Skipping {skipped} already evaluated problems.")
        else:
            print("No previous evaluation results found. Starting from scratch.")

    # Cap in-flight requests so the local server is not overwhelmed.
    sem = asyncio.Semaphore(30)  # Adjust the number based on your needs

    # Create tasks for each problem
    tasks = [
        asyncio.create_task(evaluate_single_problem(prob, client, model_name, k, sem))
        for prob in problems
    ]

    results = []
    # Use as_completed to update progress with tqdm
    for future in tqdm(asyncio.as_completed(tasks), total=len(tasks), desc='Processing problems'):
        result = await future
        if result is not None:
            results.append(result)
            # Persist incrementally so a crash loses at most one result.
            await save_results_async(output_file, result)

    if results:
        total_correct = sum(result["is_correct"] for result in results)
        accuracy = total_correct / len(results) * 100
        print(f"\nFinal Accuracy with {k}-majority vote: {accuracy:.2f}%")

    print(f"Evaluation complete. Processed {len(results)} problems successfully.")
    print(f"Results saved to {output_file}")
202
+
203
+
204
if __name__ == "__main__":
    # CLI entry point: parse flags, then hand off to the async driver.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--k", type=int, default=5,
                            help="Number of completions to generate per question")
    arg_parser.add_argument("--debug", action="store_true",
                            help="Run in debug mode (only evaluate the first 50 problems)")
    arg_parser.add_argument("--resume", action="store_true",
                            help="Resume evaluation by skipping already evaluated problems")
    cli_args = arg_parser.parse_args()
    asyncio.run(main(k=cli_args.k, debug=cli_args.debug, resume=cli_args.resume))