File size: 12,636 Bytes
9f3bc09 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 |
#!/usr/bin/env python3
"""
Evaluation script for the fine-tuned Qwen2.5-3B evaluation agent model.
This script evaluates the trained model on various tasks including:
- VBench evaluation (text-to-video generation quality assessment)
- T2I-CompBench evaluation (text-to-image generation quality assessment)
- Open-ended evaluation queries
The model uses CoT-like reasoning format for quality assessment.
"""
import json
import argparse
import requests
import time
from typing import Dict, List, Any, Optional
from pathlib import Path
import pandas as pd
from tqdm import tqdm
class EvalAgentTester:
    """Tester for the fine-tuned evaluation agent model.

    Talks to an OpenAI-compatible chat-completions endpoint (e.g. a vLLM
    server) and evaluates the served model on samples stored in an
    alpaca-style JSON dataset (instruction / input / output / system / history).
    """

    def __init__(self, model_url: str = "http://0.0.0.0:12333/v1/chat/completions",
                 model_name: str = "eval-agent"):
        """
        Initialize the evaluation tester.

        Args:
            model_url: URL of the model server (launched via vLLM)
            model_name: Name of the served model
        """
        self.model_url = model_url
        self.model_name = model_name
        # Populated by load_test_data(); depending on the JSON file's
        # top-level structure this may end up a list or a dict.
        self.test_data = {}

    def load_test_data(self, data_path: str = "data/postprocess_20250819/ea_cot_dataset_10k.json") -> bool:
        """Load the test dataset from a JSON file.

        Args:
            data_path: Path to the alpaca-format JSON dataset.

        Returns:
            True on success, False if the file is missing or not valid JSON.
        """
        try:
            with open(data_path, 'r', encoding='utf-8') as f:
                self.test_data = json.load(f)
            print(f"Loaded {len(self.test_data)} test samples from {data_path}")
        except FileNotFoundError:
            print(f"Test data file not found: {data_path}")
            print("Please run the data preprocessing script first.")
            return False
        except json.JSONDecodeError as e:
            print(f"Error parsing JSON file: {e}")
            return False
        return True

    def call_model(self, instruction: str, input_text: str = "", system: str = "",
                   history: Optional[List] = None, max_tokens: int = 2048,
                   temperature: float = 0.7) -> Optional[str]:
        """
        Call the fine-tuned model via API.

        Args:
            instruction: Main instruction/question
            input_text: Additional input context
            system: System prompt
            history: Conversation history as (human, assistant) pairs
            max_tokens: Maximum response tokens
            temperature: Sampling temperature

        Returns:
            Model response or None if error
        """
        # Construct message list based on the alpaca format.
        messages = []
        if system:
            messages.append({"role": "system", "content": system})
        # Replay prior conversation turns, if any.
        if history:
            for human, assistant in history:
                messages.append({"role": "user", "content": human})
                messages.append({"role": "assistant", "content": assistant})
        # The alpaca "input" field is appended after the instruction.
        user_content = instruction
        if input_text:
            user_content = f"{instruction}\n\n{input_text}"
        messages.append({"role": "user", "content": user_content})

        payload = {
            "model": self.model_name,
            "messages": messages,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "stream": False
        }
        try:
            response = requests.post(self.model_url, json=payload, timeout=60)
            response.raise_for_status()
            result = response.json()
            return result["choices"][0]["message"]["content"]
        except requests.exceptions.RequestException as e:
            print(f"API request failed: {e}")
            return None
        except (KeyError, IndexError) as e:
            # Server replied, but not in OpenAI chat-completions shape.
            print(f"Unexpected response format: {e}")
            return None

    def evaluate_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
        """
        Evaluate a single sample.

        Args:
            sample: Test sample with instruction, input, output, system, history

        Returns:
            Evaluation result with prediction and metadata
        """
        instruction = sample.get("instruction", "")
        input_text = sample.get("input", "")
        expected_output = sample.get("output", "")
        system = sample.get("system", "")
        history = sample.get("history", [])

        # Get model prediction (None on API failure).
        prediction = self.call_model(
            instruction=instruction,
            input_text=input_text,
            system=system,
            history=history
        )
        result = {
            "instruction": instruction,
            "input": input_text,
            "expected_output": expected_output,
            "prediction": prediction,
            "system": system,
            "history": history,
            "success": prediction is not None
        }
        return result

    def run_evaluation(self, num_samples: int = 100, save_results: bool = True,
                       output_path: str = "evaluation_results.json") -> Dict[str, Any]:
        """
        Run evaluation on a subset of test data.

        Args:
            num_samples: Number of samples to evaluate
            save_results: Whether to save results to file
            output_path: Path to save results

        Returns:
            Evaluation summary and results (empty dict if no data loaded)
        """
        if not self.test_data:
            print("No test data loaded. Please call load_test_data() first.")
            return {}

        # Select samples to evaluate; the dataset may be a list or a dict.
        test_samples = list(self.test_data)[:num_samples] if isinstance(self.test_data, list) else list(self.test_data.values())[:num_samples]
        print(f"Evaluating {len(test_samples)} samples...")

        results = []
        successful_calls = 0
        failed_calls = 0
        # FIX: dropped an unused enumerate() index from the original loop.
        for sample in tqdm(test_samples, desc="Evaluating"):
            result = self.evaluate_sample(sample)
            results.append(result)
            if result["success"]:
                successful_calls += 1
            else:
                failed_calls += 1
            # Add delay to avoid overwhelming the server
            time.sleep(0.1)

        # Compile summary
        summary = {
            "total_samples": len(test_samples),
            "successful_calls": successful_calls,
            "failed_calls": failed_calls,
            "success_rate": successful_calls / len(test_samples) if test_samples else 0,
            "results": results
        }
        # Save results if requested
        if save_results:
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(summary, f, indent=2, ensure_ascii=False)
            print(f"Results saved to {output_path}")
        return summary

    def analyze_results(self, results: Dict[str, Any]) -> None:
        """
        Analyze and print evaluation results.

        Args:
            results: Results from run_evaluation()
        """
        # FIX: run_evaluation() returns {} when no data is loaded; the
        # original crashed with KeyError here instead of degrading gracefully.
        if not results:
            print("No results to analyze.")
            return
        print("\n" + "="*50)
        print("EVALUATION SUMMARY")
        print("="*50)
        print(f"Total samples evaluated: {results['total_samples']}")
        print(f"Successful API calls: {results['successful_calls']}")
        print(f"Failed API calls: {results['failed_calls']}")
        print(f"Success rate: {results['success_rate']:.2%}")

        if results['results']:
            print("\n" + "-"*50)
            print("SAMPLE RESULTS")
            print("-"*50)
            # Show first few successful predictions
            successful_results = [r for r in results['results'] if r['success']]
            for i, result in enumerate(successful_results[:3]):
                print(f"\nSample {i+1}:")
                print(f"Instruction: {result['instruction'][:100]}...")
                print(f"Input: {result['input'][:50]}..." if result['input'] else "Input: (empty)")
                print(f"Expected: {result['expected_output'][:100]}...")
                print(f"Predicted: {result['prediction'][:100]}..." if result['prediction'] else "Predicted: (failed)")
        print("\n" + "="*50)

    def test_specific_tasks(self) -> None:
        """Test the model on specific evaluation tasks."""
        print("\n" + "="*50)
        print("TESTING SPECIFIC EVALUATION TASKS")
        print("="*50)

        # Test 1: VBench-style video evaluation
        print("\n1. Testing VBench-style video evaluation:")
        vbench_instruction = "How accurately does the model generate specific object classes as described in the text prompt?"
        vbench_system = """
You are an expert in evaluating video generation models. Your task is to dynamically explore the model's capabilities step by step, simulating the process of human exploration.
Dynamic evaluation refers to initially providing a preliminary focus based on the user's question, and then continuously adjusting what aspects to focus on according to the intermediate evaluation results.
Please provide your analysis using the following format:
Sub-aspect: The specific aspect you want to focus on.
Tool: The evaluation tool you choose to use.
Thought: Detailed explanation of your reasoning.
"""
        response = self.call_model(
            instruction=vbench_instruction,
            system=vbench_system
        )
        print(f"Response: {response[:500]}..." if response else "Failed to get response")

        # Test 2: T2I-CompBench-style image evaluation
        print("\n2. Testing T2I-CompBench-style image evaluation:")
        t2i_instruction = "How well does the model handle color accuracy in generated images?"
        t2i_system = """
You are an expert evaluator for text-to-image generation models. Evaluate the model's performance on color accuracy.
Provide your assessment with reasoning and specific examples.
"""
        response = self.call_model(
            instruction=t2i_instruction,
            system=t2i_system
        )
        print(f"Response: {response[:500]}..." if response else "Failed to get response")

        # Test 3: Open-ended evaluation
        print("\n3. Testing open-ended evaluation:")
        open_instruction = "What are the key strengths and weaknesses of this image generation model?"
        response = self.call_model(
            instruction=open_instruction
        )
        print(f"Response: {response[:500]}..." if response else "Failed to get response")
        print("\n" + "="*50)
def main():
    """Entry point: parse CLI options, then run either the quick task
    smoke tests or a full dataset evaluation."""
    parser = argparse.ArgumentParser(description="Evaluate the fine-tuned Qwen2.5-3B evaluation agent")
    parser.add_argument("--model_url", default="http://0.0.0.0:12333/v1/chat/completions",
                        help="URL of the model server")
    parser.add_argument("--model_name", default="eval-agent",
                        help="Name of the served model")
    parser.add_argument("--data_path", default="data/postprocess_20250819/ea_cot_dataset_10k.json",
                        help="Path to test dataset")
    parser.add_argument("--num_samples", type=int, default=50,
                        help="Number of samples to evaluate")
    parser.add_argument("--output_path", default="evaluation_results.json",
                        help="Path to save evaluation results")
    parser.add_argument("--test_tasks", action="store_true",
                        help="Run specific task tests instead of full evaluation")
    args = parser.parse_args()

    print("Initializing Evaluation Agent Tester...")
    tester = EvalAgentTester(model_url=args.model_url, model_name=args.model_name)

    # Quick mode: a handful of hand-written probes, no dataset needed.
    if args.test_tasks:
        tester.test_specific_tasks()
        return

    print(f"Loading test data from {args.data_path}...")
    if not tester.load_test_data(args.data_path):
        print("Failed to load test data. Exiting.")
        return

    print("Starting evaluation...")
    summary = tester.run_evaluation(
        num_samples=args.num_samples,
        save_results=True,
        output_path=args.output_path,
    )
    tester.analyze_results(summary)
    print(f"\nEvaluation complete! Results saved to {args.output_path}")


if __name__ == "__main__":
    main()