|
|
""" |
|
|
Evaluator for HuggingFace dataset-based prompt optimization. |
|
|
""" |
|
|
|
|
|
import re |
|
|
import traceback |
|
|
import yaml |
|
|
import os |
|
|
import time |
|
|
from openai import OpenAI |
|
|
from tqdm import tqdm |
|
|
from datasets import load_dataset |
|
|
|
|
|
|
|
|
# --- Module-level setup (runs at import time) ---------------------------------
# Load the evaluator configuration from config.yaml located next to this file.
with open(os.path.join(os.path.dirname(__file__), "config.yaml"), "r") as f:
    config = yaml.safe_load(f)

# LLM connection settings; api_base defaults to a local OpenAI-compatible server.
llm_config = config.get("llm", {})
api_base = llm_config.get("api_base", "http://localhost:1234/v1")

# Pick the task model: first entry of llm.models if present, otherwise
# llm.primary_model, otherwise a placeholder name.
models = llm_config.get("models", [])
if models:
    TASK_MODEL_NAME = models[0].get("name", "default-model")
else:
    TASK_MODEL_NAME = llm_config.get("primary_model", "default-model")

# Retry budget for each chat-completion request (see evaluate_prompt).
evaluator_config = config.get("evaluator", {})
MAX_RETRIES = evaluator_config.get("max_retries", 3)

# Per-request completion token cap passed to the OpenAI client.
MAX_TOKENS = llm_config.get("max_tokens", 16000)
print(f"Using max_tokens: {MAX_TOKENS}")

# Shared OpenAI client used for all evaluations in this module.
# NOTE(review): no api_key is passed here — presumably the local server
# ignores auth or OPENAI_API_KEY is set in the environment; confirm.
test_model = OpenAI(base_url=api_base)
print(f"Initialized OpenAI client with model: {TASK_MODEL_NAME}")

import sys  # NOTE(review): appears unused in this file — confirm before removing

# Resolve DATASET_CONFIG_PATH: the dataset YAML that pairs with the prompt file
# named by OPENEVOLVE_PROMPT ("<x>_prompt.txt" -> "<x>_prompt_dataset.yaml",
# other "*.txt" -> "*_dataset.yaml"); falls back to dataset_settings.yaml.
prompt_file = os.environ.get("OPENEVOLVE_PROMPT")
if not prompt_file:
    evaluator_dir = os.path.dirname(os.path.abspath(__file__))
    DATASET_CONFIG_PATH = os.path.join(evaluator_dir, "dataset_settings.yaml")
    print("Warning: OPENEVOLVE_PROMPT not set. Using default dataset_settings.yaml")
else:
    basename = os.path.basename(prompt_file)
    dataset_filename = basename.replace("_prompt.txt", "_prompt_dataset.yaml").replace(
        ".txt", "_dataset.yaml"
    )
    evaluator_dir = os.path.dirname(os.path.abspath(__file__))
    DATASET_CONFIG_PATH = os.path.join(evaluator_dir, dataset_filename)
    print(f"Dataset configuration: {dataset_filename}")
|
|
|
|
|
|
|
|
def calculate_prompt_features(prompt):
    """
    Calculate custom features for MAP-Elites

    IMPORTANT: Returns raw continuous values, not bin indices.
    The database handles all scaling and binning automatically.

    Returns:
        tuple: (prompt_length, reasoning_sophistication_score)
            - prompt_length: Actual character count
            - reasoning_sophistication_score: Continuous score 0.0-1.0
    """
    length = len(prompt)
    lowered = prompt.lower()

    # Small base credit for non-trivial prompts.
    score = 0.1 if length >= 100 else 0.0

    # Signal detection: worked examples, chain-of-thought cues,
    # directive verbs, and strictness markers.
    has_example = (
        "example" in lowered
        or prompt.count("####") >= 4
        or re.search(r"problem:.*?solution:", lowered, re.DOTALL) is not None
    )
    cot_cues = ("step by step", "step-by-step", "think through", "reasoning", "explain your")
    has_cot = (
        any(cue in lowered for cue in cot_cues)
        or re.search(r"(first|then|next|finally)", lowered) is not None
    )
    has_directive = any(verb in lowered for verb in ("solve", "calculate"))
    has_strict = any(word in lowered for word in ("must", "exactly"))

    # Scoring tiers: examples dominate, chain-of-thought comes second,
    # bare directives earn the least.
    if has_example:
        score += 0.6
        if has_cot:
            score += 0.3
        elif length > 1500:
            score += 0.2
        else:
            score += 0.1
    elif has_cot:
        score += 0.4
        if has_strict:
            score += 0.2
        elif length > 500:
            score += 0.15
        else:
            score += 0.1
    else:
        score += 0.2 if has_directive else 0.1

    # Clamp into the documented 0.0-1.0 range.
    return length, min(1.0, max(0.0, score))
|
|
|
|
|
|
|
|
def load_prompt_config(prompt_path):
    """Load the prompt from text file and dataset config from matching _dataset.yaml file.

    Args:
        prompt_path: Path to the prompt text file.

    Returns:
        tuple: (dataset config dict, stripped prompt string).

    Raises:
        FileNotFoundError: If the module-level DATASET_CONFIG_PATH does not exist.
    """
    with open(prompt_path, "r") as handle:
        prompt_text = handle.read().strip()

    # The dataset YAML was resolved at import time from OPENEVOLVE_PROMPT.
    if not os.path.exists(DATASET_CONFIG_PATH):
        raise FileNotFoundError(f"Dataset configuration not found: {DATASET_CONFIG_PATH}")

    with open(DATASET_CONFIG_PATH, "r") as handle:
        dataset_config = yaml.safe_load(handle)

    return dataset_config, prompt_text
|
|
|
|
|
|
|
|
def load_hf_dataset(config):
    """Load HuggingFace dataset based on configuration.

    Args:
        config: Dataset configuration dict. Required key: "dataset_name".
            Optional keys: "dataset_config", "split" (default "test"),
            "trust_remote_code" (default True), "streaming" (default True),
            "is_hotpotqa".

    Returns:
        The loaded dataset (streaming or in-memory, depending on config).
    """
    dataset_name = config["dataset_name"]
    dataset_config = config.get("dataset_config", None)
    split = config.get("split", "test")
    trust_remote_code = config.get("trust_remote_code", True)

    print(f"Loading dataset: {dataset_name}")

    # HotpotQA is forced to non-streaming mode regardless of config.
    if dataset_name == "hotpot_qa" or config.get("is_hotpotqa", False):
        print("Using non-streaming mode for HotpotQA to avoid PyArrow issues")
        streaming = False
    else:
        streaming = config.get("streaming", True)

    # Build the call once so the try/fallback paths stay in sync.
    args = (dataset_name, dataset_config) if dataset_config else (dataset_name,)
    kwargs = {"trust_remote_code": trust_remote_code, "streaming": streaming}

    try:
        dataset = load_dataset(*args, split=split, **kwargs)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate. Any load failure falls back to the train split.
        print(f"Split '{split}' not found, falling back to 'train'")
        dataset = load_dataset(*args, split="train", **kwargs)

    # Streaming datasets have no __len__.
    if hasattr(dataset, "__len__"):
        print(f"Dataset loaded with {len(dataset)} examples")
    else:
        print(f"Dataset loaded (streaming mode)")

    return dataset
|
|
|
|
|
|
|
|
def evaluate_prompt(prompt, dataset, config, num_samples):
    """Evaluate a prompt on a subset of the dataset.

    Args:
        prompt: Prompt template containing a task-specific placeholder
            ({context}/{question}, {instruction}, {claim}, or {input_text}).
        dataset: A HuggingFace dataset (streaming or in-memory).
        config: Dataset configuration dict with "input_field", "target_field",
            and optional task flags.
        num_samples: Maximum number of examples to evaluate.

    Returns:
        tuple: (accuracy, correct, total)
    """
    input_field = config["input_field"]
    target_field = config["target_field"]

    # Task flags select both the prompt format and the answer parser below.
    dataset_name = config.get("dataset_name", "").lower()
    is_emotion = "emotion" in dataset_name
    is_gsm8k = "gsm8k" in dataset_name
    is_hotpotqa = config.get("is_hotpotqa", False)
    is_ifeval = config.get("is_ifeval", False)
    is_hover = config.get("is_hover", False)

    if hasattr(dataset, "take"):
        # Streaming dataset: take() is lazy, so tell tqdm the total explicitly.
        samples = dataset.take(num_samples)
        sample_iter = tqdm(samples, desc=f"Evaluating {num_samples} samples", total=num_samples)
    else:
        # In-memory dataset: clamp to the dataset size and select by index.
        indices = range(min(num_samples, len(dataset)))
        samples = dataset.select(indices)
        sample_iter = tqdm(samples, desc=f"Evaluating {num_samples} samples")

    correct = 0
    total = 0

    for example in sample_iter:
        input_text = example[input_field]
        expected = example[target_field]

        # Build the task-specific prompt.
        if is_hotpotqa:
            # Flatten HotpotQA's parallel title/sentences lists into paragraphs.
            context_items = example.get("context", {})
            context_text = ""
            if "title" in context_items and "sentences" in context_items:
                for i, (title, sentences) in enumerate(
                    zip(context_items["title"], context_items["sentences"])
                ):
                    context_text += f"Paragraph {i+1} ({title}):\n"
                    context_text += " ".join(sentences) + "\n\n"
            formatted_prompt = prompt.format(context=context_text.strip(), question=input_text)
        elif is_ifeval:
            formatted_prompt = prompt.format(instruction=input_text)
        elif is_hover:
            formatted_prompt = prompt.format(claim=input_text)
        else:
            formatted_prompt = prompt.format(input_text=input_text)

        messages = [{"role": "user", "content": formatted_prompt}]

        # Query the model, retrying transient failures with a short backoff.
        # response is reset per example so a stale value from a previous
        # iteration can never be scored if MAX_RETRIES <= 0.
        response = None
        for attempt in range(MAX_RETRIES):
            try:
                response = test_model.chat.completions.create(
                    model=TASK_MODEL_NAME,
                    messages=messages,
                    temperature=0.1,
                    max_tokens=MAX_TOKENS,
                )
                break
            except Exception as e:
                if attempt == MAX_RETRIES - 1:
                    print(f"Failed to get response after {MAX_RETRIES} attempts: {e}")
                    raise e
                time.sleep(1)

        # Defensive checks: any malformed response counts as incorrect.
        if not response:
            print(f"Warning: No response object from LLM")
            total += 1
            continue

        if not response.choices:
            print(f"Warning: No choices in response from LLM")
            total += 1
            continue

        if not response.choices[0].message:
            print(f"Warning: No message in response choice")
            total += 1
            continue

        output_text = response.choices[0].message.content
        if output_text is None:
            print(f"Warning: None content in LLM response")
            print(f"Full response: {response}")
            total += 1
            continue

        output_text = output_text.strip()

        # Parse and score the model output per task.
        try:
            if is_gsm8k:
                # GSM8K gold answers end with "#### <number>".
                expected_answer = expected.split("####")[-1].strip()
                try:
                    expected_number = float(expected_answer.replace(",", ""))
                except ValueError:  # narrowed from bare except
                    print(f"Warning: Could not parse expected answer: {expected_answer}")
                    total += 1
                    continue

                prediction = None
                if "####" in output_text:
                    predicted_answer = output_text.split("####")[-1].strip()
                    # Accept an optional sign/$ and thousands separators.
                    # (Uses the module-level `re` import; the per-iteration
                    # import that was here was redundant.)
                    numbers = re.findall(r"-?\$?[\d,]+\.?\d*", predicted_answer)
                    if numbers:
                        try:
                            number_str = numbers[0].replace("$", "").replace(",", "")
                            prediction = float(number_str)
                        except ValueError:  # narrowed from bare except
                            pass

                if prediction is not None:
                    # Tolerate float round-off in numeric comparison.
                    if abs(prediction - expected_number) < 0.001:
                        correct += 1

                total += 1
                continue

            elif is_hotpotqa:
                # Case-insensitive comparison ignoring trailing punctuation;
                # containment of the gold answer also counts as correct.
                output_lower = output_text.lower().strip()
                expected_lower = str(expected).lower().strip()

                output_lower = output_lower.rstrip(".,!?;:")
                expected_lower = expected_lower.rstrip(".,!?;:")

                if output_lower == expected_lower:
                    correct += 1
                elif expected_lower in output_lower:
                    correct += 1

                total += 1
                continue

            elif is_ifeval:
                # Lenient proxy metric: any non-trivial answer is "correct".
                if len(output_text.strip()) > 10:
                    correct += 1

                total += 1
                continue

            elif is_hover:
                # Map output to SUPPORTED / NOT_SUPPORTED verdicts.
                output_upper = output_text.upper()
                expected_upper = str(expected).upper()

                # "SUPPORTED" counts only when no stray "NOT" remains after
                # removing exact "NOT SUPPORTED" phrases.
                if "SUPPORTED" in output_upper and "NOT" not in output_upper.replace(
                    "NOT SUPPORTED", ""
                ):
                    prediction = "SUPPORTED"
                elif "NOT SUPPORTED" in output_upper or "NOT_SUPPORTED" in output_upper:
                    prediction = "NOT_SUPPORTED"
                else:
                    prediction = None

                if prediction == expected_upper:
                    correct += 1

                total += 1
                continue

            elif is_emotion:
                # Prefer an explicit digit 0-5; fall back to keyword matching.
                numbers = re.findall(r"\b[0-5]\b", output_text)
                if numbers:
                    prediction = int(numbers[-1])
                else:
                    output_lower = output_text.lower()
                    emotion_map = {
                        "sadness": 0,
                        "sad": 0,
                        "joy": 1,
                        "happy": 1,
                        "happiness": 1,
                        "love": 2,
                        "anger": 3,
                        "angry": 3,
                        "fear": 4,
                        "afraid": 4,
                        "scared": 4,
                        "surprise": 5,
                        "surprised": 5,
                    }
                    prediction = -1
                    for emotion, label in emotion_map.items():
                        if emotion in output_lower:
                            prediction = label
                            break
            else:
                # Default binary sentiment: explicit 0/1 digit, else keywords.
                numbers = re.findall(r"\b[01]\b", output_text)
                if numbers:
                    prediction = int(numbers[-1])
                else:
                    output_lower = output_text.lower()
                    if "positive" in output_lower:
                        prediction = 1
                    elif "negative" in output_lower:
                        prediction = 0
                    else:
                        prediction = -1

            if prediction == expected:
                correct += 1

            total += 1

        except Exception as e:
            # Unparseable output counts against accuracy but doesn't abort the run.
            print(f"Error parsing response '{output_text}': {e}")
            total += 1

    accuracy = correct / total if total > 0 else 0.0
    return accuracy, correct, total
|
|
|
|
|
|
|
|
def evaluate_stage1(prompt_path):
    """
    Stage 1 evaluation: Quick evaluation with 10% of samples

    Args:
        prompt_path: Path to the prompt file

    Returns:
        Dictionary with combined_score metric
    """
    print("-" * 80)
    print("Starting Stage 1 evaluation...")
    print("-" * 80)

    try:
        config, prompt = load_prompt_config(prompt_path)
        print(f"Loaded prompt configuration")

        dataset = load_hf_dataset(config)

        # Fixed quick-pass budget. (Removed an unused `num_samples` local
        # that read config["max_samples"] without using it.)
        stage1_samples = 10

        print(f"Stage 1: Evaluating {stage1_samples} samples...")

        accuracy, correct, total = evaluate_prompt(prompt, dataset, config, stage1_samples)

        print(f"Stage 1 accuracy: {accuracy:.3f} ({correct}/{total})")
        print("-" * 80)

        # MAP-Elites features accompany the score for database binning.
        prompt_length, reasoning_sophistication = calculate_prompt_features(prompt)
        print(
            f"Prompt features - Length: {prompt_length} chars, Reasoning sophistication: {reasoning_sophistication:.3f}"
        )

        return {
            "combined_score": accuracy,
            "prompt_length": prompt_length,
            "reasoning_strategy": reasoning_sophistication,
        }

    except Exception as e:
        print(f"Stage 1 evaluation failed: {str(e)}")
        traceback.print_exc()
        print("-" * 80)

        # Best effort: still report features for the failed prompt so the
        # MAP-Elites grid stays populated.
        try:
            with open(prompt_path, "r") as f:
                failed_prompt = f.read().strip()
            prompt_length, reasoning_sophistication = calculate_prompt_features(failed_prompt)
        except Exception:  # narrowed from bare except
            prompt_length, reasoning_sophistication = 0, 0.0

        return {
            "combined_score": 0.0,
            "prompt_length": prompt_length,
            "reasoning_strategy": reasoning_sophistication,
            "error": str(e),
        }
|
|
|
|
|
|
|
|
def evaluate_stage2(prompt_path):
    """
    Stage 2 evaluation: Full evaluation with all samples

    Args:
        prompt_path: Path to the prompt file

    Returns:
        Dictionary with combined_score metric
    """
    print("-" * 80)
    print("Starting Stage 2 evaluation...")
    print("-" * 80)

    try:
        config, prompt = load_prompt_config(prompt_path)
        print(f"Loaded prompt configuration")

        dataset = load_hf_dataset(config)

        # Fixed full-pass budget. (Removed an unused `num_samples` local
        # that read config["max_samples"] without using it.)
        stage2_samples = 40

        print(f"Stage 2: Evaluating {stage2_samples} samples...")

        accuracy, correct, total = evaluate_prompt(prompt, dataset, config, stage2_samples)

        print(f"Stage 2 accuracy: {accuracy:.3f} ({correct}/{total})")
        print("-" * 80)

        # MAP-Elites features accompany the score for database binning.
        prompt_length, reasoning_sophistication = calculate_prompt_features(prompt)
        print(
            f"Prompt features - Length: {prompt_length} chars, Reasoning sophistication: {reasoning_sophistication:.3f}"
        )

        return {
            "combined_score": accuracy,
            "prompt_length": prompt_length,
            "reasoning_strategy": reasoning_sophistication,
        }

    except Exception as e:
        print(f"Stage 2 evaluation failed: {str(e)}")
        traceback.print_exc()
        print("-" * 80)

        # Best effort: still report features for the failed prompt so the
        # MAP-Elites grid stays populated.
        try:
            with open(prompt_path, "r") as f:
                failed_prompt = f.read().strip()
            prompt_length, reasoning_sophistication = calculate_prompt_features(failed_prompt)
        except Exception:  # narrowed from bare except
            prompt_length, reasoning_sophistication = 0, 0.0

        return {
            "combined_score": 0.0,
            "prompt_length": prompt_length,
            "reasoning_strategy": reasoning_sophistication,
            "error": str(e),
        }
|
|
|
|
|
|
|
|
def evaluate(prompt_path):
    """
    Main evaluation function - for backwards compatibility
    Calls evaluate_stage2 for full evaluation

    Args:
        prompt_path: Path to the prompt file

    Returns:
        Dictionary with combined_score metric
    """
    # Thin alias kept so older callers keep working; Stage 2 is the full run.
    result = evaluate_stage2(prompt_path)
    return result
|
|
|