""" Evaluator for web scraper evolution. This evaluator tests the scraper against real documentation pages, providing feedback on accuracy and robustness. It includes URLs that will be fetched by optillm's readurls plugin during evolution. """ import sys import os import traceback from typing import Dict, List, Any # Add the program directory to the path sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) def evaluate(program_path: str) -> Dict: """ Evaluate the web scraper program. Args: program_path: Path to the program to evaluate Returns: Dictionary with metrics and artifacts for OpenEvolve compatibility """ try: # Import the program sys.path.insert(0, os.path.dirname(program_path)) program_name = os.path.basename(program_path).replace(".py", "") program = __import__(program_name) # Test data: HTML content from various documentation sources test_cases = get_test_cases() # Evaluate each test case metrics = { "accuracy": 0.0, "completeness": 0.0, "robustness": 0.0, "parsing_errors": 0.0, "total_score": 0.0, } artifacts = {} total_correct = 0 total_expected = 0 parsing_errors = 0 for i, test_case in enumerate(test_cases): try: # Run the scraper docs = program.scrape_api_docs(test_case["html"]) # Evaluate accuracy correct, expected = evaluate_extraction(docs, test_case["expected"]) total_correct += correct total_expected += expected # Test parameter extraction for doc in docs: if "parameters" not in doc: doc["parameters"] = program.extract_parameters(doc.get("signature", "")) # Test formatting formatted = program.format_documentation(docs) # Store results for debugging artifacts[f"test_case_{i}"] = { "expected_count": expected, "found_count": correct, "extracted_functions": [doc.get("name", "unknown") for doc in docs], "formatted_length": len(formatted), } except Exception as e: parsing_errors += 1 artifacts[f"test_case_{i}_error"] = str(e) # Calculate metrics if total_expected > 0: metrics["accuracy"] = total_correct / total_expected metrics["completeness"] = min(1.0, total_correct / 20) # Expect ~20 functions total metrics["robustness"] = max(0.0, 1.0 - (parsing_errors / len(test_cases))) metrics["parsing_errors"] = parsing_errors / len(test_cases) # Overall score - use 'combined_score' as primary metric for evolution metrics["combined_score"] = ( metrics["accuracy"] * 0.4 + metrics["completeness"] * 0.3 + metrics["robustness"] * 0.3 ) # Add detailed feedback for the LLM artifacts["evaluation_feedback"] = generate_feedback(metrics, artifacts) # Return dictionary format for OpenEvolve compatibility return metrics except Exception as e: return { "accuracy": 0.0, "completeness": 0.0, "robustness": 0.0, "parsing_errors": 1.0, "combined_score": 0.0, "error": str(e), "traceback": traceback.format_exc(), "stage": "program_import", } def get_test_cases() -> List[Dict[str, Any]]: """ Get test cases with HTML content and expected results. These test cases include URLs that will be fetched by optillm's readurls plugin during evolution, providing the LLM with actual documentation structure. Returns: List of test cases with HTML content and expected results """ return [ { "name": "json_module_docs", "html": """


def get_test_cases() -> List[Dict[str, Any]]:
    """
    Get test cases with HTML content and expected results.

    These test cases include URLs that will be fetched by optillm's
    readurls plugin during evolution, providing the LLM with actual
    documentation structure.

    Returns:
        List of test cases with HTML content and expected results
    """
    return [
        {
            "name": "json_module_docs",
            "html": """
<!-- Source: https://docs.python.org/3/library/json.html -->
<dl class="py function">
  <dt id="json.dumps"><code>json.dumps(obj)</code></dt>
  <dd><p>Serialize obj to a JSON formatted string.</p></dd>
  <dt id="json.loads"><code>json.loads(s)</code></dt>
  <dd><p>Deserialize s to a Python object.</p></dd>
</dl>
""",
            "expected": ["json.dumps", "json.loads"],
        },
        {
            "name": "requests_api_docs",
            "html": """
<p>Refer to https://requests.readthedocs.io/en/latest/api/ for full API</p>
<h3><code>requests.get(url, params=None, **kwargs)</code></h3>
<p>Sends a GET request.</p>
<h3><code>requests.post(url, data=None, json=None, **kwargs)</code></h3>
<p>Sends a POST request.</p>
""",
            "expected": ["requests.get", "requests.post"],
        },
        {
            "name": "beautifulsoup_docs",
            "html": """
<p>Documentation at https://www.crummy.com/software/BeautifulSoup/bs4/doc/</p>
<dl>
  <dt><code>BeautifulSoup(markup, parser)</code></dt>
  <dd>Parse a string using a specified parser.</dd>
  <dt><code>find(name, attrs=None)</code></dt>
  <dd>Find the first matching tag.</dd>
  <dt><code>find_all(name, attrs=None, limit=None)</code></dt>
  <dd>Find all matching tags.</dd>
</dl>
""",
            "expected": ["BeautifulSoup", "find", "find_all"],
        },
        {
            "name": "malformed_html",
            "html": """
<div><p>This tests robustness - check https://example.com/weird-api-docs
<pre>function_name(arg1, arg2=default_value)</pre>
<span>Another description here
<table><tr><td>another_func()</td><td>Does something</td></tr>
""",
            "expected": ["function_name", "another_func"],
        },
    ]


def evaluate_extraction(docs: List[Dict], expected: List[str]):
    """
    Compare extracted documentation entries against expected function names.

    Args:
        docs: Documentation dicts produced by the scraper
        expected: Function names that should have been extracted

    Returns:
        Tuple of (correct_count, expected_count)
    """
    found_names = [doc.get("name", "") for doc in docs]
    correct = 0
    for name in expected:
        # Lenient match: accept "dumps" for "json.dumps" and vice versa
        if any(found and (name in found or found in name) for found in found_names):
            correct += 1
    return correct, len(expected)
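
# For example, a scraper returning [{"name": "dumps"}] against
# expected ["json.dumps", "json.loads"] scores (1, 2) under the lenient
# matching sketched above.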
tags."
)
    elif metrics["accuracy"] < 0.8:
        feedback.append("✅ **Good Accuracy**: Most functions are found, but some are missed.")
        feedback.append("Fine-tune the extraction logic for edge cases.")
    else:
        feedback.append("🎉 **Excellent Accuracy**: Function extraction is working well!")

    feedback.append("")

    # Completeness feedback
    if metrics["completeness"] < 0.5:
        feedback.append("⚠️ **Low Completeness**: Not extracting enough functions overall.")
        feedback.append("Increase the limit or improve the search scope.")

    # Robustness feedback
    if metrics["robustness"] < 0.8:
        feedback.append("⚠️ **Low Robustness**: The scraper fails on some HTML formats.")
feedback.append("Add try-catch blocks and handle different documentation structures.")
feedback.append("Consider multiple parsing strategies and fallback methods.")

    # Specific improvements
    feedback.append("")
    feedback.append("## Specific Improvements:")

    # Analyze test case results
    for key, value in artifacts.items():
        if key.startswith("test_case_") and key.endswith("_error"):
            # Error artifacts are stored as plain strings, not dicts
            feedback.append(f"- Fix error in {key}: {value}")
        elif key.startswith("test_case_") and isinstance(value, dict):
            if value.get("found_count", 0) < value.get("expected_count", 0):
                feedback.append(
                    f"- Improve extraction for {key}: found "
                    f"{value.get('found_count', 0)}/{value.get('expected_count', 0)} functions"
                )

    # Documentation URL hints (these will be fetched by readurls plugin)
    feedback.append("")
    feedback.append("## Documentation References:")
    feedback.append("For improving parsing, refer to these documentation structures:")
    feedback.append("- Python docs: https://docs.python.org/3/library/json.html")
    feedback.append("- Requests docs: https://requests.readthedocs.io/en/latest/api/")
    feedback.append("- BeautifulSoup docs: https://www.crummy.com/software/BeautifulSoup/bs4/doc/")

    return "\n".join(feedback)