File size: 5,337 Bytes
5e4510c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 |
"""
Test utilities for OpenEvolve tests
Provides common functions and constants for consistent testing
"""
import os
import sys
import time
import subprocess
import requests
import socket
from typing import Optional, Tuple
from openai import OpenAI
from openevolve.config import Config, LLMModelConfig
# Standard test model for integration tests - small and fast
TEST_MODEL = "google/gemma-3-270m-it"
DEFAULT_PORT = 8000
DEFAULT_BASE_URL = f"http://localhost:{DEFAULT_PORT}/v1"
def find_free_port(start_port: int = 8000, max_tries: int = 100) -> int:
    """Return the first bindable TCP port on localhost in [start_port, start_port + max_tries).

    Args:
        start_port: First port number to probe.
        max_tries: Number of consecutive ports to probe before giving up.

    Returns:
        A port that was free at probe time. NOTE(review): another process
        could still claim it between this probe and actual use.

    Raises:
        RuntimeError: If no port in the range could be bound.
    """
    for port in range(start_port, start_port + max_tries):
        # Context manager closes the probe socket exactly once, even if bind raises
        # (the original closed it twice: once in try, once in finally).
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            try:
                sock.bind(("localhost", port))
                return port
            except OSError:
                continue  # Port already in use; try the next one
    # Last port actually tried is start_port + max_tries - 1, not + max_tries.
    raise RuntimeError(
        f"Could not find free port in range {start_port}-{start_port + max_tries - 1}"
    )
def setup_test_env():
    """Point the environment at the local optillm inference server.

    Overwrites OPTILLM_API_KEY with the dummy key the local server expects,
    then hands back the standard test model name for convenience.
    """
    dummy_key = "optillm"
    os.environ["OPTILLM_API_KEY"] = dummy_key
    return TEST_MODEL
def get_test_client(base_url: str = DEFAULT_BASE_URL) -> OpenAI:
    """Build an OpenAI-compatible client pointed at the local optillm server.

    Args:
        base_url: Base URL of the server's OpenAI-style API.

    Returns:
        An OpenAI client authenticated with the dummy local key.
    """
    client = OpenAI(api_key="optillm", base_url=base_url)
    return client
def start_test_server(model: str = TEST_MODEL, port: Optional[int] = None) -> Tuple[subprocess.Popen, int]:
    """
    Start an optillm server subprocess for testing.

    Args:
        model: Model identifier the server should load.
        port: Port to listen on; a free port is chosen when None.

    Returns:
        Tuple of (process_handle, actual_port_used).

    Raises:
        RuntimeError: If the child process dies or never answers its health
            check within ~30 seconds (the process is cleaned up first).
    """
    if port is None:
        port = find_free_port()
    # Set environment for local inference
    env = os.environ.copy()
    env["OPTILLM_API_KEY"] = "optillm"
    # Pass HF_TOKEN if available (needed for model downloads in CI)
    if "HF_TOKEN" in os.environ:
        env["HF_TOKEN"] = os.environ["HF_TOKEN"]
    print(f"Starting optillm server on port {port}...")
    # Start server (don't capture output to avoid pipe buffer deadlock)
    proc = subprocess.Popen(
        ["optillm", "--model", model, "--port", str(port)],
        env=env,
    )
    # Poll the health endpoint until the server answers or we give up.
    for attempt in range(30):
        # Fail fast if the child already exited (e.g. optillm not installed):
        # the original kept polling for the full 30s against a dead process.
        if proc.poll() is not None:
            break
        try:
            response = requests.get(f"http://localhost:{port}/health", timeout=2)
            if response.status_code == 200:
                print(f"✅ optillm server started successfully on port {port}")
                return proc, port
        except Exception as e:
            if attempt < 5:  # Only print for first few attempts to avoid spam
                print(f"Attempt {attempt + 1}: Waiting for server... ({e})")
        time.sleep(1)
    # Server didn't start in time - clean up
    error_msg = f"optillm server failed to start on port {port}"
    print(f"❌ {error_msg} - check that optillm is installed and model is available")
    # Clean up: terminate politely, then kill if it won't exit.
    try:
        proc.terminate()
        proc.wait(timeout=5)
    except subprocess.TimeoutExpired:
        proc.kill()
        proc.wait()
    raise RuntimeError(error_msg)
def stop_test_server(proc: subprocess.Popen):
    """Shut down a server process started by start_test_server.

    Politely terminates the process and waits up to five seconds for it to
    exit; escalates to a hard kill if it does not comply in time.
    """
    proc.terminate()
    try:
        proc.wait(timeout=5)
    except subprocess.TimeoutExpired:
        # Process ignored SIGTERM; force it down and reap the zombie.
        proc.kill()
        proc.wait()
def is_server_running(port: int = DEFAULT_PORT) -> bool:
    """Check if an optillm server is answering its health check on the given port.

    Args:
        port: Localhost port to probe.

    Returns:
        True if GET /health returns HTTP 200, False on any connection error
        or non-200 response.
    """
    try:
        response = requests.get(f"http://localhost:{port}/health", timeout=2)
        return response.status_code == 200
    # Narrowed from a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit; RequestException covers all requests failures.
    except requests.RequestException:
        return False
def get_integration_config(port: int = DEFAULT_PORT) -> Config:
    """Build an OpenEvolve Config wired to a local optillm server.

    Args:
        port: Port where the optillm server is listening.

    Returns:
        A Config tuned for fast CI runs: few iterations, in-memory database,
        short evaluator timeout, and no LLM retries, with all model traffic
        routed to localhost.
    """
    base_url = f"http://localhost:{port}/v1"

    config = Config()

    # Keep the evolution loop tiny so CI finishes quickly.
    config.max_iterations = 5
    config.checkpoint_interval = 2
    config.database.in_memory = True

    # Evaluator: short timeout; cascade disabled to avoid warnings from the
    # simple single-stage test evaluators.
    config.evaluator.parallel_evaluations = 2
    config.evaluator.timeout = 10
    config.evaluator.cascade_evaluation = False

    # LLM: fail fast (no retries) but give the local model plenty of time.
    config.llm.retries = 0
    config.llm.timeout = 120
    config.llm.api_base = base_url
    config.llm.models = [
        LLMModelConfig(
            name=TEST_MODEL,
            api_key="optillm",
            api_base=base_url,
            weight=1.0,
            timeout=120,  # Long timeout
            retries=0,  # No retries
        )
    ]
    return config
def get_simple_test_messages():
    """Return a minimal two-message chat exchange for smoke-testing an endpoint."""
    system_msg = {"role": "system", "content": "You are a helpful coding assistant."}
    user_msg = {"role": "user", "content": "Write a simple Python function that returns 'hello'."}
    return [system_msg, user_msg]
def get_evolution_test_program():
    """Return source code for a trivial evolvable program.

    The EVOLVE-BLOCK markers delimit the region OpenEvolve is allowed to mutate.
    """
    program = """# EVOLVE-BLOCK-START
def solve(x):
    return x * 2
# EVOLVE-BLOCK-END
"""
    return program
def get_evolution_test_evaluator():
    """Return source code for a stub evaluator that scores every program 0.5."""
    evaluator = """def evaluate(program_path):
    return {"score": 0.5, "complexity": 10, "combined_score": 0.5}
"""
    return evaluator