import argparse
import json
import os
import sys
import traceback
import importlib.util
import pandas as pd
import gc
import time
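
# The solution module under test is expected to expose a `Solution` class whose
# `solve(df, early_stop=..., row_stop=..., col_stop=..., col_merge=...,
# one_way_dep=..., distinct_value_threshold=..., parallel=...)` method returns a
# reordered DataFrame; this interface is inferred from the call site in
# `Evaluator.evaluate` below.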
class Evaluator:
    def __init__(self, problem_dir: str):
        self.problem_dir = problem_dir
        self.resources_dir = os.path.join(problem_dir, "resources")
        # Check the mounted datasets directory first (from the main repo's datasets folder)
        mounted_datasets_dir = "/datasets/llm_sql/small"
        if os.path.exists(mounted_datasets_dir) and os.listdir(mounted_datasets_dir):
            self.datasets_dir = mounted_datasets_dir
        else:
            # Fall back to resources/datasets if the mounted directory doesn't exist
            self.datasets_dir = os.path.join(self.resources_dir, "datasets")
        # Per-dataset column merge specs (from the original LLM_SQL tests), keyed by
        # file name so each spec stays paired with its dataset even when some CSV
        # files are missing (positional pairing would silently mismatch otherwise).
        merge_specs = {
            "movies.csv": [["movieinfo", "movietitle", "rottentomatoeslink"]],
            "beer.csv": [["beer/beerId", "beer/name"]],
            "BIRD.csv": [["PostId", "Body"]],
            "products.csv": [["product_title", "parent_asin"]],
        }
        self.trace_files = []
        self.col_merges = []
        for name, spec in merge_specs.items():
            path = os.path.join(self.datasets_dir, name)
            if os.path.exists(path):
                self.trace_files.append(path)
                self.col_merges.append(spec)
        # Baseline cache file path
        self.baseline_cache_file = os.path.join(self.problem_dir, "baseline_cache.json")
        # Make local resources importable
        if self.resources_dir not in sys.path:
            sys.path.insert(0, self.resources_dir)
        from utils import evaluate_df_prefix_hit_cnt  # provided in resources/
        self._eval_prefix = evaluate_df_prefix_hit_cnt
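
    # NOTE: `evaluate_df_prefix_hit_cnt` lives in this problem's resources/utils
    # module. From its usage below, it is assumed to return a
    # (hit_count, hit_rate_percent) tuple; only the percentage is consumed here.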
    def _calculate_baseline_hit_rate(self) -> float:
        """Calculate the baseline hit rate using the original column order (the 0-point anchor)."""
        # Try the cache file first; if a cached value exists, do NOT recalculate.
        if os.path.exists(self.baseline_cache_file):
            try:
                with open(self.baseline_cache_file, 'r') as f:
                    cache_data = json.load(f)
                if 'baseline_hit_rate' in cache_data:
                    return cache_data['baseline_hit_rate']
            except Exception as e:
                print(f"[WARNING] Failed to load baseline cache: {e}, will attempt to compute...")
        # Not cached: compute the baseline from scratch.
        baseline_hit_rates = []
        for csv_path, merge_spec in zip(self.trace_files, self.col_merges):
            df = pd.read_csv(csv_path, low_memory=False)
            # Apply column merges where all source columns are present
            for col_to_merge in merge_spec:
                if all(col in df.columns for col in col_to_merge):
                    merged_name = "_".join(col_to_merge)
                    df[merged_name] = df[col_to_merge].apply(
                        lambda row: "".join(str(val) for val in row), axis=1
                    )
                    df = df.drop(columns=col_to_merge)
            # Evaluate the baseline with the original column order (no optimization)
            _, hit_rate_percent = self._eval_prefix(df)
            baseline_hit_rates.append(hit_rate_percent / 100.0)
            # Release memory before loading the next dataset
            del df
            gc.collect()
        avg_baseline_hit = (
            sum(baseline_hit_rates) / len(baseline_hit_rates) if baseline_hit_rates else 0.0
        )
        # Save to the cache file for future runs
        try:
            cache_data = {
                'baseline_hit_rate': avg_baseline_hit,
                'individual_rates': baseline_hit_rates,
                'num_datasets': len(self.trace_files),
            }
            os.makedirs(os.path.dirname(self.baseline_cache_file) or ".", exist_ok=True)
            with open(self.baseline_cache_file, 'w') as f:
                json.dump(cache_data, f, indent=2)
        except Exception as e:
            print(f"[WARNING] Failed to save baseline cache: {e}")
        return avg_baseline_hit
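
    # Illustrative cache file contents (values are made up):
    #   {"baseline_hit_rate": 0.42, "individual_rates": [0.40, 0.45, 0.38, 0.45], "num_datasets": 4}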
    def evaluate(self, solution_module_path: str) -> dict:
        if not self.trace_files:
            return {"score": 0.0, "runs_successfully": 0.0, "error": "No datasets found"}
        spec = importlib.util.spec_from_file_location("solution", solution_module_path)
        if spec is None or spec.loader is None:
            return {"score": 0.0, "runs_successfully": 0.0, "error": "Cannot load solution module"}
        solution = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(solution)
        if not hasattr(solution, "Solution"):
            return {"score": 0.0, "runs_successfully": 0.0, "error": "Missing Solution"}
        solver = solution.Solution()
        # Load the cached baseline if available; compute it only when missing
        baseline_hit = None
        if os.path.exists(self.baseline_cache_file):
            try:
                with open(self.baseline_cache_file, 'r') as f:
                    cache_data = json.load(f)
                baseline_hit = cache_data.get('baseline_hit_rate')
            except Exception:
                baseline_hit = None
        if baseline_hit is None:
            baseline_hit = self._calculate_baseline_hit_rate()
        hit_rates = []
        total_runtime = 0.0
        for csv_path, merge_spec in zip(self.trace_files, self.col_merges):
            df = pd.read_csv(csv_path, low_memory=False)
            start = time.time()
            reordered = solver.solve(
                df,
                early_stop=100000,
                row_stop=4,
                col_stop=2,
                col_merge=merge_spec,
                one_way_dep=[],
                distinct_value_threshold=0.7,
                parallel=True,
            )
            total_runtime += time.time() - start
            _, hit_rate_percent = self._eval_prefix(reordered)
            hit_rates.append(hit_rate_percent / 100.0)
            # Release memory before the next dataset
            del df, reordered
            gc.collect()
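        # Note: only solver.solve() is timed; CSV loading and hit-rate evaluation
        # are excluded from the runtime budget enforced below.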
        avg_runtime = total_runtime / len(self.trace_files)
        # Enforce the runtime threshold first: exceeding the budget zeroes the score
        if avg_runtime > 1.0:
            score = 0.0
        else:
            # Score each dataset individually against the baseline, then average
            individual_scores = []
            for hit_rate in hit_rates:
                if baseline_hit >= 1.0:
                    # Degenerate case: a perfect baseline leaves no headroom
                    dataset_score = 100.0 if hit_rate >= 1.0 else 0.0
                else:
                    dataset_score = ((hit_rate - baseline_hit) / (1.0 - baseline_hit)) * 100
                individual_scores.append(max(0.0, min(100.0, dataset_score)))
            score = sum(individual_scores) / len(individual_scores)
        avg_hit = sum(hit_rates) / len(hit_rates)
        return {
            "score": score,
            "runs_successfully": 1.0,
            "avg_hit_rate": avg_hit * 100,
            "total_runtime": total_runtime,
            "avg_runtime": avg_runtime,
            "runtime_threshold": 1.0,
        }

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--solution", required=True)
    parser.add_argument("--out", required=True)
    args = parser.parse_args()
    try:
        problem_dir = os.path.dirname(os.path.abspath(__file__))
        result = Evaluator(problem_dir).evaluate(args.solution)
    except Exception as e:
        print(f"[evaluator] ERROR: {e}", file=sys.stderr)
        print(traceback.format_exc(), file=sys.stderr)
        result = {"score": 0.0, "runs_successfully": 0.0, "error": str(e)}
    os.makedirs(os.path.dirname(args.out) or ".", exist_ok=True)
    with open(args.out, "w") as f:
        json.dump(result, f)
    print(json.dumps(result))
    return 0

if __name__ == "__main__":
    raise SystemExit(main())
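
# Example invocation (script name and paths are illustrative):
#   python evaluate.py --solution /path/to/solution.py --out /tmp/result.json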