Spaces:
Running
Running
File size: 7,117 Bytes
1027cfb aa07520 1027cfb aa07520 1027cfb aa07520 1027cfb |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 |
"""
Simple data loader for OpenHands Index leaderboard.
Loads JSONL files from local directory or GitHub repository.
"""
import os
import pandas as pd
import json
from pathlib import Path
class SimpleLeaderboardViewer:
    """Simple replacement for agent-eval's LeaderboardViewer.

    Reads per-split JSONL result files from a local data directory and
    reshapes them into a one-row-per-agent DataFrame suitable for the
    leaderboard UI, plus a tag -> task-names map.
    """

    def __init__(self, data_dir: str, config: str, split: str):
        """
        Args:
            data_dir: Path to data directory
            config: Config name (e.g., "1.0.0-dev1")
            split: Split name (e.g., "validation" or "test")
        """
        self.data_dir = Path(data_dir)
        self.config = config
        self.split = split
        self.config_path = self.data_dir / config

        # Load suite configuration; fall back to an empty suite so the
        # viewer still works on data dirs without an agenteval.json.
        config_file = self.config_path / "agenteval.json"
        if config_file.exists():
            with open(config_file, encoding="utf-8") as f:
                suite_config = json.load(f)
            self.suite_config = suite_config["suite_config"]
        else:
            self.suite_config = {
                "name": "openhands-index",
                "version": config,
                "splits": [],
            }

        # Build tag -> [task names] map for the requested split only.
        self.tag_map = {}
        for split_config in self.suite_config.get("splits", []):
            if split_config["name"] == split:
                for task in split_config.get("tasks", []):
                    for tag in task.get("tags", []):
                        self.tag_map.setdefault(tag, []).append(task["name"])

    def _load(self):
        """Load the JSONL file for the split and return (DataFrame, tag map).

        On missing/empty/broken input, returns a one-column "Message"
        DataFrame describing the problem and an empty tag map instead of
        raising, so the UI can render the error.
        """
        jsonl_file = self.config_path / f"{self.split}.jsonl"
        if not jsonl_file.exists():
            # Return empty dataframe with error message
            return pd.DataFrame({
                "Message": [f"No data found for split '{self.split}'. Expected file: {jsonl_file}"]
            }), {}

        try:
            # Read JSONL file, skipping blank lines.
            records = []
            with open(jsonl_file, 'r', encoding="utf-8") as f:
                for line in f:
                    if line.strip():
                        records.append(json.loads(line))

            if not records:
                return pd.DataFrame({
                    "Message": [f"No data in file: {jsonl_file}"]
                }), {}

            # Convert to DataFrame
            df = pd.DataFrame(records)

            # Transform to expected format for leaderboard:
            # group by agent to aggregate results across datasets.
            transformed_records = []
            for agent_name in df['agent_name'].unique():
                agent_records = df[df['agent_name'] == agent_name]

                # Build a single record for this agent; shared metadata is
                # taken from the agent's first row.
                first_record = agent_records.iloc[0]
                record = {
                    # Core agent info - use final display names
                    'agent': agent_name,  # Will become "Agent" after prettifying
                    'models used': first_record['llm_base'],  # Will become "Models Used"
                    'openness': first_record['openness'],  # Will become "Openness"
                    'agent tooling': first_record['tool_usage'],  # Will become "Agent Tooling"
                    'date': first_record['submission_time'],  # Will become "Date"
                    # Additional columns expected by the transformer
                    'id': first_record.get('id', agent_name),  # Will become "Id"
                    'submitter': first_record.get('submitter', 'Unknown'),  # Will become "Submitter"
                    'source': first_record.get('source', ''),  # Will become "Source"
                    'logs': first_record.get('logs', ''),  # Will become "Logs"
                }

                # Add per-dataset scores and costs
                dataset_scores = []
                dataset_costs = []
                for _, row in agent_records.iterrows():
                    tags = row['tags'] if isinstance(row['tags'], list) else [row['tags']]
                    for tag in tags:
                        # Add columns for this specific dataset
                        record[f'{tag} score'] = row['score']
                        record[f'{tag} cost'] = row['total_cost']
                    # BUG FIX: append once per dataset row, not once per tag —
                    # previously a multi-tag row was over-weighted in the
                    # "average across datasets" computed below.
                    dataset_scores.append(row['score'])
                    dataset_costs.append(row['total_cost'])

                # Calculate overall score and cost (average across datasets)
                if dataset_scores:
                    record['overall score'] = sum(dataset_scores) / len(dataset_scores)
                    record['overall cost'] = sum(dataset_costs) / len(dataset_costs)
                else:
                    record['overall score'] = None
                    record['overall cost'] = None

                transformed_records.append(record)

            transformed_df = pd.DataFrame(transformed_records)

            # Build tag map if not already built (no agenteval.json case):
            # derive it from the data, each tag mapping to itself.
            if not self.tag_map:
                all_tags = set()
                for _, row in df.iterrows():
                    tags = row['tags'] if isinstance(row['tags'], list) else [row['tags']]
                    all_tags.update(tags)
                self.tag_map = {tag: [tag] for tag in sorted(all_tags)}

            return transformed_df, self.tag_map

        except Exception as e:
            # Best-effort boundary: surface the error in the UI rather than
            # crashing the leaderboard; traceback goes to the server log.
            import traceback
            traceback.print_exc()
            return pd.DataFrame({
                "Message": [f"Error loading data: {e}"]
            }), {}

    def get_dataframe(self):
        """Get the raw dataframe."""
        df, _ = self._load()
        return df
def load_mock_data_locally(data_dir: str = "mock_results"):
    """
    Load mock data from local directory for testing.

    Args:
        data_dir: Path to mock results directory

    Returns:
        Dictionary mapping split names to SimpleLeaderboardViewer instances
    """
    root = Path(data_dir)
    viewers = {}

    if not root.exists():
        print(f"Warning: Mock data directory '{data_dir}' not found")
        return viewers

    # Each subdirectory is a config; each *.jsonl inside it is one split.
    # Later configs overwrite earlier ones for the same split name.
    for config_dir in (p for p in root.iterdir() if p.is_dir()):
        for jsonl_path in config_dir.glob("*.jsonl"):
            split = jsonl_path.stem
            viewers[split] = SimpleLeaderboardViewer(
                data_dir=str(root),
                config=config_dir.name,
                split=split,
            )

    return viewers
|