{
  "@context": {
    "@vocab": "https://schema.org/",
    "dct": "http://purl.org/dc/terms/",
    "conformsTo": "dct:conformsTo"
  },
  "@type": "Dataset",
  "name": "WebDS",
  "description": "WebDS is a reproducible benchmark dataset for evaluating LLM agents on complex web-based data science tasks. It comprises 870 tasks across 29 containerized websites and 10 domains. Each task simulates realistic end-to-end workflows including data acquisition, tool use, analysis, reasoning, and reporting. Tasks span multiple modalities, structured and unstructured data, and multihop reasoning, and are annotated with fine-grained attributes and difficulty levels.",
  "conformsTo": "http://mlcommons.org/croissant/1.0",
  "license": "https://creativecommons.org/licenses/by/4.0/",
  "keywords": [
    "web-based data science",
    "benchmark",
    "LLM agents",
    "multi-hop reasoning",
    "tool use",
    "agent evaluation"
  ],
  "creator": {
    "@type": "Organization",
    "name": "Stanford University, UC Berkeley, SUTD, USC"
  },
  "includedInDataCatalog": {
    "@type": "DataCatalog",
    "name": "MLCommons Croissant Benchmarks"
  },
  "spatialCoverage": {
    "@type": "Place",
    "name": "Web-based / Global"
  },
  "temporalCoverage": "2025",
  "datePublished": "2025-05-15",
  "version": "1.0",
  "isAccessibleForFree": true
}