import glob
import os
import datasets
from huggingface_hub import snapshot_download
# Citation string surfaced via DatasetInfo; placeholder until one is published.
_CITATION = """TO BE ADDED."""
# Human-readable summary passed to DatasetInfo below.
_DESCRIPTION = """
HeuriGen is a collection of combinatorial-optimization problems
for benchmarking heuristic-program generation by LLMs.
"""
# Hugging Face Hub repository that holds the raw problem-instance files.
REPO_ID = "heurigen/heurigen-data"
class HeuriGenConfig(datasets.BuilderConfig):
    """BuilderConfig for one HeuriGen task family.

    Every configuration is pinned to dataset version 0.1.0; the task name
    and description are forwarded to ``datasets.BuilderConfig`` via kwargs.
    """

    def __init__(self, **kwargs):
        # Version is fixed here so individual configs cannot drift apart.
        super().__init__(version=datasets.Version("0.1.0"), **kwargs)
# (config name, human-readable description) for every HeuriGen task family.
_TASKS = [
    ("operator_scheduling", "DFG operator-scheduling instances"),
    ("technology_mapping", "Logic-network technology-mapping instances"),
    ("global_routing", "Netlist global-routing instances"),
    ("egraph_extraction", "E-graph extraction instances"),
    ("intra_op_parallel", "Intra-op parallel instances"),
    ("protein_sequence_design", "Protein sequence design instances"),
    ("pedigree", "Pedigree problem instances"),
    ("pickup_delivery_time_windows", "Pickup-delivery-time-windows (PDPTW) instances"),
    ("crew_pairing", "Crew pairing instances"),
    ("frequency_assignment", "Frequency assignment instances"),
]

# One builder config per task, all sharing the version pinned by HeuriGenConfig.
_BUILDER_CONFIGS = [
    HeuriGenConfig(name=task_name, description=task_desc)
    for task_name, task_desc in _TASKS
]
class HeuriGen(datasets.GeneratorBasedBuilder):
    """Dataset builder exposing each HeuriGen task as its own configuration.

    Examples carry a single feature, ``file_path``: the absolute path of one
    downloaded problem-instance file. The ``demo`` folder becomes the TRAIN
    split and the ``eval`` folder the TEST split.
    """

    BUILDER_CONFIGS = _BUILDER_CONFIGS
    DEFAULT_CONFIG_NAME = "operator_scheduling"

    def _info(self):
        """Return the DatasetInfo: one string feature per example."""
        features = datasets.Features({"file_path": datasets.Value("string")})
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            features=features,
            supervised_keys=None,
            homepage=f"https://huggingface.co/datasets/{REPO_ID}",
        )

    def _split_generators(self, dl_manager):
        """Download this config's folder from the Hub and build the splits."""
        # Restrict the snapshot to the active configuration's subtree.
        local_root = snapshot_download(
            repo_id=REPO_ID,
            repo_type="dataset",
            revision="main",
            allow_patterns=[f"{self.config.name}/**"],
            local_dir=dl_manager.manual_dir,
        )
        base = os.path.join(local_root, self.config.name)

        def collect(split_dir):
            # Every regular file under `split_dir`, recursively; directories
            # matched by the glob are filtered out. Sorted for determinism.
            pattern = os.path.join(base, split_dir, "**", "*")
            regular_files = (
                path
                for path in glob.glob(pattern, recursive=True)
                if os.path.isfile(path)
            )
            return sorted(os.path.abspath(path) for path in regular_files)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": collect("demo")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"files": collect("eval")},
            ),
        ]

    def _generate_examples(self, files):
        """Yield (key, example) pairs; the key is the file's index in `files`."""
        for key, file_path in enumerate(files):
            yield key, {"file_path": file_path}