mnoukhov committed on
Commit
997f311
·
verified ·
1 Parent(s): 2e0b661

Upload create_dataset.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. create_dataset.py +94 -0
create_dataset.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Convert MathArena HMMT datasets to RLVR format.
3
+
4
+ Usage:
5
+
6
+ python scripts/data/rlvr/matharena_hmmt.py --push_to_hub
7
+ python scripts/data/rlvr/matharena_hmmt.py --push_to_hub --hf_entity ai2-adapt-dev
8
+ python scripts/data/rlvr/matharena_hmmt.py --dataset_field_mode math
9
+ """
10
+
11
+ from dataclasses import dataclass
12
+
13
+ import datasets
14
+ from huggingface_hub import HfApi
15
+ from transformers import HfArgumentParser
16
+
17
+ from open_instruct import utils as open_instruct_utils
18
+
19
# (HF Hub dataset id, RLVR "dataset" label) pairs for the MathArena 2025 sets.
SOURCE_DATASETS = (
    ("MathArena/hmmt_feb_2025", "math_hmmt_feb_2025"),
    ("MathArena/hmmt_nov_2025", "math_hmmt_nov_2025"),
    ("MathArena/brumo_2025", "math_brumo_2025"),
)
# Hub-id -> label lookup, derived directly from the pairs above.
SOURCE_DATASET_MAP = dict(SOURCE_DATASETS)
25
+
26
+
27
@dataclass
class Args:
    """Command-line arguments for the MathArena-to-RLVR conversion script."""

    # When True, push the converted DatasetDict (and this script) to the HF Hub.
    push_to_hub: bool = False
    # Hub namespace to push under; when None, main() fills in the authenticated user.
    hf_entity: str | None = None
    # "source": per-dataset label in the "dataset" column; "math": constant "math".
    dataset_field_mode: str = "source"
    # Repository name used for the pushed dataset.
    repo_name: str = "rlvr_matharena_2025"
    # "all", or one hub id from SOURCE_DATASET_MAP to convert a single dataset.
    source_dataset: str = "all"
34
+
35
+
36
def _convert_dataset(source_dataset_name: str, dataset_label: str, dataset_field_mode: str) -> datasets.Dataset:
    """Load one MathArena dataset and reshape its train split into RLVR rows.

    Args:
        source_dataset_name: Hub id of the source dataset (e.g. "MathArena/brumo_2025").
        dataset_label: Value written to the "dataset" column when dataset_field_mode == "source".
        dataset_field_mode: "math" for a constant "math" label, anything else uses dataset_label.

    Returns:
        The converted split, restricted to the RLVR columns that exist, in a fixed order.
    """
    ds = datasets.load_dataset(
        source_dataset_name, split="train", num_proc=open_instruct_utils.max_num_processes()
    )

    # The label is the same for every row of this split, so compute it once.
    label = "math" if dataset_field_mode == "math" else dataset_label

    def to_rlvr(row):
        # RLVR expects a chat-style "messages" list plus a verifiable ground truth.
        row["messages"] = [{"role": "user", "content": row["problem"]}]
        row["ground_truth"] = row["answer"]
        row["dataset"] = label
        row["source_dataset"] = source_dataset_name
        return row

    ds = ds.map(to_rlvr, desc=f"Converting {source_dataset_name}")

    preferred_order = (
        "messages",
        "ground_truth",
        "dataset",
        "source_dataset",
        "problem_idx",
        "problem",
        "answer",
        "problem_type",
    )
    # Keep only the columns the source actually has, in the preferred order.
    return ds.select_columns([name for name in preferred_order if name in ds.column_names])
60
+
61
+
62
def main(args: Args):
    """Convert the selected MathArena dataset(s) and optionally push them to the Hub.

    Args:
        args: Parsed command-line options.

    Raises:
        ValueError: If dataset_field_mode or source_dataset has an unsupported value.
    """
    if args.dataset_field_mode not in {"source", "math"}:
        raise ValueError("--dataset_field_mode must be one of: source, math")

    # Resolve which (hub id, label) pairs to convert.
    if args.source_dataset == "all":
        selected = list(SOURCE_DATASETS)
    elif args.source_dataset in SOURCE_DATASET_MAP:
        selected = [(args.source_dataset, SOURCE_DATASET_MAP[args.source_dataset])]
    else:
        valid = ", ".join(sorted(SOURCE_DATASET_MAP))
        raise ValueError(f"--source_dataset must be 'all' or one of: {valid}")

    converted = []
    for hub_id, label in selected:
        converted.append(_convert_dataset(hub_id, label, args.dataset_field_mode))
    output = datasets.DatasetDict({"train": datasets.concatenate_datasets(converted)})
    print(output)

    if args.push_to_hub:
        api = HfApi()
        if not args.hf_entity:
            # Default to the authenticated user's namespace.
            args.hf_entity = api.whoami()["name"]
        repo_id = f"{args.hf_entity}/{args.repo_name}"
        print(f"Pushing dataset to Hub: {repo_id}")
        output.push_to_hub(repo_id)
        # Upload this script alongside the data so the repo documents its provenance.
        api.upload_file(
            path_or_fileobj=__file__, path_in_repo="create_dataset.py", repo_type="dataset", repo_id=repo_id
        )
90
+
91
+
92
if __name__ == "__main__":
    # Parse CLI flags into a single Args instance, then run the conversion.
    parser = HfArgumentParser(Args)
    (args,) = parser.parse_args_into_dataclasses()
    main(args)