sungyub commited on
Commit
9ebec27
·
verified ·
1 Parent(s): 057cfcb

Upload convert_to_verl.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. convert_to_verl.py +292 -0
convert_to_verl.py ADDED
@@ -0,0 +1,292 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Eurus-2-RL-Data (Code) to VERL converter
4
+ Filters code problems from PRIME-RL/Eurus-2-RL-Data
5
+ Applies best practices from Big-Math-RL-Verified conversion:
6
+ - Multi-file output for memory efficiency
7
+ - Streaming with ParquetWriter
8
+ - Aggressive cache cleanup
9
+ - Progress tracking and resume capability
10
+ """
11
+
12
+ import pyarrow.parquet as pq
13
+ import pyarrow as pa
14
+ from datasets import load_dataset
15
+ from pathlib import Path
16
+ import json
17
+ import shutil
18
+ import time
19
+ from datetime import datetime
20
+
21
# Configuration
REPO_ID = "PRIME-RL/Eurus-2-RL-Data"  # HuggingFace source dataset
OUTPUT_DIR = "data"  # destination directory for parquet shards
PROGRESS_FILE = "progress.json"  # checkpoint file for resume/skip logic
DATA_SOURCE_PREFIX = "Eurus-2-Code"  # NOTE(review): defined but never referenced in this file
ABILITY_FILTER = "code" # Filter for code problems only
BATCH_SIZE = 10000  # rows buffered in memory before each parquet write
ROWS_PER_OUTPUT_FILE = 100000 # 100K rows per file
CHECKPOINT_INTERVAL = 1 # Save progress after each file (NOTE(review): never referenced in this file)
30
+
31
# Global state (mutated by init_new_output_file / write_batch_to_output)
current_output_file_idx = 0  # index of the next shard to open
current_output_rows = 0  # rows written so far to the currently open shard
current_writer = None  # active pyarrow ParquetWriter, or None before first write
current_output_path = None  # path of the currently open shard, or None
36
+
37
def load_progress():
    """Read the JSON checkpoint from PROGRESS_FILE.

    Returns:
        The saved progress dict, or a zeroed default state when no
        checkpoint file exists yet.
    """
    progress_path = Path(PROGRESS_FILE)
    if not progress_path.exists():
        # Fresh run: nothing processed, nothing written, not completed.
        return {
            "rows_processed": 0,
            "total_rows": 0,
            "current_output_file_idx": 0,
            "current_output_rows": 0,
            "completed": False,
        }
    with open(PROGRESS_FILE, 'r') as f:
        return json.load(f)
49
+
50
def save_progress(progress):
    """Persist the checkpoint dict to PROGRESS_FILE as pretty-printed JSON.

    A "timestamp" field is refreshed in-place on every save so the file
    records when the checkpoint was last written.
    """
    progress["timestamp"] = datetime.now().isoformat()
    Path(PROGRESS_FILE).write_text(json.dumps(progress, indent=2))
55
+
56
def check_disk_space(path="."):
    """Return free disk space (in GB) for the filesystem containing *path*.

    Prints a warning when fewer than 5 GB remain.

    Args:
        path: Directory whose filesystem is probed. Defaults to the current
            working directory.

    Returns:
        Free space in gigabytes as a float.
    """
    # Fix: the original probed a machine-specific absolute path
    # ("/Users/sungyubkim"), which raises FileNotFoundError on any other
    # machine. Probing the working directory keeps the script portable.
    usage = shutil.disk_usage(path)
    free_gb = usage.free / (1024**3)
    if free_gb < 5:
        print(f"⚠️ WARNING: Low disk space: {free_gb:.1f} GB remaining")
    return free_gb
63
+
64
def get_output_filename(file_idx, total_files=None, output_dir=None):
    """Build a HuggingFace-style sharded parquet filename.

    Args:
        file_idx: Zero-based shard index.
        total_files: Total shard count embedded in the name. Defaults to a
            placeholder (5); finalize_output_files() later renames shards
            with the real count once it is known.
        output_dir: Directory prefix. Defaults to the module-level
            OUTPUT_DIR (generalized from the previous hard-coded constant).

    Returns:
        Path string like "data/train-00000-of-00005.parquet".
    """
    if total_files is None:
        # Placeholder count; corrected during finalization.
        total_files = 5
    if output_dir is None:
        output_dir = OUTPUT_DIR
    return f"{output_dir}/train-{file_idx:05d}-of-{total_files:05d}.parquet"
70
+
71
def get_verl_schema():
    """Return the PyArrow schema for VERL-format rows.

    Mirrors the source dataset's layout: a chat-style list of prompt
    messages, a rule-based reward_model struct, and per-row extra_info.
    """
    message = pa.struct([('role', pa.string()), ('content', pa.string())])
    reward = pa.struct([('style', pa.string()), ('ground_truth', pa.string())])
    info = pa.struct([('split', pa.string()), ('index', pa.int64())])
    fields = [
        pa.field('data_source', pa.string()),
        pa.field('prompt', pa.list_(message)),
        pa.field('ability', pa.string()),
        pa.field('reward_model', reward),
        pa.field('extra_info', info),
    ]
    return pa.schema(fields)
89
+
90
def init_new_output_file(file_idx):
    """Rotate to a fresh parquet shard, closing the previous writer if any.

    Updates the module-level writer/path state and returns the new writer.
    """
    global current_writer, current_output_path

    # Flush and finish the shard we were filling before opening the next one.
    if current_writer is not None:
        current_writer.close()
        print(f" ✅ Closed output file: {current_output_path}")

    current_output_path = get_output_filename(file_idx)
    current_writer = pq.ParquetWriter(current_output_path, get_verl_schema())
    print(f" 📝 Created new output file: {current_output_path}")

    return current_writer
106
+
107
def convert_example_to_verl(example, global_index):
    """Re-emit one record in VERL format with a reindexed extra_info.

    The source rows already follow the VERL layout, so the only real
    transformation is normalizing ``extra_info``: the split is carried
    over (defaulting to "train") and the index is replaced with the
    caller-supplied running global index.
    """
    normalized_info = {
        'split': example['extra_info'].get('split', 'train'),
        'index': global_index,
    }
    verl_row = {
        'data_source': example['data_source'],
        'prompt': example['prompt'],
        'ability': example['ability'],
        'reward_model': example['reward_model'],
        'extra_info': normalized_info,
    }
    return verl_row
120
+
121
def write_batch_to_output(batch_data):
    """Append a batch of VERL rows to the active shard, rotating when full."""
    global current_writer, current_output_rows, current_output_file_idx

    # Open a new shard on first use, or once the current shard is full.
    needs_new_file = current_writer is None or current_output_rows >= ROWS_PER_OUTPUT_FILE
    if needs_new_file:
        init_new_output_file(current_output_file_idx)
        current_output_file_idx += 1
        current_output_rows = 0

    # Pivot the row dicts into columnar form for PyArrow.
    columns = {
        'data_source': [row['data_source'] for row in batch_data],
        'prompt': [row['prompt'] for row in batch_data],
        'ability': [row['ability'] for row in batch_data],
        'reward_model': [row['reward_model'] for row in batch_data],
        'extra_info': [row['extra_info'] for row in batch_data],
    }
    table = pa.table(columns, schema=get_verl_schema())

    current_writer.write_table(table)
    current_output_rows += len(table)
143
+
144
def process_dataset(start_index=0):
    """Stream the Eurus-2 train split, keep only code rows, and write them.

    Args:
        start_index: First value assigned to extra_info.index, allowing the
            numbering sequence to continue from a prior run.

    Returns:
        Tuple of (code rows written, total rows scanned).
    """
    print(f"\n📂 Loading dataset from HuggingFace...")

    # Streaming mode avoids materializing the full dataset in memory.
    dataset = load_dataset(REPO_ID, split='train', streaming=True)

    print(f" ✅ Dataset loaded in streaming mode")
    print(f" 🔍 Filtering for ability='{ABILITY_FILTER}'")

    pending = []
    global_index = start_index
    rows_processed = 0
    rows_written = 0

    print(f"\n 🔄 Processing and filtering...")

    for example in dataset:
        rows_processed += 1

        # Keep only code problems; everything else is skipped.
        if example['ability'] != ABILITY_FILTER:
            continue

        pending.append(convert_example_to_verl(example, global_index))
        global_index += 1

        if len(pending) < BATCH_SIZE:
            continue

        # Flush a full batch to the parquet writer.
        write_batch_to_output(pending)
        rows_written += len(pending)
        pending = []

        if rows_written % 10000 == 0:
            print(f" ... {rows_written:,} code rows written ({rows_processed:,} total processed)")

    # Flush whatever is left over after the stream ends.
    if pending:
        write_batch_to_output(pending)
        rows_written += len(pending)

    print(f" ✅ Complete: {rows_written:,} code rows written from {rows_processed:,} total rows")

    return rows_written, rows_processed
187
+
188
def finalize_output_files():
    """Close the active writer and renumber shards with the true total.

    Shards are initially written with a placeholder "of" count; once all
    data is out we know how many shards exist and rename each
    train-XXXXX-of-YYYYY file so the count is accurate.
    """
    global current_writer, current_output_file_idx

    if current_writer is not None:
        current_writer.close()
        print(f"\n ✅ Closed final output file")

    shard_paths = sorted(Path(OUTPUT_DIR).glob("train-*-of-*.parquet"))
    actual_count = len(shard_paths)

    print(f"\n📝 Finalizing {actual_count} output files...")

    for idx, old_path in enumerate(shard_paths):
        new_name = f"train-{idx:05d}-of-{actual_count:05d}.parquet"
        new_path = old_path.parent / new_name
        if old_path == new_path:
            continue  # already correctly named
        old_path.rename(new_path)
        print(f" Renamed: {old_path.name} -> {new_name}")
210
+
211
def main():
    """Drive the full conversion: load checkpoint, stream/filter, finalize.

    Skips entirely when a prior run already marked the checkpoint as
    completed. Otherwise streams the dataset, writes sharded parquet
    output, saves a completion checkpoint, and prints statistics plus a
    quick validation of the first output file.
    """
    global current_output_file_idx, current_output_rows

    print("=" * 80)
    print("Eurus-2-RL-Data (Code) Conversion to VERL Format")
    print("=" * 80)

    # Load progress checkpoint (fresh zeroed state on first run).
    progress = load_progress()

    if progress.get("completed", False):
        print(f"\n📥 Already processed! Skipping conversion.")
        print(f" Total code rows: {progress.get('total_rows', 0):,}")
        return

    # Ensure output directory exists
    Path(OUTPUT_DIR).mkdir(parents=True, exist_ok=True)

    # Check disk space before starting a potentially large write.
    free_gb = check_disk_space()
    print(f"\n💾 Disk space: {free_gb:.1f} GB free")

    start_time = time.time()

    print(f"\n🚀 Starting conversion...")
    print(f" Source: {REPO_ID}")
    print(f" Filter: ability='{ABILITY_FILTER}'")
    print(f" Output: Multiple files, {ROWS_PER_OUTPUT_FILE:,} rows each")
    print()

    # NOTE(review): start_index only offsets extra_info.index; the stream
    # itself always restarts from the beginning of the dataset.
    start_index = progress.get("total_rows", 0)
    rows_written, rows_processed = process_dataset(start_index)

    if rows_written > 0:
        # Close the writer and renumber shards with the real total.
        finalize_output_files()

        # Save the final checkpoint so reruns skip the conversion.
        progress = {
            "rows_processed": rows_processed,
            "total_rows": rows_written,
            "current_output_file_idx": current_output_file_idx,
            "completed": True,
            "status": "complete"
        }
        save_progress(progress)

    # Final statistics
    elapsed = time.time() - start_time
    print("\n" + "=" * 80)
    print("✅ CONVERSION COMPLETE!")
    print("=" * 80)
    print(f"📊 Statistics:")
    print(f" Total input rows processed: {rows_processed:,}")
    print(f" Total code rows written: {rows_written:,}")
    # Fix: guard against ZeroDivisionError when the stream yields no rows.
    filter_rate = (rows_written / rows_processed * 100) if rows_processed else 0.0
    print(f" Filter rate: {filter_rate:.1f}%")
    print(f" Time elapsed: {elapsed/60:.1f} minutes ({elapsed/3600:.2f} hours)")
    print(f" Output directory: {OUTPUT_DIR}")

    # Check output files and report aggregate size.
    output_files = sorted(Path(OUTPUT_DIR).glob("train-*-of-*.parquet"))
    if output_files:
        total_size = sum(f.stat().st_size for f in output_files) / (1024**3)
        print(f" Output files: {len(output_files)}")
        print(f" Total size: {total_size:.2f} GB")

        # Validate first file by reading it back.
        print(f"\n🔍 Validation (first file):")
        table = pq.read_table(output_files[0])
        print(f" Rows in first file: {len(table):,}")
        print(f" Columns: {table.column_names}")

        # Spot-check a sample row.
        if len(table) > 0:
            print(f" Sample data_source: {table['data_source'][0].as_py()}")
            print(f" Sample ability: {table['ability'][0].as_py()}")

    print("\n" + "=" * 80)
290
+
291
# Script entry point: run the conversion only when executed directly.
if __name__ == "__main__":
    main()