"""
Generate a KdV-Burgers dataset and save it to parquet files in chunks.
"""

import os

import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq

from burgers_dataset import KdvBurgersDataset

|
def generate_dataset_split(
    split_name="train",
    num_samples=1000,
    chunk_size=100,
    output_dir="data",
):
    """Generate a dataset split and save it as chunked parquet files."""

    os.makedirs(output_dir, exist_ok=True)

    dataset = KdvBurgersDataset()
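    # Ceiling division: the final chunk may contain fewer than chunk_size samples.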
    num_chunks = (num_samples + chunk_size - 1) // chunk_size

    print(f"Generating {num_samples} {split_name} samples in {num_chunks} chunks...")

    dataset_iter = iter(dataset)
    chunk_data = None

    for i in range(num_samples):
        sample = next(dataset_iter)

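        # Lazily initialize the per-key buffers from the first sample's keys.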
        if chunk_data is None:
            chunk_data = {key: [] for key in sample.keys()}

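        # Accumulate this sample's fields into the current chunk buffers.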
        for key, value in sample.items():
            chunk_data[key].append(value)

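        # Flush once the buffer is full, or when the final sample arrives
        # (so the last chunk may be partial).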
        if (i + 1) % chunk_size == 0 or i == num_samples - 1:
            chunk_idx = i // chunk_size

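            # Convert each value to a nested Python list so pyarrow can
            # infer a list-typed schema for every column.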
            table_data = {}
            for key, values in chunk_data.items():
                table_data[key] = [arr.tolist() for arr in values]

            table = pa.table(table_data)

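            # Zero-padded chunk indices keep the file names lexically
            # ordered, e.g. train-00000-of-00010.parquet.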
            filename = f"{split_name}-{chunk_idx:05d}-of-{num_chunks:05d}.parquet"
            filepath = os.path.join(output_dir, filename)
            pq.write_table(table, filepath)

            print(f"Saved chunk {chunk_idx + 1}/{num_chunks}: {filepath}")

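            # Reset the buffers for the next chunk.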
            chunk_data = {key: [] for key in sample.keys()}

    print(f"Generated {num_samples} {split_name} samples")
    return num_samples

|
if __name__ == "__main__":
    # Seed numpy's global RNG so the generated splits are reproducible.
    np.random.seed(42)
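
    # Training split: 1000 samples in 10 chunks of 100.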
    generate_dataset_split("train", num_samples=1000, chunk_size=100)
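
    # Test split: 200 samples in 2 chunks.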
    generate_dataset_split("test", num_samples=200, chunk_size=100)
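
# A minimal sketch of reading one chunk back as a sanity check (assumes the
# default output_dir="data"; the file name follows from num_samples=1000 and
# chunk_size=100 above):
#
#   import pyarrow.parquet as pq
#   table = pq.read_table("data/train-00000-of-00010.parquet")
#   print(table.schema)
#   print(table.num_rows)  # 100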