#!/usr/bin/env python3
"""
Generate Schrödinger equation dataset and save to parquet files in chunks.

Creates samples of 1D time-dependent Schrödinger equation solutions
with a harmonic oscillator potential and random Gaussian wave packet
initial conditions.
"""

import os
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
from dataset import SchrodingerDataset


def generate_dataset_split(
    split_name="train", num_samples=1000, chunk_size=100, output_dir="data"
):
    """
    Generate a dataset split and save as chunked parquet files.
    
    INSTRUCTIONS FOR CLAUDE:
    - This function should work as-is for any dataset following the template
    - Only modify the dataset instantiation below if you need custom parameters
    """

    os.makedirs(output_dir, exist_ok=True)

    # Create Schrödinger dataset with appropriate parameters
    dataset = SchrodingerDataset(
        Lx=20.0,                # Domain length
        Nx=256,                 # Grid points (reduced for faster generation)
        hbar=1.0,               # Physical parameters
        mass=1.0,
        omega=1.0,
        stop_sim_time=2.0,      # Shorter simulation time for dataset generation
        timestep=1e-3,
    )
    
    num_chunks = (num_samples + chunk_size - 1) // chunk_size  # Ceiling division

    print(f"Generating {num_samples} {split_name} samples in {num_chunks} chunks...")

    dataset_iter = iter(dataset)
    chunk_data = None

    for i in range(num_samples):
        sample = next(dataset_iter)

        if chunk_data is None:
            # Initialize chunk data on first sample
            chunk_data = {key: [] for key in sample.keys()}

        # Add sample to current chunk
        for key, value in sample.items():
            chunk_data[key].append(value)

        # Save chunk when full or at end
        if (i + 1) % chunk_size == 0 or i == num_samples - 1:
            chunk_idx = i // chunk_size

            # Convert data to PyArrow-compatible format
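            # NOTE (assumption): Arrow has no complex dtype, so this relies on
            # SchrodingerDataset emitting real-valued fields (e.g. separate
            # real/imag arrays); raw complex values would make pa.table() raise.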
            table_data = {}
            for key, values in chunk_data.items():
                # Handle both arrays and scalars
                converted_values = []
                for value in values:
                    if hasattr(value, 'tolist'):
                        converted_values.append(value.tolist())
                    else:
                        converted_values.append(value)
                table_data[key] = converted_values

            # Convert to PyArrow table
            table = pa.table(table_data)

            # Save chunk
            filename = f"{split_name}-{chunk_idx:05d}-of-{num_chunks:05d}.parquet"
            filepath = os.path.join(output_dir, filename)
            pq.write_table(table, filepath)

            print(f"Saved chunk {chunk_idx + 1}/{num_chunks}: {filepath}")

            # Reset for next chunk
            chunk_data = {key: [] for key in sample.keys()}

    print(f"Generated {num_samples} {split_name} samples")
    return num_samples
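

# Illustrative helper (not part of the generation pipeline): one way to read a
# split back for inspection; it assumes only the filename pattern used above.
def load_split(split_name="train", data_dir="data"):
    """Concatenate all parquet chunks of a split into one Arrow table."""
    import glob

    files = sorted(glob.glob(os.path.join(data_dir, f"{split_name}-*.parquet")))
    return pa.concat_tables(pq.read_table(f) for f in files)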


if __name__ == "__main__":
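    # Seed the global NumPy RNG for reproducible wave packets (assumes
    # SchrodingerDataset draws its random initial conditions from np.random).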
    np.random.seed(42)

    # Generate train split
    generate_dataset_split("train", num_samples=1000, chunk_size=100)

    # Generate test split
    generate_dataset_split("test", num_samples=200, chunk_size=100)