#!/usr/bin/env python3
"""
Generate Schrödinger equation dataset and save to parquet files in chunks.
Creates samples of 1D time-dependent Schrödinger equation solutions
with harmonic oscillator potential and random Gaussian wave packet
initial conditions.
"""
import os

import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq

from dataset import SchrodingerDataset


def generate_dataset_split(
    split_name="train", num_samples=1000, chunk_size=100, output_dir="data"
):
"""
Generate a dataset split and save as chunked parquet files.
INSTRUCTIONS FOR CLAUDE:
- This function should work as-is for any dataset following the template
- Only modify the dataset instantiation below if you need custom parameters
"""
    os.makedirs(output_dir, exist_ok=True)

    # Create the Schrödinger dataset with appropriate parameters
    dataset = SchrodingerDataset(
        Lx=20.0,  # Domain length
        Nx=256,  # Grid points (reduced for faster generation)
        hbar=1.0,  # Physical parameters
        mass=1.0,
        omega=1.0,
        stop_sim_time=2.0,  # Shorter simulation time for dataset generation
        timestep=1e-3,
    )
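
    # Optional sanity check before a long run (a sketch; the exact field
    # names and shapes depend on SchrodingerDataset's output):
    #
    #     sample = next(iter(dataset))
    #     for key, value in sample.items():
    #         print(key, np.shape(value))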
    num_chunks = (num_samples + chunk_size - 1) // chunk_size  # Ceiling division
    print(f"Generating {num_samples} {split_name} samples in {num_chunks} chunks...")

    dataset_iter = iter(dataset)
    chunk_data = None

    for i in range(num_samples):
        sample = next(dataset_iter)

        if chunk_data is None:
            # Initialize chunk data on the first sample
            chunk_data = {key: [] for key in sample.keys()}

        # Add the sample to the current chunk
        for key, value in sample.items():
            chunk_data[key].append(value)

        # Save the chunk when full or at the end
        if (i + 1) % chunk_size == 0 or i == num_samples - 1:
            chunk_idx = i // chunk_size

            # Convert data to a PyArrow-compatible format
            table_data = {}
            for key, values in chunk_data.items():
                # Handle both arrays and scalars
                converted_values = []
                for value in values:
                    if hasattr(value, "tolist"):
                        converted_values.append(value.tolist())
                    else:
                        converted_values.append(value)
                table_data[key] = converted_values
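
            # Note: Arrow has no complex dtype. If SchrodingerDataset yields
            # complex-valued solution arrays (an assumption here), each array
            # would need splitting before conversion, e.g.:
            #
            #     if np.iscomplexobj(value):
            #         converted_values.append(
            #             np.stack([value.real, value.imag]).tolist()
            #         )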

            # Convert to a PyArrow table
            table = pa.table(table_data)

            # Save the chunk
            filename = f"{split_name}-{chunk_idx:05d}-of-{num_chunks:05d}.parquet"
            filepath = os.path.join(output_dir, filename)
            pq.write_table(table, filepath)
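
            # A written chunk can be spot-checked with, e.g.:
            #     print(pq.read_table(filepath).num_rows)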
print(f"Saved chunk {chunk_idx + 1}/{num_chunks}: {filepath}")
# Reset for next chunk
chunk_data = {key: [] for key in sample.keys()}
print(f"Generated {num_samples} {split_name} samples")
return num_samples
if __name__ == "__main__":
np.random.seed(42)
# Generate train split
generate_dataset_split("train", num_samples=1000, chunk_size=100)
# Generate test split
generate_dataset_split("test", num_samples=200, chunk_size=100)