# NOTE(review): the three lines below are Hugging Face page-scrape residue
# (uploader name, commit message, commit hash) — commented out so the file
# is valid Python. They are not part of the program.
# GokseninYuksel's picture
# Add files using upload-large-folder tool
# 50bf69f verified
#!/usr/bin/env python3
"""
Convert [NPY] folder to WebDataset format for reading.
This script is optimized to use all available CPU cores for parallel processing
and includes a progress bar for monitoring.
"""
import tarfile
import numpy as np
from pathlib import Path
import io
import concurrent.futures
from tqdm import tqdm
def _create_shard(args):
"""
Worker function to create a single WebDataset shard from a list of .npy files.
This function is designed to be called by a process pool.
"""
shard_id, npy_files_chunk, output_path = args
shard_name = f"shard-{shard_id:06d}.tar"
shard_path = output_path / shard_name
with tarfile.open(shard_path, 'w') as current_tar:
for npy_file in npy_files_chunk:
try:
# Load numpy array
array = np.load(npy_file)
# Create sample key (filename without extension)
sample_key = npy_file.stem
# Add numpy array as .npy file in tar
npy_buffer = io.BytesIO()
np.save(npy_buffer, array)
npy_buffer.seek(0)
tarinfo = tarfile.TarInfo(name=f"{sample_key}.npy")
tarinfo.size = npy_buffer.getbuffer().nbytes
current_tar.addfile(tarinfo, npy_buffer)
except Exception as e:
print(f"Error processing {npy_file} for shard {shard_id}: {e}")
continue
return shard_path
def create_webdataset_from_npy(
    input_dir: str = "[NPY]",
    output_dir: str = "webdataset_output",
    shard_size: int = 1000
):
    """
    Convert .npy files from an input folder to WebDataset format in parallel.

    Args:
        input_dir (str): Directory containing .npy files.
        output_dir (str): Directory to save WebDataset tar files.
        shard_size (int): Number of samples per shard. Must be >= 1.

    Returns:
        int: Number of shards created.

    Raises:
        ValueError: If shard_size < 1 or no .npy files are found.
    """
    # Guard explicitly: shard_size=0 previously crashed with a cryptic
    # "range() arg 3 must not be zero", and a negative value silently
    # produced zero shards.
    if shard_size < 1:
        raise ValueError(f"shard_size must be >= 1, got {shard_size}")

    input_path = Path(input_dir)
    output_path = Path(output_dir)
    output_path.mkdir(parents=True, exist_ok=True)

    # Sort by filename for a deterministic file -> shard assignment.
    npy_files = sorted(input_path.glob("*.npy"), key=lambda p: p.name)
    if not npy_files:
        raise ValueError(f"No .npy files found in {input_dir}")
    print(f"Found {len(npy_files)} .npy files to process.")

    # Split the file list into fixed-size chunks, one chunk per shard.
    file_chunks = [
        npy_files[i:i + shard_size]
        for i in range(0, len(npy_files), shard_size)
    ]
    num_shards = len(file_chunks)
    print(f"Data will be split into {num_shards} shards.")

    # Pack (shard_id, chunk, output_path) tuples for the pool workers.
    tasks = [
        (shard_id, chunk, output_path)
        for shard_id, chunk in enumerate(file_chunks)
    ]
    with concurrent.futures.ProcessPoolExecutor() as executor:
        # executor.map preserves task order; tqdm shows shard progress.
        results = list(tqdm(
            executor.map(_create_shard, tasks),
            total=num_shards,
            desc="Creating Shards"
        ))
    print(f"\nConversion complete! Created {len(results)} shards in '{output_dir}'.")
    return len(results)
if __name__ == "__main__":
    # Script entry point: shard the "raw" folder into 1000-sample tars.
    shard_count = create_webdataset_from_npy(
        input_dir="raw",
        output_dir="webdataset_output",
        shard_size=1000,  # each .tar file will contain 1000 samples
    )
    print(f"\nSuccessfully created {shard_count} shards.")