"""
Convert a folder of .npy files ("[NPY]") to WebDataset format for reading.

This script is optimized to use all available CPU cores for parallel processing
and includes a progress bar for monitoring.
"""
|
|
| import tarfile |
| import numpy as np |
| from pathlib import Path |
| import io |
| import concurrent.futures |
| from tqdm import tqdm |
|
|
| def _create_shard(args): |
| """ |
| Worker function to create a single WebDataset shard from a list of .npy files. |
| This function is designed to be called by a process pool. |
| """ |
| shard_id, npy_files_chunk, output_path = args |
| shard_name = f"shard-{shard_id:06d}.tar" |
| shard_path = output_path / shard_name |
|
|
| with tarfile.open(shard_path, 'w') as current_tar: |
| for npy_file in npy_files_chunk: |
| try: |
| |
| array = np.load(npy_file) |
| |
| |
| sample_key = npy_file.stem |
| |
| |
| npy_buffer = io.BytesIO() |
| np.save(npy_buffer, array) |
| npy_buffer.seek(0) |
| |
| tarinfo = tarfile.TarInfo(name=f"{sample_key}.npy") |
| tarinfo.size = npy_buffer.getbuffer().nbytes |
| current_tar.addfile(tarinfo, npy_buffer) |
|
|
| except Exception as e: |
| print(f"Error processing {npy_file} for shard {shard_id}: {e}") |
| continue |
| return shard_path |
|
|
def create_webdataset_from_npy(
    input_dir: str = "[NPY]",
    output_dir: str = "webdataset_output",
    shard_size: int = 1000
):
    """
    Convert .npy files from an input folder to WebDataset format in parallel.

    Args:
        input_dir (str): Directory containing .npy files.
        output_dir (str): Directory to save WebDataset tar files.
        shard_size (int): Number of samples per shard; must be positive.

    Returns:
        int: Number of shards created.

    Raises:
        ValueError: If shard_size is not positive, or no .npy files are found.
    """
    if shard_size <= 0:
        # Fail fast with a clear message; a non-positive step would otherwise
        # surface as an opaque range() error in the chunking below.
        raise ValueError(f"shard_size must be positive, got {shard_size}")

    input_path = Path(input_dir)
    output_path = Path(output_dir)
    output_path.mkdir(parents=True, exist_ok=True)

    # Sort by name so shard contents are deterministic across runs.
    # (sorted() accepts the generator directly; no intermediate list needed.)
    npy_files = sorted(input_path.glob("*.npy"), key=lambda x: x.name)

    if not npy_files:
        raise ValueError(f"No .npy files found in {input_dir}")

    print(f"Found {len(npy_files)} .npy files to process.")

    # Split the sorted file list into fixed-size chunks, one chunk per shard;
    # the last chunk may be smaller than shard_size.
    file_chunks = [
        npy_files[i:i + shard_size]
        for i in range(0, len(npy_files), shard_size)
    ]

    num_shards = len(file_chunks)
    print(f"Data will be split into {num_shards} shards.")

    # Each task is a single picklable tuple so it can cross process boundaries.
    tasks = [
        (shard_id, chunk, output_path)
        for shard_id, chunk in enumerate(file_chunks)
    ]

    # One worker per CPU core by default; tqdm tracks completed shards.
    with concurrent.futures.ProcessPoolExecutor() as executor:
        results = list(tqdm(
            executor.map(_create_shard, tasks),
            total=num_shards,
            desc="Creating Shards"
        ))

    print(f"\nConversion complete! Created {len(results)} shards in '{output_dir}'.")
    return len(results)
|
|
|
|
if __name__ == "__main__":
    # Script entry point: shard the "raw" folder with the default shard size.
    num_created_shards = create_webdataset_from_npy(
        input_dir="raw",
        output_dir="webdataset_output",
        shard_size=1000,
    )
    print(f"\nSuccessfully created {num_created_shards} shards.")