#!/usr/bin/env python3
"""
Add height/width columns to the nastol-images-full dataset.

Streams the input dataset, records each image's dimensions alongside its
PNG bytes, and uploads the result as parquet batches to the output repo.
"""
import argparse
import io
import logging
import os
import sys
from pathlib import Path

from datasets import load_dataset
from huggingface_hub import HfApi
import pyarrow as pa
import pyarrow.parquet as pq

logging.basicConfig(
    level=logging.INFO,
    format='[%(asctime)s] %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    stream=sys.stdout,
    force=True,
)
logger = logging.getLogger(__name__)
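
# Example invocation (hypothetical values; requires an HF_TOKEN with write
# access to the output dataset repo):
#   HF_TOKEN=hf_... python hf_job_add_dimensions.py \
#       --num-shards 4 --shard-index 0 --batch-size 500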


def main():
    ap = argparse.ArgumentParser()
    ap.add_argument('--input-dataset', type=str, default='vlordier/nastol-images-full')
    ap.add_argument('--output-dataset', type=str, default='vlordier/nastol-images-full')
    ap.add_argument('--split', type=str, default='train')
    ap.add_argument('--shard-index', type=int, default=0)
    ap.add_argument('--num-shards', type=int, default=1)
    ap.add_argument('--batch-size', type=int, default=1000)
    args = ap.parse_args()

    logger.info("=" * 60)
    logger.info("Add Height/Width Columns to Dataset")
    logger.info("=" * 60)
    logger.info(f"Arguments: {vars(args)}")

    token = os.environ.get('HF_TOKEN')
    api = HfApi(token=token)

    # Load dataset in streaming mode so a shard can be processed without
    # downloading the whole dataset first
    logger.info(f"Loading {args.input_dataset}...")
    ds = load_dataset(args.input_dataset, split=args.split, streaming=True)
    if args.num_shards > 1:
        ds = ds.shard(num_shards=args.num_shards, index=args.shard_index)
        logger.info(f"Processing shard {args.shard_index + 1}/{args.num_shards}")

    # Process in batches
    buffer = []
    batch_count = 0
    upload_count = 0
    total_count = 0

    def flush_buffer():
        nonlocal upload_count
        if not buffer:
            return
        # Build columns
        image_paths = [b['image_path'] for b in buffer]
        images_bytes = [b['image'] for b in buffer]
        heights = [b['height'] for b in buffer]
        widths = [b['width'] for b in buffer]
        table = pa.table({
            'image_path': image_paths,
            'image': images_bytes,
            'height': heights,
            'width': widths,
        })
        # Write parquet locally, then upload the file to the dataset repo
        local_dir = Path('dimension_batches')
        local_dir.mkdir(parents=True, exist_ok=True)
        file_name = f"shard-{args.shard_index:03d}-batch-{upload_count:04d}.parquet"
        local_path = local_dir / file_name
        pq.write_table(table, local_path)
        # Upload
        path_in_repo = f"data/{file_name}"
        logger.info(f"Uploading batch {upload_count} with {len(buffer)} images -> {path_in_repo}")
        try:
            api.upload_file(
                path_or_fileobj=str(local_path),
                path_in_repo=path_in_repo,
                repo_id=args.output_dataset,
                repo_type='dataset',
                token=token,
            )
            logger.info("✓ Uploaded")
        except Exception as e:
            # A failed upload is logged but not retried; the batch is dropped
            # when the buffer is cleared below
            logger.error(f"Upload failed: {e}")
        buffer.clear()
        upload_count += 1
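
    # Schema sketch of what flush_buffer writes (types inferred by pa.table
    # from the Python lists above): image_path: string, image: binary
    # (PNG bytes), height: int64, width: int64. Quick local check, assuming
    # a batch file has already been written:
    #   pq.read_table('dimension_batches/shard-000-batch-0000.parquet').schema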
logger.info("Processing images...")
for idx, sample in enumerate(ds):
image = sample['image']
image_path = sample.get('image_path', f'img_{idx:06d}')
# Get dimensions
width, height = image.size
# Store original image bytes
import io
buf = io.BytesIO()
image.save(buf, format='PNG')
image_bytes = buf.getvalue()
buffer.append({
'image_path': image_path,
'image': image_bytes,
'height': height,
'width': width
})
if len(buffer) >= args.batch_size:
flush_buffer()
batch_count += 1
logger.info(f"Processed {batch_count * args.batch_size} images")
# Final flush
flush_buffer()
logger.info(f"✓ Completed shard {args.shard_index}: {batch_count * args.batch_size + len(buffer)} images")


if __name__ == '__main__':
    main()
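
# Reading the uploaded batches back (a sketch; assumes the parquet files
# under data/ in the default repo above are picked up by the datasets
# library):
#   from datasets import load_dataset
#   dims = load_dataset('vlordier/nastol-images-full', split='train', streaming=True)
#   sample = next(iter(dims))
#   print(sample['image_path'], sample['width'], sample['height'])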