Upload hf_job_sam3d.py with huggingface_hub

hf_job_sam3d.py  CHANGED  (+101 -43)
@@ -22,7 +22,8 @@ logger = logging.getLogger(__name__)
 
 import numpy as np
 import torch
-from datasets import load_dataset
+from datasets import load_dataset
+from huggingface_hub import HfApi
 from PIL import Image
 import cv2
 import json
@@ -106,6 +107,8 @@ def main():
     ap.add_argument('--batch-size', type=int, default=4)
     ap.add_argument('--shard-index', type=int, default=0)
     ap.add_argument('--num-shards', type=int, default=1)
+    ap.add_argument('--max-images', type=int, default=8000, help='Limit number of images processed per shard')
+    ap.add_argument('--upload-interval', type=int, default=500, help='Upload partial results every N images')
     args = ap.parse_args()
 
     logger.info(f"Arguments: {vars(args)}")
@@ -135,48 +138,103 @@ def main():
     ds = ds.shard(num_shards=args.num_shards, index=args.shard_index)
     logger.info(f"Using shard {args.shard_index+1}/{args.num_shards}")
 
-    # ... (42 removed lines; their content is not preserved in this extraction)
+    # Prepare incremental upload
+    api = HfApi(token=os.environ.get('HF_TOKEN'))
+    token = os.environ.get('HF_TOKEN')
+    repo_id = args.output_dataset
+
+    # Load existing image_ids (skip logic)
+    existing_ids = set()
+    try:
+        from datasets import load_dataset as _ld
+        existing = _ld(repo_id, split=args.split)
+        for r in existing:
+            existing_ids.add(r['image_id'])
+        logger.info(f"Loaded {len(existing_ids)} existing image_ids to skip")
+    except Exception:
+        logger.info("No existing output dataset found; starting fresh")
+
+    # Processing loop with manual batching
+    logger.info(f"Processing with batch_size={args.batch_size}, max_images={args.max_images}, upload_interval={args.upload_interval}")
+    buffer = []  # list of dicts {image_id, num_humans, sam3d_data}
+    total_processed = 0
+    upload_index = 0
+    batch_images = []
+    batch_paths = []
+
+    def flush_buffer():
+        nonlocal buffer, upload_index
+        if not buffer:
+            return
+        import pyarrow as pa
+        import pyarrow.parquet as pq
+        # Build columns
+        image_ids = [b['image_id'] for b in buffer]
+        num_humans = [b['num_humans'] for b in buffer]
+        sam3d_data = [b['sam3d_data'] for b in buffer]
+        table = pa.table({'image_id': image_ids, 'num_humans': num_humans, 'sam3d_data': sam3d_data})
+        file_name = f"batch-sh{args.shard_index}-u{upload_index:04d}.parquet"
+        local_dir = Path('sam3d_batches')
+        local_dir.mkdir(parents=True, exist_ok=True)
+        local_path = local_dir / file_name
+        pq.write_table(table, local_path)
+        path_in_repo = f"data/{file_name}"
+        logger.info(f"Uploading incremental batch {upload_index} with {len(buffer)} images -> {path_in_repo}")
+        try:
+            api.upload_file(path_or_fileobj=str(local_path), path_in_repo=path_in_repo, repo_id=repo_id, repo_type='dataset', token=token)
+            logger.info("✓ Incremental upload committed")
+        except Exception as e:
+            logger.error(f"Incremental upload failed: {e}")
+        buffer.clear()
+        upload_index += 1
+
+    # Iterate streaming dataset manually
+    current_batch_imgs = []
+    current_batch_paths = []
+    for idx, sample in enumerate(ds):
+        if idx >= args.max_images:
+            break
+        image = sample['image']
+        image_path = sample.get('image_path', None)
+        image_id = Path(image_path).stem if image_path else f"img_{idx:06d}"
+        if image_id in existing_ids:
+            continue
+        current_batch_imgs.append(image)
+        current_batch_paths.append(image_path)
+        # When batch is full, process it
+        if len(current_batch_imgs) == args.batch_size:
+            batch = {'image': current_batch_imgs, 'image_path': current_batch_paths}
+            batch_result = process_batch(batch)
+            for i in range(len(batch_result['image_id'])):
+                buffer.append({
+                    'image_id': batch_result['image_id'][i],
+                    'num_humans': batch_result['num_humans'][i],
+                    'sam3d_data': batch_result['sam3d_data'][i]
+                })
+                existing_ids.add(batch_result['image_id'][i])
+            total_processed += len(batch_result['image_id'])
+            current_batch_imgs = []
+            current_batch_paths = []
+            if total_processed % 50 == 0:
+                logger.info(f"Processed {total_processed} images")
+            if total_processed % args.upload_interval == 0:
+                flush_buffer()
+    # Process any leftover partial batch
+    if current_batch_imgs:
+        batch = {'image': current_batch_imgs, 'image_path': current_batch_paths}
+        batch_result = process_batch(batch)
+        for i in range(len(batch_result['image_id'])):
+            buffer.append({
+                'image_id': batch_result['image_id'][i],
+                'num_humans': batch_result['num_humans'][i],
+                'sam3d_data': batch_result['sam3d_data'][i]
+            })
+            existing_ids.add(batch_result['image_id'][i])
+        total_processed += len(batch_result['image_id'])
+    # Final flush
+    flush_buffer()
+    logger.info(f"✓ Finished shard processing with total images processed: {total_processed}")
+    logger.info("All incremental uploads done.")
 
 
 if __name__ == '__main__':
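
Not part of the commit, but a minimal sketch of how the incrementally uploaded batches could be read back: the job writes parquet files under data/ in the output dataset repo, so the standard datasets loader should pick them up. The repo id and split name below are placeholders, not values taken from the script.

# Sketch: load the accumulated SAM-3D results from the output dataset repo.
# "your-username/sam3d-outputs" and split="train" are assumed placeholders;
# use the repo id passed as --output-dataset when the job was run.
from datasets import load_dataset

ds = load_dataset("your-username/sam3d-outputs", split="train")
print(ds.column_names)   # expected: ['image_id', 'num_humans', 'sam3d_data']
print(len(ds), "images processed so far")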