Commit
·
6f5c22f
1 Parent(s): 4f91e22
Enhance documentation and add batch size argument for improved GPU memory management
Browse files
atlas-export.py: +40 −23
atlas-export.py
CHANGED
|
@@ -26,6 +26,20 @@ Example usage:
|
|
| 26 |
--image-column image \
|
| 27 |
--model openai/clip-vit-base-patch32 \
|
| 28 |
--sample 10000
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
|
| 30 |
# Run on HF Jobs with GPU (requires HF token for Space deployment)
|
| 31 |
# Get your token: python -c "from huggingface_hub import get_token; print(get_token())"
|
|
@@ -93,7 +107,7 @@ def sample_dataset_to_parquet(
|
|
| 93 |
temp_dir = Path(tempfile.mkdtemp(prefix="atlas_data_"))
|
| 94 |
parquet_path = temp_dir / "data.parquet"
|
| 95 |
|
| 96 |
-
logger.info(
|
| 97 |
sampled_ds.to_parquet(str(parquet_path))
|
| 98 |
|
| 99 |
file_size = parquet_path.stat().st_size / (1024 * 1024) # MB
|
|
@@ -130,6 +144,9 @@ def build_atlas_command(args) -> tuple[list, str, Optional[Path]]:
|
|
| 130 |
if args.model:
|
| 131 |
cmd.extend(["--model", args.model])
|
| 132 |
|
|
|
|
|
|
|
|
|
|
| 133 |
# Always specify text column to avoid interactive prompt
|
| 134 |
text_col = args.text_column or "text" # Default to "text" if not specified
|
| 135 |
cmd.extend(["--text", text_col])
|
|
@@ -346,6 +363,12 @@ def main():
|
|
| 346 |
action="store_true",
|
| 347 |
help="Trust remote code in dataset/model",
|
| 348 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 349 |
|
| 350 |
# Pre-computed embeddings
|
| 351 |
parser.add_argument(
|
|
@@ -408,29 +431,23 @@ def main():
|
|
| 408 |
if hf_token:
|
| 409 |
login(token=hf_token)
|
| 410 |
logger.info("✅ Authenticated with Hugging Face")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 411 |
else:
|
| 412 |
-
#
|
| 413 |
-
|
| 414 |
-
|
| 415 |
-
|
| 416 |
-
|
| 417 |
-
|
| 418 |
-
|
| 419 |
-
|
| 420 |
-
|
| 421 |
-
sys.exit(0)
|
| 422 |
-
else:
|
| 423 |
-
# In non-interactive environments, fail immediately if no token
|
| 424 |
-
logger.error(
|
| 425 |
-
"No HF token found. Cannot deploy to Space in non-interactive environment."
|
| 426 |
-
)
|
| 427 |
-
logger.error(
|
| 428 |
-
"Please set HF_TOKEN environment variable or use --hf-token argument."
|
| 429 |
-
)
|
| 430 |
-
logger.error(
|
| 431 |
-
"Checked: HF_TOKEN, HUGGING_FACE_HUB_TOKEN, and HF CLI login"
|
| 432 |
-
)
|
| 433 |
-
sys.exit(1)
|
| 434 |
|
| 435 |
# Set up output directory
|
| 436 |
if args.output_dir:
|
|
|
|
| 26 |
--image-column image \
|
| 27 |
--model openai/clip-vit-base-patch32 \
|
| 28 |
--sample 10000
|
| 29 |
+
|
| 30 |
+
# With custom batch size for GPU memory management
|
| 31 |
+
uv run atlas-export.py \
|
| 32 |
+
large-text-dataset \
|
| 33 |
+
--space-name large-text-viz \
|
| 34 |
+
--model sentence-transformers/all-mpnet-base-v2 \
|
| 35 |
+
--batch-size 64 # Increase for faster processing on powerful GPUs
|
| 36 |
+
|
| 37 |
+
# Small batch size for limited GPU memory
|
| 38 |
+
uv run atlas-export.py \
|
| 39 |
+
image-dataset \
|
| 40 |
+
--space-name image-viz \
|
| 41 |
+
--image-column image \
|
| 42 |
+
--batch-size 8 # Reduce to avoid OOM errors
|
| 43 |
|
| 44 |
# Run on HF Jobs with GPU (requires HF token for Space deployment)
|
| 45 |
# Get your token: python -c "from huggingface_hub import get_token; print(get_token())"
|
|
|
|
| 107 |
temp_dir = Path(tempfile.mkdtemp(prefix="atlas_data_"))
|
| 108 |
parquet_path = temp_dir / "data.parquet"
|
| 109 |
|
| 110 |
+
logger.info("Saving sampled data to temporary file...")
|
| 111 |
sampled_ds.to_parquet(str(parquet_path))
|
| 112 |
|
| 113 |
file_size = parquet_path.stat().st_size / (1024 * 1024) # MB
|
|
|
|
| 144 |
if args.model:
|
| 145 |
cmd.extend(["--model", args.model])
|
| 146 |
|
| 147 |
+
if args.batch_size:
|
| 148 |
+
cmd.extend(["--batch-size", str(args.batch_size)])
|
| 149 |
+
|
| 150 |
# Always specify text column to avoid interactive prompt
|
| 151 |
text_col = args.text_column or "text" # Default to "text" if not specified
|
| 152 |
cmd.extend(["--text", text_col])
|
|
|
|
| 363 |
action="store_true",
|
| 364 |
help="Trust remote code in dataset/model",
|
| 365 |
)
|
| 366 |
+
parser.add_argument(
|
| 367 |
+
"--batch-size",
|
| 368 |
+
type=int,
|
| 369 |
+
help="Batch size for processing embeddings (default: 32 for text, 16 for images). "
|
| 370 |
+
"Larger values use more memory but may be faster on GPUs",
|
| 371 |
+
)
|
| 372 |
|
| 373 |
# Pre-computed embeddings
|
| 374 |
parser.add_argument(
|
|
|
|
| 431 |
if hf_token:
|
| 432 |
login(token=hf_token)
|
| 433 |
logger.info("✅ Authenticated with Hugging Face")
|
| 434 |
+
elif is_interactive := sys.stdin.isatty():
|
| 435 |
+
logger.warning(
|
| 436 |
+
"No HF token provided. You may not be able to push to the Hub."
|
| 437 |
+
)
|
| 438 |
+
response = input("Continue anyway? (y/n): ")
|
| 439 |
+
if response.lower() != "y":
|
| 440 |
+
sys.exit(0)
|
| 441 |
else:
|
| 442 |
+
# In non-interactive environments, fail immediately if no token
|
| 443 |
+
logger.error(
|
| 444 |
+
"No HF token found. Cannot deploy to Space in non-interactive environment."
|
| 445 |
+
)
|
| 446 |
+
logger.error(
|
| 447 |
+
"Please set HF_TOKEN environment variable or use --hf-token argument."
|
| 448 |
+
)
|
| 449 |
+
logger.error("Checked: HF_TOKEN, HUGGING_FACE_HUB_TOKEN, and HF CLI login")
|
| 450 |
+
sys.exit(1)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 451 |
|
| 452 |
# Set up output directory
|
| 453 |
if args.output_dir:
|