#!/usr/bin/env bash
#
# Prepare taco_dataset for Hugging Face upload.
# Splits files > 50GB into 49GB chunks and generates a manifest.
#
# Usage: ./prepare_upload.sh [REPO_ID]
#   REPO_ID defaults to mzhobro/taco_dataset
#
# After running, upload with:
#   huggingface-cli upload <REPO_ID> . --repo-type dataset
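#
# To reassemble a split file after download (sketch; big.zip is a placeholder):
#   cat big.zip.*.part > big.zip
# The numeric suffixes from `split -d` keep the parts in shell-glob order.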
#

set -euo pipefail
cd "$(dirname "$0")"

THRESHOLD=$((50 * 1000 * 1000 * 1000)) # 50 GB, decimal, matching the Hub's 50 GB per-file limit
CHUNK="49GB"                           # GNU split: "GB" = 10^9 bytes ("G" would mean 2^30)
BACKUP_DIR="../taco_originals_backup"

echo "=== Finding files > 50GB ==="
LARGE_FILES=()
while IFS= read -r -d '' f; do
  size=$(stat -c%s "$f")
  if [ "$size" -gt "$THRESHOLD" ]; then
    LARGE_FILES+=("$f")
  fi
done < <(find . -type f -name '*.zip' -print0)

if [ ${#LARGE_FILES[@]} -eq 0 ]; then
  echo "No files exceed 50GB. Nothing to split."
else
  echo "Found ${#LARGE_FILES[@]} files to split:"
  for f in "${LARGE_FILES[@]}"; do
    echo "  $f ($(du -h "$f" | cut -f1))"
  done
  echo ""
  mkdir -p "$BACKUP_DIR"
  for f in "${LARGE_FILES[@]}"; do
    echo "[SPLIT] $f"
    split -b "$CHUNK" -d --additional-suffix=.part "$f" "${f}."
    echo "  Parts created:"
    ls -lh "${f}".*.part
    # Move original to backup (instant on same filesystem)
    backup_path="$BACKUP_DIR/$f"
    mkdir -p "$(dirname "$backup_path")"
    mv "$f" "$backup_path"
    echo "  Original moved to $backup_path"
    echo ""
  done
fi
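
# Optional sanity check (sketch, with a placeholder path): verify the parts
# byte-for-byte against the backed-up original before removing the backup:
#   cat some/file.zip.*.part | cmp - "$BACKUP_DIR/some/file.zip"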
| echo "=== Generating manifest ===" | |
find . -type f \( -name '*.zip' -o -name '*.part' -o -name '*.md' \) -print0 | sort -z |
  while IFS= read -r -d '' f; do
    size=$(stat -c%s "$f")
    echo "$size $f"
  done > MANIFEST.txt
| echo "Wrote MANIFEST.txt ($(wc -l < MANIFEST.txt) files)" | |
| echo "" | |
| echo "=== Done ===" | |
| echo "To upload:" | |
| echo " huggingface-cli upload ${1:-mzhobro/taco_dataset} . --repo-type dataset" | |