#!/bin/bash
#
# DTO ARCHIVE PROTOCOL: Upload-then-Delete Workflow
# Automated process to upload assets to HF/Xet, verify, then safely delete local copies
# Prometheus - Head of Data Migration & Transfer Operations
#
# Required env vars (loaded from ../.env): HF_TOKEN, HF_REPO_MODELS,
# HF_REPO_DATASETS, HF_REPO_ARTIFACTS.
# Usage: archive_protocol.sh [--delete]   (--delete enables the deletion phase)

set -euo pipefail

# Load configuration from the repo-level .env file (exported via set -a so
# the Python subprocesses inherit everything).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DTO_ROOT="$(dirname "$SCRIPT_DIR")"
ENV_FILE="$DTO_ROOT/.env"

if [ -f "$ENV_FILE" ]; then
  set -a
  # shellcheck disable=SC1090 -- path is computed at runtime
  source "$ENV_FILE"
  set +a
  echo "✅ Loaded .env file from $ENV_FILE"
else
  echo "❌ .env file not found at $ENV_FILE"
  exit 1
fi

# Run artifacts: timestamped log plus a backup dir holding the manifests.
PROTOCOL_LOG="/tmp/archive_protocol_$(date +%Y%m%d_%H%M%S).log"
BACKUP_DIR="/tmp/archive_backup_$(date +%Y%m%d_%H%M%S)"
UPLOAD_MANIFEST="$BACKUP_DIR/upload_manifest.txt"
DELETE_MANIFEST="$BACKUP_DIR/delete_manifest.txt"

# Target repositories — fail fast with a clear message if .env is incomplete
# (plain "$HF_REPO_MODELS" would die with an opaque unbound-variable error
# under set -u).
REPO_MODELS="${HF_REPO_MODELS:?HF_REPO_MODELS must be set in $ENV_FILE}"
REPO_DATASETS="${HF_REPO_DATASETS:?HF_REPO_DATASETS must be set in $ENV_FILE}"
REPO_ARTIFACTS="${HF_REPO_ARTIFACTS:?HF_REPO_ARTIFACTS must be set in $ENV_FILE}"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Logging helpers: everything is tee'd into the protocol log; errors also go
# to stderr.
log() {
  echo -e "${GREEN}[$(date '+%Y-%m-%d %H:%M:%S')]${NC} $1" | tee -a "$PROTOCOL_LOG"
}

error() {
  echo -e "${RED}[ERROR]${NC} $1" | tee -a "$PROTOCOL_LOG" >&2
}

warning() {
  echo -e "${YELLOW}[WARNING]${NC} $1" | tee -a "$PROTOCOL_LOG"
}

info() {
  echo -e "${BLUE}[INFO]${NC} $1" | tee -a "$PROTOCOL_LOG"
}

# Phase 1: Discovery & Inventory
# Scans /data/experiments and /data/data/workspace for archivable assets and
# writes the combined, de-duplicated list to $UPLOAD_MANIFEST.
discovery_phase() {
  log "=== PHASE 1: DISCOVERY & INVENTORY ==="

  mkdir -p "$BACKUP_DIR"

  log "Discovering space optimization opportunities..."

  # 1. Experiments directory: model weights and tabular data files.
  log "Inventory: /data/experiments/"
  find /data/experiments -type f \( -name "*.safetensors" -o -name "*.pt" -o -name "*.bin" \) > "$BACKUP_DIR/experiments_files.txt"
  find /data/experiments -type f \( -name "*.parquet" -o -name "*.jsonl" -o -name "*.csv" \) >> "$BACKUP_DIR/experiments_files.txt"

  # 2. Data directory components (exclude cache and problematic directories).
  log "Inventory: /data/data/ workspace (excluding cache)"
  find /data/data/workspace -type f -size +100M ! -path "*/.cache/*" ! -path "*/.local/*" 2>/dev/null > "$BACKUP_DIR/data_files.txt" || true

  # Create combined manifest
  cat "$BACKUP_DIR/experiments_files.txt" "$BACKUP_DIR/data_files.txt" | sort -u > "$UPLOAD_MANIFEST"

  local total_files total_size
  total_files=$(wc -l < "$UPLOAD_MANIFEST")
  # "s + 0" forces a numeric result even when the manifest is empty, so
  # numfmt below never receives an empty argument.
  total_size=$(while IFS= read -r f; do du -b -- "$f" 2>/dev/null | cut -f1; done < "$UPLOAD_MANIFEST" \
    | awk '{s += $1} END {print s + 0}')

  log "Discovered $total_files files for potential archiving"
  log "Total size: $(numfmt --to=iec "$total_size")"

  # Show top 5 largest files (sort du's "size<TAB>path" output, then format).
  log "Top 5 largest files:"
  while IFS= read -r f; do du -h -- "$f" 2>/dev/null; done < "$UPLOAD_MANIFEST" \
    | sort -rh | head -5 \
    | awk -F'\t' '{print "  " $1 " - " $2}' | tee -a "$PROTOCOL_LOG"
}

# Phase 2: Upload to HF/Xet
# Uploads every manifest entry to the repository matching its file type and
# records successes in $DELETE_MANIFEST. Exits non-zero if nothing uploads.
upload_phase() {
  log "=== PHASE 2: UPLOAD TO HF/XET ==="

  local uploaded_count=0
  local skipped_count=0
  local failed_count=0

  # Checked once up front; ${HF_TOKEN:-} keeps set -u from aborting before
  # we can print a useful error.
  if [ -z "${HF_TOKEN:-}" ]; then
    error "HF_TOKEN is not set"
    exit 1
  fi

  # Create upload tracking file
  echo "# Archive Protocol Upload Log - $(date)" > "$BACKUP_DIR/upload_results.txt"

  local file_path repo filename relative_path
  while IFS= read -r file_path; do
    if [ ! -f "$file_path" ]; then
      warning "File not found: $file_path"
      skipped_count=$((skipped_count + 1))
      continue
    fi

    # Route by extension: weights -> models repo, tabular -> datasets repo,
    # everything else -> artifacts repo.
    repo="$REPO_ARTIFACTS"
    filename=$(basename -- "$file_path")
    if [[ "$filename" == *.safetensors || "$filename" == *.pt || "$filename" == *.bin ]]; then
      repo="$REPO_MODELS"
    elif [[ "$filename" == *.parquet || "$filename" == *.jsonl || "$filename" == *.csv ]]; then
      repo="$REPO_DATASETS"
    fi

    # Mirror the on-disk layout inside the repository.
    if [[ "$file_path" == /data/experiments/* ]]; then
      relative_path="experiments/${file_path#/data/experiments/}"
    elif [[ "$file_path" == /data/data/* ]]; then
      relative_path="data/${file_path#/data/data/}"
    else
      relative_path="other/$(basename -- "$file_path")"
    fi

    log "Uploading: $relative_path → $repo"

    # Inputs are handed to Python via environment variables and a quoted
    # heredoc — never interpolated into the source — so quotes/metacharacters
    # in paths cannot break or inject into the script, and the token never
    # appears in the process's argv. The heredoc also supplies python's
    # stdin, so the manifest being read by this loop is untouched.
    if UPLOAD_SRC="$file_path" UPLOAD_DST="$relative_path" UPLOAD_REPO="$repo" \
       python3 - <<'PYEOF' 2>> "$PROTOCOL_LOG"
import os
import sys
from integrations.huggingface_client import HuggingFaceClient

client = HuggingFaceClient(token=os.environ['HF_TOKEN'])
if not client.is_authenticated():
    print('NOT_AUTHENTICATED')
    sys.exit(1)

success = client.upload_artifact(
    os.environ['UPLOAD_SRC'],
    os.environ['UPLOAD_DST'],
    os.environ['UPLOAD_REPO'],
)
print('SUCCESS' if success else 'FAILED')
sys.exit(0 if success else 1)
PYEOF
    then
      echo "✅ SUCCESS: $file_path → $repo/$relative_path" | tee -a "$BACKUP_DIR/upload_results.txt"
      echo "$file_path" >> "$DELETE_MANIFEST"
      uploaded_count=$((uploaded_count + 1))
    else
      error "FAILED: $file_path"
      echo "❌ FAILED: $file_path" >> "$BACKUP_DIR/upload_results.txt"
      failed_count=$((failed_count + 1))
    fi
  done < "$UPLOAD_MANIFEST"

  log "Upload phase completed:"
  log "  ✅ Uploaded: $uploaded_count files"
  log "  ⚠️ Skipped: $skipped_count files"
  log "  ❌ Failed: $failed_count files"

  if [ "$uploaded_count" -eq 0 ]; then
    error "No files were uploaded successfully"
    exit 1
  fi
}

# Phase 3: Verification
# Queries each target repository and prints its file count, size, and a
# sample of its contents.
verification_phase() {
  log "=== PHASE 3: VERIFICATION ==="

  log "Verifying uploads on HF repositories..."

  # Repo names are passed via the environment (space-separated; HF repo ids
  # never contain spaces) for the same injection-safety reasons as uploads.
  VERIFY_REPOS="$REPO_MODELS $REPO_DATASETS $REPO_ARTIFACTS" \
    python3 - <<'PYEOF' | tee -a "$PROTOCOL_LOG"
import os
from huggingface_hub import HfApi

api = HfApi(token=os.environ['HF_TOKEN'])
for repo in os.environ['VERIFY_REPOS'].split():
    try:
        info = api.repo_info(repo)
        print(f'✅ {repo}: {len(info.siblings)} files, {info.size} bytes')
        # List files if repository has content (first 5 only).
        for file in (info.siblings or [])[:5]:
            print(f'  - {file.rfilename} ({file.size} bytes)')
    except Exception as e:
        print(f'❌ {repo}: {e}')
PYEOF

  log "Verification completed"
}

# Phase 4: Safe Deletion (Optional)
# Deletes files recorded in $DELETE_MANIFEST (i.e. verified-uploaded files
# only) after an interactive confirmation.
deletion_phase() {
  log "=== PHASE 4: SAFE DELETION (OPTIONAL) ==="

  if [ ! -f "$DELETE_MANIFEST" ] || [ ! -s "$DELETE_MANIFEST" ]; then
    warning "No files marked for deletion - skipping deletion phase"
    return
  fi

  local delete_count total_size=0 size
  delete_count=$(wc -l < "$DELETE_MANIFEST")

  log "Found $delete_count files ready for safe deletion"

  # Calculate total space to be freed. Note the fallback covers a failed
  # `du` itself, not just a failed `cut`.
  local file
  while IFS= read -r file; do
    if [ -f "$file" ]; then
      size=$(du -b -- "$file" 2>/dev/null | cut -f1) || size=0
      total_size=$((total_size + ${size:-0}))
    fi
  done < "$DELETE_MANIFEST"

  log "Total space to free: $(numfmt --to=iec "$total_size")"

  # Confirm deletion
  read -p "Do you want to proceed with deletion? (y/N): " -n 1 -r
  echo
  if [[ ! $REPLY =~ ^[Yy]$ ]]; then
    log "Deletion cancelled by user"
    return
  fi

  # Perform safe deletion
  local deleted_count=0
  local failed_count=0
  local file_path
  while IFS= read -r file_path; do
    if [ -f "$file_path" ]; then
      if rm -v -- "$file_path" 2>/dev/null; then
        echo "✅ DELETED: $file_path" | tee -a "$PROTOCOL_LOG"
        deleted_count=$((deleted_count + 1))
      else
        error "FAILED to delete: $file_path"
        failed_count=$((failed_count + 1))
      fi
    else
      warning "File already gone: $file_path"
    fi
  done < "$DELETE_MANIFEST"

  log "Deletion completed:"
  log "  ✅ Deleted: $deleted_count files"
  log "  ❌ Failed: $failed_count files"

  # Show disk space after deletion
  log "Disk space after deletion:"
  df -h /data | tee -a "$PROTOCOL_LOG"
}

# Phase 5: Cleanup & Report
# Writes a markdown summary of the run into $BACKUP_DIR.
cleanup_phase() {
  log "=== PHASE 5: CLEANUP & REPORT ==="

  # grep -c prints "0" AND exits 1 when nothing matches, so `|| echo 0`
  # would yield "0\n0"; reassigning on failure avoids the double value.
  local total_uploaded total_failed
  total_uploaded=$(grep -c "✅ SUCCESS" "$BACKUP_DIR/upload_results.txt" 2>/dev/null) || total_uploaded=0
  total_failed=$(grep -c "❌ FAILED" "$BACKUP_DIR/upload_results.txt" 2>/dev/null) || total_failed=0

  cat > "$BACKUP_DIR/summary_report.md" << EOF
# DTO Archive Protocol Summary

## Execution Details
- **Timestamp**: $(date)
- **Protocol Log**: $PROTOCOL_LOG
- **Backup Directory**: $BACKUP_DIR

## Results
- **Files Discovered**: $(wc -l < "$UPLOAD_MANIFEST" 2>/dev/null || echo 0)
- **Successfully Uploaded**: $total_uploaded
- **Upload Failures**: $total_failed
- **Files Deleted**: $(wc -l < "$DELETE_MANIFEST" 2>/dev/null || echo 0)

## Repository Status
- **Models Repository**: $REPO_MODELS
- **Datasets Repository**: $REPO_DATASETS
- **Artifacts Repository**: $REPO_ARTIFACTS

## Next Steps
1. Verify uploads on Hugging Face Hub
2. Monitor repository storage usage
3. Run regular archive protocols for new data
4. Review failed uploads for manual intervention
EOF

  log "Summary report created: $BACKUP_DIR/summary_report.md"
  log "Protocol completed successfully!"
}

# Main execution: run all phases in order; deletion only with --delete.
main() {
  log "🚀 STARTING DTO ARCHIVE PROTOCOL"
  log "Timestamp: $(date)"
  log "Protocol log: $PROTOCOL_LOG"
  log "Backup directory: $BACKUP_DIR"

  # Execute phases
  discovery_phase
  upload_phase
  verification_phase

  # Only proceed to deletion if explicitly requested
  if [ "${1:-}" = "--delete" ]; then
    deletion_phase
  else
    log "Skipping deletion phase (use --delete to enable)"
  fi

  cleanup_phase

  log "✅ ARCHIVE PROTOCOL COMPLETED SUCCESSFULLY"
}

# Run main function
main "$@"