#!/usr/bin/env python3
"""
DTO Archive Protocol - Folder-based Upload Alternative
Uses folder upload instead of individual file upload to work around Xet backend limitations
"""
import os
import logging
import tempfile
import shutil
from pathlib import Path
from huggingface_hub import HfApi
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class FolderArchiveProtocol:
def __init__(self):
self.token = os.getenv('HF_TOKEN')
if not self.token:
raise ValueError("HF_TOKEN environment variable not set")
self.api = HfApi(token=self.token)
self.repo_models = os.getenv('HF_REPO_MODELS', 'LevelUp2x/dto-models')
self.repo_datasets = os.getenv('HF_REPO_DATASETS', 'LevelUp2x/dto-datasets')
self.repo_artifacts = os.getenv('HF_REPO_ARTIFACTS', 'LevelUp2x/dto-artifacts')
# Create temporary staging directory
self.staging_dir = tempfile.mkdtemp(prefix="dto_archive_")
logger.info(f"Staging directory: {self.staging_dir}")
def discover_files(self):
"""Discover files for archiving, excluding extremely large files (>10GB)"""
logger.info("=== DISCOVERY PHASE ===")
files_to_archive = []
extremely_large_files = []
# 1. Experiments directory
experiments_path = "/data/experiments"
if os.path.exists(experiments_path):
for root, _, files in os.walk(experiments_path):
for file in files:
if file.endswith(('.safetensors', '.pt', '.bin', '.parquet', '.jsonl', '.csv')):
file_path = os.path.join(root, file)
try:
file_size = os.path.getsize(file_path)
                            # Skip files larger than 50GB (Xet backend limit)
                            if file_size > 50 * 1024**3:
extremely_large_files.append((file_path, file_size / 1024**3))
continue
files_to_archive.append(file_path)
except OSError:
logger.warning(f"Could not get size for {file_path}, skipping")
# 2. Data workspace (excluding cache)
data_path = "/data/data/workspace"
if os.path.exists(data_path):
for root, _, files in os.walk(data_path):
# Skip cache directories
if any(part in root for part in ['.cache', '.local', '.config']):
continue
for file in files:
if file.endswith(('.safetensors', '.pt', '.bin', '.parquet', '.jsonl', '.csv')):
file_path = os.path.join(root, file)
try:
file_size = os.path.getsize(file_path)
                            # Skip files larger than 50GB (Xet backend limit)
                            if file_size > 50 * 1024**3:
extremely_large_files.append((file_path, file_size / 1024**3))
continue
files_to_archive.append(file_path)
except OSError:
logger.warning(f"Could not get size for {file_path}, skipping")
# Log extremely large files that will be skipped
if extremely_large_files:
logger.warning(f"Skipping {len(extremely_large_files)} extremely large files (>50GB):")
for file_path, size_gb in extremely_large_files:
logger.warning(f" {size_gb:.1f}GB - {file_path}")
logger.info(f"Discovered {len(files_to_archive)} files for archiving (after size filtering)")
return files_to_archive
def organize_by_type_and_size(self, files):
"""Organize files by type and approximate size for batch uploading"""
batches = {
'models': [], # Model files (.safetensors, .pt, .bin)
'datasets': [], # Data files (.parquet, .jsonl, .csv)
            'artifacts': []   # Everything else (empty in practice: discovery only selects the extensions above)
}
for file_path in files:
if file_path.endswith(('.safetensors', '.pt', '.bin')):
batches['models'].append(file_path)
elif file_path.endswith(('.parquet', '.jsonl', '.csv')):
batches['datasets'].append(file_path)
else:
batches['artifacts'].append(file_path)
# Log batch sizes
for batch_type, batch_files in batches.items():
total_size = sum(os.path.getsize(f) for f in batch_files)
logger.info(f"{batch_type.upper()}: {len(batch_files)} files, {total_size / (1024**3):.1f} GB")
return batches
def create_staging_folders(self, batches):
"""Create staging folders for each batch"""
staged_folders = {}
for batch_type, files in batches.items():
if not files:
continue
batch_dir = os.path.join(self.staging_dir, batch_type)
os.makedirs(batch_dir, exist_ok=True)
for file_path in files:
                # Preserve directory structure within the batch
                if file_path.startswith('/data/experiments/'):
                    rel_path = file_path.replace('/data/experiments/', 'experiments/')
                elif file_path.startswith('/data/data/workspace/'):
                    rel_path = file_path.replace('/data/data/workspace/', 'data/')
                else:
                    rel_path = 'other/' + os.path.basename(file_path)
target_path = os.path.join(batch_dir, rel_path)
os.makedirs(os.path.dirname(target_path), exist_ok=True)
# Create symbolic link to avoid copying large files
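                # Note (assumption): upload_folder reads file contents at upload
                # time, so symlinked files should be uploaded in full without being
                # copied into staging, provided the filesystem allows links into /data.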
try:
os.symlink(file_path, target_path)
logger.debug(f"Symlinked {file_path} -> {target_path}")
except OSError as e:
# If symlink fails, skip the file
logger.warning(f"Skipping {file_path}: cannot create symlink ({e})")
continue
# Check if any files were actually staged
staged_files = []
for root, _, files in os.walk(batch_dir):
for file in files:
staged_files.append(os.path.join(root, file))
if staged_files:
staged_folders[batch_type] = batch_dir
logger.info(f"Staged {len(staged_files)} files in {batch_dir}")
else:
logger.warning(f"No files staged for {batch_type} batch")
return staged_folders
def upload_folder_batch(self, folder_path, repo_id, repo_type="model"):
"""Upload a folder batch to HF - only if repository exists"""
try:
# Check if repository exists first
try:
                # repo_info defaults to repo_type="model"; pass the actual type so
                # dataset repos are detected correctly
                self.api.repo_info(repo_id, repo_type=repo_type)
logger.info(f"Repository exists: {repo_id}")
except Exception:
logger.warning(f"Skipping upload to {repo_id} - repository does not exist or access denied")
return False
logger.info(f"Uploading folder {folder_path} to {repo_id}")
            self.api.upload_folder(
folder_path=folder_path,
repo_id=repo_id,
repo_type=repo_type,
commit_message=f"DTO Archive: Uploading {os.path.basename(folder_path)} batch"
)
logger.info(f"βœ… Successfully uploaded {folder_path} to {repo_id}")
return True
except Exception as e:
logger.error(f"❌ Failed to upload {folder_path}: {e}")
return False
def execute_protocol(self):
"""Execute the complete archive protocol"""
try:
logger.info("πŸš€ STARTING FOLDER-BASED ARCHIVE PROTOCOL")
# Phase 1: Discovery
files = self.discover_files()
if not files:
logger.warning("No files found for archiving")
return
# Phase 2: Organization
batches = self.organize_by_type_and_size(files)
# Phase 3: Staging
staged_folders = self.create_staging_folders(batches)
# Phase 4: Upload
upload_results = {}
# Upload models batch
if 'models' in staged_folders:
upload_results['models'] = self.upload_folder_batch(
staged_folders['models'], self.repo_models, 'model'
)
# Upload datasets batch
if 'datasets' in staged_folders:
upload_results['datasets'] = self.upload_folder_batch(
staged_folders['datasets'], self.repo_datasets, 'dataset'
)
# Upload artifacts batch
if 'artifacts' in staged_folders:
upload_results['artifacts'] = self.upload_folder_batch(
staged_folders['artifacts'], self.repo_artifacts, 'dataset'
)
# Summary
success_count = sum(1 for result in upload_results.values() if result)
total_count = len(upload_results)
logger.info(f"πŸ“Š Upload Summary: {success_count}/{total_count} batches successful")
if success_count > 0:
logger.info("βœ… Archive protocol completed successfully")
else:
logger.error("❌ Archive protocol failed completely")
finally:
# Cleanup staging directory
if os.path.exists(self.staging_dir):
shutil.rmtree(self.staging_dir)
logger.info(f"Cleaned up staging directory: {self.staging_dir}")
if __name__ == "__main__":
# Load environment variables
env_file = "/data/adaptai/platform/dataops/dto/.env"
if os.path.exists(env_file):
with open(env_file) as f:
            for line in f:
                line = line.strip()
                # Skip blank lines, comments, and malformed lines without '='
                if not line or line.startswith('#') or '=' not in line:
                    continue
                key, value = line.split('=', 1)
                os.environ[key] = value
protocol = FolderArchiveProtocol()
protocol.execute_protocol()
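# Example invocation (a sketch; assumes HF_TOKEN is exported or present in the
# .env file loaded above):
#   HF_TOKEN=<Hugging Face write token> python archive_protocol_folder.py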