| | |
"""
DTO Archive Protocol - Folder-based Upload Alternative

Uses folder upload instead of individual file upload to work around
Xet backend limitations.
"""
| |
|
| | import os |
| | import logging |
| | import tempfile |
| | import shutil |
| | from pathlib import Path |
| | from huggingface_hub import HfApi |
| |
|
| | |
# Configure the root logger for standalone script runs; all classes/functions
# below log through this module-level logger.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
| |
|
class FolderArchiveProtocol:
    """Archive local model/dataset files to the Hugging Face Hub in batches.

    Uses ``HfApi.upload_folder`` (folder upload) rather than per-file uploads
    to work around Xet backend limitations. Discovered files are staged via
    symlinks into a temporary directory, grouped by type (models / datasets /
    artifacts), then pushed one folder per repository.
    """

    # Extension routing: these decide which target repository a file goes to.
    _MODEL_EXTS = ('.safetensors', '.pt', '.bin')
    _DATASET_EXTS = ('.parquet', '.jsonl', '.csv')
    _ARCHIVE_EXTS = _MODEL_EXTS + _DATASET_EXTS

    # Files larger than this are never archived (50 GB).
    _MAX_FILE_BYTES = 50 * 1024 ** 3

    # Class-owned logger so the class does not depend on a module global.
    _logger = logging.getLogger(__name__)

    def __init__(self):
        """Read HF credentials/repo names from the environment and create the staging dir.

        Raises:
            ValueError: if the HF_TOKEN environment variable is not set.
        """
        self.token = os.getenv('HF_TOKEN')
        if not self.token:
            raise ValueError("HF_TOKEN environment variable not set")

        self.api = HfApi(token=self.token)
        self.repo_models = os.getenv('HF_REPO_MODELS', 'LevelUp2x/dto-models')
        self.repo_datasets = os.getenv('HF_REPO_DATASETS', 'LevelUp2x/dto-datasets')
        self.repo_artifacts = os.getenv('HF_REPO_ARTIFACTS', 'LevelUp2x/dto-artifacts')

        # Temporary symlink farm; removed in execute_protocol()'s finally block.
        self.staging_dir = tempfile.mkdtemp(prefix="dto_archive_")
        self._logger.info(f"Staging directory: {self.staging_dir}")

    def discover_files(self):
        """Discover files for archiving, excluding extremely large files (>50GB).

        Walks the two known data roots and collects files whose extension is
        in ``_ARCHIVE_EXTS`` and whose size is at most ``_MAX_FILE_BYTES``.

        Returns:
            list[str]: absolute paths of files eligible for archiving.
        """
        self._logger.info("=== DISCOVERY PHASE ===")

        files_to_archive = []
        extremely_large_files = []  # (path, size in GB), reported at the end

        self._scan_tree("/data/experiments", files_to_archive, extremely_large_files)
        # Workspace tree: skip tool/cache directories.
        self._scan_tree("/data/data/workspace", files_to_archive, extremely_large_files,
                        excluded_parts=('.cache', '.local', '.config'))

        if extremely_large_files:
            self._logger.warning(
                f"Skipping {len(extremely_large_files)} extremely large files (>50GB):")
            for file_path, size_gb in extremely_large_files:
                self._logger.warning(f" {size_gb:.1f}GB - {file_path}")

        self._logger.info(
            f"Discovered {len(files_to_archive)} files for archiving (after size filtering)")
        return files_to_archive

    def _scan_tree(self, root_path, files_out, oversized_out, excluded_parts=()):
        """Walk root_path, appending eligible files to files_out.

        Files exceeding ``_MAX_FILE_BYTES`` are appended to oversized_out as
        (path, size_in_gb) tuples. Directories whose path contains any string
        in excluded_parts are skipped entirely. Unreadable files are logged
        and skipped.
        """
        if not os.path.exists(root_path):
            return
        for root, _, files in os.walk(root_path):
            if any(part in root for part in excluded_parts):
                continue
            for name in files:
                if not name.endswith(self._ARCHIVE_EXTS):
                    continue
                file_path = os.path.join(root, name)
                try:
                    file_size = os.path.getsize(file_path)
                except OSError:
                    self._logger.warning(f"Could not get size for {file_path}, skipping")
                    continue
                if file_size > self._MAX_FILE_BYTES:
                    oversized_out.append((file_path, file_size / 1024 ** 3))
                else:
                    files_out.append(file_path)

    def organize_by_type_and_size(self, files):
        """Organize files by type and approximate size for batch uploading.

        Returns:
            dict[str, list[str]]: keys 'models', 'datasets', 'artifacts'.
        """
        batches = {
            'models': [],
            'datasets': [],
            'artifacts': []
        }

        for file_path in files:
            if file_path.endswith(self._MODEL_EXTS):
                batches['models'].append(file_path)
            elif file_path.endswith(self._DATASET_EXTS):
                batches['datasets'].append(file_path)
            else:
                batches['artifacts'].append(file_path)

        # Summary logging; tolerate files that vanished since discovery
        # (previously an OSError here aborted the whole protocol).
        for batch_type, batch_files in batches.items():
            total_size = 0
            for f in batch_files:
                try:
                    total_size += os.path.getsize(f)
                except OSError:
                    pass
            self._logger.info(
                f"{batch_type.upper()}: {len(batch_files)} files, {total_size / (1024**3):.1f} GB")

        return batches

    def create_staging_folders(self, batches):
        """Create staging folders (symlink trees) for each non-empty batch.

        Returns:
            dict[str, str]: batch type -> staged folder path, only for batches
            that ended up with at least one staged file.
        """
        staged_folders = {}

        for batch_type, files in batches.items():
            if not files:
                continue

            batch_dir = os.path.join(self.staging_dir, batch_type)
            os.makedirs(batch_dir, exist_ok=True)

            for file_path in files:
                # Map absolute source paths to repo-relative layout.
                rel_path = file_path
                if file_path.startswith('/data/experiments/'):
                    rel_path = file_path.replace('/data/experiments/', 'experiments/')
                elif file_path.startswith('/data/data/workspace/'):
                    rel_path = file_path.replace('/data/data/workspace/', 'data/')
                else:
                    rel_path = 'other/' + os.path.basename(file_path)

                target_path = os.path.join(batch_dir, rel_path)
                os.makedirs(os.path.dirname(target_path), exist_ok=True)

                # Symlink instead of copy to avoid duplicating large files.
                try:
                    os.symlink(file_path, target_path)
                    self._logger.debug(f"Symlinked {file_path} -> {target_path}")
                except OSError as e:
                    self._logger.warning(f"Skipping {file_path}: cannot create symlink ({e})")
                    continue

            # Count what actually got staged (some symlinks may have failed).
            staged_count = sum(len(fs) for _, _, fs in os.walk(batch_dir))

            if staged_count:
                staged_folders[batch_type] = batch_dir
                self._logger.info(f"Staged {staged_count} files in {batch_dir}")
            else:
                self._logger.warning(f"No files staged for {batch_type} batch")

        return staged_folders

    def upload_folder_batch(self, folder_path, repo_id, repo_type="model"):
        """Upload a folder batch to HF - only if repository exists.

        Args:
            folder_path: staged folder to upload.
            repo_id: target repository id (e.g. 'org/name').
            repo_type: 'model' or 'dataset'.

        Returns:
            bool: True on successful upload, False otherwise.
        """
        try:
            try:
                # Bug fix: repo_info defaults to repo_type="model", so dataset
                # repositories always appeared missing and were skipped.
                self.api.repo_info(repo_id, repo_type=repo_type)
                self._logger.info(f"Repository exists: {repo_id}")
            except Exception:
                self._logger.warning(
                    f"Skipping upload to {repo_id} - repository does not exist or access denied")
                return False

            self._logger.info(f"Uploading folder {folder_path} to {repo_id}")

            self.api.upload_folder(
                folder_path=folder_path,
                repo_id=repo_id,
                repo_type=repo_type,
                commit_message=f"DTO Archive: Uploading {os.path.basename(folder_path)} batch"
            )

            self._logger.info(f"✅ Successfully uploaded {folder_path} to {repo_id}")
            return True

        except Exception as e:
            self._logger.error(f"❌ Failed to upload {folder_path}: {e}")
            return False

    def execute_protocol(self):
        """Execute the complete archive protocol (discover → stage → upload).

        Always removes the staging directory, even on failure.
        """
        try:
            self._logger.info("🚀 STARTING FOLDER-BASED ARCHIVE PROTOCOL")

            files = self.discover_files()
            if not files:
                self._logger.warning("No files found for archiving")
                return

            batches = self.organize_by_type_and_size(files)
            staged_folders = self.create_staging_folders(batches)

            # (batch key, target repo, HF repo type) — artifacts live in a
            # dataset-type repo.
            upload_plan = (
                ('models', self.repo_models, 'model'),
                ('datasets', self.repo_datasets, 'dataset'),
                ('artifacts', self.repo_artifacts, 'dataset'),
            )
            upload_results = {}
            for batch_type, repo_id, repo_type in upload_plan:
                if batch_type in staged_folders:
                    upload_results[batch_type] = self.upload_folder_batch(
                        staged_folders[batch_type], repo_id, repo_type
                    )

            success_count = sum(1 for result in upload_results.values() if result)
            total_count = len(upload_results)

            self._logger.info(
                f"📊 Upload Summary: {success_count}/{total_count} batches successful")

            if success_count > 0:
                self._logger.info("✅ Archive protocol completed successfully")
            else:
                self._logger.error("❌ Archive protocol failed completely")

        finally:
            # Clean up the symlink farm regardless of outcome.
            if os.path.exists(self.staging_dir):
                shutil.rmtree(self.staging_dir)
                self._logger.info(f"Cleaned up staging directory: {self.staging_dir}")
| |
|
def load_env_file(path):
    """Load KEY=VALUE pairs from a .env-style file into os.environ.

    Skips blank lines, '#' comments, and malformed lines without '='
    (the previous inline parser raised ValueError on those). Keys and
    values are whitespace-stripped. Missing files are ignored.
    """
    if not os.path.exists(path):
        return
    with open(path) as f:
        for raw in f:
            line = raw.strip()
            # Guard against comments and lines with no '=' separator.
            if not line or line.startswith('#') or '=' not in line:
                continue
            key, value = line.split('=', 1)
            os.environ[key.strip()] = value.strip()


if __name__ == "__main__":
    load_env_file("/data/adaptai/platform/dataops/dto/.env")

    protocol = FolderArchiveProtocol()
    protocol.execute_protocol()