File size: 10,799 Bytes
fd357f4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
#!/usr/bin/env python3
"""
DTO Archive Protocol - Folder-based Upload Alternative
Uses folder upload instead of individual file upload to work around Xet backend limitations
"""

import os
import logging
import tempfile
import shutil
from pathlib import Path
from huggingface_hub import HfApi

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class FolderArchiveProtocol:
    """Archive local model/data files to Hugging Face Hub via folder uploads.

    Files are staged as symlinks in a temporary directory, grouped by type
    (models / datasets / artifacts), and pushed with ``upload_folder`` instead
    of per-file uploads to work around Xet backend limitations.
    """

    # Files larger than this are skipped entirely (Xet backend limit).
    MAX_FILE_SIZE_BYTES = 50 * 1024**3  # 50 GB

    # Only files with these extensions are considered for archiving.
    ARCHIVE_EXTENSIONS = ('.safetensors', '.pt', '.bin', '.parquet', '.jsonl', '.csv')

    def __init__(self):
        # Hub credentials are mandatory; fail fast if missing.
        self.token = os.getenv('HF_TOKEN')
        if not self.token:
            raise ValueError("HF_TOKEN environment variable not set")

        self.api = HfApi(token=self.token)
        self.repo_models = os.getenv('HF_REPO_MODELS', 'LevelUp2x/dto-models')
        self.repo_datasets = os.getenv('HF_REPO_DATASETS', 'LevelUp2x/dto-datasets')
        self.repo_artifacts = os.getenv('HF_REPO_ARTIFACTS', 'LevelUp2x/dto-artifacts')

        # Temporary staging area; removed in execute_protocol()'s finally block.
        self.staging_dir = tempfile.mkdtemp(prefix="dto_archive_")
        logger.info(f"Staging directory: {self.staging_dir}")

    def _scan_directory(self, base_path, skip_parts=()):
        """Walk *base_path* collecting archivable files.

        Returns ``(files, oversized)`` where ``files`` holds paths no bigger
        than MAX_FILE_SIZE_BYTES and ``oversized`` holds ``(path, size_gb)``
        tuples for files above the limit. Directories whose path contains any
        of *skip_parts* are ignored. A missing *base_path* yields two empty
        lists.
        """
        files_to_archive = []
        oversized = []
        if not os.path.exists(base_path):
            return files_to_archive, oversized

        for root, _, files in os.walk(base_path):
            if skip_parts and any(part in root for part in skip_parts):
                continue
            for file in files:
                if not file.endswith(self.ARCHIVE_EXTENSIONS):
                    continue
                file_path = os.path.join(root, file)
                try:
                    file_size = os.path.getsize(file_path)
                except OSError:
                    logger.warning(f"Could not get size for {file_path}, skipping")
                    continue
                if file_size > self.MAX_FILE_SIZE_BYTES:
                    oversized.append((file_path, file_size / 1024**3))
                else:
                    files_to_archive.append(file_path)
        return files_to_archive, oversized

    def discover_files(self):
        """Discover files for archiving, excluding extremely large files (>50GB)."""
        logger.info("=== DISCOVERY PHASE ===")

        # 1. Experiments directory
        files_to_archive, extremely_large_files = self._scan_directory("/data/experiments")

        # 2. Data workspace (excluding cache/config directories)
        workspace_files, workspace_oversized = self._scan_directory(
            "/data/data/workspace", skip_parts=('.cache', '.local', '.config')
        )
        files_to_archive.extend(workspace_files)
        extremely_large_files.extend(workspace_oversized)

        # Log extremely large files that will be skipped
        if extremely_large_files:
            logger.warning(f"Skipping {len(extremely_large_files)} extremely large files (>50GB):")
            for file_path, size_gb in extremely_large_files:
                logger.warning(f"  {size_gb:.1f}GB - {file_path}")

        logger.info(f"Discovered {len(files_to_archive)} files for archiving (after size filtering)")
        return files_to_archive

    def organize_by_type_and_size(self, files):
        """Organize files by type and approximate size for batch uploading.

        Returns a dict with keys 'models', 'datasets', 'artifacts' mapping to
        lists of file paths, classified by extension.
        """
        batches = {
            'models': [],      # Model files (.safetensors, .pt, .bin)
            'datasets': [],    # Data files (.parquet, .jsonl, .csv)
            'artifacts': []    # Everything else
        }

        for file_path in files:
            if file_path.endswith(('.safetensors', '.pt', '.bin')):
                batches['models'].append(file_path)
            elif file_path.endswith(('.parquet', '.jsonl', '.csv')):
                batches['datasets'].append(file_path)
            else:
                batches['artifacts'].append(file_path)

        # Log batch sizes
        for batch_type, batch_files in batches.items():
            total_size = sum(os.path.getsize(f) for f in batch_files)
            logger.info(f"{batch_type.upper()}: {len(batch_files)} files, {total_size / (1024**3):.1f} GB")

        return batches

    def create_staging_folders(self, batches):
        """Create staging folders for each batch.

        Mirrors each file into ``self.staging_dir/<batch_type>/`` via symlinks
        (avoids copying large files), preserving a normalized relative layout.
        Returns a dict mapping batch type -> staged folder path, containing
        only batches that actually received at least one file.
        """
        staged_folders = {}

        for batch_type, batch_files in batches.items():
            if not batch_files:
                continue

            batch_dir = os.path.join(self.staging_dir, batch_type)
            os.makedirs(batch_dir, exist_ok=True)

            for file_path in batch_files:
                # Preserve directory structure within the batch, rebased under
                # a short prefix per source tree.
                if file_path.startswith('/data/experiments/'):
                    rel_path = file_path.replace('/data/experiments/', 'experiments/')
                elif file_path.startswith('/data/data/workspace/'):
                    rel_path = file_path.replace('/data/data/workspace/', 'data/')
                else:
                    rel_path = 'other/' + os.path.basename(file_path)

                target_path = os.path.join(batch_dir, rel_path)
                os.makedirs(os.path.dirname(target_path), exist_ok=True)

                # Create symbolic link to avoid copying large files
                try:
                    os.symlink(file_path, target_path)
                    logger.debug(f"Symlinked {file_path} -> {target_path}")
                except OSError as e:
                    # If symlink fails (e.g. exists already, or FS forbids
                    # links), skip the file rather than aborting the batch.
                    logger.warning(f"Skipping {file_path}: cannot create symlink ({e})")
                    continue

            # Check if any files were actually staged
            staged_files = []
            for root, _, walked in os.walk(batch_dir):
                for name in walked:
                    staged_files.append(os.path.join(root, name))

            if staged_files:
                staged_folders[batch_type] = batch_dir
                logger.info(f"Staged {len(staged_files)} files in {batch_dir}")
            else:
                logger.warning(f"No files staged for {batch_type} batch")

        return staged_folders

    def upload_folder_batch(self, folder_path, repo_id, repo_type="model"):
        """Upload a folder batch to HF - only if repository exists.

        Returns True on success, False when the repo is missing/inaccessible
        or the upload raised.
        """
        try:
            # Check if repository exists first. repo_type must be forwarded:
            # repo_info() defaults to the *model* namespace, which would make
            # valid dataset repos look missing.
            try:
                self.api.repo_info(repo_id, repo_type=repo_type)
                logger.info(f"Repository exists: {repo_id}")
            except Exception:
                logger.warning(f"Skipping upload to {repo_id} - repository does not exist or access denied")
                return False

            logger.info(f"Uploading folder {folder_path} to {repo_id}")

            self.api.upload_folder(
                folder_path=folder_path,
                repo_id=repo_id,
                repo_type=repo_type,
                commit_message=f"DTO Archive: Uploading {os.path.basename(folder_path)} batch"
            )

            logger.info(f"โœ… Successfully uploaded {folder_path} to {repo_id}")
            return True

        except Exception as e:
            logger.error(f"โŒ Failed to upload {folder_path}: {e}")
            return False

    def execute_protocol(self):
        """Execute the complete archive protocol.

        Phases: discovery -> organization -> staging -> upload. The staging
        directory is always removed, even on failure.
        """
        try:
            logger.info("๐Ÿš€ STARTING FOLDER-BASED ARCHIVE PROTOCOL")

            # Phase 1: Discovery
            files = self.discover_files()
            if not files:
                logger.warning("No files found for archiving")
                return

            # Phase 2: Organization
            batches = self.organize_by_type_and_size(files)

            # Phase 3: Staging
            staged_folders = self.create_staging_folders(batches)

            # Phase 4: Upload — each batch goes to its dedicated repo.
            # Note: artifacts intentionally go to a dataset-type repo.
            batch_targets = {
                'models': (self.repo_models, 'model'),
                'datasets': (self.repo_datasets, 'dataset'),
                'artifacts': (self.repo_artifacts, 'dataset'),
            }
            upload_results = {}
            for batch_type, (repo_id, repo_type) in batch_targets.items():
                if batch_type in staged_folders:
                    upload_results[batch_type] = self.upload_folder_batch(
                        staged_folders[batch_type], repo_id, repo_type
                    )

            # Summary
            success_count = sum(1 for result in upload_results.values() if result)
            total_count = len(upload_results)

            logger.info(f"๐Ÿ“Š Upload Summary: {success_count}/{total_count} batches successful")

            if success_count > 0:
                logger.info("โœ… Archive protocol completed successfully")
            else:
                logger.error("โŒ Archive protocol failed completely")

        finally:
            # Cleanup staging directory
            if os.path.exists(self.staging_dir):
                shutil.rmtree(self.staging_dir)
                logger.info(f"Cleaned up staging directory: {self.staging_dir}")

if __name__ == "__main__":
    # Load environment variables
    env_file = "/data/adaptai/platform/dataops/dto/.env"
    if os.path.exists(env_file):
        with open(env_file) as f:
            for line in f:
                if line.strip() and not line.startswith('#'):
                    key, value = line.strip().split('=', 1)
                    os.environ[key] = value
    
    protocol = FolderArchiveProtocol()
    protocol.execute_protocol()