#!/usr/bin/env python3
"""
Smart sync script that automatically handles both GitHub and Hugging Face
uploads based on file types and sizes.
"""

import argparse
import json
import os
import subprocess
import sys
from pathlib import Path


def get_file_size_mb(filepath):
    """Get file size in MB."""
    return os.path.getsize(filepath) / (1024 * 1024)


def classify_files(dataset_dir):
    """Classify files for GitHub vs Hugging Face upload."""
    github_files = []  # code, configs, and small data files
    hf_files = []      # NumPy arrays >= 50 MB, anything else >= 100 MB

    for root, dirs, files in os.walk(dataset_dir):
        for file in files:
            filepath = os.path.join(root, file)
            size_mb = get_file_size_mb(filepath)

            # Classification rules: .npy/.npz arrays of 50 MB or more, and
            # any file at or above GitHub's 100 MB limit, go to Hugging Face.
            if (file.endswith(('.npy', '.npz')) and size_mb >= 50) or size_mb >= 100:
                hf_files.append(filepath)
            else:
                github_files.append(filepath)

    return github_files, hf_files


def sync_to_github(project_dir, commit_message):
    """Sync code and small files to GitHub."""
    print("šŸ“” Syncing to GitHub...")
    os.chdir(project_dir)

    # Create a .gitignore for large files (if missing) *before* staging,
    # so the large data files are never added in the first place.
    gitignore_path = Path(".gitignore")
    if not gitignore_path.exists():
        with open(gitignore_path, 'w') as f:
            f.write("""# Large data files (synced to Hugging Face)
dataset/**/*.npy
dataset/**/*.npz
dataset/**/autoencoder_stage1/weight/*.pt
dataset/**/autoencoder_stage1/infer_result/

# Python
__pycache__/
*.pyc
*.pyo
*.egg-info/

# IDE
.vscode/
.idea/
*.swp
*.swo

# OS
.DS_Store
Thumbs.db
""")

    # Stage everything that is not ignored (including the new .gitignore).
    subprocess.run(["git", "add", "."], check=True)

    # Commit and push
    try:
        subprocess.run(["git", "commit", "-m", commit_message], check=True)
        subprocess.run(["git", "push"], check=True)
        print("āœ… GitHub sync completed")
        return True
    except subprocess.CalledProcessError as e:
        print(f"āš ļø GitHub sync failed: {e}")
        return False


def sync_to_huggingface(dataset_dir, hf_repo, commit_message):
    """Sync large files to Hugging Face."""
    print("šŸ¤— Syncing to Hugging Face...")

    # Use the existing upload script
    upload_script = os.path.join(dataset_dir, "upload_to_hf.py")
    if not os.path.exists(upload_script):
        print(f"āŒ Upload script not found: {upload_script}")
        return False

    # Run the upload script with the same interpreter as this script.
    cmd = [
        sys.executable, upload_script,
        "--repo-name", hf_repo,
        "--commit-message", commit_message,
        "--dataset-path", dataset_dir,
    ]
    try:
        subprocess.run(cmd, check=True)
        print("āœ… Hugging Face sync completed")
        return True
    except subprocess.CalledProcessError as e:
        print(f"āš ļø Hugging Face sync failed: {e}")
        return False


def create_deployment_info(project_dir, github_repo, hf_dataset):
    """Create deployment information files (DEPLOYMENT.json and DEPLOYMENT.md)."""
    deployment_info = {
        "github_repo": github_repo,
        "hf_dataset": hf_dataset,
        "quick_setup_command": f"python setup_environment.py --hf-dataset {hf_dataset} --github-repo {github_repo}",
        "description": "Use the quick_setup_command to recreate this environment on any server",
    }

    info_path = os.path.join(project_dir, "DEPLOYMENT.json")
    with open(info_path, 'w') as f:
        json.dump(deployment_info, f, indent=2)

    # Also create a README for deployment
    readme_path = os.path.join(project_dir, "DEPLOYMENT.md")
    with open(readme_path, 'w') as f:
        f.write(f"""# STAMP Project Deployment

## Quick Setup on New Server

```bash
# 1. Download setup script
wget https://raw.githubusercontent.com/your-username/STAMP/main/setup_environment.py

# 2. Run setup (auto-downloads everything)
python setup_environment.py --hf-dataset {hf_dataset} --github-repo {github_repo}

# 3. Start working
cd STAMP
python check_environment.py
bash quick_train.sh
```

## Manual Setup

1. **Clone Code**: `git clone {github_repo}`
2. **Download Dataset**: Use the Hugging Face CLI or the `datasets` library
3. **Install Dependencies**: `pip install torch torchvision numpy tqdm huggingface_hub datasets`

## Repository Structure

- **GitHub**: Code, configs, small files (< 100 MB)
- **Hugging Face**: Large datasets, model weights (unlimited)

## Available Scripts

- `quick_train.sh` - Interactive training
- `quick_inference.sh` - Interactive inference
- `check_environment.py` - Environment verification
""")

    print(f"āœ… Deployment info created: {info_path}")


def main():
    parser = argparse.ArgumentParser(description="Smart sync to GitHub and Hugging Face")
    parser.add_argument("--github-repo", type=str, required=True,
                        help="GitHub repository name (user/repo)")
    parser.add_argument("--hf-dataset", type=str, required=True,
                        help="Hugging Face dataset name")
    parser.add_argument("--commit-message", type=str, default="Auto-sync project",
                        help="Commit message")
    parser.add_argument("--project-dir", type=str, default=".",
                        help="Project directory")

    args = parser.parse_args()

    print("šŸ”„ Smart Sync: GitHub + Hugging Face")
    print("=" * 40)

    project_dir = os.path.abspath(args.project_dir)
    dataset_dir = os.path.join(project_dir, "dataset")

    # Create deployment info
    create_deployment_info(project_dir, args.github_repo, args.hf_dataset)

    # Sync to GitHub (code + small files)
    github_success = sync_to_github(project_dir, args.commit_message)

    # Sync to Hugging Face (large files)
    hf_success = sync_to_huggingface(dataset_dir, args.hf_dataset, args.commit_message)

    # Summary
    print("\nšŸ“Š Sync Summary:")
    print(f"GitHub: {'āœ…' if github_success else 'āŒ'}")
    print(f"Hugging Face: {'āœ…' if hf_success else 'āŒ'}")

    if github_success and hf_success:
        print("\nšŸŽ‰ Complete sync successful!")
        print(f"🌐 GitHub: https://github.com/{args.github_repo}")
        print(f"šŸ¤— Hugging Face: https://huggingface.co/datasets/{args.hf_dataset}")
        print("\nšŸ“‹ Quick setup command for new servers:")
        print(f"python setup_environment.py --hf-dataset {args.hf_dataset} "
              f"--github-repo https://github.com/{args.github_repo}.git")


if __name__ == "__main__":
    main()
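
# --- Example usage --------------------------------------------------------
# A minimal invocation sketch, assuming this file is saved as
# `smart_sync.py` (the filename is a placeholder, as are the repo and
# dataset names), git push access is configured, and Hugging Face
# credentials are available via `huggingface-cli login` or HF_TOKEN:
#
#   python smart_sync.py \
#       --github-repo your-username/STAMP \
#       --hf-dataset your-username/stamp-dataset \
#       --commit-message "Sync latest training run"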