|
|
|
|
|
""" |
|
|
Prepare RetinaRadar Model for Hugging Face Deployment |
|
|
|
|
|
This script helps you prepare all necessary files for uploading your |
|
|
trained RetinaRadar model to Hugging Face. |
|
|
|
|
|
Usage: |
|
|
python prepare_for_hf.py --run-dir output/runs/run_ABC123 --output-dir ~/retinaradar_hf |
|
|
""" |
|
|
|
|
|
import argparse |
|
|
import shutil |
|
|
import json |
|
|
from pathlib import Path |
|
|
|
|
|
|
|
|
def prepare_deployment(run_dir: Path, output_dir: Path, checkpoint_type: str = "best"):
    """
    Prepare model files for Hugging Face deployment.

    Copies the selected checkpoint, the label metadata, and (optionally)
    the inference package from a training run directory into a flat
    deployment directory, then creates an empty ``examples/`` folder for
    sample images.

    Args:
        run_dir: Path to training run directory
        output_dir: Path to output directory for deployment files
        checkpoint_type: Which checkpoint to use ('best' or 'last')

    Raises:
        FileNotFoundError: If the run directory, the selected checkpoint,
            or the label metadata file does not exist.
    """
    run_dir = Path(run_dir)
    output_dir = Path(output_dir)

    if not run_dir.exists():
        raise FileNotFoundError(f"Run directory not found: {run_dir}")

    output_dir.mkdir(parents=True, exist_ok=True)
    print(f"📁 Created output directory: {output_dir}")

    # --- Checkpoint selection -------------------------------------------
    print("\n🔍 Looking for model checkpoint...")
    checkpoints_dir = run_dir / "checkpoints"

    checkpoint_path = None
    if checkpoint_type == "best":
        # "Best" checkpoints are saved with the epoch number in the name.
        checkpoints = list(checkpoints_dir.glob("*epoch*.ckpt"))
        if not checkpoints:
            print("⚠️  No best checkpoint found, using last.ckpt")
            checkpoint_type = "last"  # fall back to the final checkpoint
        else:
            # NOTE(review): lexicographic sort — assumes filenames sort so
            # the desired checkpoint comes first; confirm against the
            # checkpoint naming scheme used during training.
            checkpoint_path = sorted(checkpoints)[0]

    if checkpoint_type == "last":
        checkpoint_path = checkpoints_dir / "last.ckpt"

    if not checkpoint_path.exists():
        raise FileNotFoundError(f"Checkpoint not found: {checkpoint_path}")

    print(f"✅ Found checkpoint: {checkpoint_path.name}")

    # Copy under a fixed name so downstream inference code can find it.
    output_checkpoint = output_dir / "retinaradar_model.ckpt"
    shutil.copy2(checkpoint_path, output_checkpoint)
    print(f"📋 Copied to: {output_checkpoint}")

    size_mb = output_checkpoint.stat().st_size / (1024 * 1024)
    print(f"   Size: {size_mb:.1f} MB")
    if size_mb > 5000:
        # Very large uploads are slow and error-prone on the Hub.
        print("⚠️  WARNING: Checkpoint is >5GB. Consider using FP16 quantization.")

    # --- Label metadata (required) --------------------------------------
    print("\n🔍 Looking for metadata...")
    metadata_path = run_dir / "artifacts" / "label_metadata.json"

    if not metadata_path.exists():
        raise FileNotFoundError(f"Metadata not found: {metadata_path}")

    print(f"✅ Found metadata: {metadata_path}")

    output_metadata = output_dir / "label_metadata.json"
    shutil.copy2(metadata_path, output_metadata)
    print(f"📋 Copied to: {output_metadata}")

    # Echo a short summary of the labels so the operator can sanity-check.
    with open(metadata_path, "r") as f:
        metadata = json.load(f)
    print(f"   Features: {', '.join(metadata.get('feature_names', []))}")
    print(f"   Total labels: {metadata.get('num_labels', 'unknown')}")

    # --- Inference package (optional) -----------------------------------
    print("\n🔍 Looking for inference package...")
    inference_package_path = run_dir / "artifacts" / "inference_package.json"

    if inference_package_path.exists():
        output_package = output_dir / "inference_package.json"
        shutil.copy2(inference_package_path, output_package)
        print("✅ Copied inference package")

        with open(inference_package_path, "r") as f:
            package = json.load(f)
        config = package.get("config", {})
        print(f"   Model: {config.get('model_name', 'unknown')}")
        print(f"   Learning rate: {config.get('learning_rate', 'unknown')}")
    else:
        print("⚠️  Inference package not found (optional)")

    # --- Examples directory ---------------------------------------------
    print("\n📸 Creating examples directory...")
    examples_dir = output_dir / "examples"
    examples_dir.mkdir(exist_ok=True)
    print(f"✅ Created: {examples_dir}")
    print("   📝 Add 2-3 example images to this directory")

    # --- Summary ---------------------------------------------------------
    print("\n" + "=" * 60)
    print("✅ DEPLOYMENT PREPARATION COMPLETE!")
    print("=" * 60)
    print(f"\nAll files prepared in: {output_dir}")
    print("\nFiles created:")
    print("  ✓ retinaradar_model.ckpt")
    print("  ✓ label_metadata.json")
    if inference_package_path.exists():
        print("  ✓ inference_package.json")
    print("  ✓ examples/ (empty - add your images)")

    print("\n📝 Next steps:")
    print("1. Add 2-3 example images to examples/ directory")
    print("2. Copy hf_inference.py to this directory")
    print("3. Copy HF_MODEL_README.md as README.md")
    print("4. Follow the deployment guide to upload to Hugging Face")

    print(f"\nDeployment directory ready: {output_dir}")
    print("=" * 60)
|
|
|
|
|
|
|
|
def main():
    """CLI entry point: parse arguments and run deployment preparation.

    Returns:
        int: 0 on success, 1 on any failure (usable as a process exit code).
    """
    parser = argparse.ArgumentParser(
        description='Prepare RetinaRadar model for Hugging Face deployment',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Example:
  python prepare_for_hf.py \\
    --run-dir output/runs/run_ABC123-2025-01-15_120000 \\
    --output-dir ~/retinaradar_hf \\
    --checkpoint best
"""
    )

    parser.add_argument(
        '--run-dir',
        type=str,
        required=True,
        help='Path to the training run directory'
    )
    parser.add_argument(
        '--output-dir',
        type=str,
        required=True,
        help='Path to output directory for deployment files'
    )
    parser.add_argument(
        '--checkpoint',
        type=str,
        default='best',
        choices=['best', 'last'],
        help='Which checkpoint to use (default: best)'
    )

    args = parser.parse_args()

    # Broad catch is intentional at this top-level CLI boundary: report the
    # error and signal failure via the exit code instead of a traceback.
    try:
        prepare_deployment(
            run_dir=args.run_dir,
            output_dir=args.output_dir,
            checkpoint_type=args.checkpoint
        )
    except Exception as e:
        print(f"\n❌ Error: {e}")
        return 1

    return 0
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # raise SystemExit instead of the interactive `exit()` builtin, which is
    # provided by the `site` module and not guaranteed in all environments.
    raise SystemExit(main())
|
|
|