#!/usr/bin/env python3
"""
Felix Framework - Deployment Verification Runner
Quick script to run deployment verification with different configurations.
This provides an easy way to verify deployment readiness across different
scenarios without running the full test suite.
Usage:
python scripts/run_deployment_verification.py
python scripts/run_deployment_verification.py --quick
python scripts/run_deployment_verification.py --gpu-only
python scripts/run_deployment_verification.py --export report.json
"""
import os
import sys
import asyncio
import argparse
# Add src to path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))
from deployment_verification import DeploymentVerificationFramework, setup_logging
async def quick_verification():
    """Run the fast deployment check: only the essential components.

    Returns the deployment report produced by the framework after the
    essential checks have completed.
    """
    print("🚀 Running Quick Felix Framework Deployment Verification...")
    framework = DeploymentVerificationFramework()
    # Essential checks only: math precision, ZeroGPU, and the web UI layer.
    essential_checks = (
        framework._verify_core_mathematical_precision,
        framework._verify_zerogpu_integration,
        framework._verify_web_interface_compatibility,
    )
    for check in essential_checks:
        await check()
    return framework._generate_deployment_report()
async def gpu_verification():
    """Run only the GPU-related verification checks.

    Returns the deployment report produced by the framework after the
    GPU checks have completed.
    """
    print("⚡ Running GPU-Specific Felix Framework Verification...")
    framework = DeploymentVerificationFramework()
    # GPU-related checks only: ZeroGPU integration and GPU memory handling.
    gpu_checks = (
        framework._verify_zerogpu_integration,
        framework._verify_gpu_memory_management,
    )
    for check in gpu_checks:
        await check()
    return framework._generate_deployment_report()
def display_summary_report(report):
    """Print a concise, human-readable summary of a verification report.

    Args:
        report: Deployment report object exposing ``ready_for_deployment``
            (bool), ``overall_score`` (float), ``validation_results``
            (iterable of results with ``component``, ``success`` and
            ``score`` attributes), ``critical_issues`` (list of str),
            ``recommendations`` (list of str), and ``system_info`` (dict,
            read via ``.get('total_validation_time', 0)``).
    """
    from collections import defaultdict

    print("\n" + "=" * 60)
    print("🌪️ FELIX FRAMEWORK DEPLOYMENT SUMMARY")
    print("=" * 60)

    # Overall status line.
    status_emoji = "✅" if report.ready_for_deployment else "❌"
    print(f"Status: {status_emoji} {'READY FOR DEPLOYMENT' if report.ready_for_deployment else 'NOT READY'}")
    print(f"Overall Score: {report.overall_score:.1%}")

    # Group results by component (defaultdict replaces the manual
    # check-then-append pattern) and summarize each group.
    print("\n📊 Component Results:")
    components = defaultdict(list)
    for result in report.validation_results:
        components[result.component].append(result)
    for component, results in components.items():
        passed = sum(1 for r in results if r.success)
        total = len(results)  # always >= 1: a group exists only if a result was appended
        avg_score = sum(r.score for r in results) / total
        status = "✅" if passed == total else "⚠️" if passed > 0 else "❌"
        print(f" {status} {component}: {passed}/{total} tests passed ({avg_score:.1%})")

    # Show at most the top 3 critical issues.
    if report.critical_issues:
        print("\n🚨 Critical Issues:")
        for issue in report.critical_issues[:3]:
            print(f" - {issue}")

    # Show at most the top 3 recommendations.
    if report.recommendations:
        print("\n💡 Top Recommendations:")
        for rec in report.recommendations[:3]:
            print(f" - {rec}")

    print(f"\n⏱️ Verification completed in {report.system_info.get('total_validation_time', 0):.1f} seconds")
    print("=" * 60)
def _build_parser():
    """Construct the CLI argument parser for the verification runner."""
    parser = argparse.ArgumentParser(description='Felix Framework Deployment Verification Runner')
    parser.add_argument('--quick', action='store_true', help='Run quick verification (essential components)')
    parser.add_argument('--gpu-only', action='store_true', help='Run GPU-specific tests only')
    parser.add_argument('--full', action='store_true', help='Run full comprehensive verification')
    parser.add_argument('--export', help='Export detailed report to JSON file')
    parser.add_argument('--verbose', action='store_true', help='Enable verbose output')
    return parser


async def main():
    """Main entry point for deployment verification runner.

    Returns a process exit code: 0 when the report says the deployment is
    ready, 1 when it is not, 2 when verification raised an exception.
    """
    args = _build_parser().parse_args()

    # Setup logging
    setup_logging(args.verbose)

    try:
        # Dispatch to the requested verification mode.
        if args.quick:
            report = await quick_verification()
        elif args.gpu_only:
            report = await gpu_verification()
        elif args.full:
            framework = DeploymentVerificationFramework()
            report = await framework.run_full_verification()
        else:
            # Default mode: run the key component checks.
            print("🔍 Running Standard Felix Framework Deployment Verification...")
            framework = DeploymentVerificationFramework()
            standard_checks = (
                framework._verify_core_mathematical_precision,
                framework._verify_zerogpu_integration,
                framework._verify_web_interface_compatibility,
                framework._verify_performance_benchmarks,
            )
            for check in standard_checks:
                await check()
            report = framework._generate_deployment_report()

        display_summary_report(report)

        # Optionally dump the full report as JSON.
        if args.export:
            import json
            with open(args.export, 'w') as f:
                json.dump(report.to_dict(), f, indent=2)
            print(f"\n📄 Detailed report exported to: {args.export}")

        return 0 if report.ready_for_deployment else 1

    except Exception as e:
        print(f"\n❌ Verification failed: {e}")
        if args.verbose:
            import traceback
            print(traceback.format_exc())
        return 2
if __name__ == "__main__":
    # Run the async entry point and propagate its exit code to the shell.
    sys.exit(asyncio.run(main()))