#!/usr/bin/env python3
"""
API Test Runner Script
Easy-to-use script to run API tests and view results
"""
import os
import sys
import json
import argparse
from datetime import datetime

# Add current directory to path
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from api_test_suite import APITestSuite
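# The API-key check in main() relies only on the top-level sections sketched
# below; the bundled api_test_config.yaml may carry additional fields (model
# names, voices, timeouts, ...) that APITestSuite uses internally. A minimal
# sketch of the assumed layout:
#
#   openai:
#     api_key: "YOUR_OPENAI_KEY"
#   google:
#     api_key: "YOUR_GOOGLE_KEY"
#   elevenlabs:
#     api_key: "YOUR_ELEVENLABS_KEY"
#   modal:
#     api_key: "YOUR_MODAL_KEY"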

def main():
    parser = argparse.ArgumentParser(description='Run API tests for Secure AI Agents Suite')
    parser.add_argument('--config', '-c', default='api_test_config.yaml',
                        help='Path to configuration file (default: api_test_config.yaml)')
    parser.add_argument('--output', '-o', default=f'api_test_results_{datetime.now().strftime("%Y%m%d_%H%M%S")}.json',
                        help='Output file for results (default: api_test_results_TIMESTAMP.json)')
    parser.add_argument('--quiet', '-q', action='store_true',
                        help='Suppress console output, only save results')
    parser.add_argument('--test', '-t', choices=['openai', 'google', 'elevenlabs', 'modal', 'all'],
                        default='all', help='Which test to run (default: all)')
    parser.add_argument('--validate-only', action='store_true',
                        help='Only validate configuration, do not run tests')
    parser.add_argument('--show-summary', action='store_true',
                        help='Show summary of available tests')
    args = parser.parse_args()
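    # NOTE: args.quiet is defined above but is not currently checked by the
    # console output below.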

    if args.show_summary:
        print("📋 Available API Tests:")
        print("   • openai - OpenAI GPT models for text generation")
        print("   • google - Google Generative AI models")
        print("   • elevenlabs - ElevenLabs text-to-speech")
        print("   • modal - Modal deployment platform")
        print("   • all - Run all tests (default)")
        print()
        print("📋 Configuration:")
        print(f"   • Config file: {args.config}")
        print(f"   • Results will be saved to: {args.output}")
        return
| print("π Secure AI Agents Suite - API Test Runner") | |
| print("=" * 50) | |
| # Initialize test suite | |
| try: | |
| test_suite = APITestSuite(args.config) | |
| print(f"β Configuration loaded from: {args.config}") | |
| except FileNotFoundError: | |
| print(f"β Configuration file not found: {args.config}") | |
| print("π‘ Please copy api_test_config.yaml and fill in your API keys") | |
| print("π‘ Usage: cp api_test_config.yaml my_config.yaml") | |
| print("π‘ Then edit my_config.yaml with your API keys") | |
| return | |
| except Exception as e: | |
| print(f"β Failed to load configuration: {e}") | |
| return | |
| if args.validate_only: | |
| print("β Configuration validation successful!") | |
| print("π‘ Run tests with: python test_runner.py") | |
| return | |

    # Validate API keys before running tests
    print("\n🔍 Validating API key configuration...")
    missing_keys = []
    for section, display_name in [('openai', 'OpenAI'),
                                  ('google', 'Google'),
                                  ('elevenlabs', 'ElevenLabs'),
                                  ('modal', 'Modal')]:
        if args.test in [section, 'all'] and not test_suite.config.get(section, {}).get('api_key'):
            missing_keys.append(display_name)

    if missing_keys:
        print("⚠️ Missing API keys for:")
        for key in missing_keys:
            print(f"   • {key}")
        print("\n💡 Please update your configuration file with valid API keys")
        return

    print("✅ All required API keys found!")

    # Run tests
    print(f"\n🧪 Running {args.test.upper()} tests...")
    print("-" * 30)
    try:
        if args.test == 'openai':
            results = test_suite.test_openai_connection()
            results = {**results, **test_suite.test_openai_text_generation()}
            results = {**results, **test_suite.test_openai_batch_processing()}
        elif args.test == 'google':
            results = test_suite.test_google_connection()
            results = {**results, **test_suite.test_google_text_generation()}
        elif args.test == 'elevenlabs':
            results = test_suite.test_elevenlabs_connection()
            results = {**results, **test_suite.test_elevenlabs_tts()}
            results = {**results, **test_suite.test_elevenlabs_voice_cloning()}
        elif args.test == 'modal':
            results = test_suite.test_modal_connection()
            results = {**results, **test_suite.test_modal_function_deployment()}
        else:  # args.test == 'all'
            results = test_suite.run_all_tests()

        # Save results
        with open(args.output, 'w') as f:
            json.dump(results, f, indent=2)
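        # Assumed shape of the saved JSON, inferred from how the results are
        # consumed below; the test-name keys are illustrative only, and the
        # real api_test_suite module may add further fields:
        # {
        #   "openai_connection": {"success": true, "duration": 1.23, "error": null},
        #   ...,
        #   "summary": {"total_tests": 4, "passed_tests": 3, "failed_tests": 1,
        #               "success_rate": 75.0, "total_duration": 12.3}
        # }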
| print(f"\nπ Test Results Summary") | |
| print("=" * 50) | |
| # Display summary | |
| total_tests = results.get('summary', {}).get('total_tests', 0) | |
| passed_tests = results.get('summary', {}).get('passed_tests', 0) | |
| failed_tests = results.get('summary', {}).get('failed_tests', 0) | |
| success_rate = results.get('summary', {}).get('success_rate', 0) | |
| total_duration = results.get('summary', {}).get('total_duration', 0) | |
| print(f"π Total Tests: {total_tests}") | |
| print(f"β Passed: {passed_tests}") | |
| print(f"β Failed: {failed_tests}") | |
| print(f"π Success Rate: {success_rate:.1f}%") | |
| print(f"β±οΈ Total Duration: {total_duration:.2f}s") | |
| # Show detailed results | |
| print("\nπ Detailed Results:") | |
| print("-" * 30) | |
| for test_name, result in results.items(): | |
| if test_name == 'summary': | |
| continue | |
| if isinstance(result, dict): | |
| status = "β PASS" if result.get('success', False) else "β FAIL" | |
| duration = result.get('duration', 0) | |
| print(f"{test_name:<25} {status} ({duration:.2f}s)") | |
| if not result.get('success', False) and result.get('error'): | |
| print(f" Error: {result['error']}") | |

        # Point to the full results file
        print(f"\n💾 Full results saved to: {args.output}")

        # Success/failure summary
        if failed_tests == 0 and total_tests > 0:
            print("\n🎉 All tests passed! Your API integrations are working correctly.")
        elif success_rate >= 60:
            print("\n⚠️ Some tests failed. Check the results above for details.")
        else:
            print("\n❌ Many tests failed. Please check your API keys and configuration.")
    except Exception as e:
        print(f"\n❌ Test execution failed: {e}")
        print("💡 Check your API keys and network connection")
        return

    print("\n✨ Test run completed!")


if __name__ == "__main__":
    main()