#!/usr/bin/env python3
"""
API Test Runner Script

Easy-to-use script to run API tests and view results.
"""

import os
import sys
import json
import argparse
from datetime import datetime

# Add current directory to path
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from api_test_suite import APITestSuite


def main():
    parser = argparse.ArgumentParser(description='Run API tests for Secure AI Agents Suite')
    parser.add_argument('--config', '-c', default='api_test_config.yaml',
                        help='Path to configuration file (default: api_test_config.yaml)')
    parser.add_argument('--output', '-o',
                        default=f'api_test_results_{datetime.now().strftime("%Y%m%d_%H%M%S")}.json',
                        help='Output file for results (default: api_test_results_TIMESTAMP.json)')
    parser.add_argument('--quiet', '-q', action='store_true',
                        help='Suppress console output, only save results')
    parser.add_argument('--test', '-t',
                        choices=['openai', 'google', 'elevenlabs', 'modal', 'all'],
                        default='all',
                        help='Which test to run (default: all)')
    parser.add_argument('--validate-only', action='store_true',
                        help='Only validate configuration, do not run tests')
    parser.add_argument('--show-summary', action='store_true',
                        help='Show summary of available tests')

    args = parser.parse_args()

    if args.show_summary:
        print("🔍 Available API Tests:")
        print("  • openai - OpenAI GPT models for text generation")
        print("  • google - Google Generative AI models")
        print("  • elevenlabs - ElevenLabs text-to-speech")
        print("  • modal - Modal deployment platform")
        print("  • all - Run all tests (default)")
        print()
        print("📝 Configuration:")
        print(f"  • Config file: {args.config}")
        print(f"  • Results will be saved to: {args.output}")
        return

    print("🚀 Secure AI Agents Suite - API Test Runner")
    print("=" * 50)

    # Initialize test suite
    try:
        test_suite = APITestSuite(args.config)
        print(f"✅ Configuration loaded from: {args.config}")
    except FileNotFoundError:
        print(f"❌ Configuration file not found: {args.config}")
        print("💡 Please copy api_test_config.yaml and fill in your API keys")
        print("💡 Usage: cp api_test_config.yaml my_config.yaml")
        print("💡 Then edit my_config.yaml with your API keys")
        return
    except Exception as e:
        print(f"❌ Failed to load configuration: {e}")
        return

    if args.validate_only:
        print("✅ Configuration validation successful!")
        print("💡 Run tests with: python test_runner.py")
        return

    # Validate API keys before running tests
    print("\n🔑 Validating API key configuration...")
    missing_keys = []

    if args.test in ['openai', 'all']:
        if not test_suite.config.get('openai', {}).get('api_key'):
            missing_keys.append('OpenAI')
    if args.test in ['google', 'all']:
        if not test_suite.config.get('google', {}).get('api_key'):
            missing_keys.append('Google')
    if args.test in ['elevenlabs', 'all']:
        if not test_suite.config.get('elevenlabs', {}).get('api_key'):
            missing_keys.append('ElevenLabs')
    if args.test in ['modal', 'all']:
        if not test_suite.config.get('modal', {}).get('api_key'):
            missing_keys.append('Modal')

    if missing_keys:
        print("⚠️ Missing API keys for:")
        for key in missing_keys:
            print(f"  • {key}")
        print("\n💡 Please update your configuration file with valid API keys")
        return

    print("✅ All required API keys found!")

    # Run tests
    print(f"\n🧪 Running {args.test.upper()} tests...")
    print("-" * 30)

    try:
        if args.test == 'openai':
            results = test_suite.test_openai_connection()
            results = {**results, **test_suite.test_openai_text_generation()}
            results = {**results, **test_suite.test_openai_batch_processing()}
        elif args.test == 'google':
            results = test_suite.test_google_connection()
            results = {**results, **test_suite.test_google_text_generation()}
        elif args.test == 'elevenlabs':
            results = test_suite.test_elevenlabs_connection()
            results = {**results, **test_suite.test_elevenlabs_tts()}
            results = {**results, **test_suite.test_elevenlabs_voice_cloning()}
        elif args.test == 'modal':
            results = test_suite.test_modal_connection()
            results = {**results, **test_suite.test_modal_function_deployment()}
        elif args.test == 'all':
            results = test_suite.run_all_tests()

        # Save results
        with open(args.output, 'w') as f:
            json.dump(results, f, indent=2)

        print("\n📊 Test Results Summary")
        print("=" * 50)

        # Display summary (the 'summary' block is produced by the test suite;
        # single-test runs may not include it, in which case the counts fall back to 0)
        total_tests = results.get('summary', {}).get('total_tests', 0)
        passed_tests = results.get('summary', {}).get('passed_tests', 0)
        failed_tests = results.get('summary', {}).get('failed_tests', 0)
        success_rate = results.get('summary', {}).get('success_rate', 0)
        total_duration = results.get('summary', {}).get('total_duration', 0)

        print(f"📈 Total Tests: {total_tests}")
        print(f"✅ Passed: {passed_tests}")
        print(f"❌ Failed: {failed_tests}")
        print(f"📊 Success Rate: {success_rate:.1f}%")
        print(f"⏱️ Total Duration: {total_duration:.2f}s")

        # Show detailed results
        print("\n📋 Detailed Results:")
        print("-" * 30)
        for test_name, result in results.items():
            if test_name == 'summary':
                continue
            if isinstance(result, dict):
                status = "✅ PASS" if result.get('success', False) else "❌ FAIL"
                duration = result.get('duration', 0)
                print(f"{test_name:<25} {status} ({duration:.2f}s)")
                if not result.get('success', False) and result.get('error'):
                    print(f"    Error: {result['error']}")

        # Results location
        print(f"\n💾 Full results saved to: {args.output}")

        # Success/failure summary
        if success_rate >= 80:
            print("\n🎉 All or most tests passed! Your API integrations are working correctly.")
        elif success_rate >= 60:
            print("\n⚠️ Some tests failed. Check the results above for details.")
        else:
            print("\n❌ Many tests failed. Please check your API keys and configuration.")

    except Exception as e:
        print(f"\n❌ Test execution failed: {e}")
        print("💡 Check your API keys and network connection")
        return

    print("\n✨ Test run completed!")


if __name__ == "__main__":
    main()
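
# ---------------------------------------------------------------------------
# Illustrative notes (not part of the runtime logic).
#
# Configuration sketch: the full schema of api_test_config.yaml is defined by
# APITestSuite; this runner only checks the per-service 'api_key' entries shown
# below. The values are placeholders, not real keys.
#
#   openai:
#     api_key: "YOUR_OPENAI_API_KEY"
#   google:
#     api_key: "YOUR_GOOGLE_API_KEY"
#   elevenlabs:
#     api_key: "YOUR_ELEVENLABS_API_KEY"
#   modal:
#     api_key: "YOUR_MODAL_API_KEY"
#
# Example invocations (assuming this file is saved as test_runner.py, as the
# --validate-only hint above suggests):
#
#   python test_runner.py --show-summary
#   python test_runner.py --validate-only --config my_config.yaml
#   python test_runner.py --test openai --output openai_results.json
# ---------------------------------------------------------------------------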