#!/usr/bin/env python3
"""
API Test Runner Script
Easy-to-use script to run API tests and view results
"""
import os
import sys
import json
import argparse
from datetime import datetime
# Add current directory to path
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from api_test_suite import APITestSuite
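# APITestSuite is assumed (from the calls below) to expose a parsed `config`
# dict plus per-service test methods (e.g. test_openai_connection()) and a
# run_all_tests() aggregator; see api_test_suite for the actual interface.
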
def main():
    parser = argparse.ArgumentParser(description='Run API tests for Secure AI Agents Suite')
    parser.add_argument('--config', '-c', default='api_test_config.yaml',
                        help='Path to configuration file (default: api_test_config.yaml)')
    parser.add_argument('--output', '-o', default=f'api_test_results_{datetime.now().strftime("%Y%m%d_%H%M%S")}.json',
                        help='Output file for results (default: api_test_results_TIMESTAMP.json)')
    parser.add_argument('--quiet', '-q', action='store_true',
                        help='Suppress console output, only save results')
    parser.add_argument('--test', '-t', choices=['openai', 'google', 'elevenlabs', 'modal', 'all'],
                        default='all', help='Which test to run (default: all)')
    parser.add_argument('--validate-only', action='store_true',
                        help='Only validate configuration, do not run tests')
    parser.add_argument('--show-summary', action='store_true',
                        help='Show summary of available tests')
    args = parser.parse_args()
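
    # NOTE: --quiet is parsed above but is not currently consulted below;
    # console output is always printed.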
    if args.show_summary:
        print("📋 Available API Tests:")
        print("  • openai     - OpenAI GPT models for text generation")
        print("  • google     - Google Generative AI models")
        print("  • elevenlabs - ElevenLabs text-to-speech")
        print("  • modal      - Modal deployment platform")
        print("  • all        - Run all tests (default)")
        print()
        print("📁 Configuration:")
        print(f"  • Config file: {args.config}")
        print(f"  • Results will be saved to: {args.output}")
        return
print("π Secure AI Agents Suite - API Test Runner")
print("=" * 50)
    # Initialize test suite
    try:
        test_suite = APITestSuite(args.config)
        print(f"✅ Configuration loaded from: {args.config}")
    except FileNotFoundError:
        print(f"❌ Configuration file not found: {args.config}")
        print("💡 Please copy api_test_config.yaml and fill in your API keys")
        print("💡 Usage: cp api_test_config.yaml my_config.yaml")
        print("💡 Then edit my_config.yaml with your API keys")
        return
    except Exception as e:
        print(f"❌ Failed to load configuration: {e}")
        return
    if args.validate_only:
        print("✅ Configuration validation successful!")
        print("💡 Run tests with: python test_runner.py")
        return
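
    # The key checks below assume the YAML config nests one api_key per
    # service. A hypothetical minimal api_test_config.yaml, inferred from the
    # .get() calls (not taken from the real template file):
    #
    #   openai:
    #     api_key: "sk-..."
    #   google:
    #     api_key: "..."
    #   elevenlabs:
    #     api_key: "..."
    #   modal:
    #     api_key: "..."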
    # Validate API keys before running tests
    print("\n🔑 Validating API key configuration...")
    missing_keys = []
    if args.test in ['openai', 'all']:
        if not test_suite.config.get('openai', {}).get('api_key'):
            missing_keys.append('OpenAI')
    if args.test in ['google', 'all']:
        if not test_suite.config.get('google', {}).get('api_key'):
            missing_keys.append('Google')
    if args.test in ['elevenlabs', 'all']:
        if not test_suite.config.get('elevenlabs', {}).get('api_key'):
            missing_keys.append('ElevenLabs')
    if args.test in ['modal', 'all']:
        if not test_suite.config.get('modal', {}).get('api_key'):
            missing_keys.append('Modal')
    if missing_keys:
        print("⚠️ Missing API keys for:")
        for key in missing_keys:
            print(f"  • {key}")
        print("\n💡 Please update your configuration file with valid API keys")
        return
    print("✅ All required API keys found!")
    # Run tests
    print(f"\n🧪 Running {args.test.upper()} tests...")
    print("-" * 30)
    try:
        if args.test == 'openai':
            results = test_suite.test_openai_connection()
            results = {**results, **test_suite.test_openai_text_generation()}
            results = {**results, **test_suite.test_openai_batch_processing()}
        elif args.test == 'google':
            results = test_suite.test_google_connection()
            results = {**results, **test_suite.test_google_text_generation()}
        elif args.test == 'elevenlabs':
            results = test_suite.test_elevenlabs_connection()
            results = {**results, **test_suite.test_elevenlabs_tts()}
            results = {**results, **test_suite.test_elevenlabs_voice_cloning()}
        elif args.test == 'modal':
            results = test_suite.test_modal_connection()
            results = {**results, **test_suite.test_modal_function_deployment()}
        elif args.test == 'all':
            results = test_suite.run_all_tests()

        # Save results
        with open(args.output, 'w') as f:
            json.dump(results, f, indent=2)
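
        # The display logic below assumes each per-test value is a dict shaped
        # roughly like this hypothetical example:
        #   {"success": True, "duration": 1.23, "error": None}
        # and that run_all_tests() adds a top-level "summary" dict with
        # total_tests, passed_tests, failed_tests, success_rate, total_duration.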
print(f"\nπ Test Results Summary")
print("=" * 50)
# Display summary
total_tests = results.get('summary', {}).get('total_tests', 0)
passed_tests = results.get('summary', {}).get('passed_tests', 0)
failed_tests = results.get('summary', {}).get('failed_tests', 0)
success_rate = results.get('summary', {}).get('success_rate', 0)
total_duration = results.get('summary', {}).get('total_duration', 0)
print(f"π Total Tests: {total_tests}")
print(f"β
Passed: {passed_tests}")
print(f"β Failed: {failed_tests}")
print(f"π Success Rate: {success_rate:.1f}%")
print(f"β±οΈ Total Duration: {total_duration:.2f}s")
        # Show detailed results
        print("\n📝 Detailed Results:")
        print("-" * 30)
        for test_name, result in results.items():
            if test_name == 'summary':
                continue
            if isinstance(result, dict):
                status = "✅ PASS" if result.get('success', False) else "❌ FAIL"
                duration = result.get('duration', 0)
                print(f"{test_name:<25} {status} ({duration:.2f}s)")
                if not result.get('success', False) and result.get('error'):
                    print(f"  Error: {result['error']}")
        # Point the user at the full results file
        print(f"\n💾 Full results saved to: {args.output}")
        # Success/failure summary ("all passed" only when nothing failed,
        # not merely when the success rate clears 80%)
        if failed_tests == 0 and total_tests > 0:
            print("\n🎉 All tests passed! Your API integrations are working correctly.")
        elif success_rate >= 60:
            print("\n⚠️ Some tests failed. Check the results above for details.")
        else:
            print("\n❌ Many tests failed. Please check your API keys and configuration.")
    except Exception as e:
        print(f"\n❌ Test execution failed: {e}")
        print("💡 Check your API keys and network connection")
        return
print("\n⨠Test run completed!")


if __name__ == "__main__":
    main()