#!/usr/bin/env python3
"""
API Test Runner Script
Easy-to-use script to run API tests and view results
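Example invocations (flag names match the argparse definitions below; file names are illustrative):
    python test_runner.py                          # run all tests with the default config
    python test_runner.py --test openai            # run only the OpenAI tests
    python test_runner.py --config my_config.yaml --output results.json
    python test_runner.py --validate-only          # validate the configuration without running tests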
"""
import os
import sys
import json
import argparse
from datetime import datetime
# Ensure the script's directory is on sys.path so api_test_suite can be imported
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from api_test_suite import APITestSuite
def main():
    parser = argparse.ArgumentParser(description='Run API tests for Secure AI Agents Suite')
    parser.add_argument('--config', '-c', default='api_test_config.yaml',
                        help='Path to configuration file (default: api_test_config.yaml)')
    parser.add_argument('--output', '-o',
                        default=f'api_test_results_{datetime.now().strftime("%Y%m%d_%H%M%S")}.json',
                        help='Output file for results (default: api_test_results_TIMESTAMP.json)')
    parser.add_argument('--quiet', '-q', action='store_true',
                        help='Suppress console output, only save results')
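    # NOTE: --quiet is parsed but not currently wired up to suppress the prints below.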
    parser.add_argument('--test', '-t', choices=['openai', 'google', 'elevenlabs', 'modal', 'all'],
                        default='all', help='Which test to run (default: all)')
    parser.add_argument('--validate-only', action='store_true',
                        help='Only validate configuration, do not run tests')
    parser.add_argument('--show-summary', action='store_true',
                        help='Show summary of available tests')
    args = parser.parse_args()
    if args.show_summary:
        print("πŸ” Available API Tests:")
        print(" β€’ openai - OpenAI GPT models for text generation")
        print(" β€’ google - Google Generative AI models")
        print(" β€’ elevenlabs - ElevenLabs text-to-speech")
        print(" β€’ modal - Modal deployment platform")
        print(" β€’ all - Run all tests (default)")
        print()
        print("πŸ“ Configuration:")
        print(f" β€’ Config file: {args.config}")
        print(f" β€’ Results will be saved to: {args.output}")
        return
print("πŸš€ Secure AI Agents Suite - API Test Runner")
print("=" * 50)
# Initialize test suite
try:
test_suite = APITestSuite(args.config)
print(f"βœ… Configuration loaded from: {args.config}")
except FileNotFoundError:
print(f"❌ Configuration file not found: {args.config}")
print("πŸ’‘ Please copy api_test_config.yaml and fill in your API keys")
print("πŸ’‘ Usage: cp api_test_config.yaml my_config.yaml")
print("πŸ’‘ Then edit my_config.yaml with your API keys")
return
except Exception as e:
print(f"❌ Failed to load configuration: {e}")
return
if args.validate_only:
print("βœ… Configuration validation successful!")
print("πŸ’‘ Run tests with: python test_runner.py")
return
    # Validate API keys before running tests
    print("\nπŸ”‘ Validating API key configuration...")
    missing_keys = []
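    # The loaded config is expected to be nested as {provider: {"api_key": ...}},
    # matching the structure of api_test_config.yaml.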
    if args.test in ['openai', 'all']:
        if not test_suite.config.get('openai', {}).get('api_key'):
            missing_keys.append('OpenAI')
    if args.test in ['google', 'all']:
        if not test_suite.config.get('google', {}).get('api_key'):
            missing_keys.append('Google')
    if args.test in ['elevenlabs', 'all']:
        if not test_suite.config.get('elevenlabs', {}).get('api_key'):
            missing_keys.append('ElevenLabs')
    if args.test in ['modal', 'all']:
        if not test_suite.config.get('modal', {}).get('api_key'):
            missing_keys.append('Modal')
    if missing_keys:
        print("⚠️ Missing API keys for:")
        for key in missing_keys:
            print(f" β€’ {key}")
        print("\nπŸ’‘ Please update your configuration file with valid API keys")
        return
    print("βœ… All required API keys found!")
    # Run tests
    print(f"\nπŸ§ͺ Running {args.test.upper()} tests...")
    print("-" * 30)
    try:
        if args.test == 'openai':
            results = test_suite.test_openai_connection()
            results = {**results, **test_suite.test_openai_text_generation()}
            results = {**results, **test_suite.test_openai_batch_processing()}
        elif args.test == 'google':
            results = test_suite.test_google_connection()
            results = {**results, **test_suite.test_google_text_generation()}
        elif args.test == 'elevenlabs':
            results = test_suite.test_elevenlabs_connection()
            results = {**results, **test_suite.test_elevenlabs_tts()}
            results = {**results, **test_suite.test_elevenlabs_voice_cloning()}
        elif args.test == 'modal':
            results = test_suite.test_modal_connection()
            results = {**results, **test_suite.test_modal_function_deployment()}
        elif args.test == 'all':
            results = test_suite.run_all_tests()

        # Save results
        with open(args.output, 'w') as f:
            json.dump(results, f, indent=2)
print(f"\nπŸ“Š Test Results Summary")
print("=" * 50)
# Display summary
total_tests = results.get('summary', {}).get('total_tests', 0)
passed_tests = results.get('summary', {}).get('passed_tests', 0)
failed_tests = results.get('summary', {}).get('failed_tests', 0)
success_rate = results.get('summary', {}).get('success_rate', 0)
total_duration = results.get('summary', {}).get('total_duration', 0)
print(f"πŸ“ˆ Total Tests: {total_tests}")
print(f"βœ… Passed: {passed_tests}")
print(f"❌ Failed: {failed_tests}")
print(f"πŸ“Š Success Rate: {success_rate:.1f}%")
print(f"⏱️ Total Duration: {total_duration:.2f}s")
# Show detailed results
print("\nπŸ“‹ Detailed Results:")
print("-" * 30)
for test_name, result in results.items():
if test_name == 'summary':
continue
if isinstance(result, dict):
status = "βœ… PASS" if result.get('success', False) else "❌ FAIL"
duration = result.get('duration', 0)
print(f"{test_name:<25} {status} ({duration:.2f}s)")
if not result.get('success', False) and result.get('error'):
print(f" Error: {result['error']}")
        # Point the user at the full results file
        print(f"\nπŸ’Ύ Full results saved to: {args.output}")
        # Success/failure summary
        if success_rate >= 80:
            print("\nπŸŽ‰ Tests passed! Your API integrations are working correctly.")
        elif success_rate >= 60:
            print("\n⚠️ Some tests failed. Check the results above for details.")
        else:
            print("\n❌ Many tests failed. Please check your API keys and configuration.")
    except Exception as e:
        print(f"\n❌ Test execution failed: {e}")
        print("πŸ’‘ Check your API keys and network connection")
        return

    print("\n✨ Test run completed!")
if __name__ == "__main__":
    main()