# Secure-AI-Agents-Suite / test_suite.sh
# Author: rajkumarrawal — initial commit (2ec0d39)
#!/bin/bash
# Comprehensive Testing Suite for Secure AI Agents Suite
# Validates all components and integrations

# NOTE: `set -e` was removed deliberately.  This suite is supposed to count
# failures and print a summary, but under `set -e` the very first failing
# test — or even the first `((VAR++))` on a zero-valued counter, which
# returns status 1 — aborts the whole script before the summary runs.
# -u (undefined vars are errors) and pipefail are kept for safety.
set -uo pipefail

# Color codes for terminal output (escape sequences expanded by `echo -e`)
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color

# Test results tracking (updated by run_test / log_success / log_failure)
TOTAL_TESTS=0
PASSED_TESTS=0
FAILED_TESTS=0
# Logging functions
log_info() {
  # Informational message prefixed with a blue [INFO] tag.
  # %b expands the ANSI escape sequences exactly like `echo -e`.
  printf '%b\n' "${BLUE}[INFO]${NC} $1"
}
log_success() {
  # Print a green [PASS] line and record the pass in the global counter.
  echo -e "${GREEN}[PASS]${NC} $1"
  # Plain arithmetic expansion instead of ((PASSED_TESTS++)):
  # ((var++)) returns exit status 1 when the pre-increment value is 0,
  # which aborts the entire script under `set -e`.
  PASSED_TESTS=$((PASSED_TESTS + 1))
}
log_failure() {
  # Print a red [FAIL] line and record the failure in the global counter.
  echo -e "${RED}[FAIL]${NC} $1"
  # Plain arithmetic expansion instead of ((FAILED_TESTS++)):
  # ((var++)) returns exit status 1 when the pre-increment value is 0,
  # which aborts the entire script under `set -e`.
  FAILED_TESTS=$((FAILED_TESTS + 1))
}
log_warning() {
  # Warning message prefixed with a yellow [WARN] tag.
  # %b expands the ANSI escape sequences exactly like `echo -e`.
  printf '%b\n' "${YELLOW}[WARN]${NC} $1"
}
# Test framework
run_test() {
  # Execute one named test command and record the outcome.
  #   $1 - human-readable test name
  #   $2 - shell command string (evaluated with eval in the current shell;
  #        NOTE: commands must use `false`, never `exit`, to signal failure)
  # Returns 0 if the command succeeded, 1 otherwise.
  local test_name="$1"
  local test_command="$2"
  # Plain assignment instead of ((TOTAL_TESTS++)): the ((...)) form returns
  # status 1 when the counter is 0, aborting the script under `set -e` —
  # which made the very first test kill the whole suite.
  TOTAL_TESTS=$((TOTAL_TESTS + 1))
  log_info "Running test: $test_name"
  if eval "$test_command"; then
    log_success "$test_name"
    return 0
  else
    log_failure "$test_name"
    return 1
  fi
}
# Test: Environment Setup
test_environment() {
  # Verify the host environment: Python 3.8–3.19, git, docker, and memory.
  log_info "Testing environment setup..."

  # Check Python version (matches 3.8, 3.9 or 3.10–3.19 in `python3 --version`)
  run_test "Python version check" \
    "python3 --version | grep -E '3\.[8-9]|3\.1[0-9]'"

  # Check required system tools
  run_test "Git availability" \
    "command -v git"
  run_test "Docker availability" \
    "command -v docker"

  # Check memory availability.  `free` is Linux-specific; guard it so a
  # missing binary degrades to a failed memory check instead of a fatal
  # command-substitution error under `set -e`.
  local memory_gb=0
  if command -v free >/dev/null 2>&1; then
    memory_gb=$(free -g | awk '/^Mem:/{print $2}')
    memory_gb=${memory_gb:-0}
  fi
  if [[ ${memory_gb} -ge 2 ]]; then
    run_test "Memory check" \
      "true"
  else
    # IMPORTANT: run_test eval's the command in the *current* shell, so the
    # original `exit 1` here terminated the entire suite.  `false` merely
    # makes this one test fail.
    run_test "Memory check" \
      "echo 'Insufficient memory: ${memory_gb}GB' >&2; false"
  fi
}
# Test: Dependencies Installation
test_dependencies() {
  # Verify pip works and that each core Python package can be imported.
  log_info "Testing dependencies..."

  # pip itself must be functional
  run_test "Pip functionality" \
    "python3 -m pip --version"

  # Import-check every required package in turn.
  local pkg
  for pkg in gradio pandas numpy plotly requests; do
    run_test "Package availability: $pkg" \
      "python3 -c \"import $pkg; print('Available')\""
  done
}
# Test: Application Structure
test_application_structure() {
  # Confirm the expected source files and working directories exist.
  log_info "Testing application structure..."

  # Each of these files must be present in the repository root.
  local f
  for f in "app.py" "autonomous_engine_fixed.py" "requirements.txt" "Dockerfile"; do
    run_test "Required file exists: $f" \
      "test -f '$f'"
  done

  # Runtime directories used by the app.
  run_test "Directory structure" \
    "test -d 'logs' && test -d 'cache' && test -d 'docs'"
}
# Test: Code Quality
test_code_quality() {
  # Syntax, formatting, lint and security checks for the two app modules.
  log_info "Testing code quality..."

  # Best-effort install of the quality tooling; a failure here is non-fatal.
  python3 -m pip install black flake8 mypy bandit || true

  # Syntax must always be checkable.
  run_test "Python syntax check" \
    "python3 -m py_compile app.py autonomous_engine_fixed.py"

  # The remaining checks run only when their tool is actually on PATH.
  local have_black="" have_flake8="" have_bandit=""
  if command -v black >/dev/null 2>&1; then have_black=1; fi
  if command -v flake8 >/dev/null 2>&1; then have_flake8=1; fi
  if command -v bandit >/dev/null 2>&1; then have_bandit=1; fi

  if [[ -n "$have_black" ]]; then
    run_test "Code formatting (Black)" \
      "black --check app.py autonomous_engine_fixed.py"
  fi

  if [[ -n "$have_flake8" ]]; then
    run_test "Code linting (Flake8)" \
      "flake8 app.py autonomous_engine_fixed.py --count --select=E9,F63,F7,F82 --show-source --statistics"
  fi

  if [[ -n "$have_bandit" ]]; then
    run_test "Security scan (Bandit)" \
      "bandit app.py autonomous_engine_fixed.py -r"
  fi
}
# Test: Application Functionality
test_application_functionality() {
  # Smoke-test module imports and basic object construction.
  log_info "Testing application functionality..."

  # Test import of main modules
  run_test "Import autonomous engine" \
    "python3 -c 'from autonomous_engine_fixed import RefactoredAutonomousAgent; print(\"Imported successfully\")'"
  run_test "Import application classes" \
    "python3 -c 'from app import SpacesApp, AgentManager, RequestValidator; print(\"Imported successfully\")'"

  # Test application initialization
  run_test "Application initialization" \
    "python3 -c 'from app import SpacesApp; app = SpacesApp(); print(\"App initialized\")'"

  # Test agent creation.  Single interpreter invocation: the original
  # spawned two Pythons (and initialized SpacesApp twice) only to assert
  # the agent count; the inline assert does the same in one process.
  run_test "Agent manager initialization" \
    "python3 -c 'from app import SpacesApp; app = SpacesApp(); agents = app.agent_manager.agents; print(f\"Created {len(agents)} agents\"); assert len(agents) >= 1'"
}
# Test: Configuration Files
test_configuration() {
  # Validate requirements.txt, the optional YAML configs and the Dockerfile.
  log_info "Testing configuration files..."

  # Test requirements.txt: every listed distribution must be resolvable.
  run_test "Requirements file validity" \
    "python3 -c 'import pkg_resources; pkg_resources.require([line.split(\"==\")[0] for line in open(\"requirements.txt\") if line.strip() and not line.startswith(\"#\")])'"

  # Test YAML configuration files (only when yamllint is installed)
  if command -v yamllint >/dev/null 2>&1; then
    run_test "YAML configuration validation" \
      "yamllint spaces.yaml security_config.yaml"
  fi

  # Test Docker configuration.  `docker build` has no --dry-run flag, so
  # the original check always failed.  Cheap sanity check instead: the
  # Dockerfile exists and declares at least one FROM build stage.
  run_test "Dockerfile syntax" \
    "test -f Dockerfile && grep -qiE '^[[:space:]]*FROM[[:space:]]' Dockerfile"
}
# Test: Security Features
test_security() {
# Security-feature smoke tests: input validation, the security_config
# module, and environment-driven configuration loading.
log_info "Testing security features..."
# Test input validation
# RequestValidator.validate_input returns a (valid, msg) pair; a plain
# short string must be accepted.
run_test "Input validation functionality" \
"python3 -c 'from app import RequestValidator; validator = RequestValidator(); valid, msg = validator.validate_input(\"test input\"); assert valid'"
# Test rate limiting
# NOTE(review): this only proves `security_config` imports cleanly — no
# rate-limiting behavior is actually exercised; consider a real check.
run_test "Rate limiting configuration" \
"python3 -c 'from security_config import *; print(\"Security config loaded\")'"
# Test environment variable handling
# Presumably SpacesConfig reads its settings from the environment —
# TODO confirm against app.py.
run_test "Environment configuration" \
"python3 -c 'from app import SpacesConfig; config = SpacesConfig(); print(f\"Loaded {len(config.config)} config items\")'"
}
# Test: Performance Benchmarks
test_performance() {
  # Lightweight performance smoke tests: lookup latency, RSS, concurrency.
  log_info "Testing performance benchmarks..."

  # Agent lookup must answer within one second.
  run_test "Agent response time benchmark" \
    "python3 -c 'import time; from app import SpacesApp; app = SpacesApp(); start = time.time(); agent = app.agent_manager.get_agent(\"GeneralAgent\"); print(f\"Response time: {time.time() - start:.3f}s\"); assert (time.time() - start) < 1.0'"

  # Process RSS must stay under 500 MB (requires psutil).
  run_test "Memory usage check" \
    "python3 -c 'import psutil; import os; process = psutil.Process(os.getpid()); memory_mb = process.memory_info().rss / 1024 / 1024; print(f\"Memory usage: {memory_mb:.1f}MB\"); assert memory_mb < 500'"

  # Concurrent access from 5 threads.  The original one-liner contained
  # `...; def test_request(): ...`, which is a Python SyntaxError (`def`
  # cannot follow `;` on one line) so the test could never pass; a lambda
  # thread target expresses the same check in valid one-line Python.
  run_test "Concurrent request handling" \
    "python3 -c 'from app import SpacesApp; import threading; app = SpacesApp(); results = []; threads = [threading.Thread(target=lambda: results.append(len(app.agent_manager.agents))) for _ in range(5)]; [t.start() for t in threads]; [t.join() for t in threads]; print(f\"Concurrent requests: {len(results)}\"); assert len(results) == 5'"
}
# Test: API Endpoints
test_api_endpoints() {
  # Boot the app in the background, probe its HTTP endpoints, shut it down.
  log_info "Testing API endpoints..."

  # Start application in background (timeout is a hard cap on its lifetime).
  log_info "Starting test server..."
  timeout 30s python3 app.py &
  SERVER_PID=$!

  # Poll for readiness instead of the original blind `sleep 10`: proceeds
  # as soon as the server answers, waiting at most ~15s before giving up.
  local waited=0
  until curl -sf http://localhost:7860 >/dev/null 2>&1; do
    if (( waited >= 15 )); then
      log_warning "Server did not become ready within ${waited}s"
      break
    fi
    sleep 1
    waited=$((waited + 1))
  done

  # Test health endpoint
  run_test "Health endpoint" \
    "curl -f http://localhost:7860/health > /dev/null 2>&1"

  # Test main interface
  run_test "Main interface" \
    "curl -f http://localhost:7860 > /dev/null 2>&1"

  # Stop the test server; ignore errors if it already exited.
  kill $SERVER_PID 2>/dev/null || true
  wait $SERVER_PID 2>/dev/null || true
}
# Test: Documentation
test_documentation() {
  # Verify the documentation exists and covers the expected sections.
  log_info "Testing documentation..."

  # README must exist with more than 100 lines.  Reading via stdin makes
  # `wc -l` print only the number, so the original awk|xargs chain (needed
  # only to strip the filename wc appends) goes away.
  run_test "README documentation" \
    "test -f README.md && test \$(wc -l < README.md) -gt 100"

  # Check API documentation
  run_test "API documentation" \
    "test -f docs/api_reference.md"

  # All four top-level sections must appear somewhere in the README.
  run_test "Documentation completeness" \
    "python3 -c 'with open(\"README.md\") as f: content = f.read(); sections = [\"Installation\", \"Usage\", \"API\", \"Configuration\"]; missing = [s for s in sections if s not in content]; print(f\"Missing sections: {missing}\"); assert len(missing) == 0'"
}
# Test: Deployment Readiness
test_deployment_readiness() {
# Checks that everything needed to ship is present: setup script, CI
# workflow, HuggingFace Spaces config, and (when docker exists) the image.
log_info "Testing deployment readiness..."
# Test setup script
# NOTE(review): `chmod +x` mutates the working tree as a side effect of a
# "test" — confirm this is intended rather than asserting with `test -x`.
run_test "Setup script functionality" \
"test -f setup.sh && chmod +x setup.sh"
# Test GitHub Actions workflow
run_test "CI/CD workflow" \
"test -f .github/workflows/ci-cd.yml"
# Test HuggingFace Spaces configuration
# Existence check plus a parse via PyYAML's safe_load.
run_test "Spaces configuration" \
"test -f spaces.yaml && python3 -c 'import yaml; yaml.safe_load(open(\"spaces.yaml\"))'"
# Test Docker build
# A full image build can be slow; it runs only when docker is on PATH.
if command -v docker >/dev/null 2>&1; then
run_test "Docker image build" \
"docker build -t secure-ai-agents-test -f Dockerfile . > /dev/null 2>&1"
# Clean up test image
docker rmi secure-ai-agents-test >/dev/null 2>&1 || true
fi
}
# Test: Error Handling
test_error_handling() {
# Negative-path validation: empty input rejected, empty context accepted,
# oversized input rejected.
log_info "Testing error handling..."
# Test invalid input handling
# An empty string must be rejected by validate_input.
run_test "Invalid input handling" \
"python3 -c 'from app import RequestValidator; validator = RequestValidator(); valid, msg = validator.validate_input(\"\"); assert not valid'"
# Test empty context handling
# An empty context dict must be accepted by validate_context.
run_test "Empty context handling" \
"python3 -c 'from app import RequestValidator; validator = RequestValidator(); valid, msg = validator.validate_context({}); assert valid'"
# Test oversized input handling
# A 15000-char string must exceed the validator's size limit — presumably
# the cap is below 15k; TODO confirm against RequestValidator in app.py.
run_test "Oversized input handling" \
"python3 -c 'from app import RequestValidator; validator = RequestValidator(); large_input = \"a\" * 15000; valid, msg = validator.validate_input(large_input); assert not valid'"
}
# Test: Integration Testing
test_integration() {
# End-to-end smoke tests across app construction, config and logging.
log_info "Running integration tests..."
# Test end-to-end workflow
# Builds the app and fetches an agent by name from the agent manager.
run_test "End-to-end workflow" \
"python3 -c 'from app import SpacesApp; app = SpacesApp(); agent = app.agent_manager.get_agent(\"GeneralAgent\"); print(f\"Agent type: {type(agent).__name__}\"); assert agent is not None'"
# Test configuration loading
# SpacesConfig must expose at least one configuration item.
run_test "Configuration loading" \
"python3 -c 'from app import SpacesConfig; config = SpacesConfig(); print(f\"Config items: {len(config.config)}\"); assert len(config.config) > 0'"
# Test logging setup
# Only verifies basicConfig runs; does not assert handlers or levels.
run_test "Logging configuration" \
"python3 -c 'import logging; logging.basicConfig(level=logging.INFO); print(\"Logging configured\")'"
}
# Main test execution
main() {
  # Banner
  echo "=========================================="
  echo "Secure AI Agents Suite - Test Suite"
  echo "=========================================="
  echo

  # Run every suite in order, printing a blank line after each — same
  # sequence and output shape as listing them out one by one.
  local suite
  for suite in \
    test_environment \
    test_dependencies \
    test_application_structure \
    test_code_quality \
    test_application_functionality \
    test_configuration \
    test_security \
    test_performance \
    test_api_endpoints \
    test_documentation \
    test_deployment_readiness \
    test_error_handling \
    test_integration; do
    "$suite"
    echo
  done

  # Generate test report
  echo "=========================================="
  echo "Test Summary"
  echo "=========================================="
  echo "Total tests run: $TOTAL_TESTS"
  echo -e "${GREEN}Passed: $PASSED_TESTS${NC}"
  echo -e "${RED}Failed: $FAILED_TESTS${NC}"

  if [[ $FAILED_TESTS -eq 0 ]]; then
    echo -e "${GREEN}🎉 All tests passed! Application is ready for deployment.${NC}"
    exit 0
  else
    echo -e "${RED}❌ Some tests failed. Please review and fix issues before deployment.${NC}"
    exit 1
  fi
}
# Script entry point
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
# Run the suite only when this file is executed directly; when sourced
# (e.g. to debug an individual test function) BASH_SOURCE[0] differs from
# $0, so the definitions are loaded without running anything.
main "$@"
fi