AumCoreAI committed on
Commit
8ebe336
·
verified ·
1 Parent(s): 82e3567

Create test_runner.py

Browse files
Files changed (1) hide show
  1. test_runner.py +511 -0
test_runner.py ADDED
@@ -0,0 +1,511 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # test_runner.py - Automated Testing & Validation System
2
+ import asyncio
3
+ import time
4
+ import json
5
+ import statistics
6
+ from datetime import datetime
7
+ from typing import Dict, List, Any, Optional
8
+ import aiohttp
9
+ import psutil
10
+
11
class AumCoreTestRunner:
    """Automated testing system for AumCore AI.

    Runs connectivity, endpoint, performance, integration and security
    checks against a running AumCore deployment and aggregates the
    per-category results into a single 0-100 health score.
    """

    def __init__(self, base_url: str = "http://localhost:7860"):
        # Root URL of the deployment under test (no trailing slash expected).
        self.base_url = base_url
        self.results = []   # reserved for accumulated raw results
        self.metrics = {}   # reserved for collected metrics

    async def run_full_test_suite(self) -> Dict:
        """Run every test category and return the aggregated results dict."""
        print("🧪 Starting AumCore AI Test Suite...")

        test_results = {
            "timestamp": datetime.now().isoformat(),
            "suite_version": "1.0.0",
            "tests": {},
            "summary": {},
            "health_score": 0
        }

        # 1. BASIC CONNECTIVITY TESTS
        test_results["tests"]["connectivity"] = await self._test_connectivity()

        # 2. ENDPOINT FUNCTIONALITY TESTS
        test_results["tests"]["endpoints"] = await self._test_endpoints()

        # 3. PERFORMANCE TESTS
        test_results["tests"]["performance"] = await self._test_performance()

        # 4. INTEGRATION TESTS
        test_results["tests"]["integration"] = await self._test_integrations()

        # 5. SECURITY TESTS
        test_results["tests"]["security"] = await self._test_security()

        # Aggregate per-category pass/fail counts into the overall score.
        test_results["summary"] = self._calculate_summary(test_results["tests"])
        test_results["health_score"] = test_results["summary"]["score"]

        print(f"✅ Test Suite Complete. Score: {test_results['health_score']}/100")
        return test_results

    async def _test_connectivity(self) -> Dict:
        """Test basic system connectivity (UI page and status endpoint)."""
        tests = []

        # Test 1: Main UI endpoint
        start = time.time()
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(
                    f"{self.base_url}/",
                    timeout=aiohttp.ClientTimeout(total=10),
                ) as response:
                    success = response.status == 200
                    latency = time.time() - start
                    tests.append({
                        "name": "UI Endpoint",
                        "status": "PASS" if success else "FAIL",
                        "latency_ms": round(latency * 1000, 2),
                        "status_code": response.status,
                        "message": "UI loaded successfully" if success else f"Failed with status {response.status}"
                    })
        except Exception as e:
            # Network failure modes vary widely; any exception is a FAIL.
            tests.append({
                "name": "UI Endpoint",
                "status": "FAIL",
                "latency_ms": 0,
                "error": str(e),
                "message": f"Connection failed: {e}"
            })

        # Test 2: System status endpoint
        start = time.time()
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(
                    f"{self.base_url}/system/status",
                    timeout=aiohttp.ClientTimeout(total=10),
                ) as response:
                    success = response.status == 200
                    latency = time.time() - start
                    data = await response.json() if success else {}
                    tests.append({
                        "name": "System Status",
                        "status": "PASS" if success else "FAIL",
                        "latency_ms": round(latency * 1000, 2),
                        "status_code": response.status,
                        "data": data if success else None,
                        "message": "System status endpoint working" if success else "System status endpoint failed"
                    })
        except Exception as e:
            tests.append({
                "name": "System Status",
                "status": "FAIL",
                "latency_ms": 0,
                "error": str(e),
                "message": f"System status check failed: {e}"
            })

        # Average only over requests that actually completed (latency > 0).
        measured = [t["latency_ms"] for t in tests if t["latency_ms"] > 0]
        return {
            "total_tests": len(tests),
            "passed": sum(1 for t in tests if t["status"] == "PASS"),
            "failed": sum(1 for t in tests if t["status"] == "FAIL"),
            "tests": tests,
            "average_latency_ms": round(statistics.mean(measured), 2) if measured else 0
        }

    async def _test_endpoints(self) -> Dict:
        """Test that each read-only API endpoint responds with 2xx."""
        endpoints = [
            ("/", "GET", None, "UI Homepage"),
            ("/system/status", "GET", None, "System Status"),
            ("/system/diagnostics/summary", "GET", None, "Diagnostics Summary"),
            ("/system/diagnostics/full", "GET", None, "Full Diagnostics"),
            ("/system/diagnostics/history", "GET", None, "Diagnostics History"),
        ]

        tests = []

        # One shared session: re-creating a ClientSession per endpoint
        # discards connection pooling for no benefit.
        async with aiohttp.ClientSession() as session:
            for endpoint, method, payload, name in endpoints:
                start = time.time()
                try:
                    if method == "GET":
                        async with session.get(
                            f"{self.base_url}{endpoint}",
                            timeout=aiohttp.ClientTimeout(total=15),
                        ) as response:
                            success = response.status in (200, 201)
                            latency = time.time() - start
                            data = {}
                            if success:
                                try:
                                    # Tolerate non-JSON bodies (e.g. the HTML
                                    # homepage). Previously the JSON parse error
                                    # marked a healthy endpoint as FAIL.
                                    data = await response.json(content_type=None)
                                except ValueError:
                                    data = {}
                            tests.append({
                                "name": name,
                                "endpoint": endpoint,
                                "method": method,
                                "status": "PASS" if success else "FAIL",
                                "latency_ms": round(latency * 1000, 2),
                                "status_code": response.status,
                                "response_keys": list(data.keys()) if isinstance(data, dict) else [],
                                "message": f"{name} working" if success else f"{name} failed with status {response.status}"
                            })
                except Exception as e:
                    tests.append({
                        "name": name,
                        "endpoint": endpoint,
                        "method": method,
                        "status": "FAIL",
                        "latency_ms": 0,
                        "error": str(e),
                        "message": f"{name} failed: {e}"
                    })

        passed = sum(1 for t in tests if t["status"] == "PASS")
        return {
            "total_endpoints": len(endpoints),
            "tested": len(tests),
            "passed": passed,
            "failed": sum(1 for t in tests if t["status"] == "FAIL"),
            "tests": tests,
            "success_rate": round((passed / len(tests)) * 100, 2) if tests else 0
        }

    async def _test_performance(self) -> Dict:
        """Test chat-endpoint latency plus host CPU/memory usage."""
        tests = []

        # Test 1: chat endpoint response time, averaged over 3 requests.
        chat_payload = {"message": "test"}
        latencies = []

        async with aiohttp.ClientSession() as session:
            for _ in range(3):
                start = time.time()
                try:
                    async with session.post(
                        f"{self.base_url}/chat",
                        data=chat_payload,
                        timeout=aiohttp.ClientTimeout(total=30),
                    ) as response:
                        if response.status == 200:
                            latencies.append(time.time() - start)
                except Exception:
                    # A failed request simply contributes no latency sample.
                    pass

        avg_latency = statistics.mean(latencies) if latencies else 0
        if not latencies:
            # No request succeeded: report an explicit failure. (Previously an
            # empty sample averaged to 0, which passed the "< 5 s" check.)
            latency_status = "FAIL"
        elif avg_latency < 5:
            latency_status = "PASS"
        elif avg_latency < 10:
            latency_status = "WARN"
        else:
            latency_status = "FAIL"

        tests.append({
            "name": "Chat Response Time",
            "metric": "latency",
            "value_ms": round(avg_latency * 1000, 2),
            "status": latency_status,
            "threshold_ms": 5000,
            "message": f"Average response time: {round(avg_latency * 1000, 2)}ms" if latencies else "Chat endpoint failed"
        })

        # Test 2: host resources sampled while the suite runs.
        cpu_usage = psutil.cpu_percent(interval=1)
        memory_usage = psutil.virtual_memory().percent

        tests.append({
            "name": "CPU Usage",
            "metric": "cpu_percent",
            "value": cpu_usage,
            "status": "PASS" if cpu_usage < 80 else "WARN" if cpu_usage < 90 else "FAIL",
            "threshold": 80,
            "message": f"CPU usage: {cpu_usage}%"
        })

        tests.append({
            "name": "Memory Usage",
            "metric": "memory_percent",
            "value": memory_usage,
            "status": "PASS" if memory_usage < 80 else "WARN" if memory_usage < 90 else "FAIL",
            "threshold": 80,
            "message": f"Memory usage: {memory_usage}%"
        })

        return {
            "tests": tests,
            "performance_score": self._calculate_performance_score(tests),
            "recommendations": self._generate_performance_recommendations(tests)
        }

    async def _test_integrations(self) -> Dict:
        """Test external integrations (LLM backend and database)."""
        tests = []

        # Test 1: Groq API connectivity, exercised indirectly via /chat.
        start = time.time()
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(
                    f"{self.base_url}/chat",
                    data={"message": "ping"},
                    timeout=aiohttp.ClientTimeout(total=15),
                ) as response:
                    success = response.status == 200
                    latency = time.time() - start
                    data = await response.json() if success else {}

                    tests.append({
                        "name": "Groq API Integration",
                        "status": "PASS" if success else "FAIL",
                        "latency_ms": round(latency * 1000, 2),
                        "response_contains": "response" in data if data else False,
                        "message": "Groq API connected successfully" if success else "Groq API connection failed"
                    })
        except Exception as e:
            tests.append({
                "name": "Groq API Integration",
                "status": "FAIL",
                "latency_ms": 0,
                "error": str(e),
                "message": f"Groq API test failed: {e}"
            })

        # Test 2: TiDB connectivity, exercised indirectly via /reset.
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(
                    f"{self.base_url}/reset",
                    timeout=aiohttp.ClientTimeout(total=10),
                ) as response:
                    data = await response.json()
                    tests.append({
                        "name": "TiDB Database",
                        "status": "PASS" if "message" in data else "WARN",
                        "message": data.get("message", "TiDB check completed"),
                        "details": "Database connectivity verified" if "message" in data else "Database status unknown"
                    })
        except Exception as e:
            tests.append({
                "name": "TiDB Database",
                "status": "FAIL",
                "error": str(e),
                "message": f"TiDB check failed: {e}"
            })

        return {
            "integrations_tested": len(tests),
            "working": sum(1 for t in tests if t["status"] == "PASS"),
            "tests": tests
        }

    async def _test_security(self) -> Dict:
        """Basic security checks (CORS, HTTPS advisory, data exposure)."""
        tests = []

        # Test 1: CORS headers (informational if absent).
        try:
            async with aiohttp.ClientSession() as session:
                async with session.options(
                    f"{self.base_url}/",
                    timeout=aiohttp.ClientTimeout(total=5),
                ) as response:
                    has_cors = "Access-Control-Allow-Origin" in response.headers
                    tests.append({
                        "name": "CORS Headers",
                        "status": "PASS" if has_cors else "INFO",
                        "message": "CORS headers present" if has_cors else "No CORS headers (may be intentional)"
                    })
        except Exception:
            # Some servers reject OPTIONS outright; treat as inconclusive.
            tests.append({
                "name": "CORS Headers",
                "status": "INFO",
                "message": "CORS check skipped (endpoint may not support OPTIONS)"
            })

        # Test 2: HTTPS advisory for production deployments.
        tests.append({
            "name": "HTTPS Recommendation",
            "status": "INFO",
            "message": "Consider HTTPS for production deployment",
            "recommendation": "Enable HTTPS for secure communication"
        })

        # Test 3: naive scan of the public status payload for secret-like keys.
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(
                    f"{self.base_url}/system/status",
                    timeout=aiohttp.ClientTimeout(total=10),
                ) as response:
                    data = await response.json()
                    has_sensitive = any(key in str(data).lower() for key in ["key", "password", "secret", "token"])
                    tests.append({
                        "name": "Sensitive Data Exposure",
                        "status": "PASS" if not has_sensitive else "WARN",
                        "message": "No sensitive data detected in public endpoints" if not has_sensitive else "Potential sensitive data in responses"
                    })
        except Exception:
            tests.append({
                "name": "Sensitive Data Exposure",
                "status": "INFO",
                "message": "Sensitive data check inconclusive"
            })

        return {
            "security_tests": len(tests),
            "tests": tests,
            "recommendations": [t for t in tests if t["status"] in ["WARN", "INFO"]]
        }

    def _calculate_performance_score(self, tests: List[Dict]) -> int:
        """Return a 0-100 score: -30 per FAIL, -15 per WARN, clamped."""
        score = 100

        for test in tests:
            if test["status"] == "FAIL":
                score -= 30
            elif test["status"] == "WARN":
                score -= 15

        return max(0, min(100, score))

    def _generate_performance_recommendations(self, tests: List[Dict]) -> List[str]:
        """Turn FAIL/WARN performance results into human-readable advice."""
        recommendations = []

        for test in tests:
            if test["status"] == "FAIL":
                recommendations.append(f"🚨 CRITICAL: {test['name']} failed - {test.get('message', '')}")
            elif test["status"] == "WARN":
                recommendations.append(f"⚠️ WARNING: {test['name']} needs attention - {test.get('message', '')}")

        if not recommendations:
            recommendations.append("✅ All performance tests passing")

        return recommendations

    def _calculate_summary(self, test_results: Dict) -> Dict:
        """Aggregate per-category pass/fail counts into an overall score.

        Categories without a "passed" key (performance, security,
        integration) carry no binary pass/fail totals and are skipped.
        """
        total_tests = 0
        passed_tests = 0
        failed_tests = 0

        for category in test_results.values():
            if "passed" in category:
                passed = category.get("passed", 0)
                failed = category.get("failed", 0)
                # Fall back to passed + failed when the category does not
                # report "total_tests" (the endpoints category reports
                # "total_endpoints" instead); previously those tests were
                # counted as passed but not as run, skewing the score.
                total_tests += category.get("total_tests", passed + failed)
                passed_tests += passed
                failed_tests += failed

        score = 0
        if total_tests > 0:
            score = round((passed_tests / total_tests) * 100, 2)

        return {
            "total_tests": total_tests,
            "passed": passed_tests,
            "failed": failed_tests,
            "score": score,
            "status": "HEALTHY" if score >= 90 else "DEGRADED" if score >= 70 else "CRITICAL"
        }

    async def run_specific_test(self, test_name: str) -> Dict:
        """Run a single test category by name; error dict if unknown."""
        test_methods = {
            "connectivity": self._test_connectivity,
            "endpoints": self._test_endpoints,
            "performance": self._test_performance,
            "integration": self._test_integrations,
            "security": self._test_security
        }

        if test_name in test_methods:
            return await test_methods[test_name]()

        return {"error": f"Test '{test_name}' not found"}
400
+
401
+ # Test Report Generator
402
class TestReportGenerator:
    """Generate human-readable test reports"""

    @staticmethod
    def generate_html_report(test_results: Dict) -> str:
        """Generate an HTML test report.

        Expects the structure produced by
        AumCoreTestRunner.run_full_test_suite: top-level "timestamp",
        "summary" and per-category "tests".
        """
        return f"""
        <!DOCTYPE html>
        <html>
        <head>
            <title>AumCore AI Test Report</title>
            <style>
                body {{ font-family: Arial, sans-serif; margin: 20px; }}
                .summary {{ background: #f5f5f5; padding: 20px; border-radius: 10px; }}
                .healthy {{ color: green; }}
                .degraded {{ color: orange; }}
                .critical {{ color: red; }}
                .test-result {{ margin: 10px 0; padding: 10px; border-left: 4px solid; }}
                .pass {{ border-color: green; background: #e8f5e9; }}
                .fail {{ border-color: red; background: #ffebee; }}
                .warn {{ border-color: orange; background: #fff3e0; }}
                .info {{ border-color: #2196f3; background: #e3f2fd; }}
            table {{ width: 100%; border-collapse: collapse; }}
                th, td {{ padding: 10px; text-align: left; border-bottom: 1px solid #ddd; }}
            </style>
        </head>
        <body>
            <h1>🧪 AumCore AI Test Report</h1>
            <div class="summary">
                <h2>Summary</h2>
                <p><strong>Overall Score:</strong> <span class="{test_results['summary']['status'].lower()}">{test_results['summary']['score']}/100 ({test_results['summary']['status']})</span></p>
                <p><strong>Tests Run:</strong> {test_results['summary']['total_tests']}</p>
                <p><strong>Passed:</strong> {test_results['summary']['passed']}</p>
                <p><strong>Failed:</strong> {test_results['summary']['failed']}</p>
                <p><strong>Timestamp:</strong> {test_results['timestamp']}</p>
            </div>
            <h2>Detailed Results</h2>
            {TestReportGenerator._generate_category_html(test_results['tests'])}
        </body>
        </html>
        """

    @staticmethod
    def _generate_category_html(categories: Dict) -> str:
        """Render one <h3> section per category and one div per test.

        All dynamic text is HTML-escaped: test messages can contain raw
        exception text (and thus <, >, &), which previously broke the
        markup and allowed content injection into the report.
        """
        from html import escape  # local import to avoid clashing with the `html` accumulator

        html = ""
        for category_name, category_data in categories.items():
            html += f"<h3>{escape(category_name.title())}</h3>"
            if 'tests' in category_data:
                for test in category_data['tests']:
                    status_class = test['status'].lower()
                    html += f"""
                    <div class="test-result {status_class}">
                        <strong>{escape(str(test['name']))}</strong> - {escape(str(test['status']))}<br>
                        <small>{escape(str(test.get('message', '')))}</small>
                    </div>
                    """
        return html
458
+
459
+ # Async function to run tests
460
async def run_automated_tests(base_url: str = None) -> Dict:
    """Run the full automated test suite against *base_url*.

    When *base_url* is None, the target is taken from the
    AUMCORE_BASE_URL environment variable, defaulting to the local
    development server.
    """
    import os

    target = base_url
    if target is None:
        # Detect the base URL from the environment.
        target = os.environ.get("AUMCORE_BASE_URL", "http://localhost:7860")

    return await AumCoreTestRunner(target).run_full_test_suite()
469
+
470
+ # Command line interface
471
if __name__ == "__main__":
    # CLI usage: python test_runner.py [base_url]
    import sys

    async def main():
        """Run the suite, print/persist results, exit with a status code."""
        # Optional positional argument overrides the auto-detected base URL.
        if len(sys.argv) > 1:
            base_url = sys.argv[1]
        else:
            base_url = None

        print("🚀 Starting AumCore AI Automated Test Suite...")
        results = await run_automated_tests(base_url)

        # Print summary
        print(f"\n📊 TEST SUMMARY")
        print(f"Score: {results['summary']['score']}/100 ({results['summary']['status']})")
        print(f"Tests Run: {results['summary']['total_tests']}")
        print(f"Passed: {results['summary']['passed']}")
        print(f"Failed: {results['summary']['failed']}")

        # Save results to file. Explicit UTF-8: the report and messages
        # contain emoji, which raised UnicodeEncodeError on platforms whose
        # default file encoding is not UTF-8.
        with open("test_results.json", "w", encoding="utf-8") as f:
            json.dump(results, f, indent=2)

        # Generate HTML report
        html_report = TestReportGenerator.generate_html_report(results)
        with open("test_report.html", "w", encoding="utf-8") as f:
            f.write(html_report)

        print(f"\n📁 Results saved to: test_results.json")
        print(f"📄 HTML report: test_report.html")

        # Exit code mirrors health status so CI pipelines can gate on it.
        if results['summary']['status'] == 'CRITICAL':
            sys.exit(1)
        elif results['summary']['status'] == 'DEGRADED':
            sys.exit(2)
        else:
            sys.exit(0)

    # asyncio is imported at module top (see file header imports).
    asyncio.run(main())