#!/usr/bin/env python3
"""
Grok2API 并发性能测试脚本
测试不同并发级别下的API性能表现
"""
import asyncio
import aiohttp
import time
import statistics
import argparse
from datetime import datetime
from typing import List, Dict, Any, Optional
import json


class ConcurrencyTester:
    """Concurrency tester."""

    def __init__(self, base_url: str, api_key: Optional[str] = None):
        self.base_url = base_url.rstrip('/')
        self.api_key = api_key
        self.results: List[Dict[str, Any]] = []

    async def test_request(self, session: aiohttp.ClientSession, request_id: int) -> Dict[str, Any]:
        """Send a single test request."""
        url = f"{self.base_url}/v1/chat/completions"
        headers = {
            "Content-Type": "application/json"
        }
        if self.api_key:
            headers["Authorization"] = f"Bearer {self.api_key}"
        payload = {
            "model": "grok-3-fast",
            "messages": [
                {"role": "user", "content": f"Test request #{request_id}, please reply briefly with OK"}
            ],
            "stream": False,
            "max_tokens": 10
        }
        start_time = time.time()
        try:
            async with session.post(url, json=payload, headers=headers,
                                    timeout=aiohttp.ClientTimeout(total=30)) as response:
                status = response.status
                if status == 200:
                    data = await response.json()
                    elapsed = time.time() - start_time
                    return {
                        "id": request_id,
                        "status": "success",
                        "http_status": status,
                        "elapsed": elapsed,
                        "response_length": len(json.dumps(data))
                    }
                else:
                    elapsed = time.time() - start_time
                    error_text = await response.text()
                    return {
                        "id": request_id,
                        "status": "error",
                        "http_status": status,
                        "elapsed": elapsed,
                        "error": error_text[:200]
                    }
        except asyncio.TimeoutError:
            elapsed = time.time() - start_time
            return {
                "id": request_id,
                "status": "timeout",
                "elapsed": elapsed,
                "error": "Request timeout"
            }
        except Exception as e:
            elapsed = time.time() - start_time
            return {
                "id": request_id,
                "status": "exception",
                "elapsed": elapsed,
                "error": str(e)
            }
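
    # A minimal standalone smoke test of test_request (a sketch; the URL is
    # illustrative and assumes a server is listening locally):
    #
    #   async def smoke():
    #       async with aiohttp.ClientSession() as session:
    #           tester = ConcurrencyTester("http://localhost:8000")
    #           print(await tester.test_request(session, 1))
    #
    #   asyncio.run(smoke())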

    async def run_concurrent_test(self, concurrency: int, total_requests: int):
        """Run a concurrent test."""
        print(f"\n{'='*60}")
        print(f"📊 Test configuration: concurrency {concurrency}, total requests {total_requests}")
        print(f"{'='*60}")
        connector = aiohttp.TCPConnector(limit=concurrency, limit_per_host=concurrency)
        timeout = aiohttp.ClientTimeout(total=60)
        async with aiohttp.ClientSession(connector=connector, timeout=timeout) as session:
            # Warm up with a single request
            print("🔥 Warming up...")
            await self.test_request(session, 0)
            # Start the test
            print("🚀 Starting concurrent test...")
            start_time = time.time()
            # Create tasks
            tasks = []
            for i in range(1, total_requests + 1):
                task = asyncio.create_task(self.test_request(session, i))
                tasks.append(task)
                # Limit concurrency: run requests in batches of `concurrency`
                if len(tasks) >= concurrency:
                    results = await asyncio.gather(*tasks)
                    self.results.extend(results)
                    tasks = []
                # Show progress
                print(f"  Progress: {i}/{total_requests} ({i/total_requests*100:.1f}%)", end='\r')
            # Drain any remaining tasks
            if tasks:
                results = await asyncio.gather(*tasks)
                self.results.extend(results)
            total_time = time.time() - start_time
        # Compute and print statistics
        self.print_statistics(concurrency, total_requests, total_time)
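
    # Note: the batch-and-gather loop above releases requests in waves of
    # `concurrency`, so one slow request can stall its whole batch. A
    # sliding-window alternative (a sketch, not what this script uses)
    # bounds in-flight requests with an asyncio.Semaphore instead:
    #
    #   sem = asyncio.Semaphore(concurrency)
    #   async def bounded(i):
    #       async with sem:
    #           return await self.test_request(session, i)
    #   self.results.extend(await asyncio.gather(
    #       *(bounded(i) for i in range(1, total_requests + 1))))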

    def print_statistics(self, concurrency: int, total_requests: int, total_time: float):
        """Print statistics."""
        success_results = [r for r in self.results if r["status"] == "success"]
        error_results = [r for r in self.results if r["status"] != "success"]
        success_count = len(success_results)
        error_count = len(error_results)
        if success_results:
            latencies = [r["elapsed"] for r in success_results]
            avg_latency = statistics.mean(latencies)
            min_latency = min(latencies)
            max_latency = max(latencies)
            p50_latency = statistics.median(latencies)
            # Nearest-rank P95/P99 over the sorted latencies (falls back to
            # the single sample when there is only one data point)
            p95_latency = sorted(latencies)[int(len(latencies) * 0.95)] if len(latencies) > 1 else latencies[0]
            p99_latency = sorted(latencies)[int(len(latencies) * 0.99)] if len(latencies) > 1 else latencies[0]
        else:
            avg_latency = min_latency = max_latency = p50_latency = p95_latency = p99_latency = 0
        throughput = total_requests / total_time if total_time > 0 else 0
        print(f"\n\n{'='*60}")
        print("📈 Test result summary")
        print(f"{'='*60}")
        print(f"  Test duration: {total_time:.2f}s")
        print(f"  Total requests: {total_requests}")
        print(f"  Concurrency: {concurrency}")
        print()
        print(f"  Successful requests: {success_count} ({success_count/total_requests*100:.1f}%)")
        print(f"  Failed requests: {error_count} ({error_count/total_requests*100:.1f}%)")
        print()
        print(f"  Throughput: {throughput:.2f} req/s")
        print()
        print("  Latency statistics:")
        print(f"    Min: {min_latency*1000:.0f}ms")
        print(f"    Mean: {avg_latency*1000:.0f}ms")
        print(f"    Max: {max_latency*1000:.0f}ms")
        print(f"    P50: {p50_latency*1000:.0f}ms")
        print(f"    P95: {p95_latency*1000:.0f}ms")
        print(f"    P99: {p99_latency*1000:.0f}ms")
        # Error breakdown
        if error_results:
            print("\n  ⚠️ Error breakdown:")
            error_types = {}
            for r in error_results:
                error_type = r.get("status", "unknown")
                error_types[error_type] = error_types.get(error_type, 0) + 1
            for error_type, count in error_types.items():
                print(f"    {error_type}: {count}")
        print(f"{'='*60}\n")
        # Performance rating
        self.print_performance_rating(throughput, avg_latency)

    def print_performance_rating(self, throughput: float, avg_latency: float):
        """Print performance ratings."""
        print("🎯 Performance rating:")
        # Throughput rating
        if throughput >= 100:
            rating = "⭐⭐⭐⭐⭐ Excellent"
        elif throughput >= 60:
            rating = "⭐⭐⭐⭐ Good"
        elif throughput >= 30:
            rating = "⭐⭐⭐ Fair"
        elif throughput >= 10:
            rating = "⭐⭐ Low"
        else:
            rating = "⭐ Needs optimization"
        print(f"  Throughput ({throughput:.1f} req/s): {rating}")
        # Latency rating
        if avg_latency < 0.5:
            rating = "⭐⭐⭐⭐⭐ Excellent"
        elif avg_latency < 1.0:
            rating = "⭐⭐⭐⭐ Good"
        elif avg_latency < 2.0:
            rating = "⭐⭐⭐ Fair"
        elif avg_latency < 5.0:
            rating = "⭐⭐ High"
        else:
            rating = "⭐ Needs optimization"
        print(f"  Mean latency ({avg_latency*1000:.0f}ms): {rating}")
        print()


async def main():
    """Entry point."""
    parser = argparse.ArgumentParser(description='Grok2API concurrency performance test')
    parser.add_argument('--url', default='http://localhost:8000', help='API base URL')
    parser.add_argument('--key', default='', help='API key (optional)')
    parser.add_argument('-c', '--concurrency', type=int, default=10, help='Concurrency level')
    parser.add_argument('-n', '--requests', type=int, default=50, help='Total number of requests')
    parser.add_argument('--multi-test', action='store_true', help='Run tests at multiple concurrency levels')
    args = parser.parse_args()
    print(f"""
╔══════════════════════════════════════════════════════════╗
║          Grok2API Concurrency Performance Test           ║
╚══════════════════════════════════════════════════════════╝
🔗 Target: {args.url}
🔑 API key: {'set' if args.key else 'not set'}
⏰ Start time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
""")
    tester = ConcurrencyTester(args.url, args.key)
    if args.multi_test:
        # Multi-level concurrency test
        test_configs = [
            (5, 20),     # 5 concurrent, 20 requests
            (10, 50),    # 10 concurrent, 50 requests
            (20, 100),   # 20 concurrent, 100 requests
            (50, 200),   # 50 concurrent, 200 requests
        ]
        for concurrency, requests in test_configs:
            tester.results = []  # Reset results between runs
            await tester.run_concurrent_test(concurrency, requests)
            await asyncio.sleep(2)  # Pause 2 seconds between runs
    else:
        # Single test run
        await tester.run_concurrent_test(args.concurrency, args.requests)
    print("\n✅ Test complete!")
    print(f"⏰ End time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
if __name__ == "__main__":
try:
asyncio.run(main())
except KeyboardInterrupt:
print("\n\n⚠️ 测试被用户中断")
except Exception as e:
print(f"\n\n❌ 测试失败: {e}")