Spaces:
Sleeping
Sleeping
Commit ·
0404756
1
Parent(s): 70c3ae8
space fetch
Browse files- .gitignore +5 -0
- Dockerfile +26 -0
- app.py +297 -0
- benchmarks/__init__.py +1 -0
- benchmarks/cpu_bench.py +361 -0
- benchmarks/cpu_ops.c +105 -0
- benchmarks/disk_bench.py +178 -0
- benchmarks/gpu_bench.py +255 -0
- benchmarks/memory_bench.py +331 -0
- benchmarks/memory_bench_c.c +137 -0
- benchmarks/system_info.py +196 -0
- requirements.txt +5 -0
- static/index.html +184 -0
- static/script.js +305 -0
- static/style.css +400 -0
.gitignore
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.venv/
|
| 2 |
+
__pycache__/
|
| 3 |
+
|
| 4 |
+
*.so
|
| 5 |
+
*.out
|
Dockerfile
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM archlinux:latest
|
| 2 |
+
|
| 3 |
+
# 安装系统依赖
|
| 4 |
+
RUN pacman -Syu --noconfirm && \
|
| 5 |
+
pacman -S --noconfirm \
|
| 6 |
+
python python-pip \
|
| 7 |
+
base-devel \
|
| 8 |
+
openssl \
|
| 9 |
+
pciutils \
|
| 10 |
+
&& pacman -Scc --noconfirm
|
| 11 |
+
|
| 12 |
+
# 创建工作目录
|
| 13 |
+
WORKDIR /app
|
| 14 |
+
|
| 15 |
+
# 复制并安装 Python 依赖
|
| 16 |
+
COPY requirements.txt .
|
| 17 |
+
RUN pip install --no-cache-dir -r requirements.txt --break-system-packages
|
| 18 |
+
|
| 19 |
+
# 复制应用代码
|
| 20 |
+
COPY . .
|
| 21 |
+
|
| 22 |
+
# 暴露端口
|
| 23 |
+
EXPOSE 7860
|
| 24 |
+
|
| 25 |
+
# 启动应用
|
| 26 |
+
CMD ["python", "app.py"]
|
app.py
ADDED
|
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Space Fetch - System Performance Dashboard
|
| 3 |
+
FastAPI 主应用入口
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import asyncio
|
| 7 |
+
from contextlib import asynccontextmanager
|
| 8 |
+
from typing import Dict, Any, Optional
|
| 9 |
+
|
| 10 |
+
import uvicorn
|
| 11 |
+
from fastapi import FastAPI, BackgroundTasks
|
| 12 |
+
from fastapi.staticfiles import StaticFiles
|
| 13 |
+
from fastapi.responses import HTMLResponse, JSONResponse
|
| 14 |
+
from pydantic import BaseModel
|
| 15 |
+
|
| 16 |
+
from benchmarks.system_info import get_all_system_info, get_memory_info
|
| 17 |
+
from benchmarks.cpu_bench import run_all_cpu_benchmarks
|
| 18 |
+
from benchmarks.memory_bench import run_all_memory_benchmarks
|
| 19 |
+
from benchmarks.disk_bench import run_all_disk_benchmarks
|
| 20 |
+
from benchmarks.gpu_bench import run_all_gpu_benchmarks, check_gpu_available
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# 缓存基准测试结果
|
| 24 |
+
benchmark_cache: Dict[str, Any] = {}
|
| 25 |
+
benchmark_running: Dict[str, bool] = {
|
| 26 |
+
"cpu": False,
|
| 27 |
+
"memory": False,
|
| 28 |
+
"disk": False,
|
| 29 |
+
"gpu": False,
|
| 30 |
+
"all": False,
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class BenchmarkStatus(BaseModel):
    """API response model describing one benchmark category."""
    # True while the corresponding benchmark is executing in the background.
    running: bool
    # Cached benchmark results, or None if no run has completed yet.
    results: Optional[Dict[str, Any]] = None
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan manager.

    Startup: collect static system information once and cache it, so the
    first request to /api/system is served instantly.
    Shutdown: drop all cached data.
    """
    # Collect system info at startup (cached for the app's lifetime).
    benchmark_cache["system_info"] = get_all_system_info()
    yield
    # Cleanup on shutdown.
    benchmark_cache.clear()
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
app = FastAPI(
|
| 50 |
+
title="Space Fetch",
|
| 51 |
+
description="System Performance Dashboard for Hugging Face Spaces",
|
| 52 |
+
version="1.0.0",
|
| 53 |
+
lifespan=lifespan,
|
| 54 |
+
)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
# ==================== API 路由 ====================
|
| 58 |
+
|
| 59 |
+
@app.get("/api/system", response_class=JSONResponse)
|
| 60 |
+
async def get_system_info():
|
| 61 |
+
"""获取系统信息"""
|
| 62 |
+
if "system_info" not in benchmark_cache:
|
| 63 |
+
benchmark_cache["system_info"] = get_all_system_info()
|
| 64 |
+
return benchmark_cache["system_info"]
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@app.get("/api/monitor/memory", response_class=JSONResponse)
|
| 68 |
+
async def get_realtime_memory_info():
|
| 69 |
+
"""获取实时内存信息"""
|
| 70 |
+
return get_memory_info()
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
@app.get("/api/benchmark/status", response_class=JSONResponse)
|
| 75 |
+
async def get_benchmark_status():
|
| 76 |
+
"""获取所有基准测试状态"""
|
| 77 |
+
return {
|
| 78 |
+
"running": benchmark_running,
|
| 79 |
+
"cached": {k: k in benchmark_cache for k in ["cpu", "memory", "disk", "gpu"]},
|
| 80 |
+
"gpu_available": check_gpu_available(),
|
| 81 |
+
}
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
async def run_cpu_benchmark_async():
    """Run the CPU benchmark suite without blocking the event loop.

    Sets the "cpu" running flag for the duration of the run, stores the
    results in ``benchmark_cache["cpu"]`` on success, and always clears
    the flag, even if the benchmark raises.
    """
    benchmark_running["cpu"] = True
    try:
        # run_in_executor moves the CPU-bound work into a thread so the
        # event loop stays responsive.  get_running_loop() replaces the
        # get_event_loop() call, which is deprecated inside coroutines.
        loop = asyncio.get_running_loop()
        benchmark_cache["cpu"] = await loop.run_in_executor(None, run_all_cpu_benchmarks)
    finally:
        benchmark_running["cpu"] = False
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
@app.post("/api/benchmark/cpu", response_class=JSONResponse)
|
| 97 |
+
async def start_cpu_benchmark(background_tasks: BackgroundTasks):
|
| 98 |
+
"""启动 CPU 基准测试"""
|
| 99 |
+
if benchmark_running["cpu"]:
|
| 100 |
+
return {"status": "already_running"}
|
| 101 |
+
|
| 102 |
+
background_tasks.add_task(run_cpu_benchmark_async)
|
| 103 |
+
return {"status": "started"}
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
@app.get("/api/benchmark/cpu", response_class=JSONResponse)
|
| 107 |
+
async def get_cpu_benchmark():
|
| 108 |
+
"""获取 CPU 基准测试结果"""
|
| 109 |
+
return BenchmarkStatus(
|
| 110 |
+
running=benchmark_running["cpu"],
|
| 111 |
+
results=benchmark_cache.get("cpu")
|
| 112 |
+
)
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
async def run_memory_benchmark_async():
    """Run the memory benchmark suite without blocking the event loop.

    The "memory" running flag is always cleared, even when the suite raises.
    """
    benchmark_running["memory"] = True
    try:
        # get_running_loop() replaces the deprecated get_event_loop() call.
        loop = asyncio.get_running_loop()
        benchmark_cache["memory"] = await loop.run_in_executor(None, run_all_memory_benchmarks)
    finally:
        benchmark_running["memory"] = False
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
@app.post("/api/benchmark/memory", response_class=JSONResponse)
|
| 127 |
+
async def start_memory_benchmark(background_tasks: BackgroundTasks):
|
| 128 |
+
"""启动内存基准测试"""
|
| 129 |
+
if benchmark_running["memory"]:
|
| 130 |
+
return {"status": "already_running"}
|
| 131 |
+
|
| 132 |
+
background_tasks.add_task(run_memory_benchmark_async)
|
| 133 |
+
return {"status": "started"}
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
@app.get("/api/benchmark/memory", response_class=JSONResponse)
|
| 137 |
+
async def get_memory_benchmark():
|
| 138 |
+
"""获取内存基准测试结果"""
|
| 139 |
+
return BenchmarkStatus(
|
| 140 |
+
running=benchmark_running["memory"],
|
| 141 |
+
results=benchmark_cache.get("memory")
|
| 142 |
+
)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
async def run_disk_benchmark_async():
    """Run the disk benchmark suite without blocking the event loop.

    The "disk" running flag is always cleared, even when the suite raises.
    """
    benchmark_running["disk"] = True
    try:
        # get_running_loop() replaces the deprecated get_event_loop() call.
        loop = asyncio.get_running_loop()
        benchmark_cache["disk"] = await loop.run_in_executor(None, run_all_disk_benchmarks)
    finally:
        benchmark_running["disk"] = False
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
@app.post("/api/benchmark/disk", response_class=JSONResponse)
|
| 157 |
+
async def start_disk_benchmark(background_tasks: BackgroundTasks):
|
| 158 |
+
"""启动磁盘基准测试"""
|
| 159 |
+
if benchmark_running["disk"]:
|
| 160 |
+
return {"status": "already_running"}
|
| 161 |
+
|
| 162 |
+
background_tasks.add_task(run_disk_benchmark_async)
|
| 163 |
+
return {"status": "started"}
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
@app.get("/api/benchmark/disk", response_class=JSONResponse)
|
| 167 |
+
async def get_disk_benchmark():
|
| 168 |
+
"""获取磁��基准测试结果"""
|
| 169 |
+
return BenchmarkStatus(
|
| 170 |
+
running=benchmark_running["disk"],
|
| 171 |
+
results=benchmark_cache.get("disk")
|
| 172 |
+
)
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
async def run_gpu_benchmark_async():
    """Run the GPU benchmark suite without blocking the event loop.

    The "gpu" running flag is always cleared, even when the suite raises.
    """
    benchmark_running["gpu"] = True
    try:
        # get_running_loop() replaces the deprecated get_event_loop() call.
        loop = asyncio.get_running_loop()
        benchmark_cache["gpu"] = await loop.run_in_executor(None, run_all_gpu_benchmarks)
    finally:
        benchmark_running["gpu"] = False
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
@app.post("/api/benchmark/gpu", response_class=JSONResponse)
|
| 187 |
+
async def start_gpu_benchmark(background_tasks: BackgroundTasks):
|
| 188 |
+
"""启动 GPU 基准测试"""
|
| 189 |
+
if not check_gpu_available():
|
| 190 |
+
return {"status": "no_gpu", "error": "No NVIDIA GPU detected"}
|
| 191 |
+
|
| 192 |
+
if benchmark_running["gpu"]:
|
| 193 |
+
return {"status": "already_running"}
|
| 194 |
+
|
| 195 |
+
background_tasks.add_task(run_gpu_benchmark_async)
|
| 196 |
+
return {"status": "started"}
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
@app.get("/api/benchmark/gpu", response_class=JSONResponse)
|
| 200 |
+
async def get_gpu_benchmark():
|
| 201 |
+
"""获取 GPU 基准测试结果"""
|
| 202 |
+
return BenchmarkStatus(
|
| 203 |
+
running=benchmark_running["gpu"],
|
| 204 |
+
results=benchmark_cache.get("gpu")
|
| 205 |
+
)
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
async def run_all_benchmarks_async():
    """Run every benchmark suite sequentially in a worker thread.

    Fixes: previously, if any suite raised, its per-category running flag
    stayed True forever (only the "all" flag was reset in ``finally``).
    Each category flag is now cleared in its own ``finally`` block.  Also
    uses ``asyncio.get_running_loop()`` instead of the deprecated
    ``get_event_loop()``.
    """
    benchmark_running["all"] = True
    try:
        loop = asyncio.get_running_loop()

        # Ordered list of (category, runner); GPU only when hardware exists.
        suites = [
            ("cpu", run_all_cpu_benchmarks),
            ("memory", run_all_memory_benchmarks),
            ("disk", run_all_disk_benchmarks),
        ]
        if check_gpu_available():
            suites.append(("gpu", run_all_gpu_benchmarks))

        for name, runner in suites:
            benchmark_running[name] = True
            try:
                benchmark_cache[name] = await loop.run_in_executor(None, runner)
            finally:
                # Never leave a category flagged as running after a failure.
                benchmark_running[name] = False
    finally:
        benchmark_running["all"] = False
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
@app.post("/api/benchmark/all", response_class=JSONResponse)
|
| 240 |
+
async def start_all_benchmarks(background_tasks: BackgroundTasks):
|
| 241 |
+
"""启动所有基准测试"""
|
| 242 |
+
if benchmark_running["all"]:
|
| 243 |
+
return {"status": "already_running"}
|
| 244 |
+
|
| 245 |
+
background_tasks.add_task(run_all_benchmarks_async)
|
| 246 |
+
return {"status": "started"}
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
@app.get("/api/benchmark/all", response_class=JSONResponse)
|
| 250 |
+
async def get_all_benchmarks():
|
| 251 |
+
"""获取所有基准测试结果"""
|
| 252 |
+
return {
|
| 253 |
+
"running": benchmark_running["all"],
|
| 254 |
+
"cpu": benchmark_cache.get("cpu"),
|
| 255 |
+
"memory": benchmark_cache.get("memory"),
|
| 256 |
+
"disk": benchmark_cache.get("disk"),
|
| 257 |
+
"gpu": benchmark_cache.get("gpu"),
|
| 258 |
+
}
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
@app.get("/api/export", response_class=JSONResponse)
|
| 262 |
+
async def export_results():
|
| 263 |
+
"""导出所有结果为 JSON"""
|
| 264 |
+
return {
|
| 265 |
+
"system_info": benchmark_cache.get("system_info"),
|
| 266 |
+
"benchmarks": {
|
| 267 |
+
"cpu": benchmark_cache.get("cpu"),
|
| 268 |
+
"memory": benchmark_cache.get("memory"),
|
| 269 |
+
"disk": benchmark_cache.get("disk"),
|
| 270 |
+
"gpu": benchmark_cache.get("gpu"),
|
| 271 |
+
}
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
# ==================== 静态文件服务 ====================
|
| 276 |
+
|
| 277 |
+
# 挂载静态文件目录
|
| 278 |
+
app.mount("/static", StaticFiles(directory="static"), name="static")
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
@app.get("/", response_class=HTMLResponse)
|
| 282 |
+
async def root():
|
| 283 |
+
"""返回主页"""
|
| 284 |
+
with open("static/index.html", "r", encoding="utf-8") as f:
|
| 285 |
+
return HTMLResponse(content=f.read())
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
# ==================== 启动 ====================
|
| 289 |
+
|
| 290 |
+
if __name__ == "__main__":
|
| 291 |
+
uvicorn.run(
|
| 292 |
+
"app:app",
|
| 293 |
+
host="0.0.0.0",
|
| 294 |
+
port=7860,
|
| 295 |
+
reload=False,
|
| 296 |
+
workers=1,
|
| 297 |
+
)
|
benchmarks/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Benchmark modules
|
benchmarks/cpu_bench.py
ADDED
|
@@ -0,0 +1,361 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
CPU Benchmark Module
|
| 3 |
+
Optimized with native C library compilation for maximum performance
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import time
|
| 7 |
+
import os
|
| 8 |
+
import ctypes
|
| 9 |
+
import hashlib
|
| 10 |
+
import multiprocessing
|
| 11 |
+
import subprocess
|
| 12 |
+
import zlib
|
| 13 |
+
from concurrent.futures import ProcessPoolExecutor
|
| 14 |
+
from typing import Dict, Any, Optional
|
| 15 |
+
|
| 16 |
+
# Path to the C source and compiled library
|
| 17 |
+
CPU_OPS_SRC = os.path.join(os.path.dirname(__file__), "cpu_ops.c")
|
| 18 |
+
CPU_OPS_LIB = os.path.join(os.path.dirname(__file__), "cpu_ops.so")
|
| 19 |
+
|
| 20 |
+
# Global reference to the Loaded Library
|
| 21 |
+
_lib = None
|
| 22 |
+
|
| 23 |
+
def compile_and_load_lib() -> Optional[ctypes.CDLL]:
    """Compile (if needed) and load the native cpu_ops shared library.

    Returns the loaded CDLL, or None when the C source is missing or
    compilation/loading fails.  The handle is memoized in the module-level
    ``_lib`` so repeated calls (including from worker processes) are cheap.

    Fix: the cleanup path used a bare ``except:``, which also swallows
    KeyboardInterrupt/SystemExit; it now catches only OSError.
    """
    global _lib
    if _lib is not None:
        return _lib

    try:
        # 1. Prefer an already-built artifact (avoids recompiling in every
        #    worker process and a compile race between workers).
        if os.path.exists(CPU_OPS_LIB):
            try:
                lib = ctypes.CDLL(CPU_OPS_LIB)
                _init_lib_signatures(lib)
                _lib = lib
                return lib
            except OSError:
                # Corrupt or empty file — fall through and rebuild it.
                pass

        # 2. Nothing to build without the C source.
        if not os.path.exists(CPU_OPS_SRC):
            print(f"Error: {CPU_OPS_SRC} not found")
            return None

        # 3. Build: gcc -shared -o cpu_ops.so -fPIC -O3 cpu_ops.c -lm
        cmd = [
            "gcc", "-shared", "-o", CPU_OPS_LIB,
            "-fPIC", "-O3", CPU_OPS_SRC, "-lm",
        ]
        result = subprocess.run(cmd, capture_output=True, text=True)
        if result.returncode != 0:
            print(f"Compilation failed: {result.stderr}")
            return None

        # 4. Load the freshly built library.
        lib = ctypes.CDLL(CPU_OPS_LIB)
        _init_lib_signatures(lib)

        _lib = lib
        return lib
    except Exception as e:
        print(f"Failed to load native lib: {e}")
        # Best-effort removal of a possibly-corrupt artifact.
        try:
            if os.path.exists(CPU_OPS_LIB):
                os.remove(CPU_OPS_LIB)
        except OSError:
            pass
        return None
|
| 74 |
+
|
| 75 |
+
def _init_lib_signatures(lib):
    """Declare argtypes/restype for the native benchmark entry points.

    A library missing either symbol is tolerated (AttributeError is
    swallowed) so a partially built artifact does not crash the loader.
    """
    try:
        for symbol in ("benchmark_integer_time", "benchmark_float_time"):
            func = getattr(lib, symbol)
            func.argtypes = [ctypes.c_double]
            func.restype = ctypes.c_uint64
    except AttributeError:
        pass
|
| 85 |
+
|
| 86 |
+
def _native_worker_integer(duration: float) -> int:
    """Per-process worker: native integer benchmark, or 0 if the lib is unavailable."""
    library = compile_and_load_lib()
    return library.benchmark_integer_time(duration) if library else 0
|
| 92 |
+
|
| 93 |
+
def _native_worker_float(duration: float) -> int:
    """Per-process worker: native float benchmark, or 0 if the lib is unavailable."""
    library = compile_and_load_lib()
    return library.benchmark_float_time(duration) if library else 0
|
| 99 |
+
|
| 100 |
+
# ---------------------------------------------------------------------------
|
| 101 |
+
# Python Fallbacks (Legacy)
|
| 102 |
+
# ---------------------------------------------------------------------------
|
| 103 |
+
def _is_prime(n: int) -> bool:
    """Trial-division primality test (pure-Python fallback workload)."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    limit = int(n ** 0.5)
    divisor = 3
    while divisor <= limit:
        if n % divisor == 0:
            return False
        divisor += 2
    return True
|
| 110 |
+
|
| 111 |
+
def _python_single_core_integer(duration: float) -> int:
    """Count primality checks completed within *duration* seconds (Python path)."""
    deadline = time.time() + duration
    candidate = 3
    checks = 0
    while time.time() < deadline:
        _is_prime(candidate)  # result discarded: this is pure CPU load
        candidate += 1
        checks += 1
    return checks
|
| 120 |
+
|
| 121 |
+
# ---------------------------------------------------------------------------
|
| 122 |
+
# Benchmarks
|
| 123 |
+
# ---------------------------------------------------------------------------
|
| 124 |
+
|
| 125 |
+
def benchmark_single_core_integer(duration: float = 2.0) -> Dict[str, Any]:
    """Single-core integer benchmark (native C when available, Python otherwise)."""
    native = compile_and_load_lib()

    t0 = time.time()
    if native:
        operations = native.benchmark_integer_time(duration)
        elapsed = time.time() - t0
        # The C routine returns strictly after `duration`; clamp the
        # wall-clock reading so the rate is not over-stated.
        elapsed = max(elapsed, duration)
        label = "Prime calculation (Native C)"
    else:
        operations = _python_single_core_integer(duration)
        elapsed = time.time() - t0
        label = "Prime calculation (Python Fallback)"

    rate = operations / elapsed if elapsed > 0 else 0

    return {
        "test": "single_core_integer",
        "description": label,
        "duration_seconds": round(elapsed, 3),
        "operations": operations,
        "ops_per_second": round(rate, 2),
        # Score scaling accounts for native-C speeds.
        "score": round(rate / 100000, 2),
    }
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
def benchmark_multi_core_integer(duration: float = 2.0) -> Dict[str, Any]:
    """Multi-core integer benchmark: one worker process per logical CPU."""
    cores = multiprocessing.cpu_count()
    native_available = compile_and_load_lib() is not None
    worker = _native_worker_integer if native_available else _python_single_core_integer

    t0 = time.time()
    with ProcessPoolExecutor(max_workers=cores) as pool:
        pending = [pool.submit(worker, duration) for _ in range(cores)]
        total_ops = sum(task.result() for task in pending)

    # Pool startup/teardown adds overhead; never report less than `duration`.
    elapsed = max(time.time() - t0, duration)
    rate = total_ops / elapsed
    backend = "Native C" if native_available else "Python"

    return {
        "test": "multi_core_integer",
        "description": f"Parallel Prime Calc ({cores} cores, {backend})",
        "duration_seconds": round(elapsed, 3),
        "cores_used": cores,
        "operations": total_ops,
        "ops_per_second": round(rate, 2),
        "score": round(rate / 100000, 2),
    }
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def benchmark_single_core_float(duration: float = 2.0) -> Dict[str, Any]:
    """Single-core floating-point benchmark (native C math when available).

    Fix: in the Python fallback each counted "operation" is a batch of
    1000 multiply-adds, so the FLOP estimate now uses 2000 FLOPs per op
    (it previously used 2, under-reporting GFLOPS by 1000x).
    """
    lib = compile_and_load_lib()

    if lib:
        start_time = time.time()
        operations = lib.benchmark_float_time(duration)
        elapsed = time.time() - start_time
        desc = "Heavy Math (Native C: sin/cos/sqrt)"
        flops_per_op = 20  # sin/cos/sqrt mix, ~20 FLOPs per native iteration
    else:
        # Pure-Python fallback: batches of 1000 multiply-adds per counted op.
        start_time = time.time()
        a = 1.1
        ops = 0
        while time.time() - start_time < duration:
            for _ in range(1000):
                a = (a * 1.000001) + 0.000001
            ops += 1
        elapsed = time.time() - start_time
        operations = ops
        desc = "Float Loop (Python Fallback)"
        flops_per_op = 2000  # 1000 iterations x (1 mul + 1 add) per op

    ops_per_sec = operations / elapsed if elapsed > 0 else 0
    gflops = (ops_per_sec * flops_per_op) / 1e9

    return {
        "test": "single_core_float",
        "description": desc,
        "duration_seconds": round(elapsed, 3),
        "operations": operations,
        "ops_per_second": round(ops_per_sec, 2),
        "gflops": round(gflops, 4),
        "score": round(ops_per_sec / 100000, 2),
    }
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
def benchmark_multi_core_float(duration: float = 2.0) -> Dict[str, Any]:
    """Multi-core floating-point benchmark; requires the native library."""
    cores = multiprocessing.cpu_count()
    if compile_and_load_lib() is None:
        # The pure-Python float loop is far too slow to fan out safely.
        return {"error": "Native lib required for multi-core float bench"}

    t0 = time.time()
    with ProcessPoolExecutor(max_workers=cores) as pool:
        pending = [pool.submit(_native_worker_float, duration) for _ in range(cores)]
        total_ops = sum(task.result() for task in pending)
    elapsed = time.time() - t0

    rate = total_ops / elapsed if elapsed > 0 else 0
    gflops = (rate * 20) / 1e9  # ~20 FLOPs per native iteration

    return {
        "test": "multi_core_float",
        "description": f"Parallel Math ({cores} cores, Native C)",
        "duration_seconds": round(elapsed, 3),
        "cores_available": cores,
        "operations": total_ops,
        "ops_per_second": round(rate, 2),
        "gflops": round(gflops, 4),
        "score": round(rate / 100000, 2),
    }
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
def benchmark_crypto(duration: float = 2.0) -> Dict[str, Any]:
|
| 280 |
+
"""加密性能测试 (OpenSSL via hashlib)"""
|
| 281 |
+
# hashlib calls C-level OpenSSL, so it is already valid "native" benchmark
|
| 282 |
+
data = b'x' * 1024 * 1024 # 1MB
|
| 283 |
+
start_time = time.time()
|
| 284 |
+
iterations = 0
|
| 285 |
+
|
| 286 |
+
while time.time() - start_time < duration:
|
| 287 |
+
hashlib.sha256(data).hexdigest()
|
| 288 |
+
iterations += 1
|
| 289 |
+
|
| 290 |
+
elapsed = time.time() - start_time
|
| 291 |
+
mb_per_sec = iterations / elapsed
|
| 292 |
+
|
| 293 |
+
return {
|
| 294 |
+
"test": "crypto_sha256",
|
| 295 |
+
"description": "SHA256 Hashing (OpenSSL)",
|
| 296 |
+
"duration_seconds": round(elapsed, 3),
|
| 297 |
+
"throughput_mb_per_sec": round(mb_per_sec, 2),
|
| 298 |
+
"score": round(mb_per_sec * 2, 2),
|
| 299 |
+
}
|
| 300 |
+
|
| 301 |
+
def benchmark_compression(duration: float = 2.0) -> Dict[str, Any]:
|
| 302 |
+
"""压缩性能测试 (zlib C library)"""
|
| 303 |
+
# zlib calls C-level library
|
| 304 |
+
chunk_size = 1024 * 1024
|
| 305 |
+
data = os.urandom(chunk_size)
|
| 306 |
+
start_time = time.time()
|
| 307 |
+
total_bytes = 0
|
| 308 |
+
|
| 309 |
+
while time.time() - start_time < duration:
|
| 310 |
+
c = zlib.compress(data, level=6)
|
| 311 |
+
_ = zlib.decompress(c)
|
| 312 |
+
total_bytes += chunk_size
|
| 313 |
+
|
| 314 |
+
elapsed = time.time() - start_time
|
| 315 |
+
mb_per_sec = (total_bytes / elapsed) / (1024 * 1024)
|
| 316 |
+
|
| 317 |
+
return {
|
| 318 |
+
"test": "compression_zlib",
|
| 319 |
+
"description": "Zlib Compression (Native)",
|
| 320 |
+
"duration_seconds": round(elapsed, 3),
|
| 321 |
+
"throughput_mb_per_sec": round(mb_per_sec, 2),
|
| 322 |
+
"score": round(mb_per_sec * 0.5, 2),
|
| 323 |
+
}
|
| 324 |
+
|
| 325 |
+
def benchmark_single_thread_stress(duration: float = 2.0) -> Dict[str, Any]:
    """Scheduler wakeup rate: how many 0.1 ms sleeps complete per second."""
    t0 = time.time()
    wakeups = 0
    while time.time() - t0 < duration:
        time.sleep(0.0001)  # nanosleep syscall -> kernel scheduler round-trip
        wakeups += 1
    elapsed = time.time() - t0

    return {
        "test": "single_thread_stress",
        "description": "Scheduler Stress (nanosleep syscall)",
        "duration_seconds": round(elapsed, 3),
        "wakeups_per_second": round(wakeups / elapsed, 2),
        "score": round(wakeups / elapsed / 100, 2),
    }
|
| 342 |
+
|
| 343 |
+
def run_all_cpu_benchmarks() -> Dict[str, Any]:
    """Run every CPU benchmark and append an aggregate total_score."""
    # Build the native library up front so worker processes can just load it.
    compile_and_load_lib()

    results: Dict[str, Any] = {
        "single_core_integer": benchmark_single_core_integer(),
        "multi_core_integer": benchmark_multi_core_integer(),
        "single_core_float": benchmark_single_core_float(),
        "multi_core_float": benchmark_multi_core_float(),
        "crypto": benchmark_crypto(),
        "compression": benchmark_compression(),
        "stress": benchmark_single_thread_stress(),
    }

    # Sum only the entries that produced a score (error dicts have none).
    total_score = sum(item["score"] for item in results.values() if "score" in item)
    results["total_score"] = round(total_score, 2)
    return results
|
benchmarks/cpu_ops.c
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <math.h>
|
| 2 |
+
#include <time.h>
|
| 3 |
+
#include <stdint.h>
|
| 4 |
+
#include <stdlib.h>
|
| 5 |
+
|
| 6 |
+
// --------------------------------------------------------------------------
|
| 7 |
+
// Integer Benchmark: Prime Number Calculation
|
| 8 |
+
// --------------------------------------------------------------------------
|
| 9 |
+
|
| 10 |
+
// Trial-division primality check.  Deliberately unoptimized so it acts as
// a steady, predictable CPU workload for the timed benchmark loop.
int is_prime(uint64_t n) {
    if (n < 2) return 0;
    if (n == 2) return 1;
    if (n % 2 == 0) return 0;

    // Only odd divisors up to sqrt(n) need testing.
    uint64_t limit = (uint64_t)sqrt(n);
    for (uint64_t i = 3; i <= limit; i += 2) {
        if (n % i == 0) return 0;
    }
    return 1;
}
|
| 23 |
+
|
| 24 |
+
// Check primes in a range. Returns number of primes found.
|
| 25 |
+
// Used for parallel execution validation, but for pure stress we can just run for time.
|
| 26 |
+
/* Count primes in the half-open interval [start, end) using is_prime().
 * Mainly used to validate parallel execution of the prime workload. */
uint64_t count_primes_in_range(uint64_t start, uint64_t end) {
    uint64_t total = 0;
    uint64_t value;
    for (value = start; value < end; ++value) {
        total += (uint64_t)is_prime(value);
    }
    return total;
}
|
| 35 |
+
|
| 36 |
+
// Run prime calculation for exactly `duration` seconds
|
| 37 |
+
// Returns total numbers checked
|
| 38 |
+
uint64_t benchmark_integer_time(double duration) {
|
| 39 |
+
struct timespec start_ts, current_ts;
|
| 40 |
+
clock_gettime(CLOCK_MONOTONIC, &start_ts);
|
| 41 |
+
|
| 42 |
+
uint64_t n = 3; // Start from 3
|
| 43 |
+
uint64_t ops = 0;
|
| 44 |
+
|
| 45 |
+
while (1) {
|
| 46 |
+
// Bulk check to reduce clock_gettime overhead
|
| 47 |
+
for (int i = 0; i < 1000; i++) {
|
| 48 |
+
is_prime(n++);
|
| 49 |
+
ops++;
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
clock_gettime(CLOCK_MONOTONIC, ¤t_ts);
|
| 53 |
+
double elapsed = (current_ts.tv_sec - start_ts.tv_sec) +
|
| 54 |
+
(current_ts.tv_nsec - start_ts.tv_nsec) * 1e-9;
|
| 55 |
+
|
| 56 |
+
if (elapsed >= duration) {
|
| 57 |
+
break;
|
| 58 |
+
}
|
| 59 |
+
}
|
| 60 |
+
return ops;
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
// --------------------------------------------------------------------------
|
| 64 |
+
// Single Core Stress: Context Switch Simulation (Busy Wait vs Sleep)
|
| 65 |
+
// --------------------------------------------------------------------------
|
| 66 |
+
// Actually, pure C sleep logic might be better than Python's sleep(0.0001) overhead.
|
| 67 |
+
// But context switching involves system calls.
|
| 68 |
+
// Let's implement a busy-loop or heavy computation stub.
|
| 69 |
+
|
| 70 |
+
// --------------------------------------------------------------------------
|
| 71 |
+
// Float Benchmark: Heavy Math
|
| 72 |
+
// --------------------------------------------------------------------------
|
| 73 |
+
|
| 74 |
+
// Perform heavy floating point ops: sin, cos, sqrt
|
| 75 |
+
uint64_t benchmark_float_time(double duration) {
|
| 76 |
+
struct timespec start_ts, current_ts;
|
| 77 |
+
clock_gettime(CLOCK_MONOTONIC, &start_ts);
|
| 78 |
+
|
| 79 |
+
double a = 1.1;
|
| 80 |
+
double b = 2.2;
|
| 81 |
+
double c = 3.3;
|
| 82 |
+
uint64_t ops = 0;
|
| 83 |
+
|
| 84 |
+
while (1) {
|
| 85 |
+
for (int i = 0; i < 1000; i++) {
|
| 86 |
+
a = sin(a) * cos(b) + sqrt(fabs(c));
|
| 87 |
+
b = cos(a) * sin(c) + sqrt(fabs(b));
|
| 88 |
+
c = a + b;
|
| 89 |
+
// Keep them bounded to avoid inf/nan issues affecting perf slightly
|
| 90 |
+
if (a > 1000.0) a = 1.1;
|
| 91 |
+
if (b > 1000.0) b = 2.2;
|
| 92 |
+
if (c > 1000.0) c = 3.3;
|
| 93 |
+
ops++;
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
clock_gettime(CLOCK_MONOTONIC, ¤t_ts);
|
| 97 |
+
double elapsed = (current_ts.tv_sec - start_ts.tv_sec) +
|
| 98 |
+
(current_ts.tv_nsec - start_ts.tv_nsec) * 1e-9;
|
| 99 |
+
|
| 100 |
+
if (elapsed >= duration) {
|
| 101 |
+
break;
|
| 102 |
+
}
|
| 103 |
+
}
|
| 104 |
+
return ops;
|
| 105 |
+
}
|
benchmarks/disk_bench.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Disk Benchmark Module
|
| 3 |
+
磁盘 I/O 性能测试:顺序读写、随机 IOPS
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
import time
|
| 8 |
+
import tempfile
|
| 9 |
+
import numpy as np
|
| 10 |
+
from typing import Dict, Any
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def benchmark_sequential_write(size_mb: int = 256, block_size_kb: int = 1024) -> Dict[str, Any]:
    """Sequential write throughput test.

    Writes ``size_mb`` MiB of random data in ``block_size_kb`` KiB blocks to a
    temporary file, fsyncs, and reports throughput in MB/s plus a derived score.
    """
    block_bytes = block_size_kb * 1024
    blocks = (size_mb * 1024 * 1024) // block_bytes

    # One random block is reused for every write; generating fresh random
    # data per block would benchmark the RNG rather than the disk.
    payload = os.urandom(block_bytes)

    with tempfile.NamedTemporaryFile(delete=True) as tmp:
        t0 = time.time()
        for _ in range(blocks):
            tmp.write(payload)
        tmp.flush()
        os.fsync(tmp.fileno())  # include flush-to-disk cost in the timing
        elapsed = time.time() - t0

    mb_per_s = size_mb / elapsed

    return {
        "test": "sequential_write",
        "description": f"Sequential write ({size_mb}MB, {block_size_kb}KB blocks)",
        "size_mb": size_mb,
        "block_size_kb": block_size_kb,
        "duration_seconds": round(elapsed, 3),
        "throughput_mb_s": round(mb_per_s, 2),
        "score": round(mb_per_s / 10, 2),
    }
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def benchmark_sequential_read(size_mb: int = 256, block_size_kb: int = 1024) -> Dict[str, Any]:
    """Sequential read throughput test.

    Writes a ``size_mb`` MiB temporary file, attempts to drop the page cache,
    then reads the file back in ``block_size_kb`` KiB blocks and reports MB/s.

    Note: without root the cache drop silently fails, so the result may
    measure page-cache speed rather than the disk.
    """
    block_size = block_size_kb * 1024
    total_size = size_mb * 1024 * 1024

    with tempfile.NamedTemporaryFile(delete=True) as f:
        # Populate the file (a single random block, repeated).
        data = os.urandom(block_size)
        for _ in range(total_size // block_size):
            f.write(data)
        f.flush()
        os.fsync(f.fileno())

        # Best-effort page-cache drop (requires root). The subshell makes the
        # 2>/dev/null apply to the failing redirection itself; the original
        # form leaked "Permission denied" to stderr when not root. os.system
        # never raises on command failure, so no try/except is needed.
        os.system('sync; (echo 3 > /proc/sys/vm/drop_caches) 2>/dev/null')

        # Timed sequential read of the whole file.
        f.seek(0)
        start_time = time.time()
        while f.read(block_size):
            pass
        elapsed = time.time() - start_time

    throughput = size_mb / elapsed

    return {
        "test": "sequential_read",
        "description": f"Sequential read ({size_mb}MB, {block_size_kb}KB blocks)",
        "size_mb": size_mb,
        "block_size_kb": block_size_kb,
        "duration_seconds": round(elapsed, 3),
        "throughput_mb_s": round(throughput, 2),
        "score": round(throughput / 10, 2),
    }
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def benchmark_random_read_iops(duration: float = 3.0, block_size: int = 4096,
                               file_size_mb: int = 128) -> Dict[str, Any]:
    """Random read IOPS test.

    Creates a ``file_size_mb`` MiB temporary file and performs random
    ``block_size``-byte reads for ``duration`` seconds.

    ``file_size_mb`` is new; its default matches the previously hard-coded
    128 MiB, so existing callers see identical behavior.
    """
    file_size = file_size_mb * 1024 * 1024

    with tempfile.NamedTemporaryFile(delete=True) as f:
        # Create and persist the test file.
        f.write(os.urandom(file_size))
        f.flush()
        os.fsync(f.fileno())

        # Highest offset that still leaves a full block to read.
        max_offset = file_size - block_size

        start_time = time.time()
        operations = 0
        while time.time() - start_time < duration:
            f.seek(np.random.randint(0, max_offset))
            f.read(block_size)
            operations += 1
        elapsed = time.time() - start_time

    iops = operations / elapsed

    return {
        "test": "random_read_iops",
        "description": "Random 4K read IOPS",
        "block_size_bytes": block_size,
        "duration_seconds": round(elapsed, 3),
        "operations": operations,
        "iops": round(iops, 2),
        "score": round(iops / 100, 2),
    }
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def benchmark_random_write_iops(duration: float = 3.0, block_size: int = 4096,
                                file_size_mb: int = 128) -> Dict[str, Any]:
    """Random write IOPS test.

    Pre-allocates a ``file_size_mb`` MiB temporary file and performs random
    ``block_size``-byte writes for ``duration`` seconds.

    ``file_size_mb`` is new; its default matches the previously hard-coded
    128 MiB, so existing callers see identical behavior.
    """
    file_size = file_size_mb * 1024 * 1024
    data = os.urandom(block_size)

    with tempfile.NamedTemporaryFile(delete=True) as f:
        # Pre-allocate so writes land inside an existing file.
        f.write(os.urandom(file_size))
        f.flush()

        # Highest offset that still leaves room for a full block.
        max_offset = file_size - block_size

        start_time = time.time()
        operations = 0
        while time.time() - start_time < duration:
            f.seek(np.random.randint(0, max_offset))
            f.write(data)
            operations += 1

        # The final flush is deliberately counted in the elapsed time.
        f.flush()
        elapsed = time.time() - start_time

    iops = operations / elapsed

    return {
        "test": "random_write_iops",
        "description": "Random 4K write IOPS",
        "block_size_bytes": block_size,
        "duration_seconds": round(elapsed, 3),
        "operations": operations,
        "iops": round(iops, 2),
        "score": round(iops / 100, 2),
    }
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def run_all_disk_benchmarks() -> Dict[str, Any]:
    """Run every disk benchmark and attach an aggregate ``total_score``."""
    suite = (
        ("sequential_write", benchmark_sequential_write),
        ("sequential_read", benchmark_sequential_read),
        ("random_read_iops", benchmark_random_read_iops),
        ("random_write_iops", benchmark_random_write_iops),
    )
    results: Dict[str, Any] = {name: bench() for name, bench in suite}

    # Sum the per-test scores before the aggregate key is added.
    results["total_score"] = round(
        sum(r.get("score", 0) for r in results.values()), 2
    )

    return results
|
benchmarks/gpu_bench.py
ADDED
|
@@ -0,0 +1,255 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
GPU Benchmark Module
|
| 3 |
+
GPU 性能测试:显存带宽、FP32/FP16/Tensor Core 算力
|
| 4 |
+
仅在有 NVIDIA GPU 时可用
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import time
|
| 8 |
+
from typing import Dict, Any, Optional
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def check_gpu_available() -> bool:
    """Return True if ``nvidia-smi`` runs successfully (an NVIDIA GPU is visible).

    The original used a bare ``except:``, which also swallowed
    KeyboardInterrupt/SystemExit; now only the expected failure modes
    (binary missing/not executable, timeout) are caught.
    """
    import subprocess
    try:
        result = subprocess.run(['nvidia-smi'], capture_output=True, timeout=5)
    except (OSError, subprocess.SubprocessError):
        # nvidia-smi not installed, not executable, or the 5 s timeout fired.
        return False
    return result.returncode == 0
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def check_cuda_available() -> bool:
    """Return True when PyTorch is installed and reports a usable CUDA device."""
    try:
        import torch
    except ImportError:
        # No PyTorch at all -> no CUDA.
        return False
    return torch.cuda.is_available()
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def benchmark_gpu_memory_bandwidth() -> Optional[Dict[str, Any]]:
    """Measure effective GPU memory bandwidth via repeated device-to-device copies.

    Returns None when CUDA is unavailable, or ``{"error": ...}`` on failure.
    """
    if not check_cuda_available():
        return None

    try:
        import torch

        device = torch.device('cuda')

        size_mb = 256
        n_elems = size_mb * 1024 * 1024 // 4  # float32 elements

        src = torch.ones(n_elems, dtype=torch.float32, device=device)

        # Warm-up so allocator/caching effects don't skew the timing.
        for _ in range(10):
            dst = src.clone()
        torch.cuda.synchronize()

        # Timed copy loop (~2 s).
        t0 = time.time()
        iterations = 0
        while time.time() - t0 < 2.0:
            dst = src.clone()
            iterations += 1
        torch.cuda.synchronize()
        elapsed = time.time() - t0

        # Each clone reads the buffer once and writes it once.
        moved_bytes = size_mb * 1024 * 1024 * 2 * iterations
        bandwidth = moved_bytes / elapsed / (1024 ** 3)

        return {
            "test": "gpu_memory_bandwidth",
            "description": f"GPU memory bandwidth ({size_mb}MB)",
            "size_mb": size_mb,
            "duration_seconds": round(elapsed, 3),
            "iterations": iterations,
            "bandwidth_gb_s": round(bandwidth, 2),
            "score": round(bandwidth, 2),
        }
    except Exception as e:
        return {"error": str(e)}
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def benchmark_gpu_fp32(matrix_size: int = 4096) -> Optional[Dict[str, Any]]:
    """Measure GPU FP32 throughput (TFLOPS) using square matrix multiplies.

    Returns None when CUDA is unavailable, or ``{"error": ...}`` on failure.
    """
    if not check_cuda_available():
        return None

    try:
        import torch

        dev = torch.device('cuda')

        lhs = torch.randn(matrix_size, matrix_size, dtype=torch.float32, device=dev)
        rhs = torch.randn(matrix_size, matrix_size, dtype=torch.float32, device=dev)

        # Warm-up to populate cuBLAS kernel caches.
        for _ in range(5):
            _ = torch.mm(lhs, rhs)
        torch.cuda.synchronize()

        # Timed matmul loop (~3 s).
        t0 = time.time()
        count = 0
        while time.time() - t0 < 3.0:
            _ = torch.mm(lhs, rhs)
            count += 1
        torch.cuda.synchronize()
        elapsed = time.time() - t0

        # One NxN matmul costs 2*N^3 FLOPs (multiply + add).
        tflops = 2 * (matrix_size ** 3) * count / elapsed / 1e12

        return {
            "test": "gpu_fp32",
            "description": f"GPU FP32 compute ({matrix_size}x{matrix_size} matmul)",
            "matrix_size": matrix_size,
            "duration_seconds": round(elapsed, 3),
            "iterations": count,
            "tflops": round(tflops, 3),
            "score": round(tflops * 10, 2),
        }
    except Exception as e:
        return {"error": str(e)}
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def benchmark_gpu_fp16(matrix_size: int = 4096) -> Optional[Dict[str, Any]]:
    """Measure GPU FP16 throughput (TFLOPS) using square matrix multiplies.

    Returns None when CUDA is unavailable, or ``{"error": ...}`` on failure.
    """
    if not check_cuda_available():
        return None

    try:
        import torch

        dev = torch.device('cuda')

        lhs = torch.randn(matrix_size, matrix_size, dtype=torch.float16, device=dev)
        rhs = torch.randn(matrix_size, matrix_size, dtype=torch.float16, device=dev)

        # Warm-up to populate cuBLAS kernel caches.
        for _ in range(5):
            _ = torch.mm(lhs, rhs)
        torch.cuda.synchronize()

        # Timed matmul loop (~3 s).
        t0 = time.time()
        count = 0
        while time.time() - t0 < 3.0:
            _ = torch.mm(lhs, rhs)
            count += 1
        torch.cuda.synchronize()
        elapsed = time.time() - t0

        # One NxN matmul costs 2*N^3 FLOPs (multiply + add).
        tflops = 2 * (matrix_size ** 3) * count / elapsed / 1e12

        return {
            "test": "gpu_fp16",
            "description": f"GPU FP16 compute ({matrix_size}x{matrix_size} matmul)",
            "matrix_size": matrix_size,
            "duration_seconds": round(elapsed, 3),
            "iterations": count,
            "tflops": round(tflops, 3),
            "score": round(tflops * 5, 2),
        }
    except Exception as e:
        return {"error": str(e)}
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def benchmark_gpu_tensor_cores(matrix_size: int = 4096) -> Optional[Dict[str, Any]]:
    """Measure mixed-precision matmul throughput, exercising Tensor Cores.

    Requires compute capability >= 7.0 (Volta or newer); otherwise returns an
    error dict. Returns None when CUDA is unavailable.
    """
    if not check_cuda_available():
        return None

    try:
        import torch

        if not hasattr(torch.cuda, 'amp') or torch.cuda.get_device_capability()[0] < 7:
            return {"error": "Tensor Cores not available (requires compute capability >= 7.0)"}

        dev = torch.device('cuda')

        lhs = torch.randn(matrix_size, matrix_size, dtype=torch.float16, device=dev)
        rhs = torch.randn(matrix_size, matrix_size, dtype=torch.float16, device=dev)

        # Warm-up under autocast.
        with torch.cuda.amp.autocast():
            for _ in range(5):
                _ = torch.mm(lhs, rhs)
        torch.cuda.synchronize()

        # Timed matmul loop (~3 s) under autocast.
        t0 = time.time()
        count = 0
        with torch.cuda.amp.autocast():
            while time.time() - t0 < 3.0:
                _ = torch.mm(lhs, rhs)
                count += 1
        torch.cuda.synchronize()
        elapsed = time.time() - t0

        # One NxN matmul costs 2*N^3 FLOPs (multiply + add).
        tflops = 2 * (matrix_size ** 3) * count / elapsed / 1e12

        return {
            "test": "gpu_tensor_cores",
            "description": f"GPU Tensor Cores mixed precision ({matrix_size}x{matrix_size})",
            "matrix_size": matrix_size,
            "duration_seconds": round(elapsed, 3),
            "iterations": count,
            "tflops": round(tflops, 3),
            "score": round(tflops * 3, 2),
        }
    except Exception as e:
        return {"error": str(e)}
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
def run_all_gpu_benchmarks() -> Optional[Dict[str, Any]]:
    """Run all GPU benchmarks.

    Returns None when no NVIDIA GPU is visible or every sub-test declined to
    run; otherwise a dict of results plus an aggregate ``total_score`` that
    ignores sub-tests which reported an error.
    """
    if not check_gpu_available():
        return None

    suite = (
        ("memory_bandwidth", benchmark_gpu_memory_bandwidth),
        ("fp32", benchmark_gpu_fp32),
        ("fp16", benchmark_gpu_fp16),
        ("tensor_cores", benchmark_gpu_tensor_cores),
    )

    results: Dict[str, Any] = {}
    for key, bench in suite:
        outcome = bench()
        if outcome:
            results[key] = outcome

    if not results:
        return None

    # Only successful sub-tests (dicts without an "error" key) count.
    total = sum(
        r.get("score", 0)
        for r in results.values()
        if isinstance(r, dict) and "error" not in r
    )
    results["total_score"] = round(total, 2)

    return results
|
benchmarks/memory_bench.py
ADDED
|
@@ -0,0 +1,331 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Memory Benchmark Module
|
| 3 |
+
内存性能测试:带宽测试、延迟测试、缓存性能
|
| 4 |
+
Optimized with ctypes for raw C-level performance
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import time
|
| 8 |
+
import ctypes
|
| 9 |
+
import multiprocessing
|
| 10 |
+
import mmap
|
| 11 |
+
import os
|
| 12 |
+
import numpy as np # Keep for latency/cache tests
|
| 13 |
+
from concurrent.futures import ProcessPoolExecutor
|
| 14 |
+
from typing import Dict, Any
|
| 15 |
+
|
| 16 |
+
# Load C standard library
|
| 17 |
+
try:
|
| 18 |
+
libc = ctypes.CDLL("libc.so.6")
|
| 19 |
+
libc.memset.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_size_t]
|
| 20 |
+
libc.memcpy.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t]
|
| 21 |
+
libc.memchr.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_size_t]
|
| 22 |
+
except Exception:
|
| 23 |
+
libc = None
|
| 24 |
+
|
| 25 |
+
# --- C Extension Handling ---
|
| 26 |
+
C_LIB_PATH = os.path.join(os.path.dirname(__file__), "_memory_bench_c.so")
|
| 27 |
+
C_SRC_PATH = os.path.join(os.path.dirname(__file__), "memory_bench_c.c")
|
| 28 |
+
|
| 29 |
+
def _compile_c_helper():
    """Compile (if needed) and load the C helper shared library.

    Rebuilds ``_memory_bench_c.so`` when it is missing or older than its
    source, then loads it via ctypes and declares the helper signatures.

    Returns:
        The loaded ``ctypes.CDLL`` or None on any failure.
    """
    import subprocess

    if not os.path.exists(C_SRC_PATH):
        return None

    # Recompile when the .so is missing or stale relative to the source.
    needs_compile = (
        not os.path.exists(C_LIB_PATH)
        or os.path.getmtime(C_SRC_PATH) > os.path.getmtime(C_LIB_PATH)
    )

    if needs_compile:
        # -O3 as before. An argument list (shell=False) replaces the old
        # f-string shell command, which broke on paths containing spaces
        # or shell metacharacters.
        cmd = ["gcc", "-O3", "-shared", "-fPIC", "-o", C_LIB_PATH, C_SRC_PATH]
        try:
            proc = subprocess.run(cmd, capture_output=True)
        except OSError:
            # gcc not installed / not executable.
            print("Failed to compile C helper.")
            return None
        if proc.returncode != 0:
            print("Failed to compile C helper.")
            return None

    try:
        lib = ctypes.CDLL(C_LIB_PATH)
        lib.measure_latency_random.argtypes = [ctypes.c_size_t, ctypes.c_size_t]
        lib.measure_latency_random.restype = ctypes.c_double

        lib.measure_latency_sequential.argtypes = [ctypes.c_size_t, ctypes.c_size_t]
        lib.measure_latency_sequential.restype = ctypes.c_double

        lib.measure_alloc_rate.argtypes = [ctypes.c_size_t, ctypes.c_size_t]
        lib.measure_alloc_rate.restype = ctypes.c_double
        return lib
    except Exception as e:
        print(f"Failed to load C helper: {e}")
        return None

c_lib = _compile_c_helper()
|
| 65 |
+
|
| 66 |
+
def _raw_memory_worker(args):
    """
    Worker process for the memory bandwidth test using raw libc calls.

    Args:
        args: tuple ``(block_size_mb, duration, mode)`` with mode one of
              'read', 'write', 'copy'. Roughly equivalent to ``sysbench memory``.

    Returns:
        ``(iterations, elapsed_seconds)``.
    """
    block_size_mb, duration, mode = args
    block_size = block_size_mb * 1024 * 1024

    # Anonymous mmap gives page-aligned raw memory with no Python object
    # overhead.
    src_map = mmap.mmap(-1, block_size)
    dst_map = mmap.mmap(-1, block_size) if mode == 'copy' else None

    # Keep the ctypes views alive for the whole loop so the raw addresses
    # stay backed by an exported buffer. The original dropped the
    # from_buffer() temporaries immediately, leaving technically dangling
    # pointers (and had it kept them, close() would raise BufferError).
    src_buf = ctypes.c_char.from_buffer(src_map)
    src_addr = ctypes.addressof(src_buf)
    dst_buf = ctypes.c_char.from_buffer(dst_map) if dst_map else None
    dst_addr = ctypes.addressof(dst_buf) if dst_buf else 0

    # Bind libc functions to locals (hot loop).
    memset = libc.memset
    memcpy = libc.memcpy
    memchr = libc.memchr

    start_time = time.time()
    iterations = 0
    try:
        while time.time() - start_time < duration:
            if mode == 'read':
                # memchr for a byte that isn't present forces a full scan
                # (read access over the whole block).
                memchr(src_addr, 1, block_size)
            elif mode == 'write':
                memset(src_addr, 0, block_size)
            elif mode == 'copy':
                memcpy(dst_addr, src_addr, block_size)
            iterations += 1
        elapsed = time.time() - start_time
    finally:
        # Release the exported buffers BEFORE closing the maps; mmap.close()
        # raises BufferError while exports exist.
        del src_buf, dst_buf
        src_map.close()
        if dst_map:
            dst_map.close()

    return iterations, elapsed
|
| 116 |
+
|
| 117 |
+
def benchmark_memory_bandwidth(block_size_mb: int = 4) -> Dict[str, Any]:
    """
    Memory bandwidth test (raw C performance).

    Spawns one worker per CPU core, each hammering a ``block_size_mb`` MiB
    buffer via libc memchr/memset/memcpy, and reports aggregate GB/s for
    read, write and copy. A 4 MiB per-worker block is used so the working
    set spills past cache into RAM.
    """
    if not libc:
        return {"error": "libc not found, cannot run optimized benchmark"}

    num_cores = multiprocessing.cpu_count()
    duration = 3.0
    bytes_per_iter = block_size_mb * 1024 * 1024

    stats = {}

    with ProcessPoolExecutor(max_workers=num_cores) as pool:
        for mode in ('read', 'write', 'copy'):
            futures = [
                pool.submit(_raw_memory_worker, (block_size_mb, duration, mode))
                for _ in range(num_cores)
            ]

            iterations_sum = 0
            slowest = 0
            for future in futures:
                done_iters, took = future.result()
                iterations_sum += done_iters
                slowest = max(slowest, took)

            total_bytes = iterations_sum * bytes_per_iter
            # A copy touches the bus twice: read the source, write the
            # destination.
            if mode == 'copy':
                total_bytes *= 2

            # Guard against a zero elapsed time.
            gb_s = total_bytes / slowest / (1024 ** 3) if slowest > 0 else 0
            stats[f"{mode}_bandwidth_gb_s"] = round(gb_s, 3)

    return {
        "test": "memory_bandwidth",
        "description": f"Memory bandwidth test (Multi-core C-level, {num_cores} threads)",
        "block_size_mb": block_size_mb,
        "read_bandwidth_gb_s": stats['read_bandwidth_gb_s'],
        "write_bandwidth_gb_s": stats['write_bandwidth_gb_s'],
        "copy_bandwidth_gb_s": stats['copy_bandwidth_gb_s'],
        "score": round((stats['read_bandwidth_gb_s'] + stats['write_bandwidth_gb_s'] + stats['copy_bandwidth_gb_s']) * 10, 2),
    }
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def benchmark_memory_latency(iterations: int = 10000000) -> Dict[str, Any]:
    """
    Random-access memory latency test (pointer chasing).

    Delegates the timed loop to the compiled C helper; the 64 MiB working
    set is chosen to fall out of cache and hit RAM.
    """
    if not c_lib:
        return {"error": "C helper not available"}

    working_set = 64 * 1024 * 1024  # bytes

    elapsed = c_lib.measure_latency_random(working_set, iterations)
    if elapsed <= 0:
        return {"error": "Benchmark failed"}

    latency_ns = elapsed / iterations * 1e9

    return {
        "test": "memory_latency_random",
        "description": "Random access latency (64MB working set, Pointer Chasing)",
        "iterations": iterations,
        "total_time_seconds": round(elapsed, 4),
        "average_latency_ns": round(latency_ns, 2),
        "score": round(100 / latency_ns * 1000, 2),  # Adjusted score scale
    }
|
| 211 |
+
|
| 212 |
+
def benchmark_sequential_latency(iterations: int = 10000000) -> Dict[str, Any]:
    """
    Sequential-access memory latency test (strided reads).

    Delegates the timed loop to the compiled C helper; uses the same
    64 MiB working set as the random-access variant for comparability.
    """
    if not c_lib:
        return {"error": "C helper not available"}

    working_set = 64 * 1024 * 1024  # bytes

    elapsed = c_lib.measure_latency_sequential(working_set, iterations)
    if elapsed <= 0:
        return {"error": "Benchmark failed"}

    latency_ns = elapsed / iterations * 1e9

    return {
        "test": "memory_latency_sequential",
        "description": "Sequential access latency (64MB working set, Strided Read)",
        "iterations": iterations,
        "total_time_seconds": round(elapsed, 4),
        "average_latency_ns": round(latency_ns, 2),
        "score": round(100 / latency_ns * 1000, 2),
    }
|
| 238 |
+
|
| 239 |
+
def benchmark_alloc_rate(iterations: int = 1000000) -> Dict[str, Any]:
    """Measure heap allocation/free throughput via the C helper.

    Times ``iterations`` malloc/free pairs of a small, common allocation
    size (1 KB) and reports operations per second.

    Args:
        iterations: Number of malloc/free pairs executed by the C helper.

    Returns:
        Dict with throughput and a derived score, or ``{"error": ...}``
        when the helper is missing or the run failed.
    """
    if not c_lib:
        return {"error": "C helper not available"}

    alloc_size = 1024  # 1 KB: a very common small-allocation size

    total_seconds = c_lib.measure_alloc_rate(alloc_size, iterations)
    if total_seconds <= 0:
        return {"error": "Benchmark failed"}

    throughput = iterations / total_seconds
    return {
        "test": "memory_alloc_rate",
        "description": f"Malloc/Free rate (Size: {alloc_size} bytes)",
        "iterations": iterations,
        "ops_per_sec": round(throughput, 2),
        "score": round(throughput / 10000, 2),
    }
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def benchmark_cache_latency() -> Dict[str, Any]:
    """Measure cache hierarchy latency (L1/L2/L3) via C pointer chasing.

    Runs the random-latency helper on working sets sized to sit inside
    each cache level and reports per-level latency in nanoseconds.

    Returns:
        Dict with per-level results and a score derived from L1 latency,
        or ``{"error": ...}`` when the C helper is unavailable.
    """
    if not c_lib:
        return {"error": "C helper not available"}

    results = {}

    # Approximate sizes: small enough to fit in the target cache level,
    # large enough to measure.
    # Typical: L1=32KB(use 16KB), L2=256KB(use 128KB), L3=8MB+(use 4MB).
    levels = [
        ("L1", 16 * 1024),
        ("L2", 128 * 1024),
        ("L3", 4 * 1024 * 1024)
    ]

    iterations = 10000000  # 10M pointer chases per level

    # Bug fix: the original ran an extra, identical loop first whose results
    # were discarded, doubling the benchmark's runtime for no benefit.
    for name, size in levels:
        elapsed = c_lib.measure_latency_random(size, iterations)
        if elapsed <= 0:
            # Helper failed for this level; record 0 rather than aborting.
            latency_ns = 0.0
        else:
            latency_ns = (elapsed / iterations) * 1e9

        results[name] = {
            "size_bytes": size,
            "latency_ns": round(latency_ns, 2)
        }

    # Score scales inversely with L1 latency; 0 if L1 could not be measured.
    l1_lat = results["L1"]["latency_ns"]
    score = 0
    if l1_lat > 0:
        score = round(100 / l1_lat * 500, 2)

    return {
        "test": "cache_latency",
        "description": "Cache hierarchy latency (Pointer Chasing)",
        "levels": results,
        "score": score
    }
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
def run_all_memory_benchmarks() -> Dict[str, Any]:
    """Run every memory benchmark and attach an aggregate score.

    Returns:
        Dict keyed by benchmark name plus a ``total_score`` entry that
        sums the individual scores (tests that errored contribute 0).
    """
    suite = (
        ("bandwidth", benchmark_memory_bandwidth),
        ("latency_random", benchmark_memory_latency),
        ("latency_sequential", benchmark_sequential_latency),
        ("cache_latency", benchmark_cache_latency),
        ("alloc_rate", benchmark_alloc_rate),
    )
    results: Dict[str, Any] = {name: bench() for name, bench in suite}

    # Aggregate score: sum across tests; the RHS is fully evaluated before
    # the "total_score" key is inserted.
    results["total_score"] = round(
        sum(entry.get("score", 0) for entry in results.values()), 2
    )
    return results
|
benchmarks/memory_bench_c.c
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <stdio.h>
|
| 2 |
+
#include <stdlib.h>
|
| 3 |
+
#include <time.h>
|
| 4 |
+
#include <string.h>
|
| 5 |
+
#include <stdint.h>
|
| 6 |
+
|
| 7 |
+
/* Linear congruential generator (glibc rand() constants).
 * Advances *seed in place and returns a 15-bit pseudo-random value.
 * For uint32_t, (x / 65536) % 32768 is exactly (x >> 16) & 0x7fff,
 * so the output is bit-identical to the original formulation. */
static inline uint32_t fast_rand(uint32_t *seed) {
    uint32_t next = *seed * 1103515245u + 12345u;
    *seed = next;
    return (next >> 16) & 0x7fffu;
}
|
| 12 |
+
|
| 13 |
+
// Measure random access latency (Pointer Chasing)
|
| 14 |
+
// size_bytes: Total size of the working set
|
| 15 |
+
// iterations: Number of pointer chases
|
| 16 |
+
double measure_latency_random(size_t size_bytes, size_t iterations) {
|
| 17 |
+
size_t num_elements = size_bytes / sizeof(void*);
|
| 18 |
+
if (num_elements < 2) return 0.0;
|
| 19 |
+
|
| 20 |
+
void **memory = (void**)malloc(size_bytes);
|
| 21 |
+
if (!memory) return -1.0;
|
| 22 |
+
|
| 23 |
+
// Use a simple Sattolo's algorithm for a single cycle permutation
|
| 24 |
+
// We want to ensure we visit every element exactly once in a full cycle
|
| 25 |
+
// But for latency testing, we just need a long chain.
|
| 26 |
+
// We will do a full shuffle of indices to create the chain.
|
| 27 |
+
|
| 28 |
+
uint32_t *indices = (uint32_t*)malloc(num_elements * sizeof(uint32_t));
|
| 29 |
+
if (!indices) { free(memory); return -1.0; }
|
| 30 |
+
|
| 31 |
+
for (size_t i = 0; i < num_elements; i++) indices[i] = i;
|
| 32 |
+
|
| 33 |
+
uint32_t seed = (uint32_t)time(NULL);
|
| 34 |
+
// Sattolo's algorithm for cyclic permutation
|
| 35 |
+
for (size_t i = num_elements - 1; i > 0; i--) {
|
| 36 |
+
size_t j = fast_rand(&seed) % i; // 0 <= j < i
|
| 37 |
+
uint32_t temp = indices[i];
|
| 38 |
+
indices[i] = indices[j];
|
| 39 |
+
indices[j] = temp;
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
// Link the list
|
| 43 |
+
// indices[i] -> indices[next] is not quite right for pointer chasing setup easily.
|
| 44 |
+
// We want memory[i] to point to memory[indices[i]]? No.
|
| 45 |
+
// We want memory[indices[i]] to point to memory[indices[i+1]].
|
| 46 |
+
// Actually, Sattolo gives us: value at pos i is the NEXT index.
|
| 47 |
+
// So if indices = [2, 0, 1], then 0->2, 2->1, 1->0.
|
| 48 |
+
// So memory[i] = &memory[indices[i]] is WRONG.
|
| 49 |
+
// memory[i] should point to memory[next_index].
|
| 50 |
+
// If indices array IS the permutation, e.g. indices[0]=2 implies 0->2.
|
| 51 |
+
// Yes, array[i] = &array[indices[i]] works if indices[i] is the 'next' node for node i.
|
| 52 |
+
|
| 53 |
+
for (size_t i = 0; i < num_elements; i++) {
|
| 54 |
+
memory[i] = &memory[indices[i]];
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
// Warmup
|
| 58 |
+
void **p = (void**)memory[0];
|
| 59 |
+
for(int i=0; i<1000; i++) p = (void**)*p;
|
| 60 |
+
|
| 61 |
+
struct timespec start, end;
|
| 62 |
+
clock_gettime(CLOCK_MONOTONIC, &start);
|
| 63 |
+
|
| 64 |
+
// Critical Loop
|
| 65 |
+
for (size_t i = 0; i < iterations; i++) {
|
| 66 |
+
p = (void**)*p;
|
| 67 |
+
// Compiler barrier to force the previous load to complete and prevent loop optimization
|
| 68 |
+
__asm__ volatile("" : "+r" (p));
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
clock_gettime(CLOCK_MONOTONIC, &end);
|
| 72 |
+
|
| 73 |
+
// Hand brake to prevent optimization
|
| 74 |
+
volatile void* result = p;
|
| 75 |
+
(void)result;
|
| 76 |
+
|
| 77 |
+
double elapsed = (end.tv_sec - start.tv_sec) + (end.tv_nsec - start.tv_nsec) / 1e9;
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
free(indices);
|
| 81 |
+
free(memory);
|
| 82 |
+
return elapsed;
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
// Measure sequential access latency.
// Walks the buffer reading one byte per cache-line stride, wrapping at the
// end, and returns total elapsed wall-clock seconds for `iterations` reads
// (-1.0 on allocation failure). Callers divide by `iterations`.
double measure_latency_sequential(size_t size_bytes, size_t iterations) {
    // volatile: every memory[pos] read below must actually be issued
    // rather than hoisted or folded away by the optimizer.
    volatile char *memory = (char*)malloc(size_bytes);
    if (!memory) return -1.0;

    // Initialize to force page faults now
    memset((void*)memory, 0, size_bytes);

    struct timespec start, end;
    clock_gettime(CLOCK_MONOTONIC, &start);

    size_t pos = 0;
    size_t stride = 64; // Cache Line
    // volatile ensures the read actually happens

    for (size_t i = 0; i < iterations; i++) {
        char v = memory[pos];
        (void)v; // silence unused-variable warnings
        pos += stride;
        if (pos >= size_bytes) pos = 0; // wrap back to the start of the buffer
    }

    clock_gettime(CLOCK_MONOTONIC, &end);
    double elapsed = (end.tv_sec - start.tv_sec) + (end.tv_nsec - start.tv_nsec) / 1e9;

    free((void*)memory);
    return elapsed;
}
|
| 114 |
+
|
| 115 |
+
// Measure allocation rate.
// Times `iterations` malloc/free pairs of `size_bytes` each and returns the
// total elapsed wall-clock seconds; callers derive ops/sec from it.
double measure_alloc_rate(size_t size_bytes, size_t iterations) {
    struct timespec start, end;

    // Warmup: prime the allocator's internal caches/arenas so the first
    // timed allocation is not an outlier. (free(NULL) is a safe no-op.)
    void *tmp = malloc(size_bytes);
    free(tmp);

    clock_gettime(CLOCK_MONOTONIC, &start);

    for (size_t i = 0; i < iterations; i++) {
        void *p = malloc(size_bytes);
        if (p) {
            // Write something to ensure it's not just a virtual reservation (optional, but realistic)
            *(volatile char*)p = 0;
            free(p);
        }
    }

    clock_gettime(CLOCK_MONOTONIC, &end);
    double elapsed = (end.tv_sec - start.tv_sec) + (end.tv_nsec - start.tv_nsec) / 1e9;
    return elapsed;
}
|
benchmarks/system_info.py
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
System Information Collection Module
|
| 3 |
+
收集系统基础信息:操作系统、CPU、内存、磁盘、GPU
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import platform
|
| 7 |
+
import os
|
| 8 |
+
import psutil
|
| 9 |
+
import cpuinfo
|
| 10 |
+
import time
|
| 11 |
+
from typing import Dict, Any, Optional, List
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def get_os_info() -> Dict[str, str]:
    """Collect operating-system details: distro, kernel, load and uptime.

    Returns:
        Dict of OS facts, or ``{"error": ...}`` if anything fails.
    """
    try:
        # Parse /etc/os-release into a key -> value map (values may be quoted).
        release_fields = {}
        if os.path.exists('/etc/os-release'):
            with open('/etc/os-release', 'r') as fh:
                for raw in fh:
                    if '=' not in raw:
                        continue
                    field, _, raw_value = raw.strip().partition('=')
                    release_fields[field] = raw_value.strip('"')

        return {
            "system": platform.system(),
            "distro": release_fields.get('PRETTY_NAME', platform.platform()),
            "kernel": platform.release(),
            "hostname": platform.node(),
            "architecture": platform.machine(),
            # 1-, 5- and 15-minute load averages, rounded for display.
            "load_average": [round(avg, 2) for avg in os.getloadavg()],
            # Seconds since boot, derived from psutil's boot timestamp.
            "uptime_seconds": int(time.time() - psutil.boot_time()),
        }
    except Exception as e:
        return {"error": str(e)}
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def get_cpu_info() -> Dict[str, Any]:
    """Collect detailed CPU information.

    Combines py-cpuinfo (brand, architecture, cache sizes, feature flags)
    with psutil (core counts, frequency, kernel scheduler stats).

    Returns:
        Dict of CPU details, or ``{"error": ...}`` on failure.
    """
    try:
        info = cpuinfo.get_cpu_info()

        # Frequency may be unavailable (psutil returns None in some containers).
        freq = psutil.cpu_freq()
        freq_info = {
            "current_mhz": round(freq.current, 2) if freq else None,
            "min_mhz": round(freq.min, 2) if freq and freq.min else None,
            "max_mhz": round(freq.max, 2) if freq and freq.max else None,
        }

        # Cache sizes, when py-cpuinfo reports them.
        cache_info = {}
        for key in ['l1_data_cache_size', 'l1_instruction_cache_size', 'l2_cache_size', 'l3_cache_size']:
            if key in info:
                cache_info[key] = info[key]

        # Snapshot scheduler stats once: the original called
        # psutil.cpu_stats() three times, which is wasteful and can yield
        # mutually inconsistent counter values.
        stats = psutil.cpu_stats()

        return {
            "brand": info.get('brand_raw', 'Unknown'),
            "arch": info.get('arch', platform.machine()),
            "bits": info.get('bits', 64),
            "cores_physical": psutil.cpu_count(logical=False),
            "cores_logical": psutil.cpu_count(logical=True),
            "frequency": freq_info,
            "cache": cache_info,
            "flags": info.get('flags', [])[:20],  # only the first 20 feature flags
            "stats": {
                "ctx_switches": stats.ctx_switches,
                "interrupts": stats.interrupts,
                "soft_interrupts": stats.soft_interrupts,
            }
        }
    except Exception as e:
        return {"error": str(e)}
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def get_memory_info() -> Dict[str, Any]:
    """Collect RAM and swap usage statistics via psutil.

    Returns:
        Dict with raw byte counts plus human-readable strings, or
        ``{"error": ...}`` on failure.
    """
    try:
        ram = psutil.virtual_memory()
        swap = psutil.swap_memory()

        def format_bytes(b: int) -> str:
            """Render a byte count like '3.50 GB'."""
            amount = float(b)
            for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
                if amount < 1024:
                    return f"{amount:.2f} {unit}"
                amount /= 1024
            return f"{amount:.2f} PB"

        snapshot = {
            "total": ram.total,
            "total_formatted": format_bytes(ram.total),
            "available": ram.available,
            "available_formatted": format_bytes(ram.available),
            "used": ram.used,
            "used_formatted": format_bytes(ram.used),
            "percent": ram.percent,
            "swap_total": swap.total,
            "swap_total_formatted": format_bytes(swap.total),
            "swap_used": swap.used,
            "swap_percent": swap.percent,
        }
        return snapshot
    except Exception as e:
        return {"error": str(e)}
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def get_disk_info() -> List[Dict[str, Any]]:
    """Collect usage statistics for every mounted partition.

    Returns:
        List of per-partition dicts; partitions whose usage cannot be
        read are skipped. On a top-level failure a single-element list
        containing ``{"error": ...}`` is returned.
    """
    try:
        disks = []

        def format_bytes(b: int) -> str:
            """Render a byte count like '3.50 GB'."""
            for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
                if b < 1024:
                    return f"{b:.2f} {unit}"
                b /= 1024
            return f"{b:.2f} PB"

        for partition in psutil.disk_partitions():
            try:
                usage = psutil.disk_usage(partition.mountpoint)
            except OSError:
                # Bug fix: disk_usage raises plain OSError (not only
                # PermissionError) for unreadable mounts such as empty
                # optical drives or stale FUSE mounts - skip them all.
                continue
            disks.append({
                "device": partition.device,
                "mountpoint": partition.mountpoint,
                "fstype": partition.fstype,
                "total": usage.total,
                "total_formatted": format_bytes(usage.total),
                "used": usage.used,
                "used_formatted": format_bytes(usage.used),
                "free": usage.free,
                "free_formatted": format_bytes(usage.free),
                "percent": usage.percent,
            })

        return disks
    except Exception as e:
        return [{"error": str(e)}]
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def get_gpu_info() -> Optional[List[Dict[str, Any]]]:
    """Query NVIDIA GPUs via ``nvidia-smi``.

    Returns:
        List of per-GPU dicts, or None when no NVIDIA GPU/driver is
        present or the query fails for any reason.
    """
    try:
        import subprocess

        # Ask nvidia-smi for a machine-readable CSV snapshot.
        result = subprocess.run(
            ['nvidia-smi', '--query-gpu=name,memory.total,memory.free,memory.used,driver_version,compute_cap',
             '--format=csv,noheader,nounits'],
            capture_output=True,
            text=True,
            timeout=10
        )

        if result.returncode != 0:
            return None

        gpus = []
        for i, line in enumerate(result.stdout.strip().split('\n')):
            if not line:
                continue
            parts = [p.strip() for p in line.split(',')]
            if len(parts) >= 6:
                gpus.append({
                    "index": i,
                    "name": parts[0],
                    "memory_total_mb": int(parts[1]),
                    "memory_free_mb": int(parts[2]),
                    "memory_used_mb": int(parts[3]),
                    "driver_version": parts[4],
                    "compute_capability": parts[5],
                })

        return gpus if gpus else None

    except Exception:
        # Covers FileNotFoundError (no nvidia-smi), TimeoutExpired, parse
        # errors, etc. The original listed those types alongside Exception,
        # which was redundant: Exception already subsumes them.
        return None
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def get_all_system_info() -> Dict[str, Any]:
    """Assemble a full snapshot: OS, CPU, memory, disks and GPUs."""
    snapshot: Dict[str, Any] = {}
    snapshot["os"] = get_os_info()
    snapshot["cpu"] = get_cpu_info()
    snapshot["memory"] = get_memory_info()
    snapshot["disk"] = get_disk_info()
    snapshot["gpu"] = get_gpu_info()
    return snapshot
|
requirements.txt
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi>=0.109.0
|
| 2 |
+
uvicorn[standard]>=0.27.0
|
| 3 |
+
psutil>=5.9.0
|
| 4 |
+
numpy>=1.24.0
|
| 5 |
+
py-cpuinfo>=9.0.0
|
static/index.html
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="zh-CN">
|
| 3 |
+
<head>
|
| 4 |
+
<meta charset="UTF-8">
|
| 5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 6 |
+
<title>Space Fetch - System Performance Dashboard</title>
|
| 7 |
+
<meta name="description" content="Check computer statistics of your Hugging Face Space">
|
| 8 |
+
<link rel="stylesheet" href="/static/style.css">
|
| 9 |
+
<link rel="preconnect" href="https://fonts.googleapis.com">
|
| 10 |
+
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
|
| 11 |
+
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&family=JetBrains+Mono:wght@400;500&display=swap" rel="stylesheet">
|
| 12 |
+
</head>
|
| 13 |
+
<body>
|
| 14 |
+
<div class="background-effects">
|
| 15 |
+
<div class="gradient-orb orb-1"></div>
|
| 16 |
+
<div class="gradient-orb orb-2"></div>
|
| 17 |
+
<div class="gradient-orb orb-3"></div>
|
| 18 |
+
</div>
|
| 19 |
+
|
| 20 |
+
<div class="container">
|
| 21 |
+
<header class="header">
|
| 22 |
+
<div class="logo">
|
| 23 |
+
<span class="logo-icon">🏆</span>
|
| 24 |
+
<h1>Space Fetch</h1>
|
| 25 |
+
</div>
|
| 26 |
+
<p class="tagline">System Performance Dashboard</p>
|
| 27 |
+
</header>
|
| 28 |
+
|
| 29 |
+
<!-- System Info Section -->
|
| 30 |
+
<section class="section" id="system-section">
|
| 31 |
+
<h2 class="section-title">
|
| 32 |
+
<span class="icon">💻</span>
|
| 33 |
+
System Information
|
| 34 |
+
</h2>
|
| 35 |
+
<div class="cards-grid" id="system-info">
|
| 36 |
+
<div class="card skeleton">
|
| 37 |
+
<div class="skeleton-line"></div>
|
| 38 |
+
<div class="skeleton-line short"></div>
|
| 39 |
+
</div>
|
| 40 |
+
</div>
|
| 41 |
+
</section>
|
| 42 |
+
|
| 43 |
+
<!-- Hardware Section -->
|
| 44 |
+
<section class="section" id="hardware-section">
|
| 45 |
+
<h2 class="section-title">
|
| 46 |
+
<span class="icon">🔧</span>
|
| 47 |
+
Hardware Specifications
|
| 48 |
+
</h2>
|
| 49 |
+
<div class="hardware-grid" id="hardware-info">
|
| 50 |
+
<!-- CPU Card -->
|
| 51 |
+
<div class="hardware-card" id="cpu-card">
|
| 52 |
+
<div class="hardware-icon">🖥️</div>
|
| 53 |
+
<div class="hardware-content">
|
| 54 |
+
<h3>CPU</h3>
|
| 55 |
+
<div class="hardware-value" id="cpu-name">Loading...</div>
|
| 56 |
+
<div class="hardware-details" id="cpu-details"></div>
|
| 57 |
+
</div>
|
| 58 |
+
</div>
|
| 59 |
+
|
| 60 |
+
<!-- Memory Card -->
|
| 61 |
+
<div class="hardware-card" id="memory-card">
|
| 62 |
+
<div class="hardware-icon">🧠</div>
|
| 63 |
+
<div class="hardware-content">
|
| 64 |
+
<h3>Memory</h3>
|
| 65 |
+
<div class="hardware-value" id="memory-total">Loading...</div>
|
| 66 |
+
<div class="progress-bar">
|
| 67 |
+
<div class="progress-fill" id="memory-usage" style="width: 0%"></div>
|
| 68 |
+
</div>
|
| 69 |
+
<div class="hardware-details" id="memory-details"></div>
|
| 70 |
+
</div>
|
| 71 |
+
</div>
|
| 72 |
+
|
| 73 |
+
<!-- Disk Card -->
|
| 74 |
+
<div class="hardware-card" id="disk-card">
|
| 75 |
+
<div class="hardware-icon">💾</div>
|
| 76 |
+
<div class="hardware-content">
|
| 77 |
+
<h3>Storage</h3>
|
| 78 |
+
<div class="hardware-value" id="disk-total">Loading...</div>
|
| 79 |
+
<div class="progress-bar">
|
| 80 |
+
<div class="progress-fill" id="disk-usage" style="width: 0%"></div>
|
| 81 |
+
</div>
|
| 82 |
+
<div class="hardware-details" id="disk-details"></div>
|
| 83 |
+
</div>
|
| 84 |
+
</div>
|
| 85 |
+
|
| 86 |
+
<!-- GPU Card -->
|
| 87 |
+
<div class="hardware-card gpu-card" id="gpu-card">
|
| 88 |
+
<div class="hardware-icon">🎮</div>
|
| 89 |
+
<div class="hardware-content">
|
| 90 |
+
<h3>GPU</h3>
|
| 91 |
+
<div class="hardware-value" id="gpu-name">Detecting...</div>
|
| 92 |
+
<div class="progress-bar">
|
| 93 |
+
<div class="progress-fill gpu-fill" id="gpu-usage" style="width: 0%"></div>
|
| 94 |
+
</div>
|
| 95 |
+
<div class="hardware-details" id="gpu-details"></div>
|
| 96 |
+
</div>
|
| 97 |
+
</div>
|
| 98 |
+
</div>
|
| 99 |
+
</section>
|
| 100 |
+
|
| 101 |
+
<!-- Benchmarks Section -->
|
| 102 |
+
<section class="section" id="benchmark-section">
|
| 103 |
+
<h2 class="section-title">
|
| 104 |
+
<span class="icon">⚡</span>
|
| 105 |
+
Performance Benchmarks
|
| 106 |
+
</h2>
|
| 107 |
+
|
| 108 |
+
<div class="benchmark-controls">
|
| 109 |
+
<button class="btn btn-primary" id="run-all-btn" onclick="runAllBenchmarks()">
|
| 110 |
+
<span class="btn-icon">🚀</span>
|
| 111 |
+
Run Full Benchmark
|
| 112 |
+
</button>
|
| 113 |
+
<button class="btn btn-secondary" id="export-btn" onclick="exportResults()">
|
| 114 |
+
<span class="btn-icon">📋</span>
|
| 115 |
+
Export JSON
|
| 116 |
+
</button>
|
| 117 |
+
</div>
|
| 118 |
+
|
| 119 |
+
<div class="benchmark-grid" id="benchmark-results">
|
| 120 |
+
<!-- CPU Benchmark -->
|
| 121 |
+
<div class="benchmark-card" id="bench-cpu">
|
| 122 |
+
<div class="benchmark-header">
|
| 123 |
+
<span class="benchmark-icon">🖥️</span>
|
| 124 |
+
<h3>CPU Performance</h3>
|
| 125 |
+
<button class="btn-mini" onclick="runBenchmark('cpu')" id="run-cpu-btn">Run</button>
|
| 126 |
+
</div>
|
| 127 |
+
<div class="benchmark-content" id="cpu-bench-content">
|
| 128 |
+
<div class="benchmark-placeholder">Click "Run" to start CPU benchmark</div>
|
| 129 |
+
</div>
|
| 130 |
+
</div>
|
| 131 |
+
|
| 132 |
+
<!-- Memory Benchmark -->
|
| 133 |
+
<div class="benchmark-card" id="bench-memory">
|
| 134 |
+
<div class="benchmark-header">
|
| 135 |
+
<span class="benchmark-icon">🧠</span>
|
| 136 |
+
<h3>Memory Performance</h3>
|
| 137 |
+
<button class="btn-mini" onclick="runBenchmark('memory')" id="run-memory-btn">Run</button>
|
| 138 |
+
</div>
|
| 139 |
+
<div class="benchmark-content" id="memory-bench-content">
|
| 140 |
+
<div class="benchmark-placeholder">Click "Run" to start Memory benchmark</div>
|
| 141 |
+
</div>
|
| 142 |
+
</div>
|
| 143 |
+
|
| 144 |
+
<!-- Disk Benchmark -->
|
| 145 |
+
<div class="benchmark-card" id="bench-disk">
|
| 146 |
+
<div class="benchmark-header">
|
| 147 |
+
<span class="benchmark-icon">💾</span>
|
| 148 |
+
<h3>Disk I/O Performance</h3>
|
| 149 |
+
<button class="btn-mini" onclick="runBenchmark('disk')" id="run-disk-btn">Run</button>
|
| 150 |
+
</div>
|
| 151 |
+
<div class="benchmark-content" id="disk-bench-content">
|
| 152 |
+
<div class="benchmark-placeholder">Click "Run" to start Disk benchmark</div>
|
| 153 |
+
</div>
|
| 154 |
+
</div>
|
| 155 |
+
|
| 156 |
+
<!-- GPU Benchmark -->
|
| 157 |
+
<div class="benchmark-card gpu-bench" id="bench-gpu">
|
| 158 |
+
<div class="benchmark-header">
|
| 159 |
+
<span class="benchmark-icon">🎮</span>
|
| 160 |
+
<h3>GPU Performance</h3>
|
| 161 |
+
<button class="btn-mini" onclick="runBenchmark('gpu')" id="run-gpu-btn">Run</button>
|
| 162 |
+
</div>
|
| 163 |
+
<div class="benchmark-content" id="gpu-bench-content">
|
| 164 |
+
<div class="benchmark-placeholder">Click "Run" to start GPU benchmark</div>
|
| 165 |
+
</div>
|
| 166 |
+
</div>
|
| 167 |
+
</div>
|
| 168 |
+
|
| 169 |
+
<!-- Overall Score -->
|
| 170 |
+
<div class="score-card" id="overall-score" style="display: none;">
|
| 171 |
+
<div class="score-title">Overall Performance Score</div>
|
| 172 |
+
<div class="score-value" id="total-score">0</div>
|
| 173 |
+
<div class="score-breakdown" id="score-breakdown"></div>
|
| 174 |
+
</div>
|
| 175 |
+
</section>
|
| 176 |
+
|
| 177 |
+
<footer class="footer">
|
| 178 |
+
<p>Space Fetch v1.0.0 | Built for <a href="https://huggingface.co/spaces" target="_blank">Hugging Face Spaces</a></p>
|
| 179 |
+
</footer>
|
| 180 |
+
</div>
|
| 181 |
+
|
| 182 |
+
<script src="/static/script.js"></script>
|
| 183 |
+
</body>
|
| 184 |
+
</html>
|
static/script.js
ADDED
|
@@ -0,0 +1,305 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* Space Fetch - System Performance Dashboard
|
| 3 |
+
* Frontend Logic
|
| 4 |
+
*/
|
| 5 |
+
|
| 6 |
+
// Application State
|
| 7 |
+
// Central client-side application state, shared by all handlers below.
const state = {
    systemInfo: null,   // last payload from /api/system
    benchmarks: {       // last result per benchmark type (null until run)
        cpu: null,
        memory: null,
        disk: null,
        gpu: null
    },
    status: {           // mirror of /api/benchmark/status
        running: {},            // benchmark type -> true while executing
        gpu_available: false    // whether the backend detected a GPU
    }
};
|
| 20 |
+
|
| 21 |
+
// Initialization
|
| 22 |
+
// Bootstrap: fetch the static system info once, then start polling loops.
document.addEventListener('DOMContentLoaded', async () => {
    await fetchSystemInfo();
    await checkBenchmarkStatus();

    // Start polling for active benchmarks
    setInterval(pollBenchmarkStatus, 2000);

    // Start polling for memory status
    setInterval(updateMemoryStatus, 1000);
});
|
| 32 |
+
|
| 33 |
+
/**
 * Load /api/system once, cache it in state, and paint both the
 * system-info and hardware sections.
 */
async function fetchSystemInfo() {
    try {
        const info = await (await fetch('/api/system')).json();
        state.systemInfo = info;
        renderSystemInfo(info);
        renderHardwareInfo(info);
    } catch (err) {
        console.error('Failed to fetch system info:', err);
    }
}
|
| 44 |
+
|
| 45 |
+
/**
 * Initial status sync: hide GPU widgets when no GPU is available and
 * restore any benchmark results the backend has cached.
 */
async function checkBenchmarkStatus() {
    try {
        const response = await fetch('/api/benchmark/status');
        const data = await response.json();
        state.status = data;

        // Update GPU card visibility
        const gpuCard = document.getElementById('gpu-card');
        const gpuBench = document.getElementById('bench-gpu');

        if (!data.gpu_available) {
            if (gpuCard) gpuCard.style.display = 'none';
            if (gpuBench) gpuBench.style.display = 'none';
        }

        // Restore any cached benchmark results.
        // Bug fix: guard against a missing `cached` field (would make
        // Object.entries throw) and a missing panel element (would NPE
        // on .querySelector).
        for (const [type, isCached] of Object.entries(data.cached || {})) {
            if (!isCached) continue;
            const content = document.getElementById(`${type}-bench-content`);
            if (content && !content.querySelector('.bench-result-item')) {
                await fetchBenchmarkResult(type);
            }
        }
    } catch (error) {
        console.error('Failed to check status:', error);
    }
}
|
| 70 |
+
|
| 71 |
+
/**
 * 2-second poll: detect benchmarks that just finished (running -> idle),
 * fetch their results, and keep the per-benchmark Run buttons in sync.
 */
async function pollBenchmarkStatus() {
    try {
        const response = await fetch('/api/benchmark/status');
        const data = await response.json();

        // Check for completed benchmarks
        for (const [type, isRunning] of Object.entries(data.running)) {
            if (state.status.running[type] && !isRunning) {
                // Benchmark finished, fetch results
                await fetchBenchmarkResult(type);
            }

            // Update button states
            const btn = document.getElementById(`run-${type}-btn`);
            if (btn) {
                if (isRunning) {
                    btn.disabled = true;
                    btn.innerHTML = '<span class="loading-spinner"></span> Running...';
                } else {
                    btn.disabled = false;
                    btn.innerText = 'Run';
                }
            }
        }

        state.status = data;
    } catch (error) {
        // Bug fix: this runs every 2s; without a catch, a backend hiccup
        // produced an unhandled promise rejection per tick. Fail silently,
        // consistent with updateMemoryStatus.
    }
}
|
| 97 |
+
|
| 98 |
+
/** Refresh the live memory widgets from /api/monitor/memory (1s poll). */
async function updateMemoryStatus() {
    try {
        const res = await fetch('/api/monitor/memory');
        if (!res.ok) return;
        const mem = await res.json();
        const byId = (id) => document.getElementById(id);
        byId('memory-total').innerText = mem.total_formatted;
        byId('memory-usage').style.width = `${mem.percent}%`;
        byId('memory-details').innerText = `${mem.used_formatted} used / ${mem.available_formatted} available`;
    } catch (err) {
        // Swallow errors: this runs on a 1s poll and must not spam the console.
    }
}
|
| 111 |
+
|
| 112 |
+
// Rendering Functions
|
| 113 |
+
function renderSystemInfo(data) {
|
| 114 |
+
const container = document.getElementById('system-info');
|
| 115 |
+
container.innerHTML = `
|
| 116 |
+
<div class="card">
|
| 117 |
+
<h3>Operating System</h3>
|
| 118 |
+
<div class="info-row">
|
| 119 |
+
<span class="info-label">Distro</span>
|
| 120 |
+
<span class="info-value">${data.os.distro}</span>
|
| 121 |
+
</div>
|
| 122 |
+
<div class="info-row">
|
| 123 |
+
<span class="info-label">Kernel</span>
|
| 124 |
+
<span class="info-value">${data.os.kernel}</span>
|
| 125 |
+
</div>
|
| 126 |
+
<div class="info-row">
|
| 127 |
+
<span class="info-label">Hostname</span>
|
| 128 |
+
<span class="info-value">${data.os.hostname}</span>
|
| 129 |
+
</div>
|
| 130 |
+
<div class="info-row">
|
| 131 |
+
<span class="info-label">Load Avg</span>
|
| 132 |
+
<span class="info-value">${data.os.load_average.join(', ')}</span>
|
| 133 |
+
</div>
|
| 134 |
+
<div class="info-row">
|
| 135 |
+
<span class="info-label">Uptime</span>
|
| 136 |
+
<span class="info-value">${Math.floor(data.os.uptime_seconds / 3600)}h ${Math.floor((data.os.uptime_seconds % 3600) / 60)}m</span>
|
| 137 |
+
</div>
|
| 138 |
+
</div>
|
| 139 |
+
<div class="card">
|
| 140 |
+
<h3>Python Environment</h3>
|
| 141 |
+
<div class="info-row">
|
| 142 |
+
<span class="info-label">Version</span>
|
| 143 |
+
<span class="info-value">Python ${data.os.system === 'Linux' ? '3.x' : 'Unknown'}</span>
|
| 144 |
+
</div>
|
| 145 |
+
<div class="info-row">
|
| 146 |
+
<span class="info-label">Architecture</span>
|
| 147 |
+
<span class="info-value">${data.os.architecture}</span>
|
| 148 |
+
</div>
|
| 149 |
+
</div>
|
| 150 |
+
`;
|
| 151 |
+
}
|
| 152 |
+
|
| 153 |
+
function renderHardwareInfo(data) {
|
| 154 |
+
// CPU
|
| 155 |
+
document.getElementById('cpu-name').innerText = data.cpu.brand;
|
| 156 |
+
const cpuDetails = `${data.cpu.cores_physical} Physical Cores / ${data.cpu.cores_logical} Logical Cores @ ${data.cpu.frequency.current_mhz || 'N/A'} MHz`;
|
| 157 |
+
document.getElementById('cpu-details').innerText = cpuDetails;
|
| 158 |
+
|
| 159 |
+
// Memory
|
| 160 |
+
document.getElementById('memory-total').innerText = data.memory.total_formatted;
|
| 161 |
+
document.getElementById('memory-usage').style.width = `${data.memory.percent}%`;
|
| 162 |
+
document.getElementById('memory-details').innerText = `${data.memory.used_formatted} used / ${data.memory.available_formatted} available`;
|
| 163 |
+
|
| 164 |
+
// Disk (First Partition)
|
| 165 |
+
if (data.disk.length > 0) {
|
| 166 |
+
const disk = data.disk[0];
|
| 167 |
+
document.getElementById('disk-total').innerText = disk.total_formatted;
|
| 168 |
+
document.getElementById('disk-usage').style.width = `${disk.percent}%`;
|
| 169 |
+
document.getElementById('disk-details').innerText = `${disk.used_formatted} used / ${disk.free_formatted} free (${disk.mountpoint})`;
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
// GPU
|
| 173 |
+
if (data.gpu && data.gpu.length > 0) {
|
| 174 |
+
const gpu = data.gpu[0];
|
| 175 |
+
document.getElementById('gpu-name').innerText = gpu.name;
|
| 176 |
+
const memoryUsedPercent = (gpu.memory_used_mb / gpu.memory_total_mb) * 100;
|
| 177 |
+
document.getElementById('gpu-usage').style.width = `${memoryUsedPercent}%`;
|
| 178 |
+
document.getElementById('gpu-details').innerText = `${gpu.memory_used_mb}MB / ${gpu.memory_total_mb}MB VRAM | Driver: ${gpu.driver_version}`;
|
| 179 |
+
} else {
|
| 180 |
+
const gpuCard = document.getElementById('gpu-card');
|
| 181 |
+
if (gpuCard) gpuCard.style.display = 'none';
|
| 182 |
+
}
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
// Benchmark Actions
|
| 186 |
+
async function runBenchmark(type) {
|
| 187 |
+
const container = document.getElementById(`${type}-bench-content`);
|
| 188 |
+
container.innerHTML = `
|
| 189 |
+
<div class="benchmark-placeholder">
|
| 190 |
+
<span class="loading-spinner"></span><br>
|
| 191 |
+
Running benchmark... This may take a moment.
|
| 192 |
+
</div>
|
| 193 |
+
`;
|
| 194 |
+
|
| 195 |
+
try {
|
| 196 |
+
await fetch(`/api/benchmark/${type}`, { method: 'POST' });
|
| 197 |
+
// Polling will handle the update
|
| 198 |
+
} catch (error) {
|
| 199 |
+
container.innerHTML = `<div class="benchmark-placeholder" style="color: var(--danger-color)">Error starting benchmark</div>`;
|
| 200 |
+
}
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
async function runAllBenchmarks() {
|
| 204 |
+
const types = ['cpu', 'memory', 'disk'];
|
| 205 |
+
if (state.status.gpu_available) types.push('gpu');
|
| 206 |
+
|
| 207 |
+
for (const type of types) {
|
| 208 |
+
runBenchmark(type);
|
| 209 |
+
}
|
| 210 |
+
}
|
| 211 |
+
|
| 212 |
+
async function fetchBenchmarkResult(type) {
|
| 213 |
+
try {
|
| 214 |
+
const response = await fetch(`/api/benchmark/${type}`);
|
| 215 |
+
const data = await response.json();
|
| 216 |
+
|
| 217 |
+
if (data.results) {
|
| 218 |
+
renderBenchmarkResult(type, data.results);
|
| 219 |
+
}
|
| 220 |
+
} catch (error) {
|
| 221 |
+
console.error(`Error details for ${type}:`, error);
|
| 222 |
+
}
|
| 223 |
+
}
|
| 224 |
+
|
| 225 |
+
function renderBenchmarkResult(type, results) {
|
| 226 |
+
const container = document.getElementById(`${type}-bench-content`);
|
| 227 |
+
let html = '';
|
| 228 |
+
|
| 229 |
+
if (type === 'cpu') {
|
| 230 |
+
html += createResultItem('Single Core Integer', `${results.single_core_integer.ops_per_second} ops/s`, results.single_core_integer.score);
|
| 231 |
+
html += createResultItem('Multi Core Integer', `${results.multi_core_integer.ops_per_second} ops/s`, results.multi_core_integer.score);
|
| 232 |
+
html += createResultItem('Single Core Float', `${results.single_core_float.gflops} GFLOPS`, results.single_core_float.score);
|
| 233 |
+
html += createResultItem('Multi Core Float', `${results.multi_core_float.gflops} GFLOPS`, results.multi_core_float.score);
|
| 234 |
+
html += createResultItem('SHA256 Hash', `${results.crypto.throughput_mb_per_sec} MB/s`, results.crypto.score);
|
| 235 |
+
html += createResultItem('Zlib Compression', `${results.compression.throughput_mb_per_sec} MB/s`, results.compression.score);
|
| 236 |
+
html += createResultItem('Context Switches', `${results.stress.wakeups_per_second} /s`, results.stress.score);
|
| 237 |
+
} else if (type === 'memory') {
|
| 238 |
+
html += createResultItem('Read Bandwidth', `${results.bandwidth.read_bandwidth_gb_s} GB/s`);
|
| 239 |
+
html += createResultItem('Write Bandwidth', `${results.bandwidth.write_bandwidth_gb_s} GB/s`);
|
| 240 |
+
|
| 241 |
+
// New Metric structure
|
| 242 |
+
if (results.latency_random) {
|
| 243 |
+
html += createResultItem('Random Latency', `${results.latency_random.average_latency_ns} ns`);
|
| 244 |
+
}
|
| 245 |
+
if (results.latency_sequential) {
|
| 246 |
+
html += createResultItem('Seq Latency', `${results.latency_sequential.average_latency_ns} ns`);
|
| 247 |
+
}
|
| 248 |
+
if (results.alloc_rate) {
|
| 249 |
+
html += createResultItem('Alloc Rate', `${(results.alloc_rate.ops_per_sec / 1e6).toFixed(1)} M/s`);
|
| 250 |
+
}
|
| 251 |
+
if (results.cache_latency && results.cache_latency.levels) {
|
| 252 |
+
const l1 = results.cache_latency.levels.L1;
|
| 253 |
+
const l2 = results.cache_latency.levels.L2;
|
| 254 |
+
const l3 = results.cache_latency.levels.L3;
|
| 255 |
+
html += createResultItem('L1 Latency', `${l1.latency_ns} ns`);
|
| 256 |
+
html += createResultItem('L2 Latency', `${l2.latency_ns} ns`);
|
| 257 |
+
html += createResultItem('L3 Latency', `${l3.latency_ns} ns`);
|
| 258 |
+
}
|
| 259 |
+
} else if (type === 'disk') {
|
| 260 |
+
html += createResultItem('Seq Read', `${results.sequential_read.throughput_mb_s} MB/s`, results.sequential_read.score);
|
| 261 |
+
html += createResultItem('Seq Write', `${results.sequential_write.throughput_mb_s} MB/s`, results.sequential_write.score);
|
| 262 |
+
html += createResultItem('Random Read', `${results.random_read_iops.iops} IOPS`, results.random_read_iops.score);
|
| 263 |
+
html += createResultItem('Random Write', `${results.random_write_iops.iops} IOPS`, results.random_write_iops.score);
|
| 264 |
+
} else if (type === 'gpu') {
|
| 265 |
+
if (results.memory_bandwidth) html += createResultItem('Memory Bandwidth', `${results.memory_bandwidth.bandwidth_gb_s} GB/s`, results.memory_bandwidth.score);
|
| 266 |
+
if (results.fp32) html += createResultItem('FP32 Compute', `${results.fp32.tflops} TFLOPS`, results.fp32.score);
|
| 267 |
+
if (results.fp16) html += createResultItem('FP16 Compute', `${results.fp16.tflops} TFLOPS`, results.fp16.score);
|
| 268 |
+
if (results.tensor_cores) html += createResultItem('Tensor Cores', `${results.tensor_cores.tflops} TFLOPS`, results.tensor_cores.score);
|
| 269 |
+
}
|
| 270 |
+
|
| 271 |
+
container.innerHTML = html;
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
function createResultItem(label, value, score) {
|
| 275 |
+
return `
|
| 276 |
+
<div class="bench-result-item">
|
| 277 |
+
<div class="bench-label">
|
| 278 |
+
<span>${label}</span>
|
| 279 |
+
</div>
|
| 280 |
+
<div class="progress-bar" style="height: 4px; margin: 2px 0;">
|
| 281 |
+
<div class="progress-fill" style="width: 100%; opacity: 0.5;"></div>
|
| 282 |
+
</div>
|
| 283 |
+
<div class="bench-value">${value}</div>
|
| 284 |
+
</div>
|
| 285 |
+
`;
|
| 286 |
+
}
|
| 287 |
+
|
| 288 |
+
async function exportResults() {
|
| 289 |
+
try {
|
| 290 |
+
const response = await fetch('/api/export');
|
| 291 |
+
const data = await response.json();
|
| 292 |
+
|
| 293 |
+
const blob = new Blob([JSON.stringify(data, null, 2)], { type: 'application/json' });
|
| 294 |
+
const url = URL.createObjectURL(blob);
|
| 295 |
+
const a = document.createElement('a');
|
| 296 |
+
a.href = url;
|
| 297 |
+
a.download = `space-fetch-results-${new Date().toISOString().slice(0, 10)}.json`;
|
| 298 |
+
document.body.appendChild(a);
|
| 299 |
+
a.click();
|
| 300 |
+
document.body.removeChild(a);
|
| 301 |
+
URL.revokeObjectURL(url);
|
| 302 |
+
} catch (error) {
|
| 303 |
+
alert('Failed to export results');
|
| 304 |
+
}
|
| 305 |
+
}
|
static/style.css
ADDED
|
@@ -0,0 +1,400 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* Global Styles */
|
| 2 |
+
:root {
|
| 3 |
+
--bg-color: #0d1117;
|
| 4 |
+
--card-bg: rgba(22, 27, 34, 0.7);
|
| 5 |
+
--text-primary: #e6edf3;
|
| 6 |
+
--text-secondary: #8b949e;
|
| 7 |
+
--accent-color: #58a6ff;
|
| 8 |
+
--success-color: #3fb950;
|
| 9 |
+
--warning-color: #d29922;
|
| 10 |
+
--danger-color: #f85149;
|
| 11 |
+
--border-color: #30363d;
|
| 12 |
+
--gpu-color: #76e191;
|
| 13 |
+
|
| 14 |
+
--font-heading: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif;
|
| 15 |
+
--font-mono: 'JetBrains Mono', monospace;
|
| 16 |
+
}
|
| 17 |
+
|
| 18 |
+
body {
|
| 19 |
+
margin: 0;
|
| 20 |
+
padding: 0;
|
| 21 |
+
background-color: var(--bg-color);
|
| 22 |
+
color: var(--text-primary);
|
| 23 |
+
font-family: var(--font-heading);
|
| 24 |
+
line-height: 1.6;
|
| 25 |
+
min-height: 100vh;
|
| 26 |
+
overflow-x: hidden;
|
| 27 |
+
}
|
| 28 |
+
|
| 29 |
+
/* Background Effects */
|
| 30 |
+
.background-effects {
|
| 31 |
+
position: fixed;
|
| 32 |
+
top: 0;
|
| 33 |
+
left: 0;
|
| 34 |
+
width: 100%;
|
| 35 |
+
height: 100%;
|
| 36 |
+
z-index: -1;
|
| 37 |
+
overflow: hidden;
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
.gradient-orb {
|
| 41 |
+
position: absolute;
|
| 42 |
+
border-radius: 50%;
|
| 43 |
+
filter: blur(100px);
|
| 44 |
+
opacity: 0.15;
|
| 45 |
+
animation: float 20s infinite ease-in-out;
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
.orb-1 {
|
| 49 |
+
top: -10%;
|
| 50 |
+
left: -10%;
|
| 51 |
+
width: 600px;
|
| 52 |
+
height: 600px;
|
| 53 |
+
background: radial-gradient(circle, #58a6ff, #1f6feb);
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
.orb-2 {
|
| 57 |
+
bottom: -10%;
|
| 58 |
+
right: -10%;
|
| 59 |
+
width: 700px;
|
| 60 |
+
height: 700px;
|
| 61 |
+
background: radial-gradient(circle, #7e3ddb, #a371f7);
|
| 62 |
+
animation-delay: -5s;
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
.orb-3 {
|
| 66 |
+
top: 40%;
|
| 67 |
+
left: 40%;
|
| 68 |
+
width: 500px;
|
| 69 |
+
height: 500px;
|
| 70 |
+
background: radial-gradient(circle, #3fb950, #2ea043);
|
| 71 |
+
animation-delay: -10s;
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
@keyframes float {
|
| 75 |
+
0%, 100% { transform: translate(0, 0); }
|
| 76 |
+
33% { transform: translate(50px, -50px); }
|
| 77 |
+
66% { transform: translate(-30px, 30px); }
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
/* Container */
|
| 81 |
+
.container {
|
| 82 |
+
max-width: 1200px;
|
| 83 |
+
margin: 0 auto;
|
| 84 |
+
padding: 2rem;
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
/* Header */
|
| 88 |
+
.header {
|
| 89 |
+
text-align: center;
|
| 90 |
+
margin-bottom: 4rem;
|
| 91 |
+
animation: fadeIn 1s ease-out;
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
.logo h1 {
|
| 95 |
+
font-size: 3rem;
|
| 96 |
+
margin: 0;
|
| 97 |
+
background: linear-gradient(135deg, #fff 0%, #a5d6ff 100%);
|
| 98 |
+
-webkit-background-clip: text;
|
| 99 |
+
-webkit-text-fill-color: transparent;
|
| 100 |
+
display: inline-block;
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
.logo-icon {
|
| 104 |
+
font-size: 3rem;
|
| 105 |
+
margin-right: 1rem;
|
| 106 |
+
vertical-align: middle;
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
.tagline {
|
| 110 |
+
color: var(--text-secondary);
|
| 111 |
+
font-size: 1.2rem;
|
| 112 |
+
margin-top: 0.5rem;
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
/* Sections */
|
| 116 |
+
.section {
|
| 117 |
+
margin-bottom: 4rem;
|
| 118 |
+
animation: slideUp 0.8s ease-out;
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
.section-title {
|
| 122 |
+
font-size: 1.8rem;
|
| 123 |
+
margin-bottom: 1.5rem;
|
| 124 |
+
display: flex;
|
| 125 |
+
align-items: center;
|
| 126 |
+
border-bottom: 1px solid var(--border-color);
|
| 127 |
+
padding-bottom: 1rem;
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
.section-title .icon {
|
| 131 |
+
margin-right: 0.8rem;
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
/* Cards */
|
| 135 |
+
.card, .hardware-card, .benchmark-card, .score-card {
|
| 136 |
+
background: var(--card-bg);
|
| 137 |
+
backdrop-filter: blur(10px);
|
| 138 |
+
border: 1px solid var(--border-color);
|
| 139 |
+
border-radius: 12px;
|
| 140 |
+
padding: 1.5rem;
|
| 141 |
+
transition: transform 0.2s, box-shadow 0.2s;
|
| 142 |
+
}
|
| 143 |
+
|
| 144 |
+
.card:hover, .hardware-card:hover, .benchmark-card:hover {
|
| 145 |
+
transform: translateY(-2px);
|
| 146 |
+
box-shadow: 0 8px 24px rgba(0, 0, 0, 0.2);
|
| 147 |
+
border-color: var(--accent-color);
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
/* Hardware Grid */
|
| 151 |
+
.hardware-grid {
|
| 152 |
+
display: grid;
|
| 153 |
+
grid-template-columns: repeat(auto-fit, minmax(280px, 1fr));
|
| 154 |
+
gap: 1.5rem;
|
| 155 |
+
}
|
| 156 |
+
|
| 157 |
+
.hardware-card {
|
| 158 |
+
display: flex;
|
| 159 |
+
flex-direction: column;
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
.hardware-icon {
|
| 163 |
+
font-size: 2rem;
|
| 164 |
+
margin-bottom: 1rem;
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
.hardware-content h3 {
|
| 168 |
+
margin: 0 0 0.5rem 0;
|
| 169 |
+
color: var(--text-secondary);
|
| 170 |
+
font-size: 0.9rem;
|
| 171 |
+
text-transform: uppercase;
|
| 172 |
+
letter-spacing: 1px;
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
.hardware-value {
|
| 176 |
+
font-size: 1.2rem;
|
| 177 |
+
font-weight: 600;
|
| 178 |
+
margin-bottom: 0.5rem;
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
.hardware-details {
|
| 182 |
+
margin-top: auto;
|
| 183 |
+
font-size: 0.85rem;
|
| 184 |
+
color: var(--text-secondary);
|
| 185 |
+
line-height: 1.4;
|
| 186 |
+
}
|
| 187 |
+
|
| 188 |
+
/* Progress Bars */
|
| 189 |
+
.progress-bar {
|
| 190 |
+
width: 100%;
|
| 191 |
+
height: 6px;
|
| 192 |
+
background: rgba(255, 255, 255, 0.1);
|
| 193 |
+
border-radius: 3px;
|
| 194 |
+
margin: 0.5rem 0;
|
| 195 |
+
overflow: hidden;
|
| 196 |
+
}
|
| 197 |
+
|
| 198 |
+
.progress-fill {
|
| 199 |
+
height: 100%;
|
| 200 |
+
background: var(--accent-color);
|
| 201 |
+
border-radius: 3px;
|
| 202 |
+
transition: width 1s ease-in-out;
|
| 203 |
+
}
|
| 204 |
+
|
| 205 |
+
.gpu-fill {
|
| 206 |
+
background: var(--gpu-color);
|
| 207 |
+
}
|
| 208 |
+
|
| 209 |
+
/* System Info List */
|
| 210 |
+
.info-row {
|
| 211 |
+
display: flex;
|
| 212 |
+
justify-content: space-between;
|
| 213 |
+
padding: 0.5rem 0;
|
| 214 |
+
border-bottom: 1px solid rgba(255, 255, 255, 0.05);
|
| 215 |
+
}
|
| 216 |
+
|
| 217 |
+
.info-label {
|
| 218 |
+
color: var(--text-secondary);
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
.info-value {
|
| 222 |
+
font-family: var(--font-mono);
|
| 223 |
+
color: var(--accent-color);
|
| 224 |
+
}
|
| 225 |
+
|
| 226 |
+
/* Benchmarks */
|
| 227 |
+
.benchmark-controls {
|
| 228 |
+
display: flex;
|
| 229 |
+
gap: 1rem;
|
| 230 |
+
margin-bottom: 2rem;
|
| 231 |
+
}
|
| 232 |
+
|
| 233 |
+
.btn {
|
| 234 |
+
padding: 0.8rem 1.5rem;
|
| 235 |
+
border-radius: 6px;
|
| 236 |
+
border: none;
|
| 237 |
+
font-weight: 600;
|
| 238 |
+
cursor: pointer;
|
| 239 |
+
display: flex;
|
| 240 |
+
align-items: center;
|
| 241 |
+
transition: all 0.2s;
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
.btn-primary {
|
| 245 |
+
background: var(--accent-color);
|
| 246 |
+
color: #fff;
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
.btn-primary:hover {
|
| 250 |
+
background: #3a8cf0;
|
| 251 |
+
transform: scale(1.02);
|
| 252 |
+
}
|
| 253 |
+
|
| 254 |
+
.btn-secondary {
|
| 255 |
+
background: rgba(255, 255, 255, 0.1);
|
| 256 |
+
color: var(--text-primary);
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
.btn-secondary:hover {
|
| 260 |
+
background: rgba(255, 255, 255, 0.15);
|
| 261 |
+
}
|
| 262 |
+
|
| 263 |
+
.btn-icon {
|
| 264 |
+
margin-right: 0.5rem;
|
| 265 |
+
}
|
| 266 |
+
|
| 267 |
+
.btn-mini {
|
| 268 |
+
background: rgba(255, 255, 255, 0.1);
|
| 269 |
+
border: 1px solid var(--border-color);
|
| 270 |
+
color: var(--text-primary);
|
| 271 |
+
padding: 0.3rem 0.8rem;
|
| 272 |
+
border-radius: 4px;
|
| 273 |
+
font-size: 0.8rem;
|
| 274 |
+
cursor: pointer;
|
| 275 |
+
transition: all 0.2s;
|
| 276 |
+
}
|
| 277 |
+
|
| 278 |
+
.btn-mini:hover {
|
| 279 |
+
background: var(--accent-color);
|
| 280 |
+
border-color: var(--accent-color);
|
| 281 |
+
}
|
| 282 |
+
|
| 283 |
+
.btn-mini:disabled {
|
| 284 |
+
opacity: 0.5;
|
| 285 |
+
cursor: not-allowed;
|
| 286 |
+
}
|
| 287 |
+
|
| 288 |
+
.benchmark-grid {
|
| 289 |
+
display: grid;
|
| 290 |
+
grid-template-columns: repeat(auto-fit, minmax(350px, 1fr));
|
| 291 |
+
gap: 1.5rem;
|
| 292 |
+
}
|
| 293 |
+
|
| 294 |
+
.benchmark-header {
|
| 295 |
+
display: flex;
|
| 296 |
+
justify-content: space-between;
|
| 297 |
+
align-items: center;
|
| 298 |
+
margin-bottom: 1rem;
|
| 299 |
+
padding-bottom: 0.5rem;
|
| 300 |
+
border-bottom: 1px solid rgba(255, 255, 255, 0.1);
|
| 301 |
+
}
|
| 302 |
+
|
| 303 |
+
.benchmark-header h3 {
|
| 304 |
+
margin: 0;
|
| 305 |
+
display: flex;
|
| 306 |
+
align-items: center;
|
| 307 |
+
font-size: 1.1rem;
|
| 308 |
+
}
|
| 309 |
+
|
| 310 |
+
.benchmark-icon {
|
| 311 |
+
margin-right: 0.5rem;
|
| 312 |
+
}
|
| 313 |
+
|
| 314 |
+
.benchmark-placeholder {
|
| 315 |
+
color: var(--text-secondary);
|
| 316 |
+
text-align: center;
|
| 317 |
+
padding: 2rem 0;
|
| 318 |
+
font-style: italic;
|
| 319 |
+
}
|
| 320 |
+
|
| 321 |
+
/* Benchmark Result Items */
|
| 322 |
+
.bench-result-item {
|
| 323 |
+
margin-bottom: 1rem;
|
| 324 |
+
}
|
| 325 |
+
|
| 326 |
+
.bench-label {
|
| 327 |
+
display: flex;
|
| 328 |
+
justify-content: space-between;
|
| 329 |
+
margin-bottom: 0.3rem;
|
| 330 |
+
font-size: 0.9rem;
|
| 331 |
+
}
|
| 332 |
+
|
| 333 |
+
.bench-score {
|
| 334 |
+
font-family: var(--font-mono);
|
| 335 |
+
color: var(--warning-color);
|
| 336 |
+
}
|
| 337 |
+
|
| 338 |
+
.bench-value {
|
| 339 |
+
color: var(--success-color);
|
| 340 |
+
font-weight: 500;
|
| 341 |
+
}
|
| 342 |
+
|
| 343 |
+
/* Animations */
|
| 344 |
+
@keyframes fadeIn {
|
| 345 |
+
from { opacity: 0; }
|
| 346 |
+
to { opacity: 1; }
|
| 347 |
+
}
|
| 348 |
+
|
| 349 |
+
@keyframes slideUp {
|
| 350 |
+
from { transform: translateY(20px); opacity: 0; }
|
| 351 |
+
to { transform: translateY(0); opacity: 1; }
|
| 352 |
+
}
|
| 353 |
+
|
| 354 |
+
.loading-spinner {
|
| 355 |
+
display: inline-block;
|
| 356 |
+
width: 20px;
|
| 357 |
+
height: 20px;
|
| 358 |
+
border: 3px solid rgba(255, 255, 255, 0.3);
|
| 359 |
+
border-radius: 50%;
|
| 360 |
+
border-top-color: var(--accent-color);
|
| 361 |
+
animation: spin 1s ease-in-out infinite;
|
| 362 |
+
margin-right: 0.5rem;
|
| 363 |
+
}
|
| 364 |
+
|
| 365 |
+
@keyframes spin {
|
| 366 |
+
to { transform: rotate(360deg); }
|
| 367 |
+
}
|
| 368 |
+
|
| 369 |
+
/* Skeleton Loading */
|
| 370 |
+
.skeleton {
|
| 371 |
+
animation: pulse 1.5s infinite;
|
| 372 |
+
}
|
| 373 |
+
|
| 374 |
+
.skeleton-line {
|
| 375 |
+
height: 20px;
|
| 376 |
+
background: rgba(255, 255, 255, 0.05);
|
| 377 |
+
border-radius: 4px;
|
| 378 |
+
margin-bottom: 0.5rem;
|
| 379 |
+
}
|
| 380 |
+
|
| 381 |
+
.skeleton-line.short {
|
| 382 |
+
width: 60%;
|
| 383 |
+
}
|
| 384 |
+
|
| 385 |
+
@keyframes pulse {
|
| 386 |
+
0% { opacity: 0.6; }
|
| 387 |
+
50% { opacity: 1; }
|
| 388 |
+
100% { opacity: 0.6; }
|
| 389 |
+
}
|
| 390 |
+
|
| 391 |
+
/* Responsive */
|
| 392 |
+
@media (max-width: 768px) {
|
| 393 |
+
.header h1 {
|
| 394 |
+
font-size: 2rem;
|
| 395 |
+
}
|
| 396 |
+
|
| 397 |
+
.hardware-grid, .benchmark-grid {
|
| 398 |
+
grid-template-columns: 1fr;
|
| 399 |
+
}
|
| 400 |
+
}
|