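/**
 * Benchmark CLI entry point.
 *
 * Flags (all passed as --name=value):
 *   --command=run|compare         default: run
 *   --mode=online|offline|hybrid  default: hybrid
 *   --run=smoke|full              default: smoke
 *   --out=<dir>                   default: <cwd>/benchmark/results
 *   --baseline=<report.json>      required for compare
 *   --current=<report.json>       required for compare
 */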
import { mkdir } from 'node:fs/promises'
import { basename, isAbsolute, join, resolve } from 'node:path'
import { createConfig } from './config/defaults.js'
import { runScenarios } from './harness/runner.js'
import {
collectBootstrapObservability,
collectProcessObservability,
} from './observability/adapters.js'
import {
buildBenchmarkReport,
buildComparisonText,
} from './reporting/report.js'
import { createCorrectnessAndToolScenarios } from './scenarios/correctnessTools.js'
import { createHeadlessScenarios } from './scenarios/headless.js'
import { createRestorationScenario } from './scenarios/restoration.js'
import { createStartupAndCommandScenarios } from './scenarios/startupCommand.js'
import type { BenchmarkMode, BenchmarkReport, BenchmarkRunKind } from './types.js'
import { readJsonFile, writeJsonFile, writeTextFile } from './utils/files.js'
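
/** Returns the value of a `--name=value` flag from process.argv, or undefined if absent. */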
function parseArg(name: string): string | undefined {
const hit = process.argv.find(value => value.startsWith(`${name}=`))
if (!hit) return undefined
return hit.slice(name.length + 1)
}
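
/** Resolves the benchmark mode from `--mode`, defaulting to 'hybrid'. */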
function resolveMode(): BenchmarkMode {
const raw = parseArg('--mode')
if (raw === 'online' || raw === 'offline' || raw === 'hybrid') return raw
return 'hybrid'
}
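
/** Resolves the run kind from `--run`, defaulting to 'smoke'. */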
function resolveRunKind(): BenchmarkRunKind {
const raw = parseArg('--run')
if (raw === 'smoke' || raw === 'full') return raw
return 'smoke'
}
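
/** Resolves the output directory from `--out`; relative paths resolve against rootDir. */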
function resolveOutputDir(rootDir: string): string {
const input = parseArg('--out')
if (!input) return join(rootDir, 'benchmark', 'results')
return isAbsolute(input) ? input : resolve(rootDir, input)
}
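
/**
 * Runs all benchmark scenarios, writes the timestamped JSON report plus a
 * stable latest.json and markdown summary, and sets a non-zero exit code
 * when the quality gate fails.
 */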
async function runBench(): Promise<void> {
const rootDir = process.cwd()
const nowIso = new Date().toISOString()
const mode = resolveMode()
const runKind = resolveRunKind()
const config = createConfig(mode, runKind)
const outputDir = resolveOutputDir(rootDir)
await mkdir(outputDir, { recursive: true })
const context = {
rootDir,
mode,
runKind,
nowIso,
outputDir,
}
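
  // Assemble the scenario list: startup/command, headless, correctness/tool,
  // and the restoration scenario.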
const scenarios = [
...createStartupAndCommandScenarios(config),
...createHeadlessScenarios(config),
...createCorrectnessAndToolScenarios(config),
createRestorationScenario(config),
]
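
  // Execute every scenario through the harness and collect per-scenario summaries.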
const summaries = await runScenarios({
scenarios,
context,
config,
})
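
  // Build the full report, attaching bootstrap and process observability snapshots.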
const report = buildBenchmarkReport({
rootDir,
generatedAt: nowIso,
config,
scenarios: summaries,
observability: {
bootstrapState: collectBootstrapObservability(),
process: collectProcessObservability(),
},
})
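
  // Persist the timestamped report, a stable latest.json copy, and a markdown summary.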
const reportPath = join(
outputDir,
`benchmark-${runKind}-${nowIso.replace(/[:.]/gu, '-')}.json`,
)
await writeJsonFile(reportPath, report)
const latestPath = join(outputDir, 'latest.json')
await writeJsonFile(latestPath, report)
await writeTextFile(
join(outputDir, 'latest-summary.md'),
renderSummaryMarkdown(report, basename(reportPath)),
)
process.stdout.write(`Benchmark report written: ${reportPath}\n`)
process.stdout.write(
`Quality gate: ${report.qualityGate.passed ? 'PASS' : 'FAIL'}\n`,
)
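
  // On quality-gate failure, print each reason and fail the process via exit code.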
if (!report.qualityGate.passed) {
for (const reason of report.qualityGate.reasons) {
process.stdout.write(`- ${reason}\n`)
}
process.exitCode = 1
}
}
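
/** Renders a human-readable markdown summary of a benchmark report. */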
function renderSummaryMarkdown(
report: BenchmarkReport,
fileName: string,
): string {
const lines: string[] = []
lines.push('# Benchmark Summary')
lines.push('')
lines.push(`- Report: ${fileName}`)
lines.push(`- Mode: ${report.mode}`)
lines.push(`- Run: ${report.runKind}`)
lines.push(
`- Overall score: ${report.aggregate.score.total.toFixed(2)} (latency ${report.aggregate.score.latency.toFixed(2)}, stability ${report.aggregate.score.stability.toFixed(2)}, quality ${report.aggregate.score.quality.toFixed(2)}, cost ${report.aggregate.score.cost.toFixed(2)})`,
)
lines.push(`- Success rate: ${report.aggregate.successRate.toFixed(2)}%`)
lines.push('')
lines.push('## Scenario Results')
for (const scenario of report.scenarios) {
const p95 = scenario.durationMs?.p95 ?? 0
lines.push(
`- ${scenario.id} ${scenario.name}: success ${scenario.successRate.toFixed(2)}%, p95 ${p95.toFixed(2)}ms`,
)
}
lines.push('')
lines.push(
`- Quality gate: ${report.qualityGate.passed ? 'PASS' : 'FAIL'} (${report.qualityGate.reasons.length} issue(s))`,
)
if (!report.qualityGate.passed) {
for (const reason of report.qualityGate.reasons) {
lines.push(` - ${reason}`)
}
}
lines.push('')
return `${lines.join('\n')}\n`
}
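
/** Compares a baseline report against a current report and writes comparison.md. */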
async function runCompare(): Promise<void> {
const rootDir = process.cwd()
const outputDir = resolveOutputDir(rootDir)
const baselinePath = parseArg('--baseline')
const currentPath = parseArg('--current')
if (!baselinePath || !currentPath) {
throw new Error('--baseline and --current are required for compare mode')
}
const baseline = await readJsonFile<BenchmarkReport>(
isAbsolute(baselinePath) ? baselinePath : resolve(rootDir, baselinePath),
)
const current = await readJsonFile<BenchmarkReport>(
isAbsolute(currentPath) ? currentPath : resolve(rootDir, currentPath),
)
const text = buildComparisonText(baseline, current)
const outPath = join(outputDir, 'comparison.md')
await writeTextFile(outPath, text)
process.stdout.write(`Comparison written: ${outPath}\n`)
}
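
// Entry point: dispatch on --command (default: 'run').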
const command = parseArg('--command') ?? 'run'
if (command === 'compare') {
await runCompare()
} else {
await runBench()
}