// Datasets — file size: 2,587 bytes (extraction-viewer metadata, not part of the module)
/**
 * How a benchmark run executes. NOTE(review): assumed from the names —
 * 'offline' = no external services, 'online' = against live services,
 * 'hybrid' = both; confirm against the runner that consumes this.
 */
export type BenchmarkMode = 'offline' | 'online' | 'hybrid'
/**
 * Depth of a benchmark run: presumably a quick 'smoke' sanity pass vs. the
 * 'full' suite — confirm how `BenchmarkConfig.iterations` varies per kind.
 */
export type BenchmarkRunKind = 'smoke' | 'full'
/**
 * Category a benchmark scenario belongs to; used to group scenarios and to
 * key per-category thresholds (see `BenchmarkThresholds.maximumP95MsByCategory`).
 */
export type ScenarioCategory =
  | 'startup'
  | 'commands'
  | 'headless'
  | 'correctness'
  | 'tools'
  | 'restoration'
/**
 * Ambient context handed to each scenario's `run` function.
 */
export type RunContext = {
  /** Root directory of the project under benchmark — presumably absolute; confirm with caller. */
  rootDir: string
  /** Execution mode for this run. */
  mode: BenchmarkMode
  /** Depth of this run (smoke vs. full). */
  runKind: BenchmarkRunKind
  /** Timestamp for the run; name suggests ISO-8601 format — confirm with producer. */
  nowIso: string
  /** Directory where run artifacts/reports are written. */
  outputDir: string
}
/**
 * Outcome of one execution (iteration) of a scenario.
 */
export type SingleExecution = {
  /** Whether this execution succeeded. */
  ok: boolean
  /** Wall-clock duration of the execution, in milliseconds. */
  durationMs: number
  /** Optional scenario-specific details; sampled into `ScenarioSummary.examples`. */
  details?: Record<string, unknown>
  /** Error description when the execution failed. */
  error?: string
  /** True when the execution was skipped rather than run. */
  skipped?: boolean
  /** Human-readable reason when `skipped` is true. */
  skipReason?: string
}
/**
 * A runnable benchmark scenario: identity/metadata plus an async `run`
 * callback that performs one or more executions and reports each outcome.
 */
export type Scenario = {
  /** Stable unique identifier for the scenario. */
  id: string
  /** Human-readable display name. */
  name: string
  /** Grouping category; keys per-category thresholds. */
  category: ScenarioCategory
  /** Short description of what the scenario measures. */
  description: string
  /** Free-form tags for filtering/selection. */
  tags: string[]
  /** Executes the scenario; returns one entry per iteration performed. */
  run: (context: RunContext) => Promise<SingleExecution[]>
}
/**
 * Summary statistics over a set of duration samples.
 * p50/p95/p99 are, by conventional naming, the 50th/95th/99th percentiles.
 */
export type Distribution = {
  min: number
  max: number
  mean: number
  p50: number
  p95: number
  p99: number
}
/**
 * Aggregated results for a single scenario across all of its executions.
 * Mirrors the `Scenario` metadata fields so the summary is self-describing.
 */
export type ScenarioSummary = {
  id: string
  name: string
  category: ScenarioCategory
  description: string
  tags: string[]
  /** Total executions attempted (presumably includes skipped — confirm with aggregator). */
  totalRuns: number
  skippedRuns: number
  successRuns: number
  failedRuns: number
  /** Success ratio; unit (fraction vs. percent) not shown here — confirm with producer. */
  successRate: number
  /** Duration statistics, or null when no timed executions exist (e.g. all skipped). */
  durationMs: Distribution | null
  /** Sampled `SingleExecution.details` payloads for inspection. */
  examples: Array<Record<string, unknown>>
  /** Collected error messages from failed executions. */
  errors: string[]
}
/**
 * Relative weights for the four scoring axes used in
 * `BenchmarkReport.aggregate.score`. Normalization (sum-to-1 vs. arbitrary)
 * is not visible here — confirm with the scorer.
 */
export type BenchmarkWeights = {
  latency: number
  stability: number
  quality: number
  cost: number
}
/**
 * Pass/fail limits evaluated by the quality gate.
 */
export type BenchmarkThresholds = {
  /** Minimum acceptable success rate, in percent. */
  minimumSuccessRatePct: number
  /** Per-category ceilings on p95 duration (ms); categories may be omitted. */
  maximumP95MsByCategory: Partial<Record<ScenarioCategory, number>>
  /** Maximum tolerated count of missing imports — presumably from a static check; confirm. */
  maximumMissingImports: number
}
/**
 * Full configuration for a benchmark run: mode/kind, per-scenario-group
 * iteration counts, timeouts, scoring weights, and quality-gate thresholds.
 */
export type BenchmarkConfig = {
  mode: BenchmarkMode
  runKind: BenchmarkRunKind
  /** Iteration counts per scenario group. */
  iterations: {
    startup: number
    commandLoad: number
    queueCorrectness: number
    toolPipeline: number
  }
  /** Timeouts in milliseconds. */
  timeoutsMs: {
    command: number
    onlineHeadless: number
  }
  /** Scoring weights (see `BenchmarkWeights`). */
  weights: BenchmarkWeights
  /** Quality-gate limits (see `BenchmarkThresholds`). */
  thresholds: BenchmarkThresholds
}
/**
 * Verdict of the quality gate: overall pass/fail plus the reasons
 * (presumably populated on failure — confirm with the gate implementation).
 */
export type QualityGateResult = {
  passed: boolean
  reasons: string[]
}
/**
 * Top-level artifact produced by a benchmark run: the configuration used,
 * per-scenario summaries, aggregate counts and scores, optional
 * observability snapshots, and the quality-gate verdict.
 */
export type BenchmarkReport = {
  /** Report schema version; pinned to the literal 1 so readers can detect format changes. */
  version: 1
  /** When the report was generated — presumably ISO-8601; confirm with writer. */
  generatedAt: string
  /** Root directory the run executed against. */
  rootDir: string
  mode: BenchmarkMode
  runKind: BenchmarkRunKind
  /** Configuration snapshot for reproducibility. */
  config: BenchmarkConfig
  /** One summary per scenario executed. */
  scenarios: ScenarioSummary[]
  /** Rollup across all scenarios. */
  aggregate: {
    totalScenarios: number
    failedScenarios: number
    skippedScenarios: number
    totalRuns: number
    /** Overall success ratio; unit (fraction vs. percent) not shown here — confirm with producer. */
    successRate: number
    /** Weighted scores per axis plus the combined total. */
    score: {
      latency: number
      stability: number
      quality: number
      cost: number
      total: number
    }
  }
  /** Optional runtime snapshots captured during the run. */
  observability: {
    bootstrapState?: Record<string, unknown>
    process?: Record<string, unknown>
  }
  /** Pass/fail verdict against `config.thresholds`. */
  qualityGate: QualityGateResult
}