zkaedi commited on
Commit
1a198fe
Β·
verified Β·
1 Parent(s): 81e2329

Add unified 4-stage audit pipeline orchestrator

Browse files
Files changed (1) hide show
  1. zkaedi_audit_pipeline.py +575 -0
zkaedi_audit_pipeline.py ADDED
@@ -0,0 +1,575 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ ╔═══════════════════════════════════════════════════════════════════════╗
4
+ β•‘ πŸ”± ZKAEDI PRIME β€” UNIFIED SECURITY AUDIT PIPELINE β•‘
5
+ β•‘ β•‘
6
+ β•‘ Chains all 4 HuggingFace models/tools into a single audit command: β•‘
7
+ β•‘ β•‘
8
+ β•‘ Stage 1: gemma-2-9b-solidity-merged β•‘
9
+ β•‘ β†’ Generates vulnerability energy signatures from Solidity β•‘
10
+ β•‘ β•‘
11
+ β•‘ Stage 2: prime-swarm-hunter (Gradio Space) β•‘
12
+ β•‘ β†’ 12-agent temporal compound detection β•‘
13
+ β•‘ β•‘
14
+ β•‘ Stage 3: leviathan-v2 β•‘
15
+ β•‘ β†’ CNN exploit topology classification + PRIME refinement β•‘
16
+ β•‘ β•‘
17
+ β•‘ Stage 4: solidity-vuln-auditor-7b β•‘
18
+ β•‘ β†’ Synthesizes final professional audit report β•‘
19
+ β•‘ β•‘
20
+ β•‘ Author: ZKAEDI β€” Offensive Healer β•‘
21
+ β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•
22
+
23
+ USAGE:
24
+ # Full pipeline (requires GPU for stages 1 & 4)
25
+ python zkaedi_audit_pipeline.py contract.sol --full
26
+
27
+ # Stages 2+3 only (CPU, uses mock signatures)
28
+ python zkaedi_audit_pipeline.py --preset defi_lending_pool
29
+
30
+ # Custom signatures JSON β†’ swarm + leviathan
31
+ python zkaedi_audit_pipeline.py --signatures vulns.json
32
+
33
+ REQUIREMENTS:
34
+ pip install gradio_client huggingface_hub safetensors numpy
35
+ # For stages 1 & 4: pip install transformers torch accelerate
36
+ """
37
+ from __future__ import annotations
38
+
39
+ import json
40
+ import time
41
+ import sys
42
+ import os
43
+ import argparse
44
+ import numpy as np
45
+ from pathlib import Path
46
+ from dataclasses import dataclass
47
+
48
+
49
+ # ══════════════════════════════════════════════════════════════
50
+ # ANSI COLORS
51
+ # ══════════════════════════════════════════════════════════════
52
class C:
    """ANSI escape helpers: a small palette plus a per-character gradient renderer."""
    # Reset / weight
    RST = "\033[0m"
    B = "\033[1m"
    D = "\033[2m"
    # Bright base colors
    CY = "\033[96m"
    MG = "\033[95m"
    GR = "\033[92m"
    RD = "\033[91m"
    YL = "\033[93m"
    WH = "\033[97m"
    # 24-bit "neon" truecolor sequences
    NC = "\033[38;2;0;255;255m"
    NM = "\033[38;2;255;0;255m"
    NG = "\033[38;2;0;255;128m"
    NK = "\033[38;2;255;215;0m"

    @staticmethod
    def grad(text, s, e):
        """Color *text* with a left-to-right RGB gradient from *s* to *e*.

        Each character gets its own truecolor escape, linearly interpolated
        by position; a reset code is appended at the end.
        """
        span = max(len(text), 1)  # avoid division by zero on empty text
        colored = []
        for idx, ch in enumerate(text):
            r = int(s[0] + (e[0] - s[0]) * idx / span)
            g = int(s[1] + (e[1] - s[1]) * idx / span)
            b = int(s[2] + (e[2] - s[2]) * idx / span)
            colored.append(f"\033[38;2;{r};{g};{b}m{ch}")
        colored.append(C.RST)
        return "".join(colored)
66
+
67
+
68
+ # ══════════════════════════════════════════════════════════════
69
+ # PIPELINE STAGES
70
+ # ══════════════════════════════════════════════════════════════
71
def stage_1_gemma_signatures(solidity_code: str, hf_token: str | None = None) -> list[dict]:
    """
    Stage 1: gemma-2-9b-solidity-merged.

    Generates vulnerability energy signatures from Solidity source code by
    prompting the merged Gemma-2 model through the HF Inference API.
    Requires GPU (or an HF Inference Endpoint).

    Args:
        solidity_code: Raw Solidity source (only the first 4000 chars are sent).
        hf_token: HuggingFace API token, if the endpoint needs auth.

    Returns:
        A list of signature dicts (id, vuln_type, swc, severity, position,
        energy, radius, description, compound_partner, compound_type), or []
        on any failure so the caller can fall back to a preset scenario.
    """
    print(f"\n {C.B}{C.NC}STAGE 1: VULNERABILITY SIGNATURE GENERATION{C.RST}")
    print(f" {C.D}Model: zkaedi/gemma-2-9b-solidity-merged (9.2B params){C.RST}")

    try:
        from huggingface_hub import InferenceClient

        client = InferenceClient(
            model="zkaedi/gemma-2-9b-solidity-merged",
            token=hf_token,
        )

        prompt = f"""<start_of_turn>user
You are ZKAEDI PRIME — a Solidity smart contract energy signature auditor.
Analyze this contract and output vulnerability signatures as a JSON array.
Each signature must have: id, vuln_type, swc, severity, position [x,y],
energy, radius, description, compound_partner (if any), compound_type.

Contract:
```solidity
{solidity_code[:4000]}
```

Output ONLY a JSON array of vulnerability signatures, no other text.
<end_of_turn>
<start_of_turn>model
"""
        response = client.text_generation(
            prompt,
            max_new_tokens=2000,
            temperature=0.1,
            return_full_text=False,
        )

        # Parse JSON from the response; tolerate a markdown code fence.
        text = response.strip()
        if text.startswith("```"):
            # Take the content between the first pair of fences.
            text = text.split("```")[1]
            if text.startswith("json"):
                text = text[4:]
        signatures = json.loads(text)
        if not isinstance(signatures, list):
            # A lone JSON object (or other value) is not a usable signature
            # set — raise so the generic handler below triggers the fallback
            # instead of returning a wrongly-typed result to the caller.
            raise ValueError("model output is not a JSON array of signatures")
        print(f" {C.NG}Generated {len(signatures)} vulnerability signatures{C.RST}")
        return signatures

    except ImportError:
        print(f" {C.YL}huggingface_hub InferenceClient not available{C.RST}")
        print(f" {C.D}Falling back to preset signatures{C.RST}")
        return []
    except Exception as e:
        print(f" {C.RD}Stage 1 failed: {e}{C.RST}")
        print(f" {C.D}Falling back to preset signatures{C.RST}")
        return []
128
+
129
+
130
def stage_2_swarm_analysis(signatures: list[dict] | str,
                           steps: int = 300, window: int = 150,
                           seed: int = 42, preset: str | None = None) -> dict:
    """
    Stage 2: prime-swarm-hunter (Gradio Space).

    Runs 12-agent temporal compound detection. CPU only — tries the hosted
    HF Space API first, then falls back to importing the local engine from
    the script's directory.

    Args:
        signatures: Signature list (or JSON string of one); ignored when
            *preset* is given.
        steps: Swarm simulation steps.
        window: Temporal correlation window.
        seed: RNG seed forwarded to the swarm.
        preset: Named preset scenario to run instead of custom signatures.

    Returns:
        The swarm result dict, or {"error": ...} when both the Space and
        the local engine are unavailable.
    """
    print(f"\n {C.B}{C.NC}STAGE 2: 12-AGENT SWARM COMPOUND DETECTION{C.RST}")
    print(f" {C.D}Space: zkaedi/prime-swarm-hunter (12 agents, {steps} steps){C.RST}")

    def _report(result: dict) -> dict:
        # Shared success-summary print for both the Space and local paths.
        s = result.get("summary", {})
        print(f" {C.NG}Solo: {s.get('solo_detected', 0)}/{s.get('total_signatures', 0)} "
              f"| Compound: {s.get('compounds_detected', 0)}/{s.get('compound_patterns', 0)} "
              f"| Risk: {s.get('risk_score', 0)}{C.RST}")
        return result

    # ── Remote path: the hosted Gradio Space ──────────────────
    try:
        from gradio_client import Client
        has_gradio = True
    except ImportError:
        has_gradio = False

    if has_gradio:
        try:
            client = Client("zkaedi/prime-swarm-hunter", verbose=False)

            if preset:
                print(f" {C.D}Using preset: {preset}{C.RST}")
                result_json = client.predict(
                    preset, steps, window, seed,
                    api_name="/api_preset"
                )
            else:
                # Count once, serialize once (no dumps→loads round-trip).
                if isinstance(signatures, list):
                    n_sigs = len(signatures)
                    signatures = json.dumps(signatures)
                else:
                    n_sigs = len(json.loads(signatures))
                print(f" {C.D}Using {n_sigs} custom signatures{C.RST}")
                result_json = client.predict(
                    signatures, steps, window, seed,
                    api_name="/api_custom"
                )

            return _report(json.loads(result_json))
        except Exception as e:
            # Fall through to the local engine below.
            print(f" {C.YL}Space unavailable: {e}{C.RST}")

    # ── Local fallback: import the engine next to this script ──
    try:
        sys.path.insert(0, str(Path(__file__).parent))
        from prime_swarm_engine import run_preset, run_custom
        print(f" {C.YL}Running swarm locally...{C.RST}")
        if preset:
            result = run_preset(preset, steps, window, seed)
        else:
            sigs = json.loads(signatures) if isinstance(signatures, str) else signatures
            result = run_custom(sigs, steps, window, seed)
        return _report(result)
    except Exception as e2:
        print(f" {C.RD}Local engine also failed: {e2}{C.RST}")
        return {"error": str(e2)}
194
+
195
+
196
def stage_3_leviathan_classify(swarm_result: dict, seed: int = 42) -> dict:
    """
    Stage 3: leviathan-v2.

    CNN exploit topology classification + PRIME bistable refinement.
    Builds a synthetic 256x256 energy manifold from the swarm findings
    (noise floor plus one Gaussian bump per solo finding, amplified by
    compound findings), then classifies it with the Leviathan model
    downloaded from the hub.

    Args:
        swarm_result: Stage-2 output; reads "solo_findings",
            "compound_findings" and "summary.risk_score".
        seed: RNG seed for the manifold noise floor and the model audit.

    Returns:
        The Leviathan audit dict (verdict/raw_score/refined_score/...).
        On any failure, a verdict derived from the swarm risk score alone,
        tagged with "fallback": True.
    """
    print(f"\n {C.B}{C.NC}STAGE 3: LEVIATHAN TOPOLOGY CLASSIFICATION{C.RST}")
    print(f" {C.D}Model: zkaedi/leviathan-v2 (264K params, PRIME refinement){C.RST}")

    try:
        from huggingface_hub import hf_hub_download
        sys.path.insert(0, str(Path(__file__).parent))

        # Download and load the Leviathan weights.
        weights_path = hf_hub_download("zkaedi/leviathan-v2",
                                       "leviathan_v2_session_trained.safetensors")

        from safetensors.numpy import load_file
        weights = load_file(weights_path)

        # Import the Leviathan class (local file first, else fetch from the hub).
        try:
            from leviathan import Leviathan
        except ImportError:
            leviathan_py = hf_hub_download("zkaedi/leviathan-v2", "leviathan.py")
            import importlib.util
            spec = importlib.util.spec_from_file_location("leviathan", leviathan_py)
            mod = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(mod)
            Leviathan = mod.Leviathan

        model = Leviathan(weights)

        # Build the synthetic manifold: low-amplitude Gaussian noise floor,
        # zero potential field.
        rng = np.random.default_rng(seed)
        H = rng.normal(0, 0.1, (256, 256)).astype(np.float32)
        V = np.zeros((256, 256), dtype=np.float32)

        solo_findings = swarm_result.get("solo_findings", [])
        compound_findings = swarm_result.get("compound_findings", [])

        # Precompute the 21x21 Gaussian stamp once — hoisted out of the
        # per-finding loop (the original recomputed exp() per cell, per
        # finding, in a Python double loop).
        offs = np.arange(-10, 11)
        sq_dist = offs[:, None] ** 2 + offs[None, :] ** 2
        stamp = np.exp(-sq_dist / 50.0)

        # Inject one energy bump per detected solo vulnerability.
        for finding in solo_findings:
            pos = finding.get("position", [128, 128])
            energy = {"CRITICAL": 8.0, "HIGH": 5.0, "MEDIUM": 3.0, "LOW": 1.0}.get(
                finding.get("severity", "MEDIUM"), 3.0)
            x, y = int(pos[0] * 2.56), int(pos[1] * 2.56)  # Scale 0-100 → 0-256
            x, y = min(max(x, 5), 250), min(max(y, 5), 250)
            # Add the stamp, clipped to the manifold bounds.
            lo_x, hi_x = max(x - 10, 0), min(x + 10, 255)
            lo_y, hi_y = max(y - 10, 0), min(y + 10, 255)
            H[lo_x:hi_x + 1, lo_y:hi_y + 1] += energy * stamp[
                lo_x - x + 10:hi_x - x + 11, lo_y - y + 10:hi_y - y + 11]

        # Each compound finding amplifies overall threat energy by 1.3x.
        if compound_findings:
            H *= 1.3 ** len(compound_findings)

        # Normalize so the peak is 1 before classification.
        if H.max() > 0:
            H = H / H.max()

        # Run the Leviathan audit.
        result = model.audit(H, V, seed=seed)
        vc = C.RD if result["verdict"] == "THREAT" else (
            C.NG if result["verdict"] == "CLEAN" else C.YL)
        print(f" {vc}{C.B}Verdict: {result['verdict']}{C.RST} "
              f"raw={result['raw_score']:.4f} refined={result['refined_score']:.4f} "
              f"committed={result['committed']}")

        return result

    except Exception as e:
        print(f" {C.RD}Stage 3 failed: {e}{C.RST}")
        # Derive a verdict from the swarm risk score alone.
        risk = swarm_result.get("summary", {}).get("risk_score", 0)
        verdict = "THREAT" if risk > 30 else ("UNCERTAIN" if risk > 10 else "CLEAN")
        return {"raw_score": risk / 100.0, "refined_score": risk / 100.0,
                "verdict": verdict, "committed": verdict != "UNCERTAIN",
                "prime_H": 0.0, "iterations": 0, "fallback": True}
279
+
280
+
281
def stage_4_final_report(solidity_code: str, swarm_result: dict,
                         leviathan_result: dict, hf_token: str | None = None) -> str:
    """
    Stage 4: solidity-vuln-auditor-7b.

    Synthesizes a professional audit report from all findings.
    Requires GPU (or an HF Inference Endpoint); falls back to a locally
    generated structured report when the model is unavailable.

    Args:
        solidity_code: Raw Solidity source (first 2000 chars go in the prompt).
        swarm_result: Stage-2 output (summary + findings).
        leviathan_result: Stage-3 output (verdict + scores).
        hf_token: HuggingFace API token, if the endpoint needs auth.

    Returns:
        The report text (LLM-generated, or the local fallback report).
    """
    print(f"\n {C.B}{C.NC}STAGE 4: PROFESSIONAL AUDIT REPORT{C.RST}")
    print(f" {C.D}Model: zkaedi/solidity-vuln-auditor-7b (7.6B params, Qwen2){C.RST}")

    # Build the report context from stages 2+3. Collect finding lines in a
    # list and join once (the original used quadratic string +=).
    s = swarm_result.get("summary", {})
    finding_lines = [
        f"- [{f.get('severity', 'MEDIUM')}] {f.get('swc', '')} "
        f"{f.get('vuln_type', '')}: {f.get('description', '')}\n"
        for f in swarm_result.get("solo_findings", [])
    ]
    finding_lines += [
        f"- [CRITICAL COMPOUND] {c.get('compound_type', '')}: "
        f"{' + '.join(c.get('components', []))}\n"
        for c in swarm_result.get("compound_findings", [])
    ]
    findings_text = "".join(finding_lines)

    leviathan_verdict = leviathan_result.get("verdict", "UNCERTAIN")

    try:
        from huggingface_hub import InferenceClient

        client = InferenceClient(
            model="zkaedi/solidity-vuln-auditor-7b",
            token=hf_token,
        )

        prompt = f"""<|im_start|>user
Generate a professional smart contract security audit report.

SWARM ANALYSIS (12-agent Hamiltonian compound detection):
- Solo vulnerabilities: {s.get('solo_detected', 0)}/{s.get('total_signatures', 0)}
- Compound patterns: {s.get('compounds_detected', 0)}/{s.get('compound_patterns', 0)}
- Risk score: {s.get('risk_score', 0)}

FINDINGS:
{findings_text}

LEVIATHAN TOPOLOGY CLASSIFICATION:
- Verdict: {leviathan_verdict}
- Confidence: {leviathan_result.get('refined_score', 0):.4f}
- PRIME committed: {leviathan_result.get('committed', False)}

CONTRACT (first 2000 chars):
```solidity
{solidity_code[:2000]}
```

Write a professional audit report with executive summary, findings table,
risk assessment, and remediation recommendations.
<|im_end|>
<|im_start|>assistant
"""
        response = client.text_generation(
            prompt, max_new_tokens=2000, temperature=0.2, return_full_text=False)
        print(f" {C.NG}Report generated ({len(response)} chars){C.RST}")
        return response

    except Exception as e:
        print(f" {C.YL}Stage 4 model unavailable: {e}{C.RST}")
        print(f" {C.D}Generating report from pipeline data...{C.RST}")
        # Fallback: build a structured report from the pipeline data alone.
        return _generate_fallback_report(swarm_result, leviathan_result)
349
+
350
+
351
+ def _generate_fallback_report(swarm_result: dict, leviathan_result: dict) -> str:
352
+ """Generate a structured report without the LLM."""
353
+ s = swarm_result.get("summary", {})
354
+ lines = [
355
+ "=" * 60,
356
+ " ZKAEDI PRIME SECURITY AUDIT REPORT",
357
+ " Generated by: Unified Audit Pipeline v1.0",
358
+ "=" * 60,
359
+ "",
360
+ "EXECUTIVE SUMMARY",
361
+ "-" * 40,
362
+ f" Overall Verdict: {leviathan_result.get('verdict', 'UNKNOWN')}",
363
+ f" Risk Score: {s.get('risk_score', 0)}",
364
+ f" Solo Findings: {s.get('solo_detected', 0)}/{s.get('total_signatures', 0)}",
365
+ f" Compound Patterns: {s.get('compounds_detected', 0)}/{s.get('compound_patterns', 0)}",
366
+ f" Leviathan Score: {leviathan_result.get('refined_score', 0):.4f}",
367
+ f" PRIME Committed: {leviathan_result.get('committed', False)}",
368
+ "",
369
+ "FINDINGS",
370
+ "-" * 40,
371
+ ]
372
+
373
+ for f in swarm_result.get("solo_findings", []):
374
+ lines.append(f" [{f.get('severity', '?'):<8}] {f.get('swc', 'N/A'):<8} "
375
+ f"{f.get('vuln_type', 'unknown')}")
376
+ lines.append(f" {f.get('description', '')}")
377
+ lines.append(f" Detected at step {f.get('detected_at_step', '?')} "
378
+ f"by {f.get('detected_by_role', '?')}")
379
+ lines.append("")
380
+
381
+ if swarm_result.get("compound_findings"):
382
+ lines.append("COMPOUND VULNERABILITIES (CRITICAL)")
383
+ lines.append("-" * 40)
384
+ for c in swarm_result["compound_findings"]:
385
+ lines.append(f" {c.get('compound_type', 'unknown')}")
386
+ lines.append(f" Components: {' + '.join(c.get('components', []))}")
387
+ lines.append(f" Temporal gap: {c.get('temporal_gap', '?')} steps")
388
+ agents = c.get("agents_involved", {})
389
+ lines.append(f" Discovered by: {agents.get('agent_a', {}).get('role', '?')} "
390
+ f"+ {agents.get('agent_b', {}).get('role', '?')}")
391
+ lines.append("")
392
+
393
+ lines.extend([
394
+ "METHODOLOGY",
395
+ "-" * 40,
396
+ " Stage 1: Vulnerability energy signature generation (Gemma 2 9B)",
397
+ " Stage 2: 12-agent Hamiltonian swarm with temporal correlation",
398
+ " Stage 3: Leviathan CNN topology classification + PRIME refinement",
399
+ " Stage 4: Report synthesis",
400
+ "",
401
+ "PIPELINE PARAMETERS",
402
+ "-" * 40,
403
+ f" Swarm agents: 12",
404
+ f" Swarm steps: {swarm_result.get('config', {}).get('steps', 300)}",
405
+ f" Temporal window: {swarm_result.get('config', {}).get('temporal_window', 150)}",
406
+ f" PRIME eta: 3.50",
407
+ f" PRIME gamma: 0.30",
408
+ f" Leviathan params: 264,897",
409
+ "",
410
+ "=" * 60,
411
+ " ZKAEDI PRIME β€” This is not standard auditing.",
412
+ " This is computational physics meeting security analysis.",
413
+ "=" * 60,
414
+ ])
415
+
416
+ return "\n".join(lines)
417
+
418
+
419
+ # ══════════════════════════════════════════════════════════════
420
+ # MAIN ORCHESTRATOR
421
+ # ══════════════════════════════════════════════════════════════
422
def run_pipeline(solidity_code: str | None = None,
                 signatures: list[dict] | None = None,
                 preset: str | None = None,
                 steps: int = 300,
                 window: int = 150,
                 seed: int = 42,
                 hf_token: str | None = None,
                 skip_stage_1: bool = False,
                 skip_stage_4: bool = False) -> dict:
    """
    Run the full ZKAEDI PRIME audit pipeline.

    Orchestrates the four stages — signature generation, swarm compound
    detection, Leviathan classification and report synthesis — falling
    back to a preset scenario / local report when a stage cannot run.

    Args:
        solidity_code: Raw Solidity source code (for stages 1 & 4)
        signatures: Pre-computed vulnerability signatures (skips stage 1)
        preset: Use a preset scenario instead of real code
        steps: Swarm simulation steps
        window: Temporal correlation window
        seed: Random seed
        hf_token: HuggingFace API token
        skip_stage_1: Skip Gemma signature generation
        skip_stage_4: Skip Qwen report generation

    Returns:
        Dict with verdict, risk_score, elapsed seconds, the per-stage
        results and the report text — or {"error": ...} if stage 2 failed.
    """
    started = time.time()

    top_banner = C.grad("=" * 65, (0, 255, 255), (255, 0, 255))
    print(f"\n{top_banner}")
    print(C.grad(" 🔱 ZKAEDI PRIME — UNIFIED SECURITY AUDIT PIPELINE 🔱 ",
                 (255, 0, 255), (0, 255, 255)))
    print(f" {C.D}4-stage: Gemma → Swarm → Leviathan → Auditor{C.RST}")
    print(f"{top_banner}\n")

    # ── Stage 1: Signature Generation ─────────────────────────
    if signatures:
        print(f" {C.D}Stage 1: Skipped (signatures provided){C.RST}")
        sig_payload = signatures
    elif preset:
        print(f" {C.D}Stage 1: Skipped (using preset: {preset}){C.RST}")
        sig_payload = None
    elif solidity_code and not skip_stage_1:
        sig_payload = stage_1_gemma_signatures(solidity_code, hf_token)
        if not sig_payload:
            # Stage 1 produced nothing usable — fall back to a preset run.
            print(f" {C.YL}No signatures generated, falling back to preset{C.RST}")
            preset = "defi_lending_pool"
            sig_payload = None
    else:
        print(f" {C.D}Stage 1: Skipped{C.RST}")
        preset = preset or "defi_lending_pool"
        sig_payload = None

    # ── Stage 2: Swarm Analysis ───────────────────────────────
    if preset:
        swarm_result = stage_2_swarm_analysis(None, steps, window, seed, preset=preset)
    else:
        swarm_result = stage_2_swarm_analysis(sig_payload, steps, window, seed)

    if "error" in swarm_result:
        print(f" {C.RD}Pipeline aborted at Stage 2{C.RST}")
        return {"error": swarm_result["error"]}

    # ── Stage 3: Leviathan Classification ─────────────────────
    leviathan_result = stage_3_leviathan_classify(swarm_result, seed)

    # ── Stage 4: Report Generation ────────────────────────────
    if solidity_code and not skip_stage_4:
        report = stage_4_final_report(solidity_code, swarm_result, leviathan_result, hf_token)
    else:
        report = _generate_fallback_report(swarm_result, leviathan_result)

    elapsed = time.time() - started

    # ── Final Summary ─────────────────────────────────────────
    summary = swarm_result.get("summary", {})
    verdict = leviathan_result.get("verdict", "UNKNOWN")
    verdict_color = C.RD if verdict == "THREAT" else (C.NG if verdict == "CLEAN" else C.YL)

    print(f"\n{C.grad('=' * 65, (0, 255, 255), (255, 0, 255))}")
    print(f" {C.B}{C.NK}AUDIT COMPLETE{C.RST} {elapsed:.1f}s")
    print(f"\n {C.B}Verdict: {verdict_color}{verdict}{C.RST}")
    print(f" {C.B}Risk Score: {summary.get('risk_score', 0)}{C.RST}")
    print(f" {C.B}Solo: {summary.get('solo_detected', 0)}/{summary.get('total_signatures', 0)}{C.RST}")
    print(f" {C.B}Compound: {summary.get('compounds_detected', 0)}/{summary.get('compound_patterns', 0)}{C.RST}")
    print(f" {C.B}Leviathan: {leviathan_result.get('refined_score', 0):.4f} "
          f"(committed={leviathan_result.get('committed', False)}){C.RST}")
    print(f"{C.grad('=' * 65, (255, 0, 255), (0, 255, 255))}\n")

    return {
        "verdict": verdict,
        "risk_score": summary.get("risk_score", 0),
        "elapsed": elapsed,
        "swarm": swarm_result,
        "leviathan": leviathan_result,
        "report": report,
    }
517
+
518
+
519
+ # ══════════════════════════════════════════════════════════════
520
+ # CLI
521
+ # ══════════════════════════════════════════════════════════════
522
def main():
    """CLI entry point: parse arguments, run the pipeline, print the result."""
    parser = argparse.ArgumentParser(
        description="🔱 ZKAEDI PRIME Unified Security Audit Pipeline")
    parser.add_argument("contract", nargs="?", help="Solidity file path")
    parser.add_argument("--preset", choices=["defi_lending_pool", "nft_marketplace", "token_bridge"],
                        help="Use preset vulnerability scenario")
    parser.add_argument("--signatures", help="JSON file with vulnerability signatures")
    parser.add_argument("--steps", type=int, default=300, help="Swarm steps (default: 300)")
    parser.add_argument("--window", type=int, default=150, help="Temporal window (default: 150)")
    parser.add_argument("--seed", type=int, default=42, help="Random seed")
    parser.add_argument("--token", help="HuggingFace API token")
    parser.add_argument("--full", action="store_true", help="Run all 4 stages (requires GPU)")
    parser.add_argument("--report-only", action="store_true", help="Print report to stdout")
    parser.add_argument("--json", action="store_true", help="Output results as JSON")

    args = parser.parse_args()

    hf_token = args.token or os.environ.get("HF_TOKEN")
    solidity_code = None
    signatures = None

    # Read inputs with an explicit encoding: the platform default may not
    # be UTF-8 (e.g. on Windows), while Solidity sources and the
    # signatures JSON are UTF-8 text.
    if args.contract:
        solidity_code = Path(args.contract).read_text(encoding="utf-8")

    if args.signatures:
        with open(args.signatures, encoding="utf-8") as f:
            signatures = json.load(f)

    # Default to a preset scenario when neither a contract nor signatures
    # are supplied; stages 1 & 4 only run with --full (they need a GPU).
    result = run_pipeline(
        solidity_code=solidity_code,
        signatures=signatures,
        preset=args.preset or (None if (args.contract or args.signatures) else "defi_lending_pool"),
        steps=args.steps,
        window=args.window,
        seed=args.seed,
        hf_token=hf_token,
        skip_stage_1=not args.full,
        skip_stage_4=not args.full,
    )

    if args.json:
        # Strip the (potentially long) report for JSON output; keep its size.
        output = {k: v for k, v in result.items() if k != "report"}
        output["report_length"] = len(result.get("report", ""))
        print(json.dumps(output, indent=2, default=str))
    elif args.report_only:
        print(result.get("report", "No report generated"))
    else:
        print(result.get("report", ""))
572
+
573
+
574
# Run the CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()