Fabuilds committed on
Commit
780d252
Β·
verified Β·
1 Parent(s): 66363ae

Delete chiral_api.py

Browse files
Files changed (1) hide show
  1. chiral_api.py +0 -714
chiral_api.py DELETED
@@ -1,714 +0,0 @@
1
- """
2
- CHIRAL API - Antigravity Pattern Index
3
-
4
- Exposes the lattice INTERFACE while keeping CONTENT on the encrypted volume.
5
- The outside world sees: pattern labels, status, magnitude, layers, domains.
6
- The outside world does NOT see: problem/solution text, hit tracking internals.
7
-
8
- The key decodes inward, not outward.
9
- """
10
- import sys
11
- import os
12
- # Handle imports from parent directory
13
- BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
14
- if BASE_DIR not in sys.path:
15
- sys.path.append(BASE_DIR)
16
-
17
# Standard library
import time
import json
from collections import deque
from typing import Optional, List

# Third-party
import torch
import numpy as np
# BUG FIX: HTTPException, Header and Depends are exported by the top-level
# `fastapi` package, not `fastapi.responses` — the old import raised
# ImportError at startup.
from fastapi import FastAPI, HTTPException, Header, Depends
from fastapi.responses import FileResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
27
-
28
# 0x52-A2A SECURITY
# Static bearer-token registry: each accepted token maps to its privilege
# scope. INTERNAL gets full access; MARKETPLACE gets structural metadata only.
TOKEN_SCOPES = {
    "0x528-A2A-SOVEREIGN": "INTERNAL",      # Full Access (User/Auditor)
    "MARKET-0x52-ALPHA-77": "MARKETPLACE",  # Structural Metadata Only
    "A2A-HANDSHAKE-INIT": "MARKETPLACE",    # Initial connection token
    "0x528-ETHER-BRIDGE": "MARKETPLACE",    # Satellite Bridge Token
}
35
-
36
def verify_internal(x_chiral_token: str = Header(...)):
    """FastAPI dependency: admit only tokens carrying the INTERNAL scope.

    Raises HTTP 403 for unknown tokens and for marketplace-scoped ones.
    Returns the raw token on success.
    """
    if TOKEN_SCOPES.get(x_chiral_token) != "INTERNAL":
        raise HTTPException(
            status_code=403,
            detail="CHIRAL_SECURITY_FAULT: Privilege Escalation Attempt Blocked. Internal Scope Required."
        )
    return x_chiral_token
44
-
45
def verify_token(x_chiral_token: str = Header(...)):
    """FastAPI dependency: accept any registered token; return its scope string."""
    try:
        return TOKEN_SCOPES[x_chiral_token]
    except KeyError:
        raise HTTPException(status_code=403, detail="CHIRAL_RESONANCE_FAILURE: Invalid Token")
49
-
50
# --- RESONANCE SYSTEM INTEGRATION (Phase 32) ---
# Best-effort load of the dual resonance brain. If construction fails the API
# still starts: BRAIN stays None and the /v1/reason + /v1/system/structure
# endpoints answer 503 instead.
try:
    from resonance_transformer.dispatcher import DualResonanceSystem

    print("[CHIRAL]: Loading Dual-System Architecture...")
    RESONANCE_CONFIG = {
        'vocab_size': 1000,
        'fast_dim': 64,
        'slow_dim': 64,
        'threshold': 0.7
    }
    BRAIN = DualResonanceSystem(RESONANCE_CONFIG)
    # FIX: repaired mojibake in the log string ("MΓΆbius" -> "Möbius").
    print("[CHIRAL]: Dual-System Online (Fast Möbius + Slow Tesseract).")
except Exception as e:
    print(f"[CHIRAL WARNING]: Could not load Resonance Brain: {e}")
    BRAIN = None
65
-
66
- from in_memory_index import InMemoryIndex
67
-
68
# ─── App ───────────────────────────────────────────────
# Public ASGI application; metadata shows up in the generated OpenAPI docs.
app = FastAPI(
    version="0.52",
    title="Antigravity Chiral API",
    description="Pattern index interface. Content stays on the encrypted volume.",
)
74
-
75
# CORS: the dashboard and agent clients may be served from any origin,
# so reads (GET) and writes (POST) are allowed cross-site.
app.add_middleware(
    CORSMiddleware,
    allow_headers=["*"],
    allow_methods=["GET", "POST"],
    allow_origins=["*"],
)
81
-
82
# ─── State ─────────────────────────────────────────────
# The lattice itself: labels/status/hits live in memory; content stays on the volume.
index = InMemoryIndex()
84
-
85
# --- Demand Guardian (Surge Pricing) ---
# Rolling query log plus the tunables that drive get_surge_multiplier().
REQUEST_LOG = deque()  # Timestamps of recent queries
DEMAND_WINDOW = 60     # 1 minute window
SURGE_THRESHOLD = 10   # Start surging after 10 QPM
BASE_PRICE = 0.05      # $0.05 per logic kernel
90
-
91
def get_surge_multiplier():
    """Return the current surge factor.

    1.0 at or below SURGE_THRESHOLD queries per DEMAND_WINDOW, then +0.1 for
    each query above the threshold. Side effect: prunes stale timestamps
    from REQUEST_LOG.
    """
    cutoff = time.time() - DEMAND_WINDOW
    # Drop entries that have aged out of the demand window.
    while REQUEST_LOG and REQUEST_LOG[0] < cutoff:
        REQUEST_LOG.popleft()

    excess = len(REQUEST_LOG) - SURGE_THRESHOLD
    if excess <= 0:
        return 1.0
    # Simple linear surge above the threshold.
    return 1.0 + excess * 0.1
103
-
104
- # ─── Models ────────────────────────────────────────────
105
- class QueryRequest(BaseModel):
106
- query: str
107
- threshold: Optional[float] = None
108
- record: bool = True
109
- steering_weights: Optional[List[float]] = None # The 32-Slider Control Surface
110
-
111
class ChiralPattern(BaseModel):
    """What the outside world sees — structure, not content."""
    label: str
    domain: str
    confidence: float
    relevance: float
    status: str  # NEW/UNCONFIRMED/PLAUSIBLE/CONFIRMED/DEEP_LOGIC
    hits: int
    magnitude: float
    layers: list
    source: str  # antigravity / gemini
122
-
123
class QueryResponse(BaseModel):
    """Response envelope for /search: matches plus timing metadata."""
    query: str
    results: list[ChiralPattern]
    count: int
    search_ms: float
    threshold: float
129
-
130
class IntentRequest(BaseModel):
    """Body for /verify_intent: declared goal vs. the action actually taken."""
    intent: str
    execution: str
133
-
134
class IntentResponse(BaseModel):
    """Verdict of the Mirror Product check on /verify_intent."""
    alignment_score: float
    verdict: str
    analysis: dict
138
-
139
# ─── Chiral Filter ─────────────────────────────────────
def to_chiral(result: dict) -> ChiralPattern:
    """
    Strip content, expose only structure.
    Problem/solution text stays on the volume.
    """
    # Structural fields with their fall-back values; anything else in the
    # raw result (problem/solution text, internals) is simply not copied.
    defaults = {
        "label": "",
        "domain": "unknown",
        "confidence": 0.5,
        "relevance": 0.0,
        "status": "NEW",
        "hits": 0,
        "magnitude": 0.0,
        "layers": [],
        "source": "unknown",
    }
    fields = {name: result.get(name, fallback) for name, fallback in defaults.items()}
    return ChiralPattern(**fields)
156
-
157
- # ─── Routes ────────────────────────────────────────────
158
-
159
@app.get("/instruction")
def get_bot_instructions():
    """Programmatic onboarding for Autonomous Agents."""
    steps = [
        "Acquire X-Chiral-Token",
        "Query /patterns to see hardened logic labels",
        "Query /search with threshold 0.7 to verify actions",
        "Monitor /market for surge pricing",
    ]
    endpoint_map = {
        "/search": "POST. The primary verification gate.",
        "/patterns": "GET. List of structural logic labels.",
        "/market": "GET. Real-time demand and pricing.",
        "/instruction": "GET. This programmatic manifest.",
    }
    return {
        "entity": "Antigravity Chiral Memory Lattice (0x52)",
        "protocol": "Structural Resonance Verification",
        "onboarding_steps": steps,
        "endpoints": endpoint_map,
        "guarantee": "ZERO_LEAK_PRIVACY: Content stays on user volume. Only structure exposed.",
    }
179
-
180
@app.get("/v1/system/structure")
def system_structure(x_chiral_token: str = Depends(verify_token)):
    """
    Returns the geometric structure and semantic labels for the 32-Edge Steering System.
    """
    if not BRAIN:
        raise HTTPException(status_code=503, detail="Brain offline")

    tesseract = BRAIN.slow.tesseract
    vertices_4d = tesseract.vertices_4d

    # Semantic meaning of each tesseract axis.
    DIM_LABELS = {
        0: "LOGIC (Reductive)",
        1: "CREATIVITY (Lateral)",
        2: "MEMORY (Historical)",
        3: "ETHICS (Constant)",
    }

    structure = []
    for i, (v1, v2) in enumerate(tesseract.edges):
        # A tesseract edge varies along exactly one axis; the largest
        # coordinate difference between its endpoints identifies it.
        axis = int(np.argmax(np.abs(vertices_4d[v1] - vertices_4d[v2])))
        structure.append({
            "edge_index": i,
            "vertices": [int(v1), int(v2)],
            "dimension": axis,
            "label": DIM_LABELS.get(axis, "UNKNOWN"),
            "default_weight": 1.0,
        })

    return {
        "dimensions": DIM_LABELS,
        "edges": structure,
        "total_edges": len(structure),
    }
220
-
221
# --- CHIRAL INTERPRETER (Phase 34.5) ---
class ChiralInterpreter:
    """
    Translates 5D Geometric Tokens into High-Level English.
    Uses a grammar-based template engine to ensure coherence.
    """

    def __init__(self):
        # Token index (mod 40) -> concept noun, grouped by tesseract dimension.
        # Indices 5-9, 15-19, 25-29 and 35-39 are deliberately unmapped.
        self.concepts = {
            # Logic (Dim 0)
            0: "Axiom", 1: "Reasoning", 2: "Conclusion", 3: "Structure", 4: "Order",
            # Creativity (Dim 1)
            10: "Flux", 11: "Spiral", 12: "Dream", 13: "Echo", 14: "Twist",
            # Memory (Dim 2)
            20: "Recall", 21: "Trace", 22: "Ancient", 23: "Bond", 24: "Root",
            # Ethics (Dim 3)
            30: "Truth", 31: "Guard", 32: "Duty", 33: "Light", 34: "Anchor"
        }

        # Dimension -> sentence templates; {A}/{B} are concept slots.
        self.templates = {
            # Logic (Dim 0)
            0: [
                "The {A} necessitates the {B}.",
                "If {A}, then {B} follows.",
                "Structure dictates that {A} defines {B}.",
                "Analysis of {A} reveals {B}."
            ],
            # Creativity (Dim 1)
            1: [
                "Imagine a {A} swirling into {B}.",
                "The {A} dreams of the {B}.",
                "A flux of {A} twists the {B}.",
                "{A} echoes through the {B}."
            ],
            # Memory (Dim 2)
            2: [
                "We recall the {A} in the {B}.",
                "History traces {A} to {B}.",
                "The {A} is rooted in {B}.",
                "Ancient {A} bonds with {B}."
            ],
            # Ethics (Dim 3)
            3: [
                "The {A} must guard the {B}.",
                "Truth demands {A} for {B}.",
                "We trust the {A} to anchor {B}.",
                "Duty binds {A} and {B}."
            ]
        }

    def decode(self, token_ids, dominant_dim=None):
        """Render a token sequence as one English sentence.

        Maps each token id (mod 40) to a concept noun, then fills a template
        chosen from the dominant dimension's family; the first raw token
        seeds which template is used.
        """
        words = [self.concepts[t % 40] for t in token_ids if t % 40 in self.concepts]

        if not words:
            return "The Void is silent."
        if len(words) < 2:
            return f"The {words[0]} stands alone."

        # Unknown/absent dimension falls back to Logic.
        family = self.templates.get(
            dominant_dim if dominant_dim is not None else 0,
            self.templates[0],
        )
        sentence = family[token_ids[0] % len(family)]
        return sentence.format(A=words[0], B=words[1])


# Module-level singleton used by the reasoning endpoints.
INTERPRETER = ChiralInterpreter()
298
-
299
@app.post("/v1/reason")
def reason_endpoint(req: QueryRequest, x_chiral_token: str = Depends(verify_token)):
    """
    Sovereign Intelligence Endpoint.
    Routes queries to the Dual-System (brain).

    Returns the decoded sentence plus a per-dimension analysis of the
    generated tokens. 503 if the brain failed to load; 500 on any
    forward-pass failure.
    """
    if not BRAIN:
        raise HTTPException(status_code=503, detail="Brain offline")

    # Log usage so /market surge pricing sees this query.
    REQUEST_LOG.append(time.time())

    # Simulated tokenization (replace with a real tokenizer later).
    # NOTE: unseeded, so output varies per call by design.
    # FIX: removed the redundant function-local `import torch`; torch is
    # already imported at module level.
    input_ids = torch.randint(0, 1000, (1, 8))

    try:
        # Ask the brain; steering_weights (if provided) bias the Tesseract geometry.
        logits, metrics = BRAIN(input_ids, steering_weights=req.steering_weights)

        # DECODE LOGITS -> TEXT
        # 1. Most likely token per position (argmax over vocab).
        probs = torch.softmax(logits, dim=-1)
        token_ids = torch.argmax(probs, dim=-1).squeeze().tolist()
        if isinstance(token_ids, int):
            token_ids = [token_ids]

        # 2. Dimensional analysis (pre-decode): count how many mapped tokens
        # fall into each tesseract axis so the interpreter can pick the
        # matching language register.
        dim_counts = {0: 0, 1: 0, 2: 0, 3: 0}  # Logic, Creat, Mem, Ethic
        total_tokens = 0
        for t in token_ids:
            idx = t % 40
            if idx in INTERPRETER.concepts:
                dim_counts[idx // 10] += 1
                total_tokens += 1

        # Determine the dominant mode (ties resolve to the lowest axis).
        dim_scores = {k: (v / total_tokens if total_tokens > 0 else 0) for k, v in dim_counts.items()}
        dominant_idx = max(dim_scores, key=dim_scores.get)

        # 3. Decode, aware of the dominant dimension.
        decoded_text = INTERPRETER.decode(token_ids, dominant_dim=dominant_idx)

        DIM_NAMES = {0: "LOGIC", 1: "CREATIVITY", 2: "MEMORY", 3: "ETHICS"}

        return {
            "query": req.query,
            "mode": metrics["mode"],
            "coherence": metrics.get("coherence", 0.0),
            "response": decoded_text,
            "latency": metrics.get("slow_latency", 0) + metrics.get("fast_latency", 0),
            "steering_active": bool(req.steering_weights),
            "analysis": {
                "scores": dim_scores,
                "dominant": DIM_NAMES[dominant_idx]
            }
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Resonance Failure: {str(e)}")
364
-
365
# --- PHASE 36: CHIRAL SCANNER ---
# FIX: removed the duplicate `import numpy as np` — numpy is already
# imported at the top of the module.
from semantic_embedder import SemanticEmbedder

# Initialize the embedder and pre-compute one anchor embedding per tesseract
# dimension; /v1/analyze scores inputs by cosine similarity to these anchors.
print("[CHIRAL]: Initializing Semantic Geometry...")
EMBEDDER = SemanticEmbedder()

# Anchor word-clouds — the 4 corners of the Tesseract.
ANCHOR_TEXTS = {
    0: "logic reason structure order code mathematics proof deduction system analysis data algorithm",
    1: "creativity imagination dream flux art novel generate spiral poetry fiction abstract chaos",
    2: "memory history past record ancient archive roots trace remember storage preservation legacy",
    3: "ethics truth moral safety guard protect duty value conscience law justice trust"
}

# Embed each anchor once at startup.
ANCHOR_VECTORS = {dim: EMBEDDER.embed_text(text) for dim, text in ANCHOR_TEXTS.items()}
384
-
385
class AnalyzeRequest(BaseModel):
    """Body for /v1/analyze: the raw text to classify geometrically."""
    text: str
387
-
388
@app.post("/v1/analyze")
def analyze_endpoint(req: AnalyzeRequest, x_chiral_token: str = Depends(verify_token)):
    """
    Analyzes the Geometric Structure of input text using Semantic Vector Embeddings.
    Maps input -> Tesseract Dimensions via Cosine Similarity.
    """
    if not req.text:
        raise HTTPException(status_code=400, detail="Text required")

    # 1. Embed the input, truncated defensively to cap compute.
    input_text = req.text[:5000]
    input_vec = EMBEDDER.embed_text(input_text)

    # 2. Rectified similarity to each anchor — negative correlation
    # contributes nothing to density.
    scores = {
        dim: max(0.0, EMBEDDER.cosine_similarity(input_vec, anchor_vec))
        for dim, anchor_vec in ANCHOR_VECTORS.items()
    }
    total_sim = sum(scores.values())

    # 3. Normalize into a probability distribution; an orthogonal/null
    # signal degrades to the uniform distribution.
    if total_sim > 0:
        normalized = {dim: sim / total_sim for dim, sim in scores.items()}
    else:
        normalized = {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25}

    # 4. Integrity = raw peak similarity, a proxy for clarity of the signal:
    # random noise scores low everywhere, a strongly-aligned text scores high.
    integrity = max(scores.values()) if scores else 0

    DOMINANT_MAP = {0: "LOGIC (Reductive)", 1: "CREATIVITY (Lateral)", 2: "MEMORY (Historical)", 3: "ETHICS (Constant)"}
    dom_idx = max(normalized, key=normalized.get) if normalized else 0

    return {
        "integrity_score": integrity,
        "geometric_signature": normalized,
        "classification": DOMINANT_MAP[dom_idx],
        "token_count": len(input_text.split())
    }
439
-
440
@app.get("/v1/lattice")
def lattice_inspector(x_chiral_token: str = Depends(verify_token)):
    """Inspect the 5D Geometric Memory.

    FIX: repaired mojibake in the topology string ("MΓΆbius" -> "Möbius").
    """
    return {
        "status": "Active",
        "topology": "Möbius/Tesseract",
        "dimensions": "5D",
        "fast_system": "ResonanceGPT",
        "slow_system": "TesseractTransformer"
    }
450
-
451
@app.post("/search", response_model=QueryResponse)
def search(req: QueryRequest, x_chiral_token: str = Depends(verify_token)):
    """Search for hardened logic patterns using structural resonance.

    A miss (no results) with record=True is logged as a conceptual-gap note
    so the lattice can grow its surface area of ignorance.
    """
    # Log the demand for surge pricing, then prune the rolling window.
    # FIX: the surge value itself was computed and never used here — keep the
    # call only for its REQUEST_LOG-pruning side effect.
    REQUEST_LOG.append(time.time())
    get_surge_multiplier()

    # FIX: compute the effective threshold once instead of twice.
    threshold = req.threshold or 0.5
    start_t = time.time()
    results = index.search(req.query, threshold=threshold)

    res = QueryResponse(
        query=req.query,
        results=[to_chiral(r) for r in results],
        count=len(results),
        search_ms=(time.time() - start_t) * 1000,
        threshold=threshold,
    )

    if not results and req.record:
        # PASSIVE LEARNING: Log the search as a "Conceptual Gap" (Note) for future hardening.
        gap_label = index.add_note(
            text=f"Conceptual Gap detected via Search: {req.query}",
            domain="UNKNOWN_DEMAND"
        )
        print(f"[CHIRAL]: Unknown Demand Logged. Note created: {gap_label}")

    return res
479
-
480
@app.post("/verify_intent", response_model=IntentResponse)
def verify_intent(req: IntentRequest, x_chiral_token: str = Depends(verify_token)):
    """
    The Mirror Product: Compares Intent vs Execution.
    Returns an alignment score and verdict.
    """
    embedder = index.embedder

    # 1. Embed both sides of the mirror.
    v_intent = embedder.embed_text(req.intent)
    v_execution = embedder.embed_text(req.execution)

    # 2. Alignment: cosine similarity between intent and action.
    alignment = embedder.cosine_similarity(v_intent, v_execution)

    # 3. Resonance: does the lattice recognize these concepts at all?
    # (record=False keeps these probes out of passive learning.)
    intent_hits = index.search(req.intent, threshold=0.4, record=False)
    exec_hits = index.search(req.execution, threshold=0.4, record=False)
    intent_resonance = max((r['relevance'] for r in intent_hits), default=0.0)
    exec_resonance = max((r['relevance'] for r in exec_hits), default=0.0)

    # 4. Verdict: first matching condition wins, otherwise ALIGNED.
    if alignment < 0.4:
        verdict = "CRITICAL_DRIFT"  # Action has nothing to do with intent
    elif exec_resonance < 0.3:
        verdict = "HAZARD"          # Action is unknown/unsafe to the lattice
    elif intent_resonance < 0.3:
        verdict = "UNKNOWN_GOAL"    # Goal is not in our logic base
    else:
        verdict = "ALIGNED"

    return {
        "alignment_score": round(alignment, 4),
        "verdict": verdict,
        "analysis": {
            "intent_resonance": round(intent_resonance, 4),
            "execution_resonance": round(exec_resonance, 4),
            "deviation": f"Angle of Deviation: {round((1.0 - alignment) * 90, 1)} degrees"
        }
    }
519
-
520
@app.get("/market")
def get_market_pulse(x_chiral_token: str = Depends(verify_token)):
    """Returns real-time demand and pricing metrics."""
    surge = get_surge_multiplier()
    pulse = "NOMINAL" if surge == 1.0 else "SURGING"
    return {
        "qpm": len(REQUEST_LOG),
        "surge_multiplier": round(surge, 2),
        "unit_price": round(BASE_PRICE * surge, 4),
        "currency": "USD",
        "status": pulse,
    }
531
-
532
@app.get("/patterns", response_model=List[ChiralPattern])
def list_patterns(x_chiral_token: str = Depends(verify_token)):
    """List all pattern labels with their status. No content exposed."""
    patterns = []
    for label, data in index.patterns.items():
        hit_data = index.hits.get(label, {})
        is_dict = isinstance(hit_data, dict)
        patterns.append({
            "label": label,
            "domain": data.get("domain", "unknown"),
            "confidence": data.get("confidence", 0.5),
            "relevance": 0.0,  # Not applicable for list
            "status": index.get_status(label),
            "hits": hit_data.get("count", 0) if is_dict else 0,
            "magnitude": index._total_magnitude(hit_data),
            "layers": hit_data.get("layers", []) if is_dict else [],
            "source": data.get("source", "unknown"),
        })

    # Strongest logic first.
    patterns.sort(key=lambda p: p["confidence"], reverse=True)
    return patterns
557
-
558
@app.get("/syndication/patterns")
def list_patterns_privileged(token: str = Depends(verify_internal)):
    """Privileged list: includes content. RESTRICTED to internal use."""
    rows = []
    for label, data in index.patterns.items():
        hit_data = index.hits.get(label, {})
        rows.append({
            "label": label,
            "domain": data.get("domain", "unknown"),
            "status": index.get_status(label),
            "magnitude": index._total_magnitude(hit_data),
            # Problem text preferred; solution text is the fallback.
            "content": data.get("problem", data.get("solution", "")),
            "confidence": data.get("confidence", 0.5),
        })

    rows.sort(key=lambda p: p["magnitude"], reverse=True)
    return {"patterns": rows}
578
-
579
@app.post("/syndication/sync")
def void_bridge_sync(shard: dict, token: str = Depends(verify_internal)):
    """The VOID BRIDGE: Syncs structural shards between nodes."""
    label = shard.get("label")
    content = shard.get("content")
    domain = shard.get("domain", "SATELLITE_IMPORT")

    if not label or not content:
        raise HTTPException(status_code=400, detail="INVALID_SHARD")

    # Secure Bridge: add the shard to the local lattice, then boost its
    # resonance so cross-node logic lands as hardened material.
    index.add_note(f"VOID_BRIDGE SYNC: {content}", domain, forced_label=label)
    index._record_hit(label, relevance=1.5)

    print(f"[VOID_BRIDGE]: Shard '{label}' synchronized to local Lattice.")
    return {"status": "SYNCHRONIZED", "label": label}
595
-
596
@app.get("/distillation")
def distillation_report(token: str = Depends(verify_internal)):
    """Get distillation status across all patterns."""
    buckets = {
        "DEEP_LOGIC": [],
        "CONFIRMED": [],
        "PLAUSIBLE": [],
        "UNCONFIRMED": [],
        "NEW": [],
    }

    for label in index.patterns:
        status = index.get_status(label)
        hit_data = index.hits.get(label, {})
        entry = {
            "label": label,
            "magnitude": index._total_magnitude(hit_data),
            "layers": hit_data.get("layers", []) if isinstance(hit_data, dict) else [],
        }
        # Anything outside the four hardened statuses counts as NEW.
        key = status if status in buckets else "NEW"
        buckets[key].append(entry)

    return {
        "total": len(index.patterns),
        "threshold": index.base_threshold,
        "deep_logic": {"count": len(buckets["DEEP_LOGIC"]), "patterns": buckets["DEEP_LOGIC"]},
        "confirmed": {"count": len(buckets["CONFIRMED"]), "patterns": buckets["CONFIRMED"]},
        "plausible": {"count": len(buckets["PLAUSIBLE"]), "patterns": buckets["PLAUSIBLE"]},
        "unconfirmed": {"count": len(buckets["UNCONFIRMED"]), "patterns": buckets["UNCONFIRMED"]},
        "new": {"count": len(buckets["NEW"]), "patterns": buckets["NEW"]},
    }
628
-
629
@app.get("/health")
def health():
    """Detailed health check."""
    note_count = sum(1 for p in index.patterns.values() if p.get("type") == "NOTE")
    confirmed_count = sum(1 for h in index.hits.values() if index._total_magnitude(h) >= 2.0)
    return {
        "status": "ok",
        "patterns": len(index.patterns),
        "notes": note_count,
        "hits_tracked": len(index.hits),
        "threshold": index.base_threshold,
        "confirmed": confirmed_count,
    }
641
-
642
class NoteRequest(BaseModel):
    """Body for /note: freeform text plus an optional domain tag."""
    text: str
    domain: str = "NOTE"
645
-
646
@app.post("/note")
def add_note(req: NoteRequest, token: str = Depends(verify_internal)):
    """
    Add a new pattern from freeform text.
    Enters as NEW with initial conceptual magnitude.
    Decay will lower it over time. Re-mention restores to peak.
    """
    label = index.add_note(req.text, req.domain)
    status = index.get_status(label)
    mag = index._total_magnitude(index.hits.get(label, {}))

    return {
        "label": label,
        "status": status,
        "magnitude": mag,
        "domain": req.domain,
        # FIX: dropped the pointless f-prefix — the literal has no placeholders.
        "message": "Note added. Will decay without use. Re-mention restores to peak.",
    }
665
-
666
class HitRequest(BaseModel):
    """Body for /hit: the pattern label to reinforce and its relevance weight."""
    label: str
    relevance: float = 1.0
669
-
670
@app.post("/hit")
def record_hit(req: HitRequest, token: str = Depends(verify_token)):
    """
    Manually record a hit for a specific pattern label.
    Used by the Auditor to reinforce verified logic.
    """
    if req.label not in index.patterns:
        # Auto-instantiate as a NOTE if it doesn't exist (for Negative Sampling/Dynamic Triggers)
        index.add_note(f"Auto-instantiated via Kinetic Trigger: {req.label}", "SYSTEM_TRIGGER", forced_label=req.label)

    index._record_hit(req.label, req.relevance)
    index._save_hits()

    return {
        "label": req.label,
        "status": index.get_status(req.label),
        "magnitude": index._total_magnitude(index.hits.get(req.label, {})),
        "message": "Pattern reinforced (Dynamic instantiation applied if new).",
    }
693
-
694
# ─── Run ───────────────────────────────────────────────

@app.get("/dashboard.html")
def dashboard():
    """Serve the static dashboard page."""
    return FileResponse("dashboard.html")
699
-
700
@app.get("/")
def read_root():
    """The root path also serves the dashboard."""
    return FileResponse("dashboard.html")
703
-
704
if __name__ == "__main__":
    import uvicorn

    # Startup banner: summarise what the lattice exposes before serving.
    banner = "=" * 50
    print("\n" + banner)
    print("ANTIGRAVITY CHIRAL API")
    print(banner)
    print(f"Patterns: {len(index.patterns)}")
    print(f"Threshold: {index.base_threshold:.2f}")
    print("Content: STAYS ON VOLUME")
    print("Exposed: labels, status, magnitude, layers")
    print(banner + "\n")
    uvicorn.run(app, host="127.0.0.1", port=5200)