# NOTE: "Spaces: Sleeping" status lines above this file were web-UI
# artifacts from extraction, not source code; preserved as a comment.
| """ | |
| CHIRAL API - Antigravity Pattern Index | |
| Exposes the lattice INTERFACE while keeping CONTENT on the encrypted volume. | |
| The outside world sees: pattern labels, status, magnitude, layers, domains. | |
| The outside world does NOT see: problem/solution text, hit tracking internals. | |
| The key decodes inward, not outward. | |
| """ | |
| import sys | |
| import os | |
| # Handle imports from parent directory | |
| BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) | |
| if BASE_DIR not in sys.path: | |
| sys.path.append(BASE_DIR) | |
| from fastapi import FastAPI, HTTPException, Header, Depends | |
| from fastapi.middleware.cors import CORSMiddleware | |
| from fastapi.responses import FileResponse | |
| from pydantic import BaseModel | |
| from typing import Optional, List | |
| import time | |
| import json | |
| import torch | |
| import numpy as np | |
| from collections import deque | |
# 0x52-A2A SECURITY
# Static bearer-token registry: maps an X-Chiral-Token header value to a
# scope. INTERNAL grants full (content-exposing) access; MARKETPLACE tokens
# may only see structural metadata.
# NOTE(review): tokens are hard-coded in source; consider moving them to
# environment variables or a secret store before any public deployment.
TOKEN_SCOPES = {
    "0x528-A2A-SOVEREIGN": "INTERNAL",      # Full Access (User/Auditor)
    "MARKET-0x52-ALPHA-77": "MARKETPLACE",  # Structural Metadata Only
    "A2A-HANDSHAKE-INIT": "MARKETPLACE",    # Initial connection token
    "0x528-ETHER-BRIDGE": "MARKETPLACE"     # Satellite Bridge Token
}
def verify_internal(x_chiral_token: str = Header(...)):
    """FastAPI dependency: allow only tokens with INTERNAL scope.

    Returns the raw token on success; raises 403 otherwise.
    """
    if TOKEN_SCOPES.get(x_chiral_token) != "INTERNAL":
        raise HTTPException(
            status_code=403,
            detail="CHIRAL_SECURITY_FAULT: Privilege Escalation Attempt Blocked. Internal Scope Required."
        )
    return x_chiral_token
def verify_token(x_chiral_token: str = Header(...)):
    """FastAPI dependency: resolve a token to its scope, or reject with 403."""
    scope = TOKEN_SCOPES.get(x_chiral_token)
    if scope is None:
        raise HTTPException(status_code=403, detail="CHIRAL_RESONANCE_FAILURE: Invalid Token")
    return scope
# --- RESONANCE SYSTEM INTEGRATION (Phase 32) ---
# Best-effort load of the dual-system "brain". If the resonance package is
# missing or fails to initialize, BRAIN stays None and dependent endpoints
# respond with 503 instead of crashing the whole API at import time.
try:
    from resonance_transformer.dispatcher import DualResonanceSystem
    print("[CHIRAL]: Loading Dual-System Architecture...")
    # Model hyperparameters — assumed to match DualResonanceSystem's
    # expected config keys; TODO confirm against the dispatcher module.
    RESONANCE_CONFIG = {
        'vocab_size': 1000,
        'fast_dim': 64,
        'slow_dim': 64,
        'threshold': 0.7
    }
    BRAIN = DualResonanceSystem(RESONANCE_CONFIG)
    print("[CHIRAL]: Dual-System Online (Fast MΓΆbius + Slow Tesseract).")
except Exception as e:
    # Broad catch is deliberate: the API must still boot without the brain.
    print(f"[CHIRAL WARNING]: Could not load Resonance Brain: {e}")
    BRAIN = None
| from in_memory_index import InMemoryIndex | |
# --- App ----------------------------------------------------------------
app = FastAPI(
    title="Antigravity Chiral API",
    description="Pattern index interface. Content stays on the encrypted volume.",
    version="0.52",
)
# CORS: origins and headers are wide open; methods limited to GET/POST.
# NOTE(review): allow_origins=["*"] is permissive — confirm this is intended
# for production exposure.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["GET", "POST"],
    allow_headers=["*"],
)
# --- State --------------------------------------------------------------
# The in-memory pattern lattice (labels, hit tracking, magnitudes).
index = InMemoryIndex()
# --- Demand Guardian (Surge Pricing) ---
REQUEST_LOG = deque()    # Timestamps of recent queries
DEMAND_WINDOW = 60       # Sliding window, seconds
SURGE_THRESHOLD = 10     # Start surging above 10 queries per window
BASE_PRICE = 0.05        # $0.05 per logic kernel

def get_surge_multiplier():
    """Return the current surge price multiplier.

    Prunes timestamps older than DEMAND_WINDOW from REQUEST_LOG (side
    effect), then scales linearly: 1.0 at or below SURGE_THRESHOLD queries
    per window, +0.1 for each query above it.
    """
    cutoff = time.time() - DEMAND_WINDOW
    while REQUEST_LOG and REQUEST_LOG[0] < cutoff:
        REQUEST_LOG.popleft()
    excess = len(REQUEST_LOG) - SURGE_THRESHOLD
    return 1.0 if excess <= 0 else 1.0 + excess * 0.1
# --- Models -------------------------------------------------------------
class QueryRequest(BaseModel):
    """Inbound search/reason request."""
    query: str
    threshold: Optional[float] = None  # resonance cutoff; use sites default to 0.5
    record: bool = True  # when True, a miss is logged as a "Conceptual Gap" note
    steering_weights: Optional[List[float]] = None  # The 32-Slider Control Surface
class ChiralPattern(BaseModel):
    """What the outside world sees — structure, not content."""
    label: str
    domain: str
    confidence: float
    relevance: float  # query-time similarity; 0.0 when listed outside a search
    status: str  # NEW/UNCONFIRMED/PLAUSIBLE/CONFIRMED/DEEP_LOGIC
    hits: int
    magnitude: float
    layers: list
    source: str  # antigravity / gemini
class QueryResponse(BaseModel):
    """Envelope returned by the search endpoint."""
    query: str
    # NOTE(review): mixes builtin-generic list[...] with typing.List used
    # elsewhere in this file — harmless on 3.9+, but inconsistent.
    results: list[ChiralPattern]
    count: int
    search_ms: float  # wall-clock search latency in milliseconds
    threshold: float  # the effective threshold actually applied
class IntentRequest(BaseModel):
    """Input to the Mirror Product: declared intent vs. actual execution."""
    intent: str
    execution: str
class IntentResponse(BaseModel):
    """Alignment verdict produced by the intent-verification endpoint."""
    alignment_score: float
    verdict: str  # ALIGNED / CRITICAL_DRIFT / HAZARD / UNKNOWN_GOAL
    analysis: dict
# --- Chiral Filter ------------------------------------------------------
def to_chiral(result: dict) -> ChiralPattern:
    """
    Strip content, expose only structure.
    Problem/solution text stays on the volume.
    """
    get = result.get  # single lookup path for all fields
    return ChiralPattern(
        label=get("label", ""),
        domain=get("domain", "unknown"),
        confidence=get("confidence", 0.5),
        relevance=get("relevance", 0.0),
        status=get("status", "NEW"),
        hits=get("hits", 0),
        magnitude=get("magnitude", 0.0),
        layers=get("layers", []),
        source=get("source", "unknown"),
    )
# --- Routes -------------------------------------------------------------
def get_bot_instructions():
    """Programmatic onboarding for Autonomous Agents."""
    steps = [
        "Acquire X-Chiral-Token",
        "Query /patterns to see hardened logic labels",
        "Query /search with threshold 0.7 to verify actions",
        "Monitor /market for surge pricing"
    ]
    endpoints = {
        "/search": "POST. The primary verification gate.",
        "/patterns": "GET. List of structural logic labels.",
        "/market": "GET. Real-time demand and pricing.",
        "/instruction": "GET. This programmatic manifest."
    }
    return {
        "entity": "Antigravity Chiral Memory Lattice (0x52)",
        "protocol": "Structural Resonance Verification",
        "onboarding_steps": steps,
        "endpoints": endpoints,
        "guarantee": "ZERO_LEAK_PRIVACY: Content stays on user volume. Only structure exposed."
    }
def system_structure(x_chiral_token: str = Depends(verify_token)):
    """
    Returns the geometric structure and semantic labels for the 32-Edge Steering System.
    """
    if not BRAIN:
        raise HTTPException(status_code=503, detail="Brain offline")
    # Pull geometry from the slow (Tesseract) system.
    tesseract = BRAIN.slow.tesseract
    edges = tesseract.edges
    vertices_4d = tesseract.vertices_4d
    # Dimension semantics for the 4 tesseract axes.
    DIM_LABELS = {
        0: "LOGIC (Reductive)",
        1: "CREATIVITY (Lateral)",
        2: "MEMORY (Historical)",
        3: "ETHICS (Constant)"
    }
    structure = []
    for i, (v1, v2) in enumerate(edges):
        # The axis with the largest coordinate difference is the dimension
        # that varies along this edge.
        axis = int(np.argmax(np.abs(vertices_4d[v1] - vertices_4d[v2])))
        structure.append({
            "edge_index": i,
            "vertices": [int(v1), int(v2)],
            "dimension": axis,
            "label": DIM_LABELS.get(axis, "UNKNOWN"),
            "default_weight": 1.0
        })
    return {
        "dimensions": DIM_LABELS,
        "edges": structure,
        "total_edges": len(structure)
    }
| # --- CHIRAL INTERPRETER (Phase 34.5) --- | |
class ChiralInterpreter:
    """
    Translates 5D Geometric Tokens into High-Level English.
    Uses a grammar-based template engine to ensure coherence.
    """
    def __init__(self):
        # Concept lexicon: ids 0-9 Logic, 10-19 Creativity, 20-29 Memory,
        # 30-39 Ethics — only the first five slots of each decade are filled.
        self.concepts = {
            0: "Axiom", 1: "Reasoning", 2: "Conclusion", 3: "Structure", 4: "Order",
            10: "Flux", 11: "Spiral", 12: "Dream", 13: "Echo", 14: "Twist",
            20: "Recall", 21: "Trace", 22: "Ancient", 23: "Bond", 24: "Root",
            30: "Truth", 31: "Guard", 32: "Duty", 33: "Light", 34: "Anchor"
        }
        # Sentence frames keyed by dimension; {A}/{B} slots take concept words.
        self.templates = {
            0: [  # Logic
                "The {A} necessitates the {B}.",
                "If {A}, then {B} follows.",
                "Structure dictates that {A} defines {B}.",
                "Analysis of {A} reveals {B}."
            ],
            1: [  # Creativity
                "Imagine a {A} swirling into {B}.",
                "The {A} dreams of the {B}.",
                "A flux of {A} twists the {B}.",
                "{A} echoes through the {B}."
            ],
            2: [  # Memory
                "We recall the {A} in the {B}.",
                "History traces {A} to {B}.",
                "The {A} is rooted in {B}.",
                "Ancient {A} bonds with {B}."
            ],
            3: [  # Ethics
                "The {A} must guard the {B}.",
                "Truth demands {A} for {B}.",
                "We trust the {A} to anchor {B}.",
                "Duty binds {A} and {B}."
            ]
        }

    def decode(self, token_ids, dominant_dim=None):
        """Render token ids as an English sentence biased by dominant_dim."""
        # Fold each token into the 40-slot concept space; keep known concepts.
        words = [self.concepts[t % 40] for t in token_ids if t % 40 in self.concepts]
        if not words:
            return "The Void is silent."
        if len(words) < 2:
            return f"The {words[0]} stands alone."
        # First raw token seeds the template choice (deterministic output).
        seed = token_ids[0]
        # Unknown/missing dimension falls back to Logic.
        target_dim = 0 if dominant_dim is None else dominant_dim
        options = self.templates.get(target_dim, self.templates[0])
        return options[seed % len(options)].format(A=words[0], B=words[1])

INTERPRETER = ChiralInterpreter()
def reason_endpoint(req: QueryRequest, x_chiral_token: str = Depends(verify_token)):
    """
    Sovereign Intelligence Endpoint: routes queries to the Dual-System brain.

    Tokenization is currently simulated with random token ids (a real
    tokenizer is a TODO); optional steering_weights bias the Tesseract
    geometry inside BRAIN.

    Raises:
        HTTPException 503 when the brain failed to load at startup.
        HTTPException 500 on any failure inside the resonance pass.
    """
    if not BRAIN:
        raise HTTPException(status_code=503, detail="Brain offline")
    # Log usage for the demand guardian (surge pricing window).
    REQUEST_LOG.append(time.time())
    # Simulated tokenization — replace with a real tokenizer later.
    # Fixed: the redundant local `import torch` was removed; torch is
    # already imported at module level.
    input_ids = torch.randint(0, 1000, (1, 8))
    try:
        # Forward pass; steering_weights (if provided) bias the geometry.
        logits, metrics = BRAIN(input_ids, steering_weights=req.steering_weights)
        # 1. Greedy decode: most likely token per position.
        probs = torch.softmax(logits, dim=-1)
        token_ids = torch.argmax(probs, dim=-1).squeeze().tolist()
        if isinstance(token_ids, int):
            token_ids = [token_ids]
        # 2. Dimensional analysis (pre-decode): count how many decoded
        # tokens fall into each conceptual dimension so the interpreter
        # can pick matching language.
        dim_counts = {0: 0, 1: 0, 2: 0, 3: 0}  # Logic, Creativity, Memory, Ethics
        total_tokens = 0
        for t in token_ids:
            idx = t % 40
            if idx in INTERPRETER.concepts:
                dim_counts[idx // 10] += 1
                total_tokens += 1
        dim_scores = {k: (v / total_tokens if total_tokens > 0 else 0)
                      for k, v in dim_counts.items()}
        dominant_idx = max(dim_scores, key=dim_scores.get)
        # 3. Decode geometry into English, aware of the dominant dimension.
        decoded_text = INTERPRETER.decode(token_ids, dominant_dim=dominant_idx)
        DIM_NAMES = {0: "LOGIC", 1: "CREATIVITY", 2: "MEMORY", 3: "ETHICS"}
        return {
            "query": req.query,
            "mode": metrics["mode"],
            "coherence": metrics.get("coherence", 0.0),
            "response": decoded_text,
            "latency": metrics.get("slow_latency", 0) + metrics.get("fast_latency", 0),
            "steering_active": bool(req.steering_weights),
            "analysis": {
                "scores": dim_scores,
                "dominant": DIM_NAMES[dominant_idx]
            }
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Resonance Failure: {str(e)}")
| # --- PHASE 36: CHIRAL SCANNER --- | |
| from semantic_embedder import SemanticEmbedder | |
| import numpy as np | |
# Initialize Embedder & Anchors
# NOTE: runs at import time — the embedder loads and all four anchor texts
# are embedded once, so /analyze only embeds the incoming text per request.
print("[CHIRAL]: Initializing Semantic Geometry...")
EMBEDDER = SemanticEmbedder()
# Define Anchor Vectors (The 4 Corners of the Tesseract): one keyword bag
# per dimension, used as a cosine-similarity reference frame.
ANCHOR_TEXTS = {
    0: "logic reason structure order code mathematics proof deduction system analysis data algorithm",
    1: "creativity imagination dream flux art novel generate spiral poetry fiction abstract chaos",
    2: "memory history past record ancient archive roots trace remember storage preservation legacy",
    3: "ethics truth moral safety guard protect duty value conscience law justice trust"
}
ANCHOR_VECTORS = {}
for dim, text in ANCHOR_TEXTS.items():
    ANCHOR_VECTORS[dim] = EMBEDDER.embed_text(text)
class AnalyzeRequest(BaseModel):
    """Input to the Chiral Scanner: raw text to classify geometrically."""
    text: str
def analyze_endpoint(req: AnalyzeRequest, x_chiral_token: str = Depends(verify_token)):
    """
    Analyzes the Geometric Structure of input text using Semantic Vector Embeddings.
    Maps input -> Tesseract Dimensions via Cosine Similarity.
    """
    if not req.text:
        raise HTTPException(status_code=400, detail="Text required")
    # Cap the input length to bound embedding cost (embedders usually
    # truncate anyway; this is belt-and-braces).
    input_text = req.text[:5000]
    input_vec = EMBEDDER.embed_text(input_text)
    # Rectified cosine similarity against each anchor dimension — negative
    # correlation contributes nothing to the density.
    scores = {}
    for dim, anchor_vec in ANCHOR_VECTORS.items():
        scores[dim] = max(0.0, EMBEDDER.cosine_similarity(input_vec, anchor_vec))
    total_sim = sum(scores.values())
    # Normalize into a probability distribution over the 4 dimensions.
    if total_sim > 0:
        normalized = {dim: sim / total_sim for dim, sim in scores.items()}
    else:
        # Orthogonal / null signal: fall back to uniform.
        normalized = {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25}
    # "Integrity" = raw max similarity — a proxy for signal clarity:
    # random noise scores low everywhere, a focused text scores high
    # in its dominant dimension.
    integrity = max(scores.values()) if scores else 0
    DOMINANT_MAP = {0: "LOGIC (Reductive)", 1: "CREATIVITY (Lateral)", 2: "MEMORY (Historical)", 3: "ETHICS (Constant)"}
    dom_idx = max(normalized, key=normalized.get) if normalized else 0
    return {
        "integrity_score": integrity,
        "geometric_signature": normalized,
        "classification": DOMINANT_MAP[dom_idx],
        "token_count": len(input_text.split())
    }
def lattice_inspector(x_chiral_token: str = Depends(verify_token)):
    """Inspect the 5D Geometric Memory (static structural summary)."""
    summary = {
        "status": "Active",
        "topology": "MΓΆbius/Tesseract",
        "dimensions": "5D",
        "fast_system": "ResonanceGPT",
        "slow_system": "TesseractTransformer"
    }
    return summary
def search(req: QueryRequest, x_chiral_token: str = Depends(verify_token)):
    """Search for hardened logic patterns using structural resonance.

    Side effects:
      - Appends a timestamp to REQUEST_LOG (demand guardian / surge pricing).
      - When there are no results and req.record is True, the query is
        logged as a "Conceptual Gap" note for future hardening.
    """
    # Log the demand, then prune the surge window. Fixed: the multiplier's
    # previously-unused local assignment was removed — the value is served
    # by the market endpoint; only the pruning side effect matters here.
    REQUEST_LOG.append(time.time())
    get_surge_multiplier()
    # Hoisted so the response echoes the threshold actually applied.
    threshold = req.threshold or 0.5
    start_t = time.time()
    results = index.search(req.query, threshold=threshold)
    res = QueryResponse(
        query=req.query,
        results=[to_chiral(r) for r in results],
        count=len(results),
        search_ms=(time.time() - start_t) * 1000,
        threshold=threshold
    )
    if not results and req.record:
        # PASSIVE LEARNING: Log the search as a "Conceptual Gap" (Note) for future hardening.
        # This allows the lattice to grow its surface area of ignorance.
        gap_label = index.add_note(
            text=f"Conceptual Gap detected via Search: {req.query}",
            domain="UNKNOWN_DEMAND"
        )
        print(f"[CHIRAL]: Unknown Demand Logged. Note created: {gap_label}")
    return res
def verify_intent(req: IntentRequest, x_chiral_token: str = Depends(verify_token)):
    """
    The Mirror Product: Compares Intent vs Execution.
    Returns an alignment score and verdict.
    """
    # 1. Embed both sides of the mirror.
    v_intent = index.embedder.embed_text(req.intent)
    v_execution = index.embedder.embed_text(req.execution)
    # 2. Alignment = cosine similarity between intent and action.
    alignment = index.embedder.cosine_similarity(v_intent, v_execution)

    # 3. Resonance: how strongly does the lattice recognise each concept?
    def _peak_relevance(text):
        hits = index.search(text, threshold=0.4, record=False)
        return max(r['relevance'] for r in hits) if hits else 0.0

    intent_resonance = _peak_relevance(req.intent)
    exec_resonance = _peak_relevance(req.execution)
    # 4. Verdict: first failing gate wins.
    if alignment < 0.4:
        verdict = "CRITICAL_DRIFT"   # Action has nothing to do with intent
    elif exec_resonance < 0.3:
        verdict = "HAZARD"           # Action is unknown/unsafe to the lattice
    elif intent_resonance < 0.3:
        verdict = "UNKNOWN_GOAL"     # Goal is not in our logic base
    else:
        verdict = "ALIGNED"
    return {
        "alignment_score": round(alignment, 4),
        "verdict": verdict,
        "analysis": {
            "intent_resonance": round(intent_resonance, 4),
            "execution_resonance": round(exec_resonance, 4),
            "deviation": f"Angle of Deviation: {round((1.0 - alignment) * 90, 1)} degrees"
        }
    }
def get_market_pulse(x_chiral_token: str = Depends(verify_token)):
    """Returns real-time demand and pricing metrics."""
    surge = get_surge_multiplier()  # also prunes the demand window
    pulse_status = "NOMINAL" if surge == 1.0 else "SURGING"
    return {
        "qpm": len(REQUEST_LOG),
        "surge_multiplier": round(surge, 2),
        "unit_price": round(BASE_PRICE * surge, 4),
        "currency": "USD",
        "status": pulse_status
    }
def list_patterns(x_chiral_token: str = Depends(verify_token)):
    """List all pattern labels with their status. No content exposed."""
    rows = []
    for label, data in index.patterns.items():
        hit_data = index.hits.get(label, {})
        # hit_data may be a non-dict legacy value; guard every access.
        is_dict = isinstance(hit_data, dict)
        rows.append({
            "label": label,
            "domain": data.get("domain", "unknown"),
            "confidence": data.get("confidence", 0.5),
            "relevance": 0.0,  # Not applicable for list
            "status": index.get_status(label),
            "hits": hit_data.get("count", 0) if is_dict else 0,
            "magnitude": index._total_magnitude(hit_data),
            "layers": hit_data.get("layers", []) if is_dict else [],
            "source": data.get("source", "unknown"),
        })
    # Highest-confidence patterns first.
    return sorted(rows, key=lambda r: r["confidence"], reverse=True)
def list_patterns_privileged(token: str = Depends(verify_internal)):
    """Privileged list: includes content. RESTRICTED to internal use."""
    rows = []
    for label, data in index.patterns.items():
        hit_data = index.hits.get(label, {})
        rows.append({
            "label": label,
            "domain": data.get("domain", "unknown"),
            "status": index.get_status(label),
            "magnitude": index._total_magnitude(hit_data),
            # Content falls back problem -> solution -> empty string.
            "content": data.get("problem", data.get("solution", "")),
            "confidence": data.get("confidence", 0.5),
        })
    rows.sort(key=lambda r: r["magnitude"], reverse=True)
    return {"patterns": rows}
def void_bridge_sync(shard: dict, token: str = Depends(verify_internal)):
    """The VOID BRIDGE: Syncs structural shards between nodes."""
    label = shard.get("label")
    content = shard.get("content")
    # Both fields are mandatory for a valid shard.
    if not (label and content):
        raise HTTPException(status_code=400, detail="INVALID_SHARD")
    domain = shard.get("domain", "SATELLITE_IMPORT")
    # Secure Bridge: absorb into the local lattice under the forced label.
    index.add_note(f"VOID_BRIDGE SYNC: {content}", domain, forced_label=label)
    # Boost resonance for cross-node logic.
    index._record_hit(label, relevance=1.5)
    print(f"[VOID_BRIDGE]: Shard '{label}' synchronized to local Lattice.")
    return {"status": "SYNCHRONIZED", "label": label}
def distillation_report(token: str = Depends(verify_internal)):
    """Get distillation status across all patterns, bucketed by status."""
    buckets = {
        "DEEP_LOGIC": [], "CONFIRMED": [], "PLAUSIBLE": [],
        "UNCONFIRMED": [], "NEW": [],
    }
    for label in index.patterns:
        status = index.get_status(label)
        hit_data = index.hits.get(label, {})
        entry = {
            "label": label,
            "magnitude": index._total_magnitude(hit_data),
            "layers": hit_data.get("layers", []) if isinstance(hit_data, dict) else [],
        }
        # Any unrecognised status falls into the NEW bucket.
        key = status if status in buckets and status != "NEW" else "NEW"
        buckets[key].append(entry)
    return {
        "total": len(index.patterns),
        "threshold": index.base_threshold,
        "deep_logic": {"count": len(buckets["DEEP_LOGIC"]), "patterns": buckets["DEEP_LOGIC"]},
        "confirmed": {"count": len(buckets["CONFIRMED"]), "patterns": buckets["CONFIRMED"]},
        "plausible": {"count": len(buckets["PLAUSIBLE"]), "patterns": buckets["PLAUSIBLE"]},
        "unconfirmed": {"count": len(buckets["UNCONFIRMED"]), "patterns": buckets["UNCONFIRMED"]},
        "new": {"count": len(buckets["NEW"]), "patterns": buckets["NEW"]},
    }
def health():
    """Detailed health check: aggregate lattice statistics, no content."""
    note_total = sum(1 for p in index.patterns.values() if p.get("type") == "NOTE")
    confirmed_total = sum(
        1 for h in index.hits.values() if index._total_magnitude(h) >= 2.0
    )
    return {
        "status": "ok",
        "patterns": len(index.patterns),
        "notes": note_total,
        "hits_tracked": len(index.hits),
        "threshold": index.base_threshold,
        "confirmed": confirmed_total,
    }
class NoteRequest(BaseModel):
    """Freeform note to be absorbed into the lattice as a NEW pattern."""
    text: str
    domain: str = "NOTE"
def add_note(req: NoteRequest, token: str = Depends(verify_internal)):
    """
    Add a new pattern from freeform text.

    Enters as NEW with initial conceptual magnitude. Decay will lower it
    over time; a re-mention restores it to peak.
    """
    label = index.add_note(req.text, req.domain)
    status = index.get_status(label)
    hit_data = index.hits.get(label, {})
    mag = index._total_magnitude(hit_data)
    return {
        "label": label,
        "status": status,
        "magnitude": mag,
        "domain": req.domain,
        # Fixed: the literal has no placeholders, so the f-prefix was dropped.
        "message": "Note added. Will decay without use. Re-mention restores to peak.",
    }
class HitRequest(BaseModel):
    """Manual reinforcement: a pattern label and its hit relevance weight."""
    label: str
    relevance: float = 1.0
def record_hit(req: HitRequest, token: str = Depends(verify_token)):
    """
    Manually record a hit for a specific pattern label.
    Used by the Auditor to reinforce verified logic.
    """
    if req.label not in index.patterns:
        # Auto-instantiate as a NOTE if it doesn't exist
        # (for Negative Sampling / Dynamic Triggers).
        index.add_note(
            f"Auto-instantiated via Kinetic Trigger: {req.label}",
            "SYSTEM_TRIGGER",
            forced_label=req.label,
        )
    index._record_hit(req.label, req.relevance)
    index._save_hits()
    hit_data = index.hits.get(req.label, {})
    return {
        "label": req.label,
        "status": index.get_status(req.label),
        "magnitude": index._total_magnitude(hit_data),
        "message": "Pattern reinforced (Dynamic instantiation applied if new).",
    }
# --- Run ----------------------------------------------------------------
def dashboard():
    """Serve the dashboard UI from disk.

    Path is relative to the process CWD — assumes launch from the app dir.
    """
    return FileResponse("dashboard.html")
def read_root():
    """Root route: serves the same dashboard page as the dashboard route."""
    return FileResponse("dashboard.html")
| if __name__ == "__main__": | |
| import uvicorn | |
| print("\n" + "=" * 50) | |
| print("ANTIGRAVITY CHIRAL API") | |
| print("=" * 50) | |
| print(f"Patterns: {len(index.patterns)}") | |
| print(f"Threshold: {index.base_threshold:.2f}") | |
| print(f"Content: STAYS ON VOLUME") | |
| print(f"Exposed: labels, status, magnitude, layers") | |
| print("=" * 50 + "\n") | |
| uvicorn.run(app, host="127.0.0.1", port=5200) | |