File size: 18,273 Bytes
1ceda33 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 |
#!/usr/bin/env python3
"""
AICoevolution Semantic Telemetry (Production)
=============================================
A lightweight tool to measure semantic dynamics in human-AI conversations.
This version is optimized for production use with the AICoevolution Cloud SDK.
Usage:
python semantic_telemetry_prod.py --api-key aic_...
python semantic_telemetry_prod.py --api-key aic_... --hosted-ai
Requirements:
pip install requests
"""
import argparse
import json
import os
import sys
import time
import uuid
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
# =============================================================================
# CONFIGURATION
# =============================================================================
# Production SDK endpoint; can be overridden per-run via the --url CLI flag.
DEFAULT_SDK_URL = "https://sdk.aicoevolution.com"
# ANSI colors for terminal output
class Colors:
    """ANSI escape sequences for colored terminal output.

    These are plain class attributes (not an Enum) on purpose:
    `_disable_colors()` blanks them in place at runtime when color
    output is unsupported or disabled, so every f-string that embeds
    them degrades to plain text automatically.
    """
    HEADER = '\033[95m'     # bright magenta
    BLUE = '\033[94m'       # bright blue
    CYAN = '\033[96m'       # bright cyan
    GREEN = '\033[92m'      # bright green
    YELLOW = '\033[93m'     # bright yellow
    RED = '\033[91m'        # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def _disable_colors():
    """Disable ANSI colors (prints become plain text).

    Blanks every UPPERCASE attribute on :class:`Colors` in place, so all
    format strings that reference them emit no escape codes.
    """
    # Materialize the attribute names first: we mutate the class while
    # walking what would otherwise be a live view of its __dict__.
    escape_names = [name for name in vars(Colors) if name.isupper()]
    for name in escape_names:
        setattr(Colors, name, "")
def _init_console_colors(enable: bool) -> None:
    """
    Ensure colors render on Windows terminals.
    If color support isn't available, fall back to plain text.

    Honors the NO_COLOR / AIC_NO_COLOR environment variables in addition
    to the explicit `enable` flag.
    """
    # Explicit opt-out (flag or environment) wins immediately.
    opted_out = (not enable) or os.getenv("NO_COLOR") or os.getenv("AIC_NO_COLOR")
    if opted_out:
        _disable_colors()
        return
    # Non-Windows terminals are assumed to understand ANSI escapes as-is.
    if os.name != "nt":
        return
    try:
        import colorama  # type: ignore
        # Translates ANSI escapes for Windows consoles.
        try:
            colorama.just_fix_windows_console()
        except Exception:
            colorama.init()
    except Exception:
        # colorama missing/broken: degrade to plain text rather than
        # printing raw escape sequences.
        _disable_colors()
# =============================================================================
# DATA STRUCTURES
# =============================================================================
@dataclass
class TelemetryMetrics:
    """Standardized metrics from SDK response.

    Built by SemanticTelemetryClient.extract_metrics(); fields mirror the
    top-level keys of the SDK JSON response, with documented fallbacks.
    """
    sgi: Optional[float]        # from "turn_pair_sgi_latest", falling back to "sgi_latest"; None when absent
    velocity: Optional[float]   # degrees; "orbital_velocity_latest", falling back to "angular_velocity_latest"
    context_phase: str          # SDK "context_phase"; defaults to "stable" when missing
    context_mass: int           # SDK "context_mass"; defaults to 0
    attractor_count: int        # SDK "attractor_count"; defaults to 1
    context_drift: float        # SDK "context_drift"; defaults to 0.0 (averaged into context stability)
    processing_time_ms: int     # server-side processing time; defaults to 0
@dataclass
class CoevolutionIndex:
    """Computed Coevolution Index (CI).

    Produced by CoevolutionTracker.compute_index(). CI is a weighted blend
    of a horizontal (dynamics) score and a vertical (depth) score; the
    vertical components are fixed placeholders in this production script.
    """
    coevolution_index: float        # 0.6 * horizontal_score + 0.4 * vertical_score
    tier: str  # BASIC | ELEVATED | HIGH  (thresholds: >= 0.7 HIGH, >= 0.4 ELEVATED)
    horizontal_score: float         # dynamics: coherence occupancy / dyadic coherence / stability blend
    vertical_score: float           # depth; limited to placeholder components in prod
    # Components
    coherence_region_occupancy: float   # fraction of turns with SGI > 0.6 and velocity < 30 deg
    dyadic_coherence_index: float       # mean SGI over turns where SGI was reported
    context_stability: float            # 1 - (mean context_drift / 100), clamped at 0
    symbolic_entropy: float             # placeholder (0.5) in prod
    path_transformation_density: float  # placeholder (0.0) in prod
    domain_balance_index: float         # placeholder (0.5) in prod
# =============================================================================
# SDK CLIENT
# =============================================================================
class SemanticTelemetryClient:
    """Client for interacting with the AICoevolution SDK.

    Wraps a ``requests.Session`` carrying bearer-token auth and exposes:

    - :meth:`ingest_message` -- telemetry-only ingestion of one message.
    - :meth:`hosted_chat`    -- hosted-AI chat endpoint (paid tier).
    - :meth:`extract_metrics` -- normalize an SDK response into
      :class:`TelemetryMetrics`.

    All network errors are printed and reported as ``None`` returns;
    nothing is raised to the caller.
    """

    def __init__(self, api_key: str, base_url: str = DEFAULT_SDK_URL):
        self.api_key = api_key
        self.base_url = base_url.rstrip("/")
        self.conversation_id = str(uuid.uuid4())
        self.message_count = 0
        self.messages = []  # Local history for hosted AI context
        # Validate connection on init: fail fast (with install hint) if the
        # 'requests' dependency is unavailable.
        try:
            import requests
            self.session = requests.Session()
            if api_key:
                self.session.headers.update({"Authorization": f"Bearer {api_key}"})
        except ImportError:
            print("Error: 'requests' library not found.")
            print("Please install it: pip install requests")
            sys.exit(1)

    @staticmethod
    def _error_body(resp) -> str:
        """Return the stripped response body, truncated to 800 chars.

        Truncation of an empty string yields an empty string, so callers
        can still use `if body:` to decide between detailed and generic
        error messages.
        """
        body = (resp.text or "").strip()
        return body[:800] + ("..." if len(body) > 800 else "")

    def ingest_message(self, role: str, text: str) -> Optional[Dict[str, Any]]:
        """Send a message to the SDK for telemetry analysis.

        Args:
            role: Message author role forwarded to the SDK ("user"/"assistant").
            text: Raw message text.

        Returns:
            Parsed JSON response dict on success, ``None`` on any failure
            (errors are printed, never raised).
        """
        url = f"{self.base_url}/v0/ingest"
        payload = {
            "conversation_id": self.conversation_id,
            "role": role,
            "text": text,
            "timestamp_ms": int(time.time() * 1000),
        }
        try:
            response = self.session.post(url, json=payload, timeout=10)
            if response.status_code == 401:
                # FastAPI typically returns JSON {"detail": ...}; include body when present.
                body = self._error_body(response)
                if body:
                    print(f"\n{Colors.RED}[SDK Error] Invalid API Key :: {body}{Colors.ENDC}")
                else:
                    print(f"\n{Colors.RED}[SDK Error] Invalid API Key.{Colors.ENDC}")
                return None
            response.raise_for_status()
            self.message_count += 1
            return response.json()
        except Exception as e:
            # Surface server-provided details where possible.
            resp = getattr(e, "response", None)
            if resp is not None:
                body = self._error_body(resp)
                if body:
                    print(f"\n{Colors.RED}[SDK Error] Connection failed: {e} :: {body}{Colors.ENDC}")
                    return None
            print(f"\n{Colors.RED}[SDK Error] Connection failed: {e}{Colors.ENDC}")
            return None

    def hosted_chat(self, user_message: str) -> Optional[Dict[str, Any]]:
        """Send message to hosted AI endpoint (paid tier only).

        The last 10 local history messages are sent as conversational
        context. On success the message counter advances by two (the
        user turn plus the generated assistant turn).

        Returns:
            Parsed JSON response dict on success, ``None`` on failure
            (401 unauthorized / 402 payment-required / transport errors
            are printed, never raised).
        """
        url = f"{self.base_url}/v0/chat"
        payload = {
            "message": user_message,
            "conversation_id": self.conversation_id,
            "messages": self.messages[-10:],  # Send recent context
        }
        try:
            # Hosted AI can take longer (LLM generation)
            response = self.session.post(url, json=payload, timeout=60)
            if response.status_code == 401:
                body = self._error_body(response)
                if body:
                    print(f"\n{Colors.RED}[SDK Error] Unauthorized :: {body}{Colors.ENDC}")
                else:
                    print(f"\n{Colors.RED}[SDK Error] Unauthorized. Hosted AI requires a paid API key.{Colors.ENDC}")
                return None
            elif response.status_code == 402:
                body = self._error_body(response)
                if body:
                    print(f"\n{Colors.RED}[SDK Error] Payment required :: {body}{Colors.ENDC}")
                else:
                    print(f"\n{Colors.RED}[SDK Error] Payment required. Upgrade to a paid tier for Hosted AI.{Colors.ENDC}")
                return None
            response.raise_for_status()
            self.message_count += 2  # User + Assistant
            return response.json()
        except Exception as e:
            resp = getattr(e, "response", None)
            if resp is not None:
                body = self._error_body(resp)
                if body:
                    print(f"\n{Colors.RED}[SDK Error] Chat request failed: {e} :: {body}{Colors.ENDC}")
                    return None
            print(f"\n{Colors.RED}[SDK Error] Chat request failed: {e}{Colors.ENDC}")
            return None

    def extract_metrics(self, response: Dict[str, Any]) -> TelemetryMetrics:
        """Extract standardized metrics from SDK response.

        Paper 03 canonical metrics:
        - SGI: turn_pair_sgi_latest (or fallback to sgi_latest)
        - Velocity: orbital_velocity_latest (turn-pair, ~25-45 deg)
          Fallback: angular_velocity_latest (per-message, ~75-180 deg)
        Turn-pair metrics have lower variance and are the canonical choice.
        The SDK now exposes these at top level for easy access.
        """
        # Explicit None checks instead of `or`-chaining: a legitimate 0.0
        # reading is falsy and would otherwise be discarded in favor of the
        # fallback key (or silently become None).
        sgi = response.get("turn_pair_sgi_latest")
        if sgi is None:
            sgi = response.get("sgi_latest")
        velocity = response.get("orbital_velocity_latest")
        if velocity is None:
            velocity = response.get("angular_velocity_latest")
        return TelemetryMetrics(
            sgi=sgi,
            velocity=velocity,
            context_phase=response.get("context_phase", "stable"),
            context_mass=response.get("context_mass", 0),
            attractor_count=response.get("attractor_count", 1),
            context_drift=response.get("context_drift", 0.0),
            processing_time_ms=response.get("processing_time_ms", 0)
        )
# =============================================================================
# COEVOLUTION TRACKER (Client-Side Logic)
# =============================================================================
class CoevolutionTracker:
    """Tracks session dynamics to compute the Coevolution Index.

    Accumulates one TelemetryMetrics per completed turn; SGI and velocity
    readings are kept in parallel histories (only when present) so the
    coherence-region test can pair them up.
    """

    def __init__(self):
        self.turns: List[TelemetryMetrics] = []
        self.sgi_history: List[float] = []
        self.velocity_history: List[float] = []

    def add_turn(self, metrics: TelemetryMetrics):
        """Record one turn; missing (None) SGI/velocity readings are skipped."""
        self.turns.append(metrics)
        if metrics.sgi is not None:
            self.sgi_history.append(metrics.sgi)
        if metrics.velocity is not None:
            self.velocity_history.append(metrics.velocity)

    def compute_index(self) -> CoevolutionIndex:
        """Compute the Coevolution Index based on accumulated history."""
        if not self.turns:
            # Nothing recorded yet: neutral zeroed-out index.
            return CoevolutionIndex(0, "BASIC", 0, 0, 0, 0, 0, 0, 0, 0)

        turn_total = len(self.turns)

        # --- 1. Horizontal Score (Hs): session dynamics -------------------
        # Coherence region: SGI > 0.6 AND velocity < 30 deg, paired turn-wise.
        region_hits = sum(
            1
            for sgi_val, vel_val in zip(self.sgi_history, self.velocity_history)
            if sgi_val > 0.6 and vel_val < 30.0
        )
        coherence_occupancy = region_hits / turn_total

        # Dyadic coherence: mean SGI over the turns that reported one.
        if self.sgi_history:
            dyadic_coherence = sum(self.sgi_history) / len(self.sgi_history)
        else:
            dyadic_coherence = 0

        # Context stability: 1.0 minus drift normalized to a 0-100 scale.
        avg_drift = sum(t.context_drift for t in self.turns) / turn_total
        context_stability = max(0.0, 1.0 - (avg_drift / 100.0))

        hs = (coherence_occupancy * 0.4) + (dyadic_coherence * 0.4) + (context_stability * 0.2)

        # --- 2. Vertical Score (Vs): depth -------------------------------
        # Production build has no S64/Stage1 pipeline, so the depth
        # components are fixed baseline placeholders (the Dev script
        # computes them for real).
        symbolic_entropy = 0.5
        path_density = 0.0
        domain_balance = 0.5
        vs = (symbolic_entropy * 0.3) + (path_density * 0.4) + (domain_balance * 0.3)

        # --- 3. Blend and classify ----------------------------------------
        ci = (hs * 0.6) + (vs * 0.4)
        if ci >= 0.7:
            tier = "HIGH"
        elif ci >= 0.4:
            tier = "ELEVATED"
        else:
            tier = "BASIC"

        return CoevolutionIndex(
            coevolution_index=ci,
            tier=tier,
            horizontal_score=hs,
            vertical_score=vs,
            coherence_region_occupancy=coherence_occupancy,
            dyadic_coherence_index=dyadic_coherence,
            context_stability=context_stability,
            symbolic_entropy=symbolic_entropy,
            path_transformation_density=path_density,
            domain_balance_index=domain_balance
        )
# =============================================================================
# UI HELPERS
# =============================================================================
def print_header():
    """Print the session banner in bold cyan, then reset the color."""
    banner_lines = (
        "╔══════════════════════════════════════════════════════════════╗",
        "║ AICoevolution Semantic Telemetry (v1.0) ║",
        "╚══════════════════════════════════════════════════════════════╝",
    )
    print(f"\n{Colors.CYAN}{Colors.BOLD}")
    for line in banner_lines:
        print(line)
    print(f"{Colors.ENDC}")
def print_metrics(m: TelemetryMetrics):
    """Pretty-print one turn's metrics with traffic-light coloring.

    SGI: green above 0.7, yellow above 0.4, otherwise red.
    Velocity: green below 15 deg, yellow below 45 deg, otherwise red.
    Missing readings render as "N/A".
    """
    print(f"{Colors.BLUE} Metrics:{Colors.ENDC}")
    # SGI
    if m.sgi is None:
        print(" SGI: N/A")
    else:
        if m.sgi > 0.7:
            sgi_tone = Colors.GREEN
        elif m.sgi > 0.4:
            sgi_tone = Colors.YELLOW
        else:
            sgi_tone = Colors.RED
        print(f" SGI: {sgi_tone}{m.sgi:.3f}{Colors.ENDC}")
    # Velocity
    if m.velocity is None:
        print(" Velocity: N/A")
    else:
        if m.velocity < 15:
            vel_tone = Colors.GREEN
        elif m.velocity < 45:
            vel_tone = Colors.YELLOW
        else:
            vel_tone = Colors.RED
        print(f" Velocity: {vel_tone}{m.velocity:.1f}°{Colors.ENDC}")
    # Context
    print(f" Context Phase: {m.context_phase}")
    print(f" Context Mass: {m.context_mass}")
def print_ci(ci: CoevolutionIndex):
    """Render the session-level Coevolution Index summary panel."""
    # Tier determines the highlight color; anything below ELEVATED is red.
    tier_palette = {"HIGH": Colors.GREEN, "ELEVATED": Colors.YELLOW}
    tier_tone = tier_palette.get(ci.tier, Colors.RED)
    print(f"\n{Colors.BOLD}──────────────────────── COEVOLUTION INDEX ─────────────────────────{Colors.ENDC}")
    print(f" CI: {tier_tone}{ci.coevolution_index:.3f} [{ci.tier}]{Colors.ENDC}")
    print(f" Horizontal Score: {ci.horizontal_score:.3f}")
    print(f" Vertical Score: {ci.vertical_score:.3f} (Limited in Prod)")
    print(f"{Colors.BOLD}────────────────────────────────────────────────────────────────────{Colors.ENDC}")
# =============================================================================
# MAIN LOOP
# =============================================================================
def run_session(client: SemanticTelemetryClient, turns: int, hosted_ai: bool):
    """Run the interactive REPL for up to `turns` conversation turns.

    Two modes:
      - Hosted AI: the user's message goes to client.hosted_chat(); the
        server returns the AI reply plus embedded SDK telemetry.
      - Manual: both the user and AI messages are typed in and ingested
        individually via client.ingest_message().

    The loop exits early on 'quit'/'exit', Ctrl-C, or EOF. After every
    completed turn the per-turn metrics and the running Coevolution
    Index are printed.
    """
    print_header()
    print(f"Session ID: {client.conversation_id}")
    print(f"Target: {client.base_url}")
    print(f"Mode: {'Hosted AI' if hosted_ai else 'Manual Entry'}")
    print("\nType 'quit' to exit.\n")
    tracker = CoevolutionTracker()
    for i in range(turns):
        print(f"\n{Colors.BOLD}--- Turn {i+1}/{turns} ---{Colors.ENDC}")
        # User Input
        try:
            user_text = input(f"{Colors.GREEN}[YOU]:{Colors.ENDC} ").strip()
        except (KeyboardInterrupt, EOFError):
            break
        if user_text.lower() in ('quit', 'exit'):
            break
        if not user_text:
            # Blank input: skip the turn entirely (still consumes a turn slot).
            continue
        # Process Turn
        if hosted_ai:
            print(f"{Colors.CYAN} ... generating response ...{Colors.ENDC}")
            client.messages.append({"role": "user", "content": user_text})
            data = client.hosted_chat(user_text)
            if not data: continue
            reply = data.get("reply", "")
            sdk_data = data.get("sdk", {})
            # Optional quota info from the server; parsed defensively.
            quota = data.get("quota") if isinstance(data, dict) else None
            turns_left = None
            try:
                if isinstance(quota, dict) and isinstance(quota.get("remaining"), int):
                    # remaining // 2 — presumably because each hosted turn
                    # consumes two messages (cf. message_count += 2); confirm.
                    turns_left = max(0, int(quota["remaining"]) // 2)
            except Exception:
                turns_left = None
            client.messages.append({"role": "assistant", "content": reply})
            print(f"{Colors.BLUE}[AI]:{Colors.ENDC} {reply}\n")
            metrics = client.extract_metrics(sdk_data)
            print_metrics(metrics)
            tracker.add_turn(metrics)
            print_ci(tracker.compute_index())
            if turns_left is not None:
                print(f"{Colors.CYAN} Turns left this week: {turns_left}{Colors.ENDC}")
        else:
            # Manual Mode: ingest the user message first (result only echoed,
            # not tracked -- the tracker records once per completed turn pair).
            print(f"{Colors.CYAN} ... ingesting ...{Colors.ENDC}")
            resp = client.ingest_message("user", user_text)
            if resp:
                m = client.extract_metrics(resp)
                sgi_str = f"{m.sgi:.2f}" if m.sgi is not None else "N/A"
                print(f" [SDK] User turn ingested (SGI={sgi_str})")
            try:
                ai_text = input(f"{Colors.BLUE}[AI]:{Colors.ENDC} ").strip()
            except (KeyboardInterrupt, EOFError):
                break
            if not ai_text: ai_text = "(no response)"
            print(f"{Colors.CYAN} ... ingesting ...{Colors.ENDC}")
            resp = client.ingest_message("assistant", ai_text)
            if resp:
                # Only the assistant-side response carries the turn-pair
                # metrics recorded into the tracker.
                metrics = client.extract_metrics(resp)
                print_metrics(metrics)
                tracker.add_turn(metrics)
                print_ci(tracker.compute_index())
# =============================================================================
# CLI ENTRY POINT
# =============================================================================
# CLI entry point: parse flags, configure color output, then start the
# interactive session against the (possibly overridden) SDK endpoint.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="AICoevolution Semantic Telemetry (Production)")
    parser.add_argument("--api-key", required=True, help="Your AICoevolution API Key")
    parser.add_argument("--url", help=f"Custom SDK URL (default: {DEFAULT_SDK_URL})")
    parser.add_argument("--hosted-ai", action="store_true", help="Use Hosted AI for responses")
    parser.add_argument("--turns", type=int, default=10, help="Number of turns")
    parser.add_argument("--no-color", action="store_true", help="Disable colored output")
    args = parser.parse_args()
    # Configure colors before any colored output is produced.
    _init_console_colors(enable=not args.no_color)
    client = SemanticTelemetryClient(args.api_key, base_url=(args.url or DEFAULT_SDK_URL))
    run_session(client, args.turns, args.hosted_ai)
|