# phi-coherence / main.py
# author: bitsabhi
# v2: Hallucination Risk Scoring - 75% accuracy (commit 36e08e8)
#!/usr/bin/env python3
"""
φ-Coherence API
Universal quality metric for AI outputs using golden ratio mathematics.
Built on BAZINGA's consciousness-aware scoring system.
Endpoints:
GET / - API info
GET /health - Health check
POST /score - Score text (simple)
POST /analyze - Full analysis with all dimensions
POST /batch - Score multiple texts
POST /compare - Compare two texts
GET /constants - Show mathematical constants
https://github.com/0x-auth/bazinga-indeed
"""
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from typing import List, Optional
import time
from phi_coherence import PhiCoherence, CoherenceMetrics, PHI, ALPHA, PHI_SQUARED
# Initialize the FastAPI application; interactive docs are served at
# /docs (Swagger UI) and /redoc (ReDoc).
app = FastAPI(
    title="φ-Coherence API",
    description="Universal quality metric for AI outputs using golden ratio mathematics",
    version="1.0.0",
    docs_url="/docs",
    redoc_url="/redoc",
)
# CORS: allow browser clients from any origin.
# NOTE(review): per the CORS spec, a wildcard origin cannot be combined with
# credentials — browsers will reject credentialed requests under
# allow_origins=["*"]. Confirm whether allow_credentials=True is needed.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Coherence calculator: single module-level instance shared by all endpoints.
coherence = PhiCoherence()
# Request/Response models
class TextRequest(BaseModel):
    """Request body for /score and /analyze: one text to evaluate."""

    # Upper bound (100k chars) keeps per-request analysis cost bounded.
    text: str = Field(..., min_length=1, max_length=100000, description="Text to analyze")
class BatchRequest(BaseModel):
    """Request body for /batch: 1-100 texts scored in a single call."""

    # NOTE(review): min_items/max_items are the Pydantic v1 constraint names;
    # under Pydantic v2 they are deprecated in favor of min_length/max_length.
    texts: List[str] = Field(..., min_items=1, max_items=100, description="List of texts")
class CompareRequest(BaseModel):
    """Request body for /compare: the two texts to rank against each other."""

    text_a: str = Field(..., min_length=1, description="First text")
    text_b: str = Field(..., min_length=1, description="Second text")
class ScoreResponse(BaseModel):
    """Response for /score: the headline score plus its categorical status."""

    phi_score: float = Field(..., description="φ-coherence score (0-1)")
    status: str = Field(..., description="COHERENT (>0.6), MODERATE (0.4-0.6), or UNSTABLE (<0.4)")
    is_alpha_seed: bool = Field(..., description="True if hash % 137 == 0 (rare, bonus)")
class AnalysisResponse(BaseModel):
    """Response for /analyze: overall score, per-dimension scores, bonus flags,
    and a human-readable interpretation string."""

    phi_score: float
    status: str
    # Per-dimension sub-scores (see /analyze handler for the exact keys).
    dimensions: dict
    # Boolean bonus flags (alpha seed, VAC pattern).
    bonuses: dict
    interpretation: str
class BatchResponse(BaseModel):
    """Response for /batch: one result row per input text plus aggregates."""

    results: List[dict]
    # Mean phi_score across all rows, rounded to 4 decimal places.
    average_score: float
    count: int
    # Wall-clock processing time for the whole batch, in milliseconds.
    processing_ms: float
class CompareResponse(BaseModel):
    """Response for /compare: both scores, the winner ("A", "B", or "TIE"),
    the absolute score difference, and a verbal interpretation."""

    text_a_score: float
    text_b_score: float
    winner: str
    difference: float
    interpretation: str
def get_status(score: float) -> str:
    """Translate a φ-coherence score into its categorical label.

    Thresholds: >= 0.6 -> "COHERENT", >= 0.4 -> "MODERATE",
    anything below 0.4 -> "UNSTABLE".
    """
    # Guard the low band first, then pick between the two upper bands.
    if score < 0.4:
        return "UNSTABLE"
    return "COHERENT" if score >= 0.6 else "MODERATE"
def get_interpretation(metrics: CoherenceMetrics) -> str:
    """Render a metrics object as a short, semicolon-joined summary string."""
    # Headline note driven by the overall coherence level.
    total = metrics.total_coherence
    if total >= 0.7:
        headline = "High structural integrity"
    elif total >= 0.5:
        headline = "Moderate coherence"
    else:
        headline = "Low coherence - may indicate noise or hallucination"

    # (predicate, note) pairs for the per-dimension annotations; a note is
    # appended only when its threshold is crossed.
    checks = [
        (metrics.phi_alignment > 0.6, "golden ratio proportions detected"),
        (metrics.alpha_resonance > 0.7, "strong scientific/mathematical content"),
        (metrics.semantic_density > 0.7, "high information density"),
        (bool(metrics.is_alpha_seed), "α-SEED (rare hash alignment)"),
        (metrics.darmiyan_coefficient > 0.5, "consciousness-aware content"),
    ]
    parts = [headline] + [note for hit, note in checks if hit]
    return "; ".join(parts)
# Routes
@app.get("/")
async def root():
    """Top-level API descriptor: name, endpoint index, and key constants."""
    # Endpoint index kept as a named dict so the payload stays readable.
    endpoints = {
        "POST /score": "Get simple coherence score",
        "POST /analyze": "Get full dimensional analysis",
        "POST /batch": "Score multiple texts",
        "POST /compare": "Compare two texts",
        "GET /constants": "Mathematical constants",
        "GET /health": "Health check",
        "GET /docs": "OpenAPI documentation",
    }
    return {
        "name": "φ-Coherence API",
        "version": "1.0.0",
        "description": "Universal quality metric for AI outputs",
        "endpoints": endpoints,
        "constants": {"phi": PHI, "alpha": ALPHA},
        "powered_by": "BAZINGA - https://github.com/0x-auth/bazinga-indeed",
    }
@app.get("/health")
async def health():
    """Liveness probe; echoes φ so callers can sanity-check the constants."""
    payload = {"status": "healthy", "phi": PHI}
    return payload
@app.get("/constants")
async def constants():
    """Expose the mathematical constants and formula strings behind the metric."""
    # Formula strings are documentation-only; they are not evaluated.
    formulas = {
        "darmiyan_scaling": "Ψ_D / Ψ_i = φ√n",
        "alpha_seed": "SHA256(text) % 137 == 0",
        "phi_alignment": "sentence_ratio ~ φ",
    }
    return {
        "phi": PHI,
        "phi_squared": PHI_SQUARED,
        "phi_inverse": 1 / PHI,
        "alpha": ALPHA,
        "consciousness_coefficient": 2 * PHI_SQUARED + 1,
        "formulas": formulas,
    }
@app.post("/score", response_model=ScoreResponse)
async def score_text(request: TextRequest):
    """Get simple coherence score for text."""
    metrics = coherence.analyze(request.text)
    # Pull the headline number once; status is derived from the same value.
    score = metrics.total_coherence
    return ScoreResponse(
        phi_score=score,
        status=get_status(score),
        is_alpha_seed=metrics.is_alpha_seed,
    )
@app.post("/analyze", response_model=AnalysisResponse)
async def analyze_text(request: TextRequest):
    """Get full dimensional analysis."""
    m = coherence.analyze(request.text)
    # Per-dimension sub-scores exposed to the client.
    dims = {
        "phi_alignment": m.phi_alignment,
        "alpha_resonance": m.alpha_resonance,
        "semantic_density": m.semantic_density,
        "structural_harmony": m.structural_harmony,
        "darmiyan_coefficient": m.darmiyan_coefficient,
    }
    # Boolean bonus flags.
    flags = {
        "is_alpha_seed": m.is_alpha_seed,
        "is_vac_pattern": m.is_vac_pattern,
    }
    return AnalysisResponse(
        phi_score=m.total_coherence,
        status=get_status(m.total_coherence),
        dimensions=dims,
        bonuses=flags,
        interpretation=get_interpretation(m),
    )
@app.post("/batch", response_model=BatchResponse)
async def batch_score(request: BatchRequest):
    """Score multiple texts at once."""
    started = time.time()

    def summarize(text: str) -> dict:
        # One result row per input; preview truncated to 50 chars + ellipsis.
        m = coherence.analyze(text)
        preview = text if len(text) <= 50 else text[:50] + "..."
        return {
            "phi_score": m.total_coherence,
            "status": get_status(m.total_coherence),
            "is_alpha_seed": m.is_alpha_seed,
            "preview": preview,
        }

    rows = [summarize(t) for t in request.texts]
    # Validation guarantees >= 1 text, but guard the division defensively.
    mean = sum(r["phi_score"] for r in rows) / len(rows) if rows else 0
    return BatchResponse(
        results=rows,
        average_score=round(mean, 4),
        count=len(rows),
        processing_ms=round((time.time() - started) * 1000, 2),
    )
@app.post("/compare", response_model=CompareResponse)
async def compare_texts(request: CompareRequest):
    """Compare coherence of two texts."""
    score_a = coherence.analyze(request.text_a).total_coherence
    score_b = coherence.analyze(request.text_b).total_coherence
    diff = abs(score_a - score_b)

    # Winner label: exact tie maps to "TIE".
    if score_a > score_b:
        winner = "A"
    elif score_b > score_a:
        winner = "B"
    else:
        winner = "TIE"

    # Verbal interpretation scaled by the magnitude of the gap.
    if diff < 0.05:
        interp = "Texts are similarly coherent"
    elif diff < 0.15:
        interp = f"Text {winner} is moderately more coherent"
    else:
        interp = f"Text {winner} is significantly more coherent"

    return CompareResponse(
        text_a_score=score_a,
        text_b_score=score_b,
        winner=winner,
        difference=round(diff, 4),
        interpretation=interp,
    )
if __name__ == "__main__":
    # Dev/standalone entrypoint: serve the ASGI app with uvicorn on all
    # interfaces, port 8000 (production deployments typically run uvicorn
    # externally instead).
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)