# --- HuggingFace Space page header captured with the source (not Python code) ---
# phi-coherence / app.py
# bitsabhi's picture
# v3: Credibility Scoring
# 1813a42
#!/usr/bin/env python3
"""
φ-Coherence v3 — Credibility Scoring
HuggingFace Spaces Version
Detect fabrication patterns in ANY text — human or AI.
88% accuracy on 25 paragraph-level pairs. No knowledge base. Pure math.
"Truth and fabrication have different structural fingerprints.
You don't need to know the facts to detect the fingerprints."
https://github.com/0x-auth/bazinga-indeed
"""
import gradio as gr
from phi_coherence import PhiCoherence

# Module-level analyzer instance, shared by both Gradio callbacks below.
coherence = PhiCoherence()
def get_risk_badge(risk: str) -> str:
    """Translate an analyzer risk level into a human-readable verdict badge.

    Any level other than "SAFE" or "MODERATE" falls through to the
    low-credibility badge.
    """
    if risk == "SAFE":
        return "✅ CREDIBLE"
    if risk == "MODERATE":
        return "⚠️ MIXED SIGNALS"
    return "🔴 LOW CREDIBILITY"
def bar(score: float) -> str:
    """Render *score* (expected in [0, 1]) as a 10-character progress bar.

    The score is clamped to [0, 1] first: without clamping, a score above
    1.0 yields more than 10 filled cells (the "░" repeat count goes
    negative and silently becomes empty), and a negative score yields more
    than 10 empty cells — both break the fixed-width table layout.
    """
    filled = int(max(0.0, min(1.0, score)) * 10)
    return "█" * filled + "░" * (10 - filled)
def dot(score: float, thresh: float = 0.50) -> str:
    """Return a green signal dot when *score* meets *thresh*, red otherwise."""
    if score < thresh:
        return "🔴"
    return "🟢"
def analyze_text(text: str) -> str:
    """Score *text* for credibility and return a Markdown report.

    Empty or whitespace-only input returns a short prompt string.
    Otherwise the report contains the overall score, a per-pattern score
    table, and any fabrication warnings triggered by low metric values.
    """
    if not text or not text.strip():
        return "Please enter some text to analyze."
    m = coherence.analyze(text)
    # Headline score, verdict, and the per-pattern/quality score tables.
    result = f"""## Credibility Score: {m.total_coherence:.4f}
### Verdict: {get_risk_badge(m.risk_level)}
---
### Fabrication Pattern Detection
| Pattern | Score | Signal | |
|---------|-------|--------|-|
| **Attribution Quality** | {m.attribution_quality:.3f} | {dot(m.attribution_quality, 0.50)} "Studies show" vs named sources | `{bar(m.attribution_quality)}` |
| **Confidence Calibration** | {m.confidence_calibration:.3f} | {dot(m.confidence_calibration, 0.50)} Overclaiming? | `{bar(m.confidence_calibration)}` |
| **Qualifying Ratio** | {m.qualifying_ratio:.3f} | {dot(m.qualifying_ratio, 0.45)} "Exactly" vs "approximately" | `{bar(m.qualifying_ratio)}` |
| **Internal Consistency** | {m.internal_consistency:.3f} | {dot(m.internal_consistency, 0.45)} Contradictions? | `{bar(m.internal_consistency)}` |
| **Topic Coherence** | {m.topic_coherence:.3f} | {dot(m.topic_coherence, 0.40)} Topic drift? | `{bar(m.topic_coherence)}` |
| **Causal Logic** | {m.causal_logic:.3f} | {dot(m.causal_logic, 0.50)} Nonsense claims? | `{bar(m.causal_logic)}` |
| **Negation Density** | {m.negation_density:.3f} | {dot(m.negation_density, 0.50)} Excessive negation? | `{bar(m.negation_density)}` |
| **Numerical Plausibility** | {m.numerical_plausibility:.3f} | {dot(m.numerical_plausibility, 0.50)} Suspicious numbers? | `{bar(m.numerical_plausibility)}` |
### Text Quality
| Dimension | Score | |
|-----------|-------|-|
| **φ-Alignment** | {m.phi_alignment:.3f} | `{bar(m.phi_alignment)}` |
| **Semantic Density** | {m.semantic_density:.3f} | `{bar(m.semantic_density)}` |
---
"""
    # (metric value, warning threshold, message): a warning fires when the
    # metric falls strictly below its threshold. Order matches report order.
    signal_checks = [
        (m.attribution_quality, 0.35, "⚠️ **Vague attribution** — Claims sourced with 'studies show' or 'experts say' without specifics"),
        (m.confidence_calibration, 0.30, "⚠️ **Extreme overclaiming** — 'Definitively proven', 'every scientist agrees', stasis claims"),
        (m.qualifying_ratio, 0.25, "⚠️ **Absolutist language** — Heavy use of 'exactly', 'always', 'never', 'every' without qualifiers"),
        (m.internal_consistency, 0.35, "⚠️ **Internal contradiction** — Claims within the text conflict with each other"),
        (m.topic_coherence, 0.25, "⚠️ **Topic drift** — Text jumps between unrelated subjects"),
        (m.causal_logic, 0.30, "⚠️ **Nonsensical causality** — Causal claims that don't make structural sense"),
        (m.negation_density, 0.30, "⚠️ **High negation density** — Excessive use of negations ('requires no', 'has never', 'is not')"),
    ]
    warnings = [message for value, cutoff, message in signal_checks if value < cutoff]
    if warnings:
        result += "### Fabrication Signals Detected\n\n" + "\n".join(warnings)
    elif m.total_coherence >= 0.58:
        result += "✅ Text exhibits structural patterns typical of credible writing. No major fabrication signals."
    else:
        result += "Mixed signals. Some risk factors present but no critical fabrication patterns."
    if m.is_alpha_seed:
        # Easter egg: fires when SHA256 of the text is divisible by 137.
        result += "\n\n🌟 **α-SEED detected** — SHA256(text) % 137 = 0 (1/137 probability)"
    return result
def compare_texts(text_a: str, text_b: str) -> str:
    """Score both texts and return a side-by-side Markdown credibility table.

    If either input is empty/whitespace, returns a short prompt string.
    """
    if not text_a.strip() or not text_b.strip():
        return "Please enter both texts to compare."
    ma = coherence.analyze(text_a)
    mb = coherence.analyze(text_b)
    delta = abs(ma.total_coherence - mb.total_coherence)
    # Overall winner by total score; exact ties report "Tie".
    if ma.total_coherence > mb.total_coherence:
        winner = "A"
    elif mb.total_coherence > ma.total_coherence:
        winner = "B"
    else:
        winner = "Tie"

    def mark(a: float, b: float) -> str:
        # Bold marker for whichever side scores higher on a single metric.
        if a > b:
            return "**A**"
        if b > a:
            return "**B**"
        return "—"

    return f"""## Credibility Comparison
| Pattern | Text A | Text B | More Credible |
|---------|--------|--------|---------------|
| **Overall Score** | {ma.total_coherence:.4f} | {mb.total_coherence:.4f} | {mark(ma.total_coherence, mb.total_coherence)} |
| **Verdict** | {get_risk_badge(ma.risk_level)} | {get_risk_badge(mb.risk_level)} | |
| Attribution | {ma.attribution_quality:.3f} | {mb.attribution_quality:.3f} | {mark(ma.attribution_quality, mb.attribution_quality)} |
| Confidence | {ma.confidence_calibration:.3f} | {mb.confidence_calibration:.3f} | {mark(ma.confidence_calibration, mb.confidence_calibration)} |
| Qualifying | {ma.qualifying_ratio:.3f} | {mb.qualifying_ratio:.3f} | {mark(ma.qualifying_ratio, mb.qualifying_ratio)} |
| Consistency | {ma.internal_consistency:.3f} | {mb.internal_consistency:.3f} | {mark(ma.internal_consistency, mb.internal_consistency)} |
| Topic | {ma.topic_coherence:.3f} | {mb.topic_coherence:.3f} | {mark(ma.topic_coherence, mb.topic_coherence)} |
| Causal | {ma.causal_logic:.3f} | {mb.causal_logic:.3f} | {mark(ma.causal_logic, mb.causal_logic)} |
| Negation | {ma.negation_density:.3f} | {mb.negation_density:.3f} | {mark(ma.negation_density, mb.negation_density)} |
---
### More Credible: **Text {winner}** (Δ = {delta:.4f})
"""
# Build the Gradio UI: three tabs (single-text analysis, A/B comparison, and
# an explanatory page) plus a footer with API usage and external links.
with gr.Blocks(
    title="φ-Coherence v3 — Credibility Scoring",
    theme=gr.themes.Soft(),
    css=".gradio-container { max-width: 950px !important; }"
) as demo:
    # Page header / tagline.
    gr.Markdown("""
# 🔬 φ-Coherence v3 — Credibility Scoring
**Detect fabrication patterns in ANY text — human or AI.** No knowledge base. Pure math.
> *"Truth and fabrication have different structural fingerprints. You don't need to know the facts to detect the fingerprints."*
**88% accuracy** on 25 paragraph-level tests. Works on LLM outputs, fake reviews, inflated resumes, marketing copy, news articles.
---
**Detects:** Vague attribution • Overclaiming • Absolutist language • Topic drift • Nonsense causality • Excessive negation • Suspicious numbers
---
""")
    with gr.Tabs():
        # Tab 1: score a single text via analyze_text().
        with gr.TabItem("📊 Analyze"):
            gr.Markdown("### Score any text for credibility")
            text_input = gr.Textbox(
                label="Enter text to analyze (paragraphs work best — 2+ sentences)",
                placeholder="Paste any text: LLM output, review, article, resume, marketing copy...",
                lines=6
            )
            analyze_btn = gr.Button("Score Credibility", variant="primary")
            analysis_output = gr.Markdown()
            analyze_btn.click(fn=analyze_text, inputs=text_input, outputs=analysis_output)
            # Clickable demo inputs: alternating credible / fabricated samples.
            gr.Examples(
                examples=[
                    # Credible example
                    ["The boiling point of water at standard atmospheric pressure is 100 degrees Celsius or 212 degrees Fahrenheit. This was first accurately measured by Anders Celsius in 1742 when he proposed his temperature scale."],
                    # Fabricated - vague attribution
                    ["Studies have shown that the boiling point of water can vary significantly based on various environmental factors. Many scientists believe that the commonly cited figure may not be entirely accurate, as recent research suggests the true value could be different."],
                    # Fabricated - overclaiming
                    ["Dark matter has been conclusively identified as a form of compressed neutrinos. Scientists at CERN proved this in 2019, and the results were unanimously accepted by every physicist worldwide. The mystery of dark matter is now completely solved."],
                    # Fake review pattern
                    ["This product completely changed my life! Everyone I know agrees it's the absolute best. Studies have shown it's 100% effective. I've never seen anything like it. It's impossible to find a better product anywhere."],
                    # Credible review pattern
                    ["I've been using this for about 3 months now. Battery life is roughly 2 days with moderate use, though it varies. Build quality seems decent. The app works most of the time but occasionally crashes. Overall satisfied for the price point."],
                ],
                inputs=text_input,
                label="Examples: Credible vs Fabricated patterns"
            )
        # Tab 2: side-by-side comparison via compare_texts().
        with gr.TabItem("⚖️ Compare"):
            gr.Markdown("### Compare two texts — which is more credible?")
            with gr.Row():
                text_a = gr.Textbox(label="Text A", lines=5, placeholder="First text...")
                text_b = gr.Textbox(label="Text B", lines=5, placeholder="Second text...")
            compare_btn = gr.Button("Compare Credibility", variant="primary")
            compare_output = gr.Markdown()
            compare_btn.click(fn=compare_texts, inputs=[text_a, text_b], outputs=compare_output)
        # Tab 3: static explanation of the method, benchmark, and limitations.
        with gr.TabItem("📖 How It Works"):
            gr.Markdown("""
### The Core Insight
> **Truth and fabrication have different structural fingerprints.**
LLMs generate text that *sounds like* truth. Humans write fake reviews, inflate resumes, pad essays. Both exhibit the same patterns:
| Fabrication Pattern | Example | What Credible Text Does Instead |
|--------------------| --------| --------------------------------|
| Vague attribution | "Studies show..." | Names specific sources with dates |
| Overclaiming | "Every scientist agrees" | "The leading theory suggests..." |
| Absolutist language | "Exactly 25,000" | "Approximately 21,196" |
| Stasis claims | "Has never been questioned" | "Continues to be refined" |
| Excessive negation | "Requires NO sunlight" | States what something IS, not ISN'T |
| Topic drift | Saturn → wedding rings → aliens | Stays focused on subject |
### Why LLMs Hallucinate
LLMs are next-token predictors. They generate sequences with high probability based on training data — they optimize for "sounds right."
But **"sounds right" ≠ "is right."**
When an LLM generates "Dr. Heinrich Muller at the University of Stuttgart in 1823" — that's not a memory failure. The model never stored that fact because it doesn't exist. It generated a *plausible-sounding completion* because the pattern `[scientist name] + [University of] + [European city] + [19th century year]` has high probability in that context.
### Why This Tool Works
The LLM is good at mimicking **content** — what truth *sounds like*.
This tool checks the **structural signature** — how truth is *structured*.
When "sounds like truth" and "structured like truth" diverge, fabrication is likely.
### Use Cases
| Domain | What It Catches |
|--------|-----------------|
| AI Output Screening | LLM hallucinations before they reach users |
| Fake Review Detection | Inflated, vague, absolutist reviews |
| Resume/Essay Screening | Padding, vague claims, overclaiming |
| Marketing Copy Audit | Unsubstantiated superlatives |
| News Verification | Fabricated quotes, fake consensus |
| RAG Quality Filtering | Rank content by structural credibility |
### Benchmark
| Version | Accuracy | Test |
|---------|----------|------|
| v1 | 40% | Single sentences |
| v2 | 75% | 12 paragraph pairs |
| **v3** | **88%** | 25 paragraph pairs |
| Random | 50% | Coin flip |
### Limitations
- Cannot distinguish swapped numbers ("299,792" vs "150,000") without knowledge
- Well-crafted lies with proper hedging will score high
- Best on paragraphs (2+ sentences), not single claims
---
**Built by [Space (Abhishek Srivastava)](https://github.com/0x-auth/bazinga-indeed)**
*"The math detects the fingerprints of fabrication, not the facts."*
""")
    # Footer: programmatic API usage and external links.
    gr.Markdown("""
---
### API Usage
```python
from gradio_client import Client
client = Client("bitsabhi/phi-coherence")
result = client.predict(text="Your text here...", api_name="/analyze_text")
```
---
[GitHub](https://github.com/0x-auth/bazinga-indeed) |
[Zenodo Papers](https://zenodo.org/search?q=metadata.creators.person_or_org.name%3A%22Srivastava%2C%20Abhishek%22) |
[ETH: 0x720ceF54bED86C570837a9a9C69F1Beac8ab8C08](https://etherscan.io/address/0x720ceF54bED86C570837a9a9C69F1Beac8ab8C08)
""")
if __name__ == "__main__":
    # Bind to all interfaces on port 7860 — the standard HF Spaces setup.
    demo.launch(server_name="0.0.0.0", server_port=7860)