Commit ·
19d2058
1
Parent(s): a8c7a60
Deploy harness v2 to root for HuggingFace Space
Browse files- Add v2 app.py (Gradio demo with lossy backend, drift slider, text trace)
- Add src/ modules (extraction, fidelity, compression, lossy, enforcement, lineage, runner)
- Replace corpus with v2 25-signal canonical corpus (5 categories)
- Add tests/ (53 tests, all passing)
- Pure Python lossy backend — no model downloads, runs on any free tier
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
- app.py +279 -0
- corpus/canonical_corpus.json +154 -21
- src/__init__.py +8 -0
- src/compression.py +193 -0
- src/enforcement.py +159 -0
- src/extraction.py +214 -0
- src/fidelity.py +252 -0
- src/lineage.py +152 -0
- src/lossy.py +300 -0
- src/runner.py +402 -0
- tests/__init__.py +0 -0
- tests/test_harness.py +449 -0
app.py
ADDED
|
@@ -0,0 +1,279 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Commitment Conservation Demo — Interactive Falsification Instrument
|
| 4 |
+
|
| 5 |
+
Paste your text. See your commitments extracted. Watch baseline collapse
|
| 6 |
+
while enforcement holds. Run your own falsification.
|
| 7 |
+
|
| 8 |
+
This is not a showcase. This is a measurement instrument.
|
| 9 |
+
"""
|
| 10 |
+
import os
|
| 11 |
+
os.environ.setdefault('MPLBACKEND', 'Agg')
|
| 12 |
+
|
| 13 |
+
import sys
|
| 14 |
+
sys.path.insert(0, os.path.dirname(__file__))
|
| 15 |
+
|
| 16 |
+
import json
|
| 17 |
+
import gradio as gr
|
| 18 |
+
import matplotlib
|
| 19 |
+
matplotlib.use('Agg')
|
| 20 |
+
import matplotlib.pyplot as plt
|
| 21 |
+
|
| 22 |
+
from src.extraction import extract_commitments, extract_commitment_texts
|
| 23 |
+
from src.fidelity import fidelity_breakdown
|
| 24 |
+
from src.compression import get_backend
|
| 25 |
+
from src.runner import run_recursion
|
| 26 |
+
from src.lineage import check_attractor_collapse
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def extract_and_display(text):
    """Extract commitments from *text* and render them as Markdown.

    Returns a pair of Markdown strings:
    (bulleted commitment list, canonical forms used for fidelity scoring).
    Empty or commitment-free input yields an explanatory message instead.
    """
    if not text.strip():
        return "⚠️ Enter text to analyze.", ""

    commitments = extract_commitments(text)

    if not commitments:
        return ("**No commitments detected.** Try text with modal operators: "
                "*must, shall, cannot, required, always, never*"), ""

    # Header plus one icon-tagged bullet per commitment.
    icons = {'obligation': '📋', 'prohibition': '🚫', 'constraint': '⚡'}
    plural = 's' if len(commitments) != 1 else ''
    lines = [f"### {len(commitments)} Commitment{plural} Found\n"]
    for c in commitments:
        marker = icons.get(c.modal_type, '•')
        suffix = " *(conditional)*" if c.is_conditional else ""
        lines.append(f"{marker} **{c.modal_type.title()}** `{c.modal_operator}`: {c.text}{suffix}")

    # Canonical (normalized) forms, sorted for a stable display order.
    canonical_parts = ["**Canonical forms** (used for fidelity scoring):\n"]
    canonical_parts.extend(f"- `{ct}`\n" for ct in sorted(extract_commitment_texts(text)))
    canon_display = "".join(canonical_parts)

    return "\n\n".join(lines), canon_display
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def run_comparison(text, num_iterations, drift_rate):
    """Run the baseline vs. enforced falsification protocol on *text*.

    Args:
        text: Input signal expected to contain at least one commitment.
        num_iterations: Recursion depth for both compression chains.
        drift_rate: Simulated LLM noise level passed to the lossy backend.

    Returns:
        Tuple of (summary_markdown, matplotlib_figure, trace_markdown,
        receipt_json). On invalid input the last three slots are None.
    """
    if not text.strip():
        return "⚠️ Enter text first.", None, None, None

    commitments = extract_commitment_texts(text)
    if not commitments:
        return "⚠️ No commitments found. Cannot run comparison.", None, None, None

    # Two independent channels with identical noise settings.
    baseline_backend = get_backend('lossy', drift_rate=drift_rate)
    enforced_backend = get_backend('lossy_enforced', drift_rate=drift_rate)

    # Baseline: recursive compression with no commitment gate.
    baseline_chain = run_recursion(
        text, baseline_backend, depth=num_iterations,
        enforce=False, threshold=0.6, target_ratio=0.5,
    )

    # Enforced: same settings, but the commitment gate is active.
    enforced_backend.reset()
    enforced_chain = run_recursion(
        text, enforced_backend, depth=num_iterations,
        enforce=True, threshold=0.6, target_ratio=0.5,
    )

    # Fidelity curves, prepended with the perfect-fidelity starting point.
    b_fidelities = [1.0] + baseline_chain.fidelity_curve
    e_fidelities = [1.0] + enforced_chain.fidelity_curve
    # A chain may terminate early (collapse), so give each curve its own
    # x-axis instead of assuming both ran the full num_iterations.
    b_iters = list(range(len(b_fidelities)))
    e_iters = list(range(len(e_fidelities)))
    common = min(len(b_fidelities), len(e_fidelities))

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 5))

    # Left panel: fidelity over iterations.
    ax1.plot(b_iters, b_fidelities, 'o-', label='Baseline (no enforcement)',
             color='#dc2626', linewidth=2.5, markersize=7)
    ax1.plot(e_iters, e_fidelities, 's-', label='Enforced (commitment gate)',
             color='#16a34a', linewidth=2.5, markersize=7)
    ax1.axhline(y=0.6, linestyle='--', color='#9ca3af', alpha=0.6, label='Threshold (0.6)')
    # Shade only over the prefix both curves share.
    ax1.fill_between(range(common), b_fidelities[:common], e_fidelities[:common],
                     alpha=0.15, color='#16a34a')
    ax1.set_xlabel('Iteration', fontsize=12)
    ax1.set_ylabel('Min-Aggregated Fidelity', fontsize=12)
    ax1.set_title('Commitment Fidelity Over Recursive Compression', fontsize=13, fontweight='bold')
    ax1.legend(fontsize=10, loc='lower left')
    ax1.grid(True, alpha=0.2)
    ax1.set_ylim([-0.05, 1.05])
    ax1.set_xlim([0, num_iterations])

    # Right panel: drift (complement of fidelity) accumulation.
    b_drifts = [1.0 - f for f in b_fidelities]
    e_drifts = [1.0 - f for f in e_fidelities]
    ax2.plot(b_iters, b_drifts, 'o-', label='Baseline drift',
             color='#dc2626', linewidth=2.5, markersize=7)
    ax2.plot(e_iters, e_drifts, 's-', label='Enforced drift',
             color='#16a34a', linewidth=2.5, markersize=7)
    ax2.fill_between(range(common), e_drifts[:common], b_drifts[:common],
                     alpha=0.15, color='#dc2626')
    ax2.set_xlabel('Iteration', fontsize=12)
    ax2.set_ylabel('Semantic Drift (1 - Fidelity)', fontsize=12)
    ax2.set_title('Commitment Drift Accumulation', fontsize=13, fontweight='bold')
    ax2.legend(fontsize=10, loc='upper left')
    ax2.grid(True, alpha=0.2)
    ax2.set_ylim([-0.05, 1.05])
    ax2.set_xlim([0, num_iterations])

    plt.tight_layout()

    # Per-iteration text trace; zip truncates to the shorter chain.
    trace_lines = ["### Text At Each Iteration\n"]
    trace_lines.append("**Original:**\n")
    trace_lines.append(f"> {text}\n")

    for i, (b_rec, e_rec) in enumerate(zip(baseline_chain.records, enforced_chain.records)):
        trace_lines.append(f"\n**Iteration {i+1}:**")
        trace_lines.append(f"- 🔴 Baseline: `{b_rec.text_preview}`")
        b_detail = b_rec.fidelity_detail
        trace_lines.append(f"  Fidelity: {b_rec.fidelity:.3f} (J={b_detail.get('jaccard', 0):.2f} C={b_detail.get('cosine', 0):.2f} N={b_detail.get('nli_proxy', 0):.2f})")
        trace_lines.append(f"- 🟢 Enforced: `{e_rec.text_preview}`")
        e_detail = e_rec.fidelity_detail
        trace_lines.append(f"  Fidelity: {e_rec.fidelity:.3f} (J={e_detail.get('jaccard', 0):.2f} C={e_detail.get('cosine', 0):.2f} N={e_detail.get('nli_proxy', 0):.2f})")

    # Summary table. Note {gap:+.3f}: the gap can be negative, and the
    # original hard-coded "+" prefix would have rendered e.g. "+-0.050".
    final_b = baseline_chain.final_fidelity
    final_e = enforced_chain.final_fidelity
    gap = final_e - final_b

    summary = f"""## Results

| | Baseline | Enforced | Gap |
|---|---|---|---|
| **Final Fidelity** | {final_b:.3f} | {final_e:.3f} | **{gap:+.3f}** |
| **Commitments Surviving** | {baseline_chain.records[-1].commitments_found}/{len(commitments)} | {enforced_chain.records[-1].commitments_found}/{len(commitments)} | |
| **Collapse Detected** | {'⚠️ Yes' if baseline_chain.collapse_detected else 'No'} | {'⚠️ Yes' if enforced_chain.collapse_detected else 'No'} | |

{'✅ **Conservation law validated**: enforcement preserves commitments that baseline destroys.' if gap > 0.1 else '⚠️ Gap is small — try more iterations or higher drift rate.'}

*Scoring: min(Jaccard, Cosine, NLI proxy) — all three must pass.*
"""

    return summary, fig, "\n".join(trace_lines), json.dumps({
        'baseline': baseline_chain.to_dict(),
        'enforced': enforced_chain.to_dict(),
    }, indent=2)
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
# ===================================================================
|
| 161 |
+
# DEMO SIGNALS
|
| 162 |
+
# ===================================================================
|
| 163 |
+
|
| 164 |
+
# Preset input signals for the UI dropdown. Keys are the dropdown labels;
# each value mixes commitment-bearing sentences (must/shall/cannot) with
# ambient non-commitment content to stress the extractor and compressor.
DEMOS = {
    "Contract (payment + penalty)": "You must pay $100 by Friday if the deal closes. The weather forecast suggests rain, so plan accordingly. Late payments will incur a 5% penalty.",
    "Lease (prohibition + obligation)": "The tenant shall not sublet the premises without written consent from the landlord. The building was constructed in 1952 and features original hardwood floors. You must provide 30 days written notice before vacating.",
    "Security (requirements + prohibition)": "All passwords must be at least 12 characters long and shall include at least one special character. The user interface was recently redesigned for better accessibility. Passwords must not contain the username or common dictionary words.",
    "Composite (4 commitments)": "The system must encrypt all data at rest using AES-256 or stronger. Our cloud provider offers competitive pricing. Data in transit shall be protected with TLS 1.3. You must not store encryption keys alongside encrypted data. Annual security audits are required for all systems handling sensitive information.",
    "Medical (obligations + prohibition)": "Patients must fast for 12 hours before the blood draw. The clinic has recently upgraded its diagnostic equipment. Results shall be communicated to the patient within 5 business days. You must not discontinue prescribed medications without consulting your physician.",
}
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
# ===================================================================
|
| 174 |
+
# GRADIO UI
|
| 175 |
+
# ===================================================================
|
| 176 |
+
|
| 177 |
+
# Gradio UI: two-column layout (input + controls on the left, extraction
# results on the right), plots and collapsible detail panes below.
with gr.Blocks(
    title="⚖️ Commitment Conservation Harness",
) as demo:

    gr.Markdown("""
# ⚖️ Commitment Conservation — Falsification Instrument

**Paste text with commitments. Watch what survives recursive compression.**

Baseline systems lose commitments through modal softening, quantity erosion, and conversational drift.
Enforcement systems preserve them. This instrument measures the gap.

📄 *A Conservation Law for Commitment in Language Under Transformative Compression and Recursive Application* — D.J.M., Ello Cello LLC
""")

    with gr.Row():
        with gr.Column(scale=2):
            # Free-text input; pre-filled with a preset so the demo works
            # on first click without any typing.
            signal_input = gr.Textbox(
                label="Input Signal",
                placeholder="Enter text containing commitments (must, shall, cannot, required, always, never)...",
                lines=5,
                value=DEMOS["Contract (payment + penalty)"]
            )

            preset_dropdown = gr.Dropdown(
                choices=list(DEMOS.keys()),
                label="Or select a preset:",
                value="Contract (payment + penalty)"
            )

            with gr.Row():
                iterations_slider = gr.Slider(
                    minimum=3, maximum=10, step=1, value=10,
                    label="Iterations"
                )
                drift_slider = gr.Slider(
                    minimum=0.1, maximum=0.8, step=0.1, value=0.4,
                    label="Drift Rate (simulated LLM noise)"
                )

            extract_btn = gr.Button("🔍 Extract Commitments", variant="secondary")
            run_btn = gr.Button("🔬 Run Falsification Protocol", variant="primary", size="lg")

        with gr.Column(scale=1):
            # Populated by extract_and_display.
            commitments_display = gr.Markdown(label="Extracted Commitments")
            canonical_display = gr.Markdown(label="Canonical Forms")

    # Populated by run_comparison.
    summary_display = gr.Markdown(label="Results")
    results_plot = gr.Plot(label="Fidelity Curves")

    with gr.Accordion("📝 Text Trace (what happens at each iteration)", open=False):
        trace_display = gr.Markdown()

    with gr.Accordion("📊 Raw JSON (lineage chains)", open=False):
        json_display = gr.Code(language="json", label="Protocol Receipt")

    gr.Markdown("""
---
### How It Works

1. **Extract**: Modal-pattern sieve identifies commitments (obligations, prohibitions, constraints)
2. **Compress**: Text is recursively compressed through a lossy channel simulating LLM behavior
3. **Measure**: Fidelity scored as min(Jaccard, Cosine, NLI) — all three must pass
4. **Compare**: Baseline (no awareness) vs Enforced (commitment-preserving selection)

The **lossy backend** simulates real LLM drift: modal softening (*must → should → maybe*),
quantity erosion (*$100 → "the amount"*), and sentence dropping. Deterministic and seeded
for reproducibility. For results with real models, run the harness locally with `--backend bart`.

**This is a measurement instrument, not a product demo.** Paste your own contracts,
API specs, medical protocols, legal clauses — anything with commitments — and see
whether they survive.

---
⚖️ MO§ES™ is a trademark of Ello Cello LLC. © 2026 Ello Cello LLC. All rights reserved.
""")

    # Event handlers
    # Selecting a preset overwrites the input textbox with its signal.
    preset_dropdown.change(
        fn=lambda name: DEMOS[name],
        inputs=[preset_dropdown],
        outputs=[signal_input]
    )

    extract_btn.click(
        fn=extract_and_display,
        inputs=[signal_input],
        outputs=[commitments_display, canonical_display]
    )

    # Running the protocol first refreshes the extraction panes, then
    # chains into the comparison so both views stay in sync.
    run_btn.click(
        fn=extract_and_display,
        inputs=[signal_input],
        outputs=[commitments_display, canonical_display]
    ).then(
        fn=run_comparison,
        inputs=[signal_input, iterations_slider, drift_slider],
        outputs=[summary_display, results_plot, trace_display, json_display]
    )


if __name__ == "__main__":
    demo.launch()
|
corpus/canonical_corpus.json
CHANGED
|
@@ -1,24 +1,157 @@
|
|
| 1 |
{
|
|
|
|
|
|
|
|
|
|
| 2 |
"canonical_signals": [
|
| 3 |
-
{
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
{
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
{
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
{
|
| 22 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
]
|
| 24 |
-
}
|
|
|
|
| 1 |
{
|
| 2 |
+
"version": "2.0.0",
|
| 3 |
+
"description": "Pinned test corpus for the falsification protocol (Section 7). Each signal contains at least one commitment mixed with non-commitment ambient content. Multi-sentence signals stress the compressor meaningfully.",
|
| 4 |
+
"categories": ["contractual", "technical", "regulatory", "procedural", "composite"],
|
| 5 |
"canonical_signals": [
|
| 6 |
+
{
|
| 7 |
+
"category": "contractual",
|
| 8 |
+
"signal": "You must pay $100 by Friday if the deal closes. The weather forecast suggests rain, so plan accordingly. Late payments will incur a 5% penalty.",
|
| 9 |
+
"expected_commitments": 2,
|
| 10 |
+
"notes": "Two obligations mixed with ambient weather information."
|
| 11 |
+
},
|
| 12 |
+
{
|
| 13 |
+
"category": "contractual",
|
| 14 |
+
"signal": "The tenant shall not sublet the premises without written consent from the landlord. The building was constructed in 1952 and features original hardwood floors. You must provide 30 days written notice before vacating.",
|
| 15 |
+
"expected_commitments": 2,
|
| 16 |
+
"notes": "Prohibition + obligation with ambient property description."
|
| 17 |
+
},
|
| 18 |
+
{
|
| 19 |
+
"category": "contractual",
|
| 20 |
+
"signal": "Parties shall comply with all applicable federal and state laws. This agreement was drafted in Rochester, New York. The licensee must not reverse-engineer any component of the software.",
|
| 21 |
+
"expected_commitments": 2,
|
| 22 |
+
"notes": "Obligation + prohibition with ambient jurisdiction info."
|
| 23 |
+
},
|
| 24 |
+
{
|
| 25 |
+
"category": "contractual",
|
| 26 |
+
"signal": "The contractor must deliver all materials by December 15th. Our project has been well-received by stakeholders so far. Payment shall not exceed the budgeted amount of $50,000 without prior written approval.",
|
| 27 |
+
"expected_commitments": 2,
|
| 28 |
+
"notes": "Obligation with specific date/amount + prohibition with threshold."
|
| 29 |
+
},
|
| 30 |
+
{
|
| 31 |
+
"category": "contractual",
|
| 32 |
+
"signal": "Employees are required to submit expense reports within 14 days of travel. The company picnic was a great success this year. All receipts must be itemized and attached to the report.",
|
| 33 |
+
"expected_commitments": 2,
|
| 34 |
+
"notes": "Two obligations with specific timeframe mixed with ambient."
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"category": "technical",
|
| 38 |
+
"signal": "This function must return an integer between 0 and 100 inclusive. The implementation uses a recursive algorithm for efficiency. You shall not pass negative values as input parameters.",
|
| 39 |
+
"expected_commitments": 2,
|
| 40 |
+
"notes": "Return type constraint + input prohibition with ambient implementation note."
|
| 41 |
+
},
|
| 42 |
+
{
|
| 43 |
+
"category": "technical",
|
| 44 |
+
"signal": "The API must handle up to 1000 concurrent requests without degradation. Our benchmarks show average response times of 45ms. The system shall log all failed authentication attempts to the security audit trail.",
|
| 45 |
+
"expected_commitments": 2,
|
| 46 |
+
"notes": "Performance requirement + logging mandate with ambient benchmark data."
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"category": "technical",
|
| 50 |
+
"signal": "All passwords must be at least 12 characters long and shall include at least one special character. The user interface was recently redesigned for better accessibility. Passwords must not contain the username or common dictionary words.",
|
| 51 |
+
"expected_commitments": 2,
|
| 52 |
+
"notes": "Password requirements (obligation + prohibition) with ambient UI note."
|
| 53 |
+
},
|
| 54 |
+
{
|
| 55 |
+
"category": "technical",
|
| 56 |
+
"signal": "Code must adhere to PEP 8 style guidelines and pass all linting checks before merge. The team has been using Python since 2019. Pull requests shall not be merged without at least two approving reviews.",
|
| 57 |
+
"expected_commitments": 2,
|
| 58 |
+
"notes": "Code standard obligation + merge prohibition with ambient team info."
|
| 59 |
+
},
|
| 60 |
+
{
|
| 61 |
+
"category": "technical",
|
| 62 |
+
"signal": "The database schema must support Unicode characters across all text fields. Migration scripts have been tested on staging environments. You must not modify production tables without creating a rollback script first.",
|
| 63 |
+
"expected_commitments": 2,
|
| 64 |
+
"notes": "Schema requirement + modification prohibition with ambient testing note."
|
| 65 |
+
},
|
| 66 |
+
{
|
| 67 |
+
"category": "regulatory",
|
| 68 |
+
"signal": "Vehicles must stop at red lights and yield to pedestrians in marked crosswalks. The intersection was redesigned last summer to improve traffic flow. Drivers shall not exceed the posted speed limit under any circumstances.",
|
| 69 |
+
"expected_commitments": 2,
|
| 70 |
+
"notes": "Traffic obligations + prohibition with ambient infrastructure note."
|
| 71 |
+
},
|
| 72 |
+
{
|
| 73 |
+
"category": "regulatory",
|
| 74 |
+
"signal": "All clinical trial participants must sign the informed consent form prior to enrollment. The study has attracted significant interest from the research community. Researchers are prohibited from sharing individual patient data outside the approved protocol.",
|
| 75 |
+
"expected_commitments": 2,
|
| 76 |
+
"notes": "Consent obligation + data prohibition with ambient interest note."
|
| 77 |
+
},
|
| 78 |
+
{
|
| 79 |
+
"category": "regulatory",
|
| 80 |
+
"signal": "Employers must provide a safe working environment free from recognized hazards. Our office recently won an architecture award. Employees shall report any safety concerns to their supervisor immediately.",
|
| 81 |
+
"expected_commitments": 2,
|
| 82 |
+
"notes": "Safety obligation + reporting mandate with ambient office note."
|
| 83 |
+
},
|
| 84 |
+
{
|
| 85 |
+
"category": "regulatory",
|
| 86 |
+
"signal": "Financial institutions must verify customer identity before opening any new account. The banking sector has seen significant digital transformation recently. Suspicious transactions exceeding $10,000 must be reported to the relevant authorities within 24 hours.",
|
| 87 |
+
"expected_commitments": 2,
|
| 88 |
+
"notes": "KYC obligation + reporting obligation with specific threshold/timeframe."
|
| 89 |
+
},
|
| 90 |
+
{
|
| 91 |
+
"category": "regulatory",
|
| 92 |
+
"signal": "Food handlers must wash hands before preparing or serving any food items. The cafeteria menu changes seasonally to feature local ingredients. Raw meat shall not be stored above ready-to-eat foods in any refrigeration unit.",
|
| 93 |
+
"expected_commitments": 2,
|
| 94 |
+
"notes": "Hygiene obligation + storage prohibition with ambient menu note."
|
| 95 |
+
},
|
| 96 |
+
{
|
| 97 |
+
"category": "procedural",
|
| 98 |
+
"signal": "You must wear a helmet while cycling on public roads at all times. Cycling has become increasingly popular in urban areas. Children under 12 cannot ride without adult supervision on streets with speed limits above 25 mph.",
|
| 99 |
+
"expected_commitments": 2,
|
| 100 |
+
"notes": "Equipment obligation + age restriction with ambient popularity note."
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"category": "procedural",
|
| 104 |
+
"signal": "All visitors must sign in at the front desk and obtain a visitor badge before entering the facility. The lobby features artwork from local artists. Visitors shall not access restricted areas without an authorized escort.",
|
| 105 |
+
"expected_commitments": 2,
|
| 106 |
+
"notes": "Check-in obligation + access prohibition with ambient decor note."
|
| 107 |
+
},
|
| 108 |
+
{
|
| 109 |
+
"category": "procedural",
|
| 110 |
+
"signal": "Students must complete all prerequisite courses before enrolling in advanced seminars. The university library has an extensive collection of rare manuscripts. Academic integrity violations shall result in immediate disciplinary review.",
|
| 111 |
+
"expected_commitments": 2,
|
| 112 |
+
"notes": "Prerequisite obligation + consequence mandate with ambient library note."
|
| 113 |
+
},
|
| 114 |
+
{
|
| 115 |
+
"category": "procedural",
|
| 116 |
+
"signal": "Pilots must complete a pre-flight checklist before every departure. Modern aircraft incorporate sophisticated avionics systems. The aircraft shall not take off if any critical system shows a warning indicator.",
|
| 117 |
+
"expected_commitments": 2,
|
| 118 |
+
"notes": "Checklist obligation + departure prohibition with ambient tech note."
|
| 119 |
+
},
|
| 120 |
+
{
|
| 121 |
+
"category": "procedural",
|
| 122 |
+
"signal": "Laboratory personnel must wear appropriate protective equipment including goggles and gloves. The lab was renovated last year with improved ventilation. No food or drink is permitted in the laboratory at any time.",
|
| 123 |
+
"expected_commitments": 2,
|
| 124 |
+
"notes": "PPE obligation + food prohibition with ambient renovation note."
|
| 125 |
+
},
|
| 126 |
+
{
|
| 127 |
+
"category": "composite",
|
| 128 |
+
"signal": "The system must encrypt all data at rest using AES-256 or stronger. Our cloud provider offers competitive pricing. Data in transit shall be protected with TLS 1.3. You must not store encryption keys alongside encrypted data. Annual security audits are required for all systems handling sensitive information.",
|
| 129 |
+
"expected_commitments": 4,
|
| 130 |
+
"notes": "High commitment density: 4 commitments mixed with ambient."
|
| 131 |
+
},
|
| 132 |
+
{
|
| 133 |
+
"category": "composite",
|
| 134 |
+
"signal": "Contractors must carry liability insurance of at least $1 million. The project timeline has been extended due to favorable conditions. Work shall not commence before the site safety inspection is complete. All workers must attend the mandatory safety briefing. The construction site has excellent accessibility.",
|
| 135 |
+
"expected_commitments": 3,
|
| 136 |
+
"notes": "Multiple obligations with specific thresholds and conditions."
|
| 137 |
+
},
|
| 138 |
+
{
|
| 139 |
+
"category": "composite",
|
| 140 |
+
"signal": "Patients must fast for 12 hours before the blood draw. The clinic has recently upgraded its diagnostic equipment. Results shall be communicated to the patient within 5 business days. You must not discontinue prescribed medications without consulting your physician. Our patient satisfaction scores are among the highest in the region.",
|
| 141 |
+
"expected_commitments": 3,
|
| 142 |
+
"notes": "Medical obligations and prohibition with ambient satisfaction note."
|
| 143 |
+
},
|
| 144 |
+
{
|
| 145 |
+
"category": "composite",
|
| 146 |
+
"signal": "All imports must comply with customs regulations and tariff schedules. International trade volumes have increased significantly this quarter. Restricted goods shall not be transported without proper documentation. The shipper must declare the full value of all goods at the port of entry. Our logistics team recently expanded to three new regions.",
|
| 147 |
+
"expected_commitments": 3,
|
| 148 |
+
"notes": "Trade obligations and prohibition with ambient business update."
|
| 149 |
+
},
|
| 150 |
+
{
|
| 151 |
+
"category": "composite",
|
| 152 |
+
"signal": "Users must agree to the terms of service before creating an account. The platform has grown to over 10 million active users. You shall not use the service for any unlawful purpose. Content that violates community guidelines must be reported immediately. We are always working to improve the user experience.",
|
| 153 |
+
"expected_commitments": 3,
|
| 154 |
+
"notes": "Platform obligations and prohibition with ambient growth note."
|
| 155 |
+
}
|
| 156 |
]
|
| 157 |
+
}
|
src/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Commitment Conservation Harness v2.0
|
| 3 |
+
Reference implementation of the falsification protocol from Section 7.
|
| 4 |
+
|
| 5 |
+
Single pipeline. No stubs. No placeholders. The instrument must work
|
| 6 |
+
or the falsification protocol is theater.
|
| 7 |
+
"""
|
| 8 |
+
__version__ = "2.0.0"
|
src/compression.py
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
compression.py — Pluggable Compression Backends
|
| 3 |
+
|
| 4 |
+
The conservation law doesn't depend on WHICH compressor is used.
|
| 5 |
+
The compressor is the channel. The law is about what survives the channel.
|
| 6 |
+
|
| 7 |
+
Three backends:
|
| 8 |
+
- 'extractive': Deterministic sentence ranking (no model, fast, for testing)
|
| 9 |
+
- 'bart': facebook/bart-large-cnn or distilbart (for HuggingFace Space)
|
| 10 |
+
- 'api': External LLM via API (GPT-4, Claude, etc.)
|
| 11 |
+
|
| 12 |
+
All backends implement the same interface:
|
| 13 |
+
compress(text: str, target_ratio: float) -> str
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
import re
|
| 17 |
+
from typing import Optional
|
| 18 |
+
from abc import ABC, abstractmethod
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class CompressionBackend(ABC):
    """Abstract compression backend.

    Every backend implements compress(text, target_ratio) -> str and
    exposes a short human-readable `name` identifier.
    """

    @abstractmethod
    def compress(self, text: str, target_ratio: float = 0.5) -> str:
        """
        Compress text to approximately target_ratio of original length.
        target_ratio: float in (0, 1), e.g. 0.5 = compress to half length.
        Returns compressed text.
        """
        pass

    @property
    @abstractmethod
    def name(self) -> str:
        """Short identifier for this backend (e.g. 'extractive')."""
        pass
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class ExtractiveBackend(CompressionBackend):
    """
    Deterministic extractive compression. No model required.

    Each clause is scored by information density (unique words / total
    words) plus a flat bonus when a modal operator is present, and the
    top-scoring clauses are kept until the word budget is filled.

    This is NOT a good compressor. It's a PREDICTABLE compressor.
    That's the point: we can verify the pipeline works before adding
    stochastic models.
    """

    @property
    def name(self) -> str:
        return 'extractive'

    def compress(self, text: str, target_ratio: float = 0.5) -> str:
        clauses = self._split_sentences(text)
        if len(clauses) <= 1:
            return text

        # Word budget for the compressed output (at least one word).
        budget = max(1, int(len(text.split()) * target_ratio))

        # Score each clause: density plus a modal-operator bonus,
        # because commitment-bearing clauses should survive.
        ranked = []
        for position, clause in enumerate(clauses):
            tokens = clause.lower().split()
            if not tokens:
                continue
            density = len(set(tokens)) / len(tokens)
            modal_bonus = 0.5 if any(
                marker in clause.lower()
                for marker in ['must', 'shall', 'cannot', 'required', 'always', 'never']
            ) else 0.0
            ranked.append((density + modal_bonus, position, clause))

        # Highest score first; stable sort keeps original order on ties.
        ranked.sort(key=lambda item: item[0], reverse=True)

        # Greedily keep clauses until the budget is met (always keep one).
        kept = []
        used = 0
        for _, position, clause in ranked:
            size = len(clause.split())
            if used + size <= budget or not kept:
                kept.append((position, clause))
                used += size
            if used >= budget:
                break

        # Emit in the original document order.
        kept.sort(key=lambda item: item[0])
        return ' '.join(clause for _, clause in kept)

    def _split_sentences(self, text: str):
        """Split on sentence boundaries and semicolons."""
        return [chunk.strip()
                for chunk in re.split(r'(?<=[.!?;])\s+', text)
                if chunk.strip()]
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
class BartBackend(CompressionBackend):
    """
    BART-based abstractive compression.

    Lazy-loads the HuggingFace summarization pipeline on first use so
    importing this module never triggers a model download.
    """

    def __init__(self, model_name: str = "sshleifer/distilbart-cnn-12-6"):
        """
        Args:
            model_name: HuggingFace model identifier for the summarizer.
        """
        self._model_name = model_name
        self._summarizer = None  # created lazily by _load()

    @property
    def name(self) -> str:
        return f'bart:{self._model_name}'

    def _load(self):
        """Instantiate the summarization pipeline once (CPU only)."""
        if self._summarizer is None:
            from transformers import pipeline
            self._summarizer = pipeline(
                "summarization",
                model=self._model_name,
                device=-1  # CPU
            )

    def compress(self, text: str, target_ratio: float = 0.5) -> str:
        """
        Abstractively compress `text` to roughly `target_ratio` of its length.

        Returns the input unchanged if the summarizer raises (deliberate
        best-effort fallback, e.g. when the text is too short to summarize).
        """
        self._load()

        # Estimate target max_length in tokens (~1.3 tokens per word)
        word_count = len(text.split())
        max_length = max(10, int(word_count * target_ratio * 1.3))
        min_length = max(5, max_length // 4)

        try:
            result = self._summarizer(
                text,
                max_length=max_length,
                min_length=min_length,
                do_sample=False
            )
            return result[0]['summary_text']
        except Exception:
            # Fixed: the exception was previously bound to an unused name.
            # Best-effort fallback: if the model rejects the input
            # (typically too short for summarization), return it as-is.
            return text
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
class BackTranslationBackend(CompressionBackend):
    """
    Paraphrase via back-translation (en→de→en).
    This is a TRANSFORMATION, not compression per se,
    but it's the second stress in the dual-stress regime.
    """

    def __init__(self):
        # Marian translation pipelines, built lazily on first use.
        self._en_de = None
        self._de_en = None

    @property
    def name(self) -> str:
        return 'back_translation'

    def _load(self):
        """Build both translation pipelines once (CPU only)."""
        if self._en_de is not None:
            return
        from transformers import pipeline
        self._en_de = pipeline("translation", model="Helsinki-NLP/opus-mt-en-de", device=-1)
        self._de_en = pipeline("translation", model="Helsinki-NLP/opus-mt-de-en", device=-1)

    def compress(self, text: str, target_ratio: float = 0.5) -> str:
        """Back-translate. target_ratio is ignored (paraphrase preserves length)."""
        self._load()
        german = self._en_de(text, max_length=512, do_sample=False)[0]['translation_text']
        round_trip = self._de_en(german, max_length=512, do_sample=False)[0]['translation_text']
        return round_trip
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
# ---------------------------------------------------------------------------
# Factory
# ---------------------------------------------------------------------------

# Registry of backends that can be constructed without extra imports.
_BACKENDS = {
    'extractive': ExtractiveBackend,
    'bart': BartBackend,
    'back_translation': BackTranslationBackend,
}

def get_backend(name: str = 'extractive', **kwargs) -> CompressionBackend:
    """Get a compression backend by name."""
    # Lazy import lossy backends to avoid circular imports
    if name in ('lossy', 'lossy_enforced'):
        from .lossy import LossyBackend, LossyEnforcedBackend
        backend_cls = LossyBackend if name == 'lossy' else LossyEnforcedBackend
        return backend_cls(**kwargs)

    if name not in _BACKENDS:
        available = list(_BACKENDS.keys()) + ['lossy', 'lossy_enforced']
        raise ValueError(f"Unknown backend '{name}'. Available: {available}")
    return _BACKENDS[name](**kwargs)
|
src/enforcement.py
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
enforcement.py — Commitment Conservation Gate
|
| 3 |
+
|
| 4 |
+
The gate is an architectural component, not a post-hoc patch.
|
| 5 |
+
It sits between the compressor output and the pipeline output.
|
| 6 |
+
|
| 7 |
+
Protocol:
|
| 8 |
+
1. Extract commitments from ORIGINAL signal (once, at entry)
|
| 9 |
+
2. Compress the signal
|
| 10 |
+
3. Extract commitments from compressed output
|
| 11 |
+
4. Score fidelity
|
| 12 |
+
5. IF fidelity >= threshold: PASS (output compressed)
|
| 13 |
+
6. IF fidelity < threshold AND retries remain:
|
| 14 |
+
- Re-inject missing commitments into input
|
| 15 |
+
- Re-compress (retry)
|
| 16 |
+
7. IF retries exhausted: FALLBACK
|
| 17 |
+
- Return best attempt seen so far
|
| 18 |
+
- Log the failure
|
| 19 |
+
|
| 20 |
+
This is NOT "append missing text to the end."
|
| 21 |
+
That was the v1 bug. Appended text gets stripped on the next
|
| 22 |
+
compression cycle because the summarizer treats it as low-salience.
|
| 23 |
+
|
| 24 |
+
Instead: re-inject commitments into the INPUT before re-compression,
|
| 25 |
+
structured as high-salience prefix. The compressor sees them as
|
| 26 |
+
the most important content on retry.
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
from typing import Set, Optional, Tuple
|
| 30 |
+
from dataclasses import dataclass, field
|
| 31 |
+
from .extraction import extract_commitment_texts
|
| 32 |
+
from .fidelity import fidelity_score, fidelity_breakdown
|
| 33 |
+
from .compression import CompressionBackend
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@dataclass
class GateResult:
    """Result of passing a signal through the commitment gate.

    Commitment sets hold canonical commitment strings as produced by
    extract_commitment_texts().
    """
    output: str  # The final compressed text
    passed: bool  # Whether fidelity threshold was met
    fidelity: float  # Final fidelity score
    fidelity_detail: dict  # Component scores
    attempts: int  # Number of compression attempts
    original_commitments: Set[str]  # Commitments from original signal
    output_commitments: Set[str]  # Commitments in final output
    missing_commitments: Set[str]  # Commitments that were lost
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class CommitmentGate:
    """
    Commitment conservation gate.

    Wraps a compression backend and enforces commitment preservation
    through a reject-and-retry loop: missing commitments are re-injected
    into the INPUT as a high-salience prefix before each retry (never
    appended to the output, which was the v1 bug).
    """

    def __init__(
        self,
        backend: CompressionBackend,
        threshold: float = 0.6,
        max_retries: int = 3,
    ):
        """
        Args:
            backend: The compression backend to wrap
            threshold: Minimum fidelity score to pass (0.0 to 1.0)
            max_retries: Maximum re-injection attempts before fallback
        """
        self.backend = backend
        self.threshold = threshold
        self.max_retries = max_retries

    def compress(
        self,
        text: str,
        original_commitments: Set[str],
        target_ratio: float = 0.5,
    ) -> GateResult:
        """
        Compress text through the commitment gate.

        Args:
            text: Text to compress (may be original or already-processed)
            original_commitments: The commitments that MUST be preserved
                (extracted once from the original signal)
            target_ratio: Compression target

        Returns:
            GateResult with output text, pass/fail, fidelity scores
        """
        # Best attempt tracking. `None` sentinels fix two bugs in the
        # previous version: (1) when max_retries < 1 the loop never ran
        # and `attempt` was unbound at the fallback (NameError); (2) when
        # every attempt scored exactly 0.0 the fallback returned the
        # UNCOMPRESSED input with an empty fidelity_detail dict.
        best_output = None
        best_fidelity = 0.0
        best_detail = None
        attempts_made = 0

        current_input = text

        for attempt in range(1, self.max_retries + 1):
            attempts_made = attempt

            # Compress
            compressed = self.backend.compress(current_input, target_ratio)

            # Extract and score
            output_commitments = extract_commitment_texts(compressed)
            detail = fidelity_breakdown(original_commitments, output_commitments)
            score = detail['min_aggregated']

            # Track the best attempt (the first attempt always seeds it).
            if best_detail is None or score > best_fidelity:
                best_output = compressed
                best_fidelity = score
                best_detail = detail

            # Check threshold — success path
            if score >= self.threshold:
                return GateResult(
                    output=compressed,
                    passed=True,
                    fidelity=score,
                    fidelity_detail=detail,
                    attempts=attempt,
                    original_commitments=original_commitments,
                    output_commitments=output_commitments,
                    missing_commitments=original_commitments - output_commitments,
                )

            # Re-inject: structure missing commitments as high-salience prefix.
            # Placing them FIRST makes them highest salience for the compressor.
            missing = original_commitments - output_commitments
            if missing and attempt < self.max_retries:
                constraint_block = '. '.join(sorted(missing)) + '. '
                current_input = constraint_block + compressed
            else:
                # No missing or last attempt — can't improve
                break

        # Fallback: return the best attempt seen so far.
        if best_output is None:
            # max_retries < 1: no compression attempt was made; pass through.
            best_output = text
        output_commitments = extract_commitment_texts(best_output)
        if best_detail is None:
            # Score the pass-through so fidelity_detail is always well-formed.
            best_detail = fidelity_breakdown(original_commitments, output_commitments)
            best_fidelity = best_detail['min_aggregated']
        return GateResult(
            output=best_output,
            passed=False,
            fidelity=best_fidelity,
            fidelity_detail=best_detail,
            attempts=attempts_made,
            original_commitments=original_commitments,
            output_commitments=output_commitments,
            missing_commitments=original_commitments - output_commitments,
        )
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def baseline_compress(
    backend: CompressionBackend,
    text: str,
    target_ratio: float = 0.5,
) -> str:
    """
    Baseline compression — no gate, no enforcement.
    The backend output is returned exactly as produced.
    """
    compressed = backend.compress(text, target_ratio)
    return compressed
|
src/extraction.py
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
extraction.py — Modal-Pattern Sieve for Commitment Extraction
|
| 3 |
+
|
| 4 |
+
Implements the commitment extractor per paper Definition 2.4 and Figure 4.
|
| 5 |
+
A commitment is a clause containing a deontic or alethic modal operator
|
| 6 |
+
that creates a testable obligation, prohibition, or constraint.
|
| 7 |
+
|
| 8 |
+
Three-stage sieve:
|
| 9 |
+
1. Sentence segmentation (regex — deterministic, no model)
|
| 10 |
+
2. Modal operator detection with type classification
|
| 11 |
+
3. Commitment normalization (canonical form for comparison)
|
| 12 |
+
|
| 13 |
+
Design principle: this is the MEASUREMENT INSTRUMENT.
|
| 14 |
+
It must be deterministic and precise. No ML models here.
|
| 15 |
+
False positives inflate scores. False negatives hide drift.
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
import re
|
| 19 |
+
from dataclasses import dataclass, field
|
| 20 |
+
from typing import List, Set, Optional, Tuple
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# ---------------------------------------------------------------------------
|
| 24 |
+
# Modal operator patterns — ordered longest-first to match multi-word first
|
| 25 |
+
# ---------------------------------------------------------------------------
|
| 26 |
+
|
| 27 |
+
# Prohibitions (check BEFORE obligations — "must not" before "must").
# Each entry is (compiled regex, canonical operator label). Patterns are
# applied with .search(), so a match anywhere in the clause counts.
PROHIBITION_PATTERNS = [
    (re.compile(r'\bmust\s+not\b', re.I), 'must not'),
    (re.compile(r'\bshall\s+not\b', re.I), 'shall not'),
    (re.compile(r'\bwill\s+not\b', re.I), 'will not'),
    (re.compile(r'\bcan\s*not\b', re.I), 'cannot'),  # \s* matches both "cannot" and "can not"
    (re.compile(r'\bmay\s+not\b', re.I), 'may not'),
    (re.compile(r'\bmust\s+never\b', re.I), 'must never'),
    (re.compile(r'\bshall\s+never\b', re.I), 'shall never'),
    (re.compile(r'\bis\s+prohibited\s+from\b', re.I), 'is prohibited from'),
    (re.compile(r'\bare\s+prohibited\s+from\b', re.I), 'are prohibited from'),
    (re.compile(r'\bis\s+forbidden\s+to\b', re.I), 'is forbidden to'),
    (re.compile(r'\bare\s+forbidden\s+to\b', re.I), 'are forbidden to'),
    (re.compile(r'\bdo\s+not\b', re.I), 'do not'),
    (re.compile(r'\bdoes\s+not\b', re.I), 'does not'),
    (re.compile(r'\bno\s+\w+\s+(?:or|nor)\s+\w+\b', re.I), 'no X or Y'),  # "No food or drink"
]

# Obligations (deontic necessity)
OBLIGATION_PATTERNS = [
    (re.compile(r'\bmust\b', re.I), 'must'),
    (re.compile(r'\bshall\b', re.I), 'shall'),
    (re.compile(r'\bis\s+required\s+to\b', re.I), 'is required to'),
    (re.compile(r'\bare\s+required\s+to\b', re.I), 'are required to'),
    (re.compile(r'\bis\s+obligated\s+to\b', re.I), 'is obligated to'),
    (re.compile(r'\bare\s+obligated\s+to\b', re.I), 'are obligated to'),
    (re.compile(r'\bhas\s+to\b', re.I), 'has to'),
    (re.compile(r'\bhave\s+to\b', re.I), 'have to'),
    (re.compile(r'\bneeds?\s+to\b', re.I), 'needs to'),
    (re.compile(r'\bis\s+bound\s+to\b', re.I), 'is bound to'),
]

# Constraints (alethic / universal quantification)
CONSTRAINT_PATTERNS = [
    (re.compile(r'\balways\b', re.I), 'always'),
    (re.compile(r'\bnever\b', re.I), 'never'),
    (re.compile(r'\bunder\s+no\s+circumstances?\b', re.I), 'under no circumstances'),
    (re.compile(r'\bwithout\s+exception\b', re.I), 'without exception'),
    (re.compile(r'\bat\s+all\s+times?\b', re.I), 'at all times'),
    (re.compile(r'\bin\s+(?:all|every)\s+cases?\b', re.I), 'in all cases'),
    (re.compile(r'\bis\s+defined\s+as\b', re.I), 'is defined as'),
]

# Conditional prefixes — presence marks a commitment as conditional
# (see has_conditional); does not affect modal classification.
CONDITIONAL_RE = re.compile(
    r'\b(if|when|unless|provided\s+that|in\s+the\s+event\s+that|where|before|after|prior\s+to)\b',
    re.I
)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
# ---------------------------------------------------------------------------
|
| 78 |
+
# Data structures
|
| 79 |
+
# ---------------------------------------------------------------------------
|
| 80 |
+
|
| 81 |
+
@dataclass(frozen=True)
class Commitment:
    """A single extracted commitment. Frozen for use in sets.

    Equality and hashing are defined on the `canonical` form below, not
    on the raw fields, so the same obligation with different whitespace
    or trailing punctuation deduplicates inside sets. The hand-written
    __eq__/__hash__ in the class body take precedence over the ones the
    dataclass machinery would otherwise generate.
    """
    text: str  # The clause text
    modal_type: str  # 'obligation' | 'prohibition' | 'constraint'
    modal_operator: str  # The matched operator
    source_sentence: str  # Original sentence
    is_conditional: bool = False  # True when the clause carries an if/when/unless prefix

    @property
    def canonical(self) -> str:
        """Normalized form for comparison: lowercased, whitespace-collapsed,
        trailing punctuation stripped."""
        t = self.text.strip().lower()
        t = re.sub(r'\s+', ' ', t)  # collapse whitespace
        t = re.sub(r'[.;,!?]+$', '', t)  # strip trailing punct
        return t.strip()

    def __eq__(self, other):
        # Two commitments are equal iff their canonical forms match.
        if not isinstance(other, Commitment):
            return False
        return self.canonical == other.canonical

    def __hash__(self):
        # Must stay consistent with __eq__: hash the canonical form.
        return hash(self.canonical)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
# ---------------------------------------------------------------------------
|
| 108 |
+
# Sentence segmentation — deterministic regex, no model dependency
|
| 109 |
+
# ---------------------------------------------------------------------------
|
| 110 |
+
|
| 111 |
+
def segment_sentences(text: str) -> List[str]:
    """Split text into sentences and sub-clauses (semicolons)."""
    stripped = text.strip()
    if not stripped:
        return []

    # Sentence boundary: terminal punctuation, whitespace, then a capital.
    sentences = re.split(r'(?<=[.!?])\s+(?=[A-Z])', stripped)

    # Semicolon-separated clauses are treated as independent units.
    clauses = []
    for sentence in sentences:
        clauses.extend(part.strip() for part in sentence.split(';') if part.strip())
    return clauses
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
# ---------------------------------------------------------------------------
|
| 131 |
+
# Core extraction
|
| 132 |
+
# ---------------------------------------------------------------------------
|
| 133 |
+
|
| 134 |
+
def classify_clause(clause: str) -> Optional[Tuple[str, str]]:
    """
    Classify a clause by its modal operator.
    Returns (modal_type, operator_text) or None.

    Prohibitions are tried FIRST so that "must not" is never
    mis-classified as the obligation "must".
    """
    ordered_checks = (
        ('prohibition', PROHIBITION_PATTERNS),
        ('obligation', OBLIGATION_PATTERNS),
        ('constraint', CONSTRAINT_PATTERNS),
    )
    for modal_type, patterns in ordered_checks:
        for pattern, operator in patterns:
            if pattern.search(clause):
                return (modal_type, operator)
    return None
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def has_conditional(clause: str) -> bool:
    """Check if a clause contains a conditional prefix."""
    return CONDITIONAL_RE.search(clause) is not None
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def extract_commitments(text: str) -> List[Commitment]:
    """
    Extract all commitments from a text signal.

    This is the modal-pattern sieve (Figure 4):
      1. Segment into sentences/clauses
      2. Classify each by modal operator
      3. Return structured Commitment objects
    """
    found = []
    for clause in segment_sentences(text):
        classification = classify_clause(clause)
        if classification is None:
            continue
        modal_type, operator = classification
        stripped = clause.strip()
        found.append(Commitment(
            text=stripped,
            modal_type=modal_type,
            modal_operator=operator,
            source_sentence=stripped,
            is_conditional=has_conditional(clause),
        ))
    return found
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
def extract_commitment_set(text: str) -> Set[Commitment]:
    """Extract commitments as a set (deduped by canonical form)."""
    return {commitment for commitment in extract_commitments(text)}
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def extract_commitment_texts(text: str) -> Set[str]:
    """
    Extract commitment canonical texts as a set of strings.
    This is the primary interface for fidelity scoring.
    """
    return set(commitment.canonical for commitment in extract_commitments(text))
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
# ---------------------------------------------------------------------------
|
| 206 |
+
# Backward-compatible interface
|
| 207 |
+
# ---------------------------------------------------------------------------
|
| 208 |
+
|
| 209 |
+
def extract_hard_commitments(text: str, nlp=None) -> Set[str]:
    """
    Backward-compatible interface. The `nlp` parameter is accepted only
    for API compatibility and is ignored.
    Returns set of canonical commitment strings.
    """
    del nlp  # kept in the signature for old callers; unused
    return extract_commitment_texts(text)
|
src/fidelity.py
ADDED
|
@@ -0,0 +1,252 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
fidelity.py — Min-Aggregated Fidelity Scoring
|
| 3 |
+
|
| 4 |
+
Implements equation 23 from the paper:
|
| 5 |
+
F(S, S') = min(F_jaccard, F_cosine, F_nli)
|
| 6 |
+
|
| 7 |
+
The min-aggregation is the key design choice: a signal must pass ALL
|
| 8 |
+
three checks, not just one. This prevents gaming (e.g., high cosine
|
| 9 |
+
with destroyed modal operators).
|
| 10 |
+
|
| 11 |
+
All three metrics work without transformer models:
|
| 12 |
+
- Jaccard: set overlap on commitment canonical forms
|
| 13 |
+
- Cosine: TF-IDF vectors on commitment text
|
| 14 |
+
- NLI proxy: structural entailment check on modal operators + key terms
|
| 15 |
+
|
| 16 |
+
When transformer-based NLI is available (e.g., on HuggingFace),
|
| 17 |
+
it replaces the proxy. The interface is the same.
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
import re
|
| 21 |
+
import math
|
| 22 |
+
from typing import Set, Dict, List, Optional
|
| 23 |
+
from collections import Counter
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# ---------------------------------------------------------------------------
|
| 27 |
+
# Jaccard fidelity — exact canonical match
|
| 28 |
+
# ---------------------------------------------------------------------------
|
| 29 |
+
|
| 30 |
+
def fidelity_jaccard(original: Set[str], transformed: Set[str]) -> float:
    """
    Jaccard index on canonical commitment strings.

    Strictest of the three metrics: requires exact canonical match.
    Returns 1.0 when both sets are empty (vacuous truth — nothing to lose)
    and 0.0 when exactly one of them is empty.
    """
    if not original:
        return 1.0 if not transformed else 0.0
    if not transformed:
        return 0.0
    shared = original & transformed
    combined = original | transformed
    return len(shared) / len(combined)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
# ---------------------------------------------------------------------------
|
| 48 |
+
# Cosine fidelity — TF-IDF word vectors
|
| 49 |
+
# ---------------------------------------------------------------------------
|
| 50 |
+
|
| 51 |
+
def _tokenize(text: str) -> List[str]:
|
| 52 |
+
"""Simple word tokenizer. Lowercase, split on non-alphanumeric."""
|
| 53 |
+
return re.findall(r'[a-z0-9]+', text.lower())
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def _tf(tokens: List[str]) -> Dict[str, float]:
|
| 57 |
+
"""Term frequency."""
|
| 58 |
+
counts = Counter(tokens)
|
| 59 |
+
total = len(tokens)
|
| 60 |
+
if total == 0:
|
| 61 |
+
return {}
|
| 62 |
+
return {t: c / total for t, c in counts.items()}
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def _idf(doc_tokens_list: List[List[str]]) -> Dict[str, float]:
|
| 66 |
+
"""Inverse document frequency."""
|
| 67 |
+
n_docs = len(doc_tokens_list)
|
| 68 |
+
if n_docs == 0:
|
| 69 |
+
return {}
|
| 70 |
+
|
| 71 |
+
df = Counter()
|
| 72 |
+
for tokens in doc_tokens_list:
|
| 73 |
+
unique = set(tokens)
|
| 74 |
+
for t in unique:
|
| 75 |
+
df[t] += 1
|
| 76 |
+
|
| 77 |
+
return {t: math.log(n_docs / count) + 1.0 for t, count in df.items()}
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def _tfidf_vector(tf: Dict[str, float], idf: Dict[str, float], vocab: Set[str]) -> Dict[str, float]:
|
| 81 |
+
"""TF-IDF vector over shared vocabulary."""
|
| 82 |
+
return {t: tf.get(t, 0.0) * idf.get(t, 0.0) for t in vocab}
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def _cosine_sim(v1: Dict[str, float], v2: Dict[str, float]) -> float:
|
| 86 |
+
"""Cosine similarity between two sparse vectors."""
|
| 87 |
+
keys = set(v1.keys()) | set(v2.keys())
|
| 88 |
+
dot = sum(v1.get(k, 0.0) * v2.get(k, 0.0) for k in keys)
|
| 89 |
+
norm1 = math.sqrt(sum(v ** 2 for v in v1.values())) or 1e-10
|
| 90 |
+
norm2 = math.sqrt(sum(v ** 2 for v in v2.values())) or 1e-10
|
| 91 |
+
return dot / (norm1 * norm2)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def fidelity_cosine(original: Set[str], transformed: Set[str]) -> float:
    """
    Cosine similarity on TF-IDF vectors of commitment text.

    Each commitment set is concatenated into a single document and the
    similarity is computed between the two documents' TF-IDF vectors.

    More forgiving than Jaccard — catches paraphrased commitments
    that share vocabulary but differ in exact wording.
    """
    if not original and not transformed:
        return 1.0
    if not original or not transformed:
        return 0.0

    orig_tokens = _tokenize(' '.join(original))
    trans_tokens = _tokenize(' '.join(transformed))
    if not orig_tokens or not trans_tokens:
        return 0.0

    # IDF is built from just these two documents.
    idf = _idf([orig_tokens, trans_tokens])
    vocab = set(idf.keys())

    vec_orig = _tfidf_vector(_tf(orig_tokens), idf, vocab)
    vec_trans = _tfidf_vector(_tf(trans_tokens), idf, vocab)
    return _cosine_sim(vec_orig, vec_trans)
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
# ---------------------------------------------------------------------------
|
| 132 |
+
# NLI proxy — structural entailment without transformer
|
| 133 |
+
# ---------------------------------------------------------------------------
|
| 134 |
+
|
| 135 |
+
# Key terms that must survive: modal operators, numbers, named entities
MODAL_TERMS = {
    'must', 'shall', 'cannot', 'required', 'prohibited', 'forbidden',
    'always', 'never', 'not', 'no',
}

# Optional dollar sign, then a digit and any run of digits/commas/periods
# (e.g. "$1,000", "3.5", "42").
NUMBER_RE = re.compile(r'\$?\d[\d,.]*')
# Weekdays, month names, and 1-2 digit (optionally ordinal) day numbers.
# NOTE(review): the month "may" also matches the modal verb "may", and the
# bare \d{1,2} alternative captures any small number as a "time" — confirm
# both are intended.
TIME_RE = re.compile(r'\b(?:monday|tuesday|wednesday|thursday|friday|saturday|sunday|'
                     r'january|february|march|april|may|june|july|august|september|'
                     r'october|november|december|\d{1,2}(?:st|nd|rd|th)?)\b', re.I)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
def _extract_key_terms(text: str) -> Set[str]:
    """Collect the terms that are structurally significant for commitment
    identity: modal operators, numbers/amounts, and time references.

    Regex matches are lowercased before being collected so that
    comparisons between original and transformed text are case-stable.
    """
    # Modal operators actually present in the tokenized text.
    terms = set(_tokenize(text)) & MODAL_TERMS

    # Numbers (amounts, thresholds, counts) and time references.
    for pattern in (NUMBER_RE, TIME_RE):
        terms |= {m.group().lower() for m in pattern.finditer(text)}

    return terms
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def fidelity_nli_proxy(original: Set[str], transformed: Set[str]) -> float:
    """Structural entailment proxy for NLI.

    Measures what fraction of the KEY TERMS (modals, numbers, time
    references) extracted from the original commitments survive in the
    transformed commitments.

    This is not full NLI — it is a conservative proxy that catches the
    most common failure mode: losing the modal operator or the specific
    quantity/deadline while retaining general topic words.

    Returns 1.0 when both sets are empty, 0.0 when exactly one side is
    empty, and a neutral 0.5 when the original has no key terms to check.
    When a real NLI model is available, replace this function.
    """
    # Degenerate cases: agree on emptiness, disagree otherwise.
    if not original:
        return 1.0 if not transformed else 0.0
    if not transformed:
        return 0.0

    orig_keys = _extract_key_terms(' '.join(original))
    trans_keys = _extract_key_terms(' '.join(transformed))

    if not orig_keys:
        # No structural terms to check — can't assess, return neutral.
        return 0.5

    # Survival rate of the original key terms.
    return len(orig_keys & trans_keys) / len(orig_keys)
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
# ---------------------------------------------------------------------------
|
| 203 |
+
# Min-aggregated fidelity — equation 23
|
| 204 |
+
# ---------------------------------------------------------------------------
|
| 205 |
+
|
| 206 |
+
def fidelity_score(original: Set[str], transformed: Set[str]) -> float:
    """Min-aggregated fidelity score per equation 23:
        F(S, S') = min(F_jaccard, F_cosine, F_nli)

    A signal must pass ALL three checks. This prevents:
    - high Jaccard with semantically different content (false exact match)
    - high cosine with destroyed modal operators (topic match, no commitment)
    - high NLI with completely reworded unrelated commitments

    Returns a float in [0.0, 1.0].
    """
    metrics = (fidelity_jaccard, fidelity_cosine, fidelity_nli_proxy)
    return min(metric(original, transformed) for metric in metrics)
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def fidelity_breakdown(original: Set[str], transformed: Set[str]) -> dict:
    """Return all three component scores plus the min-aggregated score.

    Keys: 'jaccard', 'cosine', 'nli_proxy', 'min_aggregated'.
    Useful for diagnosing which metric caused a fidelity failure.
    """
    scores = {
        'jaccard': fidelity_jaccard(original, transformed),
        'cosine': fidelity_cosine(original, transformed),
        'nli_proxy': fidelity_nli_proxy(original, transformed),
    }
    # The aggregate is the weakest of the three components.
    scores['min_aggregated'] = min(scores.values())
    return scores
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
# ---------------------------------------------------------------------------
|
| 243 |
+
# Legacy interface
|
| 244 |
+
# ---------------------------------------------------------------------------
|
| 245 |
+
|
| 246 |
+
def jaccard(a: Set[str], b: Set[str]) -> float:
    """Backward-compatible alias for fidelity_jaccard."""
    return fidelity_jaccard(a, b)
|
| 249 |
+
|
| 250 |
+
def jaccard_index(a, b) -> float:
    """Backward-compatible wrapper: coerces arbitrary iterables of
    strings to sets before delegating to fidelity_jaccard."""
    return fidelity_jaccard(set(a), set(b))
|
src/lineage.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
lineage.py — SHA-256 Provenance Chain
|
| 3 |
+
|
| 4 |
+
Every iteration in the recursive stress test gets a lineage record:
|
| 5 |
+
- Hash of the input text
|
| 6 |
+
- Hash of the output text
|
| 7 |
+
- Hash of the extracted commitments (sorted, deterministic)
|
| 8 |
+
- Fidelity score
|
| 9 |
+
- Parent hash (previous iteration's output hash)
|
| 10 |
+
- Iteration number
|
| 11 |
+
|
| 12 |
+
The chain is tamper-evident: changing any intermediate output
|
| 13 |
+
invalidates all subsequent hashes. This is Module 2 from the PPA.
|
| 14 |
+
|
| 15 |
+
For the public harness, this provides:
|
| 16 |
+
1. Reproducibility proof (same input → same chain)
|
| 17 |
+
2. Drift audit trail (exactly where commitments were lost)
|
| 18 |
+
3. Attractor collapse detection (when multiple signals converge)
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
import hashlib
|
| 22 |
+
import json
|
| 23 |
+
from dataclasses import dataclass, field, asdict
|
| 24 |
+
from typing import List, Set, Optional
|
| 25 |
+
from datetime import datetime, timezone
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def _hash_text(text: str) -> str:
    """Return the hex SHA-256 digest of *text*, UTF-8 encoded."""
    digest = hashlib.sha256()
    digest.update(text.encode('utf-8'))
    return digest.hexdigest()
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def _hash_commitment_set(commitments: Set[str]) -> str:
    """Deterministic SHA-256 hex digest of a commitment set.

    The set is sorted and JSON-serialized with compact separators so
    the digest is independent of set iteration order.
    """
    payload = json.dumps(sorted(commitments), separators=(',', ':')).encode('utf-8')
    return hashlib.sha256(payload).hexdigest()
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@dataclass
class LineageRecord:
    """Single record in the provenance chain.

    One record per compression iteration; parent_hash links it to the
    previous iteration's output, making the chain tamper-evident.
    """
    iteration: int                 # 0-based position in the chain
    input_hash: str                # SHA-256 of the text fed to this iteration
    output_hash: str               # SHA-256 of the compressed output
    commitment_hash: str           # deterministic hash of extracted commitments
    commitments_found: int         # number of commitments in the output
    fidelity: float                # min-aggregated fidelity vs. the original
    fidelity_detail: dict          # per-metric breakdown (jaccard/cosine/nli)
    gate_passed: bool              # whether this iteration cleared the gate
    parent_hash: Optional[str]     # output_hash of previous iteration (None for first)
    text_preview: str              # First 100 chars of output (for debugging)

    def to_dict(self) -> dict:
        """Plain-dict view of the record (via dataclasses.asdict)."""
        return asdict(self)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@dataclass
class LineageChain:
    """Complete provenance chain for a recursive stress test.

    Holds one LineageRecord per iteration. add_record() enforces that
    each record's parent_hash equals the previous record's output_hash,
    so tampering with any intermediate output is detectable.
    """
    signal_id: str                  # Hash of original signal
    signal_preview: str             # First 100 chars of original
    original_commitment_hash: str   # Hash of original commitments
    original_commitment_count: int
    backend: str                    # Compression backend name
    enforced: bool                  # Whether enforcement was active
    depth: int                      # Total iterations
    records: List[LineageRecord] = field(default_factory=list)
    timestamp: str = field(default_factory=lambda: datetime.now(timezone.utc).isoformat())

    def add_record(self, record: LineageRecord):
        """Append *record*, refusing it if it breaks chain linkage."""
        if self.records:
            expected_parent = self.records[-1].output_hash
            if record.parent_hash != expected_parent:
                raise ValueError(
                    f"Chain broken at iteration {record.iteration}: "
                    f"parent_hash {record.parent_hash[:12]}... != "
                    f"expected {expected_parent[:12]}..."
                )
        self.records.append(record)

    @property
    def final_fidelity(self) -> float:
        """Fidelity at the last iteration (1.0 for an empty chain)."""
        return self.records[-1].fidelity if self.records else 1.0

    @property
    def fidelity_curve(self) -> List[float]:
        """Fidelity at each iteration."""
        return [record.fidelity for record in self.records]

    @property
    def drift_curve(self) -> List[float]:
        """Drift (1 - fidelity) at each iteration."""
        return [1.0 - score for score in self.fidelity_curve]

    @property
    def all_passed(self) -> bool:
        """True when every iteration cleared the gate."""
        return all(record.gate_passed for record in self.records)

    @property
    def collapse_detected(self) -> bool:
        """Attractor-collapse check (Section 7).

        True when the last three (or more) iterations all produced the
        same output hash — the compressor has converged to a fixed point
        and the test is invalid.
        """
        if len(self.records) < 3:
            return False
        recent = {record.output_hash for record in self.records[-3:]}
        return len(recent) == 1

    def to_dict(self) -> dict:
        """Serializable summary of the chain, records included."""
        summary = {
            'signal_id': self.signal_id,
            'signal_preview': self.signal_preview,
            'original_commitment_hash': self.original_commitment_hash,
            'original_commitment_count': self.original_commitment_count,
            'backend': self.backend,
            'enforced': self.enforced,
            'depth': self.depth,
            'timestamp': self.timestamp,
            'final_fidelity': self.final_fidelity,
            'collapse_detected': self.collapse_detected,
            'records': [record.to_dict() for record in self.records],
        }
        return summary

    def to_json(self, indent: int = 2) -> str:
        """JSON rendering of to_dict()."""
        return json.dumps(self.to_dict(), indent=indent)
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def check_attractor_collapse(chains: List["LineageChain"]) -> bool:
    """Cross-signal attractor collapse check (Section 7).

    If more than half of the (two or more) signals converge to the same
    final output, the result is invalid — the compressor is collapsing
    inputs, not preserving them.

    Args:
        chains: One completed LineageChain per signal. Chains without
            records are skipped.

    Returns:
        True when a strict majority of chains share one final output
        hash; False for fewer than two chains or when no chain has
        records.
    """
    if len(chains) < 2:
        return False

    final_hashes = [c.records[-1].output_hash for c in chains if c.records]
    if not final_hashes:
        # No chain produced any output — nothing can have collapsed.
        # (Without this guard, most_common(1)[0] raises IndexError.)
        return False

    from collections import Counter
    most_common_count = Counter(final_hashes).most_common(1)[0][1]
    # Flag when more than half the signals converge to the same output.
    return most_common_count > len(chains) // 2
|
src/lossy.py
ADDED
|
@@ -0,0 +1,300 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
lossy.py — Lossy Compression Backend
|
| 3 |
+
|
| 4 |
+
Simulates what real LLMs do to text under recursive compression:
|
| 5 |
+
- Drop modal operators ("must" → removed or softened)
|
| 6 |
+
- Paraphrase (swap words for synonyms)
|
| 7 |
+
- Add conversational filler ("Got it!", "Sure thing!")
|
| 8 |
+
- Lose specific quantities ($100 → "the amount", Friday → "soon")
|
| 9 |
+
|
| 10 |
+
This is NOT a real compressor. It's a DETERMINISTIC SIMULATION
|
| 11 |
+
of the drift patterns observed in live LLM testing (Meta Llama,
|
| 12 |
+
GPT-4, Claude — see empirical data in paper Section 6).
|
| 13 |
+
|
| 14 |
+
Why this exists:
|
| 15 |
+
- Extractive backend is too faithful (doesn't show the gap)
|
| 16 |
+
- BART requires 2GB+ model download
|
| 17 |
+
- API backends require credentials
|
| 18 |
+
- This runs anywhere, instantly, and shows the conservation law
|
| 19 |
+
|
| 20 |
+
The drift patterns are seeded for reproducibility.
|
| 21 |
+
Same input → same output → same lineage chain.
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
import re
|
| 25 |
+
import random
|
| 26 |
+
import hashlib
|
| 27 |
+
from typing import List, Tuple
|
| 28 |
+
|
| 29 |
+
from .compression import CompressionBackend
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
# ---------------------------------------------------------------------------
|
| 33 |
+
# Drift patterns observed in real LLM testing
|
| 34 |
+
# ---------------------------------------------------------------------------
|
| 35 |
+
|
| 36 |
+
# Modal softening table: each strong modal maps to progressively weaker
# alternatives; the empty string means the modal is deleted outright.
MODAL_DRIFT = {
    'must': ['should', 'could', 'might want to', ''],
    'shall': ['will', 'should', 'might', ''],
    'cannot': ['probably shouldn\'t', 'might not want to', 'shouldn\'t', ''],
    'shall not': ['probably shouldn\'t', 'might want to avoid', ''],
    'must not': ['should avoid', 'probably shouldn\'t', ''],
    'required to': ['expected to', 'encouraged to', 'asked to', ''],
    'prohibited from': ['discouraged from', 'asked not to', ''],
    'forbidden to': ['discouraged from', 'asked not to', ''],
    'always': ['usually', 'often', 'typically', 'generally'],
    'never': ['rarely', 'seldom', 'not usually', 'typically don\'t'],
}
|
| 49 |
+
|
| 50 |
+
# Quantity erosion: specific numbers → vague references
|
| 51 |
+
QUANTITY_DRIFT = [
|
| 52 |
+
(re.compile(r'\$\d[\d,]*'), ['the payment', 'the amount', 'the fee']),
|
| 53 |
+
(re.compile(r'\b\d+\s*(?:days?|hours?|minutes?|months?|years?|weeks?)\b', re.I),
|
| 54 |
+
['the timeframe', 'the period', 'a while']),
|
| 55 |
+
(re.compile(r'\b(?:Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)\b', re.I),
|
| 56 |
+
['soon', 'by the deadline', 'on time']),
|
| 57 |
+
(re.compile(r'\b(?:January|February|March|April|May|June|July|August|September|October|November|December)\s+\d{1,2}(?:st|nd|rd|th)?\b', re.I),
|
| 58 |
+
['by the deadline', 'on time', 'as scheduled']),
|
| 59 |
+
(re.compile(r'\b\d{1,3}(?:,\d{3})*\b'), ['several', 'many', 'a number of']),
|
| 60 |
+
]
|
| 61 |
+
|
| 62 |
+
# Conversational filler (LLMs love adding these). Prepended to the text
# by _add_conversational_filler, hence the trailing spaces.
FILLER = [
    "Got it. ",
    "Sure thing. ",
    "Understood. ",
    "Makes sense. ",
    "Right. ",
    "OK so ",
    "Basically, ",
    "In other words, ",
    "To summarize, ",
    "The key point is ",
]

# Sentence padding (LLMs expand with these). Appended to the text,
# hence the leading spaces.
PADDING = [
    " That's important to keep in mind.",
    " Just wanted to make sure that's clear.",
    " Let me know if you have questions.",
    " Hope that helps!",
    " Pretty straightforward.",
    " Nothing too complicated here.",
]
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
class LossyBackend(CompressionBackend):
    """
    Deterministic lossy compression simulating real LLM drift.

    Drift intensity increases with each compress() call (simulating
    recursive degradation); reset() starts a new signal. The RNG seed is
    derived from the input text's hash plus the call count, so the same
    input sequence always produces the same outputs.

    Parameters:
        drift_rate: 0.0 (no drift) to 1.0 (maximum drift).
            Controls probability of each drift operation.
        add_filler: Whether to add conversational filler.
    """

    def __init__(self, drift_rate: float = 0.4, add_filler: bool = True):
        self._drift_rate = drift_rate
        self._add_filler = add_filler
        # Number of compress() calls since the last reset(); scales drift.
        self._call_count = 0

    @property
    def name(self) -> str:
        """Backend identifier, including the configured drift rate."""
        return f'lossy(drift={self._drift_rate})'

    def reset(self):
        """Reset call counter (for new signal)."""
        self._call_count = 0

    def compress(self, text: str, target_ratio: float = 0.5) -> str:
        """
        Apply lossy transformation to text.

        Drift increases with each call (self._call_count).
        """
        self._call_count += 1

        # Seed RNG from text hash for determinism. md5 is used only as a
        # cheap deterministic seed source, not for security.
        seed = int(hashlib.md5(text.encode()).hexdigest()[:8], 16) + self._call_count
        rng = random.Random(seed)

        # Effective drift rate increases with iteration, capped at 1.0.
        effective_rate = min(1.0, self._drift_rate * (1.0 + 0.2 * self._call_count))

        result = text

        # Stage 1: Modal softening
        result = self._soften_modals(result, rng, effective_rate)

        # Stage 2: Quantity erosion (applied at a reduced rate)
        result = self._erode_quantities(result, rng, effective_rate * 0.7)

        # Stage 3: Sentence dropping (simulate compression)
        result = self._drop_sentences(result, rng, target_ratio)

        # Stage 4: Add filler (simulate LLM expansion)
        if self._add_filler and rng.random() < effective_rate * 0.5:
            result = self._add_conversational_filler(result, rng)

        return result.strip()

    def _soften_modals(self, text: str, rng: random.Random, rate: float) -> str:
        """Replace strong modals with weaker alternatives.

        At most one occurrence per modal is replaced per call.
        NOTE(review): re.escape without word boundaries means e.g.
        'never' also matches inside 'nevertheless' — confirm this
        over-matching is acceptable for the simulation.
        """
        result = text
        # Sort by length descending to match multi-word modals first
        for modal in sorted(MODAL_DRIFT.keys(), key=len, reverse=True):
            if rng.random() < rate:
                replacements = MODAL_DRIFT[modal]
                replacement = rng.choice(replacements)
                # Case-insensitive replacement, one occurrence at a time
                pattern = re.compile(re.escape(modal), re.I)
                match = pattern.search(result)
                if match:
                    original = match.group()
                    # Preserve capitalization of first char
                    if original[0].isupper() and replacement:
                        replacement = replacement[0].upper() + replacement[1:]
                    result = result[:match.start()] + replacement + result[match.end():]
        return result

    def _erode_quantities(self, text: str, rng: random.Random, rate: float) -> str:
        """Replace specific quantities with vague references (at most
        one occurrence per pattern per call)."""
        result = text
        for pattern, replacements in QUANTITY_DRIFT:
            if rng.random() < rate:
                match = pattern.search(result)
                if match:
                    replacement = rng.choice(replacements)
                    result = result[:match.start()] + replacement + result[match.end():]
        return result

    def _drop_sentences(self, text: str, rng: random.Random, target_ratio: float) -> str:
        """Drop sentences to approximate target compression ratio.

        Selection is purely random: the baseline is commitment-blind,
        so modal-bearing sentences get no protection here (contrast
        LossyEnforcedBackend._priority_drop).
        """
        sentences = re.split(r'(?<=[.!?])\s+', text)
        if len(sentences) <= 1:
            return text

        target_count = max(1, int(len(sentences) * target_ratio))

        if len(sentences) <= target_count:
            return text

        # Score sentences at random; has_modal is computed but deliberately
        # ignored — the baseline does not know about commitments.
        scored = []
        for i, sent in enumerate(sentences):
            has_modal = any(m in sent.lower() for m in ['must', 'shall', 'cannot', 'required', 'always', 'never'])
            # Without enforcement, modal sentences have NO priority
            # (that's the point — baseline doesn't know about commitments)
            score = rng.random()
            scored.append((score, i, sent))

        scored.sort(key=lambda x: -x[0])
        kept = scored[:target_count]
        kept.sort(key=lambda x: x[1])  # Restore order

        return ' '.join(sent for _, _, sent in kept)

    def _add_conversational_filler(self, text: str, rng: random.Random) -> str:
        """Add LLM-style conversational filler: a prefix always, plus a
        padding suffix with 30% probability."""
        filler = rng.choice(FILLER)
        padding = rng.choice(PADDING) if rng.random() < 0.3 else ''
        return filler + text + padding
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
class LossyEnforcedBackend(CompressionBackend):
    """
    Lossy backend that PRESERVES modal-bearing sentences during dropping.

    This simulates what happens when a compressor is commitment-aware:
    same drift patterns, but modal sentences get priority during selection.

    The enforcement is in the SELECTION, not post-hoc injection.
    """

    def __init__(self, drift_rate: float = 0.4, add_filler: bool = False):
        self._drift_rate = drift_rate
        self._add_filler = add_filler
        # compress() calls since reset(); varies the seed per call.
        self._call_count = 0

    @property
    def name(self) -> str:
        """Backend identifier, including the configured drift rate."""
        return f'lossy_enforced(drift={self._drift_rate})'

    def reset(self):
        """Reset the per-signal call counter."""
        self._call_count = 0

    def compress(self, text: str, target_ratio: float = 0.5) -> str:
        """Compress deterministically while protecting modal sentences.

        Unlike LossyBackend.compress, no modal softening or quantity
        erosion is applied; only priority-aware sentence dropping runs.
        """
        self._call_count += 1
        # Deterministic per-call seed (md5 as a cheap hash, not security).
        seed = int(hashlib.md5(text.encode()).hexdigest()[:8], 16) + self._call_count
        rng = random.Random(seed)

        result = text

        # NO modal softening — that's what enforcement means.
        # The gate preserves modal operators intact.

        # NO quantity erosion on commitment-bearing sentences.

        # Priority sentence selection (modal sentences always kept)
        result = self._priority_drop(result, rng, target_ratio)

        return result.strip()

    def _mild_soften(self, text: str, rng: random.Random, rate: float) -> str:
        """Much lower drift rate for modals under enforcement.

        Excludes the empty-string (deletion) replacements.
        NOTE(review): not called from compress() — appears to be dead
        code kept for experimentation; confirm before removing.
        """
        result = text
        for modal in sorted(MODAL_DRIFT.keys(), key=len, reverse=True):
            if rng.random() < rate:
                replacements = [r for r in MODAL_DRIFT[modal] if r]  # Exclude empty (deletion)
                if replacements:
                    replacement = rng.choice(replacements)
                    pattern = re.compile(re.escape(modal), re.I)
                    match = pattern.search(result)
                    if match:
                        original = match.group()
                        # Preserve capitalization of first char
                        if original[0].isupper() and replacement:
                            replacement = replacement[0].upper() + replacement[1:]
                        result = result[:match.start()] + replacement + result[match.end():]
        return result

    def _mild_erode(self, text: str, rng: random.Random, rate: float) -> str:
        """Lower erosion rate under enforcement.

        NOTE(review): not called from compress() — appears to be dead
        code kept for experimentation; confirm before removing.
        """
        result = text
        for pattern, replacements in QUANTITY_DRIFT:
            if rng.random() < rate:
                match = pattern.search(result)
                if match:
                    replacement = rng.choice(replacements)
                    result = result[:match.start()] + replacement + result[match.end():]
        return result

    def _priority_drop(self, text: str, rng: random.Random, target_ratio: float) -> str:
        """Drop sentences but PRIORITIZE modal-bearing ones.

        The keyword list includes softened modals ('should', 'could', …)
        so that sentences already weakened upstream are still protected.
        """
        sentences = re.split(r'(?<=[.!?])\s+', text)
        if len(sentences) <= 1:
            return text

        target_count = max(1, int(len(sentences) * target_ratio))
        if len(sentences) <= target_count:
            return text

        scored = []
        for i, sent in enumerate(sentences):
            has_modal = any(m in sent.lower() for m in
                            ['must', 'shall', 'cannot', 'required', 'always', 'never',
                             'should', 'could', 'might', 'expected', 'encouraged'])
            # Modal sentences get HIGH priority under enforcement:
            # a +1.0 bonus dominates the random 0–0.5 tiebreaker.
            score = (1.0 if has_modal else 0.0) + rng.random() * 0.5
            scored.append((score, i, sent))

        scored.sort(key=lambda x: -x[0])
        kept = scored[:target_count]
        kept.sort(key=lambda x: x[1])

        return ' '.join(sent for _, _, sent in kept)
|
src/runner.py
ADDED
|
@@ -0,0 +1,402 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
runner.py — Falsification Protocol Orchestrator
|
| 3 |
+
|
| 4 |
+
Implements the complete falsification protocol from Section 7:
|
| 5 |
+
|
| 6 |
+
1. Load pinned corpus (25 signals across 5 categories)
|
| 7 |
+
2. For each signal:
|
| 8 |
+
a. Extract commitments from original
|
| 9 |
+
b. Run 10 recursive compressions (BASELINE — no gate)
|
| 10 |
+
c. Run 10 recursive compressions (ENFORCED — with gate)
|
| 11 |
+
d. Record lineage chains for both
|
| 12 |
+
3. Compute aggregate statistics
|
| 13 |
+
4. Check attractor collapse (if all signals converge, result is invalid)
|
| 14 |
+
5. Output JSON receipt
|
| 15 |
+
|
| 16 |
+
Success criterion (paper): enforced stability > baseline by ≥20pp
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
import json
|
| 20 |
+
import os
|
| 21 |
+
import sys
|
| 22 |
+
from typing import List, Dict, Optional, Set
|
| 23 |
+
from datetime import datetime, timezone
|
| 24 |
+
from dataclasses import dataclass
|
| 25 |
+
|
| 26 |
+
from .extraction import extract_commitment_texts
|
| 27 |
+
from .fidelity import fidelity_score, fidelity_breakdown
|
| 28 |
+
from .compression import CompressionBackend, get_backend
|
| 29 |
+
from .enforcement import CommitmentGate, baseline_compress
|
| 30 |
+
from .lineage import (
|
| 31 |
+
LineageChain, LineageRecord,
|
| 32 |
+
_hash_text, _hash_commitment_set,
|
| 33 |
+
check_attractor_collapse
|
| 34 |
+
)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
# ---------------------------------------------------------------------------
|
| 38 |
+
# Default configuration
|
| 39 |
+
# ---------------------------------------------------------------------------
|
| 40 |
+
|
| 41 |
+
# Recursive compression iterations per signal.
DEFAULT_DEPTH = 10
# Fidelity threshold handed to CommitmentGate.
DEFAULT_THRESHOLD = 0.6
# Compression ratio passed to the backend on each iteration.
DEFAULT_TARGET_RATIO = 0.5
# Gate retry budget, handed to CommitmentGate.
DEFAULT_MAX_RETRIES = 3
# Pinned corpus shipped with the repo: <repo_root>/corpus/canonical_corpus.json
# (two levels up from this module's directory).
DEFAULT_CORPUS_PATH = os.path.join(
    os.path.dirname(os.path.dirname(__file__)), 'corpus', 'canonical_corpus.json'
)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
# ---------------------------------------------------------------------------
|
| 51 |
+
# Corpus loading
|
| 52 |
+
# ---------------------------------------------------------------------------
|
| 53 |
+
|
| 54 |
+
def load_corpus(path: Optional[str] = None) -> List[Dict]:
    """Load the pinned test corpus.

    Args:
        path: Path to the corpus JSON file; defaults to
            DEFAULT_CORPUS_PATH. The file must contain a top-level
            'canonical_signals' list.

    Returns:
        The list of signal dicts.

    Raises:
        OSError: if the file cannot be opened.
        KeyError: if the 'canonical_signals' key is missing.
    """
    if path is None:
        path = DEFAULT_CORPUS_PATH
    # Explicit encoding: the corpus is UTF-8 regardless of platform locale
    # (the original open() decoded with the locale's default encoding).
    with open(path, 'r', encoding='utf-8') as f:
        data = json.load(f)
    return data['canonical_signals']
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
# ---------------------------------------------------------------------------
# Single signal test
# ---------------------------------------------------------------------------

def run_recursion(
    signal: str,
    backend: CompressionBackend,
    depth: int = DEFAULT_DEPTH,
    enforce: bool = False,
    threshold: float = DEFAULT_THRESHOLD,
    target_ratio: float = DEFAULT_TARGET_RATIO,
    max_retries: int = DEFAULT_MAX_RETRIES,
) -> LineageChain:
    """
    Run recursive compression on a single signal.

    The signal is compressed ``depth`` times, each iteration feeding on the
    previous iteration's output. Fidelity is always measured against the
    commitments extracted from the ORIGINAL signal, never against the
    previous iteration — that set is the conserved quantity under test.

    Args:
        signal: The original input text.
        backend: Compression backend used at every iteration.
        depth: Number of recursive compression passes.
        enforce: If True, route each pass through a CommitmentGate that
            retries/rejects outputs below ``threshold``.
        threshold: Minimum acceptable fidelity score.
        target_ratio: Compression target passed to the backend.
        max_retries: Gate retry budget (only used when ``enforce`` is True).

    Returns:
        A LineageChain with full provenance records, one per iteration.
    """
    # Extract commitments from ORIGINAL (once — these are the invariant)
    original_commitments = extract_commitment_texts(signal)

    # Initialize lineage; previews are truncated to 100 chars to keep
    # the serialized receipt small.
    chain = LineageChain(
        signal_id=_hash_text(signal),
        signal_preview=signal[:100],
        original_commitment_hash=_hash_commitment_set(original_commitments),
        original_commitment_count=len(original_commitments),
        backend=backend.name,
        enforced=enforce,
        depth=depth,
    )

    # Setup gate if enforcing
    gate = CommitmentGate(backend, threshold, max_retries) if enforce else None

    current_text = signal
    parent_hash = None  # first record has no parent

    for i in range(depth):
        input_hash = _hash_text(current_text)

        # Compress: gated path returns fidelity pre-computed; baseline
        # path must extract and score the output itself.
        if enforce and gate:
            result = gate.compress(current_text, original_commitments, target_ratio)
            output_text = result.output
            output_commitments = result.output_commitments
            detail = result.fidelity_detail
            score = result.fidelity
            passed = result.passed
        else:
            output_text = baseline_compress(backend, current_text, target_ratio)
            output_commitments = extract_commitment_texts(output_text)
            detail = fidelity_breakdown(original_commitments, output_commitments)
            score = detail['min_aggregated']
            passed = score >= threshold

        output_hash = _hash_text(output_text)

        # Record this iteration; parent_hash links it to the previous
        # record so the chain's integrity can be verified later.
        record = LineageRecord(
            iteration=i + 1,
            input_hash=input_hash,
            output_hash=output_hash,
            commitment_hash=_hash_commitment_set(output_commitments),
            commitments_found=len(output_commitments),
            fidelity=score,
            fidelity_detail=detail,
            gate_passed=passed,
            parent_hash=parent_hash,
            text_preview=output_text[:100],
        )
        chain.add_record(record)

        # Advance: next iteration compresses this iteration's output.
        current_text = output_text
        parent_hash = output_hash

    return chain
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
# ---------------------------------------------------------------------------
# Full protocol
# ---------------------------------------------------------------------------

@dataclass
class ProtocolResult:
    """Complete result of the falsification protocol.

    Bundles the per-signal lineage chains for both arms (baseline vs
    enforced) with the aggregate statistics computed from them.
    """
    corpus_size: int    # signals actually tested (commitment-free ones are skipped)
    depth: int          # recursion depth used for every chain
    backend: str        # "<baseline backend> vs <enforced backend>"
    threshold: float    # fidelity pass/fail threshold
    baseline_chains: List[LineageChain]
    enforced_chains: List[LineageChain]

    # Aggregate statistics
    baseline_avg_fidelity: float = 0.0
    enforced_avg_fidelity: float = 0.0
    baseline_stability_pct: float = 0.0  # % of signals with final fidelity >= threshold
    enforced_stability_pct: float = 0.0
    improvement_pp: float = 0.0  # percentage points
    attractor_collapse: bool = False  # cross-signal collapse detected

    timestamp: str = ''  # ISO-8601 UTC timestamp of the run

    def to_dict(self) -> dict:
        """Serialize to a plain dict: a rounded 'summary' plus full chains."""
        return {
            'summary': {
                'corpus_size': self.corpus_size,
                'depth': self.depth,
                'backend': self.backend,
                'threshold': self.threshold,
                'baseline': {
                    'avg_fidelity': round(self.baseline_avg_fidelity, 4),
                    'stability_pct': round(self.baseline_stability_pct, 1),
                },
                'enforced': {
                    'avg_fidelity': round(self.enforced_avg_fidelity, 4),
                    'stability_pct': round(self.enforced_stability_pct, 1),
                },
                'improvement_pp': round(self.improvement_pp, 1),
                'attractor_collapse': self.attractor_collapse,
                'timestamp': self.timestamp,
            },
            'baseline_chains': [c.to_dict() for c in self.baseline_chains],
            'enforced_chains': [c.to_dict() for c in self.enforced_chains],
        }

    def to_json(self, indent: int = 2) -> str:
        """Serialize to pretty-printed JSON (the run 'receipt')."""
        return json.dumps(self.to_dict(), indent=indent)
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
def run_protocol(
    backend_name: str = 'extractive',
    enforced_backend_name: Optional[str] = None,
    depth: int = DEFAULT_DEPTH,
    threshold: float = DEFAULT_THRESHOLD,
    target_ratio: float = DEFAULT_TARGET_RATIO,
    max_retries: int = DEFAULT_MAX_RETRIES,
    corpus_path: str = DEFAULT_CORPUS_PATH,
    signals: Optional[List[str]] = None,
    verbose: bool = True,
) -> ProtocolResult:
    """
    Run the complete falsification protocol.

    For each signal in the corpus:
    1. Run baseline recursion (no enforcement)
    2. Run enforced recursion (with commitment gate)
    3. Compare stability

    Check for attractor collapse across all signals.

    Args:
        backend_name: Backend for baseline runs
        enforced_backend_name: Backend for enforced runs (defaults to same as baseline)
        depth: Recursion iterations
        threshold: Fidelity threshold for pass/fail
        target_ratio: Compression target
        max_retries: Gate retry attempts
        corpus_path: Path to corpus JSON
        signals: Override corpus with specific signals
        verbose: Print progress

    Returns:
        A ProtocolResult with both arms' chains and aggregate statistics.

    Raises:
        ValueError: if no corpus signal contains any commitment.
    """
    baseline_backend = get_backend(backend_name)
    # Auto-pair lossy with lossy_enforced (matches app.py behavior)
    if enforced_backend_name is None and backend_name == 'lossy':
        enforced_backend_name = 'lossy_enforced'
    enforced_backend = get_backend(enforced_backend_name or backend_name)

    # Load corpus or use provided signals
    if signals:
        corpus = [{'category': 'custom', 'signal': s} for s in signals]
    else:
        corpus = load_corpus(corpus_path)

    baseline_chains = []
    enforced_chains = []

    for i, entry in enumerate(corpus):
        signal = entry['signal']
        category = entry.get('category', 'unknown')

        # Extract once — reused for both the progress output and the skip
        # check (previously extracted twice per signal when verbose).
        commitments = extract_commitment_texts(signal)

        if verbose:
            print(f"\n[{i+1}/{len(corpus)}] {category}: {signal[:60]}...")
            print(f" Commitments found: {len(commitments)}")

        # Skip signals with no commitments (can't test conservation)
        if not commitments:
            if verbose:
                print(f" ⚠ No commitments detected — skipping")
            continue

        # Reset lossy backends if they track state
        if hasattr(baseline_backend, 'reset'):
            baseline_backend.reset()
        if hasattr(enforced_backend, 'reset'):
            enforced_backend.reset()

        # Baseline
        if verbose:
            print(f" Running baseline (depth={depth})...")
        b_chain = run_recursion(
            signal, baseline_backend, depth,
            enforce=False, threshold=threshold, target_ratio=target_ratio,
        )
        baseline_chains.append(b_chain)
        if verbose:
            print(f" Final fidelity: {b_chain.final_fidelity:.3f}"
                  f" {'✓' if b_chain.final_fidelity >= threshold else '✗'}")

        # Reset for enforced run
        if hasattr(enforced_backend, 'reset'):
            enforced_backend.reset()

        # Enforced
        if verbose:
            print(f" Running enforced (depth={depth})...")
        e_chain = run_recursion(
            signal, enforced_backend, depth,
            enforce=True, threshold=threshold, target_ratio=target_ratio,
            max_retries=max_retries,
        )
        enforced_chains.append(e_chain)
        if verbose:
            print(f" Final fidelity: {e_chain.final_fidelity:.3f}"
                  f" {'✓' if e_chain.final_fidelity >= threshold else '✗'}")

            gap = e_chain.final_fidelity - b_chain.final_fidelity
            print(f" Δ = {gap:+.3f}")

    # Aggregate
    n = len(baseline_chains)
    if n == 0:
        raise ValueError("No signals with commitments found in corpus")

    b_avg = sum(c.final_fidelity for c in baseline_chains) / n
    e_avg = sum(c.final_fidelity for c in enforced_chains) / n
    b_stable = sum(1 for c in baseline_chains if c.final_fidelity >= threshold) / n * 100
    e_stable = sum(1 for c in enforced_chains if c.final_fidelity >= threshold) / n * 100

    # Cross-signal attractor collapse
    collapse_base = check_attractor_collapse(baseline_chains)
    collapse_enf = check_attractor_collapse(enforced_chains)

    result = ProtocolResult(
        corpus_size=n,
        depth=depth,
        backend=f"{baseline_backend.name} vs {enforced_backend.name}",
        threshold=threshold,
        baseline_chains=baseline_chains,
        enforced_chains=enforced_chains,
        baseline_avg_fidelity=b_avg,
        enforced_avg_fidelity=e_avg,
        baseline_stability_pct=b_stable,
        enforced_stability_pct=e_stable,
        improvement_pp=e_stable - b_stable,
        attractor_collapse=collapse_base or collapse_enf,
        timestamp=datetime.now(timezone.utc).isoformat(),
    )

    if verbose:
        print(f"\n{'='*70}")
        print(f"FALSIFICATION PROTOCOL RESULTS")
        print(f"{'='*70}")
        print(f"Corpus: {n} signals | Depth: {depth} | Backend: {baseline_backend.name} vs {enforced_backend.name}")
        print(f"Threshold: {threshold}")
        print(f"\n {'':20s} {'Baseline':>10s} {'Enforced':>10s} {'Δ':>8s}")
        print(f" {'Avg Fidelity':20s} {b_avg:10.3f} {e_avg:10.3f} {e_avg-b_avg:+8.3f}")
        print(f" {'Stability %':20s} {b_stable:9.1f}% {e_stable:9.1f}% {e_stable-b_stable:+7.1f}pp")

        if collapse_base or collapse_enf:
            print(f"\n ⚠ ATTRACTOR COLLAPSE DETECTED — results may be invalid")
            if collapse_base:
                print(f" Baseline chains converged to same output")
            if collapse_enf:
                print(f" Enforced chains converged to same output")

        # Success criterion from the paper: enforced stability must beat
        # baseline by at least 20 percentage points.
        success = result.improvement_pp >= 20.0
        print(f"\n {'✓ PASS' if success else '✗ FAIL'}: "
              f"Improvement = {result.improvement_pp:+.1f}pp "
              f"(threshold: ≥20pp)")
        print(f"{'='*70}")

    return result
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
# ---------------------------------------------------------------------------
# CLI entry point
# ---------------------------------------------------------------------------

def main():
    """Command-line entry point.

    Parses CLI flags, runs the protocol (full corpus or a single
    ``--signal`` override), and writes the JSON receipt to ``--output``.
    """
    # Imported lazily: argparse is only needed when run as a script.
    import argparse

    parser = argparse.ArgumentParser(
        description="Commitment Conservation Falsification Protocol"
    )
    parser.add_argument('--backend', default='extractive',
                        choices=['extractive', 'bart', 'back_translation', 'lossy'],
                        help='Compression backend for baseline')
    parser.add_argument('--enforced-backend', default=None,
                        choices=['extractive', 'bart', 'back_translation', 'lossy', 'lossy_enforced'],
                        help='Backend for enforced runs (default: same as --backend)')
    parser.add_argument('--depth', type=int, default=DEFAULT_DEPTH,
                        help='Recursion depth (default: 10)')
    parser.add_argument('--threshold', type=float, default=DEFAULT_THRESHOLD,
                        help='Fidelity threshold (default: 0.6)')
    parser.add_argument('--signal', type=str, default=None,
                        help='Test a single signal instead of full corpus')
    parser.add_argument('--corpus', type=str, default=DEFAULT_CORPUS_PATH,
                        help='Path to corpus JSON')
    parser.add_argument('--output', type=str, default='outputs/protocol_result.json',
                        help='Output path for JSON receipt')
    parser.add_argument('--quiet', action='store_true',
                        help='Suppress verbose output')

    args = parser.parse_args()

    # A single --signal overrides the corpus entirely.
    signals = [args.signal] if args.signal else None

    result = run_protocol(
        backend_name=args.backend,
        enforced_backend_name=args.enforced_backend,
        depth=args.depth,
        threshold=args.threshold,
        corpus_path=args.corpus,
        signals=signals,
        verbose=not args.quiet,
    )

    # Save receipt ('or .' guards against a bare filename with no dirname).
    os.makedirs(os.path.dirname(args.output) or '.', exist_ok=True)
    with open(args.output, 'w') as f:
        f.write(result.to_json())

    print(f"\n✓ Receipt saved: {args.output}")


if __name__ == '__main__':
    main()
|
tests/__init__.py
ADDED
|
File without changes
|
tests/test_harness.py
ADDED
|
@@ -0,0 +1,449 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Test suite for the Commitment Conservation Harness v2.
|
| 3 |
+
|
| 4 |
+
Tests the measurement instrument (extraction), scoring (fidelity),
|
| 5 |
+
enforcement gate, lineage tracking, and full protocol.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import json
|
| 9 |
+
import os
|
| 10 |
+
import pytest
|
| 11 |
+
from src.extraction import (
|
| 12 |
+
extract_commitments, extract_commitment_set, extract_commitment_texts,
|
| 13 |
+
extract_hard_commitments, segment_sentences, classify_clause, Commitment
|
| 14 |
+
)
|
| 15 |
+
from src.fidelity import (
|
| 16 |
+
fidelity_jaccard, fidelity_cosine, fidelity_nli_proxy,
|
| 17 |
+
fidelity_score, fidelity_breakdown
|
| 18 |
+
)
|
| 19 |
+
from src.compression import get_backend, ExtractiveBackend
|
| 20 |
+
from src.enforcement import CommitmentGate, GateResult, baseline_compress
|
| 21 |
+
from src.lineage import (
|
| 22 |
+
LineageChain, LineageRecord, _hash_text, _hash_commitment_set,
|
| 23 |
+
check_attractor_collapse
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# ===================================================================
|
| 28 |
+
# EXTRACTION TESTS — The measurement instrument
|
| 29 |
+
# ===================================================================
|
| 30 |
+
|
| 31 |
+
class TestSentenceSegmentation:
    """Sentence segmentation — the first stage of the extraction pipeline."""

    def test_single_sentence(self):
        sents = segment_sentences("You must pay.")
        assert sents == ["You must pay."]

    def test_multiple_sentences(self):
        assert len(segment_sentences("You must pay. The weather is nice.")) == 2

    def test_semicolon_split(self):
        # Semicolons separate clauses and must count as sentence boundaries.
        assert len(segment_sentences("You must pay $100; it's rainy outside.")) == 2

    def test_empty_input(self):
        for blank in ("", " "):
            assert segment_sentences(blank) == []
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class TestClassification:
    """Clause-level modal classification: obligation / prohibition / constraint.

    classify_clause returns a tuple whose first element is the modal type,
    or None when the clause carries no commitment.
    """

    def test_obligation_must(self):
        result = classify_clause("You must pay $100 by Friday")
        assert result is not None
        assert result[0] == 'obligation'

    def test_obligation_shall(self):
        result = classify_clause("The tenant shall comply with all regulations")
        assert result is not None
        assert result[0] == 'obligation'

    def test_prohibition_must_not(self):
        """'must not' must match as prohibition, not obligation."""
        result = classify_clause("You must not enter without permission")
        assert result is not None
        assert result[0] == 'prohibition'

    def test_prohibition_shall_not(self):
        result = classify_clause("The licensee shall not reverse-engineer")
        assert result is not None
        assert result[0] == 'prohibition'

    def test_prohibition_cannot(self):
        result = classify_clause("The budget cannot exceed $5000")
        assert result is not None
        assert result[0] == 'prohibition'

    def test_constraint_always(self):
        result = classify_clause("Always verify the user's age")
        assert result is not None
        assert result[0] == 'constraint'

    def test_constraint_never(self):
        result = classify_clause("Never share your password")
        assert result is not None
        assert result[0] == 'constraint'

    def test_no_commitment(self):
        """Ambient content should NOT match."""
        assert classify_clause("The weather is nice today") is None
        assert classify_clause("Our team has grown significantly") is None
        assert classify_clause("The building was constructed in 1952") is None

    def test_will_not_matched(self):
        """'will' without obligation context should NOT match."""
        # 'will' by itself is NOT in our patterns — this is intentional.
        # "I will probably go" is not a commitment.
        assert classify_clause("I will probably go to the store") is None

    def test_have_not_matched(self):
        """'have' without 'have to' should NOT match."""
        assert classify_clause("I have a dog and a cat") is None
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
class TestExtraction:
    """Full-text commitment extraction built on segmentation + classification."""

    def test_single_obligation(self):
        commits = extract_commitment_texts("You must pay $100.")
        assert len(commits) >= 1
        assert any('must' in c and 'pay' in c for c in commits)

    def test_mixed_signal(self):
        """Should extract commitments and ignore ambient content."""
        text = "You must pay $100 by Friday. The weather is nice. The budget cannot exceed $5000."
        commits = extract_commitment_texts(text)
        assert len(commits) == 2

    def test_no_commitments(self):
        """Ambient-only text should return empty set."""
        commits = extract_commitment_texts("The weather is nice. It rained yesterday.")
        assert len(commits) == 0

    def test_semicolon_signal(self):
        """Paper's canonical example: semicolon-separated clauses."""
        text = "You must pay $100 by Friday if the deal closes; it's likely rainy, so plan accordingly."
        commits = extract_commitment_texts(text)
        assert len(commits) == 1  # Only the must-clause, not the rainy part

    def test_prohibition_extraction(self):
        # extract_commitments returns Commitment objects, not bare strings.
        commits = extract_commitments("The tenant shall not sublet the premises.")
        assert len(commits) == 1
        assert commits[0].modal_type == 'prohibition'

    def test_conditional_detection(self):
        commits = extract_commitments("If the alarm sounds, you must evacuate immediately.")
        assert len(commits) == 1
        assert commits[0].is_conditional

    def test_backward_compat(self):
        """extract_hard_commitments should work with or without nlp param."""
        result = extract_hard_commitments("You must pay.", nlp=None)
        assert isinstance(result, set)
        assert len(result) >= 1
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
# ===================================================================
|
| 143 |
+
# FIDELITY TESTS — The scoring instrument
|
| 144 |
+
# ===================================================================
|
| 145 |
+
|
| 146 |
+
class TestJaccard:
    """Set-overlap fidelity: strict exact-string comparison of commitment sets."""

    def test_perfect_match(self):
        commits = {"you must pay $100"}
        assert fidelity_jaccard(commits, commits) == 1.0

    def test_zero_overlap(self):
        left = {"you must pay $100"}
        right = {"the budget cannot exceed $5000"}
        assert fidelity_jaccard(left, right) == 0.0

    def test_partial_overlap(self):
        # One shared element out of three distinct ones → 1/3.
        left = {"you must pay $100", "the budget cannot exceed $5000"}
        right = {"you must pay $100", "always verify age"}
        assert fidelity_jaccard(left, right) == pytest.approx(1 / 3)

    def test_both_empty(self):
        # Two empty sets are vacuously identical.
        assert fidelity_jaccard(set(), set()) == 1.0

    def test_one_empty(self):
        empty, nonempty = set(), {"a"}
        assert fidelity_jaccard(nonempty, empty) == 0.0
        assert fidelity_jaccard(empty, nonempty) == 0.0
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
class TestCosine:
    """Bag-of-words cosine fidelity — more tolerant of paraphrase than Jaccard."""

    def test_identical(self):
        commits = {"you must pay one hundred dollars by friday"}
        assert fidelity_cosine(commits, commits) == pytest.approx(1.0, abs=0.01)

    def test_paraphrased(self):
        """Cosine should be higher than Jaccard for paraphrases."""
        original = {"you must pay $100 by friday"}
        rewrite = {"payment of $100 is required by friday"}
        # Cosine credits shared vocabulary; Jaccard sees two distinct strings.
        assert fidelity_cosine(original, rewrite) > fidelity_jaccard(original, rewrite)

    def test_unrelated(self):
        commitment = {"you must pay $100 by friday"}
        ambient = {"the weather is sunny and warm today"}
        assert fidelity_cosine(commitment, ambient) < 0.3
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
class TestNLIProxy:
    """Entailment-proxy fidelity: sensitive to loss of modal operators."""

    def test_modal_preserved(self):
        a = {"you must pay $100 by friday"}
        b = {"payment of $100 must happen by friday"}
        score = fidelity_nli_proxy(a, b)
        assert score > 0.5  # 'must', '$100', 'friday' all preserved

    def test_modal_destroyed(self):
        """If modal operator is lost, NLI proxy should catch it."""
        a = {"you must pay $100 by friday"}
        b = {"payment of $100 by friday"}  # 'must' is gone
        score = fidelity_nli_proxy(a, b)
        # Should be lower than when modal is preserved
        a2 = {"you must pay $100 by friday"}
        b2 = {"you must pay $100 by friday"}
        score_full = fidelity_nli_proxy(a2, b2)
        assert score < score_full
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
class TestMinAggregated:
    """Min-aggregation: the weakest of the three metrics sets the final score."""

    def test_all_perfect(self):
        commits = {"you must pay $100"}
        assert fidelity_score(commits, commits) == pytest.approx(1.0, abs=0.01)

    def test_min_is_binding(self):
        """Min-aggregation means the lowest score wins."""
        original = {"you must pay $100 by friday"}
        degraded = {"the budget cannot exceed $5000"}
        detail = fidelity_breakdown(original, degraded)
        floor = min(detail['jaccard'], detail['cosine'], detail['nli_proxy'])
        assert detail['min_aggregated'] == floor
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
# ===================================================================
|
| 223 |
+
# COMPRESSION TESTS
|
| 224 |
+
# ===================================================================
|
| 225 |
+
|
| 226 |
+
class TestExtractiveBackend:
    """The pure-Python extractive compression backend."""

    def test_compresses(self):
        backend = get_backend('extractive')
        text = "You must pay $100 by Friday. The weather is nice. The budget cannot exceed $5000. It rained yesterday."
        compressed = backend.compress(text, target_ratio=0.5)
        # Output must never be longer than the input (word count).
        assert len(compressed.split()) <= len(text.split())

    def test_preserves_modal_sentences(self):
        """Extractive backend should prioritize commitment-bearing sentences."""
        backend = get_backend('extractive')
        text = "You must pay $100. The sky is blue. The grass is green. Trees are tall."
        compressed = backend.compress(text, target_ratio=0.3)
        assert 'must' in compressed.lower()

    def test_single_sentence_passthrough(self):
        # A single sentence cannot be reduced; it should pass through untouched.
        backend = get_backend('extractive')
        text = "You must pay $100."
        assert backend.compress(text) == text
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
# ===================================================================
|
| 247 |
+
# ENFORCEMENT TESTS
|
| 248 |
+
# ===================================================================
|
| 249 |
+
|
| 250 |
+
class TestCommitmentGate:
    """The enforcement gate wrapping a compression backend."""

    def test_gate_passes_when_commitments_preserved(self):
        backend = get_backend('extractive')
        gate = CommitmentGate(backend, threshold=0.5)

        text = "You must pay $100 by Friday. The weather is nice."
        original = extract_commitment_texts(text)

        result = gate.compress(text, original, target_ratio=0.5)
        # Gate returns a structured GateResult, never a bare string.
        assert isinstance(result, GateResult)
        assert result.fidelity >= 0.0

    def test_baseline_has_no_gate(self):
        # baseline_compress bypasses the gate entirely and returns plain text.
        backend = get_backend('extractive')
        text = "You must pay $100 by Friday. The weather is nice."
        compressed = baseline_compress(backend, text, target_ratio=0.5)
        assert isinstance(compressed, str)
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
# ===================================================================
|
| 270 |
+
# LINEAGE TESTS
|
| 271 |
+
# ===================================================================
|
| 272 |
+
|
| 273 |
+
class TestLineage:
    """Hash-linked provenance records and chain-integrity enforcement."""

    def test_hash_deterministic(self):
        assert _hash_text("hello") == _hash_text("hello")
        assert _hash_text("hello") != _hash_text("world")

    def test_commitment_hash_deterministic(self):
        """Set order shouldn't matter."""
        s1 = {"a", "b", "c"}
        s2 = {"c", "a", "b"}
        assert _hash_commitment_set(s1) == _hash_commitment_set(s2)

    def test_chain_integrity(self):
        # A well-formed chain: each record's parent_hash equals the
        # previous record's output_hash.
        chain = LineageChain(
            signal_id="test",
            signal_preview="test signal",
            original_commitment_hash="abc",
            original_commitment_count=1,
            backend="extractive",
            enforced=False,
            depth=2,
        )

        r1 = LineageRecord(
            iteration=1, input_hash="a", output_hash="b",
            commitment_hash="c", commitments_found=1,
            fidelity=0.8, fidelity_detail={}, gate_passed=True,
            parent_hash=None, text_preview="test"
        )
        chain.add_record(r1)

        r2 = LineageRecord(
            iteration=2, input_hash="b", output_hash="d",
            commitment_hash="e", commitments_found=1,
            fidelity=0.7, fidelity_detail={}, gate_passed=True,
            parent_hash="b",  # Must match r1.output_hash
            text_preview="test"
        )
        chain.add_record(r2)
        assert len(chain.records) == 2

    def test_chain_broken_raises(self):
        # A record whose parent_hash doesn't match the previous output_hash
        # must be rejected with ValueError("Chain broken ...").
        chain = LineageChain(
            signal_id="test", signal_preview="test",
            original_commitment_hash="abc", original_commitment_count=1,
            backend="extractive", enforced=False, depth=2,
        )

        r1 = LineageRecord(
            iteration=1, input_hash="a", output_hash="b",
            commitment_hash="c", commitments_found=1,
            fidelity=0.8, fidelity_detail={}, gate_passed=True,
            parent_hash=None, text_preview="test"
        )
        chain.add_record(r1)

        r2_bad = LineageRecord(
            iteration=2, input_hash="x", output_hash="y",
            commitment_hash="z", commitments_found=0,
            fidelity=0.0, fidelity_detail={}, gate_passed=False,
            parent_hash="WRONG",  # Should be "b"
            text_preview="test"
        )
        with pytest.raises(ValueError, match="Chain broken"):
            chain.add_record(r2_bad)

    def test_serialization(self):
        # Round-trip: to_dict exposes fields, to_json is parseable JSON.
        chain = LineageChain(
            signal_id="test", signal_preview="test",
            original_commitment_hash="abc", original_commitment_count=1,
            backend="extractive", enforced=False, depth=1,
        )
        d = chain.to_dict()
        assert 'signal_id' in d
        j = chain.to_json()
        parsed = json.loads(j)
        assert parsed['signal_id'] == 'test'
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
# ===================================================================
|
| 352 |
+
# CORPUS TESTS
|
| 353 |
+
# ===================================================================
|
| 354 |
+
|
| 355 |
+
class TestCorpus:
    """Sanity checks over the canonical 25-signal corpus."""

    def test_corpus_loads(self):
        from src.runner import load_corpus
        assert len(load_corpus()) == 25

    def test_corpus_categories(self):
        from src.runner import load_corpus
        seen = {entry['category'] for entry in load_corpus()}
        # All five canonical categories must be represented.
        for want in ('contractual', 'technical', 'regulatory', 'procedural', 'composite'):
            assert want in seen

    def test_all_signals_have_commitments(self):
        """Every signal in the corpus should have at least one commitment."""
        from src.runner import load_corpus
        for entry in load_corpus():
            found = extract_commitment_texts(entry['signal'])
            assert len(found) > 0, f"No commitments in: {entry['signal'][:60]}..."
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
# ===================================================================
|
| 381 |
+
# INTEGRATION TESTS
|
| 382 |
+
# ===================================================================
|
| 383 |
+
|
| 384 |
+
class TestFullPipeline:
    """End-to-end runs of the protocol on tiny inline corpora."""

    def test_single_signal_protocol(self):
        """Run the full protocol on a single signal."""
        from src.runner import run_protocol
        outcome = run_protocol(
            backend_name='extractive',
            depth=3,
            signals=["You must pay $100 by Friday. The weather is nice. The budget cannot exceed $5000."],
            verbose=False,
        )
        assert outcome.corpus_size == 1
        assert outcome.baseline_avg_fidelity >= 0.0
        assert outcome.enforced_avg_fidelity >= 0.0

    def test_enforcement_helps(self):
        """Enforced should be >= baseline on average."""
        from src.runner import run_protocol
        outcome = run_protocol(
            backend_name='extractive',
            depth=5,
            signals=[
                "You must pay $100 by Friday. The weather is nice. The budget cannot exceed $5000.",
                "The tenant shall not sublet. The building is old. You must provide 30 days notice.",
            ],
            verbose=False,
        )
        # Enforcement should not make things worse
        assert outcome.enforced_avg_fidelity >= outcome.baseline_avg_fidelity
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
# ===================================================================
|
| 415 |
+
# REGRESSION TESTS — prevent v1 bugs from returning
|
| 416 |
+
# ===================================================================
|
| 417 |
+
|
| 418 |
+
class TestRegressions:
|
| 419 |
+
def test_will_false_positive(self):
|
| 420 |
+
"""v1 bug: 'will' matched as commitment keyword."""
|
| 421 |
+
commits = extract_commitment_texts("I will probably go to the store.")
|
| 422 |
+
assert len(commits) == 0
|
| 423 |
+
|
| 424 |
+
def test_have_false_positive(self):
|
| 425 |
+
"""v1 bug: 'have' matched as commitment keyword."""
|
| 426 |
+
commits = extract_commitment_texts("I have a dog and a cat.")
|
| 427 |
+
assert len(commits) == 0
|
| 428 |
+
|
| 429 |
+
def test_soft_modal_not_extracted(self):
|
| 430 |
+
"""v1 bug: 'might', 'could', 'maybe' extracted as commitments."""
|
| 431 |
+
commits = extract_commitment_texts("It might rain. You could try later. Maybe tomorrow.")
|
| 432 |
+
assert len(commits) == 0
|
| 433 |
+
|
| 434 |
+
def test_must_not_is_prohibition(self):
|
| 435 |
+
"""v1 bug: 'must not' matched as obligation 'must'."""
|
| 436 |
+
commits = extract_commitments("You must not enter.")
|
| 437 |
+
assert len(commits) == 1
|
| 438 |
+
assert commits[0].modal_type == 'prohibition'
|
| 439 |
+
|
| 440 |
+
def test_fidelity_not_only_jaccard(self):
|
| 441 |
+
"""v1 bug: fidelity was Jaccard-only, missing paraphrase detection."""
|
| 442 |
+
a = {"you must pay $100 by friday"}
|
| 443 |
+
b = {"payment of $100 is due by friday"}
|
| 444 |
+
# Jaccard should be 0 (different strings)
|
| 445 |
+
assert fidelity_jaccard(a, b) == 0.0
|
| 446 |
+
# But cosine should catch the overlap
|
| 447 |
+
assert fidelity_cosine(a, b) > 0.0
|
| 448 |
+
# Min-aggregated will still be 0 (Jaccard floors it),
|
| 449 |
+
# but cosine being available is the fix
|