| """ | |
| Axiom-Ref β HuggingFace Space / Gradio App | |
| Governed Language Model: every output ships its own proof. | |
| """ | |
import sys
import json

sys.path.insert(0, ".")

import torch
import torch.nn as nn
import torch.nn.functional as F
from tokenizers import Tokenizer
from datetime import datetime, timezone
from hashlib import sha256
import gradio as gr

from pipeline.mdlm.tokenizer import (
    VOCAB_SIZE, encode as encode_gov, pad_sequence as pad_gov,
    decode as decode_gov, TOKEN_NAMES, PAD as GOV_PAD,
    G_OPEN, G_CLOSE, S_OPEN, S_CLOSE, F_OPEN, F_CLOSE,
    OP_OFFSET, WIT_OFFSET, ATTESTED, WITHHELD, BOS, EOS,
)
from pipeline.mdlm.model import StructureModel, MaskingSchedule, generate
from pipeline.mdlm.decoder import ConstrainedDecoder
from pipeline.mdlm.governed_pipeline import (
    propose, decide, promote, execute, tokens_to_example,
)
from pipeline.stages.s4_validate import validate_and_score, TigStatus
# ── Load models ──
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

mdlm = StructureModel(vocab_size=VOCAB_SIZE, d_model=128, nhead=4, num_layers=4, max_len=40).to(device)
mdlm.load_state_dict(torch.load("models/axiom-ref/mdlm_best.pt", weights_only=True, map_location=device))
mdlm.eval()  # disable dropout before sampling structure candidates

tokenizer = Tokenizer.from_file("models/axiom-ref/bpe_tokenizer.json")
bpe_vocab = tokenizer.get_vocab_size()
BPE_BOS = tokenizer.token_to_id("<bos>")
BPE_EOS = tokenizer.token_to_id("<eos>")
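# Defensive check (assumption: the BPE tokenizer was trained with <bos>/<eos> special
# tokens). Tokenizer.token_to_id returns None for unknown tokens, which would otherwise
# fail only later, when the prose prompt tensor is built.
assert BPE_BOS is not None and BPE_EOS is not None, "BPE tokenizer is missing <bos>/<eos> special tokens"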
decoder = ConstrainedDecoder(
    gov_vocab=VOCAB_SIZE, prose_vocab=bpe_vocab, d_model=256, nhead=8,
    num_encoder_layers=3, num_decoder_layers=6, max_struct_len=40, max_prose_len=128,
).to(device)
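# NOTE: the decoder hyperparameters above (d_model=256, 8 heads, 3 encoder / 6 decoder
# layers, max_struct_len=40, max_prose_len=128) must match the checkpoint exactly;
# otherwise load_state_dict below fails with missing/unexpected keys or shape mismatches.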
_dec_state = torch.load("models/axiom-ref/decoder_best.pt", weights_only=True, map_location=device)
# Remap legacy weight names
_dec_state = {
    k.replace("triad_embedding", "struct_embedding").replace("triad_pos", "struct_pos"): v
    for k, v in _dec_state.items()
}
decoder.load_state_dict(_dec_state)
decoder.eval()

def generate_governed(num_candidates=10, temperature=0.7):
    """Run the full 4-phase governed pipeline."""
    # Phase 1: PROPOSE
    candidates = propose(mdlm, num_candidates=num_candidates, g_slots=2, s_slots=2, f_slots=2)

    # Phase 2: DECIDE
    decided = decide(candidates)
    t_count = sum(1 for _, d, _ in decided if d.tig_status == "T")
    f_count = sum(1 for _, d, _ in decided if d.tig_status == "F")
    admitted = [(c, d, e) for c, d, e in decided if d.tig_status == "T" and e is not None]

    # Phase 3: PROMOTE
    promoted = promote(admitted)
    if not promoted:
        return "No candidates passed governance.", "", "{}", ""

    # Phase 4: EXECUTE
    outputs = execute(promoted)
    example, commitment = promoted[0]
    gov_dict = outputs[0].gov_structure

    # Generate prose: re-encode the promoted governance structure as decoder input
    tt = torch.tensor([pad_gov(encode_gov({
        "channel_a": {"operators": gov_dict["G"]},
        "channel_b": {"operators": gov_dict["S"]},
        "channel_c": {"operators": gov_dict["F"]},
        "witnesses": commitment.witnesses,
    }), 40)], dtype=torch.long, device=device)
    # Encode the structure once, then autoregressively sample prose tokens conditioned
    # on it. The whole pass runs under no_grad since no gradients are needed here.
    with torch.no_grad():
        struct_h = decoder.struct_embedding(tt) + decoder.struct_pos(
            torch.arange(40, device=device).unsqueeze(0)
        )
        mem = decoder.encoder(struct_h, src_key_padding_mask=(tt == GOV_PAD))
        ids = torch.tensor([[BPE_BOS]], dtype=torch.long, device=device)
        gen = []
        for _ in range(120):
            ph = decoder.prose_embedding(ids) + decoder.prose_pos(
                torch.arange(ids.size(1), device=device).unsqueeze(0)
            )
            dec = decoder.decoder(
                ph, mem,
                tgt_mask=nn.Transformer.generate_square_subsequent_mask(ids.size(1), device=device),
                memory_key_padding_mask=(tt == GOV_PAD),
            )
            nxt = torch.multinomial(
                F.softmax(decoder.output_proj(dec[:, -1, :]) / temperature, dim=-1), 1
            )
            ids = torch.cat([ids, nxt], dim=1)
            if nxt.item() == BPE_EOS:
                break
            gen.append(nxt.item())
    prose = tokenizer.decode(gen)
    # Build governance trace
    output_hash = sha256(prose.encode()).hexdigest()

    # Render the seven governance gates (candidates reaching this point have been admitted)
    gate_html = ""
    gate_names = ["G1 Structural Integrity", "G2 Completeness", "G3 Witness Sufficiency",
                  "G4 Authority Separation", "G5 Provenance Continuity",
                  "G6 Semantic Stability", "G7 Behavioral Prediction"]
    for g in gate_names:
        gate_html += f'<div style="padding:4px 0"><span style="color:#4ade80;font-weight:bold">PASS</span> {g}</div>'

    # Render witness attestation status from the promoted commitment
    witness_html = ""
    for w_name, w_data in commitment.witnesses.items():
        status = "ATTESTED" if w_data["attested"] else "WITHHELD"
        color = "#4ade80" if w_data["attested"] else "#e94560"
        witness_html += f'<div style="padding:2px 0"><span style="color:{color};font-weight:bold">{status}</span> {w_name}</div>'
    trace = {
        "output_hash": output_hash[:32] + "...",
        "commitment": commitment.witness_bundle_hash[:32] + "...",
        "gov_structure": {
            "G": [op["operator"] for op in gov_dict["G"]],
            "S": [op["operator"] for op in gov_dict["S"]],
            "F": [op["operator"] for op in gov_dict["F"]],
        },
        "gates_passed": len(gate_names),
        # Count attestations from the commitment itself rather than hard-coding 7,
        # so the trace stays consistent with the witness panel above.
        "witnesses_attested": sum(1 for w in commitment.witnesses.values() if w["attested"]),
        "admission": f"{t_count}/{num_candidates}",
        "timestamp": datetime.now(timezone.utc).isoformat(),
    }
| stats_html = f""" | |
| <div style="font-family:monospace;font-size:13px"> | |
| <div style="margin-bottom:12px"> | |
| <div style="color:#888;font-size:11px">PIPELINE STATS</div> | |
| <div>Proposed: {num_candidates} | Admitted: {t_count} | Rejected: {f_count}</div> | |
| </div> | |
| <div style="margin-bottom:12px"> | |
| <div style="color:#888;font-size:11px">GATES</div> | |
| {gate_html} | |
| </div> | |
| <div style="margin-bottom:12px"> | |
| <div style="color:#888;font-size:11px">WITNESSES</div> | |
| {witness_html} | |
| </div> | |
| <div> | |
| <div style="color:#888;font-size:11px">COMMITMENT</div> | |
| <div style="word-break:break-all;color:#666">{commitment.witness_bundle_hash[:48]}...</div> | |
| <div style="color:#4ade80;font-weight:bold;margin-top:4px">Irrevocable</div> | |
| </div> | |
| </div> | |
| """ | |
    structure_str = (
        f"G: {trace['gov_structure']['G']}\n"
        f"S: {trace['gov_structure']['S']}\n"
        f"F: {trace['gov_structure']['F']}"
    )
    return prose, stats_html, json.dumps(trace, indent=2), structure_str
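
# Minimal verification sketch (not wired into the UI): shows how a downstream consumer
# could re-check that a trace actually commits to the prose it ships with. It assumes
# only the trace format built above (a truncated sha256 hex prefix followed by "...");
# verify_trace is an illustrative helper, not part of the pipeline API.
def verify_trace(prose_text: str, trace_json_str: str) -> bool:
    """Return True if the trace's output_hash prefix matches sha256(prose_text)."""
    parsed = json.loads(trace_json_str)
    claimed_prefix = parsed["output_hash"].rstrip(".")  # drop the "..." suffix
    return sha256(prose_text.encode()).hexdigest().startswith(claimed_prefix)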
# ── Gradio Interface ──
with gr.Blocks(
    title="Axiom-Ref: Governed Language Model",
    theme=gr.themes.Base(primary_hue="green", neutral_hue="slate"),
    css="""
    .output-prose { font-family: 'Courier New', monospace; font-size: 14px; }
    """,
) as app:
    gr.Markdown("""
# Axiom-Ref
**Governed Language Model: every output ships its own proof.**

Four phases: PROPOSE → DECIDE → PROMOTE → EXECUTE.
Each output is accompanied by a machine-verifiable governance trace.
""")
    with gr.Row():
        with gr.Column(scale=2):
            num_candidates = gr.Slider(1, 50, value=10, step=1, label="Candidates to propose")
            temperature = gr.Slider(0.1, 1.5, value=0.7, step=0.1, label="Decoder temperature")
            generate_btn = gr.Button("Generate Governed Output", variant="primary", size="lg")
            gr.Markdown("### Generated Output")
            output_prose = gr.Code(label="Governed Prose", language="c", lines=12)
            output_structure = gr.Textbox(label="Governed Structure", lines=3)
        with gr.Column(scale=1):
            gr.Markdown("### Governance Trace")
            governance_panel = gr.HTML()
            trace_json = gr.Code(label="Machine-Verifiable Trace (JSON)", language="json", lines=15)
    generate_btn.click(
        fn=generate_governed,
        inputs=[num_candidates, temperature],
        outputs=[output_prose, governance_panel, trace_json, output_structure],
    )
| gr.Markdown(""" | |
| --- | |
| *[MetaCortex Dynamics DAO](https://github.com/MetaCortex-Dynamics) Β· [Source](https://github.com/MetaCortex-Dynamics/Axiom-Ref) Β· MIT License* | |
| """) | |

if __name__ == "__main__":
    app.launch()