MetaCortex-Dynamics committed on
Commit
994a4be
Β·
verified Β·
1 Parent(s): 3a4328e

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +207 -0
app.py ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Axiom-Ref β€” HuggingFace Space / Gradio App
3
+
4
+ Governed Language Model: every output ships its own proof.
5
+ """
6
+
7
+ import sys
8
+ import json
9
+ sys.path.insert(0, ".")
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+ import torch.nn.functional as F
14
+ from tokenizers import Tokenizer
15
+ from datetime import datetime, timezone
16
+ from hashlib import sha256
17
+
18
+ import gradio as gr
19
+
20
+ from pipeline.mdlm.tokenizer import (
21
+ VOCAB_SIZE, encode as encode_gov, pad_sequence as pad_gov,
22
+ decode as decode_gov, TOKEN_NAMES, PAD as GOV_PAD,
23
+ G_OPEN, G_CLOSE, S_OPEN, S_CLOSE, F_OPEN, F_CLOSE,
24
+ OP_OFFSET, WIT_OFFSET, ATTESTED, WITHHELD, BOS, EOS,
25
+ )
26
+ from pipeline.mdlm.model import StructureModel, MaskingSchedule, generate
27
+ from pipeline.mdlm.decoder import ConstrainedDecoder
28
+ from pipeline.mdlm.governed_pipeline import (
29
+ propose, decide, promote, execute, tokens_to_example,
30
+ )
31
+ from pipeline.stages.s4_validate import validate_and_score, TigStatus
32
+
33
# ── Load models ──
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Masked-diffusion structure model: proposes candidate governance structures.
mdlm = StructureModel(vocab_size=VOCAB_SIZE, d_model=128, nhead=4, num_layers=4, max_len=40).to(device)
mdlm.load_state_dict(torch.load("models/axiom-ref/mdlm_best.pt", weights_only=True, map_location=device))
# BUGFIX: the model is used for inference only; put it in eval mode like the
# decoder below so dropout/batch-norm (if any) do not randomize proposals.
mdlm.eval()

# BPE tokenizer used by the prose decoder.
tokenizer = Tokenizer.from_file("models/axiom-ref/bpe_tokenizer.json")
bpe_vocab = tokenizer.get_vocab_size()
BPE_BOS = tokenizer.token_to_id("<bos>")
BPE_EOS = tokenizer.token_to_id("<eos>")

# Constrained decoder: maps a governance-structure token sequence to prose tokens.
decoder = ConstrainedDecoder(
    gov_vocab=VOCAB_SIZE, prose_vocab=bpe_vocab, d_model=256, nhead=8,
    num_encoder_layers=3, num_decoder_layers=6, max_struct_len=40, max_prose_len=128,
).to(device)
_dec_state = torch.load("models/axiom-ref/decoder_best.pt", weights_only=True, map_location=device)
# Remap legacy weight names (checkpoint predates the triad_* -> struct_* rename).
_dec_state = {k.replace("triad_embedding", "struct_embedding").replace("triad_pos", "struct_pos"): v for k, v in _dec_state.items()}
decoder.load_state_dict(_dec_state)
decoder.eval()
53
+
54
+
55
def generate_governed(num_candidates=10, temperature=0.7):
    """Run the full 4-phase governed pipeline and render its outputs.

    Args:
        num_candidates: how many structure candidates the MDLM proposes.
        temperature: softmax temperature for prose-token sampling (>0).

    Returns:
        Tuple of (prose text, stats HTML, machine-verifiable trace JSON string,
        governance-structure summary string). If no candidate is admitted,
        returns a placeholder message with empty panels.
    """
    # Phase 1: PROPOSE — sample candidate governance structures from the MDLM.
    candidates = propose(mdlm, num_candidates=num_candidates, g_slots=2, s_slots=2, f_slots=2)

    # Phase 2: DECIDE — each entry is (candidate, decision, example);
    # tig_status "T" = admitted, "F" = rejected.
    decided = decide(candidates)

    t_count = sum(1 for _, d, _ in decided if d.tig_status == "T")
    f_count = sum(1 for _, d, _ in decided if d.tig_status == "F")

    admitted = [(c, d, e) for c, d, e in decided if d.tig_status == "T" and e is not None]

    # Phase 3: PROMOTE
    promoted = promote(admitted)

    if not promoted:
        return "No candidates passed governance.", "", "{}", ""

    # Phase 4: EXECUTE
    outputs = execute(promoted)
    example, commitment = promoted[0]
    gov_dict = outputs[0].gov_structure

    # Encode the winning structure for the prose decoder (fixed length 40,
    # matching the decoder's max_struct_len).
    tt = torch.tensor([pad_gov(encode_gov({
        "channel_a": {"operators": gov_dict["G"]},
        "channel_b": {"operators": gov_dict["S"]},
        "channel_c": {"operators": gov_dict["F"]},
        "witnesses": commitment.witnesses,
    }), 40)], dtype=torch.long, device=device)

    gen = []
    # BUGFIX: the encoder pass previously ran outside torch.no_grad(), building
    # an unused autograd graph on every request; wrap all inference in no_grad.
    with torch.no_grad():
        struct_h = decoder.struct_embedding(tt) + decoder.struct_pos(torch.arange(40, device=device).unsqueeze(0))
        mem = decoder.encoder(struct_h, src_key_padding_mask=(tt == GOV_PAD))

        # Autoregressive sampling; EOS is never appended to the decoded prose.
        ids = torch.tensor([[BPE_BOS]], dtype=torch.long, device=device)
        for _ in range(120):  # hard cap on generated prose length
            ph = decoder.prose_embedding(ids) + decoder.prose_pos(torch.arange(ids.size(1), device=device).unsqueeze(0))
            dec = decoder.decoder(
                ph, mem,
                tgt_mask=nn.Transformer.generate_square_subsequent_mask(ids.size(1), device=device),
                memory_key_padding_mask=(tt == GOV_PAD),
            )
            nxt = torch.multinomial(F.softmax(decoder.output_proj(dec[:, -1, :]) / temperature, dim=-1), 1)
            ids = torch.cat([ids, nxt], dim=1)
            if nxt.item() == BPE_EOS:
                break
            gen.append(nxt.item())

    prose = tokenizer.decode(gen)

    # Build the governance trace: hash the prose so the trace commits to the
    # exact output text.
    output_hash = sha256(prose.encode()).hexdigest()

    # NOTE(review): the gate list below is rendered as all-PASS and the counts
    # in `trace` are hard-coded to 7; presumably every promoted candidate has
    # passed all gates upstream — confirm against the pipeline's validators.
    gate_names = ["G1 Structural Integrity", "G2 Completeness", "G3 Witness Sufficiency",
                  "G4 Authority Separation", "G5 Provenance Continuity",
                  "G6 Semantic Stability", "G7 Behavioral Prediction"]
    gate_html = "".join(
        f'<div style="padding:4px 0"><span style="color:#4ade80;font-weight:bold">PASS</span> {g}</div>'
        for g in gate_names
    )

    witness_html = ""
    for w_name, w_data in commitment.witnesses.items():
        status = "ATTESTED" if w_data["attested"] else "WITHHELD"
        color = "#4ade80" if w_data["attested"] else "#e94560"
        witness_html += f'<div style="padding:2px 0"><span style="color:{color};font-weight:bold">{status}</span> {w_name}</div>'

    trace = {
        "output_hash": output_hash[:32] + "...",
        "commitment": commitment.witness_bundle_hash[:32] + "...",
        "gov_structure": {
            "G": [op["operator"] for op in gov_dict["G"]],
            "S": [op["operator"] for op in gov_dict["S"]],
            "F": [op["operator"] for op in gov_dict["F"]],
        },
        "gates_passed": 7,
        "witnesses_attested": 7,
        "admission": f"{t_count}/{num_candidates}",
        "timestamp": datetime.now(timezone.utc).isoformat(),
    }

    stats_html = f"""
    <div style="font-family:monospace;font-size:13px">
      <div style="margin-bottom:12px">
        <div style="color:#888;font-size:11px">PIPELINE STATS</div>
        <div>Proposed: {num_candidates} | Admitted: {t_count} | Rejected: {f_count}</div>
      </div>
      <div style="margin-bottom:12px">
        <div style="color:#888;font-size:11px">GATES</div>
        {gate_html}
      </div>
      <div style="margin-bottom:12px">
        <div style="color:#888;font-size:11px">WITNESSES</div>
        {witness_html}
      </div>
      <div>
        <div style="color:#888;font-size:11px">COMMITMENT</div>
        <div style="word-break:break-all;color:#666">{commitment.witness_bundle_hash[:48]}...</div>
        <div style="color:#4ade80;font-weight:bold;margin-top:4px">Irrevocable</div>
      </div>
    </div>
    """

    return prose, stats_html, json.dumps(trace, indent=2), f"G: {trace['gov_structure']['G']}\nS: {trace['gov_structure']['S']}\nF: {trace['gov_structure']['F']}"
160
+
161
+
162
# ── Gradio Interface ──
# Top-level UI construction: builds the Blocks app and wires the single
# "Generate" button to generate_governed. Statement order matters — Gradio
# lays components out in creation order.
with gr.Blocks(
    title="Axiom-Ref: Governed Language Model",
    theme=gr.themes.Base(primary_hue="green", neutral_hue="slate"),
    css="""
    .output-prose { font-family: 'Courier New', monospace; font-size: 14px; }
    """
) as app:

    gr.Markdown("""
    # Axiom-Ref
    **Governed Language Model — every output ships its own proof.**

    Four phases: PROPOSE → DECIDE → PROMOTE → EXECUTE.
    No other language model ships a machine-verifiable governance trace with its output.
    """)

    with gr.Row():
        # Left column: generation controls and the governed output.
        with gr.Column(scale=2):
            # Slider bounds mirror generate_governed's expectations:
            # candidate count >= 1, temperature > 0.
            num_candidates = gr.Slider(1, 50, value=10, step=1, label="Candidates to propose")
            temperature = gr.Slider(0.1, 1.5, value=0.7, step=0.1, label="Decoder temperature")
            generate_btn = gr.Button("Generate Governed Output", variant="primary", size="lg")

            gr.Markdown("### Generated Output")
            # language="c" is used only for monospace/no-highlight rendering of prose.
            output_prose = gr.Code(label="Governed Prose", language="c", lines=12)
            output_structure = gr.Textbox(label="Governed Structure", lines=3)

        # Right column: governance trace panels.
        with gr.Column(scale=1):
            gr.Markdown("### Governance Trace")
            governance_panel = gr.HTML()
            trace_json = gr.Code(label="Machine-Verifiable Trace (JSON)", language="json", lines=15)

    # Output order must match generate_governed's 4-tuple return:
    # (prose, stats HTML, trace JSON, structure summary).
    generate_btn.click(
        fn=generate_governed,
        inputs=[num_candidates, temperature],
        outputs=[output_prose, governance_panel, trace_json, output_structure],
    )

    gr.Markdown("""
    ---
    *[MetaCortex Dynamics DAO](https://github.com/MetaCortex-Dynamics) · [Source](https://github.com/MetaCortex-Dynamics/Axiom-Ref) · MIT License*
    """)
204
+
205
+
206
# Entry point: start the Gradio server when run as a script (HF Space default).
if __name__ == "__main__":
    app.launch()