LordXido committed on
Commit
cbceeb4
Β·
verified Β·
1 Parent(s): ddeb720

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +68 -130
app.py CHANGED
@@ -1,132 +1,70 @@
1
- """
2
- Jarvis X β€” Beyond-SOTA Codex Demo
3
- Fully-contained Gradio app with toy multi-LLM router, constraint engine,
4
- and audit ledger.
5
-
6
- Companions needed in the same directory:
7
- β€’ llm_sim.py – toy LLM provider registry
8
- β€’ contracts.dsl – keyword safety list
9
- β€’ requirements.txt – gradio>=4.0
10
- """
11
-
12
- import json
13
- import time
14
- from pathlib import Path
15
- from typing import Dict, Any
16
-
17
  import gradio as gr
18
- from llm_sim import LLM_REGISTRY # <-- toy LLM providers
19
-
20
- # ─────────────────────────────────────────────
21
- # Ξ›* ── Contract / Safety Engine
22
- # ─────────────────────────────────────────────
23
- CONTRACT_FILE = "contracts.dsl"
24
-
25
- def load_contracts() -> list[str]:
26
- path = Path(CONTRACT_FILE)
27
- if not path.exists():
28
- return []
29
- return [ln.strip() for ln in path.read_text().splitlines()
30
- if ln.strip() and not ln.startswith("#")]
31
-
32
- def contracts_pass(text: str, rules: list[str]) -> bool:
33
- return not any(rule.lower() in text.lower() for rule in rules)
34
-
35
- contracts_cache = load_contracts()
36
-
37
- # ─────────────────────────────────────────────
38
- # Ξ© ── Immutable Audit Ledger
39
- # ─────────────────────────────────────────────
40
- LEDGER_FILE = "ledger.jsonl"
41
-
42
- def log_event(ev: Dict[str, Any]):
43
- with open(LEDGER_FILE, "a", encoding="utf-8") as f:
44
- f.write(json.dumps(ev) + "\n")
45
-
46
- # ─────────────────────────────────────────────
47
- # Meta state (Ξ± weight) β€” stubbed
48
- # ─────────────────────────────────────────────
49
- META_FILE = "meta.json"
50
- def load_meta() -> Dict[str, Any]:
51
- if Path(META_FILE).exists():
52
- return json.loads(Path(META_FILE).read_text())
53
- return {"alpha": 0.5}
54
-
55
- meta_state = load_meta() # Ξ± weight for LLM vs symbolic
56
-
57
- # ─────────────────────────────────────────────
58
- # Decision Logic (toy multi-LLM + symbolic blend)
59
- # ─────────────────────────────────────────────
60
- def decide(intent: str, context: str, alpha: float) -> str:
61
- # 1. Pick toy provider based on simple keywords
62
- if "advanced" in intent.lower():
63
- provider = LLM_REGISTRY["claude"]
64
- elif "concise" in intent.lower():
65
- provider = LLM_REGISTRY["mistral"]
66
- else:
67
- provider = LLM_REGISTRY["gpt"]
68
-
69
- # 2. Generate toy LLM response
70
- prompt = f"Intent: {intent}\nContext: {context}"
71
- llm_out = provider.generate(prompt)
72
-
73
- # 3. Symbolic stub
74
- symbolic_out = "(symbolic system stub)"
75
-
76
- # 4. Blend outputs
77
- blended = (
78
- f"[{provider.name} * {alpha:.2f}] {llm_out}\n"
79
- f"[Symbolic * {1-alpha:.2f}] {symbolic_out}"
80
- )
81
- return blended
82
-
83
- # ─────────────────────────────────────────────
84
- # Gradio Callback
85
- # ─────────────────────────────────────────────
86
- def codex_run(intent: str, context: str):
87
- t = time.time()
88
- alpha = meta_state.get("alpha", 0.5)
89
- output = decide(intent, context, alpha)
90
-
91
- safe = contracts_pass(output, contracts_cache)
92
- if not safe:
93
- output = "Output blocked by contract engine (Ξ›*)."
94
-
95
- decision_trace = "provider_mix Ξ±={:.2f}".format(alpha)
96
- log_event({
97
- "t": t, "intent": intent, "context": context,
98
- "decision": decision_trace, "output": output, "safe": safe
99
- })
100
-
101
- audit = json.dumps({"t": t, "decision": decision_trace, "safe": safe})
102
- return output, decision_trace, audit
103
-
104
- # ─────────────────────────────────────────────
105
- # Gradio UI
106
- # ─────────────────────────────────────────────
107
- with gr.Blocks(title="Jarvis X β€” Beyond-SOTA Codex Demo") as demo:
108
- gr.Markdown("""
109
- # 🧠 Jarvis X β€” Beyond-SOTA Codex Operational Intelligence
110
- **Toy multi-LLM router, constraint engine, audit ledger**
111
-
112
- *Edit `contracts.dsl` to modify safety keywords. All runs append to
113
- `ledger.jsonl`. LLMs here are simulated; replace in `llm_sim.py`
114
- with real API calls when ready.*
115
- """)
116
-
117
- intent_in = gr.Textbox(label="Intent (Ξ¨)", placeholder="e.g. generate code, advanced analysis")
118
- ctx_in = gr.Textbox(label="Context (Ο‡)", lines=4)
119
- run_btn = gr.Button("Run")
120
-
121
- out_box = gr.Textbox(label="System Output (Θ)", lines=6)
122
- dec_box = gr.Textbox(label="Decision Trace", lines=1)
123
- audit_box = gr.Textbox(label="Audit Snapshot", lines=2)
124
-
125
- run_btn.click(
126
- fn=codex_run,
127
- inputs=[intent_in, ctx_in],
128
- outputs=[out_box, dec_box, audit_box]
129
- )
130
 
131
- if __name__ == "__main__":
132
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.optim as optim
4
+ from transformers import AutoModel, AutoTokenizer
 
 
 
 
 
 
 
 
 
 
 
 
5
  import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
class DrMoagiSystem(nn.Module):
    """Toy "translational logic" pipeline on top of a pretrained encoder.

    Pipeline stages: pretrained backbone -> intent encoder -> field
    modulator -> constraint kernel -> LSTM memory operator -> projection
    back to the backbone's 768-dim space.
    """

    def __init__(self, model_name: str = "bert-base-uncased"):
        super().__init__()
        # Pretrained backbone and its matching tokenizer. The 768 below is
        # BERT-base's hidden size; other model_name choices must match it.
        self.model = AutoModel.from_pretrained(model_name)
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.intent_encoder = nn.Linear(768, 128)
        self.field_modulator = nn.Linear(128, 128)
        self.constraint_kernel = nn.Linear(128, 128)
        self.memory_operator = nn.LSTM(128, 128, num_layers=1)
        self.projection_operator = nn.Linear(128, 768)

    def forward(self, input_ids: torch.Tensor, attention_mask: torch.Tensor, memory):
        """Run one pass of the pipeline.

        Args:
            input_ids: token ids from the tokenizer, shape (batch, seq).
            attention_mask: matching attention mask.
            memory: LSTM state — either the proper ``(h_0, c_0)`` tuple,
                a 2-D ``(batch, 128)`` tensor (legacy form, lifted here),
                or ``None`` for a zero state.

        Returns:
            (output, memory): 768-dim projection and the new 2-D memory.
        """
        # Intent Encoder — use the [CLS] (first position) embedding.
        outputs = self.model(input_ids, attention_mask=attention_mask)
        intent = torch.relu(self.intent_encoder(outputs.last_hidden_state[:, 0, :]))

        # Field Modulator
        field = torch.relu(self.field_modulator(intent))

        # Constraint Kernel
        constrained_field = torch.relu(self.constraint_kernel(field))

        # Memory Operator.
        # BUG FIX: nn.LSTM expects its initial state as an (h_0, c_0) tuple
        # of 3-D tensors shaped (num_layers, batch, hidden). The original
        # passed a single 2-D tensor, which raises at runtime. Accept the
        # legacy 2-D form (and None) and lift it to a valid state tuple.
        if memory is None or isinstance(memory, torch.Tensor):
            if memory is None:
                h0 = constrained_field.new_zeros(1, constrained_field.size(0), 128)
            else:
                h0 = memory.unsqueeze(0) if memory.dim() == 2 else memory
            state = (h0, torch.zeros_like(h0))
        else:
            state = memory
        memory_output, _ = self.memory_operator(constrained_field.unsqueeze(0), state)
        memory = memory_output.squeeze(0)

        # Projection Operator
        output = self.projection_operator(memory)

        return output, memory

    def translate(self, input_text: str, context: str) -> str:
        """Tokenize *input_text*, run the pipeline, decode the argmax index.

        NOTE(review): ``context`` is accepted but currently unused — confirm
        whether it should be folded into the prompt or the memory state.
        """
        inputs = self.tokenizer(input_text, return_tensors="pt")
        input_ids = inputs["input_ids"]
        attention_mask = inputs["attention_mask"]
        memory = torch.zeros(1, 128)  # legacy 2-D form; forward() lifts it

        with torch.no_grad():  # inference only — no autograd graph needed
            output, memory = self.forward(input_ids, attention_mask, memory)
        return self.tokenizer.decode(output.argmax(-1), skip_special_tokens=True)
46
+
47
# Initialize the system.
# NOTE(review): module-level side effect — constructing DrMoagiSystem loads
# (and on first run, presumably downloads) the pretrained weights at import
# time; confirm this is acceptable for the deployment target.
system = DrMoagiSystem()
49
+
50
# Gradio callback wrapping the model.
def dr_moagi_interface(input_text, context):
    """Run the system on the UI inputs; surface any failure as plain text."""
    try:
        return system.translate(input_text, context)
    except Exception as e:  # keep the UI alive and report the error inline
        return f"Error: {str(e)}"
57
+
58
# Build the web UI: two text inputs feed the callback, one text output.
_inputs = [
    gr.Textbox(label="Input Text"),
    gr.Textbox(label="Context"),
]
_output = gr.Textbox(label="Output")

interface = gr.Interface(
    fn=dr_moagi_interface,
    inputs=_inputs,
    outputs=_output,
    title="Dr Moagi System",
    description="A universal translational logic operator",
)

# Start the Gradio server (blocks until shut down).
interface.launch()