# Hugging Face Space app.py — revision c1cb680 (uploaded by botbottingbot).
# NOTE(review): the lines above this comment were web-page scrape residue
# ("raw / history blame / 7.54 kB"), not part of the program.
import json
from functools import partial

import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

# -------------------------------------------------------------
# Load Modules
# -------------------------------------------------------------
# modules.json declares every module; the "type" field splits them into
# generators (draft producers) and checkers (draft reviewers).
with open("modules.json", "r", encoding="utf-8") as f:
    MODULES = json.load(f)["modules"]

GENERATORS = [m for m in MODULES if m.get("type") == "generator"]
# Checkers are looked up by id when a generator declares a "checker_id".
CHECKERS = {m["id"]: m for m in MODULES if m.get("type") == "checker"}
GEN_BY_ID = {m["id"]: m for m in GENERATORS}
# The UI shows human-readable labels; map them back to module ids.
LABEL_TO_ID = {m["label"]: m["id"] for m in GENERATORS}
LABEL_LIST = list(LABEL_TO_ID.keys())

# -------------------------------------------------------------
# Load Model (base LLM)
# Swappable engine: GPT-2 / Llama / Mistral etc.
# -------------------------------------------------------------
tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
# Single shared generation pipeline; per-call options (e.g. do_sample)
# are passed at call time in call_llm.
llm = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=300)

# -------------------------------------------------------------
# Automatic Router Components
# -------------------------------------------------------------
from router.rules import rule_router
from router.zero_shot import classify_task  # zero-shot classifier

# -------------------------------------------------------------
# Domain Adapters (LoRA)
# -------------------------------------------------------------
from domain_heads.loader import load_adapter  # load domain-specific adapter

# -------------------------------------------------------------
# Reasoning Scaffolds
# -------------------------------------------------------------
from reasoning_scaffolds.cot import apply_cot
from reasoning_scaffolds.critique_loop import critique_and_refine

# -------------------------------------------------------------
# Helper: LLM call
# -------------------------------------------------------------
def call_llm(prompt: str) -> str:
    """Run the shared text-generation pipeline greedily and return only the continuation.

    The transformers text-generation pipeline echoes the prompt at the
    front of "generated_text"; strip that echo so callers get just the
    model's completion, whitespace-trimmed.
    """
    generated = llm(prompt, do_sample=False)[0]["generated_text"]
    completion = generated[len(prompt):] if generated.startswith(prompt) else generated
    return completion.strip()
# -------------------------------------------------------------
# Build prompts
# -------------------------------------------------------------
def build_generator_prompt(module_id: str, *inputs: str) -> str:
    """Assemble the structured prompt for a generator module.

    Positional ``inputs`` are matched, in declaration order, against the
    module's input placeholders; missing values default to "".  The prompt
    instructs the model to answer using the module's output sections.
    """
    module = GEN_BY_ID[module_id]
    placeholder_keys = list(module["input_placeholders"].keys())
    sections = module["output_sections"]

    lines = [
        "You are a structured reasoning module.",
        f"MODULE: {module['label']} (id={module_id})",
        "",
        "INPUTS:",
    ]
    for idx, key in enumerate(placeholder_keys):
        value = inputs[idx] if idx < len(inputs) else ""
        lines.append(f"{key.upper()}: {value}")

    lines.append("")
    lines.append("You must respond using these sections:")
    lines.extend(f"- {section}" for section in sections)
    lines.append("")
    # Skeleton the model should fill in, one "[content]" slot per section.
    for section in sections:
        lines.append(f"{section}:")
        lines.append("[content]")
    lines.append("")
    return "\n".join(lines)
def build_checker_prompt(checker_id: str, *vals: str) -> str:
    """Assemble the review prompt for a checker module.

    Every value except the last is treated as part of the original task
    (joined with blank lines); the last value is the draft under review.
    With fewer than two values, the sole value (if any) is the draft and
    the original task is left empty.
    """
    checker = CHECKERS[checker_id]
    sections = checker["output_sections"]

    if len(vals) >= 2:
        original_task = "\n\n".join(vals[:-1])
        draft = vals[-1]
    else:
        original_task = ""
        draft = vals[0] if vals else ""

    lines = [
        "You are a strict reviewer.",
        f"CHECKER: {checker['label']} (id={checker_id})",
        "",
        "ORIGINAL TASK:",
        original_task,
        "",
        "DRAFT OUTPUT:",
        draft,
        "",
        "You must respond using:",
    ]
    lines.extend(f"- {section}" for section in sections)
    lines.append("")
    # Skeleton the model should fill in, one "[content]" slot per section.
    for section in sections:
        lines.append(f"{section}:")
        lines.append("[content]")
    lines.append("")
    return "\n".join(lines)
# -------------------------------------------------------------
# Generator & Checker Execution
# -------------------------------------------------------------
def run_generator(module_id: str, *inputs: str) -> str:
    """Execute a generator module end-to-end.

    Pipeline: optional domain LoRA adapter -> structured prompt ->
    chain-of-thought scaffold -> LLM draft -> critique-and-refine pass.
    """
    module = GEN_BY_ID[module_id]
    # Swap in the domain-specific adapter when the module declares one.
    domain = module.get("domain")
    if domain:
        load_adapter(model, domain)
    scaffolded = apply_cot(build_generator_prompt(module_id, *inputs))
    draft = call_llm(scaffolded)
    # Second pass: self-critique and refinement of the first draft.
    return critique_and_refine(draft)
def run_checker(checker_id: str, *inputs: str) -> str:
    """Execute a checker module over the task inputs plus the draft output."""
    scaffolded = apply_cot(build_checker_prompt(checker_id, *inputs))
    return call_llm(scaffolded)
# -------------------------------------------------------------
# Hybrid Router (rules + zero-shot)
# -------------------------------------------------------------
def hybrid_route(task_text: str):
    """Route a free-text task description to a generator module.

    Returns a (label, module_id, details) triple.  The deterministic rule
    router is tried first; when no rule fires, the zero-shot classifier
    decides.  Empty / whitespace-only input short-circuits.
    """
    if not (task_text and task_text.strip()):
        return "No task provided", "", ""

    # 1. Deterministic rule-based routing.
    matched_id = rule_router(task_text)
    if matched_id:
        return GEN_BY_ID[matched_id]["label"], matched_id, "Rule-based match"

    # 2. Zero-shot classifier fallback.
    label, module_id, details = classify_task(task_text)
    return label, module_id, details
# -------------------------------------------------------------
# UI
# -------------------------------------------------------------
def build_ui():
    """Construct the Gradio Blocks UI: one Auto-Route tab plus one tab per generator module.

    Returns the (unlaunched) gr.Blocks app.  Component creation order
    inside each `with` context determines on-screen layout.
    """
    with gr.Blocks(title="Modular Intelligence") as demo:
        gr.Markdown("# Modular Intelligence\nUpgraded architecture with routing, adapters, and reasoning layers.")
        # -------------------- Auto-Route Tab --------------------
        with gr.Tab("Auto-Route"):
            task_box = gr.Textbox(label="Describe your task", lines=6)
            module_name = gr.Textbox(label="Suggested Module", interactive=False)
            module_id = gr.Textbox(label="Module ID", interactive=False)
            scores = gr.Textbox(label="Routing Details", lines=12, interactive=False)
            classify_btn = gr.Button("Classify Task")
            classify_btn.click(
                fn=hybrid_route,
                inputs=[task_box],
                outputs=[module_name, module_id, scores],
            )
        # -------------------- Module Tabs ------------------------
        for m in GENERATORS:
            with gr.Tab(m["label"]):
                gr.Markdown(f"**Module ID:** `{m['id']}` | **Domain:** `{m.get('domain', 'general')}`")
                inputs = []
                # One textbox per declared input placeholder, in declaration order.
                for key, placeholder in m["input_placeholders"].items():
                    t = gr.Textbox(label=key, placeholder=placeholder, lines=4)
                    inputs.append(t)
                output_box = gr.Textbox(label="Module Output", lines=20)
                # partial(...) binds the module id now, avoiding the classic
                # late-binding pitfall of closing over the loop variable.
                gr.Button("Run Module").click(
                    fn=partial(run_generator, m["id"]),
                    inputs=inputs,
                    outputs=output_box,
                )
                checker_id = m.get("checker_id")
                if checker_id and checker_id in CHECKERS:
                    # Checker receives the same user inputs plus the generator's draft.
                    checker_output = gr.Textbox(label="Checker Output", lines=15)
                    gr.Button("Run Checker").click(
                        fn=partial(run_checker, checker_id),
                        inputs=inputs + [output_box],
                        outputs=checker_output,
                    )
                else:
                    gr.Markdown("_No checker available for this module._")
    return demo
if __name__ == "__main__":
    # Build and serve the Gradio app when run as a script.
    demo_app = build_ui()
    demo_app.launch()