| import json |
| from functools import partial |
| import gradio as gr |
| from transformers import pipeline |
|
|
# Load the module catalogue that drives the whole UI. The file must sit
# next to this script; only its "modules" list is kept at module level.
with open("modules.json", "r", encoding="utf-8") as f:
    _catalogue = json.load(f)
MODULES = _catalogue["modules"]
|
|
# Index the catalogue three ways: the ordered list of generator modules
# (drives tab order), checkers keyed by id, and generators keyed by id.
# Entries of any other "type" are ignored.
GENERATORS = []
CHECKERS = {}
for _mod in MODULES:
    if _mod["type"] == "generator":
        GENERATORS.append(_mod)
    elif _mod["type"] == "checker":
        CHECKERS[_mod["id"]] = _mod
GEN_BY_ID = {_mod["id"]: _mod for _mod in GENERATORS}
|
|
| llm = pipeline("text-generation", model="gpt2", max_new_tokens=512) |
|
|
def call_llm(prompt, max_new_tokens=512):
    """Generate a completion for *prompt* and return only the new text.

    The HF text-generation pipeline echoes the prompt at the start of
    ``generated_text``; when that prefix is present it is stripped off
    (and the remainder whitespace-trimmed) so callers get just the
    continuation.  Otherwise the raw output is returned unchanged.

    Args:
        prompt: Full prompt string sent to the model.
        max_new_tokens: Generation budget per call.  Previously
            hard-coded to 512; the default preserves old behavior.

    Returns:
        The generated continuation as a string.
    """
    out = llm(prompt, max_new_tokens=max_new_tokens, do_sample=False)[0]["generated_text"]
    if out.startswith(prompt):
        return out[len(prompt):].strip()
    return out
|
|
def generator_prompt(mid, *inputs):
    """Build the LLM prompt for the generator module *mid*.

    Positional *inputs* are matched, in declaration order, against the
    module's ``input_placeholders`` keys; missing trailing values
    default to the empty string (extra values are ignored).
    """
    module = GEN_BY_ID[mid]
    keys = list(module["input_placeholders"].keys())
    sections = module["output_sections"]

    lines = [f"MODULE: {module['label']}", "INPUT:"]
    for idx, key in enumerate(keys):
        value = inputs[idx] if idx < len(inputs) else ""
        lines.append(f"{key.upper()}: {value}")
    lines.append("")
    lines.append("OUTPUT SECTIONS:")
    lines.extend(f"- {sec}" for sec in sections)
    lines.append("")
    # Skeleton the model is expected to fill in, one section at a time.
    for sec in sections:
        lines.extend((f"{sec}:", "[content]", ""))
    return "\n".join(lines)
|
|
def checker_prompt(cid, *vals):
    """Build the review prompt for checker *cid*.

    The last positional value is the draft under review; any earlier
    values are joined (blank-line separated) to form the original task.
    With fewer than two values the task is empty, and with none the
    draft is empty too.
    """
    checker = CHECKERS[cid]
    sections = checker["output_sections"]

    if len(vals) >= 2:
        original_task = "\n\n".join(vals[:-1])
        draft = vals[-1]
    else:
        original_task = ""
        draft = vals[0] if vals else ""

    lines = [
        f"CHECKER: {checker['label']}",
        "ORIGINAL TASK:",
        original_task,
        "",
        "DRAFT:",
        draft,
        "",
        "RESPOND WITH:",
    ]
    lines.extend(f"- {sec}" for sec in sections)
    lines.append("")
    # Skeleton the model is expected to fill in, one section at a time.
    for sec in sections:
        lines.extend((f"{sec}:", "[content]", ""))
    return "\n".join(lines)
|
|
def run_generator(mid, *inputs):
    """Render the prompt for generator *mid* and run it through the LLM."""
    prompt = generator_prompt(mid, *inputs)
    return call_llm(prompt)
|
|
def run_checker(cid, *inputs):
    """Render the prompt for checker *cid* and run it through the LLM."""
    prompt = checker_prompt(cid, *inputs)
    return call_llm(prompt)
|
|
def build_ui():
    """Assemble the Gradio Blocks app.

    One tab per generator module, each holding the module's input
    textboxes, an output box with a Run button, and — when the module
    opts in and its checker exists — an attached checker panel that
    reviews the generated output against the original inputs.
    """
    with gr.Blocks(title="Modular Intelligence") as demo:
        gr.Markdown("# Modular Intelligence Demo")

        for module in GENERATORS:
            with gr.Tab(module["label"]):
                gr.Markdown(module["description"])

                boxes = [
                    gr.Textbox(label=key, placeholder=hint, lines=4)
                    for key, hint in module["input_placeholders"].items()
                ]
                output_box = gr.Textbox(label="Output", lines=16)
                gr.Button("Run").click(
                    fn=partial(run_generator, module["id"]),
                    inputs=boxes,
                    outputs=output_box,
                )

                # Checker panel is optional: wired only when the module
                # asks for one AND the referenced checker is known.
                checker_id = module.get("checker_id")
                if module.get("has_checker") and checker_id in CHECKERS:
                    gr.Markdown("### Checker")
                    checker_box = gr.Textbox(label="Checker Output", lines=14)
                    gr.Button("Check").click(
                        fn=partial(run_checker, checker_id),
                        inputs=boxes + [output_box],
                        outputs=checker_box,
                    )
    return demo
|
|
# Script entry point: build the UI once and serve it.
if __name__ == "__main__":
    build_ui().launch()