# Standard library
import json
from functools import partial

# Third party
import gradio as gr
from transformers import pipeline

# Module registry: modules.json declares "generator" modules (produce
# structured output from user inputs) and "checker" modules (evaluate a
# draft against the original task inputs).
with open("modules.json", "r", encoding="utf-8") as f:
    MODULES = json.load(f)["modules"]

GENERATORS = [m for m in MODULES if m["type"] == "generator"]
CHECKERS = {m["id"]: m for m in MODULES if m["type"] == "checker"}
GEN_BY_ID = {m["id"]: m for m in GENERATORS}

# Shared text-generation backend, loaded once at import time.
llm = pipeline("text-generation", model="gpt2", max_new_tokens=512)
def call_llm(prompt):
    """Run the shared text-generation pipeline and return only the completion.

    Passes return_full_text=False so the pipeline itself omits the prompt
    from the generated text. The previous startswith()-based prefix strip
    silently failed whenever decoding did not reproduce the prompt
    byte-for-byte (e.g. tokenizer whitespace/unicode normalization),
    leaking the entire prompt back into the returned result.
    """
    result = llm(prompt, max_new_tokens=512, do_sample=False,
                 return_full_text=False)
    return result[0]["generated_text"].strip()
def generator_prompt(module_id, *inputs):
    """Build the LLM prompt for the generator module *module_id*.

    Positional *inputs* are matched, in order, against the module's
    declared input placeholder keys; missing values default to "" and
    surplus values are dropped.
    """
    module = GEN_BY_ID[module_id]
    keys = list(module["input_placeholders"].keys())
    # Pad the positional values to the key count, then pair them up;
    # zip truncates any surplus values, preserving original key order.
    padded = list(inputs) + [""] * (len(keys) - len(inputs))
    values = dict(zip(keys, padded))
    sections = module["output_sections"]

    lines = [
        "You are a structured reasoning module.",
        f"MODULE: {module['label']}",
        "INPUTS:",
    ]
    for key, value in values.items():
        lines.append(f"{key.upper()}: {value}")
    lines.append("")
    lines.append("Produce the following sections:")
    lines.extend(f"- {section}" for section in sections)
    lines.append("")
    # Skeleton the model should fill in, one stanza per section.
    for section in sections:
        lines.extend((f"{section}:", "[content]", ""))
    return "\n".join(lines)
def checker_prompt(checker_id, *vals):
    """Build the evaluator prompt for the checker module *checker_id*.

    All but the last positional value are the original task inputs
    (joined with blank lines); the final value is the draft output to be
    judged. With no values at all, both parts are empty strings.
    """
    checker = CHECKERS[checker_id]
    sections = checker["output_sections"]
    if vals:
        # vals[:-1] is () for a single value, so orig joins to "".
        orig = "\n\n".join(vals[:-1])
        draft = vals[-1]
    else:
        orig = draft = ""

    lines = [
        "You are a strict evaluator.",
        f"CHECKER: {checker['label']}",
        "ORIGINAL TASK:",
        orig,
        "",
        "DRAFT OUTPUT:",
        draft,
        "",
        "Respond with sections:",
    ]
    lines.extend(f"- {section}" for section in sections)
    lines.append("")
    # Skeleton the model should fill in, one stanza per section.
    for section in sections:
        lines.extend((f"{section}:", "[content]", ""))
    return "\n".join(lines)
def run_generator(mid, *inputs):
    """Render the generator prompt for *mid* and return the LLM completion."""
    prompt = generator_prompt(mid, *inputs)
    return call_llm(prompt)
def run_checker(cid, *inputs):
    """Render the checker prompt for *cid* and return the LLM verdict."""
    prompt = checker_prompt(cid, *inputs)
    return call_llm(prompt)
def build_ui():
    """Assemble the Gradio Blocks app.

    One tab per generator module, each with its declared input boxes and
    a run button; when the module references a known checker, a checker
    section is wired to the same inputs plus the module's output box.
    """
    with gr.Blocks(title="Modular Intelligence") as demo:
        gr.Markdown("# Modular Intelligence\nSelect a module and generate output.")
        for module in GENERATORS:
            with gr.Tab(module["label"]):
                gr.Markdown(module["description"])
                boxes = [
                    gr.Textbox(label=key, placeholder=placeholder, lines=4)
                    for key, placeholder in module["input_placeholders"].items()
                ]
                result = gr.Textbox(label="Module output", lines=18)
                # partial binds the module id now, avoiding the classic
                # late-binding-closure bug inside this loop.
                gr.Button("Run module").click(
                    fn=partial(run_generator, module["id"]),
                    inputs=boxes,
                    outputs=result,
                )
                checker_id = module.get("checker_id")
                if module.get("has_checker") and checker_id in CHECKERS:
                    gr.Markdown("### Checker")
                    verdict = gr.Textbox(label="Checker output", lines=14)
                    # The checker receives the user's inputs followed by
                    # the draft output, matching checker_prompt's contract.
                    gr.Button("Run checker").click(
                        fn=partial(run_checker, checker_id),
                        inputs=boxes + [result],
                        outputs=verdict,
                    )
    return demo
if __name__ == "__main__":
    # Build and serve the UI only when executed as a script.
    build_ui().launch()