"""Gradio demo that builds one tab per generator module declared in modules.json;
modules that declare a checker also get a review pass over the generated draft."""
import json
from functools import partial

import gradio as gr
from transformers import pipeline

# Load the module registry that drives the UI (one entry per generator/checker).
with open("modules.json", "r", encoding="utf-8") as f:
    MODULES = json.load(f)["modules"]
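# Illustrative (made-up) shape of modules.json, inferred from the fields this
# script reads; real entries may carry extra keys, and the ids/labels below are
# placeholders only:
# {
#   "modules": [
#     {
#       "id": "brief_writer",
#       "type": "generator",
#       "label": "Brief Writer",
#       "description": "Turns raw notes into a structured brief.",
#       "input_placeholders": {"notes": "Paste your raw notes here..."},
#       "output_sections": ["SUMMARY", "NEXT STEPS"],
#       "has_checker": true,
#       "checker_id": "brief_checker"
#     },
#     {
#       "id": "brief_checker",
#       "type": "checker",
#       "label": "Brief Checker",
#       "output_sections": ["VERDICT", "ISSUES"]
#     }
#   ]
# }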

# Index the registry: each generator gets its own tab, checkers are looked up by id.
GENERATORS = [m for m in MODULES if m["type"] == "generator"]
CHECKERS = {m["id"]: m for m in MODULES if m["type"] == "checker"}
GEN_BY_ID = {m["id"]: m for m in GENERATORS}

# Small demo model loaded once at startup; call_llm() passes generation args per call.
llm = pipeline("text-generation", model="gpt2", max_new_tokens=512)

def call_llm(prompt):
    # Text-generation pipelines echo the prompt; strip it so only the completion remains.
    o = llm(prompt, max_new_tokens=512, do_sample=False)[0]["generated_text"]
    return o[len(prompt):].strip() if o.startswith(prompt) else o

def generator_prompt(mid, *inputs):
    # Assemble a structured prompt from the module's label, the user's textbox
    # values, and the output sections the model is expected to fill in.
    m = GEN_BY_ID[mid]
    keys = list(m["input_placeholders"].keys())
    vals = {k: inputs[i] if i < len(inputs) else "" for i, k in enumerate(keys)}
    secs = m["output_sections"]

    p = []
    p.append(f"MODULE: {m['label']}")
    p.append("INPUT:")
    for k, v in vals.items():
        p.append(f"{k.upper()}: {v}")
    p.append("")
    p.append("OUTPUT SECTIONS:")
    for s in secs:
        p.append(f"- {s}")
    p.append("")
    for s in secs:
        p.append(f"{s}:")
        p.append("[content]")
        p.append("")
    return "\n".join(p)

def checker_prompt(cid, *vals):
    # The last value is the generator's draft; everything before it is the
    # original task input the checker reviews the draft against.
    c = CHECKERS[cid]
    secs = c["output_sections"]
    if len(vals) < 2:
        orig, draft = "", vals[0] if vals else ""
    else:
        orig = "\n\n".join(vals[:-1])
        draft = vals[-1]

    p = []
    p.append(f"CHECKER: {c['label']}")
    p.append("ORIGINAL TASK:")
    p.append(orig)
    p.append("")
    p.append("DRAFT:")
    p.append(draft)
    p.append("")
    p.append("RESPOND WITH:")
    for s in secs:
        p.append(f"- {s}")
    p.append("")
    for s in secs:
        p.append(f"{s}:")
        p.append("[content]")
        p.append("")
    return "\n".join(p)

def run_generator(mid, *inputs):
    return call_llm(generator_prompt(mid, *inputs))

def run_checker(cid, *inputs):
    return call_llm(checker_prompt(cid, *inputs))

def build_ui():
    # One tab per generator module; a checker panel is added when the module
    # declares one via has_checker / checker_id.
    with gr.Blocks(title="Modular Intelligence") as demo:
        gr.Markdown("# Modular Intelligence Demo")

        for m in GENERATORS:
            with gr.Tab(m["label"]):
                gr.Markdown(m["description"])
                inputs = []
                for k, ph in m["input_placeholders"].items():
                    t = gr.Textbox(label=k, placeholder=ph, lines=4)
                    inputs.append(t)

                out = gr.Textbox(label="Output", lines=16)
                gr.Button("Run").click(
                    fn=partial(run_generator, m["id"]),
                    inputs=inputs,
                    outputs=out
                )

                if m.get("has_checker"):
                    cid = m.get("checker_id")
                    if cid in CHECKERS:
                        gr.Markdown("### Checker")
                        chk = gr.Textbox(label="Checker Output", lines=14)
                        gr.Button("Check").click(
                            fn=partial(run_checker, cid),
                            inputs=inputs + [out],
                            outputs=chk
                        )
    return demo

if __name__ == "__main__":
    app = build_ui()
    app.launch()