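"""Multi-agent Requirements Lab: a Gradio app that runs three LLM agents
(analyzer → refactor → critic) over a requirements text, optionally using a
diagram image for extra context, and shows each agent's output plus a JSON
trace of the run."""
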
import gradio as gr

from src.config import ModelSpec, RunConfig
from src.orchestrator import Orchestrator
from src.tasks import TaskContext
from src.utils import pretty_json
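
# src/ is not included in this file. From the call sites below, the app
# assumes roughly these interfaces (a sketch inferred from usage, not the
# actual source):
#
#   ModelSpec(model_id, backend, temperature, max_new_tokens, system_prompt)
#   RunConfig(analyzer=ModelSpec, refactor=ModelSpec, critic=ModelSpec)
#   TaskContext(name, language)
#   Orchestrator(cfg, ctx=...).run(text, image=...) -> trace, where trace
#   exposes .analyzer/.refactor/.critic (each with an .output string) and
#   .to_dict(); pretty_json(d) renders that dict for display.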


DEFAULT_MODELS = [
    "openai/gpt-oss-20b",          # OpenAI open-weight :contentReference[oaicite:4]{index=4}
    "deepseek-ai/DeepSeek-R1",     # DeepSeek :contentReference[oaicite:5]{index=5}
    "google/gemma-2-2b-it",        # Gemma (open) :contentReference[oaicite:6]{index=6}
    "google/gemma-3-270m",         # Gemma 3 (multimodal family) :contentReference[oaicite:7]{index=7}
]
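
# These IDs populate the model dropdowns below; adding further Hub model IDs
# to this list exposes them in the UI (queried via the HF Inference API).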


def run_pipeline(
    req_text, img,
    analyzer_model, analyzer_backend, analyzer_temp, analyzer_tokens,
    refactor_model, refactor_backend, refactor_temp, refactor_tokens,
    critic_model, critic_backend, critic_temp, critic_tokens,
):
    if not req_text or not req_text.strip():
        return "Вставь текст требований.", "", "", "{}"

    cfg = RunConfig(
        analyzer=ModelSpec(
            model_id=analyzer_model,
            backend=analyzer_backend,
            temperature=analyzer_temp,
            max_new_tokens=analyzer_tokens,
            system_prompt="Отвечай по-русски. Будь точным, не выдумывай факты.",
        ),
        refactor=ModelSpec(
            model_id=refactor_model,
            backend=refactor_backend,
            temperature=refactor_temp,
            max_new_tokens=refactor_tokens,
            system_prompt="Отвечай по-русски. Не добавляй требований из головы.",
        ),
        critic=ModelSpec(
            model_id=critic_model,
            backend=critic_backend,
            temperature=critic_temp,
            max_new_tokens=critic_tokens,
            system_prompt="Отвечай по-русски. Будь строгим ревьюером.",
        ),
    )

    ctx = TaskContext(name="requirements_analysis_and_refactor", language="ru")
    orch = Orchestrator(cfg, ctx=ctx)

    trace = orch.run(req_text, image=img)
    d = trace.to_dict()

    return (
        trace.analyzer.output,
        trace.refactor.output,
        trace.critic.output,
        pretty_json(d),
    )


with gr.Blocks(title="Multi-agent Requirements Lab") as demo:
    gr.Markdown(
        "# Multi-agent Requirements Lab (text + diagram)\n"
        "LLM-агенты для анализа/рефакторинга требований + извлечение контекста из фото диаграммы.\n\n"
        "**Для работы через HF Inference API добавь `HF_TOKEN` в Settings → Secrets.**"
    )
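
    # The "hf_inference_api" backend presumably reads HF_TOKEN from the
    # environment; token handling itself lives in src/, not in this file.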

    with gr.Row():
        req_text = gr.Textbox(label="Requirements text", lines=14, placeholder="Paste the requirements…")

    with gr.Row():
        img = gr.Image(label="Diagram photo/screenshot (optional)", type="pil")

    def agent_controls(title: str, default_model: str):
        with gr.Accordion(title, open=False):
            model = gr.Dropdown(DEFAULT_MODELS, value=default_model, label="Model ID")
            backend = gr.Radio(["hf_inference_api"], value="hf_inference_api", label="Backend")
            temp = gr.Slider(0.0, 1.5, value=0.2, step=0.05, label="Temperature")
            tokens = gr.Slider(64, 2000, value=600, step=32, label="Max new tokens")
        return model, backend, temp, tokens
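
    # Components are created inside the Accordion layout but returned so they
    # can be wired to the click handler below, outside that layout context.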

    analyzer_model, analyzer_backend, analyzer_temp, analyzer_tokens = agent_controls("Analyzer agent", "openai/gpt-oss-20b")
    refactor_model, refactor_backend, refactor_temp, refactor_tokens = agent_controls("Refactor agent", "google/gemma-2-2b-it")
    critic_model, critic_backend, critic_temp, critic_tokens = agent_controls("Critic agent", "deepseek-ai/DeepSeek-R1")

    run_btn = gr.Button("Run", variant="primary")

    out_an = gr.Markdown(label="Analyzer output")
    out_rf = gr.Markdown(label="Refactor output")
    out_cr = gr.Markdown(label="Critic output")

    with gr.Accordion("Trace (JSON)", open=False):
        trace_json = gr.Code(language="json")

    run_btn.click(
        fn=run_pipeline,
        inputs=[
            req_text, img,
            analyzer_model, analyzer_backend, analyzer_temp, analyzer_tokens,
            refactor_model, refactor_backend, refactor_temp, refactor_tokens,
            critic_model, critic_backend, critic_temp, critic_tokens,
        ],
        outputs=[out_an, out_rf, out_cr, trace_json],
    )
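    # The inputs list order must match run_pipeline's parameter order
    # one-to-one, since Gradio passes component values positionally.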

if __name__ == "__main__":
    demo.launch()