# app.py — Multi-agent Requirements Lab (Gradio Space entry point, commit 7f5e68f)
import gradio as gr
from src.config import ModelSpec, RunConfig
from src.orchestrator import Orchestrator
from src.tasks import TaskContext
from src.utils import pretty_json
# Open-weight model IDs offered in every agent's "Model ID" dropdown below.
DEFAULT_MODELS = [
    "openai/gpt-oss-20b",       # OpenAI open-weight
    "deepseek-ai/DeepSeek-R1",  # DeepSeek reasoning model
    "google/gemma-2-2b-it",     # Gemma 2 (open, instruction-tuned)
    "google/gemma-3-270m",      # Gemma 3 (multimodal family)
]
def run_pipeline(
    req_text, img,
    analyzer_model, analyzer_backend, analyzer_temp, analyzer_tokens,
    refactor_model, refactor_backend, refactor_temp, refactor_tokens,
    critic_model, critic_backend, critic_temp, critic_tokens,
):
    """Run the three-agent pipeline: analyzer -> refactor -> critic.

    Returns a 4-tuple of (analyzer output, refactor output, critic output,
    pretty-printed JSON trace). When the requirements text is empty or
    whitespace-only, returns a user-facing hint instead of running agents.
    """
    # Guard clause: nothing to analyze without requirements text.
    if not (req_text and req_text.strip()):
        return "Вставь текст требований.", "", "", "{}"

    def _spec(model_id, backend, temperature, max_new_tokens, system_prompt):
        # Thin wrapper so the three agent configs below read uniformly.
        return ModelSpec(
            model_id=model_id,
            backend=backend,
            temperature=temperature,
            max_new_tokens=max_new_tokens,
            system_prompt=system_prompt,
        )

    cfg = RunConfig(
        analyzer=_spec(
            analyzer_model, analyzer_backend, analyzer_temp, analyzer_tokens,
            "Отвечай по-русски. Будь точным, не выдумывай факты.",
        ),
        refactor=_spec(
            refactor_model, refactor_backend, refactor_temp, refactor_tokens,
            "Отвечай по-русски. Не добавляй требований из головы.",
        ),
        critic=_spec(
            critic_model, critic_backend, critic_temp, critic_tokens,
            "Отвечай по-русски. Будь строгим ревьюером.",
        ),
    )

    task_ctx = TaskContext(name="requirements_analysis_and_refactor", language="ru")
    trace = Orchestrator(cfg, ctx=task_ctx).run(req_text, image=img)

    return (
        trace.analyzer.output,
        trace.refactor.output,
        trace.critic.output,
        pretty_json(trace.to_dict()),
    )
# --- UI layout -------------------------------------------------------------
with gr.Blocks(title="Multi-agent Requirements Lab") as demo:
    gr.Markdown(
        "# Multi-agent Requirements Lab (text + diagram)\n"
        "LLM-агенты для анализа/рефакторинга требований + извлечение контекста из фото диаграммы.\n\n"
        "**Для работы через HF Inference API добавь `HF_TOKEN` в Settings → Secrets.**"
    )

    with gr.Row():
        req_text = gr.Textbox(label="Текст требований", lines=14, placeholder="Вставь требования…")
    with gr.Row():
        img = gr.Image(label="Фото/скрин диаграммы (опционально)", type="pil")

    def agent_controls(title: str, default_model: str):
        """Build one collapsible settings panel for a single agent.

        Returns the (model, backend, temperature, max-tokens) widgets so
        the caller can wire them into the pipeline inputs.
        """
        with gr.Accordion(title, open=False):
            widgets = (
                gr.Dropdown(DEFAULT_MODELS, value=default_model, label="Model ID"),
                gr.Radio(["hf_inference_api"], value="hf_inference_api", label="Backend"),
                gr.Slider(0.0, 1.5, value=0.2, step=0.05, label="Temperature"),
                gr.Slider(64, 2000, value=600, step=32, label="Max new tokens"),
            )
        return widgets

    analyzer_model, analyzer_backend, analyzer_temp, analyzer_tokens = agent_controls("Analyzer agent", "openai/gpt-oss-20b")
    refactor_model, refactor_backend, refactor_temp, refactor_tokens = agent_controls("Refactor agent", "google/gemma-2-2b-it")
    critic_model, critic_backend, critic_temp, critic_tokens = agent_controls("Critic agent", "deepseek-ai/DeepSeek-R1")

    run_btn = gr.Button("Run", variant="primary")

    out_an = gr.Markdown(label="Analyzer output")
    out_rf = gr.Markdown(label="Refactor output")
    out_cr = gr.Markdown(label="Critic output")
    with gr.Accordion("Trace (JSON)", open=False):
        trace_json = gr.Code(language="json")

    # Widget order here must match run_pipeline's parameter order exactly.
    pipeline_inputs = [
        req_text, img,
        analyzer_model, analyzer_backend, analyzer_temp, analyzer_tokens,
        refactor_model, refactor_backend, refactor_temp, refactor_tokens,
        critic_model, critic_backend, critic_temp, critic_tokens,
    ]
    run_btn.click(
        fn=run_pipeline,
        inputs=pipeline_inputs,
        outputs=[out_an, out_rf, out_cr, trace_json],
    )
# Entry point: start the Gradio server when run directly (e.g. on a HF Space).
if __name__ == "__main__":
    demo.launch()