AlsuGibadullina committed on
Commit
7f5e68f
·
verified ·
1 Parent(s): 5297a41

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -52
app.py CHANGED
@@ -6,56 +6,51 @@ from src.tasks import TaskContext
6
  from src.utils import pretty_json
7
 
8
 
9
- PROVIDERS = ["openai", "gemini", "deepseek"]
10
-
11
- DEFAULTS = {
12
- "openai": "gpt-5.2",
13
- "gemini": "gemini-3-flash-preview",
14
- "deepseek": "deepseek-chat",
15
- }
16
 
17
 
18
  def run_pipeline(
19
- req_text: str,
20
- image_file, # filepath (str) or None
21
-
22
- analyzer_provider: str, analyzer_model: str, analyzer_temp: float, analyzer_tokens: int,
23
- refactor_provider: str, refactor_model: str, refactor_temp: float, refactor_tokens: int,
24
- critic_provider: str, critic_model: str, critic_temp: float, critic_tokens: int,
25
  ):
26
  if not req_text or not req_text.strip():
27
  return "Вставь текст требований.", "", "", "{}"
28
 
29
- image_path = image_file if isinstance(image_file, str) else None
30
-
31
  cfg = RunConfig(
32
  analyzer=ModelSpec(
33
- provider=analyzer_provider,
34
  model_id=analyzer_model,
 
35
  temperature=analyzer_temp,
36
  max_new_tokens=analyzer_tokens,
37
- system_prompt="Отвечай по-русски. Будь точным. Не выдумывай факты. Если не хватает данных — задавай вопросы.",
38
  ),
39
  refactor=ModelSpec(
40
- provider=refactor_provider,
41
  model_id=refactor_model,
 
42
  temperature=refactor_temp,
43
  max_new_tokens=refactor_tokens,
44
- system_prompt="Отвечай по-русски. Не добавляй требований из головы. Если опираешься на диаграмму — явно укажи, что именно взял.",
45
  ),
46
  critic=ModelSpec(
47
- provider=critic_provider,
48
  model_id=critic_model,
 
49
  temperature=critic_temp,
50
  max_new_tokens=critic_tokens,
51
- system_prompt="Отвечай по-русски. Будь строгим ревьюером. Не прощай двусмысленности и нетестируемые формулировки.",
52
  ),
53
  )
54
 
55
  ctx = TaskContext(name="requirements_analysis_and_refactor", language="ru")
56
  orch = Orchestrator(cfg, ctx=ctx)
57
 
58
- trace = orch.run(req_text, image_path=image_path)
59
  d = trace.to_dict()
60
 
61
  return (
@@ -66,44 +61,36 @@ def run_pipeline(
66
  )
67
 
68
 
69
- with gr.Blocks(title="Multi-agent Requirements Lab (Vision)") as demo:
70
  gr.Markdown(
71
- "# Multi-agent Requirements Lab (Vision)\n"
72
- "Анализ + рефакторинг требований, с поддержкой диаграмм (изображение).\n\n"
73
- "## Secrets для Spaces\n"
74
- "- `OPENAI_API_KEY`\n"
75
- "- `GEMINI_API_KEY`\n"
76
- "- `DEEPSEEK_API_KEY`\n"
77
  )
78
 
79
  with gr.Row():
80
- req_text = gr.Textbox(label="Текст требований", lines=14, placeholder="Вставь требования (сырой текст).")
81
- with gr.Row():
82
- diagram = gr.Image(
83
- label="Фото/диаграмма (опционально)",
84
- type="filepath",
85
- )
86
 
87
- gr.Markdown("## Настройки агентов")
 
88
 
89
- def agent_controls(title: str, default_provider: str):
90
  with gr.Accordion(title, open=False):
91
- provider = gr.Dropdown(PROVIDERS, value=default_provider, label="Provider")
92
- model = gr.Textbox(value=DEFAULTS[default_provider], label="Model ID")
93
  temp = gr.Slider(0.0, 1.5, value=0.2, step=0.05, label="Temperature")
94
- tokens = gr.Slider(64, 4000, value=900, step=32, label="Max output tokens")
95
- return provider, model, temp, tokens
96
 
97
- analyzer_provider, analyzer_model, analyzer_temp, analyzer_tokens = agent_controls("Analyzer agent", "gemini")
98
- refactor_provider, refactor_model, refactor_temp, refactor_tokens = agent_controls("Refactor agent", "openai")
99
- critic_provider, critic_model, critic_temp, critic_tokens = agent_controls("Critic agent", "deepseek")
100
 
101
  run_btn = gr.Button("Run", variant="primary")
102
 
103
- gr.Markdown("## Результаты")
104
- out_an = gr.Markdown()
105
- out_rf = gr.Markdown()
106
- out_cr = gr.Markdown()
107
 
108
  with gr.Accordion("Trace (JSON)", open=False):
109
  trace_json = gr.Code(language="json")
@@ -111,10 +98,10 @@ with gr.Blocks(title="Multi-agent Requirements Lab (Vision)") as demo:
111
  run_btn.click(
112
  fn=run_pipeline,
113
  inputs=[
114
- req_text, diagram,
115
- analyzer_provider, analyzer_model, analyzer_temp, analyzer_tokens,
116
- refactor_provider, refactor_model, refactor_temp, refactor_tokens,
117
- critic_provider, critic_model, critic_temp, critic_tokens,
118
  ],
119
  outputs=[out_an, out_rf, out_cr, trace_json],
120
  )
 
6
  from src.utils import pretty_json
7
 
8
 
9
+ DEFAULT_MODELS = [
10
+ "openai/gpt-oss-20b", # OpenAI open-weight
11
+ "deepseek-ai/DeepSeek-R1", # DeepSeek
12
+ "google/gemma-2-2b-it", # Gemma (open)
13
+ "google/gemma-3-270m", # Gemma 3 (multimodal family)
14
+ ]
 
15
 
16
 
17
  def run_pipeline(
18
+ req_text, img,
19
+ analyzer_model, analyzer_backend, analyzer_temp, analyzer_tokens,
20
+ refactor_model, refactor_backend, refactor_temp, refactor_tokens,
21
+ critic_model, critic_backend, critic_temp, critic_tokens,
 
 
22
  ):
23
  if not req_text or not req_text.strip():
24
  return "Вставь текст требований.", "", "", "{}"
25
 
 
 
26
  cfg = RunConfig(
27
  analyzer=ModelSpec(
 
28
  model_id=analyzer_model,
29
+ backend=analyzer_backend,
30
  temperature=analyzer_temp,
31
  max_new_tokens=analyzer_tokens,
32
+ system_prompt="Отвечай по-русски. Будь точным, не выдумывай факты.",
33
  ),
34
  refactor=ModelSpec(
 
35
  model_id=refactor_model,
36
+ backend=refactor_backend,
37
  temperature=refactor_temp,
38
  max_new_tokens=refactor_tokens,
39
+ system_prompt="Отвечай по-русски. Не добавляй требований из головы.",
40
  ),
41
  critic=ModelSpec(
 
42
  model_id=critic_model,
43
+ backend=critic_backend,
44
  temperature=critic_temp,
45
  max_new_tokens=critic_tokens,
46
+ system_prompt="Отвечай по-русски. Будь строгим ревьюером.",
47
  ),
48
  )
49
 
50
  ctx = TaskContext(name="requirements_analysis_and_refactor", language="ru")
51
  orch = Orchestrator(cfg, ctx=ctx)
52
 
53
+ trace = orch.run(req_text, image=img)
54
  d = trace.to_dict()
55
 
56
  return (
 
61
  )
62
 
63
 
64
+ with gr.Blocks(title="Multi-agent Requirements Lab") as demo:
65
  gr.Markdown(
66
+ "# Multi-agent Requirements Lab (text + diagram)\n"
67
+ "LLM-агенты для анализа/рефакторинга требований + извлечение контекста из фото диаграммы.\n\n"
68
+ "**Для работы через HF Inference API добавь `HF_TOKEN` в Settings → Secrets.**"
 
 
 
69
  )
70
 
71
  with gr.Row():
72
+ req_text = gr.Textbox(label="Текст требований", lines=14, placeholder="Вставь требования")
 
 
 
 
 
73
 
74
+ with gr.Row():
75
+ img = gr.Image(label="Фото/скрин диаграммы (опционально)", type="pil")
76
 
77
+ def agent_controls(title: str, default_model: str):
78
  with gr.Accordion(title, open=False):
79
+ model = gr.Dropdown(DEFAULT_MODELS, value=default_model, label="Model ID")
80
+ backend = gr.Radio(["hf_inference_api"], value="hf_inference_api", label="Backend")
81
  temp = gr.Slider(0.0, 1.5, value=0.2, step=0.05, label="Temperature")
82
+ tokens = gr.Slider(64, 2000, value=600, step=32, label="Max new tokens")
83
+ return model, backend, temp, tokens
84
 
85
+ analyzer_model, analyzer_backend, analyzer_temp, analyzer_tokens = agent_controls("Analyzer agent", "openai/gpt-oss-20b")
86
+ refactor_model, refactor_backend, refactor_temp, refactor_tokens = agent_controls("Refactor agent", "google/gemma-2-2b-it")
87
+ critic_model, critic_backend, critic_temp, critic_tokens = agent_controls("Critic agent", "deepseek-ai/DeepSeek-R1")
88
 
89
  run_btn = gr.Button("Run", variant="primary")
90
 
91
+ out_an = gr.Markdown(label="Analyzer output")
92
+ out_rf = gr.Markdown(label="Refactor output")
93
+ out_cr = gr.Markdown(label="Critic output")
 
94
 
95
  with gr.Accordion("Trace (JSON)", open=False):
96
  trace_json = gr.Code(language="json")
 
98
  run_btn.click(
99
  fn=run_pipeline,
100
  inputs=[
101
+ req_text, img,
102
+ analyzer_model, analyzer_backend, analyzer_temp, analyzer_tokens,
103
+ refactor_model, refactor_backend, refactor_temp, refactor_tokens,
104
+ critic_model, critic_backend, critic_temp, critic_tokens,
105
  ],
106
  outputs=[out_an, out_rf, out_cr, trace_json],
107
  )