lvwerra HF Staff Claude Opus 4.6 commited on
Commit
9bcfe23
·
1 Parent(s): 7ed22a0

Add abort/stop feature and agentui CLI command

Browse files

Adds cooperative abort via threading.Event so users can stop running agents.
Backend abort registry tracks parent-child relationships for recursive abort.
Frontend toggles SEND→STOP during generation, with AbortController on fetch.
Command center STOP aborts all child agents. Aborted agents produce a result
message so parent agents are aware. Also adds `agentui` CLI entry point.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

backend/agent.py CHANGED
@@ -93,7 +93,8 @@ def stream_agent_execution(
93
  model: str,
94
  messages: List[Dict],
95
  serper_key: str,
96
- extra_params: Optional[Dict] = None
 
97
  ):
98
  """
99
  Run the agent tool-calling loop.
@@ -115,6 +116,11 @@ def stream_agent_execution(
115
  has_result = False
116
 
117
  while not done and turns < MAX_TURNS:
 
 
 
 
 
118
  turns += 1
119
 
120
  # --- LLM call with retry ---
@@ -145,7 +151,13 @@ def stream_agent_execution(
145
  "delay": delay,
146
  "message": error_info["message"],
147
  }
148
- time.sleep(delay)
 
 
 
 
 
 
149
  else:
150
  yield {"type": "error", "content": error_info["message"]}
151
  return
@@ -182,6 +194,11 @@ def stream_agent_execution(
182
  # --- Handle tool calls ---
183
  if tool_calls:
184
  for tool_call in tool_calls:
 
 
 
 
 
185
  func_name = tool_call.function.name
186
 
187
  # Parse arguments
 
93
  model: str,
94
  messages: List[Dict],
95
  serper_key: str,
96
+ extra_params: Optional[Dict] = None,
97
+ abort_event=None
98
  ):
99
  """
100
  Run the agent tool-calling loop.
 
116
  has_result = False
117
 
118
  while not done and turns < MAX_TURNS:
119
+ # Check abort before each turn
120
+ if abort_event and abort_event.is_set():
121
+ yield {"type": "aborted"}
122
+ return
123
+
124
  turns += 1
125
 
126
  # --- LLM call with retry ---
 
151
  "delay": delay,
152
  "message": error_info["message"],
153
  }
154
+ if abort_event:
155
+ abort_event.wait(delay)
156
+ if abort_event.is_set():
157
+ yield {"type": "aborted"}
158
+ return
159
+ else:
160
+ time.sleep(delay)
161
  else:
162
  yield {"type": "error", "content": error_info["message"]}
163
  return
 
194
  # --- Handle tool calls ---
195
  if tool_calls:
196
  for tool_call in tool_calls:
197
+ # Check abort between tool calls
198
+ if abort_event and abort_event.is_set():
199
+ yield {"type": "aborted"}
200
+ return
201
+
202
  func_name = tool_call.function.name
203
 
204
  # Parse arguments
backend/agents.py CHANGED
@@ -79,7 +79,11 @@ AGENT_REGISTRY = {
79
  "- Do NOT ask agents to save or create files unless the user explicitly requests it.\n"
80
  "- NEVER overwrite existing files without explicit user permission.\n"
81
  "- Each agent has a task_id. Reuse the task_id when a new message relates to an existing agent "
82
- "(this preserves context and the Jupyter kernel for code agents)."
 
 
 
 
83
  ),
84
  "tool": None,
85
  "tool_arg": None,
 
79
  "- Do NOT ask agents to save or create files unless the user explicitly requests it.\n"
80
  "- NEVER overwrite existing files without explicit user permission.\n"
81
  "- Each agent has a task_id. Reuse the task_id when a new message relates to an existing agent "
82
+ "(this preserves context and the Jupyter kernel for code agents).\n\n"
83
+ "## Handling Aborted Agents\n\n"
84
+ "If an agent's result is 'Generation aborted by user.', the user deliberately stopped it. "
85
+ "Do NOT automatically re-launch the same task. Instead, briefly acknowledge the abort and "
86
+ "ask the user what went wrong or how they'd like to proceed."
87
  ),
88
  "tool": None,
89
  "tool_arg": None,
backend/code.py CHANGED
@@ -204,7 +204,7 @@ def download_files_from_sandbox(sbx: Sandbox, files: List[Dict], files_root: str
204
  return "\n".join(results)
205
 
206
 
207
- def stream_code_execution(client, model: str, messages: List[Dict], sbx: Sandbox, files_root: str = None, extra_params: Optional[Dict] = None):
208
  """
209
  Stream code execution results
210
 
@@ -226,6 +226,11 @@ def stream_code_execution(client, model: str, messages: List[Dict], sbx: Sandbox
226
  has_result = False
227
 
228
  while not done and turns < MAX_TURNS:
 
 
 
 
 
229
  turns += 1
230
 
231
  # Retry loop for LLM calls
@@ -261,7 +266,13 @@ def stream_code_execution(client, model: str, messages: List[Dict], sbx: Sandbox
261
  "error_type": error_info["type"]
262
  }
263
  import time
264
- time.sleep(delay)
 
 
 
 
 
 
265
  else:
266
  # Final attempt failed or non-retryable error
267
  yield {"type": "error", "content": error_info["message"]}
@@ -298,6 +309,11 @@ def stream_code_execution(client, model: str, messages: List[Dict], sbx: Sandbox
298
  # Handle tool calls
299
  if tool_calls:
300
  for tool_call in tool_calls:
 
 
 
 
 
301
  if tool_call.function.name == "execute_code":
302
  # Parse arguments
303
  try:
 
204
  return "\n".join(results)
205
 
206
 
207
+ def stream_code_execution(client, model: str, messages: List[Dict], sbx: Sandbox, files_root: str = None, extra_params: Optional[Dict] = None, abort_event=None):
208
  """
209
  Stream code execution results
210
 
 
226
  has_result = False
227
 
228
  while not done and turns < MAX_TURNS:
229
+ # Check abort before each turn
230
+ if abort_event and abort_event.is_set():
231
+ yield {"type": "aborted"}
232
+ return
233
+
234
  turns += 1
235
 
236
  # Retry loop for LLM calls
 
266
  "error_type": error_info["type"]
267
  }
268
  import time
269
+ if abort_event:
270
+ abort_event.wait(delay)
271
+ if abort_event.is_set():
272
+ yield {"type": "aborted"}
273
+ return
274
+ else:
275
+ time.sleep(delay)
276
  else:
277
  # Final attempt failed or non-retryable error
278
  yield {"type": "error", "content": error_info["message"]}
 
309
  # Handle tool calls
310
  if tool_calls:
311
  for tool_call in tool_calls:
312
+ # Check abort between tool calls
313
+ if abort_event and abort_event.is_set():
314
+ yield {"type": "aborted"}
315
+ return
316
+
317
  if tool_call.function.name == "execute_code":
318
  # Parse arguments
319
  try:
backend/command.py CHANGED
@@ -56,7 +56,7 @@ def parse_llm_error(error: Exception) -> dict:
56
  }
57
 
58
 
59
- def stream_command_center(client, model: str, messages: List[Dict], extra_params: dict = None):
60
  """
61
  Stream command center responses with agent launching capabilities
62
 
@@ -67,6 +67,11 @@ def stream_command_center(client, model: str, messages: List[Dict], extra_params
67
  done = False
68
 
69
  while not done and turns < MAX_TURNS:
 
 
 
 
 
70
  turns += 1
71
 
72
  # Retry loop for LLM calls
@@ -105,7 +110,13 @@ def stream_command_center(client, model: str, messages: List[Dict], extra_params
105
  "error_type": error_info["type"]
106
  }
107
  import time
108
- time.sleep(delay)
 
 
 
 
 
 
109
  else:
110
  # Final attempt failed or non-retryable error
111
  yield {"type": "error", "content": error_info["message"]}
@@ -127,6 +138,11 @@ def stream_command_center(client, model: str, messages: List[Dict], extra_params
127
  # Handle tool calls (agent launches)
128
  if tool_calls:
129
  for tool_call in tool_calls:
 
 
 
 
 
130
  function_name = tool_call.function.name
131
 
132
  # Parse arguments
 
56
  }
57
 
58
 
59
+ def stream_command_center(client, model: str, messages: List[Dict], extra_params: dict = None, abort_event=None):
60
  """
61
  Stream command center responses with agent launching capabilities
62
 
 
67
  done = False
68
 
69
  while not done and turns < MAX_TURNS:
70
+ # Check abort before each turn
71
+ if abort_event and abort_event.is_set():
72
+ yield {"type": "aborted"}
73
+ return
74
+
75
  turns += 1
76
 
77
  # Retry loop for LLM calls
 
110
  "error_type": error_info["type"]
111
  }
112
  import time
113
+ if abort_event:
114
+ abort_event.wait(delay)
115
+ if abort_event.is_set():
116
+ yield {"type": "aborted"}
117
+ return
118
+ else:
119
+ time.sleep(delay)
120
  else:
121
  # Final attempt failed or non-retryable error
122
  yield {"type": "error", "content": error_info["message"]}
 
138
  # Handle tool calls (agent launches)
139
  if tool_calls:
140
  for tool_call in tool_calls:
141
+ # Check abort between tool calls
142
+ if abort_event and abort_event.is_set():
143
+ yield {"type": "aborted"}
144
+ return
145
+
146
  function_name = tool_call.function.name
147
 
148
  # Parse arguments
backend/image.py CHANGED
@@ -194,7 +194,8 @@ def stream_image_execution(
194
  hf_token: str,
195
  image_gen_model: Optional[str] = None,
196
  image_edit_model: Optional[str] = None,
197
- extra_params: Optional[Dict] = None
 
198
  ):
199
  """
200
  Run the image agent tool-calling loop.
@@ -218,6 +219,11 @@ def stream_image_execution(
218
  result_sent = False
219
 
220
  while not done and turns < MAX_TURNS:
 
 
 
 
 
221
  turns += 1
222
 
223
  # --- LLM call with retry ---
@@ -248,7 +254,13 @@ def stream_image_execution(
248
  "delay": delay,
249
  "message": error_info["message"],
250
  }
251
- time.sleep(delay)
 
 
 
 
 
 
252
  else:
253
  yield {"type": "error", "content": error_info["message"]}
254
  return
@@ -286,6 +298,11 @@ def stream_image_execution(
286
  # --- Handle tool calls ---
287
  if tool_calls:
288
  for tool_call in tool_calls:
 
 
 
 
 
289
  func_name = tool_call.function.name
290
 
291
  # Parse arguments
 
194
  hf_token: str,
195
  image_gen_model: Optional[str] = None,
196
  image_edit_model: Optional[str] = None,
197
+ extra_params: Optional[Dict] = None,
198
+ abort_event=None
199
  ):
200
  """
201
  Run the image agent tool-calling loop.
 
219
  result_sent = False
220
 
221
  while not done and turns < MAX_TURNS:
222
+ # Check abort before each turn
223
+ if abort_event and abort_event.is_set():
224
+ yield {"type": "aborted"}
225
+ return
226
+
227
  turns += 1
228
 
229
  # --- LLM call with retry ---
 
254
  "delay": delay,
255
  "message": error_info["message"],
256
  }
257
+ if abort_event:
258
+ abort_event.wait(delay)
259
+ if abort_event.is_set():
260
+ yield {"type": "aborted"}
261
+ return
262
+ else:
263
+ time.sleep(delay)
264
  else:
265
  yield {"type": "error", "content": error_info["message"]}
266
  return
 
298
  # --- Handle tool calls ---
299
  if tool_calls:
300
  for tool_call in tool_calls:
301
+ # Check abort between tool calls
302
+ if abort_event and abort_event.is_set():
303
+ yield {"type": "aborted"}
304
+ return
305
+
306
  func_name = tool_call.function.name
307
 
308
  # Parse arguments
backend/main.py CHANGED
@@ -11,6 +11,7 @@ import logging
11
  import asyncio
12
  import signal
13
  import sys
 
14
  from datetime import datetime
15
  from concurrent.futures import ThreadPoolExecutor
16
  from .agents import AGENT_REGISTRY, get_default_counters, get_registry_for_frontend
@@ -34,6 +35,39 @@ def signal_handler(signum, frame):
34
  signal.signal(signal.SIGINT, signal_handler)
35
  signal.signal(signal.SIGTERM, signal_handler)
36
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  # Configure logging to match uvicorn's format
38
  logging.basicConfig(
39
  level=logging.INFO,
@@ -213,9 +247,14 @@ class ChatRequest(BaseModel):
213
  research_parallel_workers: Optional[int] = None # Number of parallel workers for research
214
  research_max_websites: Optional[int] = None # Max websites to analyze per research session
215
  agent_id: Optional[str] = None # Unique agent/tab ID for session management
 
216
  frontend_context: Optional[FrontendContext] = None # Dynamic context from frontend
217
 
218
 
 
 
 
 
219
  class TitleRequest(BaseModel):
220
  query: str
221
  endpoint: str # User's configured LLM endpoint
@@ -240,11 +279,19 @@ async def stream_code_agent(
240
  e2b_key: str,
241
  session_id: str,
242
  tab_id: str = "default",
 
243
  frontend_context: Optional[Dict] = None,
244
  extra_params: Optional[Dict] = None
245
  ):
246
  """Handle code agent with execution capabilities"""
 
 
 
 
 
 
247
 
 
248
  if not E2B_AVAILABLE:
249
  yield f"data: {json.dumps({'type': 'error', 'content': 'E2B not available. Install with: pip install e2b-code-interpreter'})}\n\n"
250
  return
@@ -279,7 +326,7 @@ async def stream_code_agent(
279
 
280
  def run_sync_generator():
281
  try:
282
- for update in stream_code_execution(client, model, full_messages, sbx, files_root=FILES_ROOT, extra_params=extra_params):
283
  loop.call_soon_threadsafe(queue.put_nowait, update)
284
  finally:
285
  loop.call_soon_threadsafe(queue.put_nowait, None) # Signal completion
@@ -329,7 +376,7 @@ async def stream_code_agent(
329
 
330
  def run_retry_generator():
331
  try:
332
- for update in stream_code_execution(client, model, full_messages, sbx, files_root=FILES_ROOT, extra_params=extra_params):
333
  loop.call_soon_threadsafe(retry_queue.put_nowait, update)
334
  finally:
335
  loop.call_soon_threadsafe(retry_queue.put_nowait, None)
@@ -360,13 +407,21 @@ async def stream_research_agent(
360
  parallel_workers: Optional[int] = None,
361
  max_websites: Optional[int] = None,
362
  tab_id: str = "default",
 
363
  sub_agent_endpoint: Optional[str] = None,
364
  sub_agent_token: Optional[str] = None,
365
  extra_params: Optional[Dict] = None,
366
  sub_agent_extra_params: Optional[Dict] = None
367
  ):
368
  """Handle research agent with web search"""
 
 
 
 
 
 
369
 
 
370
  if not RESEARCH_AVAILABLE:
371
  yield f"data: {json.dumps({'type': 'error', 'content': 'Research dependencies not available. Install with: pip install trafilatura requests'})}\n\n"
372
  return
@@ -413,7 +468,7 @@ async def stream_research_agent(
413
 
414
  def run_sync_generator():
415
  try:
416
- for update in stream_research(client, model, question, serper_key, max_websites=max_sites, system_prompt=system_prompt, sub_agent_model=analysis_model, parallel_workers=workers, sub_agent_client=sub_agent_client, extra_params=extra_params, sub_agent_extra_params=sub_agent_extra_params):
417
  loop.call_soon_threadsafe(queue.put_nowait, update)
418
  finally:
419
  loop.call_soon_threadsafe(queue.put_nowait, None) # Signal completion
@@ -447,7 +502,14 @@ async def stream_command_center_handler(
447
  extra_params: Optional[Dict] = None
448
  ):
449
  """Handle command center with tool-based agent launching"""
 
 
 
 
 
 
450
 
 
451
  if not COMMAND_AVAILABLE:
452
  # Fallback to regular chat if command tools not available
453
  async for chunk in stream_chat_response(messages, endpoint, token, model, "command", tab_id, extra_params):
@@ -474,7 +536,7 @@ async def stream_command_center_handler(
474
 
475
  def run_sync_generator():
476
  try:
477
- for update in stream_command_center(client, model, full_messages, extra_params=extra_params):
478
  loop.call_soon_threadsafe(queue.put_nowait, update)
479
  finally:
480
  loop.call_soon_threadsafe(queue.put_nowait, None) # Signal completion
@@ -506,10 +568,18 @@ async def stream_web_agent(
506
  model: str,
507
  serper_key: str,
508
  tab_id: str = "default",
 
509
  extra_params: Optional[Dict] = None
510
  ):
511
  """Handle web agent with tools (search, read, screenshot)"""
 
 
 
 
 
 
512
 
 
513
  if not AGENT_AVAILABLE:
514
  async for chunk in stream_chat_response(messages, endpoint, token, model, "agent", tab_id, extra_params):
515
  yield chunk
@@ -528,7 +598,7 @@ async def stream_web_agent(
528
 
529
  def run_sync_generator():
530
  try:
531
- for update in stream_agent_execution(client, model, full_messages, serper_key, extra_params=extra_params):
532
  loop.call_soon_threadsafe(queue.put_nowait, update)
533
  finally:
534
  loop.call_soon_threadsafe(queue.put_nowait, None)
@@ -559,10 +629,18 @@ async def stream_image_agent(
559
  image_gen_model: Optional[str] = None,
560
  image_edit_model: Optional[str] = None,
561
  tab_id: str = "default",
 
562
  extra_params: Optional[Dict] = None
563
  ):
564
  """Handle image agent with HuggingFace image generation tools"""
 
 
 
 
 
 
565
 
 
566
  if not IMAGE_AVAILABLE:
567
  yield f"data: {json.dumps({'type': 'error', 'content': 'Image agent not available. Install with: pip install huggingface_hub Pillow'})}\n\n"
568
  return
@@ -584,7 +662,7 @@ async def stream_image_agent(
584
 
585
  def run_sync_generator():
586
  try:
587
- for update in stream_image_execution(client, model, full_messages, hf_token, image_gen_model=image_gen_model, image_edit_model=image_edit_model, extra_params=extra_params):
588
  loop.call_soon_threadsafe(queue.put_nowait, update)
589
  finally:
590
  loop.call_soon_threadsafe(queue.put_nowait, None)
@@ -775,6 +853,14 @@ async def generate_title(request: TitleRequest):
775
  raise HTTPException(status_code=500, detail=str(e))
776
 
777
 
 
 
 
 
 
 
 
 
778
  @app.post("/api/chat/stream")
779
  async def chat_stream(request: ChatRequest):
780
  """Proxy streaming chat to user's configured LLM endpoint"""
@@ -819,6 +905,7 @@ async def chat_stream(request: ChatRequest):
819
  e2b_key or "",
820
  session_id,
821
  tab_id,
 
822
  frontend_context,
823
  request.extra_params
824
  ),
@@ -847,6 +934,7 @@ async def chat_stream(request: ChatRequest):
847
  request.research_parallel_workers,
848
  None,
849
  tab_id,
 
850
  sub_agent_endpoint,
851
  sub_agent_token,
852
  request.extra_params,
@@ -872,6 +960,7 @@ async def chat_stream(request: ChatRequest):
872
  request.image_gen_model,
873
  request.image_edit_model,
874
  tab_id,
 
875
  request.extra_params
876
  ),
877
  media_type="text/event-stream",
@@ -892,6 +981,7 @@ async def chat_stream(request: ChatRequest):
892
  request.model or "gpt-4",
893
  serper_key or "",
894
  tab_id,
 
895
  request.extra_params
896
  ),
897
  media_type="text/event-stream",
 
11
  import asyncio
12
  import signal
13
  import sys
14
+ import threading
15
  from datetime import datetime
16
  from concurrent.futures import ThreadPoolExecutor
17
  from .agents import AGENT_REGISTRY, get_default_counters, get_registry_for_frontend
 
35
  signal.signal(signal.SIGINT, signal_handler)
36
  signal.signal(signal.SIGTERM, signal_handler)
37
 
38
+ # ============================================
39
+ # Abort Registry
40
+ # ============================================
41
+ # Per-agent abort flags for cooperative cancellation
42
+ _abort_flags: Dict[str, threading.Event] = {}
43
+ _agent_children: Dict[str, set] = {} # parent_id -> set of child_ids
44
+
45
+ def register_agent(agent_id: str, parent_id: str = None) -> threading.Event:
46
+ """Register an agent and return its abort event."""
47
+ event = threading.Event()
48
+ _abort_flags[agent_id] = event
49
+ if parent_id:
50
+ _agent_children.setdefault(parent_id, set()).add(agent_id)
51
+ return event
52
+
53
+ def unregister_agent(agent_id: str):
54
+ """Clean up abort flag and parent-child mappings."""
55
+ _abort_flags.pop(agent_id, None)
56
+ # Remove from parent's children set
57
+ for children in _agent_children.values():
58
+ children.discard(agent_id)
59
+ _agent_children.pop(agent_id, None)
60
+
61
+ def abort_agent_tree(agent_id: str) -> list:
62
+ """Recursively abort an agent and all its children. Returns list of aborted IDs."""
63
+ aborted = []
64
+ if agent_id in _abort_flags:
65
+ _abort_flags[agent_id].set()
66
+ aborted.append(agent_id)
67
+ for child_id in list(_agent_children.get(agent_id, [])):
68
+ aborted.extend(abort_agent_tree(child_id))
69
+ return aborted
70
+
71
  # Configure logging to match uvicorn's format
72
  logging.basicConfig(
73
  level=logging.INFO,
 
247
  research_parallel_workers: Optional[int] = None # Number of parallel workers for research
248
  research_max_websites: Optional[int] = None # Max websites to analyze per research session
249
  agent_id: Optional[str] = None # Unique agent/tab ID for session management
250
+ parent_agent_id: Optional[str] = None # Parent agent ID for abort propagation
251
  frontend_context: Optional[FrontendContext] = None # Dynamic context from frontend
252
 
253
 
254
+ class AbortRequest(BaseModel):
255
+ agent_id: str
256
+
257
+
258
  class TitleRequest(BaseModel):
259
  query: str
260
  endpoint: str # User's configured LLM endpoint
 
279
  e2b_key: str,
280
  session_id: str,
281
  tab_id: str = "default",
282
+ parent_agent_id: Optional[str] = None,
283
  frontend_context: Optional[Dict] = None,
284
  extra_params: Optional[Dict] = None
285
  ):
286
  """Handle code agent with execution capabilities"""
287
+ abort_event = register_agent(tab_id, parent_agent_id)
288
+ try:
289
+ async for chunk in _stream_code_agent_inner(messages, endpoint, token, model, e2b_key, session_id, tab_id, frontend_context, extra_params, abort_event):
290
+ yield chunk
291
+ finally:
292
+ unregister_agent(tab_id)
293
 
294
+ async def _stream_code_agent_inner(messages, endpoint, token, model, e2b_key, session_id, tab_id, frontend_context, extra_params, abort_event):
295
  if not E2B_AVAILABLE:
296
  yield f"data: {json.dumps({'type': 'error', 'content': 'E2B not available. Install with: pip install e2b-code-interpreter'})}\n\n"
297
  return
 
326
 
327
  def run_sync_generator():
328
  try:
329
+ for update in stream_code_execution(client, model, full_messages, sbx, files_root=FILES_ROOT, extra_params=extra_params, abort_event=abort_event):
330
  loop.call_soon_threadsafe(queue.put_nowait, update)
331
  finally:
332
  loop.call_soon_threadsafe(queue.put_nowait, None) # Signal completion
 
376
 
377
  def run_retry_generator():
378
  try:
379
+ for update in stream_code_execution(client, model, full_messages, sbx, files_root=FILES_ROOT, extra_params=extra_params, abort_event=abort_event):
380
  loop.call_soon_threadsafe(retry_queue.put_nowait, update)
381
  finally:
382
  loop.call_soon_threadsafe(retry_queue.put_nowait, None)
 
407
  parallel_workers: Optional[int] = None,
408
  max_websites: Optional[int] = None,
409
  tab_id: str = "default",
410
+ parent_agent_id: Optional[str] = None,
411
  sub_agent_endpoint: Optional[str] = None,
412
  sub_agent_token: Optional[str] = None,
413
  extra_params: Optional[Dict] = None,
414
  sub_agent_extra_params: Optional[Dict] = None
415
  ):
416
  """Handle research agent with web search"""
417
+ abort_event = register_agent(tab_id, parent_agent_id)
418
+ try:
419
+ async for chunk in _stream_research_agent_inner(messages, endpoint, token, model, serper_key, sub_agent_model, parallel_workers, max_websites, tab_id, sub_agent_endpoint, sub_agent_token, extra_params, sub_agent_extra_params, abort_event):
420
+ yield chunk
421
+ finally:
422
+ unregister_agent(tab_id)
423
 
424
+ async def _stream_research_agent_inner(messages, endpoint, token, model, serper_key, sub_agent_model, parallel_workers, max_websites, tab_id, sub_agent_endpoint, sub_agent_token, extra_params, sub_agent_extra_params, abort_event):
425
  if not RESEARCH_AVAILABLE:
426
  yield f"data: {json.dumps({'type': 'error', 'content': 'Research dependencies not available. Install with: pip install trafilatura requests'})}\n\n"
427
  return
 
468
 
469
  def run_sync_generator():
470
  try:
471
+ for update in stream_research(client, model, question, serper_key, max_websites=max_sites, system_prompt=system_prompt, sub_agent_model=analysis_model, parallel_workers=workers, sub_agent_client=sub_agent_client, extra_params=extra_params, sub_agent_extra_params=sub_agent_extra_params, abort_event=abort_event):
472
  loop.call_soon_threadsafe(queue.put_nowait, update)
473
  finally:
474
  loop.call_soon_threadsafe(queue.put_nowait, None) # Signal completion
 
502
  extra_params: Optional[Dict] = None
503
  ):
504
  """Handle command center with tool-based agent launching"""
505
+ abort_event = register_agent(tab_id)
506
+ try:
507
+ async for chunk in _stream_command_center_inner(messages, endpoint, token, model, tab_id, extra_params, abort_event):
508
+ yield chunk
509
+ finally:
510
+ unregister_agent(tab_id)
511
 
512
+ async def _stream_command_center_inner(messages, endpoint, token, model, tab_id, extra_params, abort_event):
513
  if not COMMAND_AVAILABLE:
514
  # Fallback to regular chat if command tools not available
515
  async for chunk in stream_chat_response(messages, endpoint, token, model, "command", tab_id, extra_params):
 
536
 
537
  def run_sync_generator():
538
  try:
539
+ for update in stream_command_center(client, model, full_messages, extra_params=extra_params, abort_event=abort_event):
540
  loop.call_soon_threadsafe(queue.put_nowait, update)
541
  finally:
542
  loop.call_soon_threadsafe(queue.put_nowait, None) # Signal completion
 
568
  model: str,
569
  serper_key: str,
570
  tab_id: str = "default",
571
+ parent_agent_id: Optional[str] = None,
572
  extra_params: Optional[Dict] = None
573
  ):
574
  """Handle web agent with tools (search, read, screenshot)"""
575
+ abort_event = register_agent(tab_id, parent_agent_id)
576
+ try:
577
+ async for chunk in _stream_web_agent_inner(messages, endpoint, token, model, serper_key, tab_id, extra_params, abort_event):
578
+ yield chunk
579
+ finally:
580
+ unregister_agent(tab_id)
581
 
582
+ async def _stream_web_agent_inner(messages, endpoint, token, model, serper_key, tab_id, extra_params, abort_event):
583
  if not AGENT_AVAILABLE:
584
  async for chunk in stream_chat_response(messages, endpoint, token, model, "agent", tab_id, extra_params):
585
  yield chunk
 
598
 
599
  def run_sync_generator():
600
  try:
601
+ for update in stream_agent_execution(client, model, full_messages, serper_key, extra_params=extra_params, abort_event=abort_event):
602
  loop.call_soon_threadsafe(queue.put_nowait, update)
603
  finally:
604
  loop.call_soon_threadsafe(queue.put_nowait, None)
 
629
  image_gen_model: Optional[str] = None,
630
  image_edit_model: Optional[str] = None,
631
  tab_id: str = "default",
632
+ parent_agent_id: Optional[str] = None,
633
  extra_params: Optional[Dict] = None
634
  ):
635
  """Handle image agent with HuggingFace image generation tools"""
636
+ abort_event = register_agent(tab_id, parent_agent_id)
637
+ try:
638
+ async for chunk in _stream_image_agent_inner(messages, endpoint, token, model, hf_token, image_gen_model, image_edit_model, tab_id, extra_params, abort_event):
639
+ yield chunk
640
+ finally:
641
+ unregister_agent(tab_id)
642
 
643
+ async def _stream_image_agent_inner(messages, endpoint, token, model, hf_token, image_gen_model, image_edit_model, tab_id, extra_params, abort_event):
644
  if not IMAGE_AVAILABLE:
645
  yield f"data: {json.dumps({'type': 'error', 'content': 'Image agent not available. Install with: pip install huggingface_hub Pillow'})}\n\n"
646
  return
 
662
 
663
  def run_sync_generator():
664
  try:
665
+ for update in stream_image_execution(client, model, full_messages, hf_token, image_gen_model=image_gen_model, image_edit_model=image_edit_model, extra_params=extra_params, abort_event=abort_event):
666
  loop.call_soon_threadsafe(queue.put_nowait, update)
667
  finally:
668
  loop.call_soon_threadsafe(queue.put_nowait, None)
 
853
  raise HTTPException(status_code=500, detail=str(e))
854
 
855
 
856
+ @app.post("/api/abort")
857
+ async def abort_agent(request: AbortRequest):
858
+ """Abort a running agent and all its children."""
859
+ aborted = abort_agent_tree(request.agent_id)
860
+ logger.info(f"Aborted agents: {aborted}")
861
+ return {"aborted": aborted}
862
+
863
+
864
  @app.post("/api/chat/stream")
865
  async def chat_stream(request: ChatRequest):
866
  """Proxy streaming chat to user's configured LLM endpoint"""
 
905
  e2b_key or "",
906
  session_id,
907
  tab_id,
908
+ request.parent_agent_id,
909
  frontend_context,
910
  request.extra_params
911
  ),
 
934
  request.research_parallel_workers,
935
  None,
936
  tab_id,
937
+ request.parent_agent_id,
938
  sub_agent_endpoint,
939
  sub_agent_token,
940
  request.extra_params,
 
960
  request.image_gen_model,
961
  request.image_edit_model,
962
  tab_id,
963
+ request.parent_agent_id,
964
  request.extra_params
965
  ),
966
  media_type="text/event-stream",
 
981
  request.model or "gpt-4",
982
  serper_key or "",
983
  tab_id,
984
+ request.parent_agent_id,
985
  request.extra_params
986
  ),
987
  media_type="text/event-stream",
backend/research.py CHANGED
@@ -233,6 +233,7 @@ def stream_research(
233
  sub_agent_model: Optional[str] = None,
234
  parallel_workers: int = 8,
235
  max_tool_calls: int = 20,
 
236
  **kwargs
237
  ):
238
  """
@@ -264,6 +265,12 @@ def stream_research(
264
  iterations_without_tool_calls = 0
265
 
266
  while tool_call_count < max_tool_calls:
 
 
 
 
 
 
267
  iteration += 1
268
 
269
  # Call DR-TULU
@@ -348,6 +355,12 @@ def stream_research(
348
  search_idx = 0 # Track which search query we're on within this batch
349
 
350
  for i, tc in enumerate(tool_calls):
 
 
 
 
 
 
351
  tool_call_count += 1
352
 
353
  if tc["name"] == "google_search":
 
233
  sub_agent_model: Optional[str] = None,
234
  parallel_workers: int = 8,
235
  max_tool_calls: int = 20,
236
+ abort_event=None,
237
  **kwargs
238
  ):
239
  """
 
265
  iterations_without_tool_calls = 0
266
 
267
  while tool_call_count < max_tool_calls:
268
+ # Check abort before each iteration
269
+ if abort_event and abort_event.is_set():
270
+ yield {"type": "aborted"}
271
+ yield {"type": "done"}
272
+ return
273
+
274
  iteration += 1
275
 
276
  # Call DR-TULU
 
355
  search_idx = 0 # Track which search query we're on within this batch
356
 
357
  for i, tc in enumerate(tool_calls):
358
+ # Check abort between tool executions
359
+ if abort_event and abort_event.is_set():
360
+ yield {"type": "aborted"}
361
+ yield {"type": "done"}
362
+ return
363
+
364
  tool_call_count += 1
365
 
366
  if tc["name"] == "google_search":
frontend/index.html CHANGED
@@ -7,7 +7,7 @@
7
  <link href="https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@300;400;500;700&display=swap" rel="stylesheet">
8
  <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/prism/1.29.0/themes/prism.min.css">
9
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.16.9/dist/katex.min.css">
10
- <link rel="stylesheet" href="style.css?v=89">
11
  </head>
12
  <body>
13
  <div class="app-container">
@@ -479,6 +479,6 @@
479
  <script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
480
  <script src="https://cdn.jsdelivr.net/npm/katex@0.16.9/dist/katex.min.js"></script>
481
  <script src="research-ui.js?v=23"></script>
482
- <script src="script.js?v=75"></script>
483
  </body>
484
  </html>
 
7
  <link href="https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@300;400;500;700&display=swap" rel="stylesheet">
8
  <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/prism/1.29.0/themes/prism.min.css">
9
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.16.9/dist/katex.min.css">
10
+ <link rel="stylesheet" href="style.css?v=90">
11
  </head>
12
  <body>
13
  <div class="app-container">
 
479
  <script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
480
  <script src="https://cdn.jsdelivr.net/npm/katex@0.16.9/dist/katex.min.js"></script>
481
  <script src="research-ui.js?v=23"></script>
482
+ <script src="script.js?v=79"></script>
483
  </body>
484
  </html>
frontend/script.js CHANGED
@@ -92,6 +92,9 @@ let agentCounters = getDefaultCounters();
92
  // Debounce timer for workspace saving
93
  let saveWorkspaceTimer = null;
94
 
 
 
 
95
  // Timeline data structure for sidebar
96
  // Maps tabId -> { type, title, events: [{type: 'user'|'assistant'|'agent', content, childTabId?}], parentTabId?, isGenerating }
97
  const timelineData = {
@@ -482,7 +485,7 @@ function renderAgentTimeline(tabId, notebook, isNested = false) {
482
  ${childIsGenerating ? `
483
  <div class="agent-progress"><span></span><span></span><span></span></div>
484
  ` : `
485
- <div class="agent-done"></div>
486
  `}
487
  </div>
488
  </div>
@@ -1516,7 +1519,62 @@ function scrollToTimelineEvent(tabId, eventIndex) {
1516
  });
1517
  }
1518
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1519
  async function sendMessage(tabId) {
 
 
 
 
 
 
 
1520
  const content = document.querySelector(`[data-content-id="${tabId}"]`);
1521
  if (!content) return;
1522
 
@@ -1587,6 +1645,9 @@ async function sendMessage(tabId) {
1587
  );
1588
  if (anyChildGenerating || pendingAgentLaunches > 0) {
1589
  commandInputBlocked = true;
 
 
 
1590
  } else {
1591
  input.disabled = false;
1592
  input.focus();
@@ -1600,6 +1661,21 @@ async function sendMessage(tabId) {
1600
  saveWorkspaceDebounced();
1601
  }
1602
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1603
  async function continueCommandCenter() {
1604
  // Called when all launched agents finish — re-run command center with actual results in history
1605
  const chatContainer = document.getElementById('messages-command');
@@ -1619,6 +1695,8 @@ async function continueCommandCenter() {
1619
  );
1620
  if (anyChildGenerating || pendingAgentLaunches > 0) {
1621
  commandInputBlocked = true;
 
 
1622
  } else if (commandInput) {
1623
  commandInput.disabled = false;
1624
  commandInput.focus();
@@ -1802,10 +1880,22 @@ async function streamChatResponse(messages, chatContainer, agentType, tabId) {
1802
  ? currentSettings.models?.[currentSettings.imageEditModel]?.modelId || null
1803
  : null;
1804
 
 
 
 
 
 
 
1805
  try {
 
 
 
 
 
1806
  const response = await fetch(`${backendEndpoint}/chat/stream`, {
1807
  method: 'POST',
1808
  headers: { 'Content-Type': 'application/json' },
 
1809
  body: JSON.stringify({
1810
  messages: messages,
1811
  agent_type: agentType,
@@ -1826,6 +1916,7 @@ async function streamChatResponse(messages, chatContainer, agentType, tabId) {
1826
  research_parallel_workers: currentSettings.researchParallelWorkers || null,
1827
  research_max_websites: currentSettings.researchMaxWebsites || null,
1828
  agent_id: tabId.toString(), // Send unique tab ID for sandbox sessions
 
1829
  frontend_context: getFrontendContext() // Dynamic context for system prompts
1830
  })
1831
  });
@@ -2225,6 +2316,29 @@ async function streamChatResponse(messages, chatContainer, agentType, tabId) {
2225
  // Reset current message element so any subsequent thinking starts fresh
2226
  currentMessageEl = null;
2227
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2228
  } else if (data.type === 'done') {
2229
  // Remove retry indicator on success
2230
  removeRetryIndicator(chatContainer);
@@ -2299,13 +2413,45 @@ async function streamChatResponse(messages, chatContainer, agentType, tabId) {
2299
  }
2300
  } catch (error) {
2301
  hideProgressWidget(chatContainer);
2302
- const errorDiv = document.createElement('div');
2303
- errorDiv.className = 'message assistant';
2304
- errorDiv.innerHTML = `<div class="message-content" style="color: #c62828;">Connection error: ${escapeHtml(error.message)}</div>`;
2305
- chatContainer.appendChild(errorDiv);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2306
  if (tabId) {
2307
  setTabGenerating(tabId, false);
2308
  }
 
 
 
2309
  }
2310
  }
2311
 
@@ -2697,6 +2843,22 @@ function setTabGenerating(tabId, isGenerating) {
2697
  }, 300);
2698
  }
2699
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2700
  // Update timeline to reflect generating state
2701
  setTimelineGenerating(tabId, isGenerating);
2702
 
@@ -2712,6 +2874,7 @@ function setTabGenerating(tabId, isGenerating) {
2712
  );
2713
  if (!anyStillGenerating && pendingAgentLaunches === 0) {
2714
  commandInputBlocked = false;
 
2715
  // Auto-continue: call command center again with agent results now in history
2716
  continueCommandCenter();
2717
  }
 
92
  // Debounce timer for workspace saving
93
  let saveWorkspaceTimer = null;
94
 
95
+ // Abort controllers for in-flight fetch requests (tabId -> AbortController)
96
+ const activeAbortControllers = {};
97
+
98
  // Timeline data structure for sidebar
99
  // Maps tabId -> { type, title, events: [{type: 'user'|'assistant'|'agent', content, childTabId?}], parentTabId?, isGenerating }
100
  const timelineData = {
 
485
  ${childIsGenerating ? `
486
  <div class="agent-progress"><span></span><span></span><span></span></div>
487
  ` : `
488
+ <div class="agent-done${childNotebook.aborted ? ' aborted' : ''}"></div>
489
  `}
490
  </div>
491
  </div>
 
1519
  });
1520
  }
1521
 
1522
// Abort a running agent: cancel its in-flight fetch and ask the backend to
// set the cooperative abort flag so the server-side tool loop stops.
// For the command center (tab 0) this also aborts every generating child
// agent and restores the command input / SEND button.
async function abortAgent(tabId) {
    // Abort the frontend fetch for this tab, if one is in flight.
    const controller = activeAbortControllers[tabId];
    if (controller) {
        controller.abort();
    }

    // POST the backend abort flag for one agent id. Abort is best-effort:
    // the stream may already be closed, so endpoint errors are swallowed.
    const postAbort = (agentId) =>
        fetch('/api/abort', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ agent_id: agentId.toString() })
        }).catch(() => { /* ignore abort endpoint errors */ });

    // For command center (tab 0): also abort all generating child tabs.
    if (tabId === 0) {
        const childAborts = [];
        for (const [childId, td] of Object.entries(timelineData)) {
            if (td.parentTabId === 0 && td.isGenerating) {
                // Abort the child's frontend fetch.
                const childController = activeAbortControllers[childId];
                if (childController) {
                    childController.abort();
                }
                // Abort the child's backend agent.
                childAborts.push(postAbort(childId));
            }
        }
        // Fire the child abort requests concurrently instead of awaiting
        // them one-by-one: STOP with many children costs one round-trip,
        // not N. allSettled so one failed POST doesn't block the rest.
        await Promise.allSettled(childAborts);

        // Unblock command center input and restore SEND button.
        commandInputBlocked = false;
        pendingAgentLaunches = 0;
        setCommandCenterStopState(false);
        const commandInput = document.getElementById('input-command');
        if (commandInput) {
            commandInput.disabled = false;
        }
    }

    // Tell the backend to set the abort flag for this agent itself.
    await postAbort(tabId);
}
1569
+
1570
  async function sendMessage(tabId) {
1571
+ // If tab is currently generating, abort instead of sending
1572
+ // For command center: also abort if input is blocked (sub-agents still running)
1573
+ if (timelineData[tabId]?.isGenerating || (tabId === 0 && commandInputBlocked)) {
1574
+ abortAgent(tabId);
1575
+ return;
1576
+ }
1577
+
1578
  const content = document.querySelector(`[data-content-id="${tabId}"]`);
1579
  if (!content) return;
1580
 
 
1645
  );
1646
  if (anyChildGenerating || pendingAgentLaunches > 0) {
1647
  commandInputBlocked = true;
1648
+ // Keep STOP button visible and input disabled while children run
1649
+ input.disabled = true;
1650
+ setCommandCenterStopState(true);
1651
  } else {
1652
  input.disabled = false;
1653
  input.focus();
 
1661
  saveWorkspaceDebounced();
1662
  }
1663
 
1664
// Swap the command-center submit button between SEND and STOP modes.
// showStop=true shows STOP and force-enables the button so it stays
// clickable while input is blocked; showStop=false restores SEND.
function setCommandCenterStopState(showStop) {
    const commandPane = document.querySelector('[data-content-id="0"]');
    const sendBtn = commandPane
        ? commandPane.querySelector('.input-container button')
        : null;
    if (!sendBtn) return;

    sendBtn.classList.toggle('stop-btn', showStop);
    if (showStop) {
        sendBtn.textContent = 'STOP';
        sendBtn.disabled = false; // keep clickable so the user can STOP
    } else {
        sendBtn.textContent = 'SEND';
    }
}
1678
+
1679
  async function continueCommandCenter() {
1680
  // Called when all launched agents finish — re-run command center with actual results in history
1681
  const chatContainer = document.getElementById('messages-command');
 
1695
  );
1696
  if (anyChildGenerating || pendingAgentLaunches > 0) {
1697
  commandInputBlocked = true;
1698
+ if (commandInput) commandInput.disabled = true;
1699
+ setCommandCenterStopState(true);
1700
  } else if (commandInput) {
1701
  commandInput.disabled = false;
1702
  commandInput.focus();
 
1880
  ? currentSettings.models?.[currentSettings.imageEditModel]?.modelId || null
1881
  : null;
1882
 
1883
+ // Set up AbortController for this request
1884
+ const abortController = new AbortController();
1885
+ if (tabId !== undefined) {
1886
+ activeAbortControllers[tabId] = abortController;
1887
+ }
1888
+
1889
  try {
1890
+ // Determine parent agent ID for abort propagation
1891
+ const parentAgentId = timelineData[tabId]?.parentTabId != null
1892
+ ? timelineData[tabId].parentTabId.toString()
1893
+ : null;
1894
+
1895
  const response = await fetch(`${backendEndpoint}/chat/stream`, {
1896
  method: 'POST',
1897
  headers: { 'Content-Type': 'application/json' },
1898
+ signal: abortController.signal,
1899
  body: JSON.stringify({
1900
  messages: messages,
1901
  agent_type: agentType,
 
1916
  research_parallel_workers: currentSettings.researchParallelWorkers || null,
1917
  research_max_websites: currentSettings.researchMaxWebsites || null,
1918
  agent_id: tabId.toString(), // Send unique tab ID for sandbox sessions
1919
+ parent_agent_id: parentAgentId, // Parent agent ID for abort propagation
1920
  frontend_context: getFrontendContext() // Dynamic context for system prompts
1921
  })
1922
  });
 
2316
  // Reset current message element so any subsequent thinking starts fresh
2317
  currentMessageEl = null;
2318
 
2319
+ } else if (data.type === 'aborted') {
2320
+ // Agent was aborted by user
2321
+ hideProgressWidget(chatContainer);
2322
+ removeRetryIndicator(chatContainer);
2323
+
2324
+ // Mark the action widget as aborted (show × instead of ✓)
2325
+ const widget = actionWidgets[tabId];
2326
+ if (widget) {
2327
+ const orbitIndicator = widget.querySelector('.orbit-indicator');
2328
+ if (orbitIndicator) {
2329
+ const abortedIndicator = document.createElement('div');
2330
+ abortedIndicator.className = 'done-indicator aborted';
2331
+ orbitIndicator.replaceWith(abortedIndicator);
2332
+ }
2333
+ }
2334
+
2335
+ // For timeline agent boxes, mark as aborted
2336
+ if (timelineData[tabId]) {
2337
+ timelineData[tabId].aborted = true;
2338
+ }
2339
+
2340
+ break; // Stop reading stream
2341
+
2342
  } else if (data.type === 'done') {
2343
  // Remove retry indicator on success
2344
  removeRetryIndicator(chatContainer);
 
2413
  }
2414
  } catch (error) {
2415
  hideProgressWidget(chatContainer);
2416
+ if (error.name === 'AbortError') {
2417
+ // User-initiated abort — show as a result block
2418
+ const abortResultText = 'Generation aborted by user.';
2419
+ const resultDiv = document.createElement('div');
2420
+ resultDiv.className = 'research-report';
2421
+ resultDiv.innerHTML = `
2422
+ <div class="report-header">Result</div>
2423
+ <div class="report-content"><p>${abortResultText}</p></div>
2424
+ `;
2425
+ chatContainer.appendChild(resultDiv);
2426
+
2427
+ // Send abort result to parent action widget (so command center knows it was aborted)
2428
+ updateActionWidgetWithResult(tabId, abortResultText, {}, {});
2429
+
2430
+ // Override the done indicator to show × instead of ✓
2431
+ const widget = actionWidgets[tabId];
2432
+ if (widget) {
2433
+ const doneIndicator = widget.querySelector('.done-indicator');
2434
+ if (doneIndicator) {
2435
+ doneIndicator.classList.add('aborted');
2436
+ }
2437
+ }
2438
+
2439
+ // Mark timeline data as aborted
2440
+ if (timelineData[tabId]) {
2441
+ timelineData[tabId].aborted = true;
2442
+ }
2443
+ } else {
2444
+ const errorDiv = document.createElement('div');
2445
+ errorDiv.className = 'message assistant';
2446
+ errorDiv.innerHTML = `<div class="message-content" style="color: #c62828;">Connection error: ${escapeHtml(error.message)}</div>`;
2447
+ chatContainer.appendChild(errorDiv);
2448
+ }
2449
  if (tabId) {
2450
  setTabGenerating(tabId, false);
2451
  }
2452
+ } finally {
2453
+ // Clean up abort controller
2454
+ delete activeAbortControllers[tabId];
2455
  }
2456
  }
2457
 
 
2843
  }, 300);
2844
  }
2845
 
2846
+ // Toggle SEND/STOP button
2847
+ const content = document.querySelector(`[data-content-id="${tabId}"]`);
2848
+ if (content) {
2849
+ const sendBtn = content.querySelector('.input-container button');
2850
+ if (sendBtn) {
2851
+ if (isGenerating) {
2852
+ sendBtn.textContent = 'STOP';
2853
+ sendBtn.classList.add('stop-btn');
2854
+ sendBtn.disabled = false; // Keep enabled so user can click STOP
2855
+ } else {
2856
+ sendBtn.textContent = 'SEND';
2857
+ sendBtn.classList.remove('stop-btn');
2858
+ }
2859
+ }
2860
+ }
2861
+
2862
  // Update timeline to reflect generating state
2863
  setTimelineGenerating(tabId, isGenerating);
2864
 
 
2874
  );
2875
  if (!anyStillGenerating && pendingAgentLaunches === 0) {
2876
  commandInputBlocked = false;
2877
+ setCommandCenterStopState(false);
2878
  // Auto-continue: call command center again with agent results now in history
2879
  continueCommandCenter();
2880
  }
frontend/style.css CHANGED
@@ -765,6 +765,18 @@ body {
765
  transform: rotate(45deg) translate(-1px, -1px);
766
  }
767
 
 
 
 
 
 
 
 
 
 
 
 
 
768
  /* Simple turn - natural height from label, min-height fallback */
769
  .tl-row.turn {
770
  min-height: 12px;
@@ -1772,6 +1784,18 @@ pre code [class*="token"] {
1772
  transform: rotate(45deg) translate(-1px, -1px);
1773
  }
1774
 
 
 
 
 
 
 
 
 
 
 
 
 
1775
  .action-widget-body {
1776
  padding: 12px;
1777
  background: var(--bg-tertiary);
 
765
  transform: rotate(45deg) translate(-1px, -1px);
766
  }
767
 
768
+ /* Aborted agent - cross instead of checkmark */
769
+ .agent-done.aborted::before {
770
+ content: '×';
771
+ width: auto;
772
+ height: auto;
773
+ border: none;
774
+ transform: none;
775
+ font-size: 10px;
776
+ line-height: 1;
777
+ color: var(--text-secondary);
778
+ }
779
+
780
  /* Simple turn - natural height from label, min-height fallback */
781
  .tl-row.turn {
782
  min-height: 12px;
 
1784
  transform: rotate(45deg) translate(-1px, -1px);
1785
  }
1786
 
1787
+ /* Aborted action widget - cross instead of checkmark */
1788
+ .action-widget .done-indicator.aborted::before {
1789
+ content: '×';
1790
+ width: auto;
1791
+ height: auto;
1792
+ border: none;
1793
+ transform: none;
1794
+ font-size: 12px;
1795
+ line-height: 1;
1796
+ color: var(--bg-primary);
1797
+ }
1798
+
1799
  .action-widget-body {
1800
  padding: 12px;
1801
  background: var(--bg-tertiary);
pyproject.toml CHANGED
@@ -16,6 +16,9 @@ dependencies = [
16
  "markdownify",
17
  ]
18
 
 
 
 
19
  [build-system]
20
  requires = ["hatchling"]
21
  build-backend = "hatchling.build"
 
16
  "markdownify",
17
  ]
18
 
19
+ [project.scripts]
20
+ agentui = "backend.main:start"
21
+
22
  [build-system]
23
  requires = ["hatchling"]
24
  build-backend = "hatchling.build"