frdel commited on
Commit
c9c68de
·
2 Parent(s): ed9df3988f85d2

Merge branch 'development' into testing

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .dockerignore +2 -1
  2. .gitignore +9 -3
  3. agent.py +88 -30
  4. conf/model_providers.yaml +9 -0
  5. conf/projects.default.gitignore +13 -0
  6. docker/run/fs/ins/install_A0.sh +2 -0
  7. docs/res/banner_high.png +3 -0
  8. models.py +59 -41
  9. prompts/agent.extras.project.file_structure.md +9 -0
  10. prompts/agent.system.main.tips.md +1 -1
  11. prompts/agent.system.projects.active.md +12 -0
  12. prompts/agent.system.projects.inactive.md +1 -0
  13. prompts/agent.system.projects.main.md +5 -0
  14. prompts/agent.system.tool.document_query.md +43 -41
  15. prompts/agent.system.tool.wait.md +34 -0
  16. prompts/fw.code.running.md +1 -0
  17. prompts/fw.wait_complete.md +1 -0
  18. python/api/api_log_get.py +1 -1
  19. python/api/api_message.py +2 -1
  20. python/api/api_reset_chat.py +1 -1
  21. python/api/api_terminate_chat.py +1 -1
  22. python/api/chat_create.py +32 -0
  23. python/api/chat_export.py +1 -1
  24. python/api/chat_files_path_get.py +23 -0
  25. python/api/chat_remove.py +1 -1
  26. python/api/chat_reset.py +1 -1
  27. python/api/csrf_token.py +89 -2
  28. python/api/ctx_window_get.py +1 -1
  29. python/api/get_work_dir_files.py +2 -2
  30. python/api/history_get.py +1 -1
  31. python/api/import_knowledge.py +1 -1
  32. python/api/knowledge_path_get.py +25 -0
  33. python/api/knowledge_reindex.py +21 -0
  34. python/api/memory_dashboard.py +8 -20
  35. python/api/message.py +1 -1
  36. python/api/nudge.py +1 -1
  37. python/api/pause.py +1 -1
  38. python/api/poll.py +19 -11
  39. python/api/projects.py +91 -0
  40. python/api/scheduler_task_create.py +32 -5
  41. python/api/scheduler_task_delete.py +1 -1
  42. python/api/scheduler_task_update.py +4 -0
  43. python/api/scheduler_tasks_list.py +2 -2
  44. python/api/synthesize.py +4 -2
  45. python/api/transcribe.py +4 -2
  46. python/api/tunnel.py +42 -39
  47. python/api/tunnel_proxy.py +26 -23
  48. python/extensions/error_format/_10_mask_errors.py +2 -2
  49. python/extensions/hist_add_before/_10_mask_content.py +2 -2
  50. python/extensions/message_loop_prompts_after/_75_include_project_extras.py +47 -0
.dockerignore CHANGED
@@ -5,9 +5,10 @@
5
  # Large / generated data
6
  memory/**
7
 
8
- # Logs & tmp
9
  logs/*
10
  tmp/*
 
11
 
12
  # Knowledge directory – keep only default/
13
  knowledge/**
 
5
  # Large / generated data
6
  memory/**
7
 
8
+ # Logs, tmp, usr
9
  logs/*
10
  tmp/*
11
+ usr/*
12
 
13
  # Knowledge directory – keep only default/
14
  knowledge/**
.gitignore CHANGED
@@ -2,10 +2,12 @@
2
  **/.DS_Store
3
  **/.env
4
  **/__pycache__/
 
5
  **/.conda/
6
 
7
- #Ignore cursor rules
8
  .cursor/
 
9
 
10
  # ignore test files in root dir
11
  /*.test.py
@@ -20,8 +22,9 @@ memory/**
20
  # Handle logs directory
21
  logs/*
22
 
23
- # Handle tmp directory
24
  tmp/*
 
25
 
26
  # Handle knowledge directory
27
  knowledge/**
@@ -39,4 +42,7 @@ instruments/**
39
 
40
  # Global rule to include .gitkeep files anywhere
41
  !**/.gitkeep
42
- agent_history.gif
 
 
 
 
2
  **/.DS_Store
3
  **/.env
4
  **/__pycache__/
5
+ *.py[cod]
6
  **/.conda/
7
 
8
+ #Ignore IDE files
9
  .cursor/
10
+ .windsurf/
11
 
12
  # ignore test files in root dir
13
  /*.test.py
 
22
  # Handle logs directory
23
  logs/*
24
 
25
+ # Handle tmp and usr directory
26
  tmp/*
27
+ usr/*
28
 
29
  # Handle knowledge directory
30
  knowledge/**
 
42
 
43
  # Global rule to include .gitkeep files anywhere
44
  !**/.gitkeep
45
+
46
+ # for browser-use
47
+ agent_history.gif
48
+
agent.py CHANGED
@@ -11,7 +11,7 @@ from enum import Enum
11
  import uuid
12
  import models
13
 
14
- from python.helpers import extract_tools, files, errors, history, tokens
15
  from python.helpers import dirty_json
16
  from python.helpers.print_style import PrintStyle
17
 
@@ -53,12 +53,24 @@ class AgentContext:
53
  created_at: datetime | None = None,
54
  type: AgentContextType = AgentContextType.USER,
55
  last_message: datetime | None = None,
 
 
 
56
  ):
57
- # build context
58
  self.id = id or AgentContext.generate_id()
 
 
 
 
 
 
 
 
59
  self.name = name
60
  self.config = config
61
  self.log = log or Log.Log()
 
62
  self.agent0 = agent0 or Agent(0, self.config, self)
63
  self.paused = paused
64
  self.streaming_agent = streaming_agent
@@ -67,18 +79,36 @@ class AgentContext:
67
  self.type = type
68
  AgentContext._counter += 1
69
  self.no = AgentContext._counter
70
- # set to start of unix epoch
71
  self.last_message = last_message or datetime.now(timezone.utc)
 
 
 
72
 
73
- existing = self._contexts.get(self.id, None)
74
- if existing:
75
- AgentContext.remove(self.id)
76
- self._contexts[self.id] = self
77
 
78
  @staticmethod
79
  def get(id: str):
80
  return AgentContext._contexts.get(id, None)
81
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
82
  @staticmethod
83
  def first():
84
  if not AgentContext._contexts:
@@ -112,7 +142,23 @@ class AgentContext:
112
  context.task.kill()
113
  return context
114
 
115
- def serialize(self):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
  return {
117
  "id": self.id,
118
  "name": self.name,
@@ -132,6 +178,7 @@ class AgentContext:
132
  else Localization.get().serialize_datetime(datetime.fromtimestamp(0))
133
  ),
134
  "type": self.type.value,
 
135
  }
136
 
137
  @staticmethod
@@ -260,6 +307,7 @@ class LoopData:
260
  self.last_response = ""
261
  self.params_temporary: dict = {}
262
  self.params_persistent: dict = {}
 
263
 
264
  # override values with kwargs
265
  for key, value in kwargs.items():
@@ -671,7 +719,7 @@ class Agent:
671
  response, _reasoning = await call_data["model"].unified_call(
672
  system_message=call_data["system"],
673
  user_message=call_data["message"],
674
- response_callback=stream_callback,
675
  rate_limiter_callback=self.rate_limiter_callback if not call_data["background"] else None,
676
  )
677
 
@@ -714,6 +762,13 @@ class Agent:
714
  ): # if there is an intervention message, but not yet processed
715
  msg = self.intervention
716
  self.intervention = None # reset the intervention message
 
 
 
 
 
 
 
717
  if progress.strip():
718
  self.hist_add_ai_response(progress)
719
  # append the intervention message
@@ -766,27 +821,30 @@ class Agent:
766
  )
767
 
768
  if tool:
769
- await self.handle_intervention()
770
-
771
-
772
- # Call tool hooks for compatibility
773
- await tool.before_execution(**tool_args)
774
- await self.handle_intervention()
775
-
776
- # Allow extensions to preprocess tool arguments
777
- await self.call_extensions("tool_execute_before", tool_args=tool_args or {}, tool_name=tool_name)
778
-
779
- response = await tool.execute(**tool_args)
780
- await self.handle_intervention()
781
-
782
- # Allow extensions to postprocess tool response
783
- await self.call_extensions("tool_execute_after", response=response, tool_name=tool_name)
784
-
785
- await tool.after_execution(response)
786
- await self.handle_intervention()
787
-
788
- if response.break_loop:
789
- return response.message
 
 
 
790
  else:
791
  error_detail = (
792
  f"Tool '{raw_tool_name}' not found or could not be initialized."
 
11
  import uuid
12
  import models
13
 
14
+ from python.helpers import extract_tools, files, errors, history, tokens, context as context_helper
15
  from python.helpers import dirty_json
16
  from python.helpers.print_style import PrintStyle
17
 
 
53
  created_at: datetime | None = None,
54
  type: AgentContextType = AgentContextType.USER,
55
  last_message: datetime | None = None,
56
+ data: dict | None = None,
57
+ output_data: dict | None = None,
58
+ set_current: bool = False,
59
  ):
60
+ # initialize context
61
  self.id = id or AgentContext.generate_id()
62
+ existing = self._contexts.get(self.id, None)
63
+ if existing:
64
+ AgentContext.remove(self.id)
65
+ self._contexts[self.id] = self
66
+ if set_current:
67
+ AgentContext.set_current(self.id)
68
+
69
+ # initialize state
70
  self.name = name
71
  self.config = config
72
  self.log = log or Log.Log()
73
+ self.log.context = self
74
  self.agent0 = agent0 or Agent(0, self.config, self)
75
  self.paused = paused
76
  self.streaming_agent = streaming_agent
 
79
  self.type = type
80
  AgentContext._counter += 1
81
  self.no = AgentContext._counter
 
82
  self.last_message = last_message or datetime.now(timezone.utc)
83
+ self.data = data or {}
84
+ self.output_data = output_data or {}
85
+
86
 
 
 
 
 
87
 
88
  @staticmethod
89
  def get(id: str):
90
  return AgentContext._contexts.get(id, None)
91
 
92
+ @staticmethod
93
+ def use(id: str):
94
+ context = AgentContext.get(id)
95
+ if context:
96
+ AgentContext.set_current(id)
97
+ else:
98
+ AgentContext.set_current("")
99
+ return context
100
+
101
+ @staticmethod
102
+ def current():
103
+ ctxid = context_helper.get_context_data("agent_context_id","")
104
+ if not ctxid:
105
+ return None
106
+ return AgentContext.get(ctxid)
107
+
108
+ @staticmethod
109
+ def set_current(ctxid: str):
110
+ context_helper.set_context_data("agent_context_id", ctxid)
111
+
112
  @staticmethod
113
  def first():
114
  if not AgentContext._contexts:
 
142
  context.task.kill()
143
  return context
144
 
145
+ def get_data(self, key: str, recursive: bool = True):
146
+ # recursive is not used now, prepared for context hierarchy
147
+ return self.data.get(key, None)
148
+
149
+ def set_data(self, key: str, value: Any, recursive: bool = True):
150
+ # recursive is not used now, prepared for context hierarchy
151
+ self.data[key] = value
152
+
153
+ def get_output_data(self, key: str, recursive: bool = True):
154
+ # recursive is not used now, prepared for context hierarchy
155
+ return self.output_data.get(key, None)
156
+
157
+ def set_output_data(self, key: str, value: Any, recursive: bool = True):
158
+ # recursive is not used now, prepared for context hierarchy
159
+ self.output_data[key] = value
160
+
161
+ def output(self):
162
  return {
163
  "id": self.id,
164
  "name": self.name,
 
178
  else Localization.get().serialize_datetime(datetime.fromtimestamp(0))
179
  ),
180
  "type": self.type.value,
181
+ **self.output_data,
182
  }
183
 
184
  @staticmethod
 
307
  self.last_response = ""
308
  self.params_temporary: dict = {}
309
  self.params_persistent: dict = {}
310
+ self.current_tool = None
311
 
312
  # override values with kwargs
313
  for key, value in kwargs.items():
 
719
  response, _reasoning = await call_data["model"].unified_call(
720
  system_message=call_data["system"],
721
  user_message=call_data["message"],
722
+ response_callback=stream_callback if call_data["callback"] else None,
723
  rate_limiter_callback=self.rate_limiter_callback if not call_data["background"] else None,
724
  )
725
 
 
762
  ): # if there is an intervention message, but not yet processed
763
  msg = self.intervention
764
  self.intervention = None # reset the intervention message
765
+ # If a tool was running, save its progress to history
766
+ last_tool = self.loop_data.current_tool
767
+ if last_tool:
768
+ tool_progress = last_tool.progress.strip()
769
+ if tool_progress:
770
+ self.hist_add_tool_result(last_tool.name, tool_progress)
771
+ last_tool.set_progress(None)
772
  if progress.strip():
773
  self.hist_add_ai_response(progress)
774
  # append the intervention message
 
821
  )
822
 
823
  if tool:
824
+ self.loop_data.current_tool = tool # type: ignore
825
+ try:
826
+ await self.handle_intervention()
827
+
828
+ # Call tool hooks for compatibility
829
+ await tool.before_execution(**tool_args)
830
+ await self.handle_intervention()
831
+
832
+ # Allow extensions to preprocess tool arguments
833
+ await self.call_extensions("tool_execute_before", tool_args=tool_args or {}, tool_name=tool_name)
834
+
835
+ response = await tool.execute(**tool_args)
836
+ await self.handle_intervention()
837
+
838
+ # Allow extensions to postprocess tool response
839
+ await self.call_extensions("tool_execute_after", response=response, tool_name=tool_name)
840
+
841
+ await tool.after_execution(response)
842
+ await self.handle_intervention()
843
+
844
+ if response.break_loop:
845
+ return response.message
846
+ finally:
847
+ self.loop_data.current_tool = None
848
  else:
849
  error_detail = (
850
  f"Tool '{raw_tool_name}' not found or could not be initialized."
conf/model_providers.yaml CHANGED
@@ -110,6 +110,15 @@ embedding:
110
  azure:
111
  name: OpenAI Azure
112
  litellm_provider: azure
 
 
 
 
 
 
 
 
 
113
  other:
114
  name: Other OpenAI compatible
115
  litellm_provider: openai
 
110
  azure:
111
  name: OpenAI Azure
112
  litellm_provider: azure
113
+ # TODO: OpenRouter not yet supported by LiteLLM, replace with native litellm_provider openrouter and remove api_base when ready
114
+ openrouter:
115
+ name: OpenRouter
116
+ litellm_provider: openai
117
+ kwargs:
118
+ api_base: https://openrouter.ai/api/v1
119
+ extra_headers:
120
+ "HTTP-Referer": "https://agent-zero.ai/"
121
+ "X-Title": "Agent Zero"
122
  other:
123
  name: Other OpenAI compatible
124
  litellm_provider: openai
conf/projects.default.gitignore ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # A0 project meta folder
2
+ .a0proj/
3
+
4
+ # Python environments & cache
5
+ venv/
6
+ **/__pycache__/
7
+
8
+ # Node.js dependencies
9
+ **/node_modules/
10
+ **/.npm/
11
+
12
+ # Version control metadata
13
+ **/.git/
docker/run/fs/ins/install_A0.sh CHANGED
@@ -36,6 +36,8 @@ fi
36
 
37
  # Install remaining A0 python packages
38
  uv pip install -r /git/agent-zero/requirements.txt
 
 
39
 
40
  # install playwright
41
  bash /ins/install_playwright.sh "$@"
 
36
 
37
  # Install remaining A0 python packages
38
  uv pip install -r /git/agent-zero/requirements.txt
39
+ # override for packages that have unnecessarily strict dependencies
40
+ uv pip install -r /git/agent-zero/requirements2.txt
41
 
42
  # install playwright
43
  bash /ins/install_playwright.sh "$@"
docs/res/banner_high.png ADDED

Git LFS Details

  • SHA256: 861196c98cda9997e2bfa1cd1633ee5e819b54e1800d849d446b4ce0da55adc1
  • Pointer size: 131 Bytes
  • Size of remote file: 444 kB
models.py CHANGED
@@ -41,6 +41,7 @@ from langchain_core.messages import (
41
  )
42
  from langchain.embeddings.base import Embeddings
43
  from sentence_transformers import SentenceTransformer
 
44
 
45
 
46
  # disable extra logging, must be done repeatedly, otherwise browser-use will turn it back on for some reason
@@ -106,17 +107,17 @@ class ChatGenerationResult:
106
  def add_chunk(self, chunk: ChatChunk) -> ChatChunk:
107
  if chunk["reasoning_delta"]:
108
  self.native_reasoning = True
109
-
110
  # if native reasoning detection works, there's no need to worry about thinking tags
111
  if self.native_reasoning:
112
  processed_chunk = ChatChunk(response_delta=chunk["response_delta"], reasoning_delta=chunk["reasoning_delta"])
113
  else:
114
  # if the model outputs thinking tags, we ned to parse them manually as reasoning
115
  processed_chunk = self._process_thinking_chunk(chunk)
116
-
117
  self.reasoning += processed_chunk["reasoning_delta"]
118
  self.response += processed_chunk["response_delta"]
119
-
120
  return processed_chunk
121
 
122
  def _process_thinking_chunk(self, chunk: ChatChunk) -> ChatChunk:
@@ -145,7 +146,7 @@ class ChatGenerationResult:
145
  response = response[len(opening_tag):]
146
  self.thinking = True
147
  self.thinking_tag = closing_tag
148
-
149
  close_pos = response.find(closing_tag)
150
  if close_pos != -1:
151
  reasoning += response[:close_pos]
@@ -164,7 +165,7 @@ class ChatGenerationResult:
164
  self.unprocessed = response
165
  response = ""
166
  break
167
-
168
  return ChatChunk(response_delta=response, reasoning_delta=reasoning)
169
 
170
  def _is_partial_opening_tag(self, text: str, opening_tag: str) -> bool:
@@ -191,7 +192,7 @@ class ChatGenerationResult:
191
  else:
192
  response += self.unprocessed
193
  return ChatChunk(response_delta=response, reasoning_delta=reasoning)
194
-
195
 
196
  rate_limiters: dict[str, RateLimiter] = {}
197
  api_keys_round_robin: dict[str, int] = {}
@@ -293,10 +294,11 @@ class LiteLLMChatWrapper(SimpleChatModel):
293
  provider: str
294
  kwargs: dict = {}
295
 
296
- class Config:
297
- arbitrary_types_allowed = True
298
- extra = "allow" # Allow extra attributes
299
- validate_assignment = False # Don't validate on assignment
 
300
 
301
  def __init__(
302
  self,
@@ -487,6 +489,7 @@ class LiteLLMChatWrapper(SimpleChatModel):
487
  call_kwargs: dict[str, Any] = {**self.kwargs, **kwargs}
488
  max_retries: int = int(call_kwargs.pop("a0_retry_attempts", 2))
489
  retry_delay_s: float = float(call_kwargs.pop("a0_retry_delay_seconds", 1.5))
 
490
 
491
  # results
492
  result = ChatGenerationResult()
@@ -499,48 +502,59 @@ class LiteLLMChatWrapper(SimpleChatModel):
499
  _completion = await acompletion(
500
  model=self.model_name,
501
  messages=msgs_conv,
502
- stream=True,
503
  **call_kwargs,
504
  )
505
 
506
- # iterate over chunks
507
- async for chunk in _completion: # type: ignore
508
- got_any_chunk = True
509
- # parse chunk
510
- parsed = _parse_chunk(chunk)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
511
  output = result.add_chunk(parsed)
512
-
513
- # collect reasoning delta and call callbacks
514
- if output["reasoning_delta"]:
515
- if reasoning_callback:
516
- await reasoning_callback(output["reasoning_delta"], result.reasoning)
517
- if tokens_callback:
518
- await tokens_callback(
519
- output["reasoning_delta"],
520
- approximate_tokens(output["reasoning_delta"]),
521
- )
522
- # Add output tokens to rate limiter if configured
523
- if limiter:
524
- limiter.add(output=approximate_tokens(output["reasoning_delta"]))
525
- # collect response delta and call callbacks
526
- if output["response_delta"]:
527
- if response_callback:
528
- await response_callback(output["response_delta"], result.response)
529
- if tokens_callback:
530
- await tokens_callback(
531
- output["response_delta"],
532
- approximate_tokens(output["response_delta"]),
533
- )
534
- # Add output tokens to rate limiter if configured
535
- if limiter:
536
  limiter.add(output=approximate_tokens(output["response_delta"]))
 
 
537
 
538
  # Successful completion of stream
539
  return result.response, result.reasoning
540
 
541
  except Exception as e:
542
  import asyncio
543
-
544
  # Retry only if no chunks received and error is transient
545
  if got_any_chunk or not _is_transient_litellm_error(e) or attempt >= max_retries:
546
  raise
@@ -804,6 +818,10 @@ def _parse_chunk(chunk: Any) -> ChatChunk:
804
  delta.get("reasoning_content", "")
805
  if isinstance(delta, dict)
806
  else getattr(delta, "reasoning_content", "")
 
 
 
 
807
  )
808
 
809
  return ChatChunk(reasoning_delta=reasoning_delta, response_delta=response_delta)
 
41
  )
42
  from langchain.embeddings.base import Embeddings
43
  from sentence_transformers import SentenceTransformer
44
+ from pydantic import ConfigDict
45
 
46
 
47
  # disable extra logging, must be done repeatedly, otherwise browser-use will turn it back on for some reason
 
107
  def add_chunk(self, chunk: ChatChunk) -> ChatChunk:
108
  if chunk["reasoning_delta"]:
109
  self.native_reasoning = True
110
+
111
  # if native reasoning detection works, there's no need to worry about thinking tags
112
  if self.native_reasoning:
113
  processed_chunk = ChatChunk(response_delta=chunk["response_delta"], reasoning_delta=chunk["reasoning_delta"])
114
  else:
115
  # if the model outputs thinking tags, we ned to parse them manually as reasoning
116
  processed_chunk = self._process_thinking_chunk(chunk)
117
+
118
  self.reasoning += processed_chunk["reasoning_delta"]
119
  self.response += processed_chunk["response_delta"]
120
+
121
  return processed_chunk
122
 
123
  def _process_thinking_chunk(self, chunk: ChatChunk) -> ChatChunk:
 
146
  response = response[len(opening_tag):]
147
  self.thinking = True
148
  self.thinking_tag = closing_tag
149
+
150
  close_pos = response.find(closing_tag)
151
  if close_pos != -1:
152
  reasoning += response[:close_pos]
 
165
  self.unprocessed = response
166
  response = ""
167
  break
168
+
169
  return ChatChunk(response_delta=response, reasoning_delta=reasoning)
170
 
171
  def _is_partial_opening_tag(self, text: str, opening_tag: str) -> bool:
 
192
  else:
193
  response += self.unprocessed
194
  return ChatChunk(response_delta=response, reasoning_delta=reasoning)
195
+
196
 
197
  rate_limiters: dict[str, RateLimiter] = {}
198
  api_keys_round_robin: dict[str, int] = {}
 
294
  provider: str
295
  kwargs: dict = {}
296
 
297
+ model_config = ConfigDict(
298
+ arbitrary_types_allowed=True,
299
+ extra="allow",
300
+ validate_assignment=False,
301
+ )
302
 
303
  def __init__(
304
  self,
 
489
  call_kwargs: dict[str, Any] = {**self.kwargs, **kwargs}
490
  max_retries: int = int(call_kwargs.pop("a0_retry_attempts", 2))
491
  retry_delay_s: float = float(call_kwargs.pop("a0_retry_delay_seconds", 1.5))
492
+ stream = reasoning_callback is not None or response_callback is not None or tokens_callback is not None
493
 
494
  # results
495
  result = ChatGenerationResult()
 
502
  _completion = await acompletion(
503
  model=self.model_name,
504
  messages=msgs_conv,
505
+ stream=stream,
506
  **call_kwargs,
507
  )
508
 
509
+ if stream:
510
+ # iterate over chunks
511
+ async for chunk in _completion: # type: ignore
512
+ got_any_chunk = True
513
+ # parse chunk
514
+ parsed = _parse_chunk(chunk)
515
+ output = result.add_chunk(parsed)
516
+
517
+ # collect reasoning delta and call callbacks
518
+ if output["reasoning_delta"]:
519
+ if reasoning_callback:
520
+ await reasoning_callback(output["reasoning_delta"], result.reasoning)
521
+ if tokens_callback:
522
+ await tokens_callback(
523
+ output["reasoning_delta"],
524
+ approximate_tokens(output["reasoning_delta"]),
525
+ )
526
+ # Add output tokens to rate limiter if configured
527
+ if limiter:
528
+ limiter.add(output=approximate_tokens(output["reasoning_delta"]))
529
+ # collect response delta and call callbacks
530
+ if output["response_delta"]:
531
+ if response_callback:
532
+ await response_callback(output["response_delta"], result.response)
533
+ if tokens_callback:
534
+ await tokens_callback(
535
+ output["response_delta"],
536
+ approximate_tokens(output["response_delta"]),
537
+ )
538
+ # Add output tokens to rate limiter if configured
539
+ if limiter:
540
+ limiter.add(output=approximate_tokens(output["response_delta"]))
541
+
542
+ # non-stream response
543
+ else:
544
+ parsed = _parse_chunk(_completion)
545
  output = result.add_chunk(parsed)
546
+ if limiter:
547
+ if output["response_delta"]:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
548
  limiter.add(output=approximate_tokens(output["response_delta"]))
549
+ if output["reasoning_delta"]:
550
+ limiter.add(output=approximate_tokens(output["reasoning_delta"]))
551
 
552
  # Successful completion of stream
553
  return result.response, result.reasoning
554
 
555
  except Exception as e:
556
  import asyncio
557
+
558
  # Retry only if no chunks received and error is transient
559
  if got_any_chunk or not _is_transient_litellm_error(e) or attempt >= max_retries:
560
  raise
 
818
  delta.get("reasoning_content", "")
819
  if isinstance(delta, dict)
820
  else getattr(delta, "reasoning_content", "")
821
+ ) or (
822
+ message.get("reasoning_content", "")
823
+ if isinstance(message, dict)
824
+ else getattr(message, "reasoning_content", "")
825
  )
826
 
827
  return ChatChunk(reasoning_delta=reasoning_delta, response_delta=response_delta)
prompts/agent.extras.project.file_structure.md ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # File structure of project {{project_name}}
2
+ - this is filtered overview not full scan
3
+ - list yourself if needed
4
+ - maximum depth: {{max_depth}}
5
+ - ignored:
6
+ {{gitignore}}
7
+
8
+ ## file tree
9
+ {{file_structure}}
prompts/agent.system.main.tips.md CHANGED
@@ -7,7 +7,7 @@ never assume success
7
  memory refers memory tools not own knowledge
8
 
9
  ## Files
10
- save files in /root
11
  don't use spaces in file names
12
 
13
  ## Instruments
 
7
  memory refers memory tools not own knowledge
8
 
9
  ## Files
10
+ when not in project save files in /root
11
  don't use spaces in file names
12
 
13
  ## Instruments
prompts/agent.system.projects.active.md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Active project
2
+ Path: {{project_path}}
3
+ Title: {{project_name}}
4
+ Description: {{project_description}}
5
+
6
+
7
+ ### Important project instructions MUST follow
8
+ - always work inside {{project_path}} directory
9
+ - do not rename project directory do not change meta files in .a0proj folder
10
+ - cleanup when code accidentaly creates files outside move them
11
+
12
+ {{project_instructions}}
prompts/agent.system.projects.inactive.md ADDED
@@ -0,0 +1 @@
 
 
1
+ no project currently activated
prompts/agent.system.projects.main.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Projects
2
+ - user can create and activate projects
3
+ - projects have work folder in /usr/projects/<name> and instructions and config in /usr/projects/<name>/.a0proj
4
+ - when activated agent works in project follows project instructions
5
+ - agent cannot manipulate or switch projects
prompts/agent.system.tool.document_query.md CHANGED
@@ -1,60 +1,62 @@
1
- ### document_query:
2
- This tool can be used to read or analyze remote and local documents.
3
- It can be used to:
4
- * Get webpage or remote document text content
5
- * Get local document text content
6
- * Answer queries about a webpage, remote or local document
7
- By default, when the "queries" argument is empty, this tool returns the text content of the document retrieved using OCR.
8
- Additionally, you can pass a list of "queries" - in this case, the tool returns the answers to all the passed queries about the document.
9
- !!! This is a universal document reader qnd query tool
10
- !!! Supported document formats: HTML, PDF, Office Documents (word,excel, powerpoint), Textfiles and many more.
11
 
12
- #### Arguments:
13
- * "document" (string) : The web address or local path to the document in question. Webdocuments need "http://" or "https://" protocol prefix. For local files the "file:" protocol prefix is optional. Local files MUST be passed with full filesystem path.
14
- * "queries" (Optional, list[str]) : Optionally, here you can pass one or more queries to be answered (using and/or about) the document
15
-
16
- #### Usage example 1:
17
- ##### Request:
18
- ```json
19
  {
20
  "thoughts": [
21
- "...",
22
  ],
23
- "headline": "Reading web document content",
24
  "tool_name": "document_query",
25
  "tool_args": {
26
- "document": "https://...somexample",
27
  }
28
  }
29
- ```
30
- ##### Response:
31
- ```plaintext
32
- ... Here is the entire content of the web document requested ...
33
- ```
34
 
35
- #### Usage example 2:
36
- ##### Request:
37
- ```json
38
  {
39
  "thoughts": [
40
- "...",
41
  ],
42
- "headline": "Analyzing document to answer specific questions",
43
  "tool_name": "document_query",
44
  "tool_args": {
45
- "document": "https://...somexample",
46
  "queries": [
47
- "What is the topic?",
48
- "Who is the audience?"
49
  ]
50
  }
51
  }
52
- ```
53
- ##### Response:
54
- ```plaintext
55
- # What is the topic?
56
- ... Description of the document topic ...
57
 
58
- # Who is the audience?
59
- ... The intended document audience list with short descriptions ...
60
- ```
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ### document_query
2
+ read and analyze remote/local documents get text content or answer questions
3
+ pass a single url/path or a list for multiple documents in "document"
4
+ for web documents use "http://" or "https://"" prefix
5
+ for local files "file://" prefix is optional but full path is required
6
+ if "queries" is empty tool returns document content
7
+ if "queries" is a list of strings tool returns answers
8
+ supports various formats HTML PDF Office Text etc
9
+ usage:
 
10
 
11
+ 1 get content
12
+ ~~~json
 
 
 
 
 
13
  {
14
  "thoughts": [
15
+ "I need to read..."
16
  ],
17
+ "headline": "...",
18
  "tool_name": "document_query",
19
  "tool_args": {
20
+ "document": "https://.../document"
21
  }
22
  }
23
+ ~~~
 
 
 
 
24
 
25
+ 2 query document
26
+ ~~~json
 
27
  {
28
  "thoughts": [
29
+ "I need to answer..."
30
  ],
31
+ "headline": "...",
32
  "tool_name": "document_query",
33
  "tool_args": {
34
+ "document": "https://.../document",
35
  "queries": [
36
+ "What is...",
37
+ "Who is..."
38
  ]
39
  }
40
  }
41
+ ~~~
 
 
 
 
42
 
43
+ 3 query multiple documents
44
+ ~~~json
45
+ {
46
+ "thoughts": [
47
+ "I need to compare..."
48
+ ],
49
+ "headline": "...",
50
+ "tool_name": "document_query",
51
+ "tool_args": {
52
+ "document": [
53
+ "https://.../document-one",
54
+ "file:///path/to/document-two"
55
+ ],
56
+ "queries": [
57
+ "Compare the main conclusions...",
58
+ "What are the key differences..."
59
+ ]
60
+ }
61
+ }
62
+ ~~~
prompts/agent.system.tool.wait.md ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ### wait
2
+ pause execution for a set time or until a timestamp
3
+ use args "seconds" "minutes" "hours" "days" for duration
4
+ use "until" with ISO timestamp for a specific time
5
+ usage:
6
+
7
+ 1 wait duration
8
+ ~~~json
9
+ {
10
+ "thoughts": [
11
+ "I need to wait..."
12
+ ],
13
+ "headline": "...",
14
+ "tool_name": "wait",
15
+ "tool_args": {
16
+ "minutes": 1,
17
+ "seconds": 30
18
+ }
19
+ }
20
+ ~~~
21
+
22
+ 2 wait timestamp
23
+ ~~~json
24
+ {
25
+ "thoughts": [
26
+ "I will wait until..."
27
+ ],
28
+ "headline": "...",
29
+ "tool_name": "wait",
30
+ "tool_args": {
31
+ "until": "2025-10-20T10:00:00Z"
32
+ }
33
+ }
34
+ ~~~
prompts/fw.code.running.md ADDED
@@ -0,0 +1 @@
 
 
1
+ Terminal session {{session}} is still running. Decide to wait for more 'output', 'reset', or use another session number based on situation.
prompts/fw.wait_complete.md ADDED
@@ -0,0 +1 @@
 
 
1
+ Wait complete. Reached {{target_time}}.
python/api/api_log_get.py CHANGED
@@ -32,7 +32,7 @@ class ApiLogGet(ApiHandler):
32
  return Response('{"error": "context_id is required"}', status=400, mimetype="application/json")
33
 
34
  # Get context
35
- context = AgentContext.get(context_id)
36
  if not context:
37
  return Response('{"error": "Context not found"}', status=404, mimetype="application/json")
38
 
 
32
  return Response('{"error": "context_id is required"}', status=400, mimetype="application/json")
33
 
34
  # Get context
35
+ context = AgentContext.use(context_id)
36
  if not context:
37
  return Response('{"error": "Context not found"}', status=404, mimetype="application/json")
38
 
python/api/api_message.py CHANGED
@@ -68,12 +68,13 @@ class ApiMessage(ApiHandler):
68
 
69
  # Get or create context
70
  if context_id:
71
- context = AgentContext.get(context_id)
72
  if not context:
73
  return Response('{"error": "Context not found"}', status=404, mimetype="application/json")
74
  else:
75
  config = initialize_agent()
76
  context = AgentContext(config=config, type=AgentContextType.USER)
 
77
  context_id = context.id
78
 
79
  # Update chat lifetime
 
68
 
69
  # Get or create context
70
  if context_id:
71
+ context = AgentContext.use(context_id)
72
  if not context:
73
  return Response('{"error": "Context not found"}', status=404, mimetype="application/json")
74
  else:
75
  config = initialize_agent()
76
  context = AgentContext(config=config, type=AgentContextType.USER)
77
+ AgentContext.use(context.id)
78
  context_id = context.id
79
 
80
  # Update chat lifetime
python/api/api_reset_chat.py CHANGED
@@ -35,7 +35,7 @@ class ApiResetChat(ApiHandler):
35
  )
36
 
37
  # Check if context exists
38
- context = AgentContext.get(context_id)
39
  if not context:
40
  return Response(
41
  '{"error": "Chat context not found"}',
 
35
  )
36
 
37
  # Check if context exists
38
+ context = AgentContext.use(context_id)
39
  if not context:
40
  return Response(
41
  '{"error": "Chat context not found"}',
python/api/api_terminate_chat.py CHANGED
@@ -35,7 +35,7 @@ class ApiTerminateChat(ApiHandler):
35
  )
36
 
37
  # Check if context exists
38
- context = AgentContext.get(context_id)
39
  if not context:
40
  return Response(
41
  '{"error": "Chat context not found"}',
 
35
  )
36
 
37
  # Check if context exists
38
+ context = AgentContext.use(context_id)
39
  if not context:
40
  return Response(
41
  '{"error": "Chat context not found"}',
python/api/chat_create.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from python.helpers.api import ApiHandler, Input, Output, Request, Response
2
+
3
+
4
+ from python.helpers import projects, guids
5
+ from agent import AgentContext
6
+
7
+
8
+ class CreateChat(ApiHandler):
9
+ async def process(self, input: Input, request: Request) -> Output:
10
+ current_ctxid = input.get("current_context", "") # current context id
11
+ new_ctxid = input.get("new_context", guids.generate_id()) # given or new guid
12
+
13
+ # context instance - get or create
14
+ current_context = AgentContext.get(current_ctxid)
15
+
16
+ # get/create new context
17
+ new_context = self.use_context(new_ctxid)
18
+
19
+ # copy selected data from current to new context
20
+ if current_context:
21
+ current_data_1 = current_context.get_data(projects.CONTEXT_DATA_KEY_PROJECT)
22
+ if current_data_1:
23
+ new_context.set_data(projects.CONTEXT_DATA_KEY_PROJECT, current_data_1)
24
+ current_data_2 = current_context.get_output_data(projects.CONTEXT_DATA_KEY_PROJECT)
25
+ if current_data_2:
26
+ new_context.set_output_data(projects.CONTEXT_DATA_KEY_PROJECT, current_data_2)
27
+
28
+ return {
29
+ "ok": True,
30
+ "ctxid": new_context.id,
31
+ "message": "Context created.",
32
+ }
python/api/chat_export.py CHANGED
@@ -8,7 +8,7 @@ class ExportChat(ApiHandler):
8
  if not ctxid:
9
  raise Exception("No context id provided")
10
 
11
- context = self.get_context(ctxid)
12
  content = persist_chat.export_json_chat(context)
13
  return {
14
  "message": "Chats exported.",
 
8
  if not ctxid:
9
  raise Exception("No context id provided")
10
 
11
+ context = self.use_context(ctxid)
12
  content = persist_chat.export_json_chat(context)
13
  return {
14
  "message": "Chats exported.",
python/api/chat_files_path_get.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from python.helpers.api import ApiHandler, Request, Response
2
+ from python.helpers import files, memory, notification, projects, notification, runtime
3
+ import os
4
+ from werkzeug.utils import secure_filename
5
+
6
+
7
+ class GetChatFilesPath(ApiHandler):
8
+ async def process(self, input: dict, request: Request) -> dict | Response:
9
+ ctxid = input.get("ctxid", "")
10
+ if not ctxid:
11
+ raise Exception("No context id provided")
12
+ context = self.use_context(ctxid)
13
+
14
+ project_name = projects.get_context_project_name(context)
15
+ if project_name:
16
+ folder = files.normalize_a0_path(projects.get_project_folder(project_name))
17
+ else:
18
+ folder = "/root" # root in container
19
+
20
+ return {
21
+ "ok": True,
22
+ "path": folder,
23
+ }
python/api/chat_remove.py CHANGED
@@ -8,7 +8,7 @@ class RemoveChat(ApiHandler):
8
  async def process(self, input: Input, request: Request) -> Output:
9
  ctxid = input.get("context", "")
10
 
11
- context = AgentContext.get(ctxid)
12
  if context:
13
  # stop processing any tasks
14
  context.reset()
 
8
  async def process(self, input: Input, request: Request) -> Output:
9
  ctxid = input.get("context", "")
10
 
11
+ context = AgentContext.use(ctxid)
12
  if context:
13
  # stop processing any tasks
14
  context.reset()
python/api/chat_reset.py CHANGED
@@ -9,7 +9,7 @@ class Reset(ApiHandler):
9
  ctxid = input.get("context", "")
10
 
11
  # context instance - get or create
12
- context = self.get_context(ctxid)
13
  context.reset()
14
  persist_chat.save_tmp_chat(context)
15
  persist_chat.remove_msg_files(ctxid)
 
9
  ctxid = input.get("context", "")
10
 
11
  # context instance - get or create
12
+ context = self.use_context(ctxid)
13
  context.reset()
14
  persist_chat.save_tmp_chat(context)
15
  persist_chat.remove_msg_files(ctxid)
python/api/csrf_token.py CHANGED
@@ -1,4 +1,5 @@
1
  import secrets
 
2
  from python.helpers.api import (
3
  ApiHandler,
4
  Input,
@@ -7,7 +8,9 @@ from python.helpers.api import (
7
  Response,
8
  session,
9
  )
10
- from python.helpers import runtime
 
 
11
 
12
  class GetCsrfToken(ApiHandler):
13
 
@@ -20,6 +23,90 @@ class GetCsrfToken(ApiHandler):
20
  return False
21
 
22
  async def process(self, input: Input, request: Request) -> Output:
 
 
 
 
 
 
 
 
 
 
23
  if "csrf_token" not in session:
24
  session["csrf_token"] = secrets.token_urlsafe(32)
25
- return {"token": session["csrf_token"], "runtime_id": runtime.get_runtime_id()}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import secrets
2
+ from urllib.parse import urlparse
3
  from python.helpers.api import (
4
  ApiHandler,
5
  Input,
 
8
  Response,
9
  session,
10
  )
11
+ from python.helpers import runtime, dotenv, login
12
+ import fnmatch
13
+
14
 
15
  class GetCsrfToken(ApiHandler):
16
 
 
23
  return False
24
 
25
  async def process(self, input: Input, request: Request) -> Output:
26
+
27
+ # check for allowed origin to prevent dns rebinding attacks
28
+ origin_check = await self.check_allowed_origin(request)
29
+ if not origin_check["ok"]:
30
+ return {
31
+ "ok": False,
32
+ "error": f"Origin {self.get_origin_from_request(request)} not allowed when login is disabled. Set login and password or add your URL to ALLOWED_ORIGINS env variable. Currently allowed origins: {",".join(origin_check['allowed_origins'])}",
33
+ }
34
+
35
+ # generate a csrf token if it doesn't exist
36
  if "csrf_token" not in session:
37
  session["csrf_token"] = secrets.token_urlsafe(32)
38
+
39
+ # return the csrf token and runtime id
40
+ return {
41
+ "ok": True,
42
+ "token": session["csrf_token"],
43
+ "runtime_id": runtime.get_runtime_id(),
44
+ }
45
+
46
+ async def check_allowed_origin(self, request: Request):
47
+ # if login is required, this che
48
+ if login.is_login_required():
49
+ return {"ok": True, "origin": "", "allowed_origins": ""}
50
+ # otherwise, check if the origin is allowed
51
+ return await self.is_allowed_origin(request)
52
+
53
+ async def is_allowed_origin(self, request: Request):
54
+ # get the origin from the request
55
+ origin = self.get_origin_from_request(request)
56
+ if not origin:
57
+ return {"ok": False, "origin": "", "allowed_origins": ""}
58
+
59
+ # list of allowed origins
60
+ allowed_origins = await self.get_allowed_origins()
61
+
62
+ # check if the origin is allowed
63
+ match = any(
64
+ fnmatch.fnmatch(origin, allowed_origin)
65
+ for allowed_origin in allowed_origins
66
+ )
67
+ return {"ok": match, "origin": origin, "allowed_origins": allowed_origins}
68
+
69
+ def get_origin_from_request(self, request: Request):
70
+ # get from origin
71
+ r = request.headers.get("Origin") or request.environ.get("HTTP_ORIGIN")
72
+ if not r:
73
+ # try referer if origin not present
74
+ r = (
75
+ request.headers.get("Referer")
76
+ or request.referrer
77
+ or request.environ.get("HTTP_REFERER")
78
+ )
79
+ if not r:
80
+ return None
81
+ # parse and normalize
82
+ p = urlparse(r)
83
+ if not p.scheme or not p.hostname:
84
+ return None
85
+ return f"{p.scheme}://{p.hostname}" + (f":{p.port}" if p.port else "")
86
+
87
+ async def get_allowed_origins(self) -> list[str]:
88
+ # get the allowed origins from the environment
89
+ allowed_origins = [
90
+ origin.strip()
91
+ for origin in (dotenv.get_dotenv_value("ALLOWED_ORIGINS") or "").split(",")
92
+ if origin.strip()
93
+ ]
94
+
95
+ # if there are no allowed origins, allow default localhosts
96
+ if not allowed_origins:
97
+ allowed_origins = self.get_default_allowed_origins()
98
+
99
+ # always allow tunnel url if running
100
+ try:
101
+ from python.api.tunnel_proxy import process as tunnel_api_process
102
+
103
+ tunnel = await tunnel_api_process({"action": "get"})
104
+ if tunnel and isinstance(tunnel, dict) and tunnel["success"]:
105
+ allowed_origins.append(tunnel["tunnel_url"])
106
+ except Exception:
107
+ pass
108
+
109
+ return allowed_origins
110
+
111
+ def get_default_allowed_origins(self) -> list[str]:
112
+ return ["*://localhost:*", "*://127.0.0.1:*", "*://0.0.0.0:*"]
python/api/ctx_window_get.py CHANGED
@@ -6,7 +6,7 @@ from python.helpers import tokens
6
  class GetCtxWindow(ApiHandler):
7
  async def process(self, input: Input, request: Request) -> Output:
8
  ctxid = input.get("context", [])
9
- context = self.get_context(ctxid)
10
  agent = context.streaming_agent or context.agent0
11
  window = agent.get_data(agent.DATA_NAME_CTX_WINDOW)
12
  if not window or not isinstance(window, dict):
 
6
  class GetCtxWindow(ApiHandler):
7
  async def process(self, input: Input, request: Request) -> Output:
8
  ctxid = input.get("context", [])
9
+ context = self.use_context(ctxid)
10
  agent = context.streaming_agent or context.agent0
11
  window = agent.get_data(agent.DATA_NAME_CTX_WINDOW)
12
  if not window or not isinstance(window, dict):
python/api/get_work_dir_files.py CHANGED
@@ -1,6 +1,6 @@
1
  from python.helpers.api import ApiHandler, Request, Response
2
  from python.helpers.file_browser import FileBrowser
3
- from python.helpers import runtime
4
 
5
  class GetWorkDirFiles(ApiHandler):
6
 
@@ -15,7 +15,7 @@ class GetWorkDirFiles(ApiHandler):
15
  # current_path = "work_dir"
16
  # else:
17
  # current_path = "root"
18
- current_path = "root"
19
 
20
  # browser = FileBrowser()
21
  # result = browser.get_files(current_path)
 
1
  from python.helpers.api import ApiHandler, Request, Response
2
  from python.helpers.file_browser import FileBrowser
3
+ from python.helpers import runtime, files
4
 
5
  class GetWorkDirFiles(ApiHandler):
6
 
 
15
  # current_path = "work_dir"
16
  # else:
17
  # current_path = "root"
18
+ current_path = "/a0"
19
 
20
  # browser = FileBrowser()
21
  # result = browser.get_files(current_path)
python/api/history_get.py CHANGED
@@ -4,7 +4,7 @@ from python.helpers.api import ApiHandler, Request, Response
4
  class GetHistory(ApiHandler):
5
  async def process(self, input: dict, request: Request) -> dict | Response:
6
  ctxid = input.get("context", [])
7
- context = self.get_context(ctxid)
8
  agent = context.streaming_agent or context.agent0
9
  history = agent.history.output_text()
10
  size = agent.history.get_tokens()
 
4
  class GetHistory(ApiHandler):
5
  async def process(self, input: dict, request: Request) -> dict | Response:
6
  ctxid = input.get("context", [])
7
+ context = self.use_context(ctxid)
8
  agent = context.streaming_agent or context.agent0
9
  history = agent.history.output_text()
10
  size = agent.history.get_tokens()
python/api/import_knowledge.py CHANGED
@@ -13,7 +13,7 @@ class ImportKnowledge(ApiHandler):
13
  if not ctxid:
14
  raise Exception("No context id provided")
15
 
16
- context = self.get_context(ctxid)
17
 
18
  file_list = request.files.getlist("files[]")
19
  KNOWLEDGE_FOLDER = files.get_abs_path(memory.get_custom_knowledge_subdir_abs(context.agent0), "main")
 
13
  if not ctxid:
14
  raise Exception("No context id provided")
15
 
16
+ context = self.use_context(ctxid)
17
 
18
  file_list = request.files.getlist("files[]")
19
  KNOWLEDGE_FOLDER = files.get_abs_path(memory.get_custom_knowledge_subdir_abs(context.agent0), "main")
python/api/knowledge_path_get.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from python.helpers.api import ApiHandler, Request, Response
2
+ from python.helpers import files, memory, notification, projects, notification
3
+ import os
4
+ from werkzeug.utils import secure_filename
5
+
6
+
7
+ class GetKnowledgePath(ApiHandler):
8
+ async def process(self, input: dict, request: Request) -> dict | Response:
9
+ ctxid = input.get("ctxid", "")
10
+ if not ctxid:
11
+ raise Exception("No context id provided")
12
+ context = self.use_context(ctxid)
13
+
14
+ project_name = projects.get_context_project_name(context)
15
+ if project_name:
16
+ knowledge_folder = projects.get_project_meta_folder(project_name, "knowledge")
17
+ else:
18
+ knowledge_folder = memory.get_custom_knowledge_subdir_abs(context.agent0)
19
+
20
+ knowledge_folder = files.normalize_a0_path(knowledge_folder)
21
+
22
+ return {
23
+ "ok": True,
24
+ "path": knowledge_folder,
25
+ }
python/api/knowledge_reindex.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from python.helpers.api import ApiHandler, Request, Response
2
+ from python.helpers import files, memory, notification, projects, notification
3
+ import os
4
+ from werkzeug.utils import secure_filename
5
+
6
+
7
+ class ReindexKnowledge(ApiHandler):
8
+ async def process(self, input: dict, request: Request) -> dict | Response:
9
+ ctxid = input.get("ctxid", "")
10
+ if not ctxid:
11
+ raise Exception("No context id provided")
12
+ context = self.use_context(ctxid)
13
+
14
+ # reload memory to re-import knowledge
15
+ await memory.Memory.reload(context.agent0)
16
+ context.log.set_initial_progress()
17
+
18
+ return {
19
+ "ok": True,
20
+ "message": "Knowledge re-indexed",
21
+ }
python/api/memory_dashboard.py CHANGED
@@ -1,8 +1,9 @@
1
  from python.helpers.api import ApiHandler, Request, Response
2
- from python.helpers.memory import Memory
3
  from python.helpers import files
4
  from models import ModelConfig, ModelType
5
  from langchain_core.documents import Document
 
6
 
7
 
8
  class MemoryDashboard(ApiHandler):
@@ -113,21 +114,13 @@ class MemoryDashboard(ApiHandler):
113
  # Fallback to default if no context available
114
  return {"success": True, "memory_subdir": "default"}
115
 
116
- # Import AgentContext here to avoid circular imports
117
- from agent import AgentContext
118
-
119
- # Get the context and extract memory subdirectory
120
- context = AgentContext.get(context_id)
121
- if (
122
- context
123
- and hasattr(context, "config")
124
- and hasattr(context.config, "memory_subdir")
125
- ):
126
- memory_subdir = context.config.memory_subdir or "default"
127
- return {"success": True, "memory_subdir": memory_subdir}
128
- else:
129
  return {"success": True, "memory_subdir": "default"}
130
 
 
 
 
131
  except Exception:
132
  return {
133
  "success": True, # Still success, just fallback to default
@@ -138,12 +131,7 @@ class MemoryDashboard(ApiHandler):
138
  """Get available memory subdirectories."""
139
  try:
140
  # Get subdirectories from memory folder
141
- subdirs = files.get_subdirectories("memory", exclude="embeddings")
142
-
143
- # Ensure 'default' is always available
144
- if "default" not in subdirs:
145
- subdirs.insert(0, "default")
146
-
147
  return {"success": True, "subdirs": subdirs}
148
  except Exception as e:
149
  return {
 
1
  from python.helpers.api import ApiHandler, Request, Response
2
+ from python.helpers.memory import Memory, get_existing_memory_subdirs, get_context_memory_subdir
3
  from python.helpers import files
4
  from models import ModelConfig, ModelType
5
  from langchain_core.documents import Document
6
+ from agent import AgentContext
7
 
8
 
9
  class MemoryDashboard(ApiHandler):
 
114
  # Fallback to default if no context available
115
  return {"success": True, "memory_subdir": "default"}
116
 
117
+ context = AgentContext.use(context_id)
118
+ if not context:
 
 
 
 
 
 
 
 
 
 
 
119
  return {"success": True, "memory_subdir": "default"}
120
 
121
+ memory_subdir = get_context_memory_subdir(context)
122
+ return {"success": True, "memory_subdir": memory_subdir or "default"}
123
+
124
  except Exception:
125
  return {
126
  "success": True, # Still success, just fallback to default
 
131
  """Get available memory subdirectories."""
132
  try:
133
  # Get subdirectories from memory folder
134
+ subdirs = get_existing_memory_subdirs()
 
 
 
 
 
135
  return {"success": True, "subdirs": subdirs}
136
  except Exception as e:
137
  return {
python/api/message.py CHANGED
@@ -53,7 +53,7 @@ class Message(ApiHandler):
53
  message = text
54
 
55
  # Obtain agent context
56
- context = self.get_context(ctxid)
57
 
58
  # Store attachments in agent data
59
  # context.agent0.set_data("attachments", attachment_paths)
 
53
  message = text
54
 
55
  # Obtain agent context
56
+ context = self.use_context(ctxid)
57
 
58
  # Store attachments in agent data
59
  # context.agent0.set_data("attachments", attachment_paths)
python/api/nudge.py CHANGED
@@ -6,7 +6,7 @@ class Nudge(ApiHandler):
6
  if not ctxid:
7
  raise Exception("No context id provided")
8
 
9
- context = self.get_context(ctxid)
10
  context.nudge()
11
 
12
  msg = "Process reset, agent nudged."
 
6
  if not ctxid:
7
  raise Exception("No context id provided")
8
 
9
+ context = self.use_context(ctxid)
10
  context.nudge()
11
 
12
  msg = "Process reset, agent nudged."
python/api/pause.py CHANGED
@@ -8,7 +8,7 @@ class Pause(ApiHandler):
8
  ctxid = input.get("context", "")
9
 
10
  # context instance - get or create
11
- context = self.get_context(ctxid)
12
 
13
  context.paused = paused
14
 
 
8
  ctxid = input.get("context", "")
9
 
10
  # context instance - get or create
11
+ context = self.use_context(ctxid)
12
 
13
  context.paused = paused
14
 
python/api/poll.py CHANGED
@@ -18,10 +18,17 @@ class Poll(ApiHandler):
18
  timezone = input.get("timezone", get_dotenv_value("DEFAULT_USER_TIMEZONE", "UTC"))
19
  Localization.get().set_timezone(timezone)
20
 
21
- # context instance - get or create
22
- context = self.get_context(ctxid)
23
-
24
- logs = context.log.output(start=from_no)
 
 
 
 
 
 
 
25
 
26
  # Get notifications from global notification manager
27
  notification_manager = AgentContext.get_notification_manager()
@@ -54,7 +61,7 @@ class Poll(ApiHandler):
54
  continue
55
 
56
  # Create the base context data that will be returned
57
- context_data = ctx.serialize()
58
 
59
  context_task = scheduler.get_task_by_uuid(ctx.id)
60
  # Determine if this is a task-dedicated context by checking if a task with this UUID exists
@@ -102,15 +109,16 @@ class Poll(ApiHandler):
102
 
103
  # data from this server
104
  return {
105
- "context": context.id,
 
106
  "contexts": ctxs,
107
  "tasks": tasks,
108
  "logs": logs,
109
- "log_guid": context.log.guid,
110
- "log_version": len(context.log.updates),
111
- "log_progress": context.log.progress,
112
- "log_progress_active": context.log.progress_active,
113
- "paused": context.paused,
114
  "notifications": notifications,
115
  "notifications_guid": notification_manager.guid,
116
  "notifications_version": len(notification_manager.updates),
 
18
  timezone = input.get("timezone", get_dotenv_value("DEFAULT_USER_TIMEZONE", "UTC"))
19
  Localization.get().set_timezone(timezone)
20
 
21
+ # context instance - get or create only if ctxid is provided
22
+ if ctxid:
23
+ try:
24
+ context = self.use_context(ctxid, create_if_not_exists=False)
25
+ except Exception as e:
26
+ context = None
27
+ else:
28
+ context = None
29
+
30
+ # Get logs only if we have a context
31
+ logs = context.log.output(start=from_no) if context else []
32
 
33
  # Get notifications from global notification manager
34
  notification_manager = AgentContext.get_notification_manager()
 
61
  continue
62
 
63
  # Create the base context data that will be returned
64
+ context_data = ctx.output()
65
 
66
  context_task = scheduler.get_task_by_uuid(ctx.id)
67
  # Determine if this is a task-dedicated context by checking if a task with this UUID exists
 
109
 
110
  # data from this server
111
  return {
112
+ "deselect_chat": ctxid and not context,
113
+ "context": context.id if context else "",
114
  "contexts": ctxs,
115
  "tasks": tasks,
116
  "logs": logs,
117
+ "log_guid": context.log.guid if context else "",
118
+ "log_version": len(context.log.updates) if context else 0,
119
+ "log_progress": context.log.progress if context else 0,
120
+ "log_progress_active": context.log.progress_active if context else False,
121
+ "paused": context.paused if context else False,
122
  "notifications": notifications,
123
  "notifications_guid": notification_manager.guid,
124
  "notifications_version": len(notification_manager.updates),
python/api/projects.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from python.helpers.api import ApiHandler, Input, Output, Request, Response
2
+ from python.helpers import projects
3
+
4
+
5
+ class Projects(ApiHandler):
6
+ async def process(self, input: Input, request: Request) -> Output:
7
+ action = input.get("action", "")
8
+ ctxid = input.get("context_id", None)
9
+
10
+ if ctxid:
11
+ _context = self.use_context(ctxid)
12
+
13
+ try:
14
+ if action == "list":
15
+ data = self.get_active_projects_list()
16
+ elif action == "load":
17
+ data = self.load_project(input.get("name", None))
18
+ elif action == "create":
19
+ data = self.create_project(input.get("project", None))
20
+ elif action == "update":
21
+ data = self.update_project(input.get("project", None))
22
+ elif action == "delete":
23
+ data = self.delete_project(input.get("name", None))
24
+ elif action == "activate":
25
+ data = self.activate_project(ctxid, input.get("name", None))
26
+ elif action == "deactivate":
27
+ data = self.deactivate_project(ctxid)
28
+ elif action == "file_structure":
29
+ data = self.get_file_structure(input.get("name", None), input.get("settings"))
30
+ else:
31
+ raise Exception("Invalid action")
32
+
33
+ return {
34
+ "ok": True,
35
+ "data": data,
36
+ }
37
+ except Exception as e:
38
+ return {
39
+ "ok": False,
40
+ "error": str(e),
41
+ }
42
+
43
+ def get_active_projects_list(self):
44
+ return projects.get_active_projects_list()
45
+
46
+ def create_project(self, project: dict|None):
47
+ if project is None:
48
+ raise Exception("Project data is required")
49
+ data = projects.BasicProjectData(**project)
50
+ name = projects.create_project(project["name"], data)
51
+ return projects.load_edit_project_data(name)
52
+
53
+ def load_project(self, name: str|None):
54
+ if name is None:
55
+ raise Exception("Project name is required")
56
+ return projects.load_edit_project_data(name)
57
+
58
+ def update_project(self, project: dict|None):
59
+ if project is None:
60
+ raise Exception("Project data is required")
61
+ data = projects.EditProjectData(**project)
62
+ name = projects.update_project(project["name"], data)
63
+ return projects.load_edit_project_data(name)
64
+
65
+ def delete_project(self, name: str|None):
66
+ if name is None:
67
+ raise Exception("Project name is required")
68
+ return projects.delete_project(name)
69
+
70
+ def activate_project(self, context_id: str|None, name: str|None):
71
+ if not context_id:
72
+ raise Exception("Context ID is required")
73
+ if not name:
74
+ raise Exception("Project name is required")
75
+ return projects.activate_project(context_id, name)
76
+
77
+ def deactivate_project(self, context_id: str|None):
78
+ if not context_id:
79
+ raise Exception("Context ID is required")
80
+ return projects.deactivate_project(context_id)
81
+
82
+ def get_file_structure(self, name: str|None, settings: dict|None):
83
+ if not name:
84
+ raise Exception("Project name is required")
85
+ # project data
86
+ basic_data = projects.load_basic_project_data(name)
87
+ # override file structure settings
88
+ if settings:
89
+ basic_data["file_structure"] = settings # type: ignore
90
+ # get structure
91
+ return projects.get_file_structure(name, basic_data)
python/api/scheduler_task_create.py CHANGED
@@ -3,6 +3,7 @@ from python.helpers.task_scheduler import (
3
  TaskScheduler, ScheduledTask, AdHocTask, PlannedTask, TaskSchedule,
4
  serialize_task, parse_task_schedule, parse_task_plan, TaskType
5
  )
 
6
  from python.helpers.localization import Localization
7
  from python.helpers.print_style import PrintStyle
8
  import random
@@ -27,7 +28,26 @@ class SchedulerTaskCreate(ApiHandler):
27
  system_prompt = input.get("system_prompt", "")
28
  prompt = input.get("prompt")
29
  attachments = input.get("attachments", [])
30
- context_id = input.get("context_id", None)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
 
32
  # Check if schedule is provided (for ScheduledTask)
33
  schedule = input.get("schedule", {})
@@ -77,8 +97,10 @@ class SchedulerTaskCreate(ApiHandler):
77
  prompt=prompt,
78
  schedule=task_schedule,
79
  attachments=attachments,
80
- context_id=context_id,
81
- timezone=timezone
 
 
82
  )
83
  elif plan:
84
  # Create a planned task
@@ -94,7 +116,9 @@ class SchedulerTaskCreate(ApiHandler):
94
  prompt=prompt,
95
  plan=task_plan,
96
  attachments=attachments,
97
- context_id=context_id
 
 
98
  )
99
  else:
100
  # Create an ad-hoc task
@@ -105,7 +129,9 @@ class SchedulerTaskCreate(ApiHandler):
105
  prompt=prompt,
106
  token=token,
107
  attachments=attachments,
108
- context_id=context_id
 
 
109
  )
110
  # Verify token after creation
111
  if isinstance(task, AdHocTask):
@@ -132,5 +158,6 @@ class SchedulerTaskCreate(ApiHandler):
132
  printer.print(f"Serialized adhoc task, token in response: '{task_dict.get('token')}'")
133
 
134
  return {
 
135
  "task": task_dict
136
  }
 
3
  TaskScheduler, ScheduledTask, AdHocTask, PlannedTask, TaskSchedule,
4
  serialize_task, parse_task_schedule, parse_task_plan, TaskType
5
  )
6
+ from python.helpers.projects import load_basic_project_data
7
  from python.helpers.localization import Localization
8
  from python.helpers.print_style import PrintStyle
9
  import random
 
28
  system_prompt = input.get("system_prompt", "")
29
  prompt = input.get("prompt")
30
  attachments = input.get("attachments", [])
31
+
32
+ requested_project_slug = input.get("project_name")
33
+ if isinstance(requested_project_slug, str):
34
+ requested_project_slug = requested_project_slug.strip() or None
35
+ else:
36
+ requested_project_slug = None
37
+
38
+ project_slug = requested_project_slug
39
+ project_color = None
40
+
41
+ if project_slug:
42
+ try:
43
+ metadata = load_basic_project_data(requested_project_slug)
44
+ project_color = metadata.get("color") or None
45
+ except Exception as exc:
46
+ printer.error(f"SchedulerTaskCreate: failed to load project '{project_slug}': {exc}")
47
+ return {"error": f"Saving project failed: {project_slug}"}
48
+
49
+ # Always dedicated context for scheduler tasks created by ui
50
+ task_context_id = None
51
 
52
  # Check if schedule is provided (for ScheduledTask)
53
  schedule = input.get("schedule", {})
 
97
  prompt=prompt,
98
  schedule=task_schedule,
99
  attachments=attachments,
100
+ context_id=task_context_id,
101
+ timezone=timezone,
102
+ project_name=project_slug,
103
+ project_color=project_color,
104
  )
105
  elif plan:
106
  # Create a planned task
 
116
  prompt=prompt,
117
  plan=task_plan,
118
  attachments=attachments,
119
+ context_id=task_context_id,
120
+ project_name=project_slug,
121
+ project_color=project_color,
122
  )
123
  else:
124
  # Create an ad-hoc task
 
129
  prompt=prompt,
130
  token=token,
131
  attachments=attachments,
132
+ context_id=task_context_id,
133
+ project_name=project_slug,
134
+ project_color=project_color,
135
  )
136
  # Verify token after creation
137
  if isinstance(task, AdHocTask):
 
158
  printer.print(f"Serialized adhoc task, token in response: '{task_dict.get('token')}'")
159
 
160
  return {
161
+ "ok": True,
162
  "task": task_dict
163
  }
python/api/scheduler_task_delete.py CHANGED
@@ -30,7 +30,7 @@ class SchedulerTaskDelete(ApiHandler):
30
 
31
  context = None
32
  if task.context_id:
33
- context = self.get_context(task.context_id)
34
 
35
  # If the task is running, update its state to IDLE first
36
  if task.state == TaskState.RUNNING:
 
30
 
31
  context = None
32
  if task.context_id:
33
+ context = self.use_context(task.context_id)
34
 
35
  # If the task is running, update its state to IDLE first
36
  if task.state == TaskState.RUNNING:
python/api/scheduler_task_update.py CHANGED
@@ -48,6 +48,9 @@ class SchedulerTaskUpdate(ApiHandler):
48
  if "attachments" in input:
49
  update_params["attachments"] = input.get("attachments", [])
50
 
 
 
 
51
  # Update schedule if this is a scheduled task and schedule is provided
52
  if isinstance(task, ScheduledTask) and "schedule" in input:
53
  schedule_data = input.get("schedule", {})
@@ -85,5 +88,6 @@ class SchedulerTaskUpdate(ApiHandler):
85
  task_dict = serialize_task(updated_task)
86
 
87
  return {
 
88
  "task": task_dict
89
  }
 
48
  if "attachments" in input:
49
  update_params["attachments"] = input.get("attachments", [])
50
 
51
+ if "project_name" in input or "project_color" in input:
52
+ return {"error": "Project changes are not allowed"}
53
+
54
  # Update schedule if this is a scheduled task and schedule is provided
55
  if isinstance(task, ScheduledTask) and "schedule" in input:
56
  schedule_data = input.get("schedule", {})
 
88
  task_dict = serialize_task(updated_task)
89
 
90
  return {
91
+ "ok": True,
92
  "task": task_dict
93
  }
python/api/scheduler_tasks_list.py CHANGED
@@ -22,8 +22,8 @@ class SchedulerTasksList(ApiHandler):
22
  # Use the scheduler's convenience method for task serialization
23
  tasks_list = scheduler.serialize_all_tasks()
24
 
25
- return {"tasks": tasks_list}
26
 
27
  except Exception as e:
28
  PrintStyle.error(f"Failed to list tasks: {str(e)} {traceback.format_exc()}")
29
- return {"error": f"Failed to list tasks: {str(e)} {traceback.format_exc()}", "tasks": []}
 
22
  # Use the scheduler's convenience method for task serialization
23
  tasks_list = scheduler.serialize_all_tasks()
24
 
25
+ return {"ok": True, "tasks": tasks_list}
26
 
27
  except Exception as e:
28
  PrintStyle.error(f"Failed to list tasks: {str(e)} {traceback.format_exc()}")
29
+ return {"ok": False, "error": f"Failed to list tasks: {str(e)} {traceback.format_exc()}", "tasks": []}
python/api/synthesize.py CHANGED
@@ -7,9 +7,11 @@ from python.helpers import runtime, settings, kokoro_tts
7
  class Synthesize(ApiHandler):
8
  async def process(self, input: dict, request: Request) -> dict | Response:
9
  text = input.get("text", "")
10
- # ctxid = input.get("ctxid", "")
11
 
12
- # context = self.get_context(ctxid)
 
 
13
  # if not await kokoro_tts.is_downloaded():
14
  # context.log.log(type="info", content="Kokoro TTS model is currently being initialized, please wait...")
15
 
 
7
  class Synthesize(ApiHandler):
8
  async def process(self, input: dict, request: Request) -> dict | Response:
9
  text = input.get("text", "")
10
+ ctxid = input.get("ctxid", "")
11
 
12
+ if ctxid:
13
+ context = self.use_context(ctxid)
14
+
15
  # if not await kokoro_tts.is_downloaded():
16
  # context.log.log(type="info", content="Kokoro TTS model is currently being initialized, please wait...")
17
 
python/api/transcribe.py CHANGED
@@ -5,9 +5,11 @@ from python.helpers import runtime, settings, whisper
5
  class Transcribe(ApiHandler):
6
  async def process(self, input: dict, request: Request) -> dict | Response:
7
  audio = input.get("audio")
8
- # ctxid = input.get("ctxid", "")
 
 
 
9
 
10
- # context = self.get_context(ctxid)
11
  # if not await whisper.is_downloaded():
12
  # context.log.log(type="info", content="Whisper STT model is currently being initialized, please wait...")
13
 
 
5
  class Transcribe(ApiHandler):
6
  async def process(self, input: dict, request: Request) -> dict | Response:
7
  audio = input.get("audio")
8
+ ctxid = input.get("ctxid", "")
9
+
10
+ if ctxid:
11
+ context = self.use_context(ctxid)
12
 
 
13
  # if not await whisper.is_downloaded():
14
  # context.log.log(type="info", content="Whisper STT model is currently being initialized, please wait...")
15
 
python/api/tunnel.py CHANGED
@@ -4,48 +4,51 @@ from python.helpers.tunnel_manager import TunnelManager
4
 
5
  class Tunnel(ApiHandler):
6
  async def process(self, input: dict, request: Request) -> dict | Response:
7
- action = input.get("action", "get")
8
-
9
- tunnel_manager = TunnelManager.get_instance()
10
 
11
- if action == "health":
12
- return {"success": True}
13
-
14
- if action == "create":
15
- port = runtime.get_web_ui_port()
16
- provider = input.get("provider", "serveo") # Default to serveo
17
- tunnel_url = tunnel_manager.start_tunnel(port, provider)
18
- if tunnel_url is None:
19
- # Add a little delay and check again - tunnel might be starting
20
- import time
21
- time.sleep(2)
22
- tunnel_url = tunnel_manager.get_tunnel_url()
23
-
24
- return {
25
- "success": tunnel_url is not None,
26
- "tunnel_url": tunnel_url,
27
- "message": "Tunnel creation in progress" if tunnel_url is None else "Tunnel created successfully"
28
- }
29
-
30
- elif action == "stop":
31
- return self.stop()
32
-
33
- elif action == "get":
34
  tunnel_url = tunnel_manager.get_tunnel_url()
35
- return {
36
- "success": tunnel_url is not None,
37
- "tunnel_url": tunnel_url,
38
- "is_running": tunnel_manager.is_running
39
- }
40
 
41
  return {
42
- "success": False,
43
- "error": "Invalid action. Use 'create', 'stop', or 'get'."
44
- }
45
-
46
- def stop(self):
47
- tunnel_manager = TunnelManager.get_instance()
48
- tunnel_manager.stop_tunnel()
 
 
 
49
  return {
50
- "success": True
 
 
51
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
class Tunnel(ApiHandler):
    """API endpoint wrapper; delegates tunnel actions to the module-level process()."""

    async def process(self, input: dict, request: Request) -> dict | Response:
        # Thin shim: the tunnel logic lives in the module-level process() so it
        # can also be invoked without an ApiHandler instance (tunnel_proxy does
        # this when the standalone tunnel service is unavailable).
        return await process(input)
 
 
8
 
9
async def process(input: dict) -> dict | Response:
    """Handle a tunnel API action.

    Supported actions (input["action"], default "get"):
      - "health": liveness probe, always succeeds.
      - "create": start a tunnel on the web UI port via the requested provider.
      - "stop":   tear down the running tunnel.
      - "get":    report the current tunnel URL and running state.

    Returns a JSON-serializable dict describing the outcome.
    """
    action = input.get("action", "get")

    # Answer the liveness probe before touching the tunnel manager, so a
    # health check never triggers lazy construction of the singleton.
    if action == "health":
        return {"success": True}

    tunnel_manager = TunnelManager.get_instance()

    if action == "create":
        port = runtime.get_web_ui_port()
        provider = input.get("provider", "serveo")  # Default to serveo
        tunnel_url = tunnel_manager.start_tunnel(port, provider)
        if tunnel_url is None:
            # Tunnel might still be starting: wait briefly and check again.
            # Use asyncio.sleep, not time.sleep — this is a coroutine and a
            # blocking sleep would stall the whole event loop for 2 seconds.
            import asyncio
            await asyncio.sleep(2)
            tunnel_url = tunnel_manager.get_tunnel_url()

        return {
            "success": tunnel_url is not None,
            "tunnel_url": tunnel_url,
            "message": "Tunnel creation in progress" if tunnel_url is None else "Tunnel created successfully"
        }

    elif action == "stop":
        return stop()

    elif action == "get":
        tunnel_url = tunnel_manager.get_tunnel_url()
        return {
            "success": tunnel_url is not None,
            "tunnel_url": tunnel_url,
            "is_running": tunnel_manager.is_running
        }

    return {
        "success": False,
        "error": "Invalid action. Use 'create', 'stop', or 'get'."
    }
48
+
49
def stop():
    """Stop the currently running tunnel and confirm success."""
    TunnelManager.get_instance().stop_tunnel()
    return {"success": True}
python/api/tunnel_proxy.py CHANGED
@@ -6,30 +6,33 @@ import requests
6
 
7
  class TunnelProxy(ApiHandler):
8
  async def process(self, input: dict, request: Request) -> dict | Response:
9
- # Get configuration from environment
10
- tunnel_api_port = (
11
- runtime.get_arg("tunnel_api_port")
12
- or int(dotenv.get_dotenv_value("TUNNEL_API_PORT", 0))
13
- or 55520
14
- )
15
 
16
- # first verify the service is running:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  service_ok = False
 
 
 
18
  try:
19
- response = requests.post(f"http://localhost:{tunnel_api_port}/", json={"action": "health"})
20
- if response.status_code == 200:
21
- service_ok = True
22
  except Exception as e:
23
- service_ok = False
24
-
25
- # forward this request to the tunnel service if OK
26
- if service_ok:
27
- try:
28
- response = requests.post(f"http://localhost:{tunnel_api_port}/", json=input)
29
- return response.json()
30
- except Exception as e:
31
- return {"error": str(e)}
32
- else:
33
- # forward to API handler directly
34
- from python.api.tunnel import Tunnel
35
- return await Tunnel(self.app, self.thread_lock).process(input, request)
 
6
 
7
class TunnelProxy(ApiHandler):
    """API endpoint wrapper; delegates proxying to the module-level process()."""

    async def process(self, input: dict, request: Request) -> dict | Response:
        # Thin shim so the proxy logic is callable without an ApiHandler instance.
        return await process(input)
 
 
 
 
 
10
 
11
+ async def process(input: dict) -> dict | Response:
12
+ # Get configuration from environment
13
+ tunnel_api_port = (
14
+ runtime.get_arg("tunnel_api_port")
15
+ or int(dotenv.get_dotenv_value("TUNNEL_API_PORT", 0))
16
+ or 55520
17
+ )
18
+
19
+ # first verify the service is running:
20
+ service_ok = False
21
+ try:
22
+ response = requests.post(f"http://localhost:{tunnel_api_port}/", json={"action": "health"})
23
+ if response.status_code == 200:
24
+ service_ok = True
25
+ except Exception as e:
26
  service_ok = False
27
+
28
+ # forward this request to the tunnel service if OK
29
+ if service_ok:
30
  try:
31
+ response = requests.post(f"http://localhost:{tunnel_api_port}/", json=input)
32
+ return response.json()
 
33
  except Exception as e:
34
+ return {"error": str(e)}
35
+ else:
36
+ # forward to API handler directly
37
+ from python.api.tunnel import process as local_process
38
+ return await local_process(input)
 
 
 
 
 
 
 
 
python/extensions/error_format/_10_mask_errors.py CHANGED
@@ -1,5 +1,5 @@
1
  from python.helpers.extension import Extension
2
- from python.helpers.secrets import SecretsManager
3
 
4
 
5
  class MaskErrorSecrets(Extension):
@@ -10,7 +10,7 @@ class MaskErrorSecrets(Extension):
10
  if not msg:
11
  return
12
 
13
- secrets_mgr = SecretsManager.get_instance()
14
 
15
  # Mask the error message
16
  if "message" in msg:
 
1
  from python.helpers.extension import Extension
2
+ from python.helpers.secrets import get_secrets_manager
3
 
4
 
5
  class MaskErrorSecrets(Extension):
 
10
  if not msg:
11
  return
12
 
13
+ secrets_mgr = get_secrets_manager(self.agent.context)
14
 
15
  # Mask the error message
16
  if "message" in msg:
python/extensions/hist_add_before/_10_mask_content.py CHANGED
@@ -1,4 +1,5 @@
1
  from python.helpers.extension import Extension
 
2
 
3
 
4
  class MaskHistoryContent(Extension):
@@ -10,8 +11,7 @@ class MaskHistoryContent(Extension):
10
  return
11
 
12
  try:
13
- from python.helpers.secrets import SecretsManager
14
- secrets_mgr = SecretsManager.get_instance()
15
 
16
  # Mask the content before adding to history
17
  content_data["content"] = self._mask_content(content_data["content"], secrets_mgr)
 
1
  from python.helpers.extension import Extension
2
+ from python.helpers.secrets import get_secrets_manager
3
 
4
 
5
  class MaskHistoryContent(Extension):
 
11
  return
12
 
13
  try:
14
+ secrets_mgr = get_secrets_manager(self.agent.context)
 
15
 
16
  # Mask the content before adding to history
17
  content_data["content"] = self._mask_content(content_data["content"], secrets_mgr)
python/extensions/message_loop_prompts_after/_75_include_project_extras.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from python.helpers.extension import Extension
2
+ from agent import LoopData
3
+ from python.helpers import projects
4
+
5
+
6
class IncludeProjectExtras(Extension):
    """Adds the active project's file structure to the agent's prompt extras."""

    async def execute(self, loop_data: LoopData | None = None, **kwargs):
        """Attach a temporary 'project_file_structure' extra when a project is active.

        loop_data: current loop state; a fresh LoopData is created when omitted.
        (The original `loop_data: LoopData = LoopData()` default is the shared
        mutable-default-argument pitfall: every default invocation would mutate
        the same module-level instance via extras_temporary.)
        """
        if loop_data is None:
            loop_data = LoopData()

        # active project
        project_name = projects.get_context_project_name(self.agent.context)
        if not project_name:
            return

        # project config
        project = projects.load_basic_project_data(project_name)

        # load file structure if enabled
        if project["file_structure"]["enabled"]:
            file_structure = projects.get_file_structure(project_name)
            gitignore = cleanup_gitignore(project["file_structure"]["gitignore"])

            # render the prompt template with the structure details
            file_structure_prompt = self.agent.read_prompt(
                "agent.extras.project.file_structure.md",
                max_depth=project["file_structure"]["max_depth"],
                gitignore=gitignore,
                project_name=project_name,
                file_structure=file_structure,
            )
            # stored under a stable key, so repeated runs overwrite rather than accumulate
            loop_data.extras_temporary["project_file_structure"] = file_structure_prompt
32
+
33
+
34
def cleanup_gitignore(gitignore_raw: str) -> str:
    """Normalize raw gitignore text: drop comments, blank lines, and edge whitespace."""
    entries = []
    for raw_line in gitignore_raw.split('\n'):
        # Cut everything from the first '#' (comment), then trim whitespace.
        entry = raw_line.split('#', 1)[0].strip()
        if entry:
            entries.append(entry)
    return '\n'.join(entries) if entries else "nothing ignored"