frdel commited on
Commit
20bc5ae
·
1 Parent(s): fb60c87

Updates for v0.6

Browse files

Main pack of updates:
- web ui
- knowledge import
- FAISS DB
- prompt subfolders

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. .gitignore +11 -1
  2. .vscode/launch.json +10 -2
  3. README.md +9 -9
  4. agent.py +66 -18
  5. docs/win_installation_guide.txt +4 -4
  6. example.env +6 -9
  7. initialize.py +59 -0
  8. {memory → knowledge}/.gitkeep +0 -0
  9. prompts/{agent.memory.md → default/agent.memory.md} +0 -0
  10. prompts/{agent.system.md → default/agent.system.md} +0 -0
  11. prompts/{agent.tools.md → default/agent.tools.md} +0 -0
  12. prompts/{fw.code_no_output.md → default/fw.code_no_output.md} +0 -0
  13. prompts/{fw.code_runtime_wrong.md → default/fw.code_runtime_wrong.md} +0 -0
  14. prompts/{fw.error.md → default/fw.error.md} +0 -0
  15. prompts/{fw.intervention.md → default/fw.intervention.md} +0 -0
  16. prompts/{fw.memories_deleted.md → default/fw.memories_deleted.md} +0 -0
  17. prompts/{fw.memories_not_found.md → default/fw.memories_not_found.md} +0 -0
  18. prompts/{fw.memory_saved.md → default/fw.memory_saved.md} +0 -0
  19. prompts/{fw.msg_cleanup.md → default/fw.msg_cleanup.md} +0 -0
  20. prompts/{fw.msg_from_subordinate.md → default/fw.msg_from_subordinate.md} +0 -0
  21. prompts/{fw.msg_misformat.md → default/fw.msg_misformat.md} +0 -0
  22. prompts/{fw.msg_repeat.md → default/fw.msg_repeat.md} +0 -0
  23. prompts/{fw.msg_timeout.md → default/fw.msg_timeout.md} +0 -0
  24. prompts/{fw.msg_truncated.md → default/fw.msg_truncated.md} +0 -0
  25. prompts/{fw.tool_not_found.md → default/fw.tool_not_found.md} +0 -0
  26. prompts/{fw.tool_response.md → default/fw.tool_response.md} +0 -0
  27. prompts/{fw.user_message.md → default/fw.user_message.md} +0 -0
  28. prompts/{msg.memory_cleanup.md → default/msg.memory_cleanup.md} +0 -0
  29. prompts/{tool.knowledge.response.md → default/tool.knowledge.response.md} +0 -0
  30. python/helpers/docker.py +14 -3
  31. python/helpers/extract_tools.py +0 -1
  32. python/helpers/knowledge_import.py +87 -0
  33. python/helpers/log.py +58 -0
  34. python/helpers/messages.py +4 -4
  35. python/helpers/rate_limiter.py +2 -14
  36. python/helpers/shell_ssh.py +49 -17
  37. python/helpers/strings.py +125 -0
  38. python/helpers/tool.py +5 -2
  39. python/helpers/vdb.py +0 -69
  40. python/helpers/vector_db.py +82 -12
  41. python/tools/code_execution_tool.py +15 -3
  42. python/tools/knowledge_tool.py +3 -2
  43. python/tools/memory_tool.py +26 -16
  44. python/tools/online_knowledge_tool.py +0 -13
  45. python/tools/response.py +3 -1
  46. python/tools/task_done.py +2 -1
  47. python/tools/unknown.py +2 -2
  48. requirements.txt +5 -2
  49. main.py β†’ run_cli.py +17 -84
  50. run_ui.py +192 -0
.gitignore CHANGED
@@ -20,4 +20,14 @@ memory/*
20
  # Ignore all contents of the directory "logs"
21
  logs/*
22
  # But do not ignore the directory itself
23
- !logs/.gitkeep
 
 
 
 
 
 
 
 
 
 
 
20
  # Ignore all contents of the directory "logs"
21
  logs/*
22
  # But do not ignore the directory itself
23
+ !logs/.gitkeep
24
+
25
+ # Ignore all contents of the directory "tmp"
26
+ tmp/*
27
+ # But do not ignore the directory itself
28
+ !tmp/.gitkeep
29
+
30
+ # Ignore all contents of the directory "knowledge"
31
+ knowledge/*
32
+ # But do not ignore the directory itself
33
+ !knowledge/.gitkeep
.vscode/launch.json CHANGED
@@ -2,10 +2,18 @@
2
  "version": "0.2.0",
3
  "configurations": [
4
  {
5
- "name": "Debug main.py",
6
  "type": "debugpy",
7
  "request": "launch",
8
- "program": "./main.py",
 
 
 
 
 
 
 
 
9
  "console": "integratedTerminal",
10
  "args": ["-Xfrozen_modules=off"]
11
  },
 
2
  "version": "0.2.0",
3
  "configurations": [
4
  {
5
+ "name": "Debug run_ui.py",
6
  "type": "debugpy",
7
  "request": "launch",
8
+ "program": "./run_ui.py",
9
+ "console": "integratedTerminal",
10
+ "args": ["-Xfrozen_modules=off"]
11
+ },
12
+ {
13
+ "name": "Debug run_cli.py",
14
+ "type": "debugpy",
15
+ "request": "launch",
16
+ "program": "./run_cli.py",
17
  "console": "integratedTerminal",
18
  "args": ["-Xfrozen_modules=off"]
19
  },
README.md CHANGED
@@ -1,6 +1,6 @@
1
  # Agent Zero
2
 
3
- [![Join our Discord](https://img.shields.io/badge/Discord-Join%20our%20server-5865F2?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/B8KZKNsPpj) [![Subscribe on YouTube](https://img.shields.io/badge/YouTube-Subscribe-red?style=for-the-badge&logo=youtube&logoColor=white)](https://www.youtube.com/@AgentZeroFW) [![Connect on LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue?style=for-the-badge&logo=linkedin&logoColor=white)](https://www.linkedin.com/in/jan-tomasek/) [![Follow on X.com](https://img.shields.io/badge/X.com-Follow-1DA1F2?style=for-the-badge&logo=x&logoColor=white)](https://x.com/JanTomasekDev)
4
 
5
 
6
 
@@ -23,12 +23,12 @@
23
 
24
  3. **Multi-agent cooperation**
25
  - Every agent has a superior agent giving it tasks and instructions. Every agent then reports back to its superior.
26
- - In the case of the first agent, the superior is the human user; the agent sees no difference.
27
  - Every agent can create its subordinate agent to help break down and solve subtasks. This helps all agents keep their context clean and focused.
28
 
29
  4. **Completely customizable and extensible**
30
  - Almost nothing in this framework is hard-coded. Nothing is hidden. Everything can be extended or changed by the user.
31
- - The whole behavior is defined by a system prompt in the **prompts/agent.system.md** file. Change this prompt and change the framework dramatically.
32
  - The framework does not guide or limit the agent in any way. There are no hard-coded rails that agents have to follow.
33
  - Every prompt, every small message template sent to the agent in its communication loop, can be found in the **prompts/** folder and changed.
34
  - Every default tool can be found in the **python/tools/** folder and changed or copied to create new predefined tools.
@@ -81,10 +81,10 @@ If you cannot provide all the necessary conditions or API keys, just change the
81
  Update: [Guide by CheezChat for Windows](./docs/win_installation_guide.txt)
82
 
83
  1. **Required API keys:**
84
- - At the moment, the only recommended API key is for https://www.perplexity.ai/ API. Perplexity is used as a convenient web search tool and has not yet been replaced by an open-source alternative. If you do not have an API key for Perplexity, leave it empty in the .env file and Perplexity will not be used.
85
- - Chat models and embedding models can be executed locally via Ollama and HuggingFace or via API as well.
86
 
87
- 2. **Enter your API keys:**
88
  - You can enter your API keys into the **.env** file, which you can copy from **example.env**
89
  - Or you can export your API keys in the terminal session:
90
  ~~~bash
@@ -97,9 +97,9 @@ export API_KEY_OPENAI="your-api-key-here"
97
  pip install -r requirements.txt
98
  ~~~
99
 
100
- 3. **Choose your chat, utility and embeddings model:**
101
- - In the **main.py** file, right at the start of the **chat()** function, you can see how the chat model and embedding model are set.
102
- - You can choose between online models (OpenAI, Anthropic, Groq) or offline (Ollama, HuggingFace) for both.
103
 
104
  4. **run Docker:**
105
  - Easiest way is to install Docker Desktop application and just run it. The rest will be handled by the framework itself.
 
1
  # Agent Zero
2
 
3
+ [![Join our Skool Community](https://img.shields.io/badge/Skool-Join%20our%20Community-4A90E2?style=for-the-badge&logo=skool&logoColor=white)](https://www.skool.com/agent-zero) [![Join our Discord](https://img.shields.io/badge/Discord-Join%20our%20server-5865F2?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/B8KZKNsPpj) [![Subscribe on YouTube](https://img.shields.io/badge/YouTube-Subscribe-red?style=for-the-badge&logo=youtube&logoColor=white)](https://www.youtube.com/@AgentZeroFW) [![Connect on LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue?style=for-the-badge&logo=linkedin&logoColor=white)](https://www.linkedin.com/in/jan-tomasek/) [![Follow on X.com](https://img.shields.io/badge/X.com-Follow-1DA1F2?style=for-the-badge&logo=x&logoColor=white)](https://x.com/JanTomasekDev)
4
 
5
 
6
 
 
23
 
24
  3. **Multi-agent cooperation**
25
  - Every agent has a superior agent giving it tasks and instructions. Every agent then reports back to its superior.
26
+ - In the case of the first agent in the chain (Agent 0), the superior is the human user; the agent sees no difference.
27
  - Every agent can create its subordinate agent to help break down and solve subtasks. This helps all agents keep their context clean and focused.
28
 
29
  4. **Completely customizable and extensible**
30
  - Almost nothing in this framework is hard-coded. Nothing is hidden. Everything can be extended or changed by the user.
31
+ - The whole behavior is defined by a system prompt in the **prompts/default/agent.system.md** file. Change this prompt and change the framework dramatically.
32
  - The framework does not guide or limit the agent in any way. There are no hard-coded rails that agents have to follow.
33
  - Every prompt, every small message template sent to the agent in its communication loop, can be found in the **prompts/** folder and changed.
34
  - Every default tool can be found in the **python/tools/** folder and changed or copied to create new predefined tools.
 
81
  Update: [Guide by CheezChat for Windows](./docs/win_installation_guide.txt)
82
 
83
  1. **Required API keys:**
84
+ - No API keys are required. Models can run locally. The only recommended API key is for https://www.perplexity.ai/ API. Perplexity is used as a convenient web search tool and has not yet been replaced by an open-source alternative in Agent Zero. If you do not have an API key for Perplexity, leave it empty in the .env file and Perplexity will not be used.
85
+ - Chat models and embedding models can be executed locally via Ollama, LMStudio and HuggingFace or via API as well.
86
 
87
+ 2. **Enter your API keys if needed:**
88
  - You can enter your API keys into the **.env** file, which you can copy from **example.env**
89
  - Or you can export your API keys in the terminal session:
90
  ~~~bash
 
97
  pip install -r requirements.txt
98
  ~~~
99
 
100
+ 3. **Choose your chat, utility and embeddings model and check other configuration options:**
101
+ - In the **initialize.py** file, you can see how the chat model and embedding model are set.
102
+ - You can choose between online models (OpenAI, Anthropic, Groq) or offline (Ollama, LMStudio, HuggingFace) for both.
103
 
104
  4. **run Docker:**
105
  - Easiest way is to install Docker Desktop application and just run it. The rest will be handled by the framework itself.
agent.py CHANGED
@@ -9,18 +9,24 @@ from langchain_core.messages import HumanMessage, SystemMessage
9
  from langchain_core.language_models.chat_models import BaseChatModel
10
  from langchain_core.language_models.llms import BaseLLM
11
  from langchain_core.embeddings import Embeddings
 
 
 
 
12
 
13
  @dataclass
14
  class AgentConfig:
15
  chat_model: BaseChatModel | BaseLLM
16
  utility_model: BaseChatModel | BaseLLM
17
  embeddings_model:Embeddings
 
18
  memory_subdir: str = ""
 
19
  auto_memory_count: int = 3
20
  auto_memory_skip: int = 2
21
  rate_limit_seconds: int = 60
22
  rate_limit_requests: int = 15
23
- rate_limit_input_tokens: int = 1000000
24
  rate_limit_output_tokens: int = 0
25
  msgs_keep_max: int = 25
26
  msgs_keep_start: int = 5
@@ -54,23 +60,35 @@ class Agent:
54
  self.number = number
55
  self.agent_name = f"Agent {self.number}"
56
 
57
- self.system_prompt = files.read_file("./prompts/agent.system.md", agent_name=self.agent_name)
58
- self.tools_prompt = files.read_file("./prompts/agent.tools.md")
59
-
60
  self.history = []
61
  self.last_message = ""
62
  self.intervention_message = ""
63
  self.intervention_status = False
64
  self.rate_limiter = rate_limiter.RateLimiter(max_calls=self.config.rate_limit_requests,max_input_tokens=self.config.rate_limit_input_tokens,max_output_tokens=self.config.rate_limit_output_tokens,window_seconds=self.config.rate_limit_seconds)
65
  self.data = {} # free data object all the tools can use
 
 
66
 
67
  os.chdir(files.get_abs_path("./work_dir")) #change CWD to work_dir
 
 
 
68
 
 
 
 
 
 
 
 
 
 
69
 
70
  def message_loop(self, msg: str):
71
  try:
 
72
  printer = PrintStyle(italic=True, font_color="#b3ffd9", padding=False)
73
- user_message = files.read_file("./prompts/fw.user_message.md", message=msg)
74
  self.append_message(user_message, human=True) # Append the user's input to the history
75
  memories = self.fetch_memories(True)
76
 
@@ -81,7 +99,7 @@ class Agent:
81
 
82
  try:
83
 
84
- system = self.system_prompt + "\n\n" + self.tools_prompt
85
  memories = self.fetch_memories()
86
  if memories: system+= "\n\n"+memories
87
 
@@ -97,8 +115,9 @@ class Agent:
97
  self.rate_limiter.limit_call_and_input(tokens)
98
 
99
  # output that the agent is starting
100
- PrintStyle(bold=True, font_color="green", padding=True, background_color="white").print(f"{self.agent_name}: Starting a message:")
101
-
 
102
  for chunk in chain.stream(inputs):
103
  if self.handle_intervention(agent_response): break # wait for intervention and handle it, if paused
104
 
@@ -107,33 +126,50 @@ class Agent:
107
  else: content = str(chunk)
108
 
109
  if content:
110
- printer.stream(content) # output the agent response stream
111
  agent_response += content # concatenate stream into the response
 
112
 
113
  self.rate_limiter.set_output_tokens(int(len(agent_response)/4))
114
 
115
  if not self.handle_intervention(agent_response):
116
  if self.last_message == agent_response: #if assistant_response is the same as last message in history, let him know
117
  self.append_message(agent_response) # Append the assistant's response to the history
118
- warning_msg = files.read_file("./prompts/fw.msg_repeat.md")
119
  self.append_message(warning_msg, human=True) # Append warning message to the history
120
  PrintStyle(font_color="orange", padding=True).print(warning_msg)
 
121
 
122
  else: #otherwise proceed with tool
123
  self.append_message(agent_response) # Append the assistant's response to the history
124
  tools_result = self.process_tools(agent_response) # process tools requested in agent message
125
- if tools_result: return tools_result #break the execution if the task is done
 
 
126
 
127
- # Forward errors to the LLM, maybe he can fix them
128
  except Exception as e:
129
  error_message = errors.format_error(e)
130
- msg_response = files.read_file("./prompts/fw.error.md", error=error_message) # error message template
131
  self.append_message(msg_response, human=True)
132
  PrintStyle(font_color="red", padding=True).print(msg_response)
 
 
133
 
134
  finally:
135
  Agent.streaming_agent = None # unset current streamer
136
 
 
 
 
 
 
 
 
 
 
 
 
137
  def get_data(self, field:str):
138
  return self.data.get(field, None)
139
 
@@ -162,10 +198,12 @@ class Agent:
162
  chain = prompt | self.config.utility_model
163
  response = ""
164
  printer = None
 
165
 
166
  if output_label:
167
  PrintStyle(bold=True, font_color="orange", padding=True, background_color="white").print(f"{self.agent_name}: {output_label}:")
168
- printer = PrintStyle(italic=True, font_color="orange", padding=False)
 
169
 
170
  formatted_inputs = prompt.format()
171
  tokens = int(len(formatted_inputs)/4)
@@ -180,6 +218,7 @@ class Agent:
180
 
181
  if printer: printer.stream(content)
182
  response+=content
 
183
 
184
  self.rate_limiter.set_output_tokens(int(len(response)/4))
185
 
@@ -190,7 +229,7 @@ class Agent:
190
  return self.history[-1]
191
 
192
  def replace_middle_messages(self,middle_messages):
193
- cleanup_prompt = files.read_file("./prompts/fw.msg_cleanup.md")
194
  summary = self.send_adhoc_message(system=cleanup_prompt,msg=self.concat_messages(middle_messages), output_label="Mid messages cleanup summary")
195
  new_human_message = HumanMessage(content=summary)
196
  return [new_human_message]
@@ -225,7 +264,7 @@ class Agent:
225
  while self.paused: time.sleep(0.1) # wait if paused
226
  if self.intervention_message and not self.intervention_status: # if there is an intervention message, but not yet processed
227
  if progress.strip(): self.append_message(progress) # append the response generated so far
228
- user_msg = files.read_file("./prompts/fw.intervention.md", user_message=self.intervention_message) # format the user intervention template
229
  self.append_message(user_msg,human=True) # append the intervention message
230
  self.intervention_message = "" # reset the intervention message
231
  self.intervention_status = True
@@ -253,9 +292,10 @@ class Agent:
253
  if self.handle_intervention(): return # wait if paused and handle intervention message if needed
254
  if response.break_loop: return response.message
255
  else:
256
- msg = files.read_file("prompts/fw.msg_misformat.md")
257
  self.append_message(msg, human=True)
258
  PrintStyle(font_color="red", padding=True).print(msg)
 
259
 
260
 
261
  def get_tool(self, name: str, args: dict, message: str, **kwargs):
@@ -290,9 +330,17 @@ class Agent:
290
  "conversation_history" : messages,
291
  "raw_memories": memories
292
  }
293
- cleanup_prompt = files.read_file("./prompts/msg.memory_cleanup.md").replace("{", "{{")
294
  clean_memories = self.send_adhoc_message(cleanup_prompt,json.dumps(input), output_label="Memory injection")
295
  return clean_memories
296
 
 
 
 
 
 
 
 
 
297
  def call_extension(self, name: str, **kwargs) -> Any:
298
  pass
 
9
  from langchain_core.language_models.chat_models import BaseChatModel
10
  from langchain_core.language_models.llms import BaseLLM
11
  from langchain_core.embeddings import Embeddings
12
+ from concurrent.futures import Future
13
+ from python.helpers.log import Log
14
+ from python.helpers.dirty_json import DirtyJson
15
+
16
 
17
  @dataclass
18
  class AgentConfig:
19
  chat_model: BaseChatModel | BaseLLM
20
  utility_model: BaseChatModel | BaseLLM
21
  embeddings_model:Embeddings
22
+ prompts_subdir: str = ""
23
  memory_subdir: str = ""
24
+ knowledge_subdir: str = ""
25
  auto_memory_count: int = 3
26
  auto_memory_skip: int = 2
27
  rate_limit_seconds: int = 60
28
  rate_limit_requests: int = 15
29
+ rate_limit_input_tokens: int = 0
30
  rate_limit_output_tokens: int = 0
31
  msgs_keep_max: int = 25
32
  msgs_keep_start: int = 5
 
60
  self.number = number
61
  self.agent_name = f"Agent {self.number}"
62
 
 
 
 
63
  self.history = []
64
  self.last_message = ""
65
  self.intervention_message = ""
66
  self.intervention_status = False
67
  self.rate_limiter = rate_limiter.RateLimiter(max_calls=self.config.rate_limit_requests,max_input_tokens=self.config.rate_limit_input_tokens,max_output_tokens=self.config.rate_limit_output_tokens,window_seconds=self.config.rate_limit_seconds)
68
  self.data = {} # free data object all the tools can use
69
+ self.future: Future|None = None
70
+
71
 
72
  os.chdir(files.get_abs_path("./work_dir")) #change CWD to work_dir
73
+
74
+ def communicate(self, msg: str):
75
+ Agent.paused=False #unpause if paused
76
 
77
+ if not self.future or self.future.done():
78
+ return self.message_loop(msg)
79
+ else:
80
+ if Agent.streaming_agent: current_agent = Agent.streaming_agent
81
+ else: current_agent = self
82
+
83
+ current_agent.intervention_message = msg #intervene current agent
84
+ if self.future: return self.future.result() #wait for original agent
85
+ else: return ""
86
 
87
  def message_loop(self, msg: str):
88
  try:
89
+ self.future = Future()
90
  printer = PrintStyle(italic=True, font_color="#b3ffd9", padding=False)
91
+ user_message = self.read_prompt("fw.user_message.md", message=msg)
92
  self.append_message(user_message, human=True) # Append the user's input to the history
93
  memories = self.fetch_memories(True)
94
 
 
99
 
100
  try:
101
 
102
+ system = self.read_prompt("agent.system.md", agent_name=self.agent_name) + "\n\n" + self.read_prompt("agent.tools.md")
103
  memories = self.fetch_memories()
104
  if memories: system+= "\n\n"+memories
105
 
 
115
  self.rate_limiter.limit_call_and_input(tokens)
116
 
117
  # output that the agent is starting
118
+ PrintStyle(bold=True, font_color="green", padding=True, background_color="white").print(f"{self.agent_name}: Generating:")
119
+ log = Log(type="agent", heading=f"{self.agent_name}: Generating:")
120
+
121
  for chunk in chain.stream(inputs):
122
  if self.handle_intervention(agent_response): break # wait for intervention and handle it, if paused
123
 
 
126
  else: content = str(chunk)
127
 
128
  if content:
129
+ printer.stream(content) # output the agent response stream
130
  agent_response += content # concatenate stream into the response
131
+ self.log_from_stream(agent_response, log)
132
 
133
  self.rate_limiter.set_output_tokens(int(len(agent_response)/4))
134
 
135
  if not self.handle_intervention(agent_response):
136
  if self.last_message == agent_response: #if assistant_response is the same as last message in history, let him know
137
  self.append_message(agent_response) # Append the assistant's response to the history
138
+ warning_msg = self.read_prompt("fw.msg_repeat.md")
139
  self.append_message(warning_msg, human=True) # Append warning message to the history
140
  PrintStyle(font_color="orange", padding=True).print(warning_msg)
141
+ Log.log(type="warning", content=warning_msg)
142
 
143
  else: #otherwise proceed with tool
144
  self.append_message(agent_response) # Append the assistant's response to the history
145
  tools_result = self.process_tools(agent_response) # process tools requested in agent message
146
+ if tools_result: #final response of message loop available
147
+ self.future.set_result(tools_result) #set result to future
148
+ return tools_result #break the execution if the task is done
149
 
150
+ # Forward errors to the LLM, maybe it can fix them
151
  except Exception as e:
152
  error_message = errors.format_error(e)
153
+ msg_response = self.read_prompt("fw.error.md", error=error_message) # error message template
154
  self.append_message(msg_response, human=True)
155
  PrintStyle(font_color="red", padding=True).print(msg_response)
156
+ Log.log(type="error", content=msg_response)
157
+ self.future.set_exception(e) #set result to future
158
 
159
  finally:
160
  Agent.streaming_agent = None # unset current streamer
161
 
162
+ def read_prompt(self, file:str, **kwargs):
163
+ content = ""
164
+ if self.config.prompts_subdir:
165
+ try:
166
+ content = files.read_file(files.get_abs_path(f"./prompts/{self.config.prompts_subdir}/{file}"), **kwargs)
167
+ except Exception as e:
168
+ pass
169
+ if not content:
170
+ content = files.read_file(files.get_abs_path(f"./prompts/default/{file}"), **kwargs)
171
+ return content
172
+
173
  def get_data(self, field:str):
174
  return self.data.get(field, None)
175
 
 
198
  chain = prompt | self.config.utility_model
199
  response = ""
200
  printer = None
201
+ logger = None
202
 
203
  if output_label:
204
  PrintStyle(bold=True, font_color="orange", padding=True, background_color="white").print(f"{self.agent_name}: {output_label}:")
205
+ printer = PrintStyle(italic=True, font_color="orange", padding=False)
206
+ logger = Log(type="adhoc", heading=f"{self.agent_name}: {output_label}:")
207
 
208
  formatted_inputs = prompt.format()
209
  tokens = int(len(formatted_inputs)/4)
 
218
 
219
  if printer: printer.stream(content)
220
  response+=content
221
+ if logger: logger.update(content=response)
222
 
223
  self.rate_limiter.set_output_tokens(int(len(response)/4))
224
 
 
229
  return self.history[-1]
230
 
231
  def replace_middle_messages(self,middle_messages):
232
+ cleanup_prompt = self.read_prompt("fw.msg_cleanup.md")
233
  summary = self.send_adhoc_message(system=cleanup_prompt,msg=self.concat_messages(middle_messages), output_label="Mid messages cleanup summary")
234
  new_human_message = HumanMessage(content=summary)
235
  return [new_human_message]
 
264
  while self.paused: time.sleep(0.1) # wait if paused
265
  if self.intervention_message and not self.intervention_status: # if there is an intervention message, but not yet processed
266
  if progress.strip(): self.append_message(progress) # append the response generated so far
267
+ user_msg = self.read_prompt("fw.intervention.md", user_message=self.intervention_message) # format the user intervention template
268
  self.append_message(user_msg,human=True) # append the intervention message
269
  self.intervention_message = "" # reset the intervention message
270
  self.intervention_status = True
 
292
  if self.handle_intervention(): return # wait if paused and handle intervention message if needed
293
  if response.break_loop: return response.message
294
  else:
295
+ msg = self.read_prompt("fw.msg_misformat.md")
296
  self.append_message(msg, human=True)
297
  PrintStyle(font_color="red", padding=True).print(msg)
298
+ Log.log(type="error", content=f"{self.agent_name}: Message misformat:")
299
 
300
 
301
  def get_tool(self, name: str, args: dict, message: str, **kwargs):
 
330
  "conversation_history" : messages,
331
  "raw_memories": memories
332
  }
333
+ cleanup_prompt = self.read_prompt("msg.memory_cleanup.md").replace("{", "{{")
334
  clean_memories = self.send_adhoc_message(cleanup_prompt,json.dumps(input), output_label="Memory injection")
335
  return clean_memories
336
 
337
+ def log_from_stream(self, stream: str, log: Log):
338
+ try:
339
+ if len(stream) < 25: return # no reason to try
340
+ response = DirtyJson.parse_string(stream)
341
+ if isinstance(response, dict): log.update(content=stream, kvps=response) #log if result is a dictionary already
342
+ except Exception as e:
343
+ pass
344
+
345
  def call_extension(self, name: str, **kwargs) -> Any:
346
  pass
docs/win_installation_guide.txt CHANGED
@@ -49,7 +49,7 @@ pip install -r requirements.txt
49
  ## Run the program
50
  - Just run the **main.py** file in Python:
51
  ~~~bash
52
- python main.py
53
  ~~~
54
  - Or run it in debug mode in VS Code using the **debug** button in the top right corner of the editor. I have provided config files for VS Code for this purpose.
55
 
@@ -140,7 +140,7 @@ PYDEVD_DISABLE_FILE_VALIDATION=1
140
  6. Configure API Preferences
141
 
142
  If you entered an openAI API key earlier, you may skip this step. If you entered an alternative key,
143
- * Right Click on the file **main.py** in the Agent-Zero root folder
144
  * Select **Open With**
145
  * Select **Notepad** or your preferred text editor
146
  * Scroll about 20 lines down from the top until you see lines that look like this: *chat_llm = models.get_*
@@ -182,11 +182,11 @@ If all of the requirements installed succesfully, you can proceed to run the pro
182
  * Activate the Agent-Zero environment by double clicking on its name
183
  * click Open Terminal
184
  * Navigate to the agent-zero folder
185
- * Type **python main.py** and press enter
186
 
187
  ```
188
  (Agent-Zero) C:\Users\yourUserName>cd c:\projects\agent-zero
189
- (Agent-Zero) C:\projects\agent-zero\python main.py
190
  Initializing framework...
191
 
192
  User message ('e' to leave):
 
49
  ## Run the program
50
  - Just run the **main.py** file in Python:
51
  ~~~bash
52
+ python run_ui.py
53
  ~~~
54
  - Or run it in debug mode in VS Code using the **debug** button in the top right corner of the editor. I have provided config files for VS Code for this purpose.
55
 
 
140
  6. Configure API Preferences
141
 
142
  If you entered an openAI API key earlier, you may skip this step. If you entered an alternative key,
143
+ * Right Click on the file **initialize.py** in the Agent-Zero root folder
144
  * Select **Open With**
145
  * Select **Notepad** or your preferred text editor
146
  * Scroll about 20 lines down from the top until you see lines that look like this: *chat_llm = models.get_*
 
182
  * Activate the Agent-Zero environment by double clicking on its name
183
  * click Open Terminal
184
  * Navigate to the agent-zero folder
185
+ * Type **python run_ui.py** and press enter
186
 
187
  ```
188
  (Agent-Zero) C:\Users\yourUserName>cd c:\projects\agent-zero
189
+ (Agent-Zero) C:\projects\agent-zero\python run_ui.py
190
  Initializing framework...
191
 
192
  User message ('e' to leave):
example.env CHANGED
@@ -9,14 +9,11 @@ API_KEY_OPENAI_AZURE=
9
  OPENAI_AZURE_ENDPOINT=
10
  OPENAI_API_VERSION=
11
 
12
- TOKENIZERS_PARALLELISM=true
13
- PYDEVD_DISABLE_FILE_VALIDATION=1
14
-
15
  HF_TOKEN=
16
 
17
- EDGE_TTS_MODEL="fr-FR-RemyMultilingualNeural"
18
- # en-US-AndrewMultilingualNeural
19
- # de-DE-FlorianMultilingualNeural
20
- # es-ES-AlvaroNeural
21
- # zh-CN-shaanxi-XiaoniNeural
22
- # more check https://huggingface.co/spaces/innoai/Edge-TTS-Text-to-Speech and at bottom use via api
 
9
  OPENAI_AZURE_ENDPOINT=
10
  OPENAI_API_VERSION=
11
 
 
 
 
12
  HF_TOKEN=
13
 
14
+
15
+ WEB_UI_PORT=50001
16
+
17
+
18
+ TOKENIZERS_PARALLELISM=true
19
+ PYDEVD_DISABLE_FILE_VALIDATION=1
initialize.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import models
2
+ from agent import Agent, AgentConfig
3
+
4
+ def initialize():
5
+
6
+ # main chat model used by agents (smarter, more accurate)
7
+ chat_llm = models.get_openai_chat(model_name="gpt-4o-mini", temperature=0)
8
+ # chat_llm = models.get_ollama_chat(model_name="gemma2:latest", temperature=0)
9
+ chat_llm = models.get_lmstudio_chat(model_name="TheBloke/Mistral-7B-Instruct-v0.2-GGUF", temperature=0)
10
+ # chat_llm = models.get_openrouter(model_name="meta-llama/llama-3-8b-instruct:free")
11
+ # chat_llm = models.get_azure_openai_chat(deployment_name="gpt-4o-mini", temperature=0)
12
+ # chat_llm = models.get_anthropic_chat(model_name="claude-3-5-sonnet-20240620", temperature=0)
13
+ # chat_llm = models.get_google_chat(model_name="gemini-1.5-flash", temperature=0)
14
+ # chat_llm = models.get_groq_chat(model_name="llama-3.1-70b-versatile", temperature=0)
15
+
16
+ # utility model used for helper functions (cheaper, faster)
17
+ utility_llm = chat_llm # change if you want to use a different utility model
18
+
19
+ # embedding model used for memory
20
+ embedding_llm = models.get_openai_embedding(model_name="text-embedding-3-small")
21
+ # embedding_llm = models.get_ollama_embedding(model_name="nomic-embed-text")
22
+ # embedding_llm = models.get_huggingface_embedding(model_name="sentence-transformers/all-MiniLM-L6-v2")
23
+
24
+ # agent configuration
25
+ config = AgentConfig(
26
+ chat_model = chat_llm,
27
+ utility_model = utility_llm,
28
+ embeddings_model = embedding_llm,
29
+ prompts_subdir = "custom",
30
+ # memory_subdir = "",
31
+ auto_memory_count = 0,
32
+ # auto_memory_skip = 2,
33
+ # rate_limit_seconds = 60,
34
+ rate_limit_requests = 15,
35
+ # rate_limit_input_tokens = 0,
36
+ # rate_limit_output_tokens = 0,
37
+ # msgs_keep_max = 25,
38
+ # msgs_keep_start = 5,
39
+ # msgs_keep_end = 10,
40
+ max_tool_response_length = 3000,
41
+ # response_timeout_seconds = 60,
42
+ code_exec_docker_enabled = True,
43
+ # code_exec_docker_name = "agent-zero-exe",
44
+ # code_exec_docker_image = "frdel/agent-zero-exe:latest",
45
+ # code_exec_docker_ports = { "22/tcp": 50022 }
46
+ # code_exec_docker_volumes = { files.get_abs_path("work_dir"): {"bind": "/root", "mode": "rw"} }
47
+ code_exec_ssh_enabled = True,
48
+ # code_exec_ssh_addr = "localhost",
49
+ # code_exec_ssh_port = 50022,
50
+ # code_exec_ssh_user = "root",
51
+ # code_exec_ssh_pass = "toor",
52
+ # additional = {},
53
+ )
54
+
55
+ # create the first agent
56
+ agent0 = Agent( number = 0, config = config )
57
+
58
+ # return initialized agent
59
+ return agent0
{memory → knowledge}/.gitkeep RENAMED
File without changes
prompts/{agent.memory.md → default/agent.memory.md} RENAMED
File without changes
prompts/{agent.system.md → default/agent.system.md} RENAMED
File without changes
prompts/{agent.tools.md → default/agent.tools.md} RENAMED
File without changes
prompts/{fw.code_no_output.md → default/fw.code_no_output.md} RENAMED
File without changes
prompts/{fw.code_runtime_wrong.md → default/fw.code_runtime_wrong.md} RENAMED
File without changes
prompts/{fw.error.md → default/fw.error.md} RENAMED
File without changes
prompts/{fw.intervention.md → default/fw.intervention.md} RENAMED
File without changes
prompts/{fw.memories_deleted.md → default/fw.memories_deleted.md} RENAMED
File without changes
prompts/{fw.memories_not_found.md → default/fw.memories_not_found.md} RENAMED
File without changes
prompts/{fw.memory_saved.md → default/fw.memory_saved.md} RENAMED
File without changes
prompts/{fw.msg_cleanup.md → default/fw.msg_cleanup.md} RENAMED
File without changes
prompts/{fw.msg_from_subordinate.md → default/fw.msg_from_subordinate.md} RENAMED
File without changes
prompts/{fw.msg_misformat.md → default/fw.msg_misformat.md} RENAMED
File without changes
prompts/{fw.msg_repeat.md → default/fw.msg_repeat.md} RENAMED
File without changes
prompts/{fw.msg_timeout.md → default/fw.msg_timeout.md} RENAMED
File without changes
prompts/{fw.msg_truncated.md → default/fw.msg_truncated.md} RENAMED
File without changes
prompts/{fw.tool_not_found.md → default/fw.tool_not_found.md} RENAMED
File without changes
prompts/{fw.tool_response.md → default/fw.tool_response.md} RENAMED
File without changes
prompts/{fw.user_message.md → default/fw.user_message.md} RENAMED
File without changes
prompts/{msg.memory_cleanup.md → default/msg.memory_cleanup.md} RENAMED
File without changes
prompts/{tool.knowledge.response.md → default/tool.knowledge.response.md} RENAMED
File without changes
python/helpers/docker.py CHANGED
@@ -5,6 +5,7 @@ from typing import Optional
5
  from python.helpers.files import get_abs_path
6
  from python.helpers.errors import format_error
7
  from python.helpers.print_style import PrintStyle
 
8
 
9
  class DockerContainerManager:
10
  def __init__(self, image: str, name: str, ports: Optional[dict[str, int]] = None, volumes: Optional[dict[str, dict[str, str]]] = None):
@@ -24,7 +25,9 @@ class DockerContainerManager:
24
  err = format_error(e)
25
  if ("ConnectionRefusedError(61," in err or "Error while fetching server API version" in err):
26
  PrintStyle.hint("Connection to Docker failed. Is docker or Docker Desktop running?") # hint for user
 
27
  PrintStyle.error(err)
 
28
  time.sleep(5) # try again in 5 seconds
29
  else: raise
30
  return self.client
@@ -35,8 +38,11 @@ class DockerContainerManager:
35
  self.container.stop()
36
  self.container.remove()
37
  print(f"Stopped and removed the container: {self.container.id}")
 
38
  except Exception as e:
39
  print(f"Failed to stop and remove the container: {e}")
 
 
40
 
41
  def start_container(self) -> None:
42
  if not self.client: self.client = self.init_docker()
@@ -49,6 +55,8 @@ class DockerContainerManager:
49
  if existing_container:
50
  if existing_container.status != 'running':
51
  print(f"Starting existing container: {self.name} for safe code execution...")
 
 
52
  existing_container.start()
53
  self.container = existing_container
54
  time.sleep(2) # this helps to get SSH ready
@@ -58,13 +66,16 @@ class DockerContainerManager:
58
  # print(f"Container with name '{self.name}' is already running with ID: {existing_container.id}")
59
  else:
60
  print(f"Initializing docker container {self.name} for safe code execution...")
 
 
61
  self.container = self.client.containers.run(
62
  self.image,
63
  detach=True,
64
- ports=self.ports,
65
  name=self.name,
66
- volumes=self.volumes,
67
- )
68
  atexit.register(self.cleanup_container)
69
  print(f"Started container with ID: {self.container.id}")
 
70
  time.sleep(5) # this helps to get SSH ready
 
5
  from python.helpers.files import get_abs_path
6
  from python.helpers.errors import format_error
7
  from python.helpers.print_style import PrintStyle
8
+ from python.helpers.log import Log
9
 
10
  class DockerContainerManager:
11
  def __init__(self, image: str, name: str, ports: Optional[dict[str, int]] = None, volumes: Optional[dict[str, dict[str, str]]] = None):
 
25
  err = format_error(e)
26
  if ("ConnectionRefusedError(61," in err or "Error while fetching server API version" in err):
27
  PrintStyle.hint("Connection to Docker failed. Is docker or Docker Desktop running?") # hint for user
28
+ Log.log(type="hint", content="Connection to Docker failed. Is docker or Docker Desktop running?")
29
  PrintStyle.error(err)
30
+ Log.log(type="error", content=err)
31
  time.sleep(5) # try again in 5 seconds
32
  else: raise
33
  return self.client
 
38
  self.container.stop()
39
  self.container.remove()
40
  print(f"Stopped and removed the container: {self.container.id}")
41
+ Log.log(type="info", content=f"Stopped and removed the container: {self.container.id}")
42
  except Exception as e:
43
  print(f"Failed to stop and remove the container: {e}")
44
+ Log.log(type="error", content=f"Failed to stop and remove the container: {e}")
45
+
46
 
47
  def start_container(self) -> None:
48
  if not self.client: self.client = self.init_docker()
 
55
  if existing_container:
56
  if existing_container.status != 'running':
57
  print(f"Starting existing container: {self.name} for safe code execution...")
58
+ Log.log(type="info", content=f"Starting existing container: {self.name} for safe code execution...")
59
+
60
  existing_container.start()
61
  self.container = existing_container
62
  time.sleep(2) # this helps to get SSH ready
 
66
  # print(f"Container with name '{self.name}' is already running with ID: {existing_container.id}")
67
  else:
68
  print(f"Initializing docker container {self.name} for safe code execution...")
69
+ Log.log(type="info", content=f"Initializing docker container {self.name} for safe code execution...")
70
+
71
  self.container = self.client.containers.run(
72
  self.image,
73
  detach=True,
74
+ ports=self.ports, # type: ignore
75
  name=self.name,
76
+ volumes=self.volumes, # type: ignore
77
+ )
78
  atexit.register(self.cleanup_container)
79
  print(f"Started container with ID: {self.container.id}")
80
+ Log.log(type="info", content=f"Started container with ID: {self.container.id}")
81
  time.sleep(5) # this helps to get SSH ready
python/helpers/extract_tools.py CHANGED
@@ -39,7 +39,6 @@ def extract_json_string(content):
39
  # Return the matched JSON string
40
  return match.group(0)
41
  else:
42
- print("No JSON content found.")
43
  return ""
44
 
45
  def fix_json_string(json_string):
 
39
  # Return the matched JSON string
40
  return match.group(0)
41
  else:
 
42
  return ""
43
 
44
  def fix_json_string(json_string):
python/helpers/knowledge_import.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import glob
2
+ import os
3
+ import hashlib
4
+ import json
5
+ from typing import Any, Dict, Literal, TypedDict
6
+ from langchain_community.document_loaders import (
7
+ CSVLoader, JSONLoader, PyPDFLoader, TextLoader, UnstructuredHTMLLoader,
8
+ UnstructuredMarkdownLoader
9
+ )
10
+ from python.helpers import files
11
+ from python.helpers.log import Log
12
+
13
+ text_loader_kwargs = {'autodetect_encoding': True}
14
+
15
+
16
+ class KnowledgeImport(TypedDict):
17
+ file: str
18
+ checksum: str
19
+ ids: list[str]
20
+ state: Literal["changed", "original", "removed"]
21
+ documents: list[Any]
22
+
23
+
24
+ def calculate_checksum(file_path: str) -> str:
25
+ hasher = hashlib.md5()
26
+ with open(file_path, 'rb') as f:
27
+ buf = f.read()
28
+ hasher.update(buf)
29
+ return hasher.hexdigest()
30
+
31
+ def load_knowledge(knowledge_dir: str, index: Dict[str, KnowledgeImport]) -> Dict[str, KnowledgeImport]:
32
+ knowledge_dir = files.get_abs_path(knowledge_dir)
33
+
34
+
35
+ # Mapping file extensions to corresponding loader classes
36
+ file_types_loaders = {
37
+ 'txt': TextLoader,
38
+ 'pdf': PyPDFLoader,
39
+ 'csv': CSVLoader,
40
+ 'html': UnstructuredHTMLLoader,
41
+ 'json': JSONLoader,
42
+ 'md': UnstructuredMarkdownLoader
43
+ }
44
+
45
+ cnt_files = 0
46
+ cnt_docs = 0
47
+
48
+ # Fetch all files in the directory with specified extensions
49
+ kn_files = glob.glob(knowledge_dir + '/**/*', recursive=True)
50
+ if kn_files:
51
+ print(f"Found {len(kn_files)} knowledge files in {knowledge_dir}, processing...")
52
+ Log.log(type="info", content=f"Found {len(kn_files)} knowledge files in {knowledge_dir}, processing...")
53
+
54
+ for file_path in kn_files:
55
+ ext = file_path.split('.')[-1].lower()
56
+ if ext in file_types_loaders:
57
+ checksum = calculate_checksum(file_path)
58
+ file_key = os.path.relpath(file_path, knowledge_dir)
59
+
60
+ # Load existing data from the index or create a new entry
61
+ file_data = index.get(file_key, {})
62
+
63
+ if file_data.get('checksum') == checksum:
64
+ file_data['state'] = 'original'
65
+ else:
66
+ file_data['state'] = 'changed'
67
+
68
+ if file_data['state'] == 'changed':
69
+ file_data['checksum'] = checksum
70
+ loader_cls = file_types_loaders[ext]
71
+ loader = loader_cls(file_path, **(text_loader_kwargs if ext in ['txt', 'csv', 'html', 'md'] else {}))
72
+ file_data['documents'] = loader.load_and_split()
73
+ cnt_files += 1
74
+ cnt_docs += len(file_data['documents'])
75
+ # print(f"Imported {len(file_data['documents'])} documents from {file_path}")
76
+
77
+ # Update the index
78
+ index[file_key] = file_data # type: ignore
79
+
80
+ # loop index where state is not set and mark it as removed
81
+ for file_key, file_data in index.items():
82
+ if not file_data.get('state', ''):
83
+ index[file_key]['state'] = 'removed'
84
+
85
+ print(f"Processed {cnt_docs} documents from {cnt_files} files.")
86
+ Log.log(type="info", content=f"Processed {cnt_docs} documents from {cnt_files} files.")
87
+ return index
python/helpers/log.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from typing import Optional, Dict
3
+ import uuid
4
+
5
+ @dataclass
6
+ class LogItem:
7
+ no: int
8
+ type: str
9
+ heading: str
10
+ content: str
11
+ kvps: Optional[Dict] = None
12
+
13
+
14
+ class Log:
15
+
16
+ guid = uuid.uuid4()
17
+ version: int = 0
18
+ last_updated: int = 0
19
+ logs: list = []
20
+
21
+ def __init__(self, type: str="placeholder", heading: str="", content: str="", kvps: dict|None = None):
22
+ self.item = Log.log(type, heading, content, kvps) # create placeholder log item that will be updated
23
+
24
+ def update(self, type: Optional[str] = None, heading: str|None = None, content: str|None = None, kvps: dict|None = None):
25
+ Log.edit(self.item.no, type=type, heading=heading, content=content, kvps=kvps)
26
+
27
+ @staticmethod
28
+ def reset():
29
+ Log.guid = uuid.uuid4()
30
+ Log.version = 0
31
+ Log.last_updated = 0
32
+ Log.logs = []
33
+
34
+ @staticmethod
35
+ def log(type: str, heading: str|None = None, content: str|None = None, kvps: dict|None = None):
36
+ item = LogItem(len(Log.logs), type, heading or "", content or "", kvps)
37
+ Log.logs.append(item)
38
+ Log.version += 1
39
+ Log.last_updated = item.no
40
+ return item
41
+
42
+ @staticmethod
43
+ def edit(no: int, type: Optional[str] = None, heading: str|None = None, content: str|None = None, kvps: dict|None = None):
44
+ if 0 <= no < len(Log.logs):
45
+ item = Log.logs[no]
46
+ if type is not None:
47
+ item.type = type
48
+ if heading is not None:
49
+ item.heading = heading
50
+ if content is not None:
51
+ item.content = content
52
+ if kvps is not None:
53
+ item.kvps = kvps
54
+
55
+ Log.version += 1
56
+ Log.last_updated = no
57
+ else:
58
+ raise IndexError("Log item number out of range")
python/helpers/messages.py CHANGED
@@ -1,12 +1,12 @@
1
- from . import files
2
 
3
-
4
- def truncate_text(output, threshold=1000):
5
  if len(output) <= threshold:
6
  return output
7
 
8
  # Adjust the file path as needed
9
- placeholder = files.read_file("./prompts/fw.msg_truncated.md", removed_chars=(len(output) - threshold))
 
10
 
11
  start_len = (threshold - len(placeholder)) // 2
12
  end_len = threshold - len(placeholder) - start_len
 
1
+ # from . import files
2
 
3
+ def truncate_text(agent, output, threshold=1000):
 
4
  if len(output) <= threshold:
5
  return output
6
 
7
  # Adjust the file path as needed
8
+ placeholder = agent.read_prompt("fw.msg_truncated.md", removed_chars=(len(output) - threshold))
9
+ # placeholder = files.read_file("./prompts/default/fw.msg_truncated.md", removed_chars=(len(output) - threshold))
10
 
11
  start_len = (threshold - len(placeholder)) // 2
12
  end_len = threshold - len(placeholder) - start_len
python/helpers/rate_limiter.py CHANGED
@@ -3,6 +3,7 @@ from collections import deque
3
  from dataclasses import dataclass
4
  from typing import List, Tuple
5
  from .print_style import PrintStyle
 
6
 
7
  @dataclass
8
  class CallRecord:
@@ -48,6 +49,7 @@ class RateLimiter:
48
  wait_time = oldest_record.timestamp + self.window_seconds - current_time
49
  if wait_time > 0:
50
  PrintStyle(font_color="yellow", padding=True).print(f"Rate limit exceeded. Waiting for {wait_time:.2f} seconds due to: {', '.join(wait_reasons)}")
 
51
  time.sleep(wait_time)
52
  current_time = time.time()
53
 
@@ -62,17 +64,3 @@ class RateLimiter:
62
  if self.call_records:
63
  self.call_records[-1].output_tokens += output_token_count
64
  return self
65
-
66
- # Example usage
67
- rate_limiter = RateLimiter(max_calls=5, max_input_tokens=1000, max_output_tokens=2000)
68
-
69
- def rate_limited_function(input_token_count: int, output_token_count: int):
70
- # First, limit the call and input tokens (this may wait)
71
- rate_limiter.limit_call_and_input(input_token_count)
72
-
73
- # Your function logic here
74
- print(f"Function called with {input_token_count} input tokens")
75
-
76
- # After processing, set the output tokens (this doesn't wait)
77
- rate_limiter.set_output_tokens(output_token_count)
78
- print(f"Function completed with {output_token_count} output tokens")
 
3
  from dataclasses import dataclass
4
  from typing import List, Tuple
5
  from .print_style import PrintStyle
6
+ from .log import Log
7
 
8
  @dataclass
9
  class CallRecord:
 
49
  wait_time = oldest_record.timestamp + self.window_seconds - current_time
50
  if wait_time > 0:
51
  PrintStyle(font_color="yellow", padding=True).print(f"Rate limit exceeded. Waiting for {wait_time:.2f} seconds due to: {', '.join(wait_reasons)}")
52
+ Log.log("rate_limit","Rate limit exceeded",f"Rate limit exceeded. Waiting for {wait_time:.2f} seconds due to: {', '.join(wait_reasons)}")
53
  time.sleep(wait_time)
54
  current_time = time.time()
55
 
 
64
  if self.call_records:
65
  self.call_records[-1].output_tokens += output_token_count
66
  return self
 
 
 
 
 
 
 
 
 
 
 
 
 
 
python/helpers/shell_ssh.py CHANGED
@@ -2,12 +2,13 @@ import paramiko
2
  import time
3
  import re
4
  from typing import Optional, Tuple
 
 
5
 
6
  class SSHInteractiveSession:
7
 
8
- end_comment = "# @@==>> SSHInteractiveSession End-of-Command <<==@@"
9
-
10
- ps1_label = "SSHInteractiveSession CLI>"
11
 
12
  def __init__(self, hostname: str, port: int, username: str, password: str):
13
  self.hostname = hostname
@@ -18,6 +19,9 @@ class SSHInteractiveSession:
18
  self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
19
  self.shell = None
20
  self.full_output = b''
 
 
 
21
 
22
  def connect(self):
23
  # try 3 times with wait and then except
@@ -27,15 +31,17 @@ class SSHInteractiveSession:
27
  self.client.connect(self.hostname, self.port, self.username, self.password)
28
  self.shell = self.client.invoke_shell(width=160,height=48)
29
  # self.shell.send(f'PS1="{SSHInteractiveSession.ps1_label}"'.encode())
30
- return
31
- # while True: # wait for end of initial output
32
- # full, part = self.read_output()
33
- # if full and not part: return
34
- # time.sleep(0.1)
35
  except Exception as e:
36
  errors += 1
37
  if errors < 3:
38
  print(f"SSH Connection attempt {errors}...")
 
 
39
  time.sleep(5)
40
  else:
41
  raise e
@@ -50,15 +56,32 @@ class SSHInteractiveSession:
50
  if not self.shell:
51
  raise Exception("Shell not connected")
52
  self.full_output = b""
53
- self.shell.send((command + " \\\n" +SSHInteractiveSession.end_comment + "\n").encode())
 
 
 
 
 
 
54
 
55
  def read_output(self) -> Tuple[str, str]:
56
  if not self.shell:
57
  raise Exception("Shell not connected")
58
 
59
  partial_output = b''
 
60
  while self.shell.recv_ready():
61
  data = self.shell.recv(1024)
 
 
 
 
 
 
 
 
 
 
62
  partial_output += data
63
  self.full_output += data
64
  time.sleep(0.1) # Prevent busy waiting
@@ -66,15 +89,15 @@ class SSHInteractiveSession:
66
  # Decode once at the end
67
  decoded_partial_output = partial_output.decode('utf-8', errors='replace')
68
  decoded_full_output = self.full_output.decode('utf-8', errors='replace')
69
-
70
  decoded_partial_output = self.clean_string(decoded_partial_output)
71
  decoded_full_output = self.clean_string(decoded_full_output)
72
 
73
- # Split output at end_comment
74
- if SSHInteractiveSession.end_comment in decoded_full_output:
75
- decoded_full_output = decoded_full_output.split(SSHInteractiveSession.end_comment)[-1].lstrip("\r\n")
76
- decoded_partial_output = decoded_partial_output.split(SSHInteractiveSession.end_comment)[-1].lstrip("\r\n")
77
-
78
  return decoded_full_output, decoded_partial_output
79
 
80
 
@@ -85,5 +108,14 @@ class SSHInteractiveSession:
85
 
86
  # Replace '\r\n' with '\n'
87
  cleaned = cleaned.replace('\r\n', '\n')
88
-
89
- return cleaned
 
 
 
 
 
 
 
 
 
 
2
  import time
3
  import re
4
  from typing import Optional, Tuple
5
+ from python.helpers.log import Log
6
+ from python.helpers.strings import calculate_valid_match_lengths
7
 
8
  class SSHInteractiveSession:
9
 
10
+ # end_comment = "# @@==>> SSHInteractiveSession End-of-Command <<==@@"
11
+ # ps1_label = "SSHInteractiveSession CLI>"
 
12
 
13
  def __init__(self, hostname: str, port: int, username: str, password: str):
14
  self.hostname = hostname
 
19
  self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
20
  self.shell = None
21
  self.full_output = b''
22
+ self.last_command = b''
23
+ self.trimmed_command_length = 0 # Initialize trimmed_command_length
24
+
25
 
26
  def connect(self):
27
  # try 3 times with wait and then except
 
31
  self.client.connect(self.hostname, self.port, self.username, self.password)
32
  self.shell = self.client.invoke_shell(width=160,height=48)
33
  # self.shell.send(f'PS1="{SSHInteractiveSession.ps1_label}"'.encode())
34
+ # return
35
+ while True: # wait for end of initial output
36
+ full, part = self.read_output()
37
+ if full and not part: return
38
+ time.sleep(0.1)
39
  except Exception as e:
40
  errors += 1
41
  if errors < 3:
42
  print(f"SSH Connection attempt {errors}...")
43
+ Log.log(type="info", content=f"SSH Connection attempt {errors}...")
44
+
45
  time.sleep(5)
46
  else:
47
  raise e
 
56
  if not self.shell:
57
  raise Exception("Shell not connected")
58
  self.full_output = b""
59
+ # if len(command) > 10: # if command is long, add end_comment to split output
60
+ # command = (command + " \\\n" +SSHInteractiveSession.end_comment + "\n")
61
+ # else:
62
+ command = command + "\n"
63
+ self.last_command = command.encode()
64
+ self.trimmed_command_length = 0
65
+ self.shell.send(self.last_command)
66
 
67
  def read_output(self) -> Tuple[str, str]:
68
  if not self.shell:
69
  raise Exception("Shell not connected")
70
 
71
  partial_output = b''
72
+
73
  while self.shell.recv_ready():
74
  data = self.shell.recv(1024)
75
+
76
+ # Trim own command from output
77
+ if self.last_command and len(self.last_command) > self.trimmed_command_length:
78
+ command_to_trim = self.last_command[self.trimmed_command_length:]
79
+
80
+ trim_com, trim_out = calculate_valid_match_lengths(command_to_trim, data, deviation_threshold=8, deviation_reset=2, ignore_patterns=[rb'\x1b\[\?\d{4}[a-zA-Z]',rb'\r',rb'>'])
81
+ if(trim_com > 0 and trim_out > 0):
82
+ data = data[trim_out:]
83
+ self.trimmed_command_length += trim_com
84
+
85
  partial_output += data
86
  self.full_output += data
87
  time.sleep(0.1) # Prevent busy waiting
 
89
  # Decode once at the end
90
  decoded_partial_output = partial_output.decode('utf-8', errors='replace')
91
  decoded_full_output = self.full_output.decode('utf-8', errors='replace')
92
+
93
  decoded_partial_output = self.clean_string(decoded_partial_output)
94
  decoded_full_output = self.clean_string(decoded_full_output)
95
 
96
+ # # Split output at end_comment
97
+ # if SSHInteractiveSession.end_comment in decoded_full_output:
98
+ # decoded_full_output = decoded_full_output.split(SSHInteractiveSession.end_comment)[-1].lstrip("\r\n")
99
+ # decoded_partial_output = decoded_partial_output.split(SSHInteractiveSession.end_comment)[-1].lstrip("\r\n")
100
+
101
  return decoded_full_output, decoded_partial_output
102
 
103
 
 
108
 
109
  # Replace '\r\n' with '\n'
110
  cleaned = cleaned.replace('\r\n', '\n')
111
+
112
+ # Split the string by newline characters to process each segment separately
113
+ lines = cleaned.split('\n')
114
+
115
+ for i in range(len(lines)):
116
+ # Handle carriage returns '\r' by splitting and taking the last part
117
+ parts = [part for part in lines[i].split('\r') if part.strip()]
118
+ if parts: lines[i] = parts[-1].rstrip() # Overwrite with the last part after the last '\r'
119
+
120
+ return '\n'.join(lines)
121
+
python/helpers/strings.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # def calculate_valid_match_lengths(first: bytes | str, second: bytes | str, deviation_threshold: int = 5, deviation_reset: int = 5) -> tuple[int, int]:
2
+ # first_length = len(first)
3
+ # second_length = len(second)
4
+
5
+ # i, j = 0, 0
6
+ # deviations = 0
7
+ # matched_since_deviation = 0
8
+ # last_matched_i, last_matched_j = 0, 0 # Track the last matched index
9
+
10
+ # while i < first_length and j < second_length:
11
+ # if first[i] == second[j]:
12
+ # last_matched_i, last_matched_j = i + 1, j + 1 # Update last matched position
13
+ # i += 1
14
+ # j += 1
15
+ # matched_since_deviation += 1
16
+
17
+ # # Reset the deviation counter if we've matched enough characters since the last deviation
18
+ # if matched_since_deviation >= deviation_reset:
19
+ # deviations = 0
20
+ # matched_since_deviation = 0
21
+ # else:
22
+ # # Determine the look-ahead based on the remaining deviation threshold
23
+ # look_ahead = deviation_threshold - deviations
24
+
25
+ # # Look ahead to find the best match within the remaining deviation allowance
26
+ # best_match = None
27
+ # for k in range(1, look_ahead + 1):
28
+ # if i + k < first_length and first[i + k] == second[j]:
29
+ # best_match = ('i', k)
30
+ # break
31
+ # if j + k < second_length and first[i] == second[j + k]:
32
+ # best_match = ('j', k)
33
+ # break
34
+
35
+ # if best_match:
36
+ # if best_match[0] == 'i':
37
+ # i += best_match[1]
38
+ # elif best_match[0] == 'j':
39
+ # j += best_match[1]
40
+ # else:
41
+ # i += 1
42
+ # j += 1
43
+
44
+ # deviations += 1
45
+ # matched_since_deviation = 0
46
+
47
+ # if deviations > deviation_threshold:
48
+ # break
49
+
50
+ # # Return the last matched positions instead of the current indices
51
+ # return last_matched_i, last_matched_j
52
+
53
+ import re
54
+
55
+ def calculate_valid_match_lengths(first: bytes | str, second: bytes | str,
56
+ deviation_threshold: int = 5,
57
+ deviation_reset: int = 5,
58
+ ignore_patterns: list[bytes|str] = []) -> tuple[int, int]:
59
+
60
+ first_length = len(first)
61
+ second_length = len(second)
62
+
63
+ i, j = 0, 0
64
+ deviations = 0
65
+ matched_since_deviation = 0
66
+ last_matched_i, last_matched_j = 0, 0 # Track the last matched index
67
+
68
+ def skip_ignored_patterns(s, index):
69
+ """Skip characters in `s` that match any pattern in `ignore_patterns` starting from `index`."""
70
+ while index < len(s):
71
+ for pattern in ignore_patterns:
72
+ match = re.match(pattern, s[index:])
73
+ if match:
74
+ index += len(match.group(0))
75
+ break
76
+ else:
77
+ break
78
+ return index
79
+
80
+ while i < first_length and j < second_length:
81
+ # Skip ignored patterns
82
+ i = skip_ignored_patterns(first, i)
83
+ j = skip_ignored_patterns(second, j)
84
+
85
+ if i < first_length and j < second_length and first[i] == second[j]:
86
+ last_matched_i, last_matched_j = i + 1, j + 1 # Update last matched position
87
+ i += 1
88
+ j += 1
89
+ matched_since_deviation += 1
90
+
91
+ # Reset the deviation counter if we've matched enough characters since the last deviation
92
+ if matched_since_deviation >= deviation_reset:
93
+ deviations = 0
94
+ matched_since_deviation = 0
95
+ else:
96
+ # Determine the look-ahead based on the remaining deviation threshold
97
+ look_ahead = deviation_threshold - deviations
98
+
99
+ # Look ahead to find the best match within the remaining deviation allowance
100
+ best_match = None
101
+ for k in range(1, look_ahead + 1):
102
+ if i + k < first_length and first[i + k] == second[j]:
103
+ best_match = ('i', k)
104
+ break
105
+ if j + k < second_length and first[i] == second[j + k]:
106
+ best_match = ('j', k)
107
+ break
108
+
109
+ if best_match:
110
+ if best_match[0] == 'i':
111
+ i += best_match[1]
112
+ elif best_match[0] == 'j':
113
+ j += best_match[1]
114
+ else:
115
+ i += 1
116
+ j += 1
117
+
118
+ deviations += 1
119
+ matched_since_deviation = 0
120
+
121
+ if deviations > deviation_threshold:
122
+ break
123
+
124
+ # Return the last matched positions instead of the current indices
125
+ return last_matched_i, last_matched_j
python/helpers/tool.py CHANGED
@@ -3,6 +3,7 @@ from typing import TypedDict
3
  from agent import Agent
4
  from python.helpers.print_style import PrintStyle
5
  from python.helpers import files, messages
 
6
 
7
  class Response:
8
  def __init__(self, message: str, break_loop: bool) -> None:
@@ -24,6 +25,7 @@ class Tool:
24
  def before_execution(self, **kwargs):
25
  if self.agent.handle_intervention(): return # wait for intervention and handle it, if paused
26
  PrintStyle(font_color="#1B4F72", padding=True, background_color="white", bold=True).print(f"{self.agent.agent_name}: Using tool '{self.name}':")
 
27
  if self.args and isinstance(self.args, dict):
28
  for key, value in self.args.items():
29
  PrintStyle(font_color="#85C1E9", bold=True).stream(self.nice_key(key)+": ")
@@ -31,12 +33,13 @@ class Tool:
31
  PrintStyle().print()
32
 
33
  def after_execution(self, response: Response, **kwargs):
34
- text = messages.truncate_text(response.message.strip(), self.agent.config.max_tool_response_length)
35
- msg_response = files.read_file("./prompts/fw.tool_response.md", tool_name=self.name, tool_response=text)
36
  if self.agent.handle_intervention(): return # wait for intervention and handle it, if paused
37
  self.agent.append_message(msg_response, human=True)
38
  PrintStyle(font_color="#1B4F72", background_color="white", padding=True, bold=True).print(f"{self.agent.agent_name}: Response from tool '{self.name}':")
39
  PrintStyle(font_color="#85C1E9").print(response.message)
 
40
 
41
  def nice_key(self, key:str):
42
  words = key.split('_')
 
3
  from agent import Agent
4
  from python.helpers.print_style import PrintStyle
5
  from python.helpers import files, messages
6
+ from python.helpers.log import Log
7
 
8
  class Response:
9
  def __init__(self, message: str, break_loop: bool) -> None:
 
25
  def before_execution(self, **kwargs):
26
  if self.agent.handle_intervention(): return # wait for intervention and handle it, if paused
27
  PrintStyle(font_color="#1B4F72", padding=True, background_color="white", bold=True).print(f"{self.agent.agent_name}: Using tool '{self.name}':")
28
+ self.log = Log(type="tool", heading=f"{self.agent.agent_name}: Using tool '{self.name}':", content="", kvps=self.args)
29
  if self.args and isinstance(self.args, dict):
30
  for key, value in self.args.items():
31
  PrintStyle(font_color="#85C1E9", bold=True).stream(self.nice_key(key)+": ")
 
33
  PrintStyle().print()
34
 
35
  def after_execution(self, response: Response, **kwargs):
36
+ text = messages.truncate_text(self.agent, response.message.strip(), self.agent.config.max_tool_response_length)
37
+ msg_response = self.agent.read_prompt("fw.tool_response.md", tool_name=self.name, tool_response=text)
38
  if self.agent.handle_intervention(): return # wait for intervention and handle it, if paused
39
  self.agent.append_message(msg_response, human=True)
40
  PrintStyle(font_color="#1B4F72", background_color="white", padding=True, bold=True).print(f"{self.agent.agent_name}: Response from tool '{self.name}':")
41
  PrintStyle(font_color="#85C1E9").print(response.message)
42
+ self.log.update(content=response.message)
43
 
44
  def nice_key(self, key:str):
45
  words = key.split('_')
python/helpers/vdb.py DELETED
@@ -1,69 +0,0 @@
1
- from langchain.storage import InMemoryByteStore, LocalFileStore
2
- from langchain.embeddings import CacheBackedEmbeddings
3
- from langchain_core.embeddings import Embeddings
4
-
5
- from langchain_chroma import Chroma
6
- import chromadb
7
- from chromadb.config import Settings
8
-
9
- from . import files
10
- from langchain_core.documents import Document
11
- import uuid
12
-
13
-
14
- class VectorDB:
15
-
16
- def __init__(self, embeddings_model:Embeddings, in_memory=False, cache_dir="./cache"):
17
- print("Initializing VectorDB...")
18
- self.embeddings_model = embeddings_model
19
-
20
- db_cache = files.get_abs_path(cache_dir,"database")
21
-
22
- self.client =chromadb.PersistentClient(path=db_cache)
23
- self.collection = self.client.create_collection("my_collection")
24
- self.collection
25
-
26
-
27
- def search(self, query:str, results=2):
28
- emb = self.embeddings_model.embed_query(query)
29
- res = self.collection.query(query_embeddings=[emb],n_results=results)
30
- best = res["documents"][0][0] # type: ignore
31
-
32
- # def delete_documents(self, query):
33
- # score_limit = 1
34
- # k = 2
35
- # tot = 0
36
- # while True:
37
- # # Perform similarity search with score
38
- # docs = self.db.similarity_search_with_score(query, k=k)
39
-
40
- # # Extract document IDs and filter based on score
41
- # document_ids = [result[0].metadata["id"] for result in docs if result[1] < score_limit]
42
-
43
- # # Delete documents with IDs over the threshold score
44
- # if document_ids:
45
- # fnd = self.db.get(where={"id": {"$in": document_ids}})
46
- # if fnd["ids"]: self.db.delete(ids=fnd["ids"])
47
- # tot += len(fnd["ids"])
48
-
49
- # # If fewer than K document IDs, break the loop
50
- # if len(document_ids) < k:
51
- # break
52
-
53
- # return tot
54
-
55
- def insert(self, data:str):
56
-
57
- id = str(uuid.uuid4())
58
- emb = self.embeddings_model.embed_documents([data])[0]
59
-
60
- self.collection.add(
61
- ids=[id],
62
- embeddings=[emb],
63
- documents=[data],
64
- )
65
-
66
- return id
67
-
68
-
69
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
python/helpers/vector_db.py CHANGED
@@ -1,25 +1,33 @@
1
  from langchain.storage import InMemoryByteStore, LocalFileStore
2
  from langchain.embeddings import CacheBackedEmbeddings
3
- from langchain_chroma import Chroma
 
 
 
4
 
 
5
  from . import files
6
  from langchain_core.documents import Document
7
  import uuid
8
-
 
9
 
10
  class VectorDB:
11
 
12
- def __init__(self, embeddings_model, in_memory=False, cache_dir="./cache"):
13
  print("Initializing VectorDB...")
 
 
14
  self.embeddings_model = embeddings_model
15
 
16
- em_cache = files.get_abs_path(cache_dir,"embeddings")
17
- db_cache = files.get_abs_path(cache_dir,"database")
 
18
 
19
  if in_memory:
20
  self.store = InMemoryByteStore()
21
  else:
22
- self.store = LocalFileStore(em_cache)
23
 
24
 
25
  #here we setup the embeddings model with the chosen cache storage
@@ -28,9 +36,63 @@ class VectorDB:
28
  self.store,
29
  namespace=getattr(embeddings_model, 'model', getattr(embeddings_model, 'model_name', "default")) )
30
 
 
 
 
31
 
32
- self.db = Chroma(embedding_function=self.embedder,persist_directory=db_cache)
33
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
  def search_similarity(self, query, results=3):
36
  return self.db.similarity_search(query,results)
@@ -64,7 +126,8 @@ class VectorDB:
64
  # If fewer than K document IDs, break the loop
65
  if len(document_ids) < k:
66
  break
67
-
 
68
  return tot
69
 
70
  def delete_documents_by_ids(self, ids:list[str]):
@@ -72,13 +135,20 @@ class VectorDB:
72
  self.db.delete(ids=ids)
73
  # post = self.db.get(ids=ids)["ids"]
74
  #TODO? compare pre and post
 
75
  return len(ids)
76
 
77
- def insert_document(self, data):
78
  id = str(uuid.uuid4())
79
- self.db.add_documents(documents=[ Document(data, metadata={"id": id}) ], ids=[id])
80
-
81
  return id
82
-
 
 
 
 
 
 
83
 
84
 
 
1
  from langchain.storage import InMemoryByteStore, LocalFileStore
2
  from langchain.embeddings import CacheBackedEmbeddings
3
+ # from langchain_chroma import Chroma
4
+ from langchain_community.vectorstores import FAISS
5
+ import faiss
6
+ from langchain_community.docstore.in_memory import InMemoryDocstore
7
 
8
+ import os, json
9
  from . import files
10
  from langchain_core.documents import Document
11
  import uuid
12
+ from python.helpers import knowledge_import
13
+ from python.helpers.log import Log
14
 
15
  class VectorDB:
16
 
17
+ def __init__(self, embeddings_model, in_memory=False, memory_dir="./memory", knowledge_dir="./knowledge"):
18
  print("Initializing VectorDB...")
19
+ Log.log("info", content="Initializing VectorDB...")
20
+
21
  self.embeddings_model = embeddings_model
22
 
23
+ self.em_dir = files.get_abs_path(memory_dir,"embeddings")
24
+ self.db_dir = files.get_abs_path(memory_dir,"database")
25
+ self.kn_dir = files.get_abs_path(knowledge_dir) if knowledge_dir else ""
26
 
27
  if in_memory:
28
  self.store = InMemoryByteStore()
29
  else:
30
+ self.store = LocalFileStore(self.em_dir)
31
 
32
 
33
  #here we setup the embeddings model with the chosen cache storage
 
36
  self.store,
37
  namespace=getattr(embeddings_model, 'model', getattr(embeddings_model, 'model_name', "default")) )
38
 
39
+ # self.db = Chroma(
40
+ # embedding_function=self.embedder,
41
+ # persist_directory=db_dir)
42
 
 
43
 
44
+ # if db folder exists and is not empty:
45
+ if os.path.exists(self.db_dir) and len(os.listdir(self.db_dir)) > 0:
46
+ self.db = FAISS.load_local(
47
+ folder_path=self.db_dir,
48
+ embeddings=self.embedder,
49
+ allow_dangerous_deserialization=True
50
+ )
51
+ else:
52
+ index = faiss.IndexFlatL2(len(self.embedder.embed_query("example text")))
53
+
54
+ self.db = FAISS(
55
+ embedding_function=self.embedder,
56
+ index=index,
57
+ docstore=InMemoryDocstore(),
58
+ index_to_docstore_id={})
59
+
60
+ #preload knowledge files
61
+ if self.kn_dir:
62
+ self.preload_knowledge(self.kn_dir, self.db_dir)
63
+
64
+
65
+ def preload_knowledge(self, kn_dir:str, db_dir:str):
66
+
67
+ # Load the index file if it exists
68
+ index_path = files.get_abs_path(db_dir, "knowledge_import.json")
69
+
70
+ #make sure directory exists
71
+ if not os.path.exists(db_dir):
72
+ os.makedirs(db_dir)
73
+
74
+ index: dict[str, knowledge_import.KnowledgeImport] = {}
75
+ if os.path.exists(index_path):
76
+ with open(index_path, 'r') as f:
77
+ index = json.load(f)
78
+
79
+ index = knowledge_import.load_knowledge(kn_dir,index)
80
+
81
+ for file in index:
82
+ if index[file]['state'] in ['changed', 'removed'] and index[file].get('ids',[]): # for knowledge files that have been changed or removed and have IDs
83
+ self.delete_documents_by_ids(index[file]['ids']) # remove original version
84
+ if index[file]['state'] == 'changed':
85
+ index[file]['ids'] = self.insert_documents(index[file]['documents']) # insert new version
86
+
87
+ # remove index where state="removed"
88
+ index = {k: v for k, v in index.items() if v['state'] != 'removed'}
89
+
90
+ # strip state and documents from index and save it
91
+ for file in index:
92
+ if "documents" in index[file]: del index[file]['documents'] # type: ignore
93
+ if "state" in index[file]: del index[file]['state'] # type: ignore
94
+ with open(index_path, 'w') as f:
95
+ json.dump(index, f)
96
 
97
  def search_similarity(self, query, results=3):
98
  return self.db.similarity_search(query,results)
 
126
  # If fewer than K document IDs, break the loop
127
  if len(document_ids) < k:
128
  break
129
+
130
+ if tot: self.db.save_local(folder_path=self.db_dir) # persist
131
  return tot
132
 
133
  def delete_documents_by_ids(self, ids:list[str]):
 
135
  self.db.delete(ids=ids)
136
  # post = self.db.get(ids=ids)["ids"]
137
  #TODO? compare pre and post
138
+ if ids: self.db.save_local(folder_path=self.db_dir) #persist
139
  return len(ids)
140
 
141
+ def insert_text(self, text):
142
  id = str(uuid.uuid4())
143
+ self.db.add_documents(documents=[ Document(text, metadata={"id": id}) ], ids=[id])
144
+ self.db.save_local(folder_path=self.db_dir) #persist
145
  return id
146
+
147
+ def insert_documents(self, docs:list[Document]):
148
+ ids = [str(uuid.uuid4()) for _ in range(len(docs))]
149
+ for doc, id in zip(docs, ids): doc.metadata["id"] = id #add ids to documents metadata
150
+ self.db.add_documents(documents=docs, ids=ids)
151
+ self.db.save_local(folder_path=self.db_dir) #persist
152
+ return ids
153
 
154
 
python/tools/code_execution_tool.py CHANGED
@@ -11,6 +11,7 @@ from python.helpers.print_style import PrintStyle
11
  from python.helpers.shell_local import LocalInteractiveSession
12
  from python.helpers.shell_ssh import SSHInteractiveSession
13
  from python.helpers.docker import DockerContainerManager
 
14
 
15
  @dataclass
16
  class State:
@@ -38,13 +39,23 @@ class CodeExecution(Tool):
38
  elif runtime == "output":
39
  response = self.get_terminal_output()
40
  else:
41
- response = files.read_file("./prompts/fw.code_runtime_wrong.md", runtime=runtime)
42
 
43
- if not response: response = files.read_file("./prompts/fw.code_no_output.md")
44
  return Response(message=response, break_loop=False)
45
 
 
 
 
 
 
 
 
 
 
 
46
  def after_execution(self, response, **kwargs):
47
- msg_response = files.read_file("./prompts/fw.tool_response.md", tool_name=self.name, tool_response=response.message)
48
  self.agent.append_message(msg_response, human=True)
49
 
50
  def prepare_state(self):
@@ -98,6 +109,7 @@ class CodeExecution(Tool):
98
 
99
  if partial_output:
100
  PrintStyle(font_color="#85C1E9").stream(partial_output)
 
101
  idle=0
102
  else:
103
  idle+=1
 
11
  from python.helpers.shell_local import LocalInteractiveSession
12
  from python.helpers.shell_ssh import SSHInteractiveSession
13
  from python.helpers.docker import DockerContainerManager
14
+ from python.helpers.log import Log
15
 
16
  @dataclass
17
  class State:
 
39
  elif runtime == "output":
40
  response = self.get_terminal_output()
41
  else:
42
+ response = self.agent.read_prompt("fw.code_runtime_wrong.md", runtime=runtime)
43
 
44
+ if not response: response = self.agent.read_prompt("fw.code_no_output.md")
45
  return Response(message=response, break_loop=False)
46
 
47
+ def before_execution(self, **kwargs):
48
+ if self.agent.handle_intervention(): return # wait for intervention and handle it, if paused
49
+ PrintStyle(font_color="#1B4F72", padding=True, background_color="white", bold=True).print(f"{self.agent.agent_name}: Using tool '{self.name}':")
50
+ self.log = Log(type="code_exe", heading=f"{self.agent.agent_name}: Using tool '{self.name}':", content="", kvps=self.args)
51
+ if self.args and isinstance(self.args, dict):
52
+ for key, value in self.args.items():
53
+ PrintStyle(font_color="#85C1E9", bold=True).stream(self.nice_key(key)+": ")
54
+ PrintStyle(font_color="#85C1E9", padding=isinstance(value,str) and "\n" in value).stream(value)
55
+ PrintStyle().print()
56
+
57
  def after_execution(self, response, **kwargs):
58
+ msg_response = self.agent.read_prompt("fw.tool_response.md", tool_name=self.name, tool_response=response.message)
59
  self.agent.append_message(msg_response, human=True)
60
 
61
  def prepare_state(self):
 
109
 
110
  if partial_output:
111
  PrintStyle(font_color="#85C1E9").stream(partial_output)
112
+ self.log.update(content=full_output)
113
  idle=0
114
  else:
115
  idle+=1
python/tools/knowledge_tool.py CHANGED
@@ -1,6 +1,5 @@
1
  import os
2
  from agent import Agent
3
- from . import online_knowledge_tool
4
  from python.helpers import perplexity_search
5
  from python.helpers import duckduckgo_search
6
 
@@ -10,6 +9,7 @@ import concurrent.futures
10
  from python.helpers.tool import Tool, Response
11
  from python.helpers import files
12
  from python.helpers.print_style import PrintStyle
 
13
 
14
  class Knowledge(Tool):
15
  def execute(self, question="", **kwargs):
@@ -21,6 +21,7 @@ class Knowledge(Tool):
21
  perplexity = executor.submit(perplexity_search.perplexity_search, question)
22
  else:
23
  PrintStyle.hint("No API key provided for Perplexity. Skipping Perplexity search.")
 
24
  perplexity = None
25
 
26
 
@@ -46,7 +47,7 @@ class Knowledge(Tool):
46
  except Exception as e:
47
  memory_result = "Memory search failed: " + str(e)
48
 
49
- msg = files.read_file("prompts/tool.knowledge.response.md",
50
  online_sources = ((perplexity_result + "\n\n") if perplexity else "") + str(duckduckgo_result),
51
  memory = memory_result )
52
 
 
1
  import os
2
  from agent import Agent
 
3
  from python.helpers import perplexity_search
4
  from python.helpers import duckduckgo_search
5
 
 
9
  from python.helpers.tool import Tool, Response
10
  from python.helpers import files
11
  from python.helpers.print_style import PrintStyle
12
+ from python.helpers.log import Log
13
 
14
  class Knowledge(Tool):
15
  def execute(self, question="", **kwargs):
 
21
  perplexity = executor.submit(perplexity_search.perplexity_search, question)
22
  else:
23
  PrintStyle.hint("No API key provided for Perplexity. Skipping Perplexity search.")
24
+ Log(type="hint", content="No API key provided for Perplexity. Skipping Perplexity search.")
25
  perplexity = None
26
 
27
 
 
47
  except Exception as e:
48
  memory_result = "Memory search failed: " + str(e)
49
 
50
+ msg = self.agent.read_prompt("tool.knowledge.response.md",
51
  online_sources = ((perplexity_result + "\n\n") if perplexity else "") + str(duckduckgo_result),
52
  memory = memory_result )
53
 
python/tools/memory_tool.py CHANGED
@@ -6,9 +6,10 @@ import os, json
6
  from python.helpers.tool import Tool, Response
7
  from python.helpers.print_style import PrintStyle
8
  from chromadb.errors import InvalidDimensionException
 
9
 
10
- # TODO multiple DBs at once
11
- db: VectorDB | None= None
12
 
13
  class Memory(Tool):
14
  def execute(self,**kwargs):
@@ -28,39 +29,48 @@ class Memory(Tool):
28
  except InvalidDimensionException as e:
29
  # hint about embedding change with existing database
30
  PrintStyle.hint("If you changed your embedding model, you will need to remove contents of /memory directory.")
 
31
  raise
32
 
33
  # result = process_query(self.agent, self.args["memory"],self.args["action"], result_count=self.agent.config.auto_memory_count)
34
  return Response(message=result, break_loop=False)
35
 
36
  def search(agent:Agent, query:str, count:int=5, threshold:float=0.1):
37
- initialize(agent)
 
38
  docs = db.search_similarity_threshold(query,count,threshold) # type: ignore
39
- if len(docs)==0: return files.read_file("./prompts/fw.memories_not_found.md", query=query)
40
  else: return str(docs)
41
 
42
  def save(agent:Agent, text:str):
43
- initialize(agent)
44
- id = db.insert_document(text) # type: ignore
45
- return files.read_file("./prompts/fw.memory_saved.md", memory_id=id)
46
 
47
  def delete(agent:Agent, ids_str:str):
48
- initialize(agent)
49
  ids = extract_guids(ids_str)
50
  deleted = db.delete_documents_by_ids(ids) # type: ignore
51
- return files.read_file("./prompts/fw.memories_deleted.md", memory_count=deleted)
52
 
53
  def forget(agent:Agent, query:str):
54
- initialize(agent)
55
  deleted = db.delete_documents_by_query(query) # type: ignore
56
- return files.read_file("./prompts/fw.memories_deleted.md", memory_count=deleted)
57
 
58
- def initialize(agent:Agent):
59
- global db
60
- if not db:
61
- dir = os.path.join("memory",agent.config.memory_subdir)
62
- db = VectorDB(embeddings_model=agent.config.embeddings_model, in_memory=False, cache_dir=dir)
63
 
 
 
 
 
 
 
 
 
64
  def extract_guids(text):
65
  pattern = r'\b[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[1-5][0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}\b'
66
  return re.findall(pattern, text)
 
6
  from python.helpers.tool import Tool, Response
7
  from python.helpers.print_style import PrintStyle
8
  from chromadb.errors import InvalidDimensionException
9
+ from python.helpers.log import Log
10
 
11
+ # databases based on subdirectories from agent config
12
+ dbs = {}
13
 
14
  class Memory(Tool):
15
  def execute(self,**kwargs):
 
29
  except InvalidDimensionException as e:
30
  # hint about embedding change with existing database
31
  PrintStyle.hint("If you changed your embedding model, you will need to remove contents of /memory directory.")
32
+ Log(type="hint", content="If you changed your embedding model, you will need to remove contents of /memory directory.")
33
  raise
34
 
35
  # result = process_query(self.agent, self.args["memory"],self.args["action"], result_count=self.agent.config.auto_memory_count)
36
  return Response(message=result, break_loop=False)
37
 
38
  def search(agent:Agent, query:str, count:int=5, threshold:float=0.1):
39
+ db = get_db(agent)
40
+ # docs = db.search_similarity(query,count) # type: ignore
41
  docs = db.search_similarity_threshold(query,count,threshold) # type: ignore
42
+ if len(docs)==0: return agent.read_prompt("fw.memories_not_found.md", query=query)
43
  else: return str(docs)
44
 
45
  def save(agent:Agent, text:str):
46
+ db = get_db(agent)
47
+ id = db.insert_text(text) # type: ignore
48
+ return agent.read_prompt("fw.memory_saved.md", memory_id=id)
49
 
50
  def delete(agent:Agent, ids_str:str):
51
+ db = get_db(agent)
52
  ids = extract_guids(ids_str)
53
  deleted = db.delete_documents_by_ids(ids) # type: ignore
54
+ return agent.read_prompt("fw.memories_deleted.md", memory_count=deleted)
55
 
56
  def forget(agent:Agent, query:str):
57
+ db = get_db(agent)
58
  deleted = db.delete_documents_by_query(query) # type: ignore
59
+ return agent.read_prompt("fw.memories_deleted.md", memory_count=deleted)
60
 
61
+ def get_db(agent: Agent):
62
+ mem_dir = os.path.join("memory", agent.config.memory_subdir)
63
+ kn_dir = os.path.join("knowledge", agent.config.knowledge_subdir)
64
+ key = (mem_dir, kn_dir)
 
65
 
66
+ if key not in dbs:
67
+ db = VectorDB(embeddings_model=agent.config.embeddings_model, in_memory=False, memory_dir=mem_dir, knowledge_dir=kn_dir)
68
+ dbs[key] = db
69
+ else:
70
+ db = dbs[key]
71
+
72
+ return db
73
+
74
  def extract_guids(text):
75
  pattern = r'\b[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[1-5][0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}\b'
76
  return re.findall(pattern, text)
python/tools/online_knowledge_tool.py DELETED
@@ -1,13 +0,0 @@
1
- from agent import Agent
2
- from python.helpers import perplexity_search
3
- from python.helpers.tool import Tool, Response
4
-
5
- class OnlineKnowledge(Tool):
6
- def execute(self,**kwargs):
7
- return Response(
8
- message=process_question(self.args["question"]),
9
- break_loop=False,
10
- )
11
-
12
- def process_question(question):
13
- return str(perplexity_search.perplexity_search(question))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
python/tools/response.py CHANGED
@@ -6,6 +6,7 @@ from agent import Agent
6
  from python.helpers.tool import Tool, Response
7
  from python.helpers import files
8
  from python.helpers.print_style import PrintStyle
 
9
 
10
  class ResponseTool(Tool):
11
 
@@ -14,7 +15,8 @@ class ResponseTool(Tool):
14
  return Response(message=self.args["text"], break_loop=True)
15
 
16
  def before_execution(self, **kwargs):
17
- pass # do not add anything to the history or output
 
18
 
19
  def after_execution(self, response, **kwargs):
20
  pass # do not add anything to the history or output
 
6
  from python.helpers.tool import Tool, Response
7
  from python.helpers import files
8
  from python.helpers.print_style import PrintStyle
9
+ from python.helpers.log import Log
10
 
11
  class ResponseTool(Tool):
12
 
 
15
  return Response(message=self.args["text"], break_loop=True)
16
 
17
  def before_execution(self, **kwargs):
18
+ self.log = Log(type="response", heading=f"{self.agent.agent_name}: Responding:", content=self.args.get("text", ""))
19
+
20
 
21
  def after_execution(self, response, **kwargs):
22
  pass # do not add anything to the history or output
python/tools/task_done.py CHANGED
@@ -6,6 +6,7 @@ from agent import Agent
6
  from python.helpers.tool import Tool, Response
7
  from python.helpers import files
8
  from python.helpers.print_style import PrintStyle
 
9
 
10
  class TaskDone(Tool):
11
 
@@ -14,7 +15,7 @@ class TaskDone(Tool):
14
  return Response(message=self.args["text"], break_loop=True)
15
 
16
  def before_execution(self, **kwargs):
17
- pass # do not add anything to the history or output
18
 
19
  def after_execution(self, response, **kwargs):
20
  pass # do add anything to the history or output
 
6
  from python.helpers.tool import Tool, Response
7
  from python.helpers import files
8
  from python.helpers.print_style import PrintStyle
9
+ from python.helpers.log import Log
10
 
11
  class TaskDone(Tool):
12
 
 
15
  return Response(message=self.args["text"], break_loop=True)
16
 
17
  def before_execution(self, **kwargs):
18
+ self.log = Log(type="response", heading=f"{self.agent.agent_name}: Task done:", content=self.args.get("text", ""))
19
 
20
  def after_execution(self, response, **kwargs):
21
  pass # do not add anything to the history or output
python/tools/unknown.py CHANGED
@@ -4,8 +4,8 @@ from python.helpers import files
4
  class Unknown(Tool):
5
  def execute(self, **kwargs):
6
  return Response(
7
- message=files.read_file("prompts/fw.tool_not_found.md",
8
  tool_name=self.name,
9
- tools_prompt=files.read_file("prompts/agent.tools.md")),
10
  break_loop=False)
11
 
 
4
  class Unknown(Tool):
5
  def execute(self, **kwargs):
6
  return Response(
7
+ message=self.agent.read_prompt("fw.tool_not_found.md",
8
  tool_name=self.name,
9
+ tools_prompt=self.agent.read_prompt("agent.tools.md")),
10
  break_loop=False)
11
 
requirements.txt CHANGED
@@ -5,7 +5,6 @@ langchain-huggingface==0.0.3
5
  langchain-openai==0.1.15
6
  langchain-community==0.2.7
7
  langchain-anthropic==0.1.19
8
- langchain-chroma==0.1.2
9
  langchain-google-genai==1.0.7
10
  webcolors==24.6.0
11
  sentence-transformers==3.0.1
@@ -16,4 +15,8 @@ inputimeout==1.0.4
16
  newspaper3k==0.2.8
17
  beautifulsoup4==4.12.3
18
  lxml_html_clean==0.2.0
19
- pynput==1.7.7
 
 
 
 
 
5
  langchain-openai==0.1.15
6
  langchain-community==0.2.7
7
  langchain-anthropic==0.1.19
 
8
  langchain-google-genai==1.0.7
9
  webcolors==24.6.0
10
  sentence-transformers==3.0.1
 
15
  newspaper3k==0.2.8
16
  beautifulsoup4==4.12.3
17
  lxml_html_clean==0.2.0
18
+ pynput==1.7.7
19
+ pypdf==4.3.1
20
+ Flask[async]==3.0.3
21
+ Flask-BasicAuth==0.2.0
22
+ faiss-cpu==1.8.0.post1
main.py β†’ run_cli.py RENAMED
@@ -1,5 +1,4 @@
1
- import threading, time, models, tts, stt, os, sys, argparse
2
- from dotenv import load_dotenv
3
  from ansio import application_keypad, mouse_input, raw_input
4
  from ansio.input import InputEvent, get_input_event
5
  from agent import Agent, AgentConfig
@@ -7,105 +6,42 @@ from python.helpers.print_style import PrintStyle
7
  from python.helpers.files import read_file
8
  from python.helpers import files
9
  import python.helpers.timed_input as timed_input
10
- from pynput import keyboard
11
 
12
- load_dotenv() # take environment variables from.env.
13
 
14
  input_lock = threading.Lock()
15
  os.chdir(files.get_abs_path("./work_dir")) #change CWD to work_dir
16
 
17
- def initialize():
18
-
19
- # main chat model used by agents (smarter, more accurate)
20
- chat_llm = models.get_openai_chat(model_name="gpt-4o-mini", temperature=0)
21
- # chat_llm = models.get_ollama_chat(model_name="gemma2:latest", temperature=0)
22
- # chat_llm = models.get_lmstudio_chat(model_name="TheBloke/Mistral-7B-Instruct-v0.2-GGUF", temperature=0)
23
- # chat_llm = models.get_openrouter(model_name="meta-llama/llama-3-8b-instruct:free")
24
- # chat_llm = models.get_azure_openai_chat(deployment_name="gpt-4o-mini", temperature=0)
25
- # chat_llm = models.get_anthropic_chat(model_name="claude-3-5-sonnet-20240620", temperature=0)
26
- # chat_llm = models.get_google_chat(model_name="gemini-1.5-flash", temperature=0)
27
- # chat_llm = models.get_groq_chat(model_name="llama-3.1-70b-versatile", temperature=0)
28
-
29
- # utility model used for helper functions (cheaper, faster)
30
- utility_llm = chat_llm # change if you want to use a different utility model
31
-
32
- # embedding model used for memory
33
- embedding_llm = models.get_openai_embedding(model_name="text-embedding-3-small")
34
- # embedding_llm = models.get_ollama_embedding(model_name="nomic-embed-text")
35
- # embedding_llm = models.get_huggingface_embedding(model_name="sentence-transformers/all-MiniLM-L6-v2")
36
-
37
- # agent configuration
38
- config = AgentConfig(
39
- chat_model = chat_llm,
40
- utility_model = utility_llm,
41
- embeddings_model = embedding_llm,
42
- # memory_subdir = "",
43
- auto_memory_count = 0,
44
- # auto_memory_skip = 2,
45
- # rate_limit_seconds = 60,
46
- # rate_limit_requests = 30,
47
- # rate_limit_input_tokens = 0,
48
- # rate_limit_output_tokens = 0,
49
- # msgs_keep_max = 25,
50
- # msgs_keep_start = 5,
51
- # msgs_keep_end = 10,
52
- # max_tool_response_length = 3000,
53
- # response_timeout_seconds = 60,
54
- code_exec_docker_enabled = True,
55
- # code_exec_docker_name = "agent-zero-exe",
56
- # code_exec_docker_image = "frdel/agent-zero-exe:latest",
57
- # code_exec_docker_ports = { "22/tcp": 50022 }
58
- # code_exec_docker_volumes = { files.get_abs_path("work_dir"): {"bind": "/root", "mode": "rw"} }
59
- code_exec_ssh_enabled = True,
60
- # code_exec_ssh_addr = "localhost",
61
- # code_exec_ssh_port = 50022,
62
- # code_exec_ssh_user = "root",
63
- # code_exec_ssh_pass = "toor",
64
- # additional = {},
65
- )
66
-
67
- # create the first agent
68
- agent0 = Agent( number = 0, config = config )
69
-
70
- # start the chat loop
71
- chat(agent0)
72
-
73
-
74
  # Main conversation loop
75
  def chat(agent:Agent):
76
-
77
  # start the conversation loop
78
  while True:
79
  # ask user for message
80
  with input_lock:
81
  timeout = agent.get_data("timeout") # how long the agent is willing to wait
82
  if not timeout: # if agent wants to wait for user input forever
83
- PrintStyle(background_color="#6C3483", font_color="white", bold=True, padding=True).print(f"User message ('alt_gr' for mic, 'e' to leave):")
84
  import readline # this fixes arrow keys in terminal
85
- listener = keyboard.Listener(on_press=on_press)
86
- listener.start()
87
  user_input = input("> ")
88
- listener.stop()
89
  PrintStyle(font_color="white", padding=False, log_only=True).print(f"> {user_input}")
90
 
91
  else: # otherwise wait for user input with a timeout
92
- PrintStyle(background_color="#6C3483", font_color="white", bold=True, padding=True).print(f"User message ({timeout}s timeout, 'w' to wait, 'alt_gr' for mic, 'e' to leave):")
93
  import readline # this fixes arrow keys in terminal
94
- listener = keyboard.Listener(on_press=on_press)
95
- listener.start()
96
  user_input = timeout_input("> ", timeout=timeout)
97
- listener.stop()
98
  if not user_input:
99
- user_input = read_file("prompts/fw.msg_timeout.md")
100
  PrintStyle(font_color="white", padding=False).stream(f"{user_input}")
101
  else:
102
  user_input = user_input.strip()
103
  if user_input.lower()=="w": # the user needs more time
104
- listener = keyboard.Listener(on_press=on_press)
105
- listener.start()
106
- user_input = input("> ")
107
- listener.stop()
108
- PrintStyle(font_color="white", padding=False, log_only=True).print(f"> {user_input}")
109
 
110
  # exit the conversation when the user types 'exit'
111
  if user_input.lower() == 'e': break
@@ -115,8 +51,8 @@ def chat(agent:Agent):
115
 
116
  # print agent0 response
117
  PrintStyle(font_color="white",background_color="#1D8348", bold=True, padding=True).print(f"{agent.agent_name}: reponse:")
118
- PrintStyle(font_color="white").print(f"{assistant_response}")
119
- if (os.getenv('EDGE_TTS_MODEL')) : tts.speech(assistant_response)
120
 
121
  # User intervention during agent streaming
122
  def intervention():
@@ -132,10 +68,6 @@ def intervention():
132
  if user_input: Agent.streaming_agent.intervention_message = user_input # set intervention message if non-empty
133
  Agent.paused = False # continue agent streaming
134
 
135
- def on_press(key):
136
- if key == keyboard.Key.alt_gr:
137
- # Lancer le speech to text
138
- stt.record()
139
 
140
  # Capture keyboard input to trigger user intervention
141
  def capture_keys():
@@ -164,5 +96,6 @@ if __name__ == "__main__":
164
  # Start the key capture thread for user intervention during agent streaming
165
  threading.Thread(target=capture_keys, daemon=True).start()
166
 
167
- # Start the chat
168
- initialize()
 
 
1
+ import threading, time, models, os
 
2
  from ansio import application_keypad, mouse_input, raw_input
3
  from ansio.input import InputEvent, get_input_event
4
  from agent import Agent, AgentConfig
 
6
  from python.helpers.files import read_file
7
  from python.helpers import files
8
  import python.helpers.timed_input as timed_input
9
+ from initialize import initialize
10
 
 
11
 
12
  input_lock = threading.Lock()
13
  os.chdir(files.get_abs_path("./work_dir")) #change CWD to work_dir
14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  # Main conversation loop
16
  def chat(agent:Agent):
17
+
18
  # start the conversation loop
19
  while True:
20
  # ask user for message
21
  with input_lock:
22
  timeout = agent.get_data("timeout") # how long the agent is willing to wait
23
  if not timeout: # if agent wants to wait for user input forever
24
+ PrintStyle(background_color="#6C3483", font_color="white", bold=True, padding=True).print(f"User message ('e' to leave):")
25
  import readline # this fixes arrow keys in terminal
 
 
26
  user_input = input("> ")
 
27
  PrintStyle(font_color="white", padding=False, log_only=True).print(f"> {user_input}")
28
 
29
  else: # otherwise wait for user input with a timeout
30
+ PrintStyle(background_color="#6C3483", font_color="white", bold=True, padding=True).print(f"User message ({timeout}s timeout, 'w' to wait, 'e' to leave):")
31
  import readline # this fixes arrow keys in terminal
32
+ # user_input = timed_input("> ", timeout=timeout)
 
33
  user_input = timeout_input("> ", timeout=timeout)
34
+
35
  if not user_input:
36
+ user_input = agent.read_prompt("fw.msg_timeout.md")
37
  PrintStyle(font_color="white", padding=False).stream(f"{user_input}")
38
  else:
39
  user_input = user_input.strip()
40
  if user_input.lower()=="w": # the user needs more time
41
+ user_input = input("> ").strip()
42
+ PrintStyle(font_color="white", padding=False, log_only=True).print(f"> {user_input}")
43
+
44
+
 
45
 
46
  # exit the conversation when the user types 'exit'
47
  if user_input.lower() == 'e': break
 
51
 
52
  # print agent0 response
53
  PrintStyle(font_color="white",background_color="#1D8348", bold=True, padding=True).print(f"{agent.agent_name}: reponse:")
54
+ PrintStyle(font_color="white").print(f"{assistant_response}")
55
+
56
 
57
  # User intervention during agent streaming
58
  def intervention():
 
68
  if user_input: Agent.streaming_agent.intervention_message = user_input # set intervention message if non-empty
69
  Agent.paused = False # continue agent streaming
70
 
 
 
 
 
71
 
72
  # Capture keyboard input to trigger user intervention
73
  def capture_keys():
 
96
  # Start the key capture thread for user intervention during agent streaming
97
  threading.Thread(target=capture_keys, daemon=True).start()
98
 
99
+ # initialize and start the chat
100
+ agent0 = initialize()
101
+ chat(agent0)
run_ui.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import wraps
2
+ import os
3
+ from pathlib import Path
4
+ import threading
5
+ from flask import Flask, request, jsonify, Response
6
+ from flask_basicauth import BasicAuth
7
+ from agent import Agent
8
+ from initialize import initialize
9
+ from python.helpers.files import get_abs_path
10
+ from python.helpers.print_style import PrintStyle
11
+ from python.helpers.log import Log
12
+ from dotenv import load_dotenv
13
+
14
+ #global agent instance
15
+ agent0: Agent|None = None
16
+
17
+ #initialize the internal Flask server
18
+ app = Flask("app",static_folder=get_abs_path("./webui"),static_url_path="/")
19
+
20
+ # Set up basic authentication, name and password from .env variables
21
+ app.config['BASIC_AUTH_USERNAME'] = os.environ.get('BASIC_AUTH_USERNAME') or "admin" #default name
22
+ app.config['BASIC_AUTH_PASSWORD'] = os.environ.get('BASIC_AUTH_PASSWORD') or "admin" #default pass
23
+ basic_auth = BasicAuth(app)
24
+
25
+ # get global agent
26
+ def get_agent(reset: bool = False) -> Agent:
27
+ global agent0
28
+ if agent0 is None or reset:
29
+ agent0 = initialize()
30
+ return agent0
31
+
32
+ # Now you can use @requires_auth function decorator to require login on certain pages
33
+ def requires_auth(f):
34
+ @wraps(f)
35
+ async def decorated(*args, **kwargs):
36
+ auth = request.authorization
37
+ if not auth or not (auth.username == app.config['BASIC_AUTH_USERNAME'] and auth.password == app.config['BASIC_AUTH_PASSWORD']):
38
+ return Response(
39
+ 'Could not verify your access level for that URL.\n'
40
+ 'You have to login with proper credentials', 401,
41
+ {'WWW-Authenticate': 'Basic realm="Login Required"'})
42
+ return await f(*args, **kwargs)
43
+ return decorated
44
+
45
+ # handle default address, show demo html page from ./test_form.html
46
+ @app.route('/', methods=['GET'])
47
+ async def test_form():
48
+ return Path(get_abs_path("./webui/index.html")).read_text()
49
+
50
+ # simple health check, just return OK to see the server is running
51
+ @app.route('/ok', methods=['GET','POST'])
52
+ async def health_check():
53
+ return "OK"
54
+
55
+ # # secret page, requires authentication
56
+ # @app.route('/secret', methods=['GET'])
57
+ # @requires_auth
58
+ # async def secret_page():
59
+ # return Path("./secret_page.html").read_text()
60
+
61
+ # send message to agent
62
+ @app.route('/msg', methods=['POST'])
63
+ async def handle_message():
64
+ try:
65
+
66
+ #agent instance
67
+ agent = get_agent()
68
+
69
+ #data sent to the server
70
+ input = request.get_json()
71
+ text = input.get("text", "")
72
+
73
+ # print to console and log
74
+ PrintStyle(background_color="#6C3483", font_color="white", bold=True, padding=True).print(f"User message:")
75
+ PrintStyle(font_color="white", padding=False).print(f"> {text}")
76
+ Log.log(type="user", heading="User message", content=text)
77
+
78
+ #pass the message to the agent
79
+ threading.Thread(target=agent.communicate, args=(text,)).start()
80
+
81
+ #data from this server
82
+ response = {
83
+ "ok": True,
84
+ "message": "Message received.",
85
+ }
86
+
87
+ except Exception as e:
88
+ response = {
89
+ "ok": False,
90
+ "message": str(e),
91
+ }
92
+
93
+ #respond with json
94
+ return jsonify(response)
95
+
96
+ # pausing/unpausing the agent
97
+ @app.route('/pause', methods=['POST'])
98
+ async def pause():
99
+ try:
100
+
101
+ #data sent to the server
102
+ input = request.get_json()
103
+ paused = input.get("paused", False)
104
+
105
+ Agent.paused = paused
106
+
107
+ response = {
108
+ "ok": True,
109
+ "message": "Agent paused." if paused else "Agent unpaused.",
110
+ "pause": paused
111
+ }
112
+
113
+ except Exception as e:
114
+ response = {
115
+ "ok": False,
116
+ "message": str(e),
117
+ }
118
+
119
+ #respond with json
120
+ return jsonify(response)
121
+
122
+ # restarting with new agent0
123
+ @app.route('/reset', methods=['POST'])
124
+ async def reset():
125
+ try:
126
+
127
+ agent = get_agent(reset=True)
128
+ Log.reset()
129
+
130
+ response = {
131
+ "ok": True,
132
+ "message": "Agent restarted.",
133
+ }
134
+
135
+ except Exception as e:
136
+ response = {
137
+ "ok": False,
138
+ "message": str(e),
139
+ }
140
+
141
+ #respond with json
142
+ return jsonify(response)
143
+
144
+ # Web UI polling
145
+ @app.route('/poll', methods=['POST'])
146
+ async def poll():
147
+ try:
148
+
149
+ #data sent to the server
150
+ input = request.get_json()
151
+ from_no = input.get("log_from", "")
152
+
153
+ logs = Log.logs[int(from_no):]
154
+ to = Log.last_updated #max(0, len(Log.logs)-1)
155
+
156
+ #data from this server
157
+ response = {
158
+ "ok": True,
159
+ "logs": logs,
160
+ "log_to": to,
161
+ "log_guid": Log.guid,
162
+ "log_version": Log.version,
163
+ "paused": Agent.paused
164
+ }
165
+
166
+ except Exception as e:
167
+ response = {
168
+ "ok": False,
169
+ "message": str(e),
170
+ }
171
+
172
+ #respond with json
173
+ return jsonify(response)
174
+
175
+
176
+
177
+ #run the internal server
178
+ if __name__ == "__main__":
179
+
180
+ load_dotenv()
181
+
182
+ get_agent() #initialize
183
+
184
+ # Suppress only request logs but keep the startup messages
185
+ from werkzeug.serving import WSGIRequestHandler
186
+ class NoRequestLoggingWSGIRequestHandler(WSGIRequestHandler):
187
+ def log_request(self, code='-', size='-'):
188
+ pass # Override to suppress request logging
189
+
190
+ # run the server on port from .env
191
+ port = int(os.environ.get("WEB_UI_PORT", 0)) or None
192
+ app.run(request_handler=NoRequestLoggingWSGIRequestHandler,port=port)