frdel committed on
Commit
c425c40
·
1 Parent(s): ba0986b

docker files, agent number fix, json misformat message

Browse files
agent.py CHANGED
@@ -15,7 +15,6 @@ from python.helpers.rate_limiter import RateLimiter
15
 
16
  @dataclass
17
  class AgentConfig:
18
- agent_number: int
19
  chat_model:BaseChatModel
20
  embeddings_model:Embeddings
21
  memory_subdir: str = ""
@@ -54,7 +53,7 @@ class Agent:
54
 
55
  # non-config vars
56
  self.number = number
57
- self.agent_name = f"Agent {self.config.agent_number}"
58
 
59
  self.system_prompt = files.read_file("./prompts/agent.system.md").replace("{", "{{").replace("}", "}}")
60
  self.tools_prompt = files.read_file("./prompts/agent.tools.md").replace("{", "{{").replace("}", "}}")
@@ -236,20 +235,26 @@ class Agent:
236
  def process_tools(self, msg: str):
237
  # search for tool usage requests in agent message
238
  tool_request = extract_tools.json_parse_dirty(msg)
239
- tool_name = tool_request.get("tool_name", "")
240
- tool_args = tool_request.get("tool_args", {})
241
 
242
- tool = self.get_tool(
243
- tool_name,
244
- tool_args,
245
- msg)
 
 
 
 
 
 
246
 
247
- if self.handle_intervention(): return # wait if paused and handle intervention message if needed
248
-
249
- tool.before_execution(**tool_args)
250
- response = tool.execute(**tool_args)
251
- tool.after_execution(response)
252
- if response.break_loop: return response.message
 
 
253
 
254
 
255
  def get_tool(self, name: str, args: dict, message: str, **kwargs):
 
15
 
16
  @dataclass
17
  class AgentConfig:
 
18
  chat_model:BaseChatModel
19
  embeddings_model:Embeddings
20
  memory_subdir: str = ""
 
53
 
54
  # non-config vars
55
  self.number = number
56
+ self.agent_name = f"Agent {self.number}"
57
 
58
  self.system_prompt = files.read_file("./prompts/agent.system.md").replace("{", "{{").replace("}", "}}")
59
  self.tools_prompt = files.read_file("./prompts/agent.tools.md").replace("{", "{{").replace("}", "}}")
 
235
  def process_tools(self, msg: str):
236
  # search for tool usage requests in agent message
237
  tool_request = extract_tools.json_parse_dirty(msg)
 
 
238
 
239
+ if tool_request is not None:
240
+ tool_name = tool_request.get("tool_name", "")
241
+ tool_args = tool_request.get("tool_args", {})
242
+
243
+ tool = self.get_tool(
244
+ tool_name,
245
+ tool_args,
246
+ msg)
247
+
248
+ if self.handle_intervention(): return # wait if paused and handle intervention message if needed
249
 
250
+ tool.before_execution(**tool_args)
251
+ response = tool.execute(**tool_args)
252
+ tool.after_execution(response)
253
+ if response.break_loop: return response.message
254
+ else:
255
+ msg = files.read_file("prompts/fw.msg_misformat.md")
256
+ self.append_message(msg, human=True)
257
+ PrintStyle(font_color="red", padding=True).print(msg)
258
 
259
 
260
  def get_tool(self, name: str, args: dict, message: str, **kwargs):
docker/.bashrc ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # .bashrc
2
+
3
+ # Source global definitions
4
+ if [ -f /etc/bashrc ]; then
5
+ . /etc/bashrc
6
+ fi
7
+
8
+ # Activate the virtual environment
9
+ source /opt/venv/bin/activate
docker/Dockerfile ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use the latest slim version of Debian
2
+ FROM --platform=$TARGETPLATFORM debian:bookworm-slim
3
+
4
+ # Set ARG for platform-specific commands
5
+ ARG TARGETPLATFORM
6
+
7
+ # Update and install necessary packages
8
+ RUN apt-get update && apt-get install -y \
9
+ python3 \
10
+ python3-pip \
11
+ python3-venv \
12
+ nodejs \
13
+ npm \
14
+ openssh-server \
15
+ sudo \
16
+ && rm -rf /var/lib/apt/lists/*
17
+
18
+ # Set up SSH
19
+ RUN mkdir /var/run/sshd && \
20
+ echo 'root:toor' | chpasswd && \
21
+ sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
22
+
23
+ # Create and activate Python virtual environment
24
+ ENV VIRTUAL_ENV=/opt/venv
25
+ RUN python3 -m venv $VIRTUAL_ENV
26
+
27
+ # Copy initial .bashrc with virtual environment activation to a temporary location
28
+ COPY .bashrc /etc/skel/.bashrc
29
+
30
+ # Copy the script to ensure .bashrc is in the root directory
31
+ COPY initialize.sh /usr/local/bin/initialize.sh
32
+ RUN chmod +x /usr/local/bin/initialize.sh
33
+
34
+ # Ensure the virtual environment and pip setup
35
+ RUN $VIRTUAL_ENV/bin/pip install --upgrade pip
36
+
37
+ # Expose SSH port
38
+ EXPOSE 22
39
+
40
+ # Init .bashrc
41
+ CMD ["/usr/local/bin/initialize.sh"]
42
+
43
+
docker/initialize.sh ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ # Ensure .bashrc is in the root directory
4
+ if [ ! -f /root/.bashrc ]; then
5
+ cp /etc/skel/.bashrc /root/.bashrc
6
+ chmod 444 /root/.bashrc
7
+ fi
8
+
9
+ # Ensure .profile is in the root directory
10
+ if [ ! -f /root/.profile ]; then
11
+ cp /etc/skel/.bashrc /root/.profile
12
+ chmod 444 /root/.profile
13
+ fi
14
+
15
+ # Start SSH service
16
+ exec /usr/sbin/sshd -D
main.py CHANGED
@@ -9,7 +9,6 @@ from python.helpers import files
9
 
10
 
11
  input_lock = threading.Lock()
12
-
13
  os.chdir(files.get_abs_path("./work_dir")) #change CWD to work_dir
14
 
15
 
@@ -25,6 +24,10 @@ def initialize():
25
  # chat_llm = models.get_anthropic_sonnet_35(temperature=0)
26
  # chat_llm = models.get_anthropic_haiku(temperature=0)
27
  # chat_llm = models.get_ollama_dolphin()
 
 
 
 
28
 
29
  # embedding model used for memory
30
  # embedding_llm = models.get_embedding_openai()
@@ -32,7 +35,6 @@ def initialize():
32
 
33
  # agent configuration
34
  config = AgentConfig(
35
- agent_number = 0,
36
  chat_model = chat_llm,
37
  embeddings_model = embedding_llm,
38
  # memory_subdir = "",
 
9
 
10
 
11
  input_lock = threading.Lock()
 
12
  os.chdir(files.get_abs_path("./work_dir")) #change CWD to work_dir
13
 
14
 
 
24
  # chat_llm = models.get_anthropic_sonnet_35(temperature=0)
25
  # chat_llm = models.get_anthropic_haiku(temperature=0)
26
  # chat_llm = models.get_ollama_dolphin()
27
+ # chat_llm = models.get_ollama(model_name="gemma2:27b")
28
+ # chat_llm = models.get_ollama(model_name="llama3:8b-text-fp16")
29
+ # chat_llm = models.get_ollama(model_name="gemma2:latest")
30
+ # chat_llm = models.get_ollama(model_name="qwen:14b")
31
 
32
  # embedding model used for memory
33
  # embedding_llm = models.get_embedding_openai()
 
35
 
36
  # agent configuration
37
  config = AgentConfig(
 
38
  chat_model = chat_llm,
39
  embeddings_model = embedding_llm,
40
  # memory_subdir = "",
models.py CHANGED
@@ -68,6 +68,9 @@ def get_groq_llama8b(api_key=None, temperature=DEFAULT_TEMPERATURE):
68
  api_key = api_key or get_api_key("groq")
69
  return ChatGroq(model_name="Llama3-8b-8192", temperature=temperature, api_key=api_key) # type: ignore
70
 
 
 
 
71
  def get_groq_gemma(api_key=None, temperature=DEFAULT_TEMPERATURE):
72
  api_key = api_key or get_api_key("groq")
73
  return ChatGroq(model_name="gemma-7b-it", temperature=temperature, api_key=api_key) # type: ignore
 
68
  api_key = api_key or get_api_key("groq")
69
  return ChatGroq(model_name="Llama3-8b-8192", temperature=temperature, api_key=api_key) # type: ignore
70
 
71
+ def get_ollama(model_name, temperature=DEFAULT_TEMPERATURE):
72
+ return Ollama(model=model_name,temperature=temperature)
73
+
74
  def get_groq_gemma(api_key=None, temperature=DEFAULT_TEMPERATURE):
75
  api_key = api_key or get_api_key("groq")
76
  return ChatGroq(model_name="gemma-7b-it", temperature=temperature, api_key=api_key) # type: ignore
prompts/agent.tools.md CHANGED
@@ -86,6 +86,7 @@ Execute provided terminal commands, python code or nodejs code.
86
  This tool can be used to achieve any task that requires computation, or any other software related activity.
87
  Place your code escaped and properly indented in the "code" argument.
88
  Select the corresponding runtime with "runtime" argument. Possible values are "terminal", "python" and "nodejs".
 
89
  You can use pip, npm and apt-get in terminal runtime to install any required packages.
90
  IMPORTANT: Never use implicit print or implicit output, it does not work! If you need output of your code, you MUST use print() or console.log() to output selected variables.
91
  When tool outputs error, you need to change your code accordingly before trying again. knowledge_tool can help analyze errors.
 
86
  This tool can be used to achieve any task that requires computation, or any other software related activity.
87
  Place your code escaped and properly indented in the "code" argument.
88
  Select the corresponding runtime with "runtime" argument. Possible values are "terminal", "python" and "nodejs".
89
+ Sometimes a dialogue can occur in output, questions like Y/N, in that case use the "terminal" runtime in the next step and send your answer.
90
  You can use pip, npm and apt-get in terminal runtime to install any required packages.
91
  IMPORTANT: Never use implicit print or implicit output, it does not work! If you need output of your code, you MUST use print() or console.log() to output selected variables.
92
  When tool outputs error, you need to change your code accordingly before trying again. knowledge_tool can help analyze errors.
prompts/fw.msg_misformat.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ ~~~json
2
+ {
3
+ "system_warning": "You have misformatted your message. Follow system prompt instructions on JSON message formatting precisely."
4
+ }
5
+ ~~~
python/helpers/docker.py CHANGED
@@ -35,7 +35,7 @@ class DockerContainerManager:
35
  #print(f"Container with name '{self.name}' is already running with ID: {existing_container.id}")
36
  pass
37
  else:
38
- print(f"Initializing docker container {self.name}...")
39
  self.container = self.client.containers.run(
40
  self.image,
41
  detach=True,
 
35
  #print(f"Container with name '{self.name}' is already running with ID: {existing_container.id}")
36
  pass
37
  else:
38
+ print(f"Initializing docker container {self.name} for safe code execution...")
39
  self.container = self.client.containers.run(
40
  self.image,
41
  detach=True,
python/helpers/extract_tools.py CHANGED
@@ -6,17 +6,17 @@ from .dirty_json import DirtyJson
6
  import regex
7
 
8
 
9
- def json_parse_dirty(json:str) -> dict[str,Any]:
10
  ext_json = extract_json_object_string(json)
11
- # ext_json = fix_json_string(ext_json)
12
- data = DirtyJson.parse_string(ext_json)
13
- if isinstance(data,dict): return data
14
- return {}
 
15
 
16
  def extract_json_object_string(content):
17
  start = content.find('{')
18
  if start == -1:
19
- print("No JSON content found.")
20
  return ""
21
 
22
  # Find the first '{'
 
6
  import regex
7
 
8
 
9
+ def json_parse_dirty(json:str) -> dict[str,Any] | None:
10
  ext_json = extract_json_object_string(json)
11
+ if ext_json:
12
+ # ext_json = fix_json_string(ext_json)
13
+ data = DirtyJson.parse_string(ext_json)
14
+ if isinstance(data,dict): return data
15
+ return None
16
 
17
  def extract_json_object_string(content):
18
  start = content.find('{')
19
  if start == -1:
 
20
  return ""
21
 
22
  # Find the first '{'