Spaces:
Paused
Paused
frdel
committed on
Commit
·
3d95878
1
Parent(s):
4b2cd36
duckduckgo search prototype
Browse files- main.py +1 -0
- prompts/agent.memory.md +0 -1
- prompts/fw.code_no_output.md +5 -3
- prompts/fw.code_runtime_wrong.md +5 -1
- prompts/fw.error.md +5 -2
- prompts/fw.intervention.md +5 -1
- prompts/fw.memories_deleted.md +5 -1
- prompts/fw.memories_not_found.md +5 -1
- prompts/fw.memorized.md +0 -1
- prompts/fw.memory_saved.md +5 -1
- prompts/fw.msg_cleanup.md +1 -0
- prompts/tool.knowledge.response.md +6 -0
- python/helpers/duckduckgo_search.py +30 -0
- python/helpers/vector_db.py +2 -1
- python/tools/knowledge_tool.py +21 -4
- requirements.txt +2 -1
main.py
CHANGED
|
@@ -55,6 +55,7 @@ def initialize():
|
|
| 55 |
# msgs_keep_start = 5,
|
| 56 |
# msgs_keep_end = 10,
|
| 57 |
# max_tool_response_length = 3000,
|
|
|
|
| 58 |
code_exec_docker_enabled = True,
|
| 59 |
# code_exec_docker_name = "agent-zero-exe",
|
| 60 |
# code_exec_docker_image = "frdel/agent-zero-exe:latest",
|
|
|
|
| 55 |
# msgs_keep_start = 5,
|
| 56 |
# msgs_keep_end = 10,
|
| 57 |
# max_tool_response_length = 3000,
|
| 58 |
+
# response_timeout_seconds = 60,
|
| 59 |
code_exec_docker_enabled = True,
|
| 60 |
# code_exec_docker_name = "agent-zero-exe",
|
| 61 |
# code_exec_docker_image = "frdel/agent-zero-exe:latest",
|
prompts/agent.memory.md
CHANGED
|
@@ -1,5 +1,4 @@
|
|
| 1 |
# Memories
|
| 2 |
- following are your memories on the current topic
|
| 3 |
-
- you may find some of them helpful to solve the current task
|
| 4 |
|
| 5 |
{{memories}}
|
|
|
|
| 1 |
# Memories
|
| 2 |
- following are your memories on the current topic
|
|
|
|
| 3 |
|
| 4 |
{{memories}}
|
prompts/fw.code_no_output.md
CHANGED
|
@@ -1,3 +1,5 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
Otherwise proceed.
|
|
|
|
|
|
|
|
|
| 1 |
+
~~~json
|
| 2 |
+
{
|
| 3 |
+
"system_warning": "No output or error was returned. If you require output from the tool, you have to use console printing in your code. Otherwise proceed."
|
| 4 |
+
}
|
| 5 |
+
~~~
|
prompts/fw.code_runtime_wrong.md
CHANGED
|
@@ -1 +1,5 @@
|
|
| 1 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
~~~json
|
| 2 |
+
{
|
| 3 |
+
"system_warning": "The runtime '{{runtime}}' is not supported, available options are 'terminal', 'python', 'nodejs' and 'output'."
|
| 4 |
+
}
|
| 5 |
+
~~~
|
prompts/fw.error.md
CHANGED
|
@@ -1,2 +1,5 @@
|
|
| 1 |
-
|
| 2 |
-
{
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
~~~json
|
| 2 |
+
{
|
| 3 |
+
"system_error": "{{error}}"
|
| 4 |
+
}
|
| 5 |
+
~~~
|
prompts/fw.intervention.md
CHANGED
|
@@ -1 +1,5 @@
|
|
| 1 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
~~~json
|
| 2 |
+
{
|
| 3 |
+
"user_intervention": "{{user_message}}"
|
| 4 |
+
}
|
| 5 |
+
~~~
|
prompts/fw.memories_deleted.md
CHANGED
|
@@ -1 +1,5 @@
|
|
| 1 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
~~~json
|
| 2 |
+
{
|
| 3 |
+
"memories_deleted": "{{memory_count}}"
|
| 4 |
+
}
|
| 5 |
+
~~~
|
prompts/fw.memories_not_found.md
CHANGED
|
@@ -1 +1,5 @@
|
|
| 1 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
~~~json
|
| 2 |
+
{
|
| 3 |
+
"memory": "No memories found for specified query: {{query}}"
|
| 4 |
+
}
|
| 5 |
+
~~~
|
prompts/fw.memorized.md
DELETED
|
@@ -1 +0,0 @@
|
|
| 1 |
-
Information saved to memory.
|
|
|
|
|
|
prompts/fw.memory_saved.md
CHANGED
|
@@ -1 +1,5 @@
|
|
| 1 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
~~~json
|
| 2 |
+
{
|
| 3 |
+
"memory": "Memory has been saved with id {{memory_id}}."
|
| 4 |
+
}
|
| 5 |
+
~~~
|
prompts/fw.msg_cleanup.md
CHANGED
|
@@ -1,6 +1,7 @@
|
|
| 1 |
# Provide a JSON summary of given messages
|
| 2 |
- From the messages you are given, write a summary of key points in the conversation.
|
| 3 |
- Include important aspects and remove unnecessary details.
|
|
|
|
| 4 |
|
| 5 |
# Expected output format
|
| 6 |
~~~json
|
|
|
|
| 1 |
# Provide a JSON summary of given messages
|
| 2 |
- From the messages you are given, write a summary of key points in the conversation.
|
| 3 |
- Include important aspects and remove unnecessary details.
|
| 4 |
+
- Keep necessary information like file names, URLs, keys etc.
|
| 5 |
|
| 6 |
# Expected output format
|
| 7 |
~~~json
|
prompts/tool.knowledge.response.md
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
~~~json
|
| 2 |
+
{
|
| 3 |
+
"online_sources": "{{online_sources}}",
|
| 4 |
+
"memory": "{{memory}}",
|
| 5 |
+
}
|
| 6 |
+
~~~
|
python/helpers/duckduckgo_search.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
|
| 2 |
+
|
| 3 |
+
# def search(query: str, results = 5, region = "wt-wt", time="y") -> str:
|
| 4 |
+
# # Create an instance with custom parameters
|
| 5 |
+
# api = DuckDuckGoSearchAPIWrapper(
|
| 6 |
+
# region=region, # Set the region for search results
|
| 7 |
+
# safesearch="off", # Set safesearch level (options: strict, moderate, off)
|
| 8 |
+
# time=time, # Set time range (options: d, w, m, y)
|
| 9 |
+
# max_results=results # Set maximum number of results to return
|
| 10 |
+
# )
|
| 11 |
+
# # Perform a search
|
| 12 |
+
# result = api.run(query)
|
| 13 |
+
# return result
|
| 14 |
+
|
| 15 |
+
from duckduckgo_search import DDGS
|
| 16 |
+
|
| 17 |
+
def search(query: str, results = 5, region = "wt-wt", time="y") -> list[str]:
|
| 18 |
+
|
| 19 |
+
ddgs = DDGS()
|
| 20 |
+
src = ddgs.text(
|
| 21 |
+
query,
|
| 22 |
+
region=region, # Specify region
|
| 23 |
+
safesearch="off", # SafeSearch setting
|
| 24 |
+
timelimit=time, # Time limit (y = past year)
|
| 25 |
+
max_results=results # Number of results to return
|
| 26 |
+
)
|
| 27 |
+
results = []
|
| 28 |
+
for s in src:
|
| 29 |
+
results.append(str(s))
|
| 30 |
+
return results
|
python/helpers/vector_db.py
CHANGED
|
@@ -28,6 +28,7 @@ class VectorDB:
|
|
| 28 |
self.store,
|
| 29 |
namespace=getattr(embeddings_model, 'model', getattr(embeddings_model, 'model_name', "default")) )
|
| 30 |
|
|
|
|
| 31 |
self.db = Chroma(embedding_function=self.embedder,persist_directory=db_cache)
|
| 32 |
|
| 33 |
|
|
@@ -35,7 +36,7 @@ class VectorDB:
|
|
| 35 |
return self.db.similarity_search(query,results)
|
| 36 |
|
| 37 |
def search_similarity_threshold(self, query, results=3, threshold=0.5):
|
| 38 |
-
return self.db.search(query,search_type="similarity_score_threshold",score_threshold=threshold)
|
| 39 |
|
| 40 |
def search_max_rel(self, query, results=3):
|
| 41 |
return self.db.max_marginal_relevance_search(query,results)
|
|
|
|
| 28 |
self.store,
|
| 29 |
namespace=getattr(embeddings_model, 'model', getattr(embeddings_model, 'model_name', "default")) )
|
| 30 |
|
| 31 |
+
|
| 32 |
self.db = Chroma(embedding_function=self.embedder,persist_directory=db_cache)
|
| 33 |
|
| 34 |
|
|
|
|
| 36 |
return self.db.similarity_search(query,results)
|
| 37 |
|
| 38 |
def search_similarity_threshold(self, query, results=3, threshold=0.5):
|
| 39 |
+
return self.db.search(query, search_type="similarity_score_threshold", k=results, score_threshold=threshold)
|
| 40 |
|
| 41 |
def search_max_rel(self, query, results=3):
|
| 42 |
return self.db.max_marginal_relevance_search(query,results)
|
python/tools/knowledge_tool.py
CHANGED
|
@@ -1,5 +1,9 @@
|
|
|
|
|
| 1 |
from agent import Agent
|
| 2 |
from . import online_knowledge_tool
|
|
|
|
|
|
|
|
|
|
| 3 |
from . import memory_tool
|
| 4 |
import concurrent.futures
|
| 5 |
|
|
@@ -12,12 +16,25 @@ class Knowledge(Tool):
|
|
| 12 |
def execute(self, question="", **kwargs):
|
| 13 |
with concurrent.futures.ThreadPoolExecutor() as executor:
|
| 14 |
# Schedule the two functions to be run in parallel
|
| 15 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
future_memory = executor.submit(memory_tool.search, self.agent, question)
|
| 17 |
|
| 18 |
# Wait for both functions to complete
|
| 19 |
-
|
|
|
|
| 20 |
memory_result = future_memory.result()
|
| 21 |
|
| 22 |
-
|
| 23 |
-
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
from agent import Agent
|
| 3 |
from . import online_knowledge_tool
|
| 4 |
+
from python.helpers import perplexity_search
|
| 5 |
+
from python.helpers import duckduckgo_search
|
| 6 |
+
|
| 7 |
from . import memory_tool
|
| 8 |
import concurrent.futures
|
| 9 |
|
|
|
|
| 16 |
def execute(self, question="", **kwargs):
|
| 17 |
with concurrent.futures.ThreadPoolExecutor() as executor:
|
| 18 |
# Schedule the two functions to be run in parallel
|
| 19 |
+
|
| 20 |
+
# perplexity search, if API provided
|
| 21 |
+
if os.getenv("API_KEY_PERPLEXITY"):
|
| 22 |
+
perplexity = executor.submit(perplexity_search.perplexity_search, question)
|
| 23 |
+
else: perplexity = None
|
| 24 |
+
|
| 25 |
+
# duckduckgo search
|
| 26 |
+
duckduckgo = executor.submit(duckduckgo_search.search, question)
|
| 27 |
+
|
| 28 |
+
# memory search
|
| 29 |
future_memory = executor.submit(memory_tool.search, self.agent, question)
|
| 30 |
|
| 31 |
# Wait for both functions to complete
|
| 32 |
+
perplexity_result = (perplexity.result() if perplexity else "") or ""
|
| 33 |
+
duckduckgo_result = duckduckgo.result()
|
| 34 |
memory_result = future_memory.result()
|
| 35 |
|
| 36 |
+
msg = files.read_file("prompts/tool.knowledge.response.md",
|
| 37 |
+
online_sources = perplexity_result + "\n\n" + str(duckduckgo_result),
|
| 38 |
+
memory = memory_result )
|
| 39 |
+
|
| 40 |
+
return Response(message=msg, break_loop=False)
|
requirements.txt
CHANGED
|
@@ -11,4 +11,5 @@ webcolors==24.6.0
|
|
| 11 |
sentence-transformers==3.0.1
|
| 12 |
pytimedinput==2.0.1
|
| 13 |
docker==7.1.0
|
| 14 |
-
paramiko==3.4.0
|
|
|
|
|
|
| 11 |
sentence-transformers==3.0.1
|
| 12 |
pytimedinput==2.0.1
|
| 13 |
docker==7.1.0
|
| 14 |
+
paramiko==3.4.0
|
| 15 |
+
duckduckgo_search==6.1.12
|