Spaces:
Paused
Paused
frdel commited on
Commit ·
ab1dce9
1
Parent(s): c8fe3c7
Memory extensions, UI updates
Browse filesAuto memory extensions for main and solutions areas
UI updates, error toasts, progress bar
- .gitignore +12 -3
- .vscode/settings.json +0 -1
- agent.py +34 -30
- example.env +4 -2
- initialize.py +5 -4
- knowledge/custom/.gitkeep +0 -0
- knowledge/custom/main/.gitkeep +0 -0
- knowledge/custom/solutions/.gitkeep +0 -0
- knowledge/default/.gitkeep +0 -0
- knowledge/default/main/.gitkeep +0 -0
- knowledge/default/solutions/.gitkeep +0 -0
- knowledge/default/solutions/yt_download.md +6 -0
- models.py +5 -0
- prompts/default/agent.system.memories.md +4 -0
- prompts/default/memory.memories_query.sys.md +19 -0
- prompts/default/memory.memories_sum.sys.md +22 -0
- python/extensions/message_loop_prompts/recall_memories.py +97 -0
- python/extensions/message_loop_prompts/recall_solutions.py +72 -48
- python/extensions/monologue_end/50_memorize_memories.py +74 -0
- python/extensions/monologue_end/{memorize_solutions.py → 51_memorize_solutions.py} +36 -32
- python/extensions/monologue_end/90_waiting_for_input_msg.py +12 -0
- python/extensions/msg_loop_break/zero_memorize_history.py +0 -35
- python/helpers/knowledge_import.py +77 -47
- python/helpers/log.py +85 -21
- python/helpers/tool.py +3 -3
- python/helpers/vector_db.py +51 -22
- python/tools/code_execution_tool.py +3 -3
- python/tools/knowledge_tool.py +1 -3
- python/tools/memory_tool.py +19 -22
- python/tools/response.py +1 -1
- python/tools/task_done.py +1 -1
- requirements.txt +4 -0
- run_ui.py +105 -73
- test.py +0 -293
- test2.py +0 -120
- webui/index.css +77 -13
- webui/index.html +69 -46
- webui/index.js +126 -49
- webui/messages.js +23 -18
- webui/toast.css +42 -0
.gitignore
CHANGED
|
@@ -26,7 +26,16 @@ tmp/*
|
|
| 26 |
# But do not ignore the directory itself
|
| 27 |
!tmp/.gitkeep
|
| 28 |
|
| 29 |
-
# Ignore
|
| 30 |
knowledge/*
|
| 31 |
-
|
| 32 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
# But do not ignore the directory itself
|
| 27 |
!tmp/.gitkeep
|
| 28 |
|
| 29 |
+
# Ignore everything in the "knowledge" directory
|
| 30 |
knowledge/*
|
| 31 |
+
|
| 32 |
+
# Do not ignore subdirectories (so we can track .gitkeep)
|
| 33 |
+
!knowledge/*/
|
| 34 |
+
|
| 35 |
+
# Ignore all files within subdirectories (except .gitkeep)
|
| 36 |
+
knowledge/**/*.*
|
| 37 |
+
!knowledge/**/.gitkeep
|
| 38 |
+
|
| 39 |
+
# Explicitly allow the default folder and its contents
|
| 40 |
+
!knowledge/default/
|
| 41 |
+
!knowledge/default/**
|
.vscode/settings.json
CHANGED
|
@@ -1,4 +1,3 @@
|
|
| 1 |
{
|
| 2 |
"python.analysis.typeCheckingMode": "standard",
|
| 3 |
-
"deno.enable": true
|
| 4 |
}
|
|
|
|
| 1 |
{
|
| 2 |
"python.analysis.typeCheckingMode": "standard",
|
|
|
|
| 3 |
}
|
agent.py
CHANGED
|
@@ -14,6 +14,7 @@ from langchain_core.embeddings import Embeddings
|
|
| 14 |
import python.helpers.log as Log
|
| 15 |
from python.helpers.dirty_json import DirtyJson
|
| 16 |
from python.helpers.defer import DeferredTask
|
|
|
|
| 17 |
|
| 18 |
|
| 19 |
class AgentContext:
|
|
@@ -90,7 +91,7 @@ class AgentConfig:
|
|
| 90 |
embeddings_model: Embeddings
|
| 91 |
prompts_subdir: str = ""
|
| 92 |
memory_subdir: str = ""
|
| 93 |
-
|
| 94 |
auto_memory_count: int = 3
|
| 95 |
auto_memory_skip: int = 2
|
| 96 |
rate_limit_seconds: int = 60
|
|
@@ -162,29 +163,31 @@ class Agent:
|
|
| 162 |
async def monologue(self, msg: str):
|
| 163 |
try:
|
| 164 |
|
| 165 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 166 |
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
) # call monologue_start extensions
|
| 170 |
|
| 171 |
printer = PrintStyle(italic=True, font_color="#b3ffd9", padding=False)
|
| 172 |
user_message = self.read_prompt("fw.user_message.md", message=msg)
|
| 173 |
await self.append_message(user_message, human=True)
|
| 174 |
|
| 175 |
await self.call_extensions(
|
| 176 |
-
"monologue_start",
|
| 177 |
) # call monologue_end extensions
|
| 178 |
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
): # let the agent iterate on his thoughts until he stops by using a tool
|
| 182 |
|
| 183 |
self.context.streaming_agent = self # mark self as current streamer
|
| 184 |
agent_response = ""
|
| 185 |
loop_data["iteration"] += 1
|
| 186 |
|
| 187 |
-
|
| 188 |
try:
|
| 189 |
|
| 190 |
# set system prompt and message history
|
|
@@ -219,9 +222,9 @@ class Agent:
|
|
| 219 |
font_color="green",
|
| 220 |
padding=True,
|
| 221 |
background_color="white",
|
| 222 |
-
).print(f"{self.agent_name}: Generating
|
| 223 |
log = self.context.log.log(
|
| 224 |
-
type="agent", heading=f"{self.agent_name}: Generating
|
| 225 |
)
|
| 226 |
|
| 227 |
async for chunk in chain.astream(loop_data["history"]):
|
|
@@ -346,7 +349,7 @@ class Agent:
|
|
| 346 |
return "\n".join([f"{msg.type}: {msg.content}" for msg in messages])
|
| 347 |
|
| 348 |
async def call_utility_llm(
|
| 349 |
-
self, system: str, msg: str,
|
| 350 |
):
|
| 351 |
prompt = ChatPromptTemplate.from_messages(
|
| 352 |
[SystemMessage(content=system), HumanMessage(content=msg)]
|
|
@@ -354,17 +357,6 @@ class Agent:
|
|
| 354 |
|
| 355 |
chain = prompt | self.config.utility_model
|
| 356 |
response = ""
|
| 357 |
-
printer = None
|
| 358 |
-
logger = None
|
| 359 |
-
|
| 360 |
-
if output_label:
|
| 361 |
-
PrintStyle(
|
| 362 |
-
bold=True, font_color="orange", padding=True, background_color="white"
|
| 363 |
-
).print(f"{self.agent_name}: {output_label}:")
|
| 364 |
-
printer = PrintStyle(italic=True, font_color="orange", padding=False)
|
| 365 |
-
logger = self.context.log.log(
|
| 366 |
-
type=log_type, heading=f"{self.agent_name}: {output_label}:"
|
| 367 |
-
)
|
| 368 |
|
| 369 |
formatted_inputs = prompt.format()
|
| 370 |
tokens = int(len(formatted_inputs) / 4)
|
|
@@ -380,11 +372,10 @@ class Agent:
|
|
| 380 |
else:
|
| 381 |
content = str(chunk)
|
| 382 |
|
| 383 |
-
if
|
| 384 |
-
|
|
|
|
| 385 |
response += content
|
| 386 |
-
if logger:
|
| 387 |
-
logger.update(content=response)
|
| 388 |
|
| 389 |
self.rate_limiter.set_output_tokens(int(len(response) / 4))
|
| 390 |
|
|
@@ -396,10 +387,23 @@ class Agent:
|
|
| 396 |
|
| 397 |
async def replace_middle_messages(self, middle_messages):
|
| 398 |
cleanup_prompt = self.read_prompt("fw.msg_cleanup.md")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 399 |
summary = await self.call_utility_llm(
|
| 400 |
system=cleanup_prompt,
|
| 401 |
msg=self.concat_messages(middle_messages),
|
| 402 |
-
|
| 403 |
)
|
| 404 |
new_human_message = HumanMessage(content=summary)
|
| 405 |
return [new_human_message]
|
|
@@ -473,7 +477,7 @@ class Agent:
|
|
| 473 |
await self.append_message(msg, human=True)
|
| 474 |
PrintStyle(font_color="red", padding=True).print(msg)
|
| 475 |
self.context.log.log(
|
| 476 |
-
type="error", content=f"{self.agent_name}: Message misformat
|
| 477 |
)
|
| 478 |
|
| 479 |
def log_from_stream(self, stream: str, logItem: Log.LogItem):
|
|
|
|
| 14 |
import python.helpers.log as Log
|
| 15 |
from python.helpers.dirty_json import DirtyJson
|
| 16 |
from python.helpers.defer import DeferredTask
|
| 17 |
+
from typing import Callable
|
| 18 |
|
| 19 |
|
| 20 |
class AgentContext:
|
|
|
|
| 91 |
embeddings_model: Embeddings
|
| 92 |
prompts_subdir: str = ""
|
| 93 |
memory_subdir: str = ""
|
| 94 |
+
knowledge_subdirs: list[str] = field(default_factory=lambda: ["default", "custom"])
|
| 95 |
auto_memory_count: int = 3
|
| 96 |
auto_memory_skip: int = 2
|
| 97 |
rate_limit_seconds: int = 60
|
|
|
|
| 163 |
async def monologue(self, msg: str):
|
| 164 |
try:
|
| 165 |
|
| 166 |
+
# loop data dictionary to pass to extensions
|
| 167 |
+
loop_data: dict[str, Any] = {
|
| 168 |
+
"message": msg,
|
| 169 |
+
"iteration": -1,
|
| 170 |
+
"history_from": len(self.history),
|
| 171 |
+
}
|
| 172 |
|
| 173 |
+
# call monologue_start extensions
|
| 174 |
+
await self.call_extensions("monologue_start", loop_data=loop_data)
|
|
|
|
| 175 |
|
| 176 |
printer = PrintStyle(italic=True, font_color="#b3ffd9", padding=False)
|
| 177 |
user_message = self.read_prompt("fw.user_message.md", message=msg)
|
| 178 |
await self.append_message(user_message, human=True)
|
| 179 |
|
| 180 |
await self.call_extensions(
|
| 181 |
+
"monologue_start", loop_data=loop_data
|
| 182 |
) # call monologue_end extensions
|
| 183 |
|
| 184 |
+
# let the agent run message loop until he stops it with a response tool
|
| 185 |
+
while True:
|
|
|
|
| 186 |
|
| 187 |
self.context.streaming_agent = self # mark self as current streamer
|
| 188 |
agent_response = ""
|
| 189 |
loop_data["iteration"] += 1
|
| 190 |
|
|
|
|
| 191 |
try:
|
| 192 |
|
| 193 |
# set system prompt and message history
|
|
|
|
| 222 |
font_color="green",
|
| 223 |
padding=True,
|
| 224 |
background_color="white",
|
| 225 |
+
).print(f"{self.agent_name}: Generating")
|
| 226 |
log = self.context.log.log(
|
| 227 |
+
type="agent", heading=f"{self.agent_name}: Generating"
|
| 228 |
)
|
| 229 |
|
| 230 |
async for chunk in chain.astream(loop_data["history"]):
|
|
|
|
| 349 |
return "\n".join([f"{msg.type}: {msg.content}" for msg in messages])
|
| 350 |
|
| 351 |
async def call_utility_llm(
|
| 352 |
+
self, system: str, msg: str, callback: Callable[[str], None] | None = None
|
| 353 |
):
|
| 354 |
prompt = ChatPromptTemplate.from_messages(
|
| 355 |
[SystemMessage(content=system), HumanMessage(content=msg)]
|
|
|
|
| 357 |
|
| 358 |
chain = prompt | self.config.utility_model
|
| 359 |
response = ""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 360 |
|
| 361 |
formatted_inputs = prompt.format()
|
| 362 |
tokens = int(len(formatted_inputs) / 4)
|
|
|
|
| 372 |
else:
|
| 373 |
content = str(chunk)
|
| 374 |
|
| 375 |
+
if callback:
|
| 376 |
+
callback(content)
|
| 377 |
+
|
| 378 |
response += content
|
|
|
|
|
|
|
| 379 |
|
| 380 |
self.rate_limiter.set_output_tokens(int(len(response) / 4))
|
| 381 |
|
|
|
|
| 387 |
|
| 388 |
async def replace_middle_messages(self, middle_messages):
|
| 389 |
cleanup_prompt = self.read_prompt("fw.msg_cleanup.md")
|
| 390 |
+
log_item = self.context.log.log(
|
| 391 |
+
type="util", heading="Mid messages cleanup summary"
|
| 392 |
+
)
|
| 393 |
+
|
| 394 |
+
PrintStyle(
|
| 395 |
+
bold=True, font_color="orange", padding=True, background_color="white"
|
| 396 |
+
).print(f"{self.agent_name}: Mid messages cleanup summary")
|
| 397 |
+
printer = PrintStyle(italic=True, font_color="orange", padding=False)
|
| 398 |
+
|
| 399 |
+
def log_callback(content):
|
| 400 |
+
printer.print(content)
|
| 401 |
+
log_item.stream(content=content)
|
| 402 |
+
|
| 403 |
summary = await self.call_utility_llm(
|
| 404 |
system=cleanup_prompt,
|
| 405 |
msg=self.concat_messages(middle_messages),
|
| 406 |
+
callback=log_callback,
|
| 407 |
)
|
| 408 |
new_human_message = HumanMessage(content=summary)
|
| 409 |
return [new_human_message]
|
|
|
|
| 477 |
await self.append_message(msg, human=True)
|
| 478 |
PrintStyle(font_color="red", padding=True).print(msg)
|
| 479 |
self.context.log.log(
|
| 480 |
+
type="error", content=f"{self.agent_name}: Message misformat"
|
| 481 |
)
|
| 482 |
|
| 483 |
def log_from_stream(self, stream: str, logItem: Log.LogItem):
|
example.env
CHANGED
|
@@ -2,9 +2,10 @@ API_KEY_OPENAI=
|
|
| 2 |
API_KEY_ANTHROPIC=
|
| 3 |
API_KEY_GROQ=
|
| 4 |
API_KEY_PERPLEXITY=
|
| 5 |
-
|
| 6 |
-
|
| 7 |
API_KEY_OPENROUTER=
|
|
|
|
| 8 |
|
| 9 |
API_KEY_OPENAI_AZURE=
|
| 10 |
OPENAI_AZURE_ENDPOINT=
|
|
@@ -22,3 +23,4 @@ PYDEVD_DISABLE_FILE_VALIDATION=1
|
|
| 22 |
OLLAMA_BASE_URL="http://127.0.0.1:11434"
|
| 23 |
LM_STUDIO_BASE_URL="http://127.0.0.1:1234/v1"
|
| 24 |
OPEN_ROUTER_BASE_URL="https://openrouter.ai/api/v1"
|
|
|
|
|
|
| 2 |
API_KEY_ANTHROPIC=
|
| 3 |
API_KEY_GROQ=
|
| 4 |
API_KEY_PERPLEXITY=
|
| 5 |
+
API_KEY_GOOGLE=
|
| 6 |
+
API_KEY_MISTRAL=
|
| 7 |
API_KEY_OPENROUTER=
|
| 8 |
+
API_KEY_SAMBANOVA=
|
| 9 |
|
| 10 |
API_KEY_OPENAI_AZURE=
|
| 11 |
OPENAI_AZURE_ENDPOINT=
|
|
|
|
| 23 |
OLLAMA_BASE_URL="http://127.0.0.1:11434"
|
| 24 |
LM_STUDIO_BASE_URL="http://127.0.0.1:1234/v1"
|
| 25 |
OPEN_ROUTER_BASE_URL="https://openrouter.ai/api/v1"
|
| 26 |
+
SAMBANOVA_BASE_URL="https://fast-api.snova.ai/v1"
|
initialize.py
CHANGED
|
@@ -7,15 +7,16 @@ def initialize():
|
|
| 7 |
chat_llm = models.get_openai_chat(model_name="gpt-4o-mini", temperature=0)
|
| 8 |
# chat_llm = models.get_ollama_chat(model_name="gemma2:latest", temperature=0)
|
| 9 |
# chat_llm = models.get_lmstudio_chat(model_name="lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF", temperature=0)
|
| 10 |
-
# chat_llm = models.get_openrouter_chat(model_name="
|
| 11 |
# chat_llm = models.get_azure_openai_chat(deployment_name="gpt-4o-mini", temperature=0)
|
| 12 |
# chat_llm = models.get_anthropic_chat(model_name="claude-3-5-sonnet-20240620", temperature=0)
|
| 13 |
# chat_llm = models.get_google_chat(model_name="gemini-1.5-flash", temperature=0)
|
| 14 |
# chat_llm = models.get_mistral_chat(model_name="mistral-small-latest", temperature=0)
|
| 15 |
# chat_llm = models.get_groq_chat(model_name="llama-3.1-70b-versatile", temperature=0)
|
| 16 |
-
|
|
|
|
| 17 |
# utility model used for helper functions (cheaper, faster)
|
| 18 |
-
utility_llm = chat_llm
|
| 19 |
|
| 20 |
# embedding model used for memory
|
| 21 |
embedding_llm = models.get_openai_embedding(model_name="text-embedding-3-small")
|
|
@@ -30,7 +31,7 @@ def initialize():
|
|
| 30 |
embeddings_model = embedding_llm,
|
| 31 |
# prompts_subdir = "",
|
| 32 |
# memory_subdir = "",
|
| 33 |
-
|
| 34 |
auto_memory_count = 0,
|
| 35 |
# auto_memory_skip = 2,
|
| 36 |
# rate_limit_seconds = 60,
|
|
|
|
| 7 |
chat_llm = models.get_openai_chat(model_name="gpt-4o-mini", temperature=0)
|
| 8 |
# chat_llm = models.get_ollama_chat(model_name="gemma2:latest", temperature=0)
|
| 9 |
# chat_llm = models.get_lmstudio_chat(model_name="lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF", temperature=0)
|
| 10 |
+
# chat_llm = models.get_openrouter_chat(model_name="openai/o1-mini-2024-09-12")
|
| 11 |
# chat_llm = models.get_azure_openai_chat(deployment_name="gpt-4o-mini", temperature=0)
|
| 12 |
# chat_llm = models.get_anthropic_chat(model_name="claude-3-5-sonnet-20240620", temperature=0)
|
| 13 |
# chat_llm = models.get_google_chat(model_name="gemini-1.5-flash", temperature=0)
|
| 14 |
# chat_llm = models.get_mistral_chat(model_name="mistral-small-latest", temperature=0)
|
| 15 |
# chat_llm = models.get_groq_chat(model_name="llama-3.1-70b-versatile", temperature=0)
|
| 16 |
+
# chat_llm = models.get_sambanova_chat(model_name="Meta-Llama-3.1-70B-Instruct-8k", temperature=0)
|
| 17 |
+
|
| 18 |
# utility model used for helper functions (cheaper, faster)
|
| 19 |
+
utility_llm = chat_llm
|
| 20 |
|
| 21 |
# embedding model used for memory
|
| 22 |
embedding_llm = models.get_openai_embedding(model_name="text-embedding-3-small")
|
|
|
|
| 31 |
embeddings_model = embedding_llm,
|
| 32 |
# prompts_subdir = "",
|
| 33 |
# memory_subdir = "",
|
| 34 |
+
knowledge_subdirs = ["default","custom"],
|
| 35 |
auto_memory_count = 0,
|
| 36 |
# auto_memory_skip = 2,
|
| 37 |
# rate_limit_seconds = 60,
|
knowledge/custom/.gitkeep
ADDED
|
File without changes
|
knowledge/custom/main/.gitkeep
ADDED
|
File without changes
|
knowledge/custom/solutions/.gitkeep
ADDED
|
File without changes
|
knowledge/default/.gitkeep
ADDED
|
File without changes
|
knowledge/default/main/.gitkeep
ADDED
|
File without changes
|
knowledge/default/solutions/.gitkeep
ADDED
|
File without changes
|
knowledge/default/solutions/yt_download.md
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Problem
|
| 2 |
+
Download a YouTube video
|
| 3 |
+
# Solution
|
| 4 |
+
1. If you don't have exact URL, use knowledge_tool to get it
|
| 5 |
+
2. Pip install yt-dlp and ffmpeg
|
| 6 |
+
3. Download the video using yt-dlp command: 'yt-dlp YT_URL', replace YT_URL with your video URL.
|
models.py
CHANGED
|
@@ -84,3 +84,8 @@ def get_openrouter_chat(model_name: str, api_key=get_api_key("openrouter"), temp
|
|
| 84 |
|
| 85 |
def get_openrouter_embedding(model_name: str, api_key=get_api_key("openrouter"), base_url=os.getenv("OPEN_ROUTER_BASE_URL") or "https://openrouter.ai/api/v1"):
|
| 86 |
return OpenAIEmbeddings(model=model_name, api_key=api_key, base_url=base_url) # type: ignore
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 84 |
|
| 85 |
def get_openrouter_embedding(model_name: str, api_key=get_api_key("openrouter"), base_url=os.getenv("OPEN_ROUTER_BASE_URL") or "https://openrouter.ai/api/v1"):
|
| 86 |
return OpenAIEmbeddings(model=model_name, api_key=api_key, base_url=base_url) # type: ignore
|
| 87 |
+
|
| 88 |
+
# Sambanova models
|
| 89 |
+
def get_sambanova_chat(model_name: str, api_key=get_api_key("sambanova"), temperature=DEFAULT_TEMPERATURE, base_url=os.getenv("SAMBANOVA_BASE_URL") or "https://fast-api.snova.ai/v1", max_tokens=1024):
|
| 90 |
+
return ChatOpenAI(api_key=api_key, model=model_name, temperature=temperature, base_url=base_url, max_tokens=max_tokens) # type: ignore
|
| 91 |
+
|
prompts/default/agent.system.memories.md
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Memories on the topic
|
| 2 |
+
- following are your memories about current topic:
|
| 3 |
+
|
| 4 |
+
{{memories}}
|
prompts/default/memory.memories_query.sys.md
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# AI's job
|
| 2 |
+
1. The AI receives a MESSAGE from USER and short conversation HISTORY for reference
|
| 3 |
+
2. AI analyzes the MESSAGE and HISTORY for CONTEXT
|
| 4 |
+
3. AI provide a search query for search engine where previous memories are stored based on CONTEXT
|
| 5 |
+
|
| 6 |
+
# Format
|
| 7 |
+
- The response format is a plain text string containing the query
|
| 8 |
+
- No other text, no formatting
|
| 9 |
+
|
| 10 |
+
# Example
|
| 11 |
+
```json
|
| 12 |
+
USER: "Write a song about my dog"
|
| 13 |
+
AI: "user's dog"
|
| 14 |
+
USER: "following the results of the biology project, summarize..."
|
| 15 |
+
AI: "biology project results"
|
| 16 |
+
```
|
| 17 |
+
|
| 18 |
+
# HISTORY:
|
| 19 |
+
{{history}}
|
prompts/default/memory.memories_sum.sys.md
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Assistant's job
|
| 2 |
+
1. The assistant receives a HISTORY of conversation between USER and AGENT
|
| 3 |
+
2. Assistant searches for relevant information from the HISTORY
|
| 4 |
+
3. Assistant writes notes about information worth memorizing for further use
|
| 5 |
+
|
| 6 |
+
# Format
|
| 7 |
+
- The response format is a JSON array of text notes containing facts to memorize
|
| 8 |
+
- If the history does not contain any useful information, the response will be an empty JSON array.
|
| 9 |
+
|
| 10 |
+
# Example
|
| 11 |
+
~~~json
|
| 12 |
+
[
|
| 13 |
+
"User's name is John Doe",
|
| 14 |
+
"User's age is 30"
|
| 15 |
+
]
|
| 16 |
+
~~~
|
| 17 |
+
|
| 18 |
+
# Rules
|
| 19 |
+
- Focus only on relevant details and facts like names, IDs, instructions, opinions etc.
|
| 20 |
+
- Do not include irrelevant details that are of no use in the future
|
| 21 |
+
- Do not memorize facts that change like time, date etc.
|
| 22 |
+
- Do not add your own details that are not specifically mentioned in the history
|
python/extensions/message_loop_prompts/recall_memories.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from agent import Agent
|
| 2 |
+
from python.helpers.extension import Extension
|
| 3 |
+
from python.helpers.files import read_file
|
| 4 |
+
from python.helpers.vector_db import Area
|
| 5 |
+
import json
|
| 6 |
+
from python.helpers import errors, files
|
| 7 |
+
from python.tools.memory_tool import get_db
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class RecallMemories(Extension):
|
| 11 |
+
|
| 12 |
+
INTERVAL = 3
|
| 13 |
+
HISTORY = 5
|
| 14 |
+
RESULTS = 3
|
| 15 |
+
THRESHOLD = 0.1
|
| 16 |
+
|
| 17 |
+
async def execute(self, loop_data={}, **kwargs):
|
| 18 |
+
|
| 19 |
+
iter = loop_data.get("iteration", 0)
|
| 20 |
+
if (
|
| 21 |
+
iter % RecallMemories.INTERVAL == 0
|
| 22 |
+
): # every 3 iterations (or the first one) recall memories
|
| 23 |
+
await self.search_memories(loop_data=loop_data, **kwargs)
|
| 24 |
+
|
| 25 |
+
async def search_memories(self, loop_data={}, **kwargs):
|
| 26 |
+
# try:
|
| 27 |
+
# show temp info message
|
| 28 |
+
self.agent.context.log.log(
|
| 29 |
+
type="info", content="Searching memories...", temp=True
|
| 30 |
+
)
|
| 31 |
+
|
| 32 |
+
# show full util message, this will hide temp message immediately if turned on
|
| 33 |
+
log_item = self.agent.context.log.log(
|
| 34 |
+
type="util",
|
| 35 |
+
heading="Searching memories...",
|
| 36 |
+
)
|
| 37 |
+
|
| 38 |
+
# get system message and chat history for util llm
|
| 39 |
+
msgs_text = self.agent.concat_messages(
|
| 40 |
+
self.agent.history[-RecallMemories.HISTORY :]
|
| 41 |
+
) # only last X messages
|
| 42 |
+
system = self.agent.read_prompt(
|
| 43 |
+
"memory.memories_query.sys.md", history=msgs_text
|
| 44 |
+
)
|
| 45 |
+
|
| 46 |
+
# log query streamed by LLM
|
| 47 |
+
def log_callback(content):
|
| 48 |
+
log_item.stream(query=content)
|
| 49 |
+
|
| 50 |
+
# call util llm to summarize conversation
|
| 51 |
+
query = await self.agent.call_utility_llm(
|
| 52 |
+
system=system, msg=loop_data["message"], callback=log_callback
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
# get solutions database
|
| 56 |
+
vdb = get_db(self.agent)
|
| 57 |
+
|
| 58 |
+
memories = vdb.search_similarity_threshold(
|
| 59 |
+
query=query,
|
| 60 |
+
results=RecallMemories.RESULTS,
|
| 61 |
+
threshold=RecallMemories.THRESHOLD,
|
| 62 |
+
filter=f"area != '{Area.SOLUTIONS.value}'" # exclude solutions
|
| 63 |
+
)
|
| 64 |
+
|
| 65 |
+
# log the short result
|
| 66 |
+
if not isinstance(memories, list) or len(memories) == 0:
|
| 67 |
+
log_item.update(
|
| 68 |
+
heading="No useful memories found.",
|
| 69 |
+
)
|
| 70 |
+
return
|
| 71 |
+
else:
|
| 72 |
+
log_item.update(
|
| 73 |
+
heading=f"\n\n{len(memories)} memories found.",
|
| 74 |
+
)
|
| 75 |
+
|
| 76 |
+
# concatenate memory.page_content in memories:
|
| 77 |
+
memories_text = ""
|
| 78 |
+
for memory in memories:
|
| 79 |
+
memories_text += memory.page_content + "\n\n"
|
| 80 |
+
memories_text = memories_text.strip()
|
| 81 |
+
|
| 82 |
+
# log the full results
|
| 83 |
+
log_item.update(memories=memories_text)
|
| 84 |
+
|
| 85 |
+
# place to prompt
|
| 86 |
+
memories_prompt = self.agent.read_prompt(
|
| 87 |
+
"agent.system.memories.md", memories=memories_text
|
| 88 |
+
)
|
| 89 |
+
|
| 90 |
+
# append to system message
|
| 91 |
+
loop_data["system"] += memories_prompt
|
| 92 |
+
|
| 93 |
+
# except Exception as e:
|
| 94 |
+
# err = errors.format_error(e)
|
| 95 |
+
# self.agent.context.log.log(
|
| 96 |
+
# type="error", heading="Recall memories extension error:", content=err
|
| 97 |
+
# )
|
python/extensions/message_loop_prompts/recall_solutions.py
CHANGED
|
@@ -1,8 +1,10 @@
|
|
| 1 |
from agent import Agent
|
| 2 |
from python.helpers.extension import Extension
|
| 3 |
from python.helpers.files import read_file
|
| 4 |
-
from python.helpers.vector_db import
|
| 5 |
import json
|
|
|
|
|
|
|
| 6 |
|
| 7 |
|
| 8 |
class RecallSolutions(Extension):
|
|
@@ -15,59 +17,81 @@ class RecallSolutions(Extension):
|
|
| 15 |
async def execute(self, loop_data={}, **kwargs):
|
| 16 |
|
| 17 |
iter = loop_data.get("iteration", 0)
|
| 18 |
-
if
|
|
|
|
|
|
|
| 19 |
await self.search_solutions(loop_data=loop_data, **kwargs)
|
| 20 |
|
| 21 |
-
|
| 22 |
async def search_solutions(self, loop_data={}, **kwargs):
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
)
|
| 26 |
-
|
| 27 |
-
# get system message and chat history for util llm
|
| 28 |
-
msgs_text = self.agent.concat_messages(
|
| 29 |
-
self.agent.history[-RecallSolutions.HISTORY:]
|
| 30 |
-
) # only last X messages
|
| 31 |
-
system = self.agent.read_prompt(
|
| 32 |
-
"memory.solutions_query.sys.md", history=msgs_text
|
| 33 |
-
)
|
| 34 |
-
|
| 35 |
-
# call util llm to summarize conversation
|
| 36 |
-
query = await self.agent.call_utility_llm(
|
| 37 |
-
system=system, msg=loop_data["message"]
|
| 38 |
-
)
|
| 39 |
-
|
| 40 |
-
# get solutions database
|
| 41 |
-
vdb = get_or_create(
|
| 42 |
-
logger=self.agent.context.log,
|
| 43 |
-
embeddings_model=self.agent.config.embeddings_model,
|
| 44 |
-
memory_dir="./memory/solutions",
|
| 45 |
-
knowledge_dir="",
|
| 46 |
-
)
|
| 47 |
-
|
| 48 |
-
solutions = vdb.search_similarity_threshold(
|
| 49 |
-
query=query, results=RecallSolutions.RESULTS, threshold=RecallSolutions.THRESHOLD
|
| 50 |
-
)
|
| 51 |
-
|
| 52 |
-
if not isinstance(solutions, list) or len(solutions) == 0:
|
| 53 |
self.agent.context.log.log(
|
| 54 |
-
type="info", content="
|
| 55 |
)
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
self.agent.context.log.log(
|
| 59 |
-
type="
|
| 60 |
-
|
| 61 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 62 |
)
|
| 63 |
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 68 |
|
| 69 |
-
|
| 70 |
-
|
| 71 |
|
| 72 |
-
#
|
| 73 |
-
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
from agent import Agent
|
| 2 |
from python.helpers.extension import Extension
|
| 3 |
from python.helpers.files import read_file
|
| 4 |
+
from python.helpers.vector_db import Area
|
| 5 |
import json
|
| 6 |
+
from python.helpers import errors, files
|
| 7 |
+
from python.tools.memory_tool import get_db
|
| 8 |
|
| 9 |
|
| 10 |
class RecallSolutions(Extension):
|
|
|
|
| 17 |
async def execute(self, loop_data={}, **kwargs):
|
| 18 |
|
| 19 |
iter = loop_data.get("iteration", 0)
|
| 20 |
+
if (
|
| 21 |
+
iter % RecallSolutions.INTERVAL == 0
|
| 22 |
+
): # every 3 iterations (or the first one) recall solution memories
|
| 23 |
await self.search_solutions(loop_data=loop_data, **kwargs)
|
| 24 |
|
|
|
|
| 25 |
async def search_solutions(self, loop_data={}, **kwargs):
|
| 26 |
+
# try:
|
| 27 |
+
# show temp info message
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 28 |
self.agent.context.log.log(
|
| 29 |
+
type="info", content="Searching memory for solutions...", temp=True
|
| 30 |
)
|
| 31 |
+
|
| 32 |
+
# show full util message, this will hide temp message immediately if turned on
|
| 33 |
+
log_item = self.agent.context.log.log(
|
| 34 |
+
type="util",
|
| 35 |
+
heading="Searching memory for solutions...",
|
| 36 |
+
)
|
| 37 |
+
|
| 38 |
+
# get system message and chat history for util llm
|
| 39 |
+
msgs_text = self.agent.concat_messages(
|
| 40 |
+
self.agent.history[-RecallSolutions.HISTORY :]
|
| 41 |
+
) # only last X messages
|
| 42 |
+
system = self.agent.read_prompt(
|
| 43 |
+
"memory.solutions_query.sys.md", history=msgs_text
|
| 44 |
+
)
|
| 45 |
+
|
| 46 |
+
# log query streamed by LLM
|
| 47 |
+
def log_callback(content):
|
| 48 |
+
log_item.stream(query=content)
|
| 49 |
+
|
| 50 |
+
# call util llm to summarize conversation
|
| 51 |
+
query = await self.agent.call_utility_llm(
|
| 52 |
+
system=system, msg=loop_data["message"], callback=log_callback
|
| 53 |
)
|
| 54 |
|
| 55 |
+
# get solutions database
|
| 56 |
+
vdb = get_db(self.agent)
|
| 57 |
+
|
| 58 |
+
solutions = vdb.search_similarity_threshold(
|
| 59 |
+
query=query,
|
| 60 |
+
results=RecallSolutions.RESULTS,
|
| 61 |
+
threshold=RecallSolutions.THRESHOLD,
|
| 62 |
+
filter=f"area == '{Area.SOLUTIONS.value}'"
|
| 63 |
+
)
|
| 64 |
+
|
| 65 |
+
# log the short result
|
| 66 |
+
if not isinstance(solutions, list) or len(solutions) == 0:
|
| 67 |
+
log_item.update(
|
| 68 |
+
heading="No successful solution memories found.",
|
| 69 |
+
)
|
| 70 |
+
return
|
| 71 |
+
else:
|
| 72 |
+
log_item.update(
|
| 73 |
+
heading=f"\n\n{len(solutions)} successful solution memories found.",
|
| 74 |
+
)
|
| 75 |
+
|
| 76 |
+
# concatenate solution.page_content in solutions:
|
| 77 |
+
solutions_text = ""
|
| 78 |
+
for solution in solutions:
|
| 79 |
+
solutions_text += solution.page_content + "\n\n"
|
| 80 |
+
solutions_text = solutions_text.strip()
|
| 81 |
+
|
| 82 |
+
# log the full results
|
| 83 |
+
log_item.update(solutions=solutions_text)
|
| 84 |
+
|
| 85 |
+
# place to prompt
|
| 86 |
+
solutions_prompt = self.agent.read_prompt(
|
| 87 |
+
"agent.system.solutions.md", solutions=solutions_text
|
| 88 |
+
)
|
| 89 |
|
| 90 |
+
# append to system message
|
| 91 |
+
loop_data["system"] += solutions_prompt
|
| 92 |
|
| 93 |
+
# except Exception as e:
|
| 94 |
+
# err = errors.format_error(e)
|
| 95 |
+
# self.agent.context.log.log(
|
| 96 |
+
# type="error", heading="Recall solutions extension error:", content=err
|
| 97 |
+
# )
|
python/extensions/monologue_end/50_memorize_memories.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from agent import Agent
|
| 2 |
+
from python.helpers.extension import Extension
|
| 3 |
+
import python.helpers.files as files
|
| 4 |
+
from python.helpers.vector_db import Area
|
| 5 |
+
import json
|
| 6 |
+
from python.helpers.dirty_json import DirtyJson
|
| 7 |
+
from python.helpers import errors
|
| 8 |
+
from python.tools.memory_tool import get_db
|
| 9 |
+
|
| 10 |
+
class MemorizeMemories(Extension):
|
| 11 |
+
|
| 12 |
+
async def execute(self, loop_data={}, **kwargs):
|
| 13 |
+
# try:
|
| 14 |
+
|
| 15 |
+
# show temp info message
|
| 16 |
+
self.agent.context.log.log(
|
| 17 |
+
type="info", content="Memorizing new information...", temp=True
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
# show full util message, this will hide temp message immediately if turned on
|
| 21 |
+
log_item = self.agent.context.log.log(
|
| 22 |
+
type="util",
|
| 23 |
+
heading="Memorizing new information...",
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
# get system message and chat history for util llm
|
| 27 |
+
system = self.agent.read_prompt("memory.memories_sum.sys.md")
|
| 28 |
+
msgs_text = self.agent.concat_messages(self.agent.history)
|
| 29 |
+
|
| 30 |
+
# log query streamed by LLM
|
| 31 |
+
def log_callback(content):
|
| 32 |
+
log_item.stream(content=content)
|
| 33 |
+
|
| 34 |
+
# call util llm to find info in history
|
| 35 |
+
memories_json = await self.agent.call_utility_llm(
|
| 36 |
+
system=system,
|
| 37 |
+
msg=msgs_text,
|
| 38 |
+
callback=log_callback,
|
| 39 |
+
)
|
| 40 |
+
|
| 41 |
+
memories = DirtyJson.parse_string(memories_json)
|
| 42 |
+
|
| 43 |
+
if not isinstance(memories, list) or len(memories) == 0:
|
| 44 |
+
log_item.update(heading="No useful information to memorize.")
|
| 45 |
+
return
|
| 46 |
+
else:
|
| 47 |
+
log_item.update(
|
| 48 |
+
heading=f"{len(memories)} entries to memorize."
|
| 49 |
+
)
|
| 50 |
+
|
| 51 |
+
# save chat history
|
| 52 |
+
vdb = get_db(self.agent)
|
| 53 |
+
|
| 54 |
+
memories_txt = ""
|
| 55 |
+
for memory in memories:
|
| 56 |
+
# solution to plain text:
|
| 57 |
+
txt = f"{memory}"
|
| 58 |
+
memories_txt += txt + "\n\n"
|
| 59 |
+
vdb.insert_text(
|
| 60 |
+
text=txt, metadata={"area": Area.MAIN.value}
|
| 61 |
+
)
|
| 62 |
+
|
| 63 |
+
memories_txt = memories_txt.strip()
|
| 64 |
+
log_item.update(memories=memories_txt)
|
| 65 |
+
log_item.update(
|
| 66 |
+
result=f"{len(memories)} entries memorized.",
|
| 67 |
+
heading=f"{len(memories)} entries memorized.",
|
| 68 |
+
)
|
| 69 |
+
|
| 70 |
+
# except Exception as e:
|
| 71 |
+
# err = errors.format_error(e)
|
| 72 |
+
# self.agent.context.log.log(
|
| 73 |
+
# type="error", heading="Memorize memories extension error:", content=err
|
| 74 |
+
# )
|
python/extensions/monologue_end/{memorize_solutions.py → 51_memorize_solutions.py}
RENAMED
|
@@ -1,70 +1,74 @@
|
|
| 1 |
from agent import Agent
|
| 2 |
from python.helpers.extension import Extension
|
| 3 |
-
|
| 4 |
-
from python.helpers.vector_db import
|
| 5 |
import json
|
| 6 |
from python.helpers.dirty_json import DirtyJson
|
| 7 |
from python.helpers import errors
|
| 8 |
-
|
| 9 |
|
| 10 |
class MemorizeSolutions(Extension):
|
| 11 |
|
| 12 |
async def execute(self, loop_data={}, **kwargs):
|
| 13 |
-
try:
|
|
|
|
|
|
|
| 14 |
self.agent.context.log.log(
|
| 15 |
type="info", content="Memorizing succesful solutions...", temp=True
|
| 16 |
)
|
| 17 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 18 |
# get system message and chat history for util llm
|
| 19 |
system = self.agent.read_prompt("memory.solutions_sum.sys.md")
|
| 20 |
msgs_text = self.agent.concat_messages(self.agent.history)
|
| 21 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
# call util llm to find solutions in history
|
| 23 |
solutions_json = await self.agent.call_utility_llm(
|
| 24 |
system=system,
|
| 25 |
msg=msgs_text,
|
| 26 |
-
|
| 27 |
)
|
| 28 |
|
| 29 |
solutions = DirtyJson.parse_string(solutions_json)
|
| 30 |
|
| 31 |
if not isinstance(solutions, list) or len(solutions) == 0:
|
| 32 |
-
|
| 33 |
-
type="info", content="No succesful solutions found.", temp=False
|
| 34 |
-
)
|
| 35 |
return
|
| 36 |
else:
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
content=f"{len(solutions)} succesful solutions found.",
|
| 40 |
-
temp=True,
|
| 41 |
)
|
| 42 |
|
| 43 |
# save chat history
|
| 44 |
-
vdb =
|
| 45 |
-
logger=self.agent.context.log,
|
| 46 |
-
embeddings_model=self.agent.config.embeddings_model,
|
| 47 |
-
memory_dir="./memory/solutions",
|
| 48 |
-
knowledge_dir="",
|
| 49 |
-
)
|
| 50 |
|
|
|
|
| 51 |
for solution in solutions:
|
| 52 |
# solution to plain text:
|
| 53 |
-
txt =
|
| 54 |
-
|
| 55 |
-
)
|
| 56 |
vdb.insert_text(
|
| 57 |
-
text=txt,
|
| 58 |
-
)
|
| 59 |
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
|
|
|
| 64 |
)
|
| 65 |
|
| 66 |
-
except Exception as e:
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
|
|
|
| 1 |
from agent import Agent
|
| 2 |
from python.helpers.extension import Extension
|
| 3 |
+
import python.helpers.files as files
|
| 4 |
+
from python.helpers.vector_db import Area
|
| 5 |
import json
|
| 6 |
from python.helpers.dirty_json import DirtyJson
|
| 7 |
from python.helpers import errors
|
| 8 |
+
from python.tools.memory_tool import get_db
|
| 9 |
|
| 10 |
class MemorizeSolutions(Extension):
|
| 11 |
|
| 12 |
async def execute(self, loop_data={}, **kwargs):
|
| 13 |
+
# try:
|
| 14 |
+
|
| 15 |
+
# show temp info message
|
| 16 |
self.agent.context.log.log(
|
| 17 |
type="info", content="Memorizing succesful solutions...", temp=True
|
| 18 |
)
|
| 19 |
|
| 20 |
+
# show full util message, this will hide temp message immediately if turned on
|
| 21 |
+
log_item = self.agent.context.log.log(
|
| 22 |
+
type="util",
|
| 23 |
+
heading="Memorizing succesful solutions...",
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
# get system message and chat history for util llm
|
| 27 |
system = self.agent.read_prompt("memory.solutions_sum.sys.md")
|
| 28 |
msgs_text = self.agent.concat_messages(self.agent.history)
|
| 29 |
|
| 30 |
+
# log query streamed by LLM
|
| 31 |
+
def log_callback(content):
|
| 32 |
+
log_item.stream(content=content)
|
| 33 |
+
|
| 34 |
# call util llm to find solutions in history
|
| 35 |
solutions_json = await self.agent.call_utility_llm(
|
| 36 |
system=system,
|
| 37 |
msg=msgs_text,
|
| 38 |
+
callback=log_callback,
|
| 39 |
)
|
| 40 |
|
| 41 |
solutions = DirtyJson.parse_string(solutions_json)
|
| 42 |
|
| 43 |
if not isinstance(solutions, list) or len(solutions) == 0:
|
| 44 |
+
log_item.update(heading="No successful solutions to memorize.")
|
|
|
|
|
|
|
| 45 |
return
|
| 46 |
else:
|
| 47 |
+
log_item.update(
|
| 48 |
+
heading=f"{len(solutions)} successful solutions to memorize."
|
|
|
|
|
|
|
| 49 |
)
|
| 50 |
|
| 51 |
# save chat history
|
| 52 |
+
vdb = get_db(self.agent)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 53 |
|
| 54 |
+
solutions_txt = ""
|
| 55 |
for solution in solutions:
|
| 56 |
# solution to plain text:
|
| 57 |
+
txt = f"# Problem\n {solution['problem']}\n# Solution\n {solution['solution']}"
|
| 58 |
+
solutions_txt += txt + "\n\n"
|
|
|
|
| 59 |
vdb.insert_text(
|
| 60 |
+
text=txt, metadata={"area": Area.SOLUTIONS.value}
|
| 61 |
+
)
|
| 62 |
|
| 63 |
+
solutions_txt = solutions_txt.strip()
|
| 64 |
+
log_item.update(solutions=solutions_txt)
|
| 65 |
+
log_item.update(
|
| 66 |
+
result=f"{len(solutions)} solutions memorized.",
|
| 67 |
+
heading=f"{len(solutions)} solutions memorized.",
|
| 68 |
)
|
| 69 |
|
| 70 |
+
# except Exception as e:
|
| 71 |
+
# err = errors.format_error(e)
|
| 72 |
+
# self.agent.context.log.log(
|
| 73 |
+
# type="error", heading="Memorize solutions extension error:", content=err
|
| 74 |
+
# )
|
python/extensions/monologue_end/90_waiting_for_input_msg.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from agent import Agent
|
| 2 |
+
from python.helpers.extension import Extension
|
| 3 |
+
|
| 4 |
+
class WaitingForInputMsg(Extension):
|
| 5 |
+
|
| 6 |
+
async def execute(self, loop_data={}, **kwargs):
|
| 7 |
+
# show temp info message
|
| 8 |
+
if self.agent.number == 0:
|
| 9 |
+
self.agent.context.log.log(
|
| 10 |
+
type="util", heading="Waiting for input", temp=True
|
| 11 |
+
)
|
| 12 |
+
|
python/extensions/msg_loop_break/zero_memorize_history.py
DELETED
|
@@ -1,35 +0,0 @@
|
|
| 1 |
-
from agent import Agent
|
| 2 |
-
from python.helpers.extension import Extension
|
| 3 |
-
from python.helpers.files import read_file
|
| 4 |
-
from python.helpers.vector_db import VectorDB
|
| 5 |
-
import json
|
| 6 |
-
|
| 7 |
-
class MemorizeHistory(Extension):
|
| 8 |
-
|
| 9 |
-
async def execute(self, **kwargs):
|
| 10 |
-
if self.agent.number != 0: return #only agent 0 will memorize chat history with user
|
| 11 |
-
|
| 12 |
-
self.agent.context.log.log(type="info", content="Memorizing chat history...", temp=True)
|
| 13 |
-
|
| 14 |
-
#get system message and chat history for util llm
|
| 15 |
-
system = self.agent.read_prompt("fw.memory.hist_sum.sys")
|
| 16 |
-
msgs = []
|
| 17 |
-
for msg in self.agent.history:
|
| 18 |
-
content = msg.get("content", "")
|
| 19 |
-
if content:
|
| 20 |
-
msgs.append(content)
|
| 21 |
-
msgs_json = json.dumps(msgs)
|
| 22 |
-
|
| 23 |
-
#call util llm to summarize conversation
|
| 24 |
-
summary = await self.agent.call_utility_llm(system=system,msg=msgs_json,output_label="")
|
| 25 |
-
|
| 26 |
-
#save chat history
|
| 27 |
-
vdb = VectorDB(
|
| 28 |
-
logger=self.agent.context.log,
|
| 29 |
-
embeddings_model=self.agent.config.embeddings_model,
|
| 30 |
-
memory_dir="./memory/history",
|
| 31 |
-
knowledge_dir=""
|
| 32 |
-
)
|
| 33 |
-
|
| 34 |
-
self.agent.context.log.log(type="info", content="Chat history memorized.", temp=True)
|
| 35 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
python/helpers/knowledge_import.py
CHANGED
|
@@ -4,13 +4,17 @@ import hashlib
|
|
| 4 |
import json
|
| 5 |
from typing import Any, Dict, Literal, TypedDict
|
| 6 |
from langchain_community.document_loaders import (
|
| 7 |
-
CSVLoader,
|
| 8 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
)
|
| 10 |
from python.helpers import files
|
| 11 |
-
from python.helpers.log import
|
| 12 |
|
| 13 |
-
text_loader_kwargs = {
|
| 14 |
|
| 15 |
|
| 16 |
class KnowledgeImport(TypedDict):
|
|
@@ -23,65 +27,91 @@ class KnowledgeImport(TypedDict):
|
|
| 23 |
|
| 24 |
def calculate_checksum(file_path: str) -> str:
|
| 25 |
hasher = hashlib.md5()
|
| 26 |
-
with open(file_path,
|
| 27 |
buf = f.read()
|
| 28 |
hasher.update(buf)
|
| 29 |
return hasher.hexdigest()
|
| 30 |
|
| 31 |
-
|
|
|
|
|
|
|
|
|
|
| 32 |
knowledge_dir = files.get_abs_path(knowledge_dir)
|
| 33 |
-
|
|
|
|
| 34 |
|
| 35 |
# Mapping file extensions to corresponding loader classes
|
| 36 |
file_types_loaders = {
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
}
|
| 44 |
|
| 45 |
cnt_files = 0
|
| 46 |
cnt_docs = 0
|
| 47 |
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 79 |
|
| 80 |
# loop index where state is not set and mark it as removed
|
| 81 |
for file_key, file_data in index.items():
|
| 82 |
-
if not file_data.get(
|
| 83 |
-
index[file_key][
|
| 84 |
|
| 85 |
print(f"Processed {cnt_docs} documents from {cnt_files} files.")
|
| 86 |
-
|
|
|
|
|
|
|
|
|
|
| 87 |
return index
|
|
|
|
| 4 |
import json
|
| 5 |
from typing import Any, Dict, Literal, TypedDict
|
| 6 |
from langchain_community.document_loaders import (
|
| 7 |
+
CSVLoader,
|
| 8 |
+
JSONLoader,
|
| 9 |
+
PyPDFLoader,
|
| 10 |
+
TextLoader,
|
| 11 |
+
UnstructuredHTMLLoader,
|
| 12 |
+
UnstructuredMarkdownLoader,
|
| 13 |
)
|
| 14 |
from python.helpers import files
|
| 15 |
+
from python.helpers.log import LogItem
|
| 16 |
|
| 17 |
+
text_loader_kwargs = {"autodetect_encoding": True}
|
| 18 |
|
| 19 |
|
| 20 |
class KnowledgeImport(TypedDict):
|
|
|
|
| 27 |
|
| 28 |
def calculate_checksum(file_path: str) -> str:
|
| 29 |
hasher = hashlib.md5()
|
| 30 |
+
with open(file_path, "rb") as f:
|
| 31 |
buf = f.read()
|
| 32 |
hasher.update(buf)
|
| 33 |
return hasher.hexdigest()
|
| 34 |
|
| 35 |
+
|
| 36 |
+
def load_knowledge(
|
| 37 |
+
log_item: LogItem | None, knowledge_dir: str, index: Dict[str, KnowledgeImport]
|
| 38 |
+
) -> Dict[str, KnowledgeImport]:
|
| 39 |
knowledge_dir = files.get_abs_path(knowledge_dir)
|
| 40 |
+
|
| 41 |
+
from python.helpers.vector_db import Area
|
| 42 |
|
| 43 |
# Mapping file extensions to corresponding loader classes
|
| 44 |
file_types_loaders = {
|
| 45 |
+
"txt": TextLoader,
|
| 46 |
+
"pdf": PyPDFLoader,
|
| 47 |
+
"csv": CSVLoader,
|
| 48 |
+
"html": UnstructuredHTMLLoader,
|
| 49 |
+
"json": JSONLoader,
|
| 50 |
+
"md": UnstructuredMarkdownLoader,
|
| 51 |
}
|
| 52 |
|
| 53 |
cnt_files = 0
|
| 54 |
cnt_docs = 0
|
| 55 |
|
| 56 |
+
for area in Area:
|
| 57 |
+
subdir = files.get_abs_path(knowledge_dir, area.value)
|
| 58 |
+
|
| 59 |
+
if not os.path.exists(subdir):
|
| 60 |
+
os.makedirs(subdir)
|
| 61 |
+
continue
|
| 62 |
+
|
| 63 |
+
# Fetch all files in the directory with specified extensions
|
| 64 |
+
kn_files = glob.glob(subdir + "/**/*", recursive=True)
|
| 65 |
+
if kn_files:
|
| 66 |
+
print(f"Found {len(kn_files)} knowledge files in {subdir}, processing...")
|
| 67 |
+
if log_item:
|
| 68 |
+
log_item.stream(
|
| 69 |
+
progress=f"\nFound {len(kn_files)} knowledge files in {subdir}, processing...",
|
| 70 |
+
)
|
| 71 |
+
|
| 72 |
+
for file_path in kn_files:
|
| 73 |
+
ext = file_path.split(".")[-1].lower()
|
| 74 |
+
if ext in file_types_loaders:
|
| 75 |
+
checksum = calculate_checksum(file_path)
|
| 76 |
+
file_key = file_path # os.path.relpath(file_path, knowledge_dir)
|
| 77 |
+
|
| 78 |
+
# Load existing data from the index or create a new entry
|
| 79 |
+
file_data = index.get(file_key, {})
|
| 80 |
+
|
| 81 |
+
if file_data.get("checksum") == checksum:
|
| 82 |
+
file_data["state"] = "original"
|
| 83 |
+
else:
|
| 84 |
+
file_data["state"] = "changed"
|
| 85 |
+
|
| 86 |
+
if file_data["state"] == "changed":
|
| 87 |
+
file_data["checksum"] = checksum
|
| 88 |
+
loader_cls = file_types_loaders[ext]
|
| 89 |
+
loader = loader_cls(
|
| 90 |
+
file_path,
|
| 91 |
+
**(
|
| 92 |
+
text_loader_kwargs
|
| 93 |
+
if ext in ["txt", "csv", "html", "md"]
|
| 94 |
+
else {}
|
| 95 |
+
),
|
| 96 |
+
)
|
| 97 |
+
file_data["documents"] = loader.load_and_split()
|
| 98 |
+
for doc in file_data["documents"]:
|
| 99 |
+
doc.metadata["area"] = area.value
|
| 100 |
+
cnt_files += 1
|
| 101 |
+
cnt_docs += len(file_data["documents"])
|
| 102 |
+
# print(f"Imported {len(file_data['documents'])} documents from {file_path}")
|
| 103 |
+
|
| 104 |
+
# Update the index
|
| 105 |
+
index[file_key] = file_data # type: ignore
|
| 106 |
|
| 107 |
# loop index where state is not set and mark it as removed
|
| 108 |
for file_key, file_data in index.items():
|
| 109 |
+
if not file_data.get("state", ""):
|
| 110 |
+
index[file_key]["state"] = "removed"
|
| 111 |
|
| 112 |
print(f"Processed {cnt_docs} documents from {cnt_files} files.")
|
| 113 |
+
if log_item:
|
| 114 |
+
log_item.stream(
|
| 115 |
+
progress=f"\nProcessed {cnt_docs} documents from {cnt_files} files."
|
| 116 |
+
)
|
| 117 |
return index
|
python/helpers/log.py
CHANGED
|
@@ -5,20 +5,23 @@ import uuid
|
|
| 5 |
|
| 6 |
|
| 7 |
type Type = Literal[
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
|
|
|
|
|
|
|
|
|
| 18 |
|
| 19 |
@dataclass
|
| 20 |
class LogItem:
|
| 21 |
-
log:
|
| 22 |
no: int
|
| 23 |
type: str
|
| 24 |
heading: str
|
|
@@ -30,9 +33,35 @@ class LogItem:
|
|
| 30 |
def __post_init__(self):
|
| 31 |
self.guid = self.log.guid
|
| 32 |
|
| 33 |
-
def update(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
if self.guid == self.log.guid:
|
| 35 |
-
self.log.update_item(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
|
| 37 |
def output(self):
|
| 38 |
return {
|
|
@@ -41,34 +70,70 @@ class LogItem:
|
|
| 41 |
"heading": self.heading,
|
| 42 |
"content": self.content,
|
| 43 |
"temp": self.temp,
|
| 44 |
-
"kvps": self.kvps
|
| 45 |
}
|
| 46 |
|
|
|
|
| 47 |
class Log:
|
| 48 |
|
| 49 |
def __init__(self):
|
| 50 |
self.guid: str = str(uuid.uuid4())
|
| 51 |
self.updates: list[int] = []
|
| 52 |
self.logs: list[LogItem] = []
|
|
|
|
| 53 |
|
| 54 |
-
def log(
|
| 55 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 56 |
self.logs.append(item)
|
| 57 |
self.updates += [item.no]
|
|
|
|
|
|
|
| 58 |
return item
|
| 59 |
|
| 60 |
-
def update_item(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 61 |
item = self.logs[no]
|
| 62 |
if type is not None:
|
| 63 |
item.type = type
|
| 64 |
if heading is not None:
|
| 65 |
item.heading = heading
|
|
|
|
| 66 |
if content is not None:
|
| 67 |
item.content = content
|
| 68 |
if kvps is not None:
|
| 69 |
item.kvps = kvps
|
| 70 |
if temp is not None:
|
| 71 |
item.temp = temp
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 72 |
self.updates += [item.no]
|
| 73 |
|
| 74 |
def output(self, start=None, end=None):
|
|
@@ -76,19 +141,18 @@ class Log:
|
|
| 76 |
start = 0
|
| 77 |
if end is None:
|
| 78 |
end = len(self.updates)
|
| 79 |
-
|
| 80 |
out = []
|
| 81 |
seen = set()
|
| 82 |
for update in self.updates[start:end]:
|
| 83 |
if update not in seen:
|
| 84 |
out.append(self.logs[update].output())
|
| 85 |
seen.add(update)
|
| 86 |
-
|
| 87 |
-
return out
|
| 88 |
-
|
| 89 |
|
|
|
|
| 90 |
|
| 91 |
def reset(self):
|
| 92 |
self.guid = str(uuid.uuid4())
|
| 93 |
self.updates = []
|
| 94 |
self.logs = []
|
|
|
|
|
|
| 5 |
|
| 6 |
|
| 7 |
type Type = Literal[
|
| 8 |
+
"agent",
|
| 9 |
+
"code_exe",
|
| 10 |
+
"error",
|
| 11 |
+
"hint",
|
| 12 |
+
"info",
|
| 13 |
+
"progress",
|
| 14 |
+
"response",
|
| 15 |
+
"tool",
|
| 16 |
+
"user",
|
| 17 |
+
"util",
|
| 18 |
+
"warning",
|
| 19 |
+
]
|
| 20 |
+
|
| 21 |
|
| 22 |
@dataclass
|
| 23 |
class LogItem:
|
| 24 |
+
log: "Log"
|
| 25 |
no: int
|
| 26 |
type: str
|
| 27 |
heading: str
|
|
|
|
| 33 |
def __post_init__(self):
|
| 34 |
self.guid = self.log.guid
|
| 35 |
|
| 36 |
+
def update(
|
| 37 |
+
self,
|
| 38 |
+
type: Type | None = None,
|
| 39 |
+
heading: str | None = None,
|
| 40 |
+
content: str | None = None,
|
| 41 |
+
kvps: dict | None = None,
|
| 42 |
+
temp: bool | None = None,
|
| 43 |
+
**kwargs,
|
| 44 |
+
):
|
| 45 |
if self.guid == self.log.guid:
|
| 46 |
+
self.log.update_item(
|
| 47 |
+
self.no,
|
| 48 |
+
type=type,
|
| 49 |
+
heading=heading,
|
| 50 |
+
content=content,
|
| 51 |
+
kvps=kvps,
|
| 52 |
+
temp=temp,
|
| 53 |
+
**kwargs,
|
| 54 |
+
)
|
| 55 |
+
|
| 56 |
+
def stream(self, heading: str | None = None, content: str | None = None, **kwargs):
|
| 57 |
+
if heading is not None:
|
| 58 |
+
self.update(heading=self.heading + heading)
|
| 59 |
+
if content is not None:
|
| 60 |
+
self.update(content=self.content + content)
|
| 61 |
+
|
| 62 |
+
for k, v in kwargs.items():
|
| 63 |
+
prev = self.kvps.get(k, "") if self.kvps else ""
|
| 64 |
+
self.update(**{k: prev + v})
|
| 65 |
|
| 66 |
def output(self):
|
| 67 |
return {
|
|
|
|
| 70 |
"heading": self.heading,
|
| 71 |
"content": self.content,
|
| 72 |
"temp": self.temp,
|
| 73 |
+
"kvps": self.kvps,
|
| 74 |
}
|
| 75 |
|
| 76 |
+
|
| 77 |
class Log:
|
| 78 |
|
| 79 |
def __init__(self):
|
| 80 |
self.guid: str = str(uuid.uuid4())
|
| 81 |
self.updates: list[int] = []
|
| 82 |
self.logs: list[LogItem] = []
|
| 83 |
+
self.progress = ""
|
| 84 |
|
| 85 |
+
def log(
|
| 86 |
+
self,
|
| 87 |
+
type: Type,
|
| 88 |
+
heading: str | None = None,
|
| 89 |
+
content: str | None = None,
|
| 90 |
+
kvps: dict | None = None,
|
| 91 |
+
temp: bool | None = None,
|
| 92 |
+
) -> LogItem:
|
| 93 |
+
item = LogItem(
|
| 94 |
+
log=self,
|
| 95 |
+
no=len(self.logs),
|
| 96 |
+
type=type,
|
| 97 |
+
heading=heading or "",
|
| 98 |
+
content=content or "",
|
| 99 |
+
kvps=kvps,
|
| 100 |
+
temp=temp or False,
|
| 101 |
+
)
|
| 102 |
self.logs.append(item)
|
| 103 |
self.updates += [item.no]
|
| 104 |
+
if heading:
|
| 105 |
+
self.progress = heading
|
| 106 |
return item
|
| 107 |
|
| 108 |
+
def update_item(
|
| 109 |
+
self,
|
| 110 |
+
no: int,
|
| 111 |
+
type: str | None = None,
|
| 112 |
+
heading: str | None = None,
|
| 113 |
+
content: str | None = None,
|
| 114 |
+
kvps: dict | None = None,
|
| 115 |
+
temp: bool | None = None,
|
| 116 |
+
**kwargs,
|
| 117 |
+
):
|
| 118 |
item = self.logs[no]
|
| 119 |
if type is not None:
|
| 120 |
item.type = type
|
| 121 |
if heading is not None:
|
| 122 |
item.heading = heading
|
| 123 |
+
self.progress = heading
|
| 124 |
if content is not None:
|
| 125 |
item.content = content
|
| 126 |
if kvps is not None:
|
| 127 |
item.kvps = kvps
|
| 128 |
if temp is not None:
|
| 129 |
item.temp = temp
|
| 130 |
+
|
| 131 |
+
if kwargs:
|
| 132 |
+
if item.kvps is None:
|
| 133 |
+
item.kvps = {}
|
| 134 |
+
for k, v in kwargs.items():
|
| 135 |
+
item.kvps[k] = v
|
| 136 |
+
|
| 137 |
self.updates += [item.no]
|
| 138 |
|
| 139 |
def output(self, start=None, end=None):
|
|
|
|
| 141 |
start = 0
|
| 142 |
if end is None:
|
| 143 |
end = len(self.updates)
|
| 144 |
+
|
| 145 |
out = []
|
| 146 |
seen = set()
|
| 147 |
for update in self.updates[start:end]:
|
| 148 |
if update not in seen:
|
| 149 |
out.append(self.logs[update].output())
|
| 150 |
seen.add(update)
|
|
|
|
|
|
|
|
|
|
| 151 |
|
| 152 |
+
return out
|
| 153 |
|
| 154 |
def reset(self):
|
| 155 |
self.guid = str(uuid.uuid4())
|
| 156 |
self.updates = []
|
| 157 |
self.logs = []
|
| 158 |
+
self.progress = ""
|
python/helpers/tool.py
CHANGED
|
@@ -22,8 +22,8 @@ class Tool:
|
|
| 22 |
pass
|
| 23 |
|
| 24 |
async def before_execution(self, **kwargs):
|
| 25 |
-
PrintStyle(font_color="#1B4F72", padding=True, background_color="white", bold=True).print(f"{self.agent.agent_name}: Using tool '{self.name}'
|
| 26 |
-
self.log = self.agent.context.log.log(type="tool", heading=f"{self.agent.agent_name}: Using tool '{self.name}'
|
| 27 |
if self.args and isinstance(self.args, dict):
|
| 28 |
for key, value in self.args.items():
|
| 29 |
PrintStyle(font_color="#85C1E9", bold=True).stream(self.nice_key(key)+": ")
|
|
@@ -34,7 +34,7 @@ class Tool:
|
|
| 34 |
text = messages.truncate_text(self.agent, response.message.strip(), self.agent.config.max_tool_response_length)
|
| 35 |
msg_response = self.agent.read_prompt("fw.tool_response.md", tool_name=self.name, tool_response=text)
|
| 36 |
await self.agent.append_message(msg_response, human=True)
|
| 37 |
-
PrintStyle(font_color="#1B4F72", background_color="white", padding=True, bold=True).print(f"{self.agent.agent_name}: Response from tool '{self.name}'
|
| 38 |
PrintStyle(font_color="#85C1E9").print(response.message)
|
| 39 |
self.log.update(content=response.message)
|
| 40 |
|
|
|
|
| 22 |
pass
|
| 23 |
|
| 24 |
async def before_execution(self, **kwargs):
|
| 25 |
+
PrintStyle(font_color="#1B4F72", padding=True, background_color="white", bold=True).print(f"{self.agent.agent_name}: Using tool '{self.name}'")
|
| 26 |
+
self.log = self.agent.context.log.log(type="tool", heading=f"{self.agent.agent_name}: Using tool '{self.name}'", content="", kvps=self.args)
|
| 27 |
if self.args and isinstance(self.args, dict):
|
| 28 |
for key, value in self.args.items():
|
| 29 |
PrintStyle(font_color="#85C1E9", bold=True).stream(self.nice_key(key)+": ")
|
|
|
|
| 34 |
text = messages.truncate_text(self.agent, response.message.strip(), self.agent.config.max_tool_response_length)
|
| 35 |
msg_response = self.agent.read_prompt("fw.tool_response.md", tool_name=self.name, tool_response=text)
|
| 36 |
await self.agent.append_message(msg_response, human=True)
|
| 37 |
+
PrintStyle(font_color="#1B4F72", background_color="white", padding=True, bold=True).print(f"{self.agent.agent_name}: Response from tool '{self.name}'")
|
| 38 |
PrintStyle(font_color="#85C1E9").print(response.message)
|
| 39 |
self.log.update(content=response.message)
|
| 40 |
|
python/helpers/vector_db.py
CHANGED
|
@@ -1,3 +1,4 @@
|
|
|
|
|
| 1 |
from langchain.storage import InMemoryByteStore, LocalFileStore
|
| 2 |
from langchain.embeddings import CacheBackedEmbeddings
|
| 3 |
|
|
@@ -11,21 +12,31 @@ from . import files
|
|
| 11 |
from langchain_core.documents import Document
|
| 12 |
import uuid
|
| 13 |
from python.helpers import knowledge_import
|
| 14 |
-
from python.helpers.log import Log
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 15 |
|
| 16 |
index: dict[str, "VectorDB"] = {}
|
| 17 |
|
| 18 |
|
| 19 |
-
def
|
| 20 |
-
logger: Log,
|
| 21 |
embeddings_model,
|
|
|
|
| 22 |
in_memory=False,
|
| 23 |
-
|
| 24 |
-
knowledge_dir=None,
|
| 25 |
):
|
| 26 |
if index.get(memory_dir) is None:
|
|
|
|
|
|
|
| 27 |
index[memory_dir] = VectorDB(
|
| 28 |
-
|
| 29 |
)
|
| 30 |
return index[memory_dir]
|
| 31 |
|
|
@@ -34,22 +45,23 @@ class VectorDB:
|
|
| 34 |
|
| 35 |
def __init__(
|
| 36 |
self,
|
| 37 |
-
|
| 38 |
embeddings_model,
|
|
|
|
|
|
|
| 39 |
in_memory=False,
|
| 40 |
-
memory_dir="./memory/default",
|
| 41 |
-
knowledge_dir=None,
|
| 42 |
):
|
| 43 |
-
self.
|
| 44 |
|
| 45 |
print("Initializing VectorDB...")
|
| 46 |
-
self.
|
| 47 |
|
| 48 |
self.embeddings_model = embeddings_model
|
| 49 |
|
| 50 |
-
self.em_dir = files.get_abs_path(
|
|
|
|
|
|
|
| 51 |
self.db_dir = files.get_abs_path("./memory", memory_dir, "database")
|
| 52 |
-
self.kn_dir = files.get_abs_path(knowledge_dir) if knowledge_dir else ""
|
| 53 |
|
| 54 |
# make sure embeddings and database directories exist
|
| 55 |
os.makedirs(self.db_dir, exist_ok=True)
|
|
@@ -93,10 +105,10 @@ class VectorDB:
|
|
| 93 |
)
|
| 94 |
|
| 95 |
# preload knowledge files
|
| 96 |
-
if
|
| 97 |
-
self.preload_knowledge(
|
| 98 |
|
| 99 |
-
def preload_knowledge(self,
|
| 100 |
|
| 101 |
# Load the index file if it exists
|
| 102 |
index_path = files.get_abs_path(db_dir, "knowledge_import.json")
|
|
@@ -110,7 +122,8 @@ class VectorDB:
|
|
| 110 |
with open(index_path, "r") as f:
|
| 111 |
index = json.load(f)
|
| 112 |
|
| 113 |
-
|
|
|
|
| 114 |
|
| 115 |
for file in index:
|
| 116 |
if index[file]["state"] in ["changed", "removed"] and index[file].get(
|
|
@@ -139,12 +152,16 @@ class VectorDB:
|
|
| 139 |
def search_similarity(self, query, results=3):
|
| 140 |
return self.db.similarity_search(query, results)
|
| 141 |
|
| 142 |
-
def search_similarity_threshold(
|
|
|
|
|
|
|
|
|
|
| 143 |
return self.db.search(
|
| 144 |
query,
|
| 145 |
search_type="similarity_score_threshold",
|
| 146 |
k=results,
|
| 147 |
score_threshold=threshold,
|
|
|
|
| 148 |
)
|
| 149 |
|
| 150 |
def search_max_rel(self, query, results=3):
|
|
@@ -198,8 +215,20 @@ class VectorDB:
|
|
| 198 |
|
| 199 |
def insert_documents(self, docs: list[Document]):
|
| 200 |
ids = [str(uuid.uuid4()) for _ in range(len(docs))]
|
| 201 |
-
|
| 202 |
-
doc
|
| 203 |
-
|
| 204 |
-
|
|
|
|
| 205 |
return ids
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any
|
| 2 |
from langchain.storage import InMemoryByteStore, LocalFileStore
|
| 3 |
from langchain.embeddings import CacheBackedEmbeddings
|
| 4 |
|
|
|
|
| 12 |
from langchain_core.documents import Document
|
| 13 |
import uuid
|
| 14 |
from python.helpers import knowledge_import
|
| 15 |
+
from python.helpers.log import Log, LogItem
|
| 16 |
+
import pandas as pd
|
| 17 |
+
from enum import Enum
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class Area(Enum):
|
| 21 |
+
MAIN = "main"
|
| 22 |
+
SOLUTIONS = "solutions"
|
| 23 |
+
|
| 24 |
|
| 25 |
index: dict[str, "VectorDB"] = {}
|
| 26 |
|
| 27 |
|
| 28 |
+
def get_or_create_db(
|
| 29 |
+
logger: Log | None,
|
| 30 |
embeddings_model,
|
| 31 |
+
memory_dir: str,
|
| 32 |
in_memory=False,
|
| 33 |
+
knowledge_dirs: list[str] = [],
|
|
|
|
| 34 |
):
|
| 35 |
if index.get(memory_dir) is None:
|
| 36 |
+
log_item = None
|
| 37 |
+
if(logger): log_item = logger.log(type="util", heading=f"Initializing VectorDB in {memory_dir}")
|
| 38 |
index[memory_dir] = VectorDB(
|
| 39 |
+
log_item, embeddings_model, memory_dir, knowledge_dirs, in_memory
|
| 40 |
)
|
| 41 |
return index[memory_dir]
|
| 42 |
|
|
|
|
| 45 |
|
| 46 |
def __init__(
|
| 47 |
self,
|
| 48 |
+
log_item: LogItem | None,
|
| 49 |
embeddings_model,
|
| 50 |
+
memory_dir: str,
|
| 51 |
+
knowledge_dirs: list[str] = [],
|
| 52 |
in_memory=False,
|
|
|
|
|
|
|
| 53 |
):
|
| 54 |
+
self.log_item = log_item
|
| 55 |
|
| 56 |
print("Initializing VectorDB...")
|
| 57 |
+
if(self.log_item): self.log_item.stream(progress="\nInitializing VectorDB")
|
| 58 |
|
| 59 |
self.embeddings_model = embeddings_model
|
| 60 |
|
| 61 |
+
self.em_dir = files.get_abs_path(
|
| 62 |
+
"./memory/embeddings"
|
| 63 |
+
) # just caching, no need to parameterize
|
| 64 |
self.db_dir = files.get_abs_path("./memory", memory_dir, "database")
|
|
|
|
| 65 |
|
| 66 |
# make sure embeddings and database directories exist
|
| 67 |
os.makedirs(self.db_dir, exist_ok=True)
|
|
|
|
| 105 |
)
|
| 106 |
|
| 107 |
# preload knowledge files
|
| 108 |
+
if knowledge_dirs:
|
| 109 |
+
self.preload_knowledge(knowledge_dirs, self.db_dir)
|
| 110 |
|
| 111 |
+
def preload_knowledge(self, kn_dirs: list[str], db_dir: str):
|
| 112 |
|
| 113 |
# Load the index file if it exists
|
| 114 |
index_path = files.get_abs_path(db_dir, "knowledge_import.json")
|
|
|
|
| 122 |
with open(index_path, "r") as f:
|
| 123 |
index = json.load(f)
|
| 124 |
|
| 125 |
+
for kn_dir in kn_dirs:
|
| 126 |
+
index = knowledge_import.load_knowledge(self.log_item, kn_dir, index)
|
| 127 |
|
| 128 |
for file in index:
|
| 129 |
if index[file]["state"] in ["changed", "removed"] and index[file].get(
|
|
|
|
| 152 |
def search_similarity(self, query, results=3):
|
| 153 |
return self.db.similarity_search(query, results)
|
| 154 |
|
| 155 |
+
def search_similarity_threshold(
|
| 156 |
+
self, query: str, results=3, threshold=0.5, filter: str = ""
|
| 157 |
+
):
|
| 158 |
+
comparator = VectorDB.get_comparator(filter) if filter else None
|
| 159 |
return self.db.search(
|
| 160 |
query,
|
| 161 |
search_type="similarity_score_threshold",
|
| 162 |
k=results,
|
| 163 |
score_threshold=threshold,
|
| 164 |
+
filter=comparator,
|
| 165 |
)
|
| 166 |
|
| 167 |
def search_max_rel(self, query, results=3):
|
|
|
|
| 215 |
|
| 216 |
def insert_documents(self, docs: list[Document]):
|
| 217 |
ids = [str(uuid.uuid4()) for _ in range(len(docs))]
|
| 218 |
+
if ids:
|
| 219 |
+
for doc, id in zip(docs, ids):
|
| 220 |
+
doc.metadata["id"] = id # add ids to documents metadata
|
| 221 |
+
self.db.add_documents(documents=docs, ids=ids)
|
| 222 |
+
self.db.save_local(folder_path=self.db_dir) # persist
|
| 223 |
return ids
|
| 224 |
+
|
| 225 |
+
@staticmethod
|
| 226 |
+
def get_comparator(condition: str):
|
| 227 |
+
def comparator(data: dict[str, Any]):
|
| 228 |
+
try:
|
| 229 |
+
return eval(condition, {}, data)
|
| 230 |
+
except Exception as e:
|
| 231 |
+
print(f"Error evaluating condition: {e}")
|
| 232 |
+
return False
|
| 233 |
+
|
| 234 |
+
return comparator
|
python/tools/code_execution_tool.py
CHANGED
|
@@ -53,10 +53,10 @@ class CodeExecution(Tool):
|
|
| 53 |
await self.agent.handle_intervention() # wait for intervention and handle it, if paused
|
| 54 |
PrintStyle(
|
| 55 |
font_color="#1B4F72", padding=True, background_color="white", bold=True
|
| 56 |
-
).print(f"{self.agent.agent_name}: Using tool '{self.name}'
|
| 57 |
self.log = self.agent.context.log.log(
|
| 58 |
type="code_exe",
|
| 59 |
-
heading=f"{self.agent.agent_name}: Using tool '{self.name}'
|
| 60 |
content="",
|
| 61 |
kvps=self.args,
|
| 62 |
)
|
|
@@ -132,7 +132,7 @@ class CodeExecution(Tool):
|
|
| 132 |
self.state.shell.send_command(command)
|
| 133 |
|
| 134 |
PrintStyle(background_color="white", font_color="#1B4F72", bold=True).print(
|
| 135 |
-
f"{self.agent.agent_name} code execution output
|
| 136 |
)
|
| 137 |
return await self.get_terminal_output()
|
| 138 |
|
|
|
|
| 53 |
await self.agent.handle_intervention() # wait for intervention and handle it, if paused
|
| 54 |
PrintStyle(
|
| 55 |
font_color="#1B4F72", padding=True, background_color="white", bold=True
|
| 56 |
+
).print(f"{self.agent.agent_name}: Using tool '{self.name}'")
|
| 57 |
self.log = self.agent.context.log.log(
|
| 58 |
type="code_exe",
|
| 59 |
+
heading=f"{self.agent.agent_name}: Using tool '{self.name}'",
|
| 60 |
content="",
|
| 61 |
kvps=self.args,
|
| 62 |
)
|
|
|
|
| 132 |
self.state.shell.send_command(command)
|
| 133 |
|
| 134 |
PrintStyle(background_color="white", font_color="#1B4F72", bold=True).print(
|
| 135 |
+
f"{self.agent.agent_name} code execution output"
|
| 136 |
)
|
| 137 |
return await self.get_terminal_output()
|
| 138 |
|
python/tools/knowledge_tool.py
CHANGED
|
@@ -25,9 +25,7 @@ class Knowledge(Tool):
|
|
| 25 |
duckduckgo = executor.submit(duckduckgo_search.search, question)
|
| 26 |
|
| 27 |
# manual memory search
|
| 28 |
-
future_memory_man = executor.submit(memory_tool.search, self.agent,
|
| 29 |
-
# history memory search
|
| 30 |
-
# future_memory_man = executor.submit(memory_tool.search, self.agent, "history", question)
|
| 31 |
|
| 32 |
# Wait for both functions to complete
|
| 33 |
try:
|
|
|
|
| 25 |
duckduckgo = executor.submit(duckduckgo_search.search, question)
|
| 26 |
|
| 27 |
# manual memory search
|
| 28 |
+
future_memory_man = executor.submit(memory_tool.search, self.agent, question)
|
|
|
|
|
|
|
| 29 |
|
| 30 |
# Wait for both functions to complete
|
| 31 |
try:
|
python/tools/memory_tool.py
CHANGED
|
@@ -1,14 +1,11 @@
|
|
| 1 |
import re
|
| 2 |
-
from typing import Literal
|
| 3 |
from agent import Agent
|
| 4 |
-
from python.helpers.vector_db import
|
| 5 |
import os
|
| 6 |
from python.helpers.tool import Tool, Response
|
| 7 |
from python.helpers.print_style import PrintStyle
|
| 8 |
from python.helpers.errors import handle_error
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
type Area = Literal['manual', 'history']
|
| 12 |
|
| 13 |
class Memory(Tool):
|
| 14 |
async def execute(self,**kwargs):
|
|
@@ -20,13 +17,13 @@ class Memory(Tool):
|
|
| 20 |
if "query" in kwargs:
|
| 21 |
threshold = float(kwargs.get("threshold", 0.1))
|
| 22 |
count = int(kwargs.get("count", 5))
|
| 23 |
-
result = search(self.agent,
|
| 24 |
elif "memorize" in kwargs:
|
| 25 |
-
result = save(self.agent,
|
| 26 |
elif "forget" in kwargs:
|
| 27 |
-
result = forget(self.agent,
|
| 28 |
# elif "delete" in kwargs
|
| 29 |
-
result = delete(self.agent,
|
| 30 |
except Exception as e:
|
| 31 |
handle_error(e)
|
| 32 |
# hint about embedding change with existing database
|
|
@@ -37,39 +34,39 @@ class Memory(Tool):
|
|
| 37 |
# result = process_query(self.agent, self.args["memory"],self.args["action"], result_count=self.agent.config.auto_memory_count)
|
| 38 |
return Response(message=result, break_loop=False)
|
| 39 |
|
| 40 |
-
def search(agent:Agent,
|
| 41 |
-
db = get_db(agent
|
| 42 |
# docs = db.search_similarity(query,count) # type: ignore
|
| 43 |
docs = db.search_similarity_threshold(query,count,threshold) # type: ignore
|
| 44 |
if len(docs)==0: return agent.read_prompt("fw.memories_not_found.md", query=query)
|
| 45 |
else: return str(docs)
|
| 46 |
|
| 47 |
-
def save(agent:Agent,
|
| 48 |
-
db = get_db(agent
|
| 49 |
id = db.insert_text(text) # type: ignore
|
| 50 |
return agent.read_prompt("fw.memory_saved.md", memory_id=id)
|
| 51 |
|
| 52 |
-
def delete(agent:Agent,
|
| 53 |
-
db = get_db(agent
|
| 54 |
ids = extract_guids(ids_str)
|
| 55 |
deleted = db.delete_documents_by_ids(ids) # type: ignore
|
| 56 |
return agent.read_prompt("fw.memories_deleted.md", memory_count=deleted)
|
| 57 |
|
| 58 |
-
def forget(agent:Agent,
|
| 59 |
-
db = get_db(agent
|
| 60 |
deleted = db.delete_documents_by_query(query) # type: ignore
|
| 61 |
return agent.read_prompt("fw.memories_deleted.md", memory_count=deleted)
|
| 62 |
|
| 63 |
-
def get_db(agent: Agent
|
| 64 |
-
mem_dir =
|
| 65 |
-
|
| 66 |
|
| 67 |
-
db =
|
| 68 |
agent.context.log,
|
| 69 |
embeddings_model=agent.config.embeddings_model,
|
| 70 |
in_memory=False,
|
| 71 |
memory_dir=mem_dir,
|
| 72 |
-
|
| 73 |
|
| 74 |
return db
|
| 75 |
|
|
|
|
| 1 |
import re
|
|
|
|
| 2 |
from agent import Agent
|
| 3 |
+
from python.helpers.vector_db import get_or_create_db
|
| 4 |
import os
|
| 5 |
from python.helpers.tool import Tool, Response
|
| 6 |
from python.helpers.print_style import PrintStyle
|
| 7 |
from python.helpers.errors import handle_error
|
| 8 |
+
from python.helpers import files
|
|
|
|
|
|
|
| 9 |
|
| 10 |
class Memory(Tool):
|
| 11 |
async def execute(self,**kwargs):
|
|
|
|
| 17 |
if "query" in kwargs:
|
| 18 |
threshold = float(kwargs.get("threshold", 0.1))
|
| 19 |
count = int(kwargs.get("count", 5))
|
| 20 |
+
result = search(self.agent, kwargs["query"], count, threshold)
|
| 21 |
elif "memorize" in kwargs:
|
| 22 |
+
result = save(self.agent, kwargs["memorize"])
|
| 23 |
elif "forget" in kwargs:
|
| 24 |
+
result = forget(self.agent, kwargs["forget"])
|
| 25 |
# elif "delete" in kwargs
|
| 26 |
+
result = delete(self.agent, kwargs["delete"])
|
| 27 |
except Exception as e:
|
| 28 |
handle_error(e)
|
| 29 |
# hint about embedding change with existing database
|
|
|
|
| 34 |
# result = process_query(self.agent, self.args["memory"],self.args["action"], result_count=self.agent.config.auto_memory_count)
|
| 35 |
return Response(message=result, break_loop=False)
|
| 36 |
|
| 37 |
+
def search(agent:Agent, query:str, count:int=5, threshold:float=0.1):
|
| 38 |
+
db = get_db(agent)
|
| 39 |
# docs = db.search_similarity(query,count) # type: ignore
|
| 40 |
docs = db.search_similarity_threshold(query,count,threshold) # type: ignore
|
| 41 |
if len(docs)==0: return agent.read_prompt("fw.memories_not_found.md", query=query)
|
| 42 |
else: return str(docs)
|
| 43 |
|
| 44 |
+
def save(agent:Agent, text:str):
    """Persist *text* into the agent's memory DB and return a confirmation prompt."""
    memory_id = get_db(agent).insert_text(text)  # type: ignore
    return agent.read_prompt("fw.memory_saved.md", memory_id=memory_id)
|
| 48 |
|
| 49 |
+
def delete(agent:Agent, ids_str:str):
    """Delete memories by id.

    Extracts GUIDs from *ids_str*, removes the matching documents and returns
    the framework's deletion-count prompt.
    """
    db = get_db(agent)
    guids = extract_guids(ids_str)
    removed = db.delete_documents_by_ids(guids)  # type: ignore
    return agent.read_prompt("fw.memories_deleted.md", memory_count=removed)
|
| 54 |
|
| 55 |
+
def forget(agent:Agent, query:str):
    """Delete every memory similar to *query* and report how many were removed."""
    removed = get_db(agent).delete_documents_by_query(query)  # type: ignore
    return agent.read_prompt("fw.memories_deleted.md", memory_count=removed)
|
| 59 |
|
| 60 |
+
def get_db(agent: Agent):
    """Open (or create) the persistent vector DB for this agent.

    The memory directory comes from ``agent.config.memory_subdir`` (falling
    back to "default"); knowledge directories come from
    ``agent.config.knowledge_subdirs``.
    """
    memory_path = files.get_abs_path(
        "memory", agent.config.memory_subdir or "default"
    )
    knowledge_paths = [
        files.get_abs_path("knowledge", sub)
        for sub in agent.config.knowledge_subdirs or []
    ]
    return get_or_create_db(
        agent.context.log,
        embeddings_model=agent.config.embeddings_model,
        in_memory=False,
        memory_dir=memory_path,
        knowledge_dirs=knowledge_paths,
    )
|
| 72 |
|
python/tools/response.py
CHANGED
|
@@ -7,7 +7,7 @@ class ResponseTool(Tool):
|
|
| 7 |
return Response(message=self.args["text"], break_loop=True)
|
| 8 |
|
| 9 |
async def before_execution(self, **kwargs):
|
| 10 |
-
self.log = self.agent.context.log.log(type="response", heading=f"{self.agent.agent_name}: Responding
|
| 11 |
|
| 12 |
|
| 13 |
async def after_execution(self, response, **kwargs):
|
|
|
|
| 7 |
return Response(message=self.args["text"], break_loop=True)
|
| 8 |
|
| 9 |
async def before_execution(self, **kwargs):
|
| 10 |
+
self.log = self.agent.context.log.log(type="response", heading=f"{self.agent.agent_name}: Responding", content=self.args.get("text", ""))
|
| 11 |
|
| 12 |
|
| 13 |
async def after_execution(self, response, **kwargs):
|
python/tools/task_done.py
CHANGED
|
@@ -7,7 +7,7 @@ class TaskDone(Tool):
|
|
| 7 |
return Response(message=self.args["text"], break_loop=True)
|
| 8 |
|
| 9 |
async def before_execution(self, **kwargs):
|
| 10 |
-
self.log = self.agent.context.log.log(type="response", heading=f"{self.agent.agent_name}: Task done
|
| 11 |
|
| 12 |
async def after_execution(self, response, **kwargs):
|
| 13 |
pass # do add anything to the history or output
|
|
|
|
| 7 |
return Response(message=self.args["text"], break_loop=True)
|
| 8 |
|
| 9 |
async def before_execution(self, **kwargs):
|
| 10 |
+
self.log = self.agent.context.log.log(type="response", heading=f"{self.agent.agent_name}: Task done", content=self.args.get("text", ""))
|
| 11 |
|
| 12 |
async def after_execution(self, response, **kwargs):
|
| 13 |
pass # do add anything to the history or output
|
requirements.txt
CHANGED
|
@@ -7,10 +7,12 @@ langchain-openai==0.1.15
|
|
| 7 |
langchain-community==0.2.7
|
| 8 |
langchain-anthropic==0.1.19
|
| 9 |
langchain-google-genai==1.0.7
|
|
|
|
| 10 |
langchain_mistralai==0.1.8
|
| 11 |
webcolors==24.6.0
|
| 12 |
sentence-transformers==3.0.1
|
| 13 |
docker==7.1.0
|
|
|
|
| 14 |
paramiko==3.4.0
|
| 15 |
duckduckgo_search==6.1.12
|
| 16 |
inputimeout==1.0.4
|
|
@@ -19,6 +21,8 @@ beautifulsoup4==4.12.3
|
|
| 19 |
lxml_html_clean==0.2.0
|
| 20 |
pynput==1.7.7
|
| 21 |
pypdf==4.3.1
|
|
|
|
|
|
|
| 22 |
Flask[async]==3.0.3
|
| 23 |
Flask-BasicAuth==0.2.0
|
| 24 |
faiss-cpu==1.8.0.post1
|
|
|
|
| 7 |
langchain-community==0.2.7
|
| 8 |
langchain-anthropic==0.1.19
|
| 9 |
langchain-google-genai==1.0.7
|
| 10 |
+
Markdown==3.7
|
| 11 |
langchain_mistralai==0.1.8
|
| 12 |
webcolors==24.6.0
|
| 13 |
sentence-transformers==3.0.1
|
| 14 |
docker==7.1.0
|
| 15 |
+
pandas==2.2.3
|
| 16 |
paramiko==3.4.0
|
| 17 |
duckduckgo_search==6.1.12
|
| 18 |
inputimeout==1.0.4
|
|
|
|
| 21 |
lxml_html_clean==0.2.0
|
| 22 |
pynput==1.7.7
|
| 23 |
pypdf==4.3.1
|
| 24 |
+
unstructured==0.15.13
|
| 25 |
+
unstructured-client==0.25.9
|
| 26 |
Flask[async]==3.0.3
|
| 27 |
Flask-BasicAuth==0.2.0
|
| 28 |
faiss-cpu==1.8.0.post1
|
run_ui.py
CHANGED
|
@@ -13,93 +13,116 @@ from python.helpers.print_style import PrintStyle
|
|
| 13 |
from python.helpers.log import Log
|
| 14 |
from dotenv import load_dotenv
|
| 15 |
|
|
|
|
| 16 |
|
| 17 |
-
#initialize the internal Flask server
|
| 18 |
-
app = Flask("app",static_folder=get_abs_path("./webui"),static_url_path="/")
|
| 19 |
lock = threading.Lock()
|
| 20 |
|
| 21 |
# Set up basic authentication, name and password from .env variables
|
| 22 |
-
app.config[
|
| 23 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 24 |
basic_auth = BasicAuth(app)
|
| 25 |
|
|
|
|
| 26 |
# get context to run agent zero in
|
| 27 |
-
def get_context(ctxid:str):
|
| 28 |
with lock:
|
| 29 |
-
if not ctxid:
|
| 30 |
-
first = AgentContext.first()
|
| 31 |
-
if first:
|
|
|
|
| 32 |
return AgentContext(config=initialize())
|
| 33 |
got = AgentContext.get(ctxid)
|
| 34 |
-
if got:
|
| 35 |
-
|
|
|
|
|
|
|
| 36 |
|
| 37 |
# Now you can use @requires_auth function decorator to require login on certain pages
|
| 38 |
def requires_auth(f):
|
| 39 |
@wraps(f)
|
| 40 |
async def decorated(*args, **kwargs):
|
| 41 |
auth = request.authorization
|
| 42 |
-
if not auth or not (
|
|
|
|
|
|
|
|
|
|
| 43 |
return Response(
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
|
|
|
|
|
|
| 47 |
return await f(*args, **kwargs)
|
|
|
|
| 48 |
return decorated
|
| 49 |
|
|
|
|
| 50 |
# handle default address, show demo html page from ./test_form.html
|
| 51 |
-
@app.route(
|
| 52 |
async def test_form():
|
| 53 |
return Path(get_abs_path("./webui/index.html")).read_text()
|
| 54 |
|
|
|
|
| 55 |
# simple health check, just return OK to see the server is running
|
| 56 |
-
@app.route(
|
| 57 |
async def health_check():
|
| 58 |
return "OK"
|
| 59 |
|
|
|
|
| 60 |
# # secret page, requires authentication
|
| 61 |
# @app.route('/secret', methods=['GET'])
|
| 62 |
# @requires_auth
|
| 63 |
# async def secret_page():
|
| 64 |
# return Path("./secret_page.html").read_text()
|
| 65 |
|
|
|
|
| 66 |
# send message to agent (async UI)
|
| 67 |
-
@app.route(
|
| 68 |
async def handle_message_async():
|
| 69 |
return await handle_message(False)
|
| 70 |
|
|
|
|
| 71 |
# send message to agent (synchronous API)
|
| 72 |
-
@app.route(
|
| 73 |
async def handle_msg_sync():
|
| 74 |
return await handle_message(True)
|
| 75 |
|
| 76 |
-
|
|
|
|
| 77 |
try:
|
| 78 |
-
|
| 79 |
-
#data sent to the server
|
| 80 |
input = request.get_json()
|
| 81 |
text = input.get("text", "")
|
| 82 |
ctxid = input.get("context", "")
|
| 83 |
blev = input.get("broadcast", 1)
|
| 84 |
|
| 85 |
-
#context instance - get or create
|
| 86 |
context = get_context(ctxid)
|
| 87 |
|
| 88 |
# print to console and log
|
| 89 |
-
PrintStyle(
|
|
|
|
|
|
|
| 90 |
PrintStyle(font_color="white", padding=False).print(f"> {text}")
|
| 91 |
context.log.log(type="user", heading="User message", content=text)
|
| 92 |
|
| 93 |
if sync:
|
| 94 |
context.communicate(text)
|
| 95 |
-
result = await context.process.result()
|
| 96 |
response = {
|
| 97 |
"ok": True,
|
| 98 |
"message": result,
|
| 99 |
-
}
|
| 100 |
else:
|
| 101 |
|
| 102 |
-
print("\n\n",(context.process and context.process.is_alive()))
|
| 103 |
context.communicate(text)
|
| 104 |
response = {
|
| 105 |
"ok": True,
|
|
@@ -111,21 +134,23 @@ async def handle_message(sync:bool):
|
|
| 111 |
"ok": False,
|
| 112 |
"message": str(e),
|
| 113 |
}
|
|
|
|
| 114 |
|
| 115 |
-
#respond with json
|
| 116 |
return jsonify(response)
|
| 117 |
-
|
|
|
|
| 118 |
# pausing/unpausing the agent
|
| 119 |
-
@app.route(
|
| 120 |
async def pause():
|
| 121 |
try:
|
| 122 |
-
|
| 123 |
-
#data sent to the server
|
| 124 |
input = request.get_json()
|
| 125 |
paused = input.get("paused", False)
|
| 126 |
ctxid = input.get("context", "")
|
| 127 |
|
| 128 |
-
#context instance - get or create
|
| 129 |
context = get_context(ctxid)
|
| 130 |
|
| 131 |
context.paused = paused
|
|
@@ -133,100 +158,107 @@ async def pause():
|
|
| 133 |
response = {
|
| 134 |
"ok": True,
|
| 135 |
"message": "Agent paused." if paused else "Agent unpaused.",
|
| 136 |
-
"pause": paused
|
| 137 |
-
}
|
| 138 |
-
|
| 139 |
except Exception as e:
|
| 140 |
response = {
|
| 141 |
"ok": False,
|
| 142 |
"message": str(e),
|
| 143 |
}
|
|
|
|
| 144 |
|
| 145 |
-
#respond with json
|
| 146 |
return jsonify(response)
|
| 147 |
|
|
|
|
| 148 |
# restarting with new agent0
|
| 149 |
-
@app.route(
|
| 150 |
async def reset():
|
| 151 |
try:
|
| 152 |
|
| 153 |
-
#data sent to the server
|
| 154 |
input = request.get_json()
|
| 155 |
ctxid = input.get("context", "")
|
| 156 |
|
| 157 |
-
#context instance - get or create
|
| 158 |
context = get_context(ctxid)
|
| 159 |
context.reset()
|
| 160 |
-
|
| 161 |
response = {
|
| 162 |
"ok": True,
|
| 163 |
"message": "Agent restarted.",
|
| 164 |
-
}
|
| 165 |
-
|
| 166 |
except Exception as e:
|
| 167 |
response = {
|
| 168 |
"ok": False,
|
| 169 |
"message": str(e),
|
| 170 |
}
|
|
|
|
| 171 |
|
| 172 |
-
#respond with json
|
| 173 |
return jsonify(response)
|
| 174 |
|
|
|
|
| 175 |
# killing context
|
| 176 |
-
@app.route(
|
| 177 |
async def remove():
|
| 178 |
try:
|
| 179 |
|
| 180 |
-
#data sent to the server
|
| 181 |
input = request.get_json()
|
| 182 |
ctxid = input.get("context", "")
|
| 183 |
|
| 184 |
-
#context instance - get or create
|
| 185 |
AgentContext.remove(ctxid)
|
| 186 |
-
|
| 187 |
response = {
|
| 188 |
"ok": True,
|
| 189 |
"message": "Context removed.",
|
| 190 |
-
}
|
| 191 |
-
|
| 192 |
except Exception as e:
|
| 193 |
response = {
|
| 194 |
"ok": False,
|
| 195 |
"message": str(e),
|
| 196 |
}
|
|
|
|
| 197 |
|
| 198 |
-
#respond with json
|
| 199 |
return jsonify(response)
|
| 200 |
|
|
|
|
| 201 |
# Web UI polling
|
| 202 |
-
@app.route(
|
| 203 |
async def poll():
|
| 204 |
try:
|
| 205 |
-
|
| 206 |
-
#data sent to the server
|
| 207 |
input = request.get_json()
|
| 208 |
ctxid = input.get("context", uuid.uuid4())
|
| 209 |
from_no = input.get("log_from", 0)
|
| 210 |
|
| 211 |
-
#context instance - get or create
|
| 212 |
context = get_context(ctxid)
|
| 213 |
|
| 214 |
-
|
| 215 |
logs = context.log.output(start=from_no)
|
| 216 |
|
| 217 |
# loop AgentContext._contexts
|
| 218 |
ctxs = []
|
| 219 |
for ctx in AgentContext._contexts.values():
|
| 220 |
-
ctxs.append(
|
| 221 |
-
|
| 222 |
-
|
| 223 |
-
|
| 224 |
-
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
|
| 228 |
-
|
| 229 |
-
|
|
|
|
|
|
|
| 230 |
response = {
|
| 231 |
"ok": True,
|
| 232 |
"context": context.id,
|
|
@@ -234,7 +266,8 @@ async def poll():
|
|
| 234 |
"logs": logs,
|
| 235 |
"log_guid": context.log.guid,
|
| 236 |
"log_version": len(context.log.updates),
|
| 237 |
-
"
|
|
|
|
| 238 |
}
|
| 239 |
|
| 240 |
except Exception as e:
|
|
@@ -242,23 +275,22 @@ async def poll():
|
|
| 242 |
"ok": False,
|
| 243 |
"message": str(e),
|
| 244 |
}
|
|
|
|
| 245 |
|
| 246 |
-
#respond with json
|
| 247 |
return jsonify(response)
|
| 248 |
|
| 249 |
|
| 250 |
-
|
| 251 |
-
#run the internal server
|
| 252 |
if __name__ == "__main__":
|
| 253 |
|
| 254 |
-
load_dotenv()
|
| 255 |
-
|
| 256 |
# Suppress only request logs but keep the startup messages
|
| 257 |
from werkzeug.serving import WSGIRequestHandler
|
|
|
|
| 258 |
class NoRequestLoggingWSGIRequestHandler(WSGIRequestHandler):
|
| 259 |
-
def log_request(self, code=
|
| 260 |
pass # Override to suppress request logging
|
| 261 |
|
| 262 |
# run the server on port from .env
|
| 263 |
port = int(os.environ.get("WEB_UI_PORT", 0)) or None
|
| 264 |
-
app.run(request_handler=NoRequestLoggingWSGIRequestHandler,port=port)
|
|
|
|
| 13 |
from python.helpers.log import Log
|
| 14 |
from dotenv import load_dotenv
|
| 15 |
|
| 16 |
+
load_dotenv()
|
| 17 |
|
| 18 |
+
# initialize the internal Flask server
|
| 19 |
+
app = Flask("app", static_folder=get_abs_path("./webui"), static_url_path="/")
|
| 20 |
lock = threading.Lock()
|
| 21 |
|
| 22 |
# Set up basic authentication, name and password from .env variables
|
| 23 |
+
app.config["BASIC_AUTH_USERNAME"] = (
|
| 24 |
+
os.environ.get("BASIC_AUTH_USERNAME") or "admin"
|
| 25 |
+
) # default name
|
| 26 |
+
app.config["BASIC_AUTH_PASSWORD"] = (
|
| 27 |
+
os.environ.get("BASIC_AUTH_PASSWORD") or "admin"
|
| 28 |
+
) # default pass
|
| 29 |
basic_auth = BasicAuth(app)
|
| 30 |
|
| 31 |
+
|
| 32 |
# get context to run agent zero in
|
| 33 |
+
def get_context(ctxid: str):
    """Return the AgentContext for *ctxid*, creating one when needed.

    An empty id maps to the first existing context (or a brand-new one if
    none exist yet); a concrete id returns the matching context or creates
    a fresh context registered under that id. Guarded by the module lock so
    concurrent requests cannot race on context creation.
    """
    with lock:
        if not ctxid:
            # No id supplied: reuse the first known context, else make one.
            existing = AgentContext.first()
            return existing if existing else AgentContext(config=initialize())
        # Id supplied: fetch it, or create a context under that exact id.
        return AgentContext.get(ctxid) or AgentContext(config=initialize(), id=ctxid)
|
| 44 |
+
|
| 45 |
|
| 46 |
# Now you can use @requires_auth function decorator to require login on certain pages
|
| 47 |
def requires_auth(f):
|
| 48 |
@wraps(f)
|
| 49 |
async def decorated(*args, **kwargs):
|
| 50 |
auth = request.authorization
|
| 51 |
+
if not auth or not (
|
| 52 |
+
auth.username == app.config["BASIC_AUTH_USERNAME"]
|
| 53 |
+
and auth.password == app.config["BASIC_AUTH_PASSWORD"]
|
| 54 |
+
):
|
| 55 |
return Response(
|
| 56 |
+
"Could not verify your access level for that URL.\n"
|
| 57 |
+
"You have to login with proper credentials",
|
| 58 |
+
401,
|
| 59 |
+
{"WWW-Authenticate": 'Basic realm="Login Required"'},
|
| 60 |
+
)
|
| 61 |
return await f(*args, **kwargs)
|
| 62 |
+
|
| 63 |
return decorated
|
| 64 |
|
| 65 |
+
|
| 66 |
# handle default address, show demo html page from ./test_form.html
|
| 67 |
+
@app.route("/", methods=["GET"])
|
| 68 |
async def test_form():
|
| 69 |
return Path(get_abs_path("./webui/index.html")).read_text()
|
| 70 |
|
| 71 |
+
|
| 72 |
# simple health check, just return OK to see the server is running
|
| 73 |
+
@app.route("/ok", methods=["GET", "POST"])
|
| 74 |
async def health_check():
|
| 75 |
return "OK"
|
| 76 |
|
| 77 |
+
|
| 78 |
# # secret page, requires authentication
|
| 79 |
# @app.route('/secret', methods=['GET'])
|
| 80 |
# @requires_auth
|
| 81 |
# async def secret_page():
|
| 82 |
# return Path("./secret_page.html").read_text()
|
| 83 |
|
| 84 |
+
|
| 85 |
# send message to agent (async UI)
|
| 86 |
+
@app.route("/msg", methods=["POST"])
|
| 87 |
async def handle_message_async():
|
| 88 |
return await handle_message(False)
|
| 89 |
|
| 90 |
+
|
| 91 |
# send message to agent (synchronous API)
|
| 92 |
+
@app.route("/msg_sync", methods=["POST"])
|
| 93 |
async def handle_msg_sync():
|
| 94 |
return await handle_message(True)
|
| 95 |
|
| 96 |
+
|
| 97 |
+
async def handle_message(sync: bool):
|
| 98 |
try:
|
| 99 |
+
|
| 100 |
+
# data sent to the server
|
| 101 |
input = request.get_json()
|
| 102 |
text = input.get("text", "")
|
| 103 |
ctxid = input.get("context", "")
|
| 104 |
blev = input.get("broadcast", 1)
|
| 105 |
|
| 106 |
+
# context instance - get or create
|
| 107 |
context = get_context(ctxid)
|
| 108 |
|
| 109 |
# print to console and log
|
| 110 |
+
PrintStyle(
|
| 111 |
+
background_color="#6C3483", font_color="white", bold=True, padding=True
|
| 112 |
+
).print(f"User message:")
|
| 113 |
PrintStyle(font_color="white", padding=False).print(f"> {text}")
|
| 114 |
context.log.log(type="user", heading="User message", content=text)
|
| 115 |
|
| 116 |
if sync:
|
| 117 |
context.communicate(text)
|
| 118 |
+
result = await context.process.result() # type: ignore
|
| 119 |
response = {
|
| 120 |
"ok": True,
|
| 121 |
"message": result,
|
| 122 |
+
}
|
| 123 |
else:
|
| 124 |
|
| 125 |
+
print("\n\n", (context.process and context.process.is_alive()))
|
| 126 |
context.communicate(text)
|
| 127 |
response = {
|
| 128 |
"ok": True,
|
|
|
|
| 134 |
"ok": False,
|
| 135 |
"message": str(e),
|
| 136 |
}
|
| 137 |
+
PrintStyle.error(str(e))
|
| 138 |
|
| 139 |
+
# respond with json
|
| 140 |
return jsonify(response)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
# pausing/unpausing the agent
|
| 144 |
+
@app.route("/pause", methods=["POST"])
|
| 145 |
async def pause():
|
| 146 |
try:
|
| 147 |
+
|
| 148 |
+
# data sent to the server
|
| 149 |
input = request.get_json()
|
| 150 |
paused = input.get("paused", False)
|
| 151 |
ctxid = input.get("context", "")
|
| 152 |
|
| 153 |
+
# context instance - get or create
|
| 154 |
context = get_context(ctxid)
|
| 155 |
|
| 156 |
context.paused = paused
|
|
|
|
| 158 |
response = {
|
| 159 |
"ok": True,
|
| 160 |
"message": "Agent paused." if paused else "Agent unpaused.",
|
| 161 |
+
"pause": paused,
|
| 162 |
+
}
|
| 163 |
+
|
| 164 |
except Exception as e:
|
| 165 |
response = {
|
| 166 |
"ok": False,
|
| 167 |
"message": str(e),
|
| 168 |
}
|
| 169 |
+
PrintStyle.error(str(e))
|
| 170 |
|
| 171 |
+
# respond with json
|
| 172 |
return jsonify(response)
|
| 173 |
|
| 174 |
+
|
| 175 |
# restarting with new agent0
|
| 176 |
+
@app.route("/reset", methods=["POST"])
|
| 177 |
async def reset():
|
| 178 |
try:
|
| 179 |
|
| 180 |
+
# data sent to the server
|
| 181 |
input = request.get_json()
|
| 182 |
ctxid = input.get("context", "")
|
| 183 |
|
| 184 |
+
# context instance - get or create
|
| 185 |
context = get_context(ctxid)
|
| 186 |
context.reset()
|
| 187 |
+
|
| 188 |
response = {
|
| 189 |
"ok": True,
|
| 190 |
"message": "Agent restarted.",
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
except Exception as e:
|
| 194 |
response = {
|
| 195 |
"ok": False,
|
| 196 |
"message": str(e),
|
| 197 |
}
|
| 198 |
+
PrintStyle.error(str(e))
|
| 199 |
|
| 200 |
+
# respond with json
|
| 201 |
return jsonify(response)
|
| 202 |
|
| 203 |
+
|
| 204 |
# killing context
|
| 205 |
+
@app.route("/remove", methods=["POST"])
|
| 206 |
async def remove():
|
| 207 |
try:
|
| 208 |
|
| 209 |
+
# data sent to the server
|
| 210 |
input = request.get_json()
|
| 211 |
ctxid = input.get("context", "")
|
| 212 |
|
| 213 |
+
# context instance - get or create
|
| 214 |
AgentContext.remove(ctxid)
|
| 215 |
+
|
| 216 |
response = {
|
| 217 |
"ok": True,
|
| 218 |
"message": "Context removed.",
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
except Exception as e:
|
| 222 |
response = {
|
| 223 |
"ok": False,
|
| 224 |
"message": str(e),
|
| 225 |
}
|
| 226 |
+
PrintStyle.error(str(e))
|
| 227 |
|
| 228 |
+
# respond with json
|
| 229 |
return jsonify(response)
|
| 230 |
|
| 231 |
+
|
| 232 |
# Web UI polling
|
| 233 |
+
@app.route("/poll", methods=["POST"])
|
| 234 |
async def poll():
|
| 235 |
try:
|
| 236 |
+
|
| 237 |
+
# data sent to the server
|
| 238 |
input = request.get_json()
|
| 239 |
ctxid = input.get("context", uuid.uuid4())
|
| 240 |
from_no = input.get("log_from", 0)
|
| 241 |
|
| 242 |
+
# context instance - get or create
|
| 243 |
context = get_context(ctxid)
|
| 244 |
|
|
|
|
| 245 |
logs = context.log.output(start=from_no)
|
| 246 |
|
| 247 |
# loop AgentContext._contexts
|
| 248 |
ctxs = []
|
| 249 |
for ctx in AgentContext._contexts.values():
|
| 250 |
+
ctxs.append(
|
| 251 |
+
{
|
| 252 |
+
"id": ctx.id,
|
| 253 |
+
"no": ctx.no,
|
| 254 |
+
"log_guid": ctx.log.guid,
|
| 255 |
+
"log_version": len(ctx.log.updates),
|
| 256 |
+
"log_length": len(ctx.log.logs),
|
| 257 |
+
"paused": ctx.paused,
|
| 258 |
+
}
|
| 259 |
+
)
|
| 260 |
+
|
| 261 |
+
# data from this server
|
| 262 |
response = {
|
| 263 |
"ok": True,
|
| 264 |
"context": context.id,
|
|
|
|
| 266 |
"logs": logs,
|
| 267 |
"log_guid": context.log.guid,
|
| 268 |
"log_version": len(context.log.updates),
|
| 269 |
+
"log_progress": context.log.progress,
|
| 270 |
+
"paused": context.paused,
|
| 271 |
}
|
| 272 |
|
| 273 |
except Exception as e:
|
|
|
|
| 275 |
"ok": False,
|
| 276 |
"message": str(e),
|
| 277 |
}
|
| 278 |
+
PrintStyle.error(str(e))
|
| 279 |
|
| 280 |
+
# respond with json
|
| 281 |
return jsonify(response)
|
| 282 |
|
| 283 |
|
| 284 |
+
# run the internal server
|
|
|
|
| 285 |
if __name__ == "__main__":
|
| 286 |
|
|
|
|
|
|
|
| 287 |
# Suppress only request logs but keep the startup messages
|
| 288 |
from werkzeug.serving import WSGIRequestHandler
|
| 289 |
+
|
| 290 |
class NoRequestLoggingWSGIRequestHandler(WSGIRequestHandler):
|
| 291 |
+
def log_request(self, code="-", size="-"):
|
| 292 |
pass # Override to suppress request logging
|
| 293 |
|
| 294 |
# run the server on port from .env
|
| 295 |
port = int(os.environ.get("WEB_UI_PORT", 0)) or None
|
| 296 |
+
app.run(request_handler=NoRequestLoggingWSGIRequestHandler, port=port)
|
test.py
DELETED
|
@@ -1,293 +0,0 @@
|
|
| 1 |
-
from playwright.async_api import async_playwright, TimeoutError as PlaywrightTimeoutError
|
| 2 |
-
from python.helpers.print_style import PrintStyle
|
| 3 |
-
import asyncio
|
| 4 |
-
import random
|
| 5 |
-
import re
|
| 6 |
-
|
| 7 |
-
# List of user agents
|
| 8 |
-
user_agents = [
|
| 9 |
-
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
|
| 10 |
-
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15',
|
| 11 |
-
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
|
| 12 |
-
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36',
|
| 13 |
-
'Mozilla/5.0 (iPhone; CPU iPhone OS 14_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Mobile/15E148 Safari/604.1',
|
| 14 |
-
'Mozilla/5.0 (iPad; CPU OS 14_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/91.0.4472.80 Mobile/15E148 Safari/604.1',
|
| 15 |
-
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Edg/91.0.864.59',
|
| 16 |
-
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'
|
| 17 |
-
]
|
| 18 |
-
|
| 19 |
-
async def scrape_page(url, headless=True):
|
| 20 |
-
try:
|
| 21 |
-
async with async_playwright() as p:
|
| 22 |
-
browser = await p.chromium.launch(headless=headless)
|
| 23 |
-
context = await browser.new_context(
|
| 24 |
-
user_agent=random.choice(user_agents)
|
| 25 |
-
)
|
| 26 |
-
page = await context.new_page()
|
| 27 |
-
page.set_default_timeout(30000)
|
| 28 |
-
|
| 29 |
-
await page.goto(url, wait_until='domcontentloaded', timeout=30000)
|
| 30 |
-
await page.wait_for_selector('body', timeout=5000)
|
| 31 |
-
|
| 32 |
-
result = await page.evaluate(r'''() => {
|
| 33 |
-
const cleanText = (text) => {
|
| 34 |
-
return text
|
| 35 |
-
.replace(/class="[^"]*"/g, '')
|
| 36 |
-
.replace(/id="[^"]*"/g, '')
|
| 37 |
-
.replace(/<[^>]+>/g, '')
|
| 38 |
-
.replace(/\s+/g, ' ')
|
| 39 |
-
.trim();
|
| 40 |
-
};
|
| 41 |
-
const getMetaContent = (name) => {
|
| 42 |
-
const meta = document.querySelector(`meta[name="${name}"], meta[property="${name}"]`);
|
| 43 |
-
return meta ? cleanText(meta.content) : null;
|
| 44 |
-
};
|
| 45 |
-
const getMainContent = () => {
|
| 46 |
-
const contentSelectors = [
|
| 47 |
-
'main', '#content', '#main-content', '.content',
|
| 48 |
-
'.post-content', '.entry-content', '.article-content',
|
| 49 |
-
'#mw-content-text', '.mw-parser-output'
|
| 50 |
-
];
|
| 51 |
-
for (const selector of contentSelectors) {
|
| 52 |
-
const element = document.querySelector(selector);
|
| 53 |
-
if (element) {
|
| 54 |
-
return cleanText(element.innerText);
|
| 55 |
-
}
|
| 56 |
-
}
|
| 57 |
-
return cleanText(document.body.innerText);
|
| 58 |
-
};
|
| 59 |
-
const getProducts = () => {
|
| 60 |
-
const productSelectors = [
|
| 61 |
-
'.s-result-item', '.product-item', '[data-component-type="s-search-result"]', '.sg-col-inner'
|
| 62 |
-
];
|
| 63 |
-
for (const selector of productSelectors) {
|
| 64 |
-
const elements = document.querySelectorAll(selector);
|
| 65 |
-
if (elements.length > 0) {
|
| 66 |
-
return Array.from(elements).slice(0, 10).map(product => {
|
| 67 |
-
const titleElement = product.querySelector('h2 a, .a-link-normal.a-text-normal');
|
| 68 |
-
const priceElement = product.querySelector('.a-price .a-offscreen, .a-price');
|
| 69 |
-
const imageElement = product.querySelector('img.s-image');
|
| 70 |
-
let price = priceElement ? cleanText(priceElement.textContent) : null;
|
| 71 |
-
// Remove duplicate price
|
| 72 |
-
price = price ? price.replace(/(\$\d+(?:\.\d{2}?))\1+/, '$1') : null;
|
| 73 |
-
return {
|
| 74 |
-
title: titleElement ? cleanText(titleElement.textContent) : null,
|
| 75 |
-
link: titleElement ? titleElement.href : null,
|
| 76 |
-
price: price,
|
| 77 |
-
image: imageElement ? imageElement.src : null
|
| 78 |
-
};
|
| 79 |
-
}).filter(product => product.title && product.link);
|
| 80 |
-
}
|
| 81 |
-
}
|
| 82 |
-
return [];
|
| 83 |
-
};
|
| 84 |
-
const getNavigation = () => {
|
| 85 |
-
const navSelectors = ['nav', 'header', '[data-cy="header-nav"]'];
|
| 86 |
-
for (const selector of navSelectors) {
|
| 87 |
-
const navElement = document.querySelector(selector);
|
| 88 |
-
if (navElement) {
|
| 89 |
-
return Array.from(navElement.querySelectorAll('a'))
|
| 90 |
-
.filter(link => !link.href.endsWith('.svg'))
|
| 91 |
-
.slice(0, 10) // Limit to 10 items
|
| 92 |
-
.map(link => ({
|
| 93 |
-
href: link.href,
|
| 94 |
-
text: cleanText(link.textContent)
|
| 95 |
-
}));
|
| 96 |
-
}
|
| 97 |
-
}
|
| 98 |
-
return [];
|
| 99 |
-
};
|
| 100 |
-
const getImages = () => {
|
| 101 |
-
return Array.from(document.querySelectorAll('img'))
|
| 102 |
-
.filter(img => !img.src.endsWith('.svg'))
|
| 103 |
-
.map(img => ({
|
| 104 |
-
src: img.src,
|
| 105 |
-
alt: img.alt,
|
| 106 |
-
}));
|
| 107 |
-
};
|
| 108 |
-
const getLists = () => {
|
| 109 |
-
const listSelectors = [
|
| 110 |
-
'.mw-parser-output > ul', '.mw-parser-output > ol',
|
| 111 |
-
'[data-cy="main-content"] ul', '[data-cy="main-content"] ol',
|
| 112 |
-
'main ul', 'main ol',
|
| 113 |
-
'.content ul', '.content ol',
|
| 114 |
-
'#content ul', '#content ol'
|
| 115 |
-
];
|
| 116 |
-
let lists = [];
|
| 117 |
-
for (const selector of listSelectors) {
|
| 118 |
-
const elements = document.querySelectorAll(selector);
|
| 119 |
-
if (elements.length > 0) {
|
| 120 |
-
lists = Array.from(elements)
|
| 121 |
-
.filter(list => list.children.length >= 3 && list.children.length <= 20)
|
| 122 |
-
.map(list => ({
|
| 123 |
-
type: list.tagName.toLowerCase(),
|
| 124 |
-
items: Array.from(list.children)
|
| 125 |
-
.filter(li => li.textContent.trim().length > 10)
|
| 126 |
-
.map(li => {
|
| 127 |
-
// Remove CSS classes, IDs, and inline styles
|
| 128 |
-
let text = li.textContent.replace(/\s+/g, ' ').trim();
|
| 129 |
-
// Remove any remaining HTML tags
|
| 130 |
-
text = text.replace(/<[^>]+>/g, '');
|
| 131 |
-
// Remove CSS-related content
|
| 132 |
-
text = text.replace(/\.mw-parser-output[^{]+\{[^}]+\}/g, '');
|
| 133 |
-
// Remove ISBN prefix if present
|
| 134 |
-
text = text.replace(/^ISBN\s+/, '');
|
| 135 |
-
return text;
|
| 136 |
-
})
|
| 137 |
-
.filter(text => text.length > 0 && !text.includes('mw-parser-output'))
|
| 138 |
-
.slice(0, 10)
|
| 139 |
-
}))
|
| 140 |
-
.filter(list => list.items.length >= 3)
|
| 141 |
-
.slice(0, 3);
|
| 142 |
-
if (lists.length > 0) break;
|
| 143 |
-
}
|
| 144 |
-
}
|
| 145 |
-
return lists;
|
| 146 |
-
};
|
| 147 |
-
const getSocialMediaLinks = () => {
|
| 148 |
-
const socialSelectors = [
|
| 149 |
-
'a[href*="facebook.com"]',
|
| 150 |
-
'a[href*="twitter.com"]',
|
| 151 |
-
'a[href*="x.com"]',
|
| 152 |
-
'a[href*="github.com"]',
|
| 153 |
-
'a[href*="reddit.com"]',
|
| 154 |
-
'a[href*="tiktok.com"]',
|
| 155 |
-
'a[href*="discord.com"]',
|
| 156 |
-
'a[href*="instagram.com"]',
|
| 157 |
-
'a[href*="linkedin.com"]',
|
| 158 |
-
'a[href*="youtube.com"]',
|
| 159 |
-
'a[href*="pinterest.com"]',
|
| 160 |
-
'a[href*="snapchat.com"]',
|
| 161 |
-
'a[href*="tumblr.com"]',
|
| 162 |
-
'a[href*="medium.com"]',
|
| 163 |
-
'a[href*="whatsapp.com"]',
|
| 164 |
-
'a[href*="telegram.org"]',
|
| 165 |
-
'a[href*="vimeo.com"]',
|
| 166 |
-
'a[href*="flickr.com"]',
|
| 167 |
-
'a[href*="quora.com"]',
|
| 168 |
-
'a[href*="twitch.tv"]'
|
| 169 |
-
];
|
| 170 |
-
return socialSelectors.map(selector => {
|
| 171 |
-
const element = document.querySelector(selector);
|
| 172 |
-
return element ? element.href : null;
|
| 173 |
-
}).filter(Boolean);
|
| 174 |
-
};
|
| 175 |
-
const getCodeScripts = () => {
|
| 176 |
-
const codeBlocks = document.querySelectorAll('pre code, .highlight pre, .code-block');
|
| 177 |
-
return Array.from(codeBlocks).map(block => ({
|
| 178 |
-
language: block.className.match(/language-(\w+)/)?.[1] || 'text',
|
| 179 |
-
code: cleanText(block.textContent)
|
| 180 |
-
})).filter(script => script.code.length > 0);
|
| 181 |
-
};
|
| 182 |
-
return {
|
| 183 |
-
url: window.location.href,
|
| 184 |
-
title: document.title,
|
| 185 |
-
author: getMetaContent('author'),
|
| 186 |
-
publishDate: getMetaContent('article:published_time') || getMetaContent('date'),
|
| 187 |
-
lastModified: document.lastModified,
|
| 188 |
-
keywords: getMetaContent('keywords'),
|
| 189 |
-
metaDescription: getMetaContent('description') || getMetaContent('og:description'),
|
| 190 |
-
mainContent: getMainContent(),
|
| 191 |
-
navigation: getNavigation(),
|
| 192 |
-
images: getImages(),
|
| 193 |
-
lists: getLists(),
|
| 194 |
-
products: getProducts(),
|
| 195 |
-
socialMediaLinks: getSocialMediaLinks(),
|
| 196 |
-
codeScripts: getCodeScripts(),
|
| 197 |
-
};
|
| 198 |
-
}''')
|
| 199 |
-
|
| 200 |
-
await browser.close()
|
| 201 |
-
return result
|
| 202 |
-
except PlaywrightTimeoutError as e:
|
| 203 |
-
PrintStyle(font_color="yellow", padding=True).print(f"Attempt {attempt + 1} failed due to timeout: {str(e)}")
|
| 204 |
-
except Exception as e:
|
| 205 |
-
PrintStyle(font_color="red", padding=True).print(f"Attempt {attempt + 1} failed: {str(e)}")
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
async def scrape_url(url, headless):
|
| 209 |
-
try:
|
| 210 |
-
result = await scrape_page(url, headless)
|
| 211 |
-
|
| 212 |
-
if result is None:
|
| 213 |
-
raise ValueError("Scraping result is None")
|
| 214 |
-
|
| 215 |
-
markdown_content = f"# {result.get('title', 'No Title')}\n\n"
|
| 216 |
-
|
| 217 |
-
if result.get('url'):
|
| 218 |
-
markdown_content += f"URL: {result['url']}\n\n"
|
| 219 |
-
|
| 220 |
-
if result.get('author'):
|
| 221 |
-
markdown_content += f"Author: {result['author']}\n\n"
|
| 222 |
-
|
| 223 |
-
if result.get('publishDate'):
|
| 224 |
-
markdown_content += f"Published: {result['publishDate']}\n\n"
|
| 225 |
-
|
| 226 |
-
if result.get('keywords'):
|
| 227 |
-
markdown_content += f"Keywords: {result['keywords']}\n\n"
|
| 228 |
-
|
| 229 |
-
if result.get('metaDescription'):
|
| 230 |
-
markdown_content += f"## Description\n\n{result['metaDescription']}\n\n"
|
| 231 |
-
|
| 232 |
-
if result.get('mainContent'):
|
| 233 |
-
markdown_content += "## Webpage Content:\n\n"
|
| 234 |
-
markdown_content += result['mainContent'] + "\n\n"
|
| 235 |
-
|
| 236 |
-
if result.get('lists'):
|
| 237 |
-
markdown_content += "## Lists\n\n"
|
| 238 |
-
for list_item in result['lists']:
|
| 239 |
-
markdown_content += f"### {list_item['type'].upper()} List\n\n"
|
| 240 |
-
for item in list_item['items']:
|
| 241 |
-
markdown_content += f"- {item}\n"
|
| 242 |
-
markdown_content += "\n"
|
| 243 |
-
|
| 244 |
-
if result.get('products'):
|
| 245 |
-
markdown_content += "## Products\n\n"
|
| 246 |
-
for product in result['products']:
|
| 247 |
-
markdown_content += f"### {product.get('title', 'Untitled Product')}\n\n"
|
| 248 |
-
if product.get('price'):
|
| 249 |
-
price = product['price']
|
| 250 |
-
price = re.sub(r'(\$\d+(?:\.\d{2})?)\1+', r'\1', price)
|
| 251 |
-
markdown_content += f"Price: {price}\n\n"
|
| 252 |
-
if product.get('link'):
|
| 253 |
-
markdown_content += f"[View Product]({product['link']})\n\n"
|
| 254 |
-
if product.get('image'):
|
| 255 |
-
markdown_content += f"\n\n"
|
| 256 |
-
markdown_content += "---\n\n"
|
| 257 |
-
|
| 258 |
-
if result.get('socialMediaLinks'):
|
| 259 |
-
markdown_content += "## Social Media Links\n\n"
|
| 260 |
-
for link in result['socialMediaLinks']:
|
| 261 |
-
markdown_content += f"- [{link.split('.com')[0].split('/')[-1].capitalize()}]({link})\n"
|
| 262 |
-
markdown_content += "\n"
|
| 263 |
-
|
| 264 |
-
if result.get('codeScripts'):
|
| 265 |
-
markdown_content += "## Code Snippet\n\n"
|
| 266 |
-
for script in result['codeScripts']:
|
| 267 |
-
markdown_content += f"```{script['language']}\n{script['code']}\n```\n\n"
|
| 268 |
-
|
| 269 |
-
markdown_content = markdown_content.strip()
|
| 270 |
-
|
| 271 |
-
return markdown_content
|
| 272 |
-
except Exception as e:
|
| 273 |
-
return f"Error: Failed to scrape URL. Reason: {str(e)}"
|
| 274 |
-
|
| 275 |
-
async def fetch_page_content(url: str, max_retries: int = 2, headless: bool = True):
|
| 276 |
-
for attempt in range(max_retries):
|
| 277 |
-
try:
|
| 278 |
-
content = await scrape_url(url, headless)
|
| 279 |
-
if content.startswith("Error:"):
|
| 280 |
-
raise Exception(content)
|
| 281 |
-
return str(content)
|
| 282 |
-
except Exception as e:
|
| 283 |
-
PrintStyle(font_color="red", padding=True).print(f"Attempt {attempt + 1} failed: {str(e)}")
|
| 284 |
-
|
| 285 |
-
raise Exception("Error: Webpage content is not available.")
|
| 286 |
-
|
| 287 |
-
|
| 288 |
-
async def test():
|
| 289 |
-
url = "https://github.com/frdel/agent-zero"
|
| 290 |
-
content = await fetch_page_content(url)
|
| 291 |
-
print(content)
|
| 292 |
-
|
| 293 |
-
asyncio.run(test())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
test2.py
DELETED
|
@@ -1,120 +0,0 @@
|
|
| 1 |
-
import requests
|
| 2 |
-
from bs4 import BeautifulSoup
|
| 3 |
-
import random
|
| 4 |
-
import re
|
| 5 |
-
|
| 6 |
-
# List of user agents
|
| 7 |
-
user_agents = [
|
| 8 |
-
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
|
| 9 |
-
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15',
|
| 10 |
-
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
|
| 11 |
-
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36',
|
| 12 |
-
'Mozilla/5.0 (iPhone; CPU iPhone OS 14_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Mobile/15E148 Safari/604.1',
|
| 13 |
-
'Mozilla/5.0 (iPad; CPU OS 14_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/91.0.4472.80 Mobile/15E148 Safari/604.1',
|
| 14 |
-
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Edg/91.0.864.59',
|
| 15 |
-
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'
|
| 16 |
-
]
|
| 17 |
-
|
| 18 |
-
def clean_text(text):
|
| 19 |
-
"""Utility function to clean HTML tags and whitespace."""
|
| 20 |
-
text = re.sub(r'class="[^"]*"', '', text)
|
| 21 |
-
text = re.sub(r'id="[^"]*"', '', text)
|
| 22 |
-
text = re.sub(r'<[^>]+>', '', text)
|
| 23 |
-
return re.sub(r'\s+', ' ', text).strip()
|
| 24 |
-
|
| 25 |
-
def scrape_page(url):
|
| 26 |
-
headers = {
|
| 27 |
-
'User-Agent': random.choice(user_agents)
|
| 28 |
-
}
|
| 29 |
-
|
| 30 |
-
try:
|
| 31 |
-
response = requests.get(url, headers=headers, timeout=30)
|
| 32 |
-
response.raise_for_status() # Check if the request was successful
|
| 33 |
-
soup = BeautifulSoup(response.content, 'html.parser')
|
| 34 |
-
|
| 35 |
-
# Extract title
|
| 36 |
-
title = soup.title.string if soup.title else 'No Title'
|
| 37 |
-
|
| 38 |
-
# Extract meta information
|
| 39 |
-
def get_meta_content(name):
|
| 40 |
-
meta = soup.find('meta', attrs={'name': name}) or soup.find('meta', property=name)
|
| 41 |
-
return meta['content'] if meta else None
|
| 42 |
-
|
| 43 |
-
author = get_meta_content('author')
|
| 44 |
-
publish_date = get_meta_content('article:published_time') or get_meta_content('date')
|
| 45 |
-
keywords = get_meta_content('keywords')
|
| 46 |
-
description = get_meta_content('description') or get_meta_content('og:description')
|
| 47 |
-
|
| 48 |
-
# Extract main content
|
| 49 |
-
def get_main_content():
|
| 50 |
-
content_selectors = ['main', '#content', '#main-content', '.content', '.post-content', '.entry-content', '.article-content', '#mw-content-text', '.mw-parser-output']
|
| 51 |
-
for selector in content_selectors:
|
| 52 |
-
element = soup.select_one(selector)
|
| 53 |
-
if element:
|
| 54 |
-
return clean_text(element.get_text())
|
| 55 |
-
return clean_text(soup.body.get_text() if soup.body else "")
|
| 56 |
-
|
| 57 |
-
# Extract images
|
| 58 |
-
def get_images():
|
| 59 |
-
images = []
|
| 60 |
-
for img in soup.find_all('img'):
|
| 61 |
-
if not img.get('src', '').endswith('.svg'):
|
| 62 |
-
images.append({
|
| 63 |
-
'src': img['src'],
|
| 64 |
-
'alt': img.get('alt', '')
|
| 65 |
-
})
|
| 66 |
-
return images
|
| 67 |
-
|
| 68 |
-
# Extract product listings
|
| 69 |
-
def get_products():
|
| 70 |
-
products = []
|
| 71 |
-
product_selectors = ['.s-result-item', '.product-item', '[data-component-type="s-search-result"]', '.sg-col-inner']
|
| 72 |
-
for selector in product_selectors:
|
| 73 |
-
elements = soup.select(selector)
|
| 74 |
-
if elements:
|
| 75 |
-
for product in elements[:10]:
|
| 76 |
-
title_element = product.select_one('h2 a, .a-link-normal.a-text-normal')
|
| 77 |
-
price_element = product.select_one('.a-price .a-offscreen, .a-price')
|
| 78 |
-
image_element = product.select_one('img.s-image')
|
| 79 |
-
price = clean_text(price_element.text) if price_element else None
|
| 80 |
-
price = re.sub(r'(\$\d+(?:\.\d{2})?)\1+', r'\1', price) if price else None
|
| 81 |
-
products.append({
|
| 82 |
-
'title': clean_text(title_element.text) if title_element else None,
|
| 83 |
-
'link': title_element['href'] if title_element else None,
|
| 84 |
-
'price': price,
|
| 85 |
-
'image': image_element['src'] if image_element else None
|
| 86 |
-
})
|
| 87 |
-
return products
|
| 88 |
-
|
| 89 |
-
# Extract lists
|
| 90 |
-
def get_lists():
|
| 91 |
-
lists = []
|
| 92 |
-
list_selectors = ['.mw-parser-output > ul', '.mw-parser-output > ol', 'main ul', 'main ol', '.content ul', '.content ol']
|
| 93 |
-
for selector in list_selectors:
|
| 94 |
-
elements = soup.select(selector)
|
| 95 |
-
for element in elements:
|
| 96 |
-
list_items = [clean_text(li.get_text()) for li in element.find_all('li') if len(li.get_text().strip()) > 10]
|
| 97 |
-
if 3 <= len(list_items) <= 20:
|
| 98 |
-
lists.append({'type': element.name, 'items': list_items[:10]})
|
| 99 |
-
return lists
|
| 100 |
-
|
| 101 |
-
# Gather all extracted data
|
| 102 |
-
return {
|
| 103 |
-
'url': url,
|
| 104 |
-
'title': title,
|
| 105 |
-
'author': author,
|
| 106 |
-
'publishDate': publish_date,
|
| 107 |
-
'keywords': keywords,
|
| 108 |
-
'metaDescription': description,
|
| 109 |
-
'mainContent': get_main_content(),
|
| 110 |
-
'images': get_images(),
|
| 111 |
-
'products': get_products(),
|
| 112 |
-
'lists': get_lists()
|
| 113 |
-
}
|
| 114 |
-
|
| 115 |
-
except requests.exceptions.RequestException as e:
|
| 116 |
-
return {"error": str(e)}
|
| 117 |
-
|
| 118 |
-
# Example usage:
|
| 119 |
-
result = scrape_page('https://github.com/frdel/agent-zero')
|
| 120 |
-
print(result)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
webui/index.css
CHANGED
|
@@ -175,7 +175,8 @@ h3 {
|
|
| 175 |
margin-top: var(--spacing-lg);
|
| 176 |
}
|
| 177 |
h4 {
|
| 178 |
-
margin-top:
|
|
|
|
| 179 |
}
|
| 180 |
|
| 181 |
#a0version {
|
|
@@ -239,12 +240,44 @@ h4 {
|
|
| 239 |
|
| 240 |
#logo-container img {
|
| 241 |
border-radius: var(--spacing-xs);
|
| 242 |
-
|
| 243 |
-
|
| 244 |
filter: none;
|
| 245 |
transition: filter 0.3s ease;
|
| 246 |
}
|
| 247 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 248 |
#right-panel.expanded #logo-container {
|
| 249 |
margin-left: 5.5rem;
|
| 250 |
}
|
|
@@ -271,6 +304,11 @@ h4 {
|
|
| 271 |
align-self: flex-start;
|
| 272 |
}
|
| 273 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 274 |
.message-user {
|
| 275 |
background-color: #4a4a4a;
|
| 276 |
border-bottom-right-radius: var(--spacing-xs);
|
|
@@ -281,10 +319,28 @@ h4 {
|
|
| 281 |
}
|
| 282 |
|
| 283 |
/* Message Types */
|
| 284 |
-
.message-fw {
|
| 285 |
border-radius: var(--border-radius);
|
| 286 |
border-top-left-radius: var(--spacing-xs);
|
| 287 |
margin-left: var(--spacing-lg);
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 288 |
}
|
| 289 |
|
| 290 |
/* Update message types for dark mode */
|
|
@@ -294,8 +350,8 @@ h4 {
|
|
| 294 |
.message-agent-delegation { background-color: #00695C; color: #E0E0E0; }
|
| 295 |
.message-tool { background-color: #5D4C6A; color: #E0E0E0; }
|
| 296 |
.message-code-exe { background-color: #3a147c; color: #E0E0E0; }
|
| 297 |
-
.message-adhoc { background-color: #cf7b0e; color: #E0E0E0; }
|
| 298 |
.message-info { background-color: var(--color-panel); color: #E0E0E0; }
|
|
|
|
| 299 |
.message-warning { background-color: #c2771b; color: #E0E0E0; }
|
| 300 |
.message-error { background-color: #ab1313; color: #E0E0E0; }
|
| 301 |
|
|
@@ -306,22 +362,23 @@ h4 {
|
|
| 306 |
margin-bottom: var(--spacing-xs);
|
| 307 |
opacity: 0.7;
|
| 308 |
}
|
| 309 |
-
|
| 310 |
-
|
|
|
|
| 311 |
border-collapse: collapse;
|
| 312 |
font-size: 0.9em;
|
| 313 |
margin-bottom: var(--spacing-sm);
|
| 314 |
width: 100%;
|
| 315 |
}
|
| 316 |
|
| 317 |
-
.
|
| 318 |
-
.
|
| 319 |
border-bottom: 1px solid rgba(255, 255, 255, 0.1);
|
| 320 |
padding: 0.25rem;
|
| 321 |
text-align: left;
|
| 322 |
}
|
| 323 |
|
| 324 |
-
.
|
| 325 |
color: var(--color-primary);
|
| 326 |
width: 40%;
|
| 327 |
}
|
|
@@ -564,8 +621,8 @@ input:checked + .slider:before {
|
|
| 564 |
.light-mode .message-agent-delegation { background-color: #E0F2F1; color: #00695C; }
|
| 565 |
.light-mode .message-tool { background-color: #EDE7F6; color: #5D4C6A; }
|
| 566 |
.light-mode .message-code-exe { background-color: #FCE4EC; color: #3a147c; }
|
| 567 |
-
.light-mode .message-adhoc { background-color: #FFF3E0; color: #cf7b0e; }
|
| 568 |
.light-mode .message-info { background-color: #E8EAF6; color: #2C3E50; }
|
|
|
|
| 569 |
.light-mode .message-warning { background-color: #FFF3E0; color: #c2771b; }
|
| 570 |
.light-mode .message-error { background-color: #FFEBEE; color: #ab1313; }
|
| 571 |
.light-mode .message-user {
|
|
@@ -663,14 +720,21 @@ input:checked + .slider:before {
|
|
| 663 |
}
|
| 664 |
|
| 665 |
.msg-thoughts{
|
| 666 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 667 |
}
|
| 668 |
|
| 669 |
.message-temp {
|
| 670 |
display: none;
|
| 671 |
}
|
| 672 |
|
| 673 |
-
.message-temp:last-
|
| 674 |
display: block; /* or any style you want for visibility */
|
| 675 |
}
|
| 676 |
|
|
|
|
| 175 |
margin-top: var(--spacing-lg);
|
| 176 |
}
|
| 177 |
h4 {
|
| 178 |
+
margin-top: auto;
|
| 179 |
+
margin-bottom: auto;
|
| 180 |
}
|
| 181 |
|
| 182 |
#a0version {
|
|
|
|
| 240 |
|
| 241 |
#logo-container img {
|
| 242 |
border-radius: var(--spacing-xs);
|
| 243 |
+
width: auto;
|
| 244 |
+
height: 3rem;
|
| 245 |
filter: none;
|
| 246 |
transition: filter 0.3s ease;
|
| 247 |
}
|
| 248 |
|
| 249 |
+
#progress-bar-box{
|
| 250 |
+
background-color: var(--color-panel);
|
| 251 |
+
/* padding-left: 1em;
|
| 252 |
+
padding-right: 1em;
|
| 253 |
+
padding-top: 0.5em;
|
| 254 |
+
padding-bottom: 0; */
|
| 255 |
+
padding: var(--spacing-sm) var(--spacing-md);
|
| 256 |
+
padding-bottom: 0;
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
#progress-bar-h {
|
| 260 |
+
color: var(--color-primary);
|
| 261 |
+
display: flex;
|
| 262 |
+
align-items: left;
|
| 263 |
+
justify-content: flex-start;
|
| 264 |
+
height: 1.2em;
|
| 265 |
+
text-wrap: ellipsis;
|
| 266 |
+
overflow: hidden;
|
| 267 |
+
font-weight: normal;
|
| 268 |
+
}
|
| 269 |
+
|
| 270 |
+
#progress-bar-i {
|
| 271 |
+
font-weight: bold;
|
| 272 |
+
padding-right: 0.5em;
|
| 273 |
+
color: var(--color-secondary)
|
| 274 |
+
}
|
| 275 |
+
|
| 276 |
+
.progress-bar h4{
|
| 277 |
+
margin-left: 1em;
|
| 278 |
+
margin-right: 1.2em;
|
| 279 |
+
}
|
| 280 |
+
|
| 281 |
#right-panel.expanded #logo-container {
|
| 282 |
margin-left: 5.5rem;
|
| 283 |
}
|
|
|
|
| 304 |
align-self: flex-start;
|
| 305 |
}
|
| 306 |
|
| 307 |
+
.center-container {
|
| 308 |
+
align-self: center;
|
| 309 |
+
max-width: 80%;
|
| 310 |
+
}
|
| 311 |
+
|
| 312 |
.message-user {
|
| 313 |
background-color: #4a4a4a;
|
| 314 |
border-bottom-right-radius: var(--spacing-xs);
|
|
|
|
| 319 |
}
|
| 320 |
|
| 321 |
/* Message Types */
|
| 322 |
+
/* .message-fw {
|
| 323 |
border-radius: var(--border-radius);
|
| 324 |
border-top-left-radius: var(--spacing-xs);
|
| 325 |
margin-left: var(--spacing-lg);
|
| 326 |
+
} */
|
| 327 |
+
|
| 328 |
+
.message-center{
|
| 329 |
+
align-self: center;
|
| 330 |
+
border-bottom-left-radius: unset;
|
| 331 |
+
}
|
| 332 |
+
|
| 333 |
+
.message-followup{
|
| 334 |
+
margin-left: 2em;
|
| 335 |
+
margin-bottom: 2em;
|
| 336 |
+
}
|
| 337 |
+
.message-followup .message{
|
| 338 |
+
border-radius: 1.125em; /* 18px */
|
| 339 |
+
border-top-left-radius: 0.3125em; /* 5px */
|
| 340 |
+
}
|
| 341 |
+
|
| 342 |
+
.message-followup + .message-followup {
|
| 343 |
+
margin-bottom: 0;
|
| 344 |
}
|
| 345 |
|
| 346 |
/* Update message types for dark mode */
|
|
|
|
| 350 |
.message-agent-delegation { background-color: #00695C; color: #E0E0E0; }
|
| 351 |
.message-tool { background-color: #5D4C6A; color: #E0E0E0; }
|
| 352 |
.message-code-exe { background-color: #3a147c; color: #E0E0E0; }
|
|
|
|
| 353 |
.message-info { background-color: var(--color-panel); color: #E0E0E0; }
|
| 354 |
+
.message-util { background-color: #23211a; color: #E0E0E0; display:none }
|
| 355 |
.message-warning { background-color: #c2771b; color: #E0E0E0; }
|
| 356 |
.message-error { background-color: #ab1313; color: #E0E0E0; }
|
| 357 |
|
|
|
|
| 362 |
margin-bottom: var(--spacing-xs);
|
| 363 |
opacity: 0.7;
|
| 364 |
}
|
| 365 |
+
.msg-kvps {
|
| 366 |
+
font-size: 0.9em;
|
| 367 |
+
margin-bottom: 0.625em; /* 10px */
|
| 368 |
border-collapse: collapse;
|
| 369 |
font-size: 0.9em;
|
| 370 |
margin-bottom: var(--spacing-sm);
|
| 371 |
width: 100%;
|
| 372 |
}
|
| 373 |
|
| 374 |
+
.msg-kvps th,
|
| 375 |
+
.msg-kvps td {
|
| 376 |
border-bottom: 1px solid rgba(255, 255, 255, 0.1);
|
| 377 |
padding: 0.25rem;
|
| 378 |
text-align: left;
|
| 379 |
}
|
| 380 |
|
| 381 |
+
.msg-kvps th {
|
| 382 |
color: var(--color-primary);
|
| 383 |
width: 40%;
|
| 384 |
}
|
|
|
|
| 621 |
.light-mode .message-agent-delegation { background-color: #E0F2F1; color: #00695C; }
|
| 622 |
.light-mode .message-tool { background-color: #EDE7F6; color: #5D4C6A; }
|
| 623 |
.light-mode .message-code-exe { background-color: #FCE4EC; color: #3a147c; }
|
|
|
|
| 624 |
.light-mode .message-info { background-color: #E8EAF6; color: #2C3E50; }
|
| 625 |
+
.light-mode .message-util { background-color: #e8eaf6d6; color: #353c43; }
|
| 626 |
.light-mode .message-warning { background-color: #FFF3E0; color: #c2771b; }
|
| 627 |
.light-mode .message-error { background-color: #FFEBEE; color: #ab1313; }
|
| 628 |
.light-mode .message-user {
|
|
|
|
| 720 |
}
|
| 721 |
|
| 722 |
.msg-thoughts{
|
| 723 |
+
display: auto;
|
| 724 |
+
}
|
| 725 |
+
|
| 726 |
+
.message-util .msg-kvps{
|
| 727 |
+
}
|
| 728 |
+
|
| 729 |
+
.message-util .msg-content{
|
| 730 |
+
|
| 731 |
}
|
| 732 |
|
| 733 |
.message-temp {
|
| 734 |
display: none;
|
| 735 |
}
|
| 736 |
|
| 737 |
+
.message-temp:not([style*="display: none"]):last-of-type {
|
| 738 |
display: block; /* or any style you want for visibility */
|
| 739 |
}
|
| 740 |
|
webui/index.html
CHANGED
|
@@ -6,6 +6,7 @@
|
|
| 6 |
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 7 |
<title>Agent Zero</title>
|
| 8 |
<link rel="stylesheet" href="index.css">
|
|
|
|
| 9 |
|
| 10 |
<script>
|
| 11 |
window.safeCall = function (name, ...args) {
|
|
@@ -23,42 +24,44 @@
|
|
| 23 |
<!--Sidebar-->
|
| 24 |
<button id="toggle-sidebar" class="toggle-sidebar-button" aria-label="Toggle Sidebar" aria-expanded="false">
|
| 25 |
<span aria-hidden="true">
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
|
|
|
|
|
|
| 29 |
</span>
|
| 30 |
-
|
| 31 |
</div>
|
| 32 |
<div id="left-panel" class="panel">
|
| 33 |
<div class="left-panel-top">
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
<
|
| 51 |
-
<
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
</div>
|
| 63 |
<!--Preferences-->
|
| 64 |
<div class="pref-section">
|
|
@@ -89,47 +92,67 @@
|
|
| 89 |
<span class="slider"></span>
|
| 90 |
</label>
|
| 91 |
</li>
|
| 92 |
-
<li x-data="{ darkMode: localStorage.getItem('darkMode') =
|
| 93 |
-
|
|
|
|
| 94 |
<label class="switch">
|
| 95 |
-
|
| 96 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 97 |
</label>
|
| 98 |
-
|
|
|
|
| 99 |
</ul>
|
| 100 |
-
<span id="a0version">Agent Zero 0.
|
| 101 |
-
</div>
|
| 102 |
</div>
|
|
|
|
| 103 |
<div id="right-panel" class="panel">
|
| 104 |
<div id="logo-container">
|
| 105 |
<a href="https://github.com/frdel/agent-zero" target="_blank" rel="noopener noreferrer">
|
| 106 |
<img src="splash.jpg" alt="a0" width="48" height="48">
|
| 107 |
</a>
|
| 108 |
-
<div id="time-date"></div>
|
| 109 |
-
|
| 110 |
<!--Chat-->
|
| 111 |
<div id="chat-history">
|
| 112 |
</div>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 113 |
<div id="input-section" x-data="{ paused: false }">
|
| 114 |
<textarea id="chat-input" placeholder="Type your message here..." rows="1"></textarea>
|
| 115 |
<button class="chat-button" id="send-button" aria-label="Send message">
|
| 116 |
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 100 100">
|
| 117 |
<path d="M25 20 L75 50 L25 80" fill="none" stroke="currentColor" stroke-width="15" />
|
| 118 |
-
|
| 119 |
</button>
|
| 120 |
-
<button class="chat-button pause-button" id="pause-button" @click="pauseAgent(true)" x-show="!paused"
|
|
|
|
| 121 |
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="currentColor">
|
| 122 |
-
<path d="M6 19h4V5H6v14zm8-14v14h4V5h-4z"/>
|
| 123 |
</svg>
|
| 124 |
</button>
|
| 125 |
-
<button class="chat-button pause-button" id="unpause-button" @click="pauseAgent(false)" x-show="paused"
|
|
|
|
| 126 |
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="currentColor">
|
| 127 |
-
<path d="M8 5v14l11-7z"/>
|
| 128 |
</svg>
|
| 129 |
</button>
|
|
|
|
| 130 |
</div>
|
| 131 |
</div>
|
| 132 |
-
</div>
|
| 133 |
</body>
|
| 134 |
|
| 135 |
-
</html>
|
|
|
|
| 6 |
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 7 |
<title>Agent Zero</title>
|
| 8 |
<link rel="stylesheet" href="index.css">
|
| 9 |
+
<link rel="stylesheet" href="toast.css">
|
| 10 |
|
| 11 |
<script>
|
| 12 |
window.safeCall = function (name, ...args) {
|
|
|
|
| 24 |
<!--Sidebar-->
|
| 25 |
<button id="toggle-sidebar" class="toggle-sidebar-button" aria-label="Toggle Sidebar" aria-expanded="false">
|
| 26 |
<span aria-hidden="true">
|
| 27 |
+
<svg id="sidebar-hamburger-svg" xmlns="http://www.w3.org/2000/svg" width="24" height="24"
|
| 28 |
+
viewBox="0 0 24 24" fill="CurrentColor">
|
| 29 |
+
<path
|
| 30 |
+
d="M3 13h2v-2H3v2zm0 4h2v-2H3v2zm0-8h2V7H3v2zm4 4h14v-2H7v2zm0 4h14v-2H7v2zM7 7v2h14V7H7z" />
|
| 31 |
+
</svg>
|
| 32 |
</span>
|
| 33 |
+
</button>
|
| 34 |
</div>
|
| 35 |
<div id="left-panel" class="panel">
|
| 36 |
<div class="left-panel-top">
|
| 37 |
+
<!--Sidebar upper elements-->
|
| 38 |
+
<div class="config-section" id="status-section" x-data="{ connected: true }">
|
| 39 |
+
<h3>Status</h3>
|
| 40 |
+
<h4 class="connected" x-show="connected">✔ Connected</h4>
|
| 41 |
+
<h4 class="disconnected" x-show="!connected">✘ Disconnected</h4>
|
| 42 |
+
</div>
|
| 43 |
|
| 44 |
+
<div class="config-section" x-data="{ showQuickActions: true }">
|
| 45 |
+
<h3>Quick Actions</h3>
|
| 46 |
+
<button class="config-button" id="resetChat" @click="resetChat()">Reset chat</button>
|
| 47 |
+
<button class="config-button" id="newChat" @click="newChat()">New Chat</button>
|
| 48 |
+
</div>
|
| 49 |
|
| 50 |
+
<div class="config-section" id="chats-section" x-data="{ contexts: [], selected: '' }"
|
| 51 |
+
x-show="contexts.length > 0">
|
| 52 |
+
<h3>Chats</h3>
|
| 53 |
+
<ul class="config-list">
|
| 54 |
+
<template x-for="context in contexts">
|
| 55 |
+
<li>
|
| 56 |
+
<span :class="{'chat-list-button': true, 'font-bold': context.id === selected}"
|
| 57 |
+
@click="selected = context.id; selectChat(context.id)">
|
| 58 |
+
Chat #<span x-text="context.no"></span>
|
| 59 |
+
</span>
|
| 60 |
+
<button class="edit-button" @click="killChat(context.id)">X</button>
|
| 61 |
+
</li>
|
| 62 |
+
</template>
|
| 63 |
+
</ul>
|
| 64 |
+
</div>
|
| 65 |
</div>
|
| 66 |
<!--Preferences-->
|
| 67 |
<div class="pref-section">
|
|
|
|
| 92 |
<span class="slider"></span>
|
| 93 |
</label>
|
| 94 |
</li>
|
| 95 |
+
<li x-data="{ darkMode: localStorage.getItem('darkMode') != 'false' }"
|
| 96 |
+
x-init="$watch('darkMode', val => toggleDarkMode(val))">
|
| 97 |
+
<div class="switch-label">Dark mode</div>
|
| 98 |
<label class="switch">
|
| 99 |
+
<input type="checkbox" x-model="darkMode">
|
| 100 |
+
<span class="slider"></span>
|
| 101 |
+
</label>
|
| 102 |
+
</li>
|
| 103 |
+
<li x-data="{ showUtils: false }">
|
| 104 |
+
<span>Show utility messages</span>
|
| 105 |
+
<label class="switch">
|
| 106 |
+
<input type="checkbox" x-model="showUtils"
|
| 107 |
+
x-effect="window.safeCall('toggleUtils',showUtils)">
|
| 108 |
+
<span class="slider"></span>
|
| 109 |
</label>
|
| 110 |
+
</li>
|
| 111 |
+
|
| 112 |
</ul>
|
| 113 |
+
<span id="a0version">Agent Zero 0.7<br>built on 2024-09-25</span>
|
|
|
|
| 114 |
</div>
|
| 115 |
+
</div>
|
| 116 |
<div id="right-panel" class="panel">
|
| 117 |
<div id="logo-container">
|
| 118 |
<a href="https://github.com/frdel/agent-zero" target="_blank" rel="noopener noreferrer">
|
| 119 |
<img src="splash.jpg" alt="a0" width="48" height="48">
|
| 120 |
</a>
|
| 121 |
+
<div id="time-date"></div>
|
| 122 |
+
</div>
|
| 123 |
<!--Chat-->
|
| 124 |
<div id="chat-history">
|
| 125 |
</div>
|
| 126 |
+
<div id="progress-bar-box">
|
| 127 |
+
<h4 id="progress-bar-h"><span id="progress-bar-i">|></span><span id="progress-bar"></span></h4>
|
| 128 |
+
</div>
|
| 129 |
+
<div id="toast" class="toast">
|
| 130 |
+
<div class="toast__message"></div>
|
| 131 |
+
<button class="toast__copy">Copy</button>
|
| 132 |
+
<button class="toast__close">Close</button>
|
| 133 |
+
</div>
|
| 134 |
<div id="input-section" x-data="{ paused: false }">
|
| 135 |
<textarea id="chat-input" placeholder="Type your message here..." rows="1"></textarea>
|
| 136 |
<button class="chat-button" id="send-button" aria-label="Send message">
|
| 137 |
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 100 100">
|
| 138 |
<path d="M25 20 L75 50 L25 80" fill="none" stroke="currentColor" stroke-width="15" />
|
| 139 |
+
</svg>
|
| 140 |
</button>
|
| 141 |
+
<button class="chat-button pause-button" id="pause-button" @click="pauseAgent(true)" x-show="!paused"
|
| 142 |
+
aria-label="Pause agent">
|
| 143 |
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="currentColor">
|
| 144 |
+
<path d="M6 19h4V5H6v14zm8-14v14h4V5h-4z" />
|
| 145 |
</svg>
|
| 146 |
</button>
|
| 147 |
+
<button class="chat-button pause-button" id="unpause-button" @click="pauseAgent(false)" x-show="paused"
|
| 148 |
+
aria-label="Resume agent">
|
| 149 |
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="currentColor">
|
| 150 |
+
<path d="M8 5v14l11-7z" />
|
| 151 |
</svg>
|
| 152 |
</button>
|
| 153 |
+
</div>
|
| 154 |
</div>
|
| 155 |
</div>
|
|
|
|
| 156 |
</body>
|
| 157 |
|
| 158 |
+
</html>
|
webui/index.js
CHANGED
|
@@ -9,70 +9,87 @@ const sendButton = document.getElementById('send-button');
|
|
| 9 |
const inputSection = document.getElementById('input-section');
|
| 10 |
const statusSection = document.getElementById('status-section');
|
| 11 |
const chatsSection = document.getElementById('chats-section');
|
| 12 |
-
const scrollbarThumb = document.querySelector('#chat-history::-webkit-scrollbar-thumb');
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
let autoScroll = true;
|
| 15 |
let context = "";
|
| 16 |
|
| 17 |
// Initialize the toggle button
|
| 18 |
-
setupSidebarToggle();
|
| 19 |
|
| 20 |
function isMobile() {
|
| 21 |
return window.innerWidth <= 768;
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
leftPanel.classList.toggle('hidden');
|
| 26 |
rightPanel.classList.toggle('expanded');
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
if (isMobile()) {
|
| 31 |
-
|
| 32 |
-
|
| 33 |
} else {
|
| 34 |
-
|
| 35 |
-
|
| 36 |
}
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
const leftPanel = document.getElementById('left-panel');
|
| 45 |
const rightPanel = document.getElementById('right-panel');
|
| 46 |
const toggleSidebarButton = document.getElementById('toggle-sidebar');
|
| 47 |
if (toggleSidebarButton) {
|
| 48 |
-
|
| 49 |
} else {
|
| 50 |
-
|
| 51 |
-
|
| 52 |
}
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
|
| 57 |
async function sendMessage() {
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 66 |
}
|
| 67 |
}
|
| 68 |
-
|
| 69 |
chatInput.addEventListener('keydown', (e) => {
|
| 70 |
if (e.key === 'Enter' && !e.shiftKey) {
|
| 71 |
e.preventDefault();
|
| 72 |
sendMessage();
|
| 73 |
}
|
| 74 |
});
|
| 75 |
-
|
| 76 |
sendButton.addEventListener('click', sendMessage);
|
| 77 |
|
| 78 |
function updateUserTime() {
|
|
@@ -81,7 +98,7 @@ function updateUserTime() {
|
|
| 81 |
const minutes = now.getMinutes();
|
| 82 |
const seconds = now.getSeconds();
|
| 83 |
const ampm = hours >= 12 ? 'pm' : 'am';
|
| 84 |
-
const formattedHours = hours % 12 || 12;
|
| 85 |
|
| 86 |
// Format the time
|
| 87 |
const timeString = `${formattedHours}:${minutes.toString().padStart(2, '0')}:${seconds.toString().padStart(2, '0')} ${ampm}`;
|
|
@@ -94,11 +111,11 @@ function updateUserTime() {
|
|
| 94 |
const userTimeElement = document.getElementById('time-date');
|
| 95 |
userTimeElement.innerHTML = `${timeString}<br><span id="user-date">${dateString}</span>`;
|
| 96 |
}
|
| 97 |
-
|
| 98 |
updateUserTime();
|
| 99 |
setInterval(updateUserTime, 1000);
|
| 100 |
-
|
| 101 |
-
function setMessage(id, type, heading, content, kvps = null) {
|
| 102 |
// Search for the existing message container by id
|
| 103 |
let messageContainer = document.getElementById(`message-${id}`);
|
| 104 |
|
|
@@ -181,6 +198,8 @@ async function poll() {
|
|
| 181 |
}
|
| 182 |
}
|
| 183 |
|
|
|
|
|
|
|
| 184 |
//set ui model vars from backend
|
| 185 |
const inputAD = Alpine.$data(inputSection);
|
| 186 |
inputAD.paused = response.paused;
|
|
@@ -202,10 +221,18 @@ async function poll() {
|
|
| 202 |
}
|
| 203 |
}
|
| 204 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 205 |
function updatePauseButtonState(isPaused) {
|
| 206 |
const pauseButton = document.getElementById('pause-button');
|
| 207 |
const unpauseButton = document.getElementById('unpause-button');
|
| 208 |
-
|
| 209 |
if (isPaused) {
|
| 210 |
pauseButton.style.display = 'none';
|
| 211 |
unpauseButton.style.display = 'flex';
|
|
@@ -277,22 +304,38 @@ window.toggleThoughts = async function (showThoughts) {
|
|
| 277 |
toggleCssProperty('.msg-thoughts', 'display', showThoughts ? undefined : 'none');
|
| 278 |
}
|
| 279 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 280 |
|
| 281 |
-
window.toggleDarkMode = function(isDark) {
|
| 282 |
if (isDark) {
|
| 283 |
-
|
| 284 |
} else {
|
| 285 |
-
|
| 286 |
}
|
| 287 |
console.log("Dark mode:", isDark);
|
| 288 |
localStorage.setItem('darkMode', isDark);
|
| 289 |
};
|
| 290 |
-
|
| 291 |
-
|
| 292 |
-
|
| 293 |
-
const isDarkMode = localStorage.getItem('darkMode') ==
|
| 294 |
toggleDarkMode(isDarkMode);
|
| 295 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 296 |
|
| 297 |
function toggleCssProperty(selector, property, value) {
|
| 298 |
// Get the stylesheet that contains the class
|
|
@@ -310,7 +353,7 @@ function toggleCssProperty(selector, property, value) {
|
|
| 310 |
if (value === undefined) {
|
| 311 |
rule.style.removeProperty(property);
|
| 312 |
} else {
|
| 313 |
-
rule.style.setProperty(property, value);
|
| 314 |
}
|
| 315 |
return;
|
| 316 |
}
|
|
@@ -318,6 +361,40 @@ function toggleCssProperty(selector, property, value) {
|
|
| 318 |
}
|
| 319 |
}
|
| 320 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 321 |
chatInput.addEventListener('input', adjustTextareaHeight);
|
| 322 |
|
| 323 |
setInterval(poll, 250);
|
|
|
|
| 9 |
const inputSection = document.getElementById('input-section');
|
| 10 |
const statusSection = document.getElementById('status-section');
|
| 11 |
const chatsSection = document.getElementById('chats-section');
|
| 12 |
+
const scrollbarThumb = document.querySelector('#chat-history::-webkit-scrollbar-thumb');
|
| 13 |
+
const progressBar = document.getElementById('progress-bar');
|
| 14 |
+
|
| 15 |
+
|
| 16 |
|
| 17 |
let autoScroll = true;
|
| 18 |
let context = "";
|
| 19 |
|
| 20 |
// Initialize the toggle button
|
| 21 |
+
setupSidebarToggle();
|
| 22 |
|
| 23 |
function isMobile() {
|
| 24 |
return window.innerWidth <= 768;
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
function toggleSidebar() {
|
| 28 |
leftPanel.classList.toggle('hidden');
|
| 29 |
rightPanel.classList.toggle('expanded');
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
function handleResize() {
|
| 33 |
if (isMobile()) {
|
| 34 |
+
leftPanel.classList.add('hidden');
|
| 35 |
+
rightPanel.classList.add('expanded');
|
| 36 |
} else {
|
| 37 |
+
leftPanel.classList.remove('hidden');
|
| 38 |
+
rightPanel.classList.remove('expanded');
|
| 39 |
}
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
// Run on startup and window resize
|
| 43 |
+
window.addEventListener('load', handleResize);
|
| 44 |
+
window.addEventListener('resize', handleResize);
|
| 45 |
+
|
| 46 |
+
function setupSidebarToggle() {
|
| 47 |
const leftPanel = document.getElementById('left-panel');
|
| 48 |
const rightPanel = document.getElementById('right-panel');
|
| 49 |
const toggleSidebarButton = document.getElementById('toggle-sidebar');
|
| 50 |
if (toggleSidebarButton) {
|
| 51 |
+
toggleSidebarButton.addEventListener('click', toggleSidebar);
|
| 52 |
} else {
|
| 53 |
+
console.error('Toggle sidebar button not found');
|
| 54 |
+
setTimeout(setupSidebarToggle, 100);
|
| 55 |
}
|
| 56 |
+
}
|
| 57 |
+
// Make sure to call this function
|
| 58 |
+
document.addEventListener('DOMContentLoaded', setupSidebarToggle);
|
| 59 |
|
| 60 |
async function sendMessage() {
|
| 61 |
+
try {
|
| 62 |
+
const message = chatInput.value.trim();
|
| 63 |
+
if (message) {
|
| 64 |
+
|
| 65 |
+
const response = await sendJsonData("/msg", { text: message, context });
|
| 66 |
+
|
| 67 |
+
if (!response) {
|
| 68 |
+
toast("No response returned.", "error")
|
| 69 |
+
} else if (!response.ok) {
|
| 70 |
+
if (response.message) {
|
| 71 |
+
toast(response.message, "error")
|
| 72 |
+
} else {
|
| 73 |
+
toast("Undefined error.", "error")
|
| 74 |
+
}
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
//setMessage('user', message);
|
| 78 |
+
chatInput.value = '';
|
| 79 |
+
adjustTextareaHeight();
|
| 80 |
+
}
|
| 81 |
+
} catch (e) {
|
| 82 |
+
toast(e.message, "error")
|
| 83 |
}
|
| 84 |
}
|
| 85 |
+
|
| 86 |
chatInput.addEventListener('keydown', (e) => {
|
| 87 |
if (e.key === 'Enter' && !e.shiftKey) {
|
| 88 |
e.preventDefault();
|
| 89 |
sendMessage();
|
| 90 |
}
|
| 91 |
});
|
| 92 |
+
|
| 93 |
sendButton.addEventListener('click', sendMessage);
|
| 94 |
|
| 95 |
function updateUserTime() {
|
|
|
|
| 98 |
const minutes = now.getMinutes();
|
| 99 |
const seconds = now.getSeconds();
|
| 100 |
const ampm = hours >= 12 ? 'pm' : 'am';
|
| 101 |
+
const formattedHours = hours % 12 || 12;
|
| 102 |
|
| 103 |
// Format the time
|
| 104 |
const timeString = `${formattedHours}:${minutes.toString().padStart(2, '0')}:${seconds.toString().padStart(2, '0')} ${ampm}`;
|
|
|
|
| 111 |
const userTimeElement = document.getElementById('time-date');
|
| 112 |
userTimeElement.innerHTML = `${timeString}<br><span id="user-date">${dateString}</span>`;
|
| 113 |
}
|
| 114 |
+
|
| 115 |
updateUserTime();
|
| 116 |
setInterval(updateUserTime, 1000);
|
| 117 |
+
|
| 118 |
+
function setMessage(id, type, heading, content, temp, kvps = null) {
|
| 119 |
// Search for the existing message container by id
|
| 120 |
let messageContainer = document.getElementById(`message-${id}`);
|
| 121 |
|
|
|
|
| 198 |
}
|
| 199 |
}
|
| 200 |
|
| 201 |
+
updateProgress(response.log_progress)
|
| 202 |
+
|
| 203 |
//set ui model vars from backend
|
| 204 |
const inputAD = Alpine.$data(inputSection);
|
| 205 |
inputAD.paused = response.paused;
|
|
|
|
| 221 |
}
|
| 222 |
}
|
| 223 |
|
| 224 |
+
function updateProgress(progress) {
|
| 225 |
+
if (!progress) progress = "Waiting for input"
|
| 226 |
+
|
| 227 |
+
if (progressBar.innerHTML != progress) {
|
| 228 |
+
progressBar.innerHTML = progress
|
| 229 |
+
}
|
| 230 |
+
}
|
| 231 |
+
|
| 232 |
function updatePauseButtonState(isPaused) {
|
| 233 |
const pauseButton = document.getElementById('pause-button');
|
| 234 |
const unpauseButton = document.getElementById('unpause-button');
|
| 235 |
+
|
| 236 |
if (isPaused) {
|
| 237 |
pauseButton.style.display = 'none';
|
| 238 |
unpauseButton.style.display = 'flex';
|
|
|
|
| 304 |
toggleCssProperty('.msg-thoughts', 'display', showThoughts ? undefined : 'none');
|
| 305 |
}
|
| 306 |
|
| 307 |
+
window.toggleUtils = async function (showUtils) {
|
| 308 |
+
// add display:none to .msg-json class definition
|
| 309 |
+
toggleCssProperty('.message-util', 'display', showUtils ? undefined : 'none');
|
| 310 |
+
// toggleCssProperty('.message-util .msg-kvps', 'display', showUtils ? undefined : 'none');
|
| 311 |
+
// toggleCssProperty('.message-util .msg-content', 'display', showUtils ? undefined : 'none');
|
| 312 |
+
}
|
| 313 |
|
| 314 |
+
window.toggleDarkMode = function (isDark) {
|
| 315 |
if (isDark) {
|
| 316 |
+
document.body.classList.remove('light-mode');
|
| 317 |
} else {
|
| 318 |
+
document.body.classList.add('light-mode');
|
| 319 |
}
|
| 320 |
console.log("Dark mode:", isDark);
|
| 321 |
localStorage.setItem('darkMode', isDark);
|
| 322 |
};
|
| 323 |
+
|
| 324 |
+
// Modify this part
|
| 325 |
+
document.addEventListener('DOMContentLoaded', () => {
|
| 326 |
+
const isDarkMode = localStorage.getItem('darkMode') !== 'false';
|
| 327 |
toggleDarkMode(isDarkMode);
|
| 328 |
+
});
|
| 329 |
+
|
| 330 |
+
window.toggleDarkMode = function (isDark) {
|
| 331 |
+
if (isDark) {
|
| 332 |
+
document.body.classList.remove('light-mode');
|
| 333 |
+
} else {
|
| 334 |
+
document.body.classList.add('light-mode');
|
| 335 |
+
}
|
| 336 |
+
console.log("Dark mode:", isDark);
|
| 337 |
+
localStorage.setItem('darkMode', isDark);
|
| 338 |
+
};
|
| 339 |
|
| 340 |
function toggleCssProperty(selector, property, value) {
|
| 341 |
// Get the stylesheet that contains the class
|
|
|
|
| 353 |
if (value === undefined) {
|
| 354 |
rule.style.removeProperty(property);
|
| 355 |
} else {
|
| 356 |
+
rule.style.setProperty(property, value);
|
| 357 |
}
|
| 358 |
return;
|
| 359 |
}
|
|
|
|
| 361 |
}
|
| 362 |
}
|
| 363 |
|
| 364 |
+
function toast(text, type = 'info') {
|
| 365 |
+
const toast = document.getElementById('toast');
|
| 366 |
+
|
| 367 |
+
// Update the toast content and type
|
| 368 |
+
toast.querySelector('#toast .toast__message').textContent = text;
|
| 369 |
+
toast.className = `toast toast--${type}`;
|
| 370 |
+
toast.style.display = 'flex';
|
| 371 |
+
|
| 372 |
+
// Add the close button event listener
|
| 373 |
+
const closeButton = toast.querySelector('#toast .toast__close');
|
| 374 |
+
closeButton.onclick = () => {
|
| 375 |
+
toast.style.display = 'none';
|
| 376 |
+
clearTimeout(toast.timeoutId);
|
| 377 |
+
};
|
| 378 |
+
|
| 379 |
+
// Add the copy button event listener
|
| 380 |
+
const copyButton = toast.querySelector('#toast .toast__copy');
|
| 381 |
+
copyButton.onclick = () => {
|
| 382 |
+
navigator.clipboard.writeText(text);
|
| 383 |
+
copyButton.textContent = 'Copied!';
|
| 384 |
+
setTimeout(() => {
|
| 385 |
+
copyButton.textContent = 'Copy';
|
| 386 |
+
}, 2000);
|
| 387 |
+
};
|
| 388 |
+
|
| 389 |
+
// Clear any existing timeout
|
| 390 |
+
clearTimeout(toast.timeoutId);
|
| 391 |
+
|
| 392 |
+
// Automatically close the toast after 5 seconds
|
| 393 |
+
toast.timeoutId = setTimeout(() => {
|
| 394 |
+
toast.style.display = 'none';
|
| 395 |
+
}, 10000);
|
| 396 |
+
}
|
| 397 |
+
|
| 398 |
chatInput.addEventListener('input', adjustTextareaHeight);
|
| 399 |
|
| 400 |
setInterval(poll, 250);
|
webui/messages.js
CHANGED
|
@@ -18,8 +18,8 @@ export function getHandler(type) {
|
|
| 18 |
return drawMessageError;
|
| 19 |
case 'info':
|
| 20 |
return drawMessageInfo;
|
| 21 |
-
case '
|
| 22 |
-
return
|
| 23 |
case 'hint':
|
| 24 |
return drawMessageInfo;
|
| 25 |
default:
|
|
@@ -27,7 +27,7 @@ export function getHandler(type) {
|
|
| 27 |
}
|
| 28 |
}
|
| 29 |
|
| 30 |
-
export function _drawMessage(messageContainer, heading, content, temp, kvps = null, messageClasses = [], contentClasses = []) {
|
| 31 |
|
| 32 |
|
| 33 |
// if (type !== 'user') {
|
|
@@ -48,10 +48,12 @@ export function _drawMessage(messageContainer, heading, content, temp, kvps = nu
|
|
| 48 |
textNode.textContent = content;
|
| 49 |
textNode.style.whiteSpace = 'pre-wrap';
|
| 50 |
textNode.style.wordBreak = 'break-word';
|
| 51 |
-
textNode.classList.add("
|
| 52 |
messageDiv.appendChild(textNode);
|
| 53 |
messageContainer.appendChild(messageDiv);
|
| 54 |
|
|
|
|
|
|
|
| 55 |
// if (type !== 'user') {
|
| 56 |
// const actions = document.createElement('div');
|
| 57 |
// actions.classList.add('message-actions');
|
|
@@ -63,51 +65,53 @@ export function _drawMessage(messageContainer, heading, content, temp, kvps = nu
|
|
| 63 |
}
|
| 64 |
|
| 65 |
export function drawMessageDefault(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 66 |
-
_drawMessage(messageContainer, heading, content, temp, kvps, ['message-ai', 'message-default'], ['msg-json']);
|
| 67 |
}
|
| 68 |
|
| 69 |
export function drawMessageAgent(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 70 |
-
let kvpsFlat=null
|
| 71 |
if (kvps) {
|
| 72 |
kvpsFlat = { ...kvps, ...kvps['tool_args'] || {} }
|
| 73 |
delete kvpsFlat['tool_args']
|
| 74 |
}
|
| 75 |
|
| 76 |
-
_drawMessage(messageContainer, heading, content, temp, kvpsFlat, ['message-ai', 'message-agent'], ['msg-json']);
|
| 77 |
}
|
| 78 |
|
| 79 |
export function drawMessageResponse(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 80 |
-
_drawMessage(messageContainer, heading, content, temp, null, ['message-ai', 'message-agent-response'
|
| 81 |
}
|
| 82 |
|
| 83 |
export function drawMessageDelegation(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 84 |
-
_drawMessage(messageContainer, heading, content, temp, kvps, ['message-ai', 'message-agent', 'message-
|
| 85 |
}
|
| 86 |
|
| 87 |
export function drawMessageUser(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 88 |
-
_drawMessage(messageContainer, heading, content, temp, kvps, ['message-user']);
|
| 89 |
}
|
| 90 |
|
| 91 |
export function drawMessageTool(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 92 |
-
_drawMessage(messageContainer, heading, content, temp, kvps, ['message-ai', 'message-tool'
|
| 93 |
}
|
| 94 |
|
| 95 |
export function drawMessageCodeExe(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 96 |
-
_drawMessage(messageContainer, heading, content, temp, null, ['message-ai', 'message-code-exe'
|
| 97 |
}
|
| 98 |
|
| 99 |
export function drawMessageAgentPlain(classes, messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 100 |
-
_drawMessage(messageContainer, heading, content, temp,
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
export function drawMessageAdhoc(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 104 |
-
return drawMessageAgentPlain(['message-adhoc'], messageContainer, id, type, heading, content, temp, kvps);
|
| 105 |
}
|
| 106 |
|
| 107 |
export function drawMessageInfo(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 108 |
return drawMessageAgentPlain(['message-info'], messageContainer, id, type, heading, content, temp, kvps);
|
| 109 |
}
|
| 110 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 111 |
export function drawMessageWarning(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 112 |
return drawMessageAgentPlain(['message-warning'], messageContainer, id, type, heading, content, temp, kvps);
|
| 113 |
}
|
|
@@ -119,9 +123,10 @@ export function drawMessageError(messageContainer, id, type, heading, content, t
|
|
| 119 |
function drawKvps(container, kvps) {
|
| 120 |
if (kvps) {
|
| 121 |
const table = document.createElement('table');
|
| 122 |
-
table.classList.add('
|
| 123 |
for (let [key, value] of Object.entries(kvps)) {
|
| 124 |
const row = table.insertRow();
|
|
|
|
| 125 |
if (key == "thoughts") row.classList.add('msg-thoughts');
|
| 126 |
|
| 127 |
const th = row.insertCell();
|
|
|
|
| 18 |
return drawMessageError;
|
| 19 |
case 'info':
|
| 20 |
return drawMessageInfo;
|
| 21 |
+
case 'util':
|
| 22 |
+
return drawMessageUtil;
|
| 23 |
case 'hint':
|
| 24 |
return drawMessageInfo;
|
| 25 |
default:
|
|
|
|
| 27 |
}
|
| 28 |
}
|
| 29 |
|
| 30 |
+
export function _drawMessage(messageContainer, heading, content, temp, followUp, kvps = null, messageClasses = [], contentClasses = []) {
|
| 31 |
|
| 32 |
|
| 33 |
// if (type !== 'user') {
|
|
|
|
| 48 |
textNode.textContent = content;
|
| 49 |
textNode.style.whiteSpace = 'pre-wrap';
|
| 50 |
textNode.style.wordBreak = 'break-word';
|
| 51 |
+
textNode.classList.add("msg-content", ...contentClasses)
|
| 52 |
messageDiv.appendChild(textNode);
|
| 53 |
messageContainer.appendChild(messageDiv);
|
| 54 |
|
| 55 |
+
if (followUp) messageContainer.classList.add("message-followup")
|
| 56 |
+
|
| 57 |
// if (type !== 'user') {
|
| 58 |
// const actions = document.createElement('div');
|
| 59 |
// actions.classList.add('message-actions');
|
|
|
|
| 65 |
}
|
| 66 |
|
| 67 |
export function drawMessageDefault(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 68 |
+
_drawMessage(messageContainer, heading, content, temp, false, kvps, ['message-ai', 'message-default'], ['msg-json']);
|
| 69 |
}
|
| 70 |
|
| 71 |
export function drawMessageAgent(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 72 |
+
let kvpsFlat = null
|
| 73 |
if (kvps) {
|
| 74 |
kvpsFlat = { ...kvps, ...kvps['tool_args'] || {} }
|
| 75 |
delete kvpsFlat['tool_args']
|
| 76 |
}
|
| 77 |
|
| 78 |
+
_drawMessage(messageContainer, heading, content, temp, false, kvpsFlat, ['message-ai', 'message-agent'], ['msg-json']);
|
| 79 |
}
|
| 80 |
|
| 81 |
export function drawMessageResponse(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 82 |
+
_drawMessage(messageContainer, heading, content, temp, true, null, ['message-ai', 'message-agent-response']);
|
| 83 |
}
|
| 84 |
|
| 85 |
export function drawMessageDelegation(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 86 |
+
_drawMessage(messageContainer, heading, content, temp, true, kvps, ['message-ai', 'message-agent', 'message-agent-delegation']);
|
| 87 |
}
|
| 88 |
|
| 89 |
export function drawMessageUser(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 90 |
+
_drawMessage(messageContainer, heading, content, temp, false, kvps, ['message-user']);
|
| 91 |
}
|
| 92 |
|
| 93 |
export function drawMessageTool(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 94 |
+
_drawMessage(messageContainer, heading, content, temp, true, kvps, ['message-ai', 'message-tool'], ['msg-output']);
|
| 95 |
}
|
| 96 |
|
| 97 |
export function drawMessageCodeExe(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 98 |
+
_drawMessage(messageContainer, heading, content, temp, true, null, ['message-ai', 'message-code-exe']);
|
| 99 |
}
|
| 100 |
|
| 101 |
export function drawMessageAgentPlain(classes, messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 102 |
+
_drawMessage(messageContainer, heading, content, temp, false, null, [...classes]);
|
| 103 |
+
messageContainer.classList.add('center-container')
|
|
|
|
|
|
|
|
|
|
| 104 |
}
|
| 105 |
|
| 106 |
export function drawMessageInfo(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 107 |
return drawMessageAgentPlain(['message-info'], messageContainer, id, type, heading, content, temp, kvps);
|
| 108 |
}
|
| 109 |
|
| 110 |
+
export function drawMessageUtil(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 111 |
+
_drawMessage(messageContainer, heading, content, temp, false, kvps, ['message-util'], ['msg-json']);
|
| 112 |
+
messageContainer.classList.add('center-container')
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
export function drawMessageWarning(messageContainer, id, type, heading, content, temp, kvps = null) {
|
| 116 |
return drawMessageAgentPlain(['message-warning'], messageContainer, id, type, heading, content, temp, kvps);
|
| 117 |
}
|
|
|
|
| 123 |
function drawKvps(container, kvps) {
|
| 124 |
if (kvps) {
|
| 125 |
const table = document.createElement('table');
|
| 126 |
+
table.classList.add('msg-kvps');
|
| 127 |
for (let [key, value] of Object.entries(kvps)) {
|
| 128 |
const row = table.insertRow();
|
| 129 |
+
row.classList.add('kvps-row');
|
| 130 |
if (key == "thoughts") row.classList.add('msg-thoughts');
|
| 131 |
|
| 132 |
const th = row.insertCell();
|
webui/toast.css
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#toast {
|
| 2 |
+
/* position: fixed;
|
| 3 |
+
bottom: 20px;
|
| 4 |
+
left: 50%;
|
| 5 |
+
transform: translateX(-50%); */
|
| 6 |
+
margin: 0.5em;
|
| 7 |
+
background-color: #333;
|
| 8 |
+
color: #fff;
|
| 9 |
+
padding: 0.3em;
|
| 10 |
+
border-radius: 0.3125em;
|
| 11 |
+
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.15);
|
| 12 |
+
display: none;
|
| 13 |
+
align-items: center;
|
| 14 |
+
z-index: 9999;
|
| 15 |
+
}
|
| 16 |
+
|
| 17 |
+
#toast.toast--success {
|
| 18 |
+
background-color: #4CAF50;
|
| 19 |
+
}
|
| 20 |
+
|
| 21 |
+
#toast.toast--error {
|
| 22 |
+
background-color: #731811;
|
| 23 |
+
}
|
| 24 |
+
|
| 25 |
+
#toast.toast--info {
|
| 26 |
+
background-color: #2196F3;
|
| 27 |
+
}
|
| 28 |
+
|
| 29 |
+
.toast__message {
|
| 30 |
+
margin-right: 16px;
|
| 31 |
+
flex-grow: 1;
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
.toast__close,
|
| 35 |
+
.toast__copy {
|
| 36 |
+
background-color: transparent;
|
| 37 |
+
border: none;
|
| 38 |
+
color: #fff;
|
| 39 |
+
cursor: pointer;
|
| 40 |
+
font-size: 16px;
|
| 41 |
+
margin-left: 8px;
|
| 42 |
+
}
|