Commit
·
3b91e5c
1
Parent(s):
6c0b5ec
fix: remove deprecated files
Browse files- conversation_memory.py +0 -66
- utils.py +0 -104
conversation_memory.py
DELETED
|
@@ -1,66 +0,0 @@
|
|
| 1 |
-
import json
|
| 2 |
-
import os
|
| 3 |
-
class ConversationMemory:
    """Accumulates tool usage, generated queries, query results and user
    requests across a multi-turn agent conversation, with optional JSON
    persistence."""

    def __init__(self):
        # Names of every tool the agent invoked (may contain duplicates).
        self.tools_used = []
        # Every query the agent generated (SQL/search text).
        self.all_queries = []
        # Raw responses returned by the 'execute_query' tool.
        self.query_results = []
        # Every user request passed to update_from_parsed.
        self.request = []

    def update_from_parsed(self, parsed_steps, request):
        """Fold a list of parsed agent steps into memory.

        Args:
            parsed_steps: list of step dicts with a 'type' key of
                'ai_function_call', 'ai_final_answer' or 'tool_response'
                (the shape produced by utils.parse_mcp_output).
            request: the user request that produced these steps.
        """
        tools = []
        self.request.append(request)
        for step in parsed_steps:
            if step['type'] == 'ai_function_call':
                tools.append(step['tool'])
                args = step['args']
                # BUG FIX: 'args' may be a JSON string OR an already-parsed
                # dict (the producer stores the raw function-call arguments).
                # The old code called json.loads unconditionally and raised
                # TypeError on dict input.
                if isinstance(args, str):
                    try:
                        args_dict = json.loads(args)
                    except json.JSONDecodeError:
                        args_dict = {}
                else:
                    args_dict = args or {}
                if 'query' in args_dict:
                    self.all_queries.append(args_dict['query'])
            elif step['type'] == 'ai_final_answer':
                if step['ai_said']:
                    # NOTE(review): indexing [0]/[1] mirrors the producer's
                    # content layout — presumably a (text, query) pair when
                    # the final answer embeds a query; confirm against caller.
                    if 'query' in step['ai_said'][0]:
                        self.all_queries.append(step['ai_said'][1])
            elif step['type'] == 'tool_response':
                if step['tool'] == 'execute_query':
                    self.query_results.append(step['response'])

        self.tools_used.extend(tools)

    def get_last_n_queries(self):
        """Return the deduplicated queries seen so far (order not preserved)."""
        return list(set(self.all_queries))

    def get_last_n_results(self):
        """Return the deduplicated query results seen so far (order not preserved)."""
        return list(set(self.query_results))

    def get_all_tools_used(self):
        """Return the deduplicated tool names used so far (order not preserved)."""
        return list(set(self.tools_used))

    def get_all_user_messages(self):
        """Return the deduplicated user requests seen so far (order not preserved)."""
        return list(set(self.request))

    def reset(self, path="memory.json"):
        """Delete the persisted memory file (if any) and clear in-memory state.

        BUG FIX: the old code called os.remove unconditionally and raised
        FileNotFoundError when nothing had been persisted yet; resetting an
        empty memory should succeed.
        """
        try:
            os.remove(path)
        except FileNotFoundError:
            pass  # nothing persisted yet — nothing to delete
        self.__init__()  # Re-initialize the object

    def summary(self):
        """Return a small dict snapshot of the conversation state."""
        return {
            "total_requests": len(self.request),
            "tools_used": self.get_all_tools_used(),
            "last_request": self.request[-1] if self.request else None,
            "last_query": self.all_queries[-1] if self.all_queries else None,
            "last_result": self.query_results[-1] if self.query_results else None,
        }

    def save_memory(self, path="memory.json"):
        """Persist all instance state to *path* as JSON."""
        with open(path, "w") as f:
            json.dump(self.__dict__, f)

    def load_memory(self, path="memory.json"):
        """Load previously saved state from *path* into this instance.

        Returns self on success; a fresh ConversationMemory if the file
        does not exist.
        """
        try:
            with open(path, "r") as f:
                data = json.load(f)
            self.__dict__.update(data)
            return self
        except FileNotFoundError:
            return ConversationMemory()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
utils.py
DELETED
|
@@ -1,104 +0,0 @@
|
|
| 1 |
-
import re
|
| 2 |
-
import os
|
| 3 |
-
from conversation_memory import ConversationMemory
|
| 4 |
-
import logging
|
| 5 |
-
|
| 6 |
-
logger = logging.getLogger(__name__)


def parse_mcp_output(output_dict):
    """Parse a LangChain/MCP agent result into structured steps.

    Walks output_dict["messages"], classifying each message by its class
    name (AIMessage / ToolMessage) and printing + logging a human-readable
    trace along the way.

    Args:
        output_dict: agent result; only the "messages" key is read.

    Returns:
        tuple: (result, final_answer, last_tool_answer, query_store) where
            result is a list of step dicts of type 'ai_function_call',
            'ai_final_answer' or 'tool_response'; final_answer is the
            content of the last plain AI message ("" if none arrived);
            last_tool_answer is the last tool response content ("" if
            none); query_store holds every extracted 'query' argument.
    """
    import json  # local import: the module top level does not import json

    result = []
    messages = output_dict.get("messages", [])
    query_store = []
    last_tool_answer = ""
    # BUG FIX: final_answer was only assigned inside the final-answer branch
    # (the old code initialized an unused 'last_answer' instead), so the
    # return raised UnboundLocalError when no final answer arrived.
    final_answer = ""
    for msg in messages:
        role_name = msg.__class__.__name__  # Example: HumanMessage, AIMessage, ToolMessage
        content = getattr(msg, "content", "")

        # AIMessage with tool call
        if role_name == "AIMessage":
            function_call = getattr(msg, "additional_kwargs", {}).get("function_call")
            if function_call:
                tool_name = function_call.get("name")
                arguments = function_call.get("arguments")

                # Check if arguments is a JSON string or a dict
                if isinstance(arguments, str):
                    try:
                        arguments_dict = json.loads(arguments)
                    except json.JSONDecodeError:
                        arguments_dict = {}
                else:
                    arguments_dict = arguments or {}

                # Check for presence of "query" key
                if "query" in arguments_dict:
                    if content:
                        print("=============== AI Reasoning Step ===============")
                        # NOTE(review): content[0] assumes list-shaped content
                        # (LangChain content blocks) — for a plain string this
                        # prints only the first character; confirm upstream.
                        print(content[0])
                        print()
                        print("=============== AI used the following tools ===============")
                        print(tool_name)
                        print()
                        print("=============== AI generated the following query ===============")
                        print(arguments_dict['query'])

                        logger.info(f"ai said:{content[0]}")
                        logger.info(f"ai used:{tool_name}")
                        logger.info(f"generated query:{arguments_dict['query']}")
                    query_store.append(arguments_dict["query"])

                    result.append({
                        "type": "ai_function_call",
                        "ai_said": content,
                        "tool": tool_name,
                        "args": arguments
                    })
                else:
                    logger.info(f"ai said:{content}")
                    logger.info(f"ai used:{tool_name}")
                    print("=============== AI Reasoning Step ===============")
                    print(content)
                    print()
                    print("=============== AI used the following tools ===============")
                    print(tool_name)
                    print()

                    result.append({
                        "type": "ai_function_call",
                        "ai_said": content,
                        "tool": tool_name,
                        "args": arguments
                    })

            else:
                # Plain AIMessage with no function call: the final answer.
                final_answer = content
                logger.info(f"ai final answer:{content}")
                print("=============== AI's final answer ===============")
                print(content)
                result.append({
                    "type": "ai_final_answer",
                    "ai_said": content
                })

        # ToolMessage
        elif role_name == "ToolMessage":
            tool_name = getattr(msg, "name", None)
            print("=============== The tool returned the following response ===============")
            print(content)
            last_tool_answer = content
            logger.info(f"tool response:{content}")
            result.append({
                "type": "tool_response",
                "tool": tool_name,
                "response": content
            })

    return result, final_answer, last_tool_answer, query_store
|
| 103 |
-
|
| 104 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|