# moneychatbot/src/processor/tools/interaction.py
#
# SPDX-FileCopyrightText: Hadad <hadad@linuxmail.org>
# SPDX-License-Identifier: Apache-2.0
#
import time
from assets.css.reasoning import styles
from ..response.formatter import assistant_response
from ..reasoning.interface import reasoning_interfaces
from ..reasoning.tool_reasoning import tool_reasoning
from .parser import extract_tool_parameters
from .executor import invoke_tool_function
from config import (
MAX_TOKENS,
REASONING_DELAY,
REASONING_INSERT,
TOOLS_TEMPERATURE,
MAXIMUM_ITERATIONS,
MAX_RETRY_LIMIT,
ITERATION_METRICS
)
def process_tool_interactions(server, model_name, conversation_messages, tool_definitions, search_engine):
    """
    Drive a tool-calling loop against a chat-completions endpoint.

    Repeatedly asks the model for a response, executes any tool calls it
    requests (via invoke_tool_function), appends both the assistant message
    and each tool result to conversation_messages, and yields styled
    reasoning/log HTML fragments for the UI while work is in progress.

    Parameters:
        server: chat-completions client exposing server.chat.completions.create.
        model_name: model identifier forwarded to the API.
        conversation_messages: mutable list of chat messages; extended in place.
        tool_definitions: tool schemas forwarded to the API (tool_choice="auto").
        search_engine: backend handed to invoke_tool_function for each call.

    Yields:
        str: styled log fragments produced by styles().

    Returns (as the generator's return value, i.e. StopIteration.value):
        tuple: (conversation_messages, logs_generator, generator_results)
        where generator_results is True when at least one tool executed.

    Side effects:
        Mutates the module-level ITERATION_METRICS dict ("attempts",
        "failures", "error_patterns", "success_rate") and sleeps between
        retries / streamed reasoning chunks.
    """
    retry_count = 0
    logs_generator = ""
    tool_results = []
    execution_success = False
    last_error = None
    error_history = []
    # BUG FIX: the original looped on `MAXIMUM_ITERATIONS <= MAX_RETRY_LIMIT`
    # (two imported constants, so the condition never changes) and computed
    # `new_iterations` without ever applying it.  Whenever
    # MAXIMUM_ITERATIONS + 2 < MAX_RETRY_LIMIT and every attempt failed, the
    # loop could spin forever because the L186-style break was unreachable.
    # Track the growing iteration budget in a local instead.
    current_iterations = MAXIMUM_ITERATIONS
    while current_iterations <= MAX_RETRY_LIMIT and not execution_success:
        ITERATION_METRICS["attempts"] += 1
        current_iteration_successful = False
        iteration_errors = []
        for iteration_index in range(current_iterations):
            try:
                # Backoff: delay table indexed (clamped) by retry count,
                # scaled by the configured multiplier; no sleep on first try.
                retry_delay = ITERATION_METRICS["retry_delays"][min(retry_count, len(ITERATION_METRICS["retry_delays"]) - 1)]
                if retry_count > 0:
                    time.sleep(retry_delay * ITERATION_METRICS["backoff_multiplier"])
                model_response = server.chat.completions.create(
                    model=model_name,
                    messages=conversation_messages,
                    tools=tool_definitions,
                    tool_choice="auto",
                    max_tokens=MAX_TOKENS,
                    temperature=TOOLS_TEMPERATURE
                )
                response_choice = model_response.choices[0]
                assistant_message = response_choice.message
                formatted_assistant_message = assistant_response(assistant_message)
                conversation_messages.append(
                    {
                        "role": formatted_assistant_message["role"],
                        "content": formatted_assistant_message["content"],
                        "tool_calls": formatted_assistant_message["tool_calls"]
                    }
                )
                pending_tool_calls = assistant_message.tool_calls or []
                if not pending_tool_calls:
                    # Model answered without requesting tools: conversation done.
                    if logs_generator:
                        logs_generator = styles(logs_generator.replace('<br>', '\n').strip(), expanded=False)
                    execution_success = True
                    current_iteration_successful = True
                    break
                tool_execution_errors = []
                for tool_invocation in pending_tool_calls:
                    tool_name = tool_invocation.function.name
                    tool_arguments_raw = tool_invocation.function.arguments
                    extracted_arguments, extraction_error = extract_tool_parameters(tool_arguments_raw)
                    if extraction_error:
                        # Argument JSON could not be parsed; surface the error
                        # to the UI and feed it back to the model as the tool
                        # result so it can self-correct on the next turn.
                        error_key = f"{tool_name}_extraction"
                        ITERATION_METRICS["error_patterns"][error_key] = ITERATION_METRICS["error_patterns"].get(error_key, 0) + 1
                        tool_execution_errors.append({
                            "tool": tool_name,
                            "error": extraction_error,
                            "type": "extraction"
                        })
                        reasoning_error = tool_reasoning(tool_name, None, "error", error=extraction_error)
                        # Stream the reasoning text to the UI in chunks.
                        for i in range(0, len(reasoning_error), REASONING_INSERT):
                            logs_generator = styles(reasoning_interfaces(reasoning_error, i), expanded=True)
                            yield logs_generator
                            time.sleep(REASONING_DELAY)
                        logs_generator = styles(reasoning_error, expanded=True)
                        yield logs_generator
                        tool_execution_result = extraction_error
                    else:
                        # Announce parsing, then execution, as streamed chunks.
                        reasoning_status = tool_reasoning(tool_name, extracted_arguments, "parsing")
                        for i in range(0, len(reasoning_status), REASONING_INSERT):
                            logs_generator = styles(reasoning_interfaces(reasoning_status, i), expanded=True)
                            yield logs_generator
                            time.sleep(REASONING_DELAY)
                        reasoning_start = tool_reasoning(tool_name, extracted_arguments, "executing")
                        for i in range(0, len(reasoning_start), REASONING_INSERT):
                            logs_generator = styles(reasoning_interfaces(reasoning_start, i), expanded=True)
                            yield logs_generator
                            time.sleep(REASONING_DELAY)
                        try:
                            tool_execution_result = invoke_tool_function(
                                search_engine,
                                tool_name,
                                extracted_arguments
                            )
                            tool_results.append({
                                "tool": tool_name,
                                "arguments": extracted_arguments,
                                "result": tool_execution_result,
                                # BUG FIX: original stored the constant
                                # MAXIMUM_ITERATIONS here; record the actual
                                # loop position instead.
                                "iteration": iteration_index,
                                "retry_count": retry_count
                            })
                            reasoning_done = tool_reasoning(tool_name, extracted_arguments, "completed", result=tool_execution_result)
                            for i in range(0, len(reasoning_done), REASONING_INSERT):
                                logs_generator = styles(reasoning_interfaces(reasoning_done, i), expanded=True)
                                yield logs_generator
                                time.sleep(REASONING_DELAY)
                            logs_generator = styles(reasoning_done, expanded=False)
                            yield logs_generator
                        except Exception as tool_error:
                            # Tool raised: log the pattern, show the error, and
                            # pass the message back as the tool's result.
                            error_key = f"{tool_name}_execution"
                            ITERATION_METRICS["error_patterns"][error_key] = ITERATION_METRICS["error_patterns"].get(error_key, 0) + 1
                            tool_execution_errors.append({
                                "tool": tool_name,
                                "error": str(tool_error),
                                "type": "execution",
                                "arguments": extracted_arguments
                            })
                            reasoning_error = tool_reasoning(tool_name, extracted_arguments, "error", error=str(tool_error))
                            for i in range(0, len(reasoning_error), REASONING_INSERT):
                                logs_generator = styles(reasoning_interfaces(reasoning_error, i), expanded=True)
                                yield logs_generator
                                time.sleep(REASONING_DELAY)
                            logs_generator = styles(reasoning_error, expanded=True)
                            yield logs_generator
                            tool_execution_result = str(tool_error)
                    # Always answer the tool call, even with an error string,
                    # so the conversation stays well-formed for the API.
                    conversation_messages.append(
                        {
                            "role": "tool",
                            "tool_call_id": tool_invocation.id,
                            "name": tool_name,
                            "content": tool_execution_result
                        }
                    )
                if not tool_execution_errors:
                    execution_success = True
                    current_iteration_successful = True
                    break
                else:
                    iteration_errors.extend(tool_execution_errors)
            except Exception as model_error:
                # The completion request itself failed; record and retry.
                last_error = str(model_error)
                error_history.append({
                    # BUG FIX: original stored the constant MAXIMUM_ITERATIONS.
                    "iteration": iteration_index,
                    "error": last_error,
                    "timestamp": time.time()
                })
                ITERATION_METRICS["failures"] += 1
                iteration_errors.append({
                    "error": last_error,
                    "type": "model"
                })
        if current_iteration_successful:
            execution_success = True
            break
        else:
            if iteration_errors:
                error_history.extend(iteration_errors)
            retry_count += 1
            previous_iterations = current_iterations
            # Grow the budget faster when one error pattern keeps repeating.
            if ITERATION_METRICS["error_patterns"]:
                frequent_errors = max(ITERATION_METRICS["error_patterns"].values())
                if frequent_errors > 3:
                    new_iterations = min(current_iterations + 2, MAX_RETRY_LIMIT)
                else:
                    new_iterations = min(current_iterations + 1, MAX_RETRY_LIMIT)
            else:
                new_iterations = min(current_iterations + 1, MAX_RETRY_LIMIT)
            if new_iterations > previous_iterations:
                retry_reasoning = f"Retrying with increased iterations: {new_iterations} (attempt {retry_count + 1})"
                for i in range(0, len(retry_reasoning), REASONING_INSERT):
                    logs_generator = styles(reasoning_interfaces(retry_reasoning, i), expanded=True)
                    yield logs_generator
                    time.sleep(REASONING_DELAY)
            # BUG FIX: apply the enlarged budget (the original computed
            # new_iterations but never used it, making the loop unbounded).
            current_iterations = new_iterations
            if new_iterations >= MAX_RETRY_LIMIT:
                final_error = f"Maximum retry limit reached after {ITERATION_METRICS['attempts']} attempts with {ITERATION_METRICS['failures']} failures"
                logs_generator = styles(final_error, expanded=True)
                yield logs_generator
                break
    # Success rate is tool executions per attempt, guarded against div-by-zero.
    ITERATION_METRICS["success_rate"] = (len(tool_results) / max(ITERATION_METRICS["attempts"], 1)) * 100
    if logs_generator:
        logs_generator = styles(logs_generator.replace('<br>', '\n').strip(), expanded=False)
    generator_results = len(tool_results) > 0
    return conversation_messages, logs_generator, generator_results