Update src/txagent/txagent.py

src/txagent/txagent.py  CHANGED  (+505 −134)
@@ -14,33 +14,11 @@ from .toolrag import ToolRAGModel
 import torch
 import logging
 
-# Configure logging
+# Configure logging with a more specific logger name
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
 logger = logging.getLogger("TxAgent")
 
 from .utils import NoRepeatSentenceProcessor, ReasoningTraceChecker, tool_result_format
-import re
-
-
-def extract_function_call_json(text: str, return_message: bool = False):
-    """
-    Simple extraction of function call JSON from model output text.
-    """
-    match = re.search(r"\[TOOL_CALLS\](.*?)$", text, re.DOTALL)
-    if not match:
-        return [], text
-
-    try:
-        json_block = match.group(1).strip()
-        func_calls = json.loads(json_block)
-        if isinstance(func_calls, dict):
-            func_calls = [func_calls]
-        message = text.split("[TOOL_CALLS]")[0].strip()
-        if return_message:
-            return func_calls, message
-        return func_calls
-    except Exception as e:
-        logger.error(f"Error parsing function call JSON: {e}")
-        return [], text
 
 class TxAgent:
     def __init__(self, model_name,
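Note on the hunk above: the module-level `extract_function_call_json` fallback is removed; the new code routes all parsing through `self.tooluniverse.extract_function_call_json` (see the `run_function_call` hunks below). The removed helper also had an inconsistent contract: on failure it returned the pair `([], text)`, but on success with `return_message=False` it returned only the call list. A minimal sketch of a consistent variant, assuming the same `[TOOL_CALLS]` delimiter (the function name is illustrative, not the ToolUniverse API):

    import json
    import re

    def parse_tool_calls(text: str):
        """Always return (calls, message); calls is always a list."""
        match = re.search(r"\[TOOL_CALLS\](.*)$", text, re.DOTALL)
        if not match:
            return [], text
        message = text.split("[TOOL_CALLS]")[0].strip()
        try:
            calls = json.loads(match.group(1).strip())
        except json.JSONDecodeError:
            return [], text
        if isinstance(calls, dict):
            calls = [calls]
        return calls, message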
@@ -96,20 +74,19 @@ class TxAgent:
             return f"The model {model_name} is already loaded."
         self.model_name = model_name
 
-        # OPTIMIZED: maximize batching for big GPU
         self.model = LLM(
             model=self.model_name,
             dtype="float16",
             max_model_len=131072,
-            max_num_batched_tokens=…,
-            gpu_memory_utilization=0.…,
+            max_num_batched_tokens=32768,  # Increased for A100 80GB
+            gpu_memory_utilization=0.9,  # Higher utilization for better performance
             trust_remote_code=True
         )
         self.chat_template = Template(self.model.get_tokenizer().chat_template)
         self.tokenizer = self.model.get_tokenizer()
         logger.info(
             "Model %s loaded with max_model_len=%d, max_num_batched_tokens=%d, gpu_memory_utilization=%.2f",
-            self.model_name, 131072, …
+            self.model_name, 131072, 32768, 0.9
         )
         return f"Model {model_name} loaded successfully."
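For context on the two changed knobs: `gpu_memory_utilization` caps the fraction of GPU memory vLLM may reserve (weights plus KV cache), and `max_num_batched_tokens` bounds how many tokens the scheduler packs into one engine step, so raising both trades headroom for throughput. A self-contained load-and-generate sketch with the same settings (the model id is a placeholder):

    from vllm import LLM, SamplingParams

    llm = LLM(
        model="mims-harvard/TxAgent-T1-Llama-3.1-8B",  # placeholder model id
        dtype="float16",
        max_model_len=131072,
        max_num_batched_tokens=32768,   # tokens scheduled per engine step
        gpu_memory_utilization=0.9,     # fraction of VRAM vLLM may claim
        trust_remote_code=True,
    )
    out = llm.generate(["Hello"], SamplingParams(max_tokens=16))
    print(out[0].outputs[0].text)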
@@ -129,6 +106,7 @@ class TxAgent:
             self.rag_model.load_tool_desc_embedding(self.tooluniverse)
             self.rag_model.save_embeddings(cache_path)
         logger.debug("Tool description embeddings loaded")
+
     def rag_infer(self, query, top_k=5):
         return self.rag_model.rag_infer(query, top_k)
 
@@ -158,6 +136,29 @@ class TxAgent:
         logger.debug("Conversation initialized with %d messages", len(conversation))
         return conversation
 
+    def tool_RAG(self, message=None,
+                 picked_tool_names=None,
+                 existing_tools_prompt=[],
+                 rag_num=0,
+                 return_call_result=False):
+        if not self.enable_rag:
+            return []
+        extra_factor = 10
+        if picked_tool_names is None:
+            assert picked_tool_names is not None or message is not None
+            picked_tool_names = self.rag_infer(
+                message, top_k=rag_num * extra_factor)
+
+        picked_tool_names_no_special = [tool for tool in picked_tool_names if tool not in self.special_tools_name]
+        picked_tool_names = picked_tool_names_no_special[:rag_num]
+
+        picked_tools = self.tooluniverse.get_tool_by_name(picked_tool_names)
+        picked_tools_prompt = self.tooluniverse.prepare_tool_prompts(picked_tools)
+        logger.debug("Retrieved %d tools via RAG", len(picked_tools_prompt))
+        if return_call_result:
+            return picked_tools_prompt, picked_tool_names
+        return picked_tools_prompt
+
     def add_special_tools(self, tools, call_agent=False):
         if self.enable_finish:
             tools.append(self.tooluniverse.get_one_tool_by_one_name('Finish', return_prompt=True))
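The added `tool_RAG` over-retrieves on purpose: it asks the retriever for `rag_num * extra_factor` ranked names, filters out special tools such as `Finish` and `CallAgent`, then keeps the top `rag_num`. The filtering step in isolation (toy data):

    def top_k_excluding(ranked_names, excluded, k):
        """First k names not in `excluded`, preserving retrieval order."""
        excluded = set(excluded)
        return [n for n in ranked_names if n not in excluded][:k]

    ranked = ["Finish", "get_drug_info", "CallAgent", "get_trials", "get_genes"]
    print(top_k_excluding(ranked, {"Finish", "CallAgent"}, 2))
    # ['get_drug_info', 'get_trials']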
@@ -167,6 +168,11 @@ class TxAgent:
             logger.debug("CallAgent tool added")
         return tools
 
+    def add_finish_tools(self, tools):
+        tools.append(self.tooluniverse.get_one_tool_by_one_name('Finish', return_prompt=True))
+        logger.debug("Finish tool added")
+        return tools
+
     def set_system_prompt(self, conversation, sys_prompt):
         if not conversation:
             conversation.append({"role": "system", "content": sys_prompt})
@@ -174,12 +180,246 @@ class TxAgent:
             conversation[0] = {"role": "system", "content": sys_prompt}
         return conversation
 
+    def run_function_call(self, fcall_str,
+                          return_message=False,
+                          existing_tools_prompt=None,
+                          message_for_call_agent=None,
+                          call_agent=False,
+                          call_agent_level=None,
+                          temperature=None):
+        try:
+            function_call_json, message = self.tooluniverse.extract_function_call_json(
+                fcall_str, return_message=return_message, verbose=False)
+        except Exception as e:
+            logger.error("Tool call parsing failed: %s", e)
+            function_call_json = []
+            message = fcall_str
+
+        call_results = []
+        special_tool_call = ''
+        if function_call_json:
+            if isinstance(function_call_json, list):
+                for i in range(len(function_call_json)):
+                    logger.info("Tool Call: %s", function_call_json[i])
+                    if function_call_json[i]["name"] == 'Finish':
+                        special_tool_call = 'Finish'
+                        break
+                    elif function_call_json[i]["name"] == 'CallAgent':
+                        if call_agent_level < 2 and call_agent:
+                            solution_plan = function_call_json[i]['arguments']['solution']
+                            full_message = (
+                                message_for_call_agent +
+                                "\nYou must follow the following plan to answer the question: " +
+                                str(solution_plan)
+                            )
+                            call_result = self.run_multistep_agent(
+                                full_message, temperature=temperature,
+                                max_new_tokens=512, max_token=131072,
+                                call_agent=False, call_agent_level=call_agent_level)
+                            if call_result is None:
+                                call_result = "⚠️ No content returned from sub-agent."
+                            else:
+                                call_result = call_result.split('[FinalAnswer]')[-1].strip()
+                        else:
+                            call_result = "Error: CallAgent disabled."
+                    else:
+                        call_result = self.tooluniverse.run_one_function(function_call_json[i])
+                    call_id = self.tooluniverse.call_id_gen()
+                    function_call_json[i]["call_id"] = call_id
+                    logger.info("Tool Call Result: %s", call_result)
+                    call_results.append({
+                        "role": "tool",
+                        "content": json.dumps({"tool_name": function_call_json[i]["name"], "content": call_result, "call_id": call_id})
+                    })
+        else:
+            call_results.append({
+                "role": "tool",
+                "content": json.dumps({"content": "Invalid or no function call detected."})
+            })
+
+        revised_messages = [{
+            "role": "assistant",
+            "content": message.strip(),
+            "tool_calls": json.dumps(function_call_json)
+        }] + call_results
+        return revised_messages, existing_tools_prompt, special_tool_call
+
+    def run_function_call_stream(self, fcall_str,
+                                 return_message=False,
+                                 existing_tools_prompt=None,
+                                 message_for_call_agent=None,
+                                 call_agent=False,
+                                 call_agent_level=None,
+                                 temperature=None,
+                                 return_gradio_history=True):
+        try:
+            function_call_json, message = self.tooluniverse.extract_function_call_json(
+                fcall_str, return_message=return_message, verbose=False)
+        except Exception as e:
+            logger.error("Tool call parsing failed: %s", e)
+            function_call_json = []
+            message = fcall_str
+
+        call_results = []
+        special_tool_call = ''
+        if return_gradio_history:
+            gradio_history = []
+        if function_call_json:
+            if isinstance(function_call_json, list):
+                for i in range(len(function_call_json)):
+                    if function_call_json[i]["name"] == 'Finish':
+                        special_tool_call = 'Finish'
+                        break
+                    elif function_call_json[i]["name"] == 'DirectResponse':
+                        call_result = function_call_json[i]['arguments']['respose']
+                        special_tool_call = 'DirectResponse'
+                    elif function_call_json[i]["name"] == 'RequireClarification':
+                        call_result = function_call_json[i]['arguments']['unclear_question']
+                        special_tool_call = 'RequireClarification'
+                    elif function_call_json[i]["name"] == 'CallAgent':
+                        if call_agent_level < 2 and call_agent:
+                            solution_plan = function_call_json[i]['arguments']['solution']
+                            full_message = (
+                                message_for_call_agent +
+                                "\nYou must follow the following plan to answer the question: " +
+                                str(solution_plan)
+                            )
+                            sub_agent_task = "Sub TxAgent plan: " + str(solution_plan)
+                            call_result = yield from self.run_gradio_chat(
+                                full_message, history=[], temperature=temperature,
+                                max_new_tokens=512, max_token=131072,
+                                call_agent=False, call_agent_level=call_agent_level,
+                                conversation=None, sub_agent_task=sub_agent_task)
+                            if call_result is not None and isinstance(call_result, str):
+                                call_result = call_result.split('[FinalAnswer]')[-1]
+                            else:
+                                call_result = "⚠️ No content returned from sub-agent."
+                        else:
+                            call_result = "Error: CallAgent disabled."
+                    else:
+                        call_result = self.tooluniverse.run_one_function(function_call_json[i])
+                    call_id = self.tooluniverse.call_id_gen()
+                    function_call_json[i]["call_id"] = call_id
+                    call_results.append({
+                        "role": "tool",
+                        "content": json.dumps({"tool_name": function_call_json[i]["name"], "content": call_result, "call_id": call_id})
+                    })
+                    if return_gradio_history and function_call_json[i]["name"] != 'Finish':
+                        metadata = {"title": f"🧰 {function_call_json[i]['name']}", "log": str(function_call_json[i]['arguments'])}
+                        gradio_history.append(ChatMessage(role="assistant", content=str(call_result), metadata=metadata))
+        else:
+            call_results.append({
+                "role": "tool",
+                "content": json.dumps({"content": "Invalid or no function call detected."})
+            })
+
+        revised_messages = [{
+            "role": "assistant",
+            "content": message.strip(),
+            "tool_calls": json.dumps(function_call_json)
+        }] + call_results
+        if return_gradio_history:
+            return revised_messages, existing_tools_prompt, special_tool_call, gradio_history
+        return revised_messages, existing_tools_prompt, special_tool_call
+
+    def get_answer_based_on_unfinished_reasoning(self, conversation, temperature, max_new_tokens, max_token, outputs=None):
+        if conversation[-1]['role'] == 'assistant':
+            conversation.append(
+                {'role': 'tool', 'content': 'Errors occurred during function call; provide final answer with current information.'})
+        finish_tools_prompt = self.add_finish_tools([])
+        last_outputs_str = self.llm_infer(
+            messages=conversation,
+            temperature=temperature,
+            tools=finish_tools_prompt,
+            output_begin_string='[FinalAnswer]',
+            skip_special_tokens=True,
+            max_new_tokens=max_new_tokens,
+            max_token=max_token)
+        logger.info("Unfinished reasoning answer: %s", last_outputs_str[:100])
+        return last_outputs_str
+
+    def run_multistep_agent(self, message: str,
+                            temperature: float,
+                            max_new_tokens: int,
+                            max_token: int,
+                            max_round: int = 5,
+                            call_agent=False,
+                            call_agent_level=0):
+        logger.info("Starting multistep agent for message: %s", message[:100])
+        picked_tools_prompt, call_agent_level = self.initialize_tools_prompt(
+            call_agent, call_agent_level, message)
+        conversation = self.initialize_conversation(message)
+        outputs = []
+        last_outputs = []
+        next_round = True
+        current_round = 0
+        token_overflow = False
+        enable_summary = False
+        last_status = {}
+
+        while next_round and current_round < max_round:
+            current_round += 1
+            if len(outputs) > 0:
+                function_call_messages, picked_tools_prompt, special_tool_call = self.run_function_call(
+                    last_outputs, return_message=True,
+                    existing_tools_prompt=picked_tools_prompt,
+                    message_for_call_agent=message,
+                    call_agent=call_agent,
+                    call_agent_level=call_agent_level,
+                    temperature=temperature)
+
+                if special_tool_call == 'Finish':
+                    next_round = False
+                    conversation.extend(function_call_messages)
+                    content = function_call_messages[0]['content']
+                    if content is None:
+                        return "❌ No content returned after Finish tool call."
+                    return content.split('[FinalAnswer]')[-1]
+
+                if (self.enable_summary or token_overflow) and not call_agent:
+                    enable_summary = True
+                last_status = self.function_result_summary(
+                    conversation, status=last_status, enable_summary=enable_summary)
+
+                if function_call_messages:
+                    conversation.extend(function_call_messages)
+                    outputs.append(tool_result_format(function_call_messages))
+                else:
+                    next_round = False
+                    conversation.extend([{"role": "assistant", "content": ''.join(last_outputs)}])
+                    return ''.join(last_outputs).replace("</s>", "")
+
+            last_outputs = []
+            outputs.append("### TxAgent:\n")
+            last_outputs_str, token_overflow = self.llm_infer(
+                messages=conversation,
+                temperature=temperature,
+                tools=picked_tools_prompt,
+                skip_special_tokens=False,
+                max_new_tokens=2048,
+                max_token=131072,
+                check_token_status=True)
+            if last_outputs_str is None:
+                logger.warning("Token limit exceeded")
+                if self.force_finish:
+                    return self.get_answer_based_on_unfinished_reasoning(
+                        conversation, temperature, max_new_tokens, max_token)
+                return "❌ Token limit exceeded."
+            last_outputs.append(last_outputs_str)
+
+        if max_round == current_round:
+            logger.warning("Max rounds exceeded")
+            if self.force_finish:
+                return self.get_answer_based_on_unfinished_reasoning(
+                    conversation, temperature, max_new_tokens, max_token)
+            return None
+
     def build_logits_processor(self, messages, llm):
         logger.warning("Logits processor disabled due to vLLM V1 limitation")
         return None
 
-    def llm_infer(self, messages, temperature=0.…
-                  output_begin_string=None, max_new_tokens=…
+    def llm_infer(self, messages, temperature=0.1, tools=None,
+                  output_begin_string=None, max_new_tokens=512,
                   max_token=131072, skip_special_tokens=True,
                   model=None, tokenizer=None, terminators=None,
                   seed=None, check_token_status=False):
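Both `run_function_call` variants above append one round in the same shape: an `assistant` message whose `tool_calls` field is the JSON-serialized call list (a string, not a list), followed by one `tool` message per call whose `content` is itself JSON-encoded. A sketch of what lands in `conversation` (values illustrative):

    import json

    call = {"name": "get_drug_info", "arguments": {"drug": "aspirin"}, "call_id": "0"}
    round_messages = [
        {"role": "assistant",
         "content": "I will look up the drug first.",
         "tool_calls": json.dumps([call])},
        {"role": "tool",
         "content": json.dumps({"tool_name": call["name"],
                                "content": "Aspirin is an NSAID ...",
                                "call_id": call["call_id"]})},
    ]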
@@ -188,7 +428,7 @@ class TxAgent:
 
         logits_processor = self.build_logits_processor(messages, model)
         sampling_params = SamplingParams(
-            temperature=temperature,
+            temperature=temperature,
             max_tokens=max_new_tokens,
             seed=seed if seed is not None else self.seed,
         )
@@ -211,9 +451,13 @@ class TxAgent:
         output = model.generate(prompt, sampling_params=sampling_params)
         output_text = output[0].outputs[0].text
         output_tokens = len(self.tokenizer.encode(output_text, add_special_tokens=False))
-        logger.debug("Inference output…
+        logger.debug("Inference output: %s (output tokens: %d)", output_text[:100], output_tokens)
+        torch.cuda.empty_cache()
+        gc.collect()
+        if check_token_status and max_token is not None:
+            return output_text, token_overflow
+        return output_text
 
-        return output_text if not check_token_status else (output_text, token_overflow)
     def run_self_agent(self, message: str,
                        temperature: float,
                        max_new_tokens: int,
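After this hunk, `llm_infer`'s return type depends on the call: with `check_token_status=True` and a `max_token`, it returns an `(output_text, token_overflow)` tuple, otherwise just the string; the multi-round callers in this diff additionally treat a `None` output as the overflow signal. A calling sketch (the `agent` instance is assumed):

    # Tuple form, used by the multi-round loops so they can react to overflow.
    text, overflow = agent.llm_infer(messages=conv, temperature=0.1,
                                     tools=tools, max_new_tokens=512,
                                     max_token=131072, check_token_status=True)
    if text is None:  # context no longer fits; force a final answer
        text = agent.get_answer_based_on_unfinished_reasoning(conv, 0.1, 512, 131072)

    # Plain string form, as used by the one-shot helpers.
    answer = agent.llm_infer(messages=conv, temperature=0.1)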
@@ -248,10 +492,21 @@ class TxAgent:
                          max_new_tokens: int,
                          max_token: int):
         logger.info("Starting format agent")
-
+        if '[FinalAnswer]' in answer:
+            possible_final_answer = answer.split("[FinalAnswer]")[-1]
+        elif "\n\n" in answer:
+            possible_final_answer = answer.split("\n\n")[-1]
+        else:
+            possible_final_answer = answer.strip()
+        if len(possible_final_answer) == 1 and possible_final_answer in ['A', 'B', 'C', 'D', 'E']:
+            return possible_final_answer
+        elif len(possible_final_answer) > 1 and possible_final_answer[1] == ':' and possible_final_answer[0] in ['A', 'B', 'C', 'D', 'E']:
+            return possible_final_answer[0]
+
         conversation = self.set_system_prompt(
             [], "Transform the agent's answer to a single letter: 'A', 'B', 'C', 'D'.")
-        conversation.append({"role": "user", "content": message + …
+        conversation.append({"role": "user", "content": message +
+                             "\nAgent's answer: " + answer + "\nAnswer (must be a letter):"})
         return self.llm_infer(
             messages=conversation,
             temperature=temperature,
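The added fast path avoids a second LLM call when the letter can be read off directly: take the text after `[FinalAnswer]` (or the last paragraph), then accept either a bare letter or a `X:`-prefixed option. Worked examples of the two accepting branches:

    answer = "...reasoning...[FinalAnswer]B"
    tail = answer.split("[FinalAnswer]")[-1]            # "B"
    assert len(tail) == 1 and tail in ['A', 'B', 'C', 'D', 'E']      # returns "B"

    answer = "...reasoning...\n\nC: start aspirin 81 mg daily"
    tail = answer.split("\n\n")[-1]                     # "C: start aspirin ..."
    assert tail[1] == ':' and tail[0] in ['A', 'B', 'C', 'D', 'E']   # returns "C"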
@@ -259,17 +514,108 @@ class TxAgent:
             max_new_tokens=max_new_tokens,
             max_token=max_token)
 
-    def …
-        …
+    def run_summary_agent(self, thought_calls: str,
+                          function_response: str,
+                          temperature: float,
+                          max_new_tokens: int,
+                          max_token: int):
+        logger.info("Summarizing tool result")
+        prompt = f"""Thought and function calls:
+{thought_calls}
+Function calls' responses:
+\"\"\"
+{function_response}
+\"\"\"
+Summarize the function calls' responses in one sentence with all necessary information.
+"""
+        conversation = [{"role": "user", "content": prompt}]
+        output = self.llm_infer(
+            messages=conversation,
+            temperature=temperature,
+            tools=None,
+            max_new_tokens=max_new_tokens,
+            max_token=max_token)
+        if '[' in output:
+            output = output.split('[')[0]
+        return output
+
+    def function_result_summary(self, input_list, status, enable_summary):
+        if 'tool_call_step' not in status:
+            status['tool_call_step'] = 0
+        for idx in range(len(input_list)):
+            pos_id = len(input_list) - idx - 1
+            if input_list[pos_id]['role'] == 'assistant' and 'tool_calls' in input_list[pos_id]:
+                break
+
+        status['step'] = status.get('step', 0) + 1
+        if not enable_summary:
+            return status
+
+        status['summarized_index'] = status.get('summarized_index', 0)
+        status['summarized_step'] = status.get('summarized_step', 0)
+        status['previous_length'] = status.get('previous_length', 0)
+        status['history'] = status.get('history', [])
+
+        function_response = ''
+        idx = status['summarized_index']
+        this_thought_calls = None
+
+        while idx < len(input_list):
+            if (self.summary_mode == 'step' and status['summarized_step'] < status['step'] - status['tool_call_step'] - self.summary_skip_last_k) or \
+               (self.summary_mode == 'length' and status['previous_length'] > self.summary_context_length):
+                if input_list[idx]['role'] == 'assistant':
+                    if function_response:
+                        status['summarized_step'] += 1
+                        result_summary = self.run_summary_agent(
+                            thought_calls=this_thought_calls,
+                            function_response=function_response,
+                            temperature=0.1,
+                            max_new_tokens=512,
+                            max_token=131072)
+                        input_list.insert(last_call_idx + 1, {'role': 'tool', 'content': result_summary})
+                        status['summarized_index'] = last_call_idx + 2
+                        idx += 1
+                    last_call_idx = idx
+                    this_thought_calls = input_list[idx]['content'] + input_list[idx]['tool_calls']
+                    function_response = ''
+                elif input_list[idx]['role'] == 'tool' and this_thought_calls is not None:
+                    function_response += input_list[idx]['content']
+                    del input_list[idx]
+                    idx -= 1
+            else:
+                break
+            idx += 1
+
+        if function_response:
+            status['summarized_step'] += 1
+            result_summary = self.run_summary_agent(
+                thought_calls=this_thought_calls,
+                function_response=function_response,
+                temperature=0.1,
+                max_new_tokens=512,
+                max_token=131072)
+            tool_calls = json.loads(input_list[last_call_idx]['tool_calls'])
+            for tool_call in tool_calls:
+                del tool_call['call_id']
+            input_list[last_call_idx]['tool_calls'] = json.dumps(tool_calls)
+            input_list.insert(last_call_idx + 1, {'role': 'tool', 'content': result_summary})
+            status['summarized_index'] = last_call_idx + 2
+
+        return status
+
+    def update_parameters(self, **kwargs):
+        updated_attributes = {}
+        for key, value in kwargs.items():
+            if hasattr(self, key):
+                setattr(self, key, value)
+                updated_attributes[key] = value
+        logger.info("Updated parameters: %s", updated_attributes)
+        return updated_attributes
 
     def run_gradio_chat(self, message: str,
                         history: list,
                         temperature: float,
-                        max_new_tokens: int = …,
+                        max_new_tokens: int = 2048,
                         max_token: int = 131072,
                         call_agent: bool = False,
                         conversation: gr.State = None,
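`function_result_summary` compresses old tool outputs in place: walking from `status['summarized_index']`, it gathers the raw `tool` messages that follow an assistant tool call, deletes them, and re-inserts a single one-sentence digest from `run_summary_agent` (stripping the now-dangling `call_id`s). A runnable miniature of that splice, with a fixed string standing in for the summary model:

    import json

    conv = [
        {"role": "assistant", "content": "thought",
         "tool_calls": json.dumps([{"name": "get_drug_info", "arguments": {}, "call_id": "0"}])},
        {"role": "tool", "content": "long raw result 1"},
        {"role": "tool", "content": "long raw result 2"},
    ]
    last_call_idx = 0
    gathered = conv[1]["content"] + conv[2]["content"]  # the function_response
    del conv[1:3]                                       # drop raw tool outputs
    calls = json.loads(conv[last_call_idx]["tool_calls"])
    for c in calls:
        del c["call_id"]                                # ids removed once summarized
    conv[last_call_idx]["tool_calls"] = json.dumps(calls)
    conv.insert(last_call_idx + 1, {"role": "tool", "content": "one-sentence digest"})
    summarized_index = last_call_idx + 2                # cursor for the next round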
@@ -278,38 +624,71 @@ class TxAgent:
                         call_agent_level: int = 0,
                         sub_agent_task: str = None,
                         uploaded_files: list = None):
-        logger.info("Chat started, message: %s", message[:…
+        logger.info("Chat started, message: %s", message[:100])
         if not message or len(message.strip()) < 5:
             yield "Please provide a valid message or upload files to analyze."
             return
 
-        picked_tools_prompt, call_agent_level = self.initialize_tools_prompt(
-            …
+        picked_tools_prompt, call_agent_level = self.initialize_tools_prompt(
+            call_agent, call_agent_level, message)
+        conversation = self.initialize_conversation(
+            message, conversation, history)
+        history = []
         last_outputs = []
 
         next_round = True
         current_round = 0
+        enable_summary = False
+        last_status = {}
+        token_overflow = False
 
         try:
             while next_round and current_round < max_round:
                 current_round += 1
-                logger.…
+                logger.debug("Starting round %d/%d", current_round, max_round)
                 if last_outputs:
-                    function_call_messages, picked_tools_prompt, special_tool_call = self.…
-                        last_outputs, …
+                    function_call_messages, picked_tools_prompt, special_tool_call, current_gradio_history = yield from self.run_function_call_stream(
+                        last_outputs, return_message=True,
+                        existing_tools_prompt=picked_tools_prompt,
+                        message_for_call_agent=message,
+                        call_agent=call_agent,
+                        call_agent_level=call_agent_level,
+                        temperature=temperature)
+                    history.extend(current_gradio_history)
 
-                    conversation.extend(function_call_messages)
-                    outputs.append(tool_result_format(function_call_messages))
                     if special_tool_call == 'Finish':
+                        logger.info("Finish tool called, ending chat")
+                        yield history
                         next_round = False
-                        …
-                        return
+                        conversation.extend(function_call_messages)
+                        content = function_call_messages[0]['content']
+                        if content:
+                            return content
+                        return "No content returned after Finish tool call."
+
+                    elif special_tool_call in ['RequireClarification', 'DirectResponse']:
+                        last_msg = history[-1] if history else ChatMessage(role="assistant", content="Response needed.")
+                        history.append(ChatMessage(role="assistant", content=last_msg.content))
+                        logger.info("Special tool %s called, ending chat", special_tool_call)
+                        yield history
+                        next_round = False
+                        return last_msg.content
+
+                    if (self.enable_summary or token_overflow) and not call_agent:
+                        enable_summary = True
+                    last_status = self.function_result_summary(
+                        conversation, status=last_status, enable_summary=enable_summary)
+
+                    if function_call_messages:
+                        conversation.extend(function_call_messages)
+                        yield history
+                    else:
+                        next_round = False
+                        conversation.append({"role": "assistant", "content": ''.join(last_outputs)})
+                        logger.info("No function call messages, ending chat")
+                        return ''.join(last_outputs).replace("</s>", "")
 
+                last_outputs = []
                 last_outputs_str, token_overflow = self.llm_infer(
                     messages=conversation,
                     temperature=temperature,
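`run_gradio_chat` is a generator: it yields the growing list of `gradio.ChatMessage` objects after each round and delivers the final answer via `return`, which is why `run_function_call_stream` recovers sub-agent results with `yield from`. A sketch for driving it outside Gradio (assumes a constructed `agent`):

    def collect_final_answer(agent, question):
        gen = agent.run_gradio_chat(question, history=[], temperature=0.1)
        try:
            while True:
                history = next(gen)   # each yield is the updated message list
        except StopIteration as stop:
            return stop.value         # the generator's returned final answer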
@@ -321,103 +700,95 @@ class TxAgent:
                     check_token_status=True)
 
                 if last_outputs_str is None:
+                    logger.warning("Token limit exceeded")
                     if self.force_finish:
                         last_outputs_str = self.get_answer_based_on_unfinished_reasoning(
                             conversation, temperature, max_new_tokens, max_token)
-
+                        history.append(ChatMessage(role="assistant", content=last_outputs_str.strip()))
+                        yield history
                         return last_outputs_str
+                    error_msg = "Token limit exceeded."
+                    history.append(ChatMessage(role="assistant", content=error_msg))
+                    yield history
+                    return error_msg
+
+                last_thought = last_outputs_str.split("[TOOL_CALLS]")[0]
+                for msg in history:
+                    if msg.metadata is not None:
+                        msg.metadata['status'] = 'done'
+
+                if '[FinalAnswer]' in last_thought:
+                    parts = last_thought.split('[FinalAnswer]', 1)
+                    final_thought, final_answer = parts if len(parts) == 2 else (last_thought, "")
+                    history.append(ChatMessage(role="assistant", content=final_thought.strip()))
+                    yield history
+                    history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer.strip()))
+                    logger.info("Final answer provided: %s", final_answer[:100])
+                    yield history
+                    next_round = False  # Ensure we exit after final answer
+                    return final_answer
+                else:
+                    history.append(ChatMessage(role="assistant", content=last_thought))
+                    yield history
 
                 last_outputs.append(last_outputs_str)
 
+            if next_round:
+                if self.force_finish:
+                    last_outputs_str = self.get_answer_based_on_unfinished_reasoning(
+                        conversation, temperature, max_new_tokens, max_token)
+                    parts = last_outputs_str.split('[FinalAnswer]', 1)
+                    final_thought, final_answer = parts if len(parts) == 2 else (last_outputs_str, "")
+                    history.append(ChatMessage(role="assistant", content=final_thought.strip()))
+                    yield history
+                    history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer.strip()))
+                    logger.info("Forced final answer: %s", final_answer[:100])
+                    yield history
+                    return final_answer
+                else:
+                    error_msg = "Reasoning rounds exceeded limit."
+                    history.append(ChatMessage(role="assistant", content=error_msg))
+                    yield history
+                    return error_msg
 
         except Exception as e:
-            logger.error("Exception in run_gradio_chat: %s", e)
-            …
+            logger.error("Exception in run_gradio_chat: %s", e, exc_info=True)
+            error_msg = f"Error: {e}"
+            history.append(ChatMessage(role="assistant", content=error_msg))
+            yield history
+            if self.force_finish:
+                last_outputs_str = self.get_answer_based_on_unfinished_reasoning(
+                    conversation, temperature, max_new_tokens, max_token)
+                parts = last_outputs_str.split('[FinalAnswer]', 1)
+                final_thought, final_answer = parts if len(parts) == 2 else (last_outputs_str, "")
+                history.append(ChatMessage(role="assistant", content=final_thought.strip()))
+                yield history
+                history.append(ChatMessage(role="assistant", content="**🧠 Final Analysis:**\n" + final_answer.strip()))
+                logger.info("Forced final answer after error: %s", final_answer[:100])
+                yield history
+                return final_answer
+            return error_msg
 
-            # Can handle other special tools here
-
-        return function_call_messages, picked_tools_prompt, special_tool_call
-
-    def run_multistep_agent(self, message: str,
-                            temperature: float,
-                            max_new_tokens: int,
-                            max_token: int,
-                            max_round: int = 5,
-                            call_agent=False,
-                            call_agent_level=0):
-        logger.info("Starting multistep agent for message: %s", message[:100])
-        picked_tools_prompt, call_agent_level = self.initialize_tools_prompt(call_agent, call_agent_level, message)
-        conversation = self.initialize_conversation(message)
-
-        outputs = []
-        last_outputs = []
-        next_round = True
-        current_round = 0
-
-        while next_round and current_round < max_round:
-            current_round += 1
-            last_outputs_str, token_overflow = self.llm_infer(
-                messages=conversation,
-                temperature=temperature,
-                tools=picked_tools_prompt,
-                skip_special_tokens=False,
-                max_new_tokens=max_new_tokens,
-                max_token=max_token,
-                seed=self.seed,
-                check_token_status=True)
-
-            if last_outputs_str is None:
-                logger.warning("Token limit exceeded inside multistep agent")
-                return "⚠️ Token overflow."
-
-            outputs.append(last_outputs_str)
-
-            if "[FinalAnswer]" in last_outputs_str:
-                logger.info("Multistep Final Answer Provided")
-                return last_outputs_str.split("[FinalAnswer]")[-1]
-
-            last_outputs = [last_outputs_str]
-
-        return "⚠️ Max rounds exceeded."
 
     def run_gradio_chat_batch(self, messages: List[str],
                               temperature: float,
-                              max_new_tokens: int = …,
+                              max_new_tokens: int = 2048,
                               max_token: int = 131072,
                               call_agent: bool = False,
                               conversation: List = None,
                               max_round: int = 5,
                               seed: int = None,
                               call_agent_level: int = 0):
-        """…
+        """Run batch inference for multiple messages."""
         logger.info("Starting batch chat for %d messages", len(messages))
         batch_results = []
 
         for message in messages:
+            # Initialize conversation for each message
             conv = self.initialize_conversation(message, conversation, history=None)
             picked_tools_prompt, call_agent_level = self.initialize_tools_prompt(
                 call_agent, call_agent_level, message)
 
+            # Run single inference for simplicity (extend for multi-round if needed)
             output, token_overflow = self.llm_infer(
                 messages=conv,
                 temperature=temperature,
@@ -428,12 +799,12 @@ class TxAgent:
                 seed=seed,
                 check_token_status=True
             )
 
             if output is None:
                 logger.warning("Token limit exceeded for message: %s", message[:100])
-                batch_results.append("…
+                batch_results.append("Token limit exceeded.")
             else:
                 batch_results.append(output)
 
         logger.info("Batch chat completed for %d messages", len(messages))
-        return batch_results
+        return batch_results
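Usage sketch for the batch path (one inference round per message, as the added comment notes). The constructor and loader names outside this diff (`TxAgent(...)`, `load_models()`) and the model id are assumptions:

    agent = TxAgent(model_name="mims-harvard/TxAgent-T1-Llama-3.1-8B")  # assumed ctor
    agent.load_models()                                                 # assumed loader
    results = agent.run_gradio_chat_batch(
        ["What is the usual aspirin dose?", "List key warfarin interactions."],
        temperature=0.1, max_new_tokens=2048, max_token=131072)
    for answer in results:
        print(answer[:80])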