umyunsang committed on
Commit
e7c7f3c
·
verified ·
1 Parent(s): 741d10d

fix: DirectEnginePlanner tokenizer native tool calling + RegexPlanner fallback

Browse files
src/cli/renderer.py CHANGED
@@ -6,6 +6,7 @@ Uses `rich` when available; falls back to plain print() otherwise.
6
  from __future__ import annotations
7
 
8
  from threading import Lock
 
9
 
10
  from src.cli.terminal import (
11
  get_narrow_terminal_warning,
@@ -19,6 +20,7 @@ try:
19
  from rich.markdown import Markdown
20
  from rich.panel import Panel
21
  from rich.status import Status
 
22
  from rich.text import Text
23
 
24
  _console = Console()
@@ -44,6 +46,65 @@ NODE_STATUS_MESSAGES: dict[str, str] = {
44
  }
45
 
46
  MARKDOWN_CODE_THEME = "monokai"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
 
48
 
49
  def get_node_message(node_name: str) -> str:
@@ -124,6 +185,149 @@ def _plain_rule(columns: int) -> str:
124
  return "โ”€" * max(columns - 2, 12)
125
 
126
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
127
  def render_evidence_section(evidence_items: list) -> str:
128
  """EvidenceItem dict ๋ฆฌ์ŠคํŠธ๋ฅผ ์ถœ์ฒ˜ ์„น์…˜ ํ…์ŠคํŠธ๋กœ ๋ณ€ํ™˜ํ•œ๋‹ค.
129
 
@@ -193,7 +397,11 @@ def _build_citations_text(citations: list[str]) -> Text:
193
 
194
 
195
  def _build_rich_result_content(
196
- text_body: str, evidence_items: list, citations: list
 
 
 
 
197
  ) -> Text | Markdown | Group:
198
  """Build the rich renderable used inside the result panel."""
199
  renderables = []
@@ -201,10 +409,25 @@ def _build_rich_result_content(
201
  if text_body:
202
  renderables.append(Markdown(text_body, code_theme=MARKDOWN_CODE_THEME))
203
 
 
 
 
 
 
 
 
 
204
  if evidence_items:
205
- evidence_text = render_evidence_section(evidence_items)
206
- if evidence_text:
207
- renderables.append(Text(f"\n{evidence_text}\n", style="dim"))
 
 
 
 
 
 
 
208
  elif citations:
209
  renderables.append(_build_citations_text(citations))
210
 
@@ -222,15 +445,23 @@ def render_result(result: dict) -> None:
222
  - result["text"] or result["response"]: main answer text
223
  - result["evidence_items"]: EvidenceItem dict ๋ฆฌ์ŠคํŠธ (structured, ์šฐ์„ )
224
  - result["citations"] or result["sources"]: list of source strings (fallback)
 
225
  """
226
  text_body: str = result.get("text") or result.get("response") or ""
227
  evidence_items: list = result.get("evidence_items") or []
228
  citations: list = result.get("citations") or result.get("sources") or []
 
229
 
230
  use_rich, columns = _resolve_render_mode()
231
 
232
  if use_rich:
233
- content = _build_rich_result_content(text_body, evidence_items, citations)
 
 
 
 
 
 
234
  _console.print(
235
  Panel(
236
  content,
@@ -244,10 +475,19 @@ def render_result(result: dict) -> None:
244
  print(f"\n{rule}")
245
  print("GovOn")
246
  print(text_body)
 
 
 
 
247
  if evidence_items:
248
- evidence_text = render_evidence_section(evidence_items)
249
- if evidence_text:
250
- print(f"\n{evidence_text}")
 
 
 
 
 
251
  elif citations:
252
  print("\n์ถœ์ฒ˜")
253
  for idx, src in enumerate(citations, 1):
 
6
  from __future__ import annotations
7
 
8
  from threading import Lock
9
+ from typing import Any
10
 
11
  from src.cli.terminal import (
12
  get_narrow_terminal_warning,
 
20
  from rich.markdown import Markdown
21
  from rich.panel import Panel
22
  from rich.status import Status
23
+ from rich.table import Table
24
  from rich.text import Text
25
 
26
  _console = Console()
 
46
  }
47
 
48
  MARKDOWN_CODE_THEME = "monokai"
49
+ STRUCTURED_TOOL_ORDER = ("stats_lookup", "keyword_analyzer", "demographics_lookup")
50
+ STRUCTURED_TOOL_TITLES = {
51
+ "stats_lookup": "๋ฏผ์› ํ†ต๊ณ„",
52
+ "keyword_analyzer": "ํ‚ค์›Œ๋“œ ๋ถ„์„",
53
+ "demographics_lookup": "์ธ๊ตฌํ†ต๊ณ„",
54
+ }
55
+ STRUCTURED_API_TITLES = {
56
+ "doc_count": "์ฑ„๋„๋ณ„ ์ ‘์ˆ˜ ๊ฑด์ˆ˜",
57
+ "trend": "์ถ”์ด",
58
+ "statistics": "๊ธฐ๊ฐ„๋ณ„ ํ†ต๊ณ„",
59
+ "org_ranking": "๊ธฐ๊ด€ ์ˆœ์œ„",
60
+ "region_ranking": "์ง€์—ญ ์ˆœ์œ„",
61
+ "core_keyword": "ํ•ต์‹ฌ ํ‚ค์›Œ๋“œ",
62
+ "related_word": "์—ฐ๊ด€์–ด",
63
+ "gender": "์„ฑ๋ณ„ ๋ถ„ํฌ",
64
+ "age": "์—ฐ๋ น ๋ถ„ํฌ",
65
+ "population": "์ธ๊ตฌ ๋Œ€๋น„ ๋น„์œจ",
66
+ }
67
+ TABLE_COLUMN_PRIORITY = (
68
+ "keyword",
69
+ "topic",
70
+ "label",
71
+ "term",
72
+ "hits",
73
+ "value",
74
+ "ratio",
75
+ "prebRatio",
76
+ "prevRatio",
77
+ "population",
78
+ "pttn",
79
+ "dfpt",
80
+ "saeol",
81
+ )
82
+ TABLE_COLUMN_LABELS = {
83
+ "keyword": "ํ‚ค์›Œ๋“œ",
84
+ "topic": "ํ•ญ๋ชฉ",
85
+ "label": "ํ•ญ๋ชฉ",
86
+ "term": "ํ•ญ๋ชฉ",
87
+ "hits": "๊ฑด์ˆ˜",
88
+ "value": "๊ฐ’",
89
+ "ratio": "๋น„์œจ",
90
+ "prebRatio": "์ „์ผ ๋Œ€๋น„",
91
+ "prevRatio": "์ „๊ธฐ ๋Œ€๋น„",
92
+ "population": "์ธ๊ตฌ",
93
+ "pttn": "๊ตญ๋ฏผ์‹ ๋ฌธ๊ณ ",
94
+ "dfpt": "๋ฏผ์›24",
95
+ "saeol": "์ƒˆ์˜ฌ",
96
+ "source_type": "์ถœ์ฒ˜",
97
+ "title": "์ œ๋ชฉ",
98
+ "page": "ํŽ˜์ด์ง€",
99
+ "score": "์ ์ˆ˜",
100
+ "link_or_path": "๊ฒฝ๋กœ/๋งํฌ",
101
+ }
102
+ TABLE_HIDDEN_KEYS = {"_source_api"}
103
+ EVIDENCE_SOURCE_LABELS = {
104
+ "rag": "๋กœ์ปฌ ๋ฌธ์„œ",
105
+ "api": "์™ธ๋ถ€ API",
106
+ "llm_generated": "LLM ์ƒ์„ฑ",
107
+ }
108
 
109
 
110
  def get_node_message(node_name: str) -> str:
 
185
  return "โ”€" * max(columns - 2, 12)
186
 
187
 
188
+ def _format_table_value(key: str, value: Any) -> str:
189
+ """Format a structured value for rich/plain table rendering."""
190
+ if value in ("", None):
191
+ return "-"
192
+
193
+ if key == "source_type":
194
+ return EVIDENCE_SOURCE_LABELS.get(str(value), str(value))
195
+ if key == "page":
196
+ return f"p.{value}"
197
+ if key == "score":
198
+ try:
199
+ return f"{float(value):.2f}"
200
+ except (TypeError, ValueError):
201
+ return str(value)
202
+ if key in {"hits", "population", "pttn", "dfpt", "saeol"}:
203
+ try:
204
+ return f"{int(float(value)):,}"
205
+ except (TypeError, ValueError):
206
+ return str(value)
207
+ if key == "value":
208
+ try:
209
+ value_f = float(value)
210
+ return f"{value_f:,.1f}" if value_f % 1 else f"{value_f:,.0f}"
211
+ except (TypeError, ValueError):
212
+ return str(value)
213
+ if key in {"ratio", "prebRatio", "prevRatio"}:
214
+ text = str(value)
215
+ return text if text.endswith("%") else f"{text}%"
216
+ return str(value)
217
+
218
+
219
+ def _select_table_columns(rows: list[dict], columns: int) -> list[str]:
220
+ """Select visible table columns based on row shape and terminal width."""
221
+ visible_keys: list[str] = []
222
+ seen: set[str] = set()
223
+
224
+ for key in TABLE_COLUMN_PRIORITY:
225
+ if any(row.get(key) not in ("", None) for row in rows):
226
+ visible_keys.append(key)
227
+ seen.add(key)
228
+
229
+ for row in rows:
230
+ for key in row:
231
+ if key in TABLE_HIDDEN_KEYS or key in seen:
232
+ continue
233
+ if row.get(key) not in ("", None):
234
+ visible_keys.append(key)
235
+ seen.add(key)
236
+
237
+ max_columns = 5 if columns >= 120 else 4 if columns >= 80 else 2
238
+ return visible_keys[:max_columns]
239
+
240
+
241
+ def _build_rich_table(rows: list[dict], columns: int, *, column_keys: list[str] | None = None):
242
+ """Build a Rich table from structured rows."""
243
+ selected_keys = column_keys or _select_table_columns(rows, columns)
244
+ if not selected_keys:
245
+ return None
246
+
247
+ table = Table(expand=True)
248
+ for key in selected_keys:
249
+ table.add_column(
250
+ TABLE_COLUMN_LABELS.get(key, key),
251
+ overflow="fold",
252
+ no_wrap=key in {"source_type", "page", "score"},
253
+ )
254
+
255
+ for row in rows:
256
+ table.add_row(*(_format_table_value(key, row.get(key)) for key in selected_keys))
257
+
258
+ return table
259
+
260
+
261
+ def _render_plain_table(
262
+ title: str,
263
+ rows: list[dict],
264
+ columns: int,
265
+ *,
266
+ column_keys: list[str] | None = None,
267
+ ) -> str:
268
+ """Render structured rows as a tab-delimited plain-text table."""
269
+ selected_keys = column_keys or _select_table_columns(rows, columns)
270
+ if not selected_keys:
271
+ return ""
272
+
273
+ lines = [title, "\t".join(TABLE_COLUMN_LABELS.get(key, key) for key in selected_keys)]
274
+ for row in rows:
275
+ lines.append("\t".join(_format_table_value(key, row.get(key)) for key in selected_keys))
276
+ return "\n".join(lines)
277
+
278
+
279
+ def _iter_structured_result_sections(tool_results: dict[str, Any]) -> list[tuple[str, list[dict]]]:
280
+ """Extract table-ready structured result sections from tool results."""
281
+ sections: list[tuple[str, list[dict]]] = []
282
+
283
+ for tool_name in STRUCTURED_TOOL_ORDER:
284
+ payload = tool_results.get(tool_name)
285
+ if not isinstance(payload, dict):
286
+ continue
287
+ results = payload.get("results")
288
+ if not isinstance(results, list) or not results:
289
+ continue
290
+
291
+ grouped_rows: dict[str, list[dict]] = {}
292
+ for row in results:
293
+ if not isinstance(row, dict):
294
+ continue
295
+ grouped_rows.setdefault(str(row.get("_source_api") or "results"), []).append(row)
296
+
297
+ for source_api, rows in grouped_rows.items():
298
+ source_title = STRUCTURED_API_TITLES.get(source_api)
299
+ tool_title = STRUCTURED_TOOL_TITLES.get(tool_name, tool_name)
300
+ title = f"{tool_title} ยท {source_title}" if source_title else tool_title
301
+ sections.append((title, rows))
302
+
303
+ return sections
304
+
305
+
306
+ def _build_evidence_table_rows(evidence_items: list[dict]) -> list[dict]:
307
+ """Normalize evidence items into a table-oriented row schema."""
308
+ rows: list[dict] = []
309
+ for item in evidence_items:
310
+ rows.append(
311
+ {
312
+ "source_type": item.get("source_type"),
313
+ "title": item.get("title") or item.get("excerpt", ""),
314
+ "page": item.get("page"),
315
+ "score": item.get("score"),
316
+ "link_or_path": item.get("link_or_path"),
317
+ }
318
+ )
319
+ return rows
320
+
321
+
322
+ def _select_evidence_columns(columns: int) -> list[str]:
323
+ """Return evidence table columns based on terminal width."""
324
+ if columns >= 120:
325
+ return ["source_type", "title", "page", "score", "link_or_path"]
326
+ if columns >= 80:
327
+ return ["source_type", "title", "score"]
328
+ return ["source_type", "title"]
329
+
330
+
331
  def render_evidence_section(evidence_items: list) -> str:
332
  """EvidenceItem dict ๋ฆฌ์ŠคํŠธ๋ฅผ ์ถœ์ฒ˜ ์„น์…˜ ํ…์ŠคํŠธ๋กœ ๋ณ€ํ™˜ํ•œ๋‹ค.
333
 
 
397
 
398
 
399
  def _build_rich_result_content(
400
+ text_body: str,
401
+ evidence_items: list,
402
+ citations: list,
403
+ tool_results: dict[str, Any],
404
+ columns: int,
405
  ) -> Text | Markdown | Group:
406
  """Build the rich renderable used inside the result panel."""
407
  renderables = []
 
409
  if text_body:
410
  renderables.append(Markdown(text_body, code_theme=MARKDOWN_CODE_THEME))
411
 
412
+ for title, rows in _iter_structured_result_sections(tool_results):
413
+ table = _build_rich_table(rows, columns)
414
+ if table is None:
415
+ continue
416
+ renderables.append(Text(""))
417
+ renderables.append(Text(title, style="bold cyan"))
418
+ renderables.append(table)
419
+
420
  if evidence_items:
421
+ evidence_rows = _build_evidence_table_rows(evidence_items)
422
+ evidence_table = _build_rich_table(
423
+ evidence_rows,
424
+ columns,
425
+ column_keys=_select_evidence_columns(columns),
426
+ )
427
+ if evidence_table is not None:
428
+ renderables.append(Text(""))
429
+ renderables.append(Text("์ฐธ์กฐ ๊ทผ๊ฑฐ", style="bold"))
430
+ renderables.append(evidence_table)
431
  elif citations:
432
  renderables.append(_build_citations_text(citations))
433
 
 
445
  - result["text"] or result["response"]: main answer text
446
  - result["evidence_items"]: EvidenceItem dict ๋ฆฌ์ŠคํŠธ (structured, ์šฐ์„ )
447
  - result["citations"] or result["sources"]: list of source strings (fallback)
448
+ - result["tool_results"]: stats/keyword/demographics structured result dict
449
  """
450
  text_body: str = result.get("text") or result.get("response") or ""
451
  evidence_items: list = result.get("evidence_items") or []
452
  citations: list = result.get("citations") or result.get("sources") or []
453
+ tool_results: dict[str, Any] = result.get("tool_results") or {}
454
 
455
  use_rich, columns = _resolve_render_mode()
456
 
457
  if use_rich:
458
+ content = _build_rich_result_content(
459
+ text_body,
460
+ evidence_items,
461
+ citations,
462
+ tool_results,
463
+ columns,
464
+ )
465
  _console.print(
466
  Panel(
467
  content,
 
475
  print(f"\n{rule}")
476
  print("GovOn")
477
  print(text_body)
478
+ for title, rows in _iter_structured_result_sections(tool_results):
479
+ table_text = _render_plain_table(title, rows, columns)
480
+ if table_text:
481
+ print(f"\n{table_text}")
482
  if evidence_items:
483
+ evidence_table = _render_plain_table(
484
+ "์ฐธ์กฐ ๊ทผ๊ฑฐ",
485
+ _build_evidence_table_rows(evidence_items),
486
+ columns,
487
+ column_keys=_select_evidence_columns(columns),
488
+ )
489
+ if evidence_table:
490
+ print(f"\n{evidence_table}")
491
  elif citations:
492
  print("\n์ถœ์ฒ˜")
493
  for idx, src in enumerate(citations, 1):
src/inference/graph/nodes.py CHANGED
@@ -113,14 +113,26 @@ async def planner_node(
113
  plan = await planner_adapter.plan(messages=messages, context=context)
114
  except PlanValidationError as exc:
115
  _latency_ms = round((time.monotonic() - _start) * 1000, 2)
116
- logger.warning(f"[planner] plan ์ƒ์„ฑ ์‹คํŒจ, fallback ์ ์šฉ: {exc} latency_ms={_latency_ms}")
117
- return {
118
- **validator.make_fallback_plan(exc),
119
- "goal": f"โš  ๊ณ„ํš ์ˆ˜๋ฆฝ ์‹คํŒจ: {str(exc)[:100]}",
120
- "reason": "์‹œ์Šคํ…œ์ด ์š”์ฒญ์„ ๋ถ„์„ํ•˜์ง€ ๋ชปํ–ˆ์Šต๋‹ˆ๋‹ค. ๊ฑฐ์ ˆ ํ›„ ๋‹ค์‹œ ์‹œ๋„ํ•ด์ฃผ์„ธ์š”.",
121
- "task_type": "",
122
- "node_latencies": {"planner": _latency_ms},
123
- }
 
 
 
 
 
 
 
 
 
 
 
 
124
 
125
  try:
126
  validator.validate(plan)
 
113
  plan = await planner_adapter.plan(messages=messages, context=context)
114
  except PlanValidationError as exc:
115
  _latency_ms = round((time.monotonic() - _start) * 1000, 2)
116
+ logger.warning(f"[planner] LLM plan ์‹คํŒจ, RegexPlannerAdapter fallback ์‹œ๋„: {exc}")
117
+ try:
118
+ from .planner_adapter import RegexPlannerAdapter
119
+
120
+ regex_fallback = RegexPlannerAdapter(
121
+ registry=getattr(planner_adapter, "_registry", None)
122
+ )
123
+ plan = await regex_fallback.plan(messages=messages, context=context)
124
+ logger.info(f"[planner] Regex fallback ์„ฑ๊ณต: tools={plan.tools}")
125
+ except Exception as fallback_exc:
126
+ logger.warning(
127
+ f"[planner] Regex fallback๋„ ์‹คํŒจ: {fallback_exc} latency_ms={_latency_ms}"
128
+ )
129
+ return {
130
+ **validator.make_fallback_plan(exc),
131
+ "goal": f"โš  ๊ณ„ํš ์ˆ˜๋ฆฝ ์‹คํŒจ: {str(exc)[:100]}",
132
+ "reason": "์‹œ์Šคํ…œ์ด ์š”์ฒญ์„ ๋ถ„์„ํ•˜์ง€ ๋ชปํ–ˆ์Šต๋‹ˆ๋‹ค. ๊ฑฐ์ ˆ ํ›„ ๋‹ค์‹œ ์‹œ๋„ํ•ด์ฃผ์„ธ์š”.",
133
+ "task_type": "",
134
+ "node_latencies": {"planner": _latency_ms},
135
+ }
136
 
137
  try:
138
  validator.validate(plan)
src/inference/graph/planner_adapter.py CHANGED
@@ -298,14 +298,38 @@ class DirectEnginePlannerAdapter(PlannerAdapter):
298
  system_prompt = LLMPlannerAdapter._build_system_prompt()
299
  user_prompt = LLMPlannerAdapter._build_user_prompt(messages, context)
300
 
301
- # Hermes tool calling format
302
- tools_json = json.dumps(tool_definitions, ensure_ascii=False)
303
- prompt = (
304
- f"<tools>\n{tools_json}\n</tools>\n\n"
305
- f"[|system|]{system_prompt}[|endofturn|]\n"
306
- f"[|user|]{user_prompt}[|endofturn|]\n"
307
- "[|assistant|]"
308
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
309
 
310
  try:
311
  from vllm import SamplingParams as _SamplingParams
@@ -329,7 +353,13 @@ class DirectEnginePlannerAdapter(PlannerAdapter):
329
  if output is None or not output.outputs:
330
  raise PlanValidationError("Engine ์ถœ๋ ฅ์ด ๋น„์–ด ์žˆ์Œ")
331
 
332
- content = self._engine_manager._strip_thought_blocks(output.outputs[0].text)
 
 
 
 
 
 
333
 
334
  # 1์ฐจ: <tool_call> ํƒœ๊ทธ ํŒŒ์‹ฑ
335
  tool_calls = self._parse_hermes_tool_calls(content)
 
298
  system_prompt = LLMPlannerAdapter._build_system_prompt()
299
  user_prompt = LLMPlannerAdapter._build_user_prompt(messages, context)
300
 
301
+ # OpenAI function calling ํ˜•์‹์˜ tools (EXAONE chat template ํ˜ธํ™˜)
302
+ tools_for_template = tool_definitions
303
+
304
+ # EXAONE chat template์œผ๋กœ ํ”„๋กฌํ”„ํŠธ ์ƒ์„ฑ
305
+ chat_messages = [
306
+ {"role": "system", "content": system_prompt},
307
+ {"role": "user", "content": user_prompt},
308
+ ]
309
+
310
+ try:
311
+ # vLLM engine์—์„œ ํ† ํฌ๋‚˜์ด์ € ํš๋“
312
+ if self._engine_manager.engine is not None:
313
+ tokenizer = self._engine_manager.engine.get_tokenizer()
314
+ else:
315
+ raise RuntimeError("engine is None (SKIP_MODEL_LOAD=true?)")
316
+ prompt = tokenizer.apply_chat_template(
317
+ chat_messages,
318
+ tools=tools_for_template,
319
+ tokenize=False,
320
+ add_generation_prompt=True,
321
+ )
322
+ logger.info("[DirectEnginePlanner] EXAONE chat template์œผ๋กœ ํ”„๋กฌํ”„ํŠธ ์ƒ์„ฑ ์™„๋ฃŒ")
323
+ except Exception as exc:
324
+ logger.warning(
325
+ f"[DirectEnginePlanner] tokenizer chat template ์‹คํŒจ, ํ…์ŠคํŠธ JSON fallback: {exc}"
326
+ )
327
+ # fallback: ๊ธฐ์กด ํ…์ŠคํŠธ JSON ๋ฐฉ์‹
328
+ prompt = (
329
+ f"[|system|]{system_prompt}[|endofturn|]\n"
330
+ f"[|user|]{user_prompt}[|endofturn|]\n"
331
+ "[|assistant|]"
332
+ )
333
 
334
  try:
335
  from vllm import SamplingParams as _SamplingParams
 
353
  if output is None or not output.outputs:
354
  raise PlanValidationError("Engine ์ถœ๋ ฅ์ด ๋น„์–ด ์žˆ์Œ")
355
 
356
+ # raw output ๋กœ๊น… (๋””๋ฒ„๊น…์šฉ)
357
+ raw_text = output.outputs[0].text
358
+ logger.info(f"[DirectEnginePlanner] raw output ({len(raw_text)} chars): {raw_text[:300]}")
359
+ content = self._engine_manager._strip_thought_blocks(raw_text)
360
+ logger.info(
361
+ f"[DirectEnginePlanner] stripped content ({len(content)} chars): {content[:300]}"
362
+ )
363
 
364
  # 1์ฐจ: <tool_call> ํƒœ๊ทธ ํŒŒ์‹ฑ
365
  tool_calls = self._parse_hermes_tool_calls(content)