sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
usestrix/strix:strix/agents/state.py | import uuid
from datetime import UTC, datetime
from typing import Any
from pydantic import BaseModel, Field
def _generate_agent_id() -> str:
return f"agent_{uuid.uuid4().hex[:8]}"
class AgentState(BaseModel):
    """Mutable state for one agent run.

    Bundles identity/hierarchy data, sandbox wiring, the task description,
    loop-control flags, the conversation transcript and audit trails
    (actions, observations, errors), plus ISO-8601 UTC timestamps.
    """

    agent_id: str = Field(default_factory=_generate_agent_id)
    agent_name: str = "Strix Agent"
    parent_id: str | None = None
    sandbox_id: str | None = None
    sandbox_token: str | None = None
    sandbox_info: dict[str, Any] | None = None
    task: str = ""
    iteration: int = 0
    max_iterations: int = 300
    completed: bool = False
    stop_requested: bool = False
    waiting_for_input: bool = False
    llm_failed: bool = False
    waiting_start_time: datetime | None = None
    final_result: dict[str, Any] | None = None
    max_iterations_warning_sent: bool = False
    messages: list[dict[str, Any]] = Field(default_factory=list)
    context: dict[str, Any] = Field(default_factory=dict)
    start_time: str = Field(default_factory=lambda: datetime.now(UTC).isoformat())
    last_updated: str = Field(default_factory=lambda: datetime.now(UTC).isoformat())
    actions_taken: list[dict[str, Any]] = Field(default_factory=list)
    observations: list[dict[str, Any]] = Field(default_factory=list)
    errors: list[str] = Field(default_factory=list)

    def _touch(self) -> None:
        """Refresh ``last_updated`` with the current UTC time."""
        self.last_updated = datetime.now(UTC).isoformat()

    def increment_iteration(self) -> None:
        """Advance the loop counter by one."""
        self.iteration += 1
        self._touch()

    def add_message(
        self, role: str, content: Any, thinking_blocks: list[dict[str, Any]] | None = None
    ) -> None:
        """Append a chat message; thinking blocks are stored only when non-empty."""
        entry: dict[str, Any] = {"role": role, "content": content}
        if thinking_blocks:
            entry["thinking_blocks"] = thinking_blocks
        self.messages.append(entry)
        self._touch()

    def add_action(self, action: dict[str, Any]) -> None:
        """Record an executed action, stamped with the current iteration and time."""
        stamped = {
            "iteration": self.iteration,
            "timestamp": datetime.now(UTC).isoformat(),
            "action": action,
        }
        self.actions_taken.append(stamped)

    def add_observation(self, observation: dict[str, Any]) -> None:
        """Record an observation, stamped with the current iteration and time."""
        stamped = {
            "iteration": self.iteration,
            "timestamp": datetime.now(UTC).isoformat(),
            "observation": observation,
        }
        self.observations.append(stamped)

    def add_error(self, error: str) -> None:
        """Log an error string, prefixed with the iteration it happened in."""
        self.errors.append(f"Iteration {self.iteration}: {error}")
        self._touch()

    def update_context(self, key: str, value: Any) -> None:
        """Store an arbitrary key/value pair in the shared context."""
        self.context[key] = value
        self._touch()

    def set_completed(self, final_result: dict[str, Any] | None = None) -> None:
        """Mark the run as finished, optionally attaching a final result."""
        self.completed = True
        self.final_result = final_result
        self._touch()

    def request_stop(self) -> None:
        """Flag the run for cooperative shutdown."""
        self.stop_requested = True
        self._touch()

    def should_stop(self) -> bool:
        """True when stopped, completed, or out of iterations."""
        return any(
            (self.stop_requested, self.completed, self.has_reached_max_iterations())
        )

    def is_waiting_for_input(self) -> bool:
        """True while the agent is paused waiting for user input."""
        return self.waiting_for_input

    def enter_waiting_state(self, llm_failed: bool = False) -> None:
        """Pause for user input; ``llm_failed`` marks an LLM-triggered pause."""
        self.waiting_for_input = True
        self.waiting_start_time = datetime.now(UTC)
        self.llm_failed = llm_failed
        self._touch()

    def resume_from_waiting(self, new_task: str | None = None) -> None:
        """Leave the waiting state, clearing stop/completion flags; optionally retask."""
        self.waiting_for_input = False
        self.waiting_start_time = None
        self.stop_requested = False
        self.completed = False
        self.llm_failed = False
        if new_task:
            self.task = new_task
        self._touch()

    def has_reached_max_iterations(self) -> bool:
        """True once the iteration budget is exhausted."""
        return self.iteration >= self.max_iterations

    def is_approaching_max_iterations(self, threshold: float = 0.85) -> bool:
        """True once the given fraction of the iteration budget is used."""
        return self.iteration >= int(self.max_iterations * threshold)

    def has_waiting_timeout(self) -> bool:
        """True when an input-wait has lasted more than 600 seconds.

        A wait never times out while the run is stopped, LLM-failed,
        completed, or out of iterations.
        """
        if not (self.waiting_for_input and self.waiting_start_time):
            return False
        is_terminal = (
            self.stop_requested
            or self.llm_failed
            or self.completed
            or self.has_reached_max_iterations()
        )
        if is_terminal:
            return False
        waited = (datetime.now(UTC) - self.waiting_start_time).total_seconds()
        return waited > 600

    def has_empty_last_messages(self, count: int = 3) -> bool:
        """True when the ``count`` most recent messages all lack non-blank string content."""
        if len(self.messages) < count:
            return False

        def _has_text(message: dict[str, Any]) -> bool:
            body = message.get("content", "")
            return isinstance(body, str) and bool(body.strip())

        return not any(_has_text(m) for m in self.messages[-count:])

    def get_conversation_history(self) -> list[dict[str, Any]]:
        """Return the live message list (not a copy)."""
        return self.messages

    def get_execution_summary(self) -> dict[str, Any]:
        """Return a flat snapshot of the run suitable for reporting."""
        return {
            "agent_id": self.agent_id,
            "agent_name": self.agent_name,
            "parent_id": self.parent_id,
            "sandbox_id": self.sandbox_id,
            "sandbox_info": self.sandbox_info,
            "task": self.task,
            "iteration": self.iteration,
            "max_iterations": self.max_iterations,
            "completed": self.completed,
            "final_result": self.final_result,
            "start_time": self.start_time,
            "last_updated": self.last_updated,
            "total_actions": len(self.actions_taken),
            "total_observations": len(self.observations),
            "total_errors": len(self.errors),
            "has_errors": len(self.errors) > 0,
            "max_iterations_reached": self.has_reached_max_iterations() and not self.completed,
        }
| {
"repo_id": "usestrix/strix",
"file_path": "strix/agents/state.py",
"license": "Apache License 2.0",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
usestrix/strix:strix/llm/config.py | from strix.config import Config
from strix.config.config import resolve_llm_config
from strix.llm.utils import resolve_strix_model
class LLMConfig:
    """Resolved LLM settings: model names, credentials, caching, skills, timeouts."""

    def __init__(
        self,
        model_name: str | None = None,
        enable_prompt_caching: bool = True,
        skills: list[str] | None = None,
        timeout: int | None = None,
        scan_mode: str = "deep",
    ):
        """Resolve configuration, preferring explicit arguments over environment values.

        Raises:
            ValueError: when no model name is supplied or configured.
        """
        env_model, env_api_key, env_api_base = resolve_llm_config()
        self.api_key = env_api_key
        self.api_base = env_api_base
        self.model_name = model_name or env_model
        if not self.model_name:
            raise ValueError("STRIX_LLM environment variable must be set and not empty")
        api_model, canonical = resolve_strix_model(self.model_name)
        # Fall back to the raw name when the resolver yields nothing.
        self.litellm_model: str = api_model or self.model_name
        self.canonical_model: str = canonical or self.model_name
        self.enable_prompt_caching = enable_prompt_caching
        self.skills = skills or []
        if timeout:
            self.timeout = timeout
        else:
            self.timeout = int(Config.get("llm_timeout") or "300")
        # Unknown modes silently degrade to the most thorough setting.
        self.scan_mode = scan_mode if scan_mode in ("quick", "standard", "deep") else "deep"
| {
"repo_id": "usestrix/strix",
"file_path": "strix/llm/config.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
usestrix/strix:strix/llm/llm.py | import asyncio
from collections.abc import AsyncIterator
from dataclasses import dataclass
from typing import Any
import litellm
from jinja2 import Environment, FileSystemLoader, select_autoescape
from litellm import acompletion, completion_cost, stream_chunk_builder, supports_reasoning
from litellm.utils import supports_prompt_caching, supports_vision
from strix.config import Config
from strix.llm.config import LLMConfig
from strix.llm.memory_compressor import MemoryCompressor
from strix.llm.utils import (
_truncate_to_first_function,
fix_incomplete_tool_call,
normalize_tool_format,
parse_tool_invocations,
)
from strix.skills import load_skills
from strix.tools import get_tools_prompt
from strix.utils.resource_paths import get_strix_resource_path
# Let litellm silently drop or adjust parameters a given provider does not
# support, instead of raising, so one code path can serve many backends.
litellm.drop_params = True
litellm.modify_params = True
class LLMRequestFailedError(Exception):
def __init__(self, message: str, details: str | None = None):
super().__init__(message)
self.message = message
self.details = details
@dataclass
class LLMResponse:
    """One streamed (or final) model response.

    Attributes:
        content: Accumulated response text so far (or the final text).
        tool_invocations: Tool calls parsed out of ``content``, if any.
        thinking_blocks: Provider reasoning blocks, when available.
    """

    content: str
    tool_invocations: list[dict[str, Any]] | None = None
    thinking_blocks: list[dict[str, Any]] | None = None
@dataclass
class RequestStats:
input_tokens: int = 0
output_tokens: int = 0
cached_tokens: int = 0
cost: float = 0.0
requests: int = 0
def to_dict(self) -> dict[str, int | float]:
return {
"input_tokens": self.input_tokens,
"output_tokens": self.output_tokens,
"cached_tokens": self.cached_tokens,
"cost": round(self.cost, 4),
"requests": self.requests,
}
class LLM:
    """Streaming LLM client built on litellm.

    Renders the agent's Jinja system prompt, streams completions with
    retry/backoff, extracts tool-call XML from the response text, and
    accumulates token/cost usage statistics across requests.
    """

    def __init__(self, config: LLMConfig, agent_name: str | None = None):
        """Set up the client; ``agent_name`` selects the prompt template directory."""
        self.config = config
        self.agent_name = agent_name
        self.agent_id: str | None = None
        # Running totals across every request this instance makes.
        self._total_stats = RequestStats()
        self.memory_compressor = MemoryCompressor(model_name=config.litellm_model)
        self.system_prompt = self._load_system_prompt(agent_name)
        # Explicit config wins; otherwise quick scans get "medium", all else "high".
        reasoning = Config.get("strix_reasoning_effort")
        if reasoning:
            self._reasoning_effort = reasoning
        elif config.scan_mode == "quick":
            self._reasoning_effort = "medium"
        else:
            self._reasoning_effort = "high"

    def _load_system_prompt(self, agent_name: str | None) -> str:
        """Render ``system_prompt.jinja`` for this agent with skills injected.

        Returns "" when no agent name is given or rendering fails for any
        reason — prompt loading is deliberately best-effort.
        """
        if not agent_name:
            return ""
        try:
            prompt_dir = get_strix_resource_path("agents", agent_name)
            skills_dir = get_strix_resource_path("skills")
            # Autoescape disabled: the output is an LLM prompt, not HTML.
            env = Environment(
                loader=FileSystemLoader([prompt_dir, skills_dir]),
                autoescape=select_autoescape(enabled_extensions=(), default_for_string=False),
            )
            # The scan-mode skill is always loaded on top of configured skills.
            skills_to_load = [
                *list(self.config.skills or []),
                f"scan_modes/{self.config.scan_mode}",
            ]
            skill_content = load_skills(skills_to_load)
            env.globals["get_skill"] = lambda name: skill_content.get(name, "")
            result = env.get_template("system_prompt.jinja").render(
                get_tools_prompt=get_tools_prompt,
                loaded_skill_names=list(skill_content.keys()),
                **skill_content,
            )
            return str(result)
        except Exception:  # noqa: BLE001
            return ""

    def set_agent_identity(self, agent_name: str | None, agent_id: str | None) -> None:
        """Update the identity echoed into prompts; ``None`` leaves a value unchanged."""
        if agent_name:
            self.agent_name = agent_name
        if agent_id:
            self.agent_id = agent_id

    async def generate(
        self, conversation_history: list[dict[str, Any]]
    ) -> AsyncIterator[LLMResponse]:
        """Stream LLMResponse updates for the conversation, retrying on failure.

        Retries up to STRIX_LLM_MAX_RETRIES (default 5) extra attempts with
        exponential backoff capped at 10s; non-retryable or exhausted errors
        surface as LLMRequestFailedError via ``_raise_error``.
        """
        messages = self._prepare_messages(conversation_history)
        max_retries = int(Config.get("strix_llm_max_retries") or "5")
        for attempt in range(max_retries + 1):
            try:
                async for response in self._stream(messages):
                    yield response
                return  # noqa: TRY300
            except Exception as e:  # noqa: BLE001
                if attempt >= max_retries or not self._should_retry(e):
                    self._raise_error(e)
                # 2, 4, 8, 10, 10, ... seconds between attempts.
                wait = min(10, 2 * (2**attempt))
                await asyncio.sleep(wait)

    async def _stream(self, messages: list[dict[str, Any]]) -> AsyncIterator[LLMResponse]:
        """Run one streaming completion, yielding partial then a final response.

        Content accumulation stops as soon as one complete tool call is seen;
        a few extra chunks are still drained afterwards in the hope of
        catching the trailing usage chunk for stats.
        """
        accumulated = ""
        chunks: list[Any] = []
        # 0 while streaming normally; >=1 counts chunks drained after a
        # complete tool call was detected (consumed only to capture usage).
        done_streaming = 0
        self._total_stats.requests += 1
        response = await acompletion(**self._build_completion_args(messages), stream=True)
        async for chunk in response:
            chunks.append(chunk)
            if done_streaming:
                done_streaming += 1
                # Stop once usage arrives, or give up after 5 extra chunks.
                if getattr(chunk, "usage", None) or done_streaming > 5:
                    break
                continue
            delta = self._get_chunk_content(chunk)
            if delta:
                accumulated += delta
                # A closing tool tag means the first tool call is complete:
                # truncate anything after it and switch to drain mode.
                if "</function>" in accumulated or "</invoke>" in accumulated:
                    end_tag = "</function>" if "</function>" in accumulated else "</invoke>"
                    pos = accumulated.find(end_tag)
                    accumulated = accumulated[: pos + len(end_tag)]
                    yield LLMResponse(content=accumulated)
                    done_streaming = 1
                    continue
                yield LLMResponse(content=accumulated)
        if chunks:
            self._update_usage_stats(stream_chunk_builder(chunks))
        # Final pass: canonicalize tool XML, keep only the first call, and
        # repair a missing closing tag before parsing invocations.
        accumulated = normalize_tool_format(accumulated)
        accumulated = fix_incomplete_tool_call(_truncate_to_first_function(accumulated))
        yield LLMResponse(
            content=accumulated,
            tool_invocations=parse_tool_invocations(accumulated),
            thinking_blocks=self._extract_thinking(chunks),
        )

    def _prepare_messages(self, conversation_history: list[dict[str, Any]]) -> list[dict[str, Any]]:
        """Build the outgoing message list: system prompt, identity, compressed history.

        NOTE: compresses ``conversation_history`` IN PLACE (clear + extend) so
        the caller's list stays in sync with what was actually sent.
        """
        messages = [{"role": "system", "content": self.system_prompt}]
        if self.agent_name:
            messages.append(
                {
                    "role": "user",
                    "content": (
                        f"\n\n<agent_identity>\n"
                        f"<meta>Internal metadata: do not echo or reference.</meta>\n"
                        f"<agent_name>{self.agent_name}</agent_name>\n"
                        f"<agent_id>{self.agent_id}</agent_id>\n"
                        f"</agent_identity>\n\n"
                    ),
                }
            )
        compressed = list(self.memory_compressor.compress_history(conversation_history))
        conversation_history.clear()
        conversation_history.extend(compressed)
        messages.extend(compressed)
        # Never end on an assistant turn; append a neutral user nudge instead.
        if messages[-1].get("role") == "assistant":
            messages.append({"role": "user", "content": "<meta>Continue the task.</meta>"})
        if self._is_anthropic() and self.config.enable_prompt_caching:
            messages = self._add_cache_control(messages)
        return messages

    def _build_completion_args(self, messages: list[dict[str, Any]]) -> dict[str, Any]:
        """Assemble the keyword arguments for ``acompletion``."""
        if not self._supports_vision():
            messages = self._strip_images(messages)
        args: dict[str, Any] = {
            "model": self.config.litellm_model,
            "messages": messages,
            "timeout": self.config.timeout,
            # Ask the provider to emit a usage chunk at the end of the stream.
            "stream_options": {"include_usage": True},
        }
        if self.config.api_key:
            args["api_key"] = self.config.api_key
        if self.config.api_base:
            args["api_base"] = self.config.api_base
        if self._supports_reasoning():
            args["reasoning_effort"] = self._reasoning_effort
        return args

    def _get_chunk_content(self, chunk: Any) -> str:
        """Extract the text delta from one stream chunk, or "" when absent."""
        if chunk.choices and hasattr(chunk.choices[0], "delta"):
            return getattr(chunk.choices[0].delta, "content", "") or ""
        return ""

    def _extract_thinking(self, chunks: list[Any]) -> list[dict[str, Any]] | None:
        """Rebuild the response from chunks and pull out thinking blocks, if any."""
        if not chunks or not self._supports_reasoning():
            return None
        try:
            resp = stream_chunk_builder(chunks)
            if resp.choices and hasattr(resp.choices[0].message, "thinking_blocks"):
                blocks: list[dict[str, Any]] = resp.choices[0].message.thinking_blocks
                return blocks
        except Exception:  # noqa: BLE001, S110 # nosec B110
            pass
        return None

    def _update_usage_stats(self, response: Any) -> None:
        """Fold one response's usage (tokens, cache hits, cost) into the totals.

        Best-effort: any failure leaves the totals unchanged.
        """
        try:
            if hasattr(response, "usage") and response.usage:
                input_tokens = getattr(response.usage, "prompt_tokens", 0) or 0
                output_tokens = getattr(response.usage, "completion_tokens", 0) or 0
                cached_tokens = 0
                if hasattr(response.usage, "prompt_tokens_details"):
                    prompt_details = response.usage.prompt_tokens_details
                    if hasattr(prompt_details, "cached_tokens"):
                        cached_tokens = prompt_details.cached_tokens or 0
                cost = self._extract_cost(response)
            else:
                input_tokens = 0
                output_tokens = 0
                cached_tokens = 0
                cost = 0.0
            self._total_stats.input_tokens += input_tokens
            self._total_stats.output_tokens += output_tokens
            self._total_stats.cached_tokens += cached_tokens
            self._total_stats.cost += cost
        except Exception:  # noqa: BLE001, S110 # nosec B110
            pass

    def _extract_cost(self, response: Any) -> float:
        """Cost of one response: provider-reported when present, else computed.

        Falls back to 0.0 when the cost cannot be computed.
        """
        if hasattr(response, "usage") and response.usage:
            direct_cost = getattr(response.usage, "cost", None)
            if direct_cost is not None:
                return float(direct_cost)
        try:
            # Drop the provider hint so completion_cost prices by the
            # canonical model name we pass — presumably needed for strix/
            # aliases; verify against litellm's cost lookup behavior.
            if hasattr(response, "_hidden_params"):
                response._hidden_params.pop("custom_llm_provider", None)
            return completion_cost(response, model=self.config.canonical_model) or 0.0
        except Exception:  # noqa: BLE001
            return 0.0

    def _should_retry(self, e: Exception) -> bool:
        """Decide retryability from the error's HTTP status, when it has one.

        Unknown (status-less) errors are retried. NOTE(review): relies on
        litellm's private ``_should_retry`` helper.
        """
        code = getattr(e, "status_code", None) or getattr(
            getattr(e, "response", None), "status_code", None
        )
        return code is None or litellm._should_retry(code)

    def _raise_error(self, e: Exception) -> None:
        """Report the failure to telemetry and re-raise as LLMRequestFailedError."""
        from strix.telemetry import posthog

        posthog.error("llm_error", type(e).__name__)
        raise LLMRequestFailedError(f"LLM request failed: {type(e).__name__}", str(e)) from e

    def _is_anthropic(self) -> bool:
        """True when the configured model name looks like an Anthropic/Claude model."""
        if not self.config.model_name:
            return False
        return any(p in self.config.model_name.lower() for p in ["anthropic/", "claude"])

    def _supports_vision(self) -> bool:
        """Capability lookup via litellm; failures count as "no vision"."""
        try:
            return bool(supports_vision(model=self.config.canonical_model))
        except Exception:  # noqa: BLE001
            return False

    def _supports_reasoning(self) -> bool:
        """Capability lookup via litellm; failures count as "no reasoning"."""
        try:
            return bool(supports_reasoning(model=self.config.canonical_model))
        except Exception:  # noqa: BLE001
            return False

    def _strip_images(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
        """Flatten multi-part contents to text, replacing images with a notice.

        Non-dict content parts are dropped; string contents pass through
        untouched. Returns a new list (original messages are not mutated).
        """
        result = []
        for msg in messages:
            content = msg.get("content")
            if isinstance(content, list):
                text_parts = []
                for item in content:
                    if isinstance(item, dict) and item.get("type") == "text":
                        text_parts.append(item.get("text", ""))
                    elif isinstance(item, dict) and item.get("type") == "image_url":
                        text_parts.append("[Image removed - model doesn't support vision]")
                result.append({**msg, "content": "\n".join(text_parts)})
            else:
                result.append(msg)
        return result

    def _add_cache_control(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
        """Mark the system prompt as ephemeral-cacheable for prompt caching.

        Only a leading system message with string content is rewrapped; the
        message list itself is copied, others are left as-is.
        """
        if not messages or not supports_prompt_caching(self.config.canonical_model):
            return messages
        result = list(messages)
        if result[0].get("role") == "system":
            content = result[0]["content"]
            result[0] = {
                **result[0],
                "content": [
                    {"type": "text", "text": content, "cache_control": {"type": "ephemeral"}}
                ]
                if isinstance(content, str)
                else content,
            }
        return result
| {
"repo_id": "usestrix/strix",
"file_path": "strix/llm/llm.py",
"license": "Apache License 2.0",
"lines": 280,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/llm/memory_compressor.py | import logging
from typing import Any
import litellm
from strix.config.config import Config, resolve_llm_config
logger = logging.getLogger(__name__)
# Compression kicks in once the conversation exceeds ~90% of this token budget.
MAX_TOTAL_TOKENS = 100_000
# The newest messages are always kept verbatim and never summarized.
MIN_RECENT_MESSAGES = 15
# Prompt sent to the summarizer model; {conversation} is filled with the
# role-prefixed transcript of the segment being compressed.
SUMMARY_PROMPT_TEMPLATE = """You are an agent performing context
condensation for a security agent. Your job is to compress scan data while preserving
ALL operationally critical information for continuing the security assessment.
CRITICAL ELEMENTS TO PRESERVE:
- Discovered vulnerabilities and potential attack vectors
- Scan results and tool outputs (compressed but maintaining key findings)
- Access credentials, tokens, or authentication details found
- System architecture insights and potential weak points
- Progress made in the assessment
- Failed attempts and dead ends (to avoid duplication)
- Any decisions made about the testing approach
COMPRESSION GUIDELINES:
- Preserve exact technical details (URLs, paths, parameters, payloads)
- Summarize verbose tool outputs while keeping critical findings
- Maintain version numbers, specific technologies identified
- Keep exact error messages that might indicate vulnerabilities
- Compress repetitive or similar findings into consolidated form
Remember: Another security agent will use this summary to continue the assessment.
They must be able to pick up exactly where you left off without losing any
operational advantage or context needed to find vulnerabilities.
CONVERSATION SEGMENT TO SUMMARIZE:
{conversation}
Provide a technically precise summary that preserves all operational security context while
keeping the summary concise and to the point."""
def _count_tokens(text: str, model: str) -> int:
    """Token count of ``text`` for ``model``; falls back to a chars/4 estimate."""
    try:
        return int(litellm.token_counter(model=model, text=text))
    except Exception:
        logger.exception("Failed to count tokens")
        return len(text) // 4  # Rough estimate
def _get_message_tokens(msg: dict[str, Any], model: str) -> int:
    """Token count for one message; list contents only count their text items."""
    content = msg.get("content", "")
    if isinstance(content, str):
        return _count_tokens(content, model)
    if not isinstance(content, list):
        return 0
    total = 0
    for part in content:
        if isinstance(part, dict) and part.get("type") == "text":
            total += _count_tokens(part.get("text", ""), model)
    return total
def _extract_message_text(msg: dict[str, Any]) -> str:
content = msg.get("content", "")
if isinstance(content, str):
return content
if isinstance(content, list):
parts = []
for item in content:
if isinstance(item, dict):
if item.get("type") == "text":
parts.append(item.get("text", ""))
elif item.get("type") == "image_url":
parts.append("[IMAGE]")
return " ".join(parts)
return str(content)
def _summarize_messages(
    messages: list[dict[str, Any]],
    model: str,
    timeout: int = 30,
) -> dict[str, Any]:
    """Condense a chunk of messages into one ``<context_summary>`` user message.

    Falls back to the first original message when the summarizer call fails
    or produces an empty summary.
    """
    if not messages:
        empty_summary = "<context_summary message_count='0'>{text}</context_summary>"
        return {
            "role": "user",
            "content": empty_summary.format(text="No messages to summarize"),
        }
    transcript = "\n".join(
        f"{m.get('role', 'unknown')}: {_extract_message_text(m)}" for m in messages
    )
    prompt = SUMMARY_PROMPT_TEMPLATE.format(conversation=transcript)
    _, api_key, api_base = resolve_llm_config()
    request: dict[str, Any] = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "timeout": timeout,
    }
    if api_key:
        request["api_key"] = api_key
    if api_base:
        request["api_base"] = api_base
    try:
        response = litellm.completion(**request)
        summary = response.choices[0].message.content or ""
    except Exception:
        logger.exception("Failed to summarize messages")
        return messages[0]
    if not summary.strip():
        return messages[0]
    summary_msg = "<context_summary message_count='{count}'>{text}</context_summary>"
    return {
        "role": "user",
        "content": summary_msg.format(count=len(messages), text=summary),
    }
def _handle_images(messages: list[dict[str, Any]], max_images: int) -> None:
image_count = 0
for msg in reversed(messages):
content = msg.get("content", [])
if isinstance(content, list):
for item in content:
if isinstance(item, dict) and item.get("type") == "image_url":
if image_count >= max_images:
item.update(
{
"type": "text",
"text": "[Previously attached image removed to preserve context]",
}
)
else:
image_count += 1
class MemoryCompressor:
    """Keeps conversation history inside the token budget by summarizing old messages."""

    def __init__(
        self,
        max_images: int = 3,
        model_name: str | None = None,
        timeout: int | None = None,
    ):
        """Configure the compressor; model and timeout fall back to Config values.

        Raises:
            ValueError: when no model name is supplied or configured.
        """
        self.max_images = max_images
        self.model_name = model_name or Config.get("strix_llm")
        self.timeout = timeout or int(Config.get("strix_memory_compressor_timeout") or "120")
        if not self.model_name:
            raise ValueError("STRIX_LLM environment variable must be set and not empty")

    def compress_history(
        self,
        messages: list[dict[str, Any]],
    ) -> list[dict[str, Any]]:
        """Compress conversation history to stay within token limits.

        First trims stale images in place, then — only when the total token
        count exceeds ~90% of MAX_TOTAL_TOKENS — summarizes older non-system
        messages in chunks of 10 while keeping every system message and the
        MIN_RECENT_MESSAGES newest messages verbatim.
        """
        if not messages:
            return messages
        _handle_images(messages, self.max_images)
        system_msgs = [m for m in messages if m.get("role") == "system"]
        regular_msgs = [m for m in messages if m.get("role") != "system"]
        recent_msgs = regular_msgs[-MIN_RECENT_MESSAGES:]
        old_msgs = regular_msgs[:-MIN_RECENT_MESSAGES]
        # __init__ guarantees model_name is set; narrow for the type checker.
        model_name: str = self.model_name  # type: ignore[assignment]
        total_tokens = sum(
            _get_message_tokens(m, model_name) for m in system_msgs + regular_msgs
        )
        if total_tokens <= MAX_TOTAL_TOKENS * 0.9:
            return messages
        chunk_size = 10
        summaries: list[dict[str, Any]] = []
        for start in range(0, len(old_msgs), chunk_size):
            piece = old_msgs[start : start + chunk_size]
            summary = _summarize_messages(piece, model_name, self.timeout)
            if summary:
                summaries.append(summary)
        return system_msgs + summaries + recent_msgs
| {
"repo_id": "usestrix/strix",
"file_path": "strix/llm/memory_compressor.py",
"license": "Apache License 2.0",
"lines": 176,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/llm/utils.py | import html
import re
from typing import Any
_INVOKE_OPEN = re.compile(r'<invoke\s+name=["\']([^"\']+)["\']>')
_PARAM_NAME_ATTR = re.compile(r'<parameter\s+name=["\']([^"\']+)["\']>')
_FUNCTION_CALLS_TAG = re.compile(r"</?function_calls>")
_STRIP_TAG_QUOTES = re.compile(r"<(function|parameter)\s*=\s*([^>]*?)>")


def normalize_tool_format(content: str) -> str:
    """Rewrite alternative tool-call XML dialects into the canonical form.

    ``<function_calls>`` wrappers are dropped, ``<invoke name="X">`` becomes
    ``<function=X>``, ``<parameter name="X">`` becomes ``<parameter=X>``,
    ``</invoke>`` becomes ``</function>``, and quoted tag values such as
    ``<function="X">`` lose their quotes.
    """
    if "<invoke" in content or "<function_calls" in content:
        rewritten = _FUNCTION_CALLS_TAG.sub("", content)
        rewritten = _INVOKE_OPEN.sub(r"<function=\1>", rewritten)
        rewritten = _PARAM_NAME_ATTR.sub(r"<parameter=\1>", rewritten)
        content = rewritten.replace("</invoke>", "</function>")

    def _unquote(match: re.Match[str]) -> str:
        tag, value = match.group(1), match.group(2)
        return f"<{tag}={value.strip().strip(chr(34) + chr(39))}>"

    return _STRIP_TAG_QUOTES.sub(_unquote, content)
STRIX_MODEL_MAP: dict[str, str] = {
"claude-sonnet-4.6": "anthropic/claude-sonnet-4-6",
"claude-opus-4.6": "anthropic/claude-opus-4-6",
"gpt-5.2": "openai/gpt-5.2",
"gpt-5.1": "openai/gpt-5.1",
"gpt-5": "openai/gpt-5",
"gemini-3-pro-preview": "gemini/gemini-3-pro-preview",
"gemini-3-flash-preview": "gemini/gemini-3-flash-preview",
"glm-5": "openrouter/z-ai/glm-5",
"glm-4.7": "openrouter/z-ai/glm-4.7",
}
def resolve_strix_model(model_name: str | None) -> tuple[str | None, str | None]:
"""Resolve a strix/ model into names for API calls and capability lookups.
Returns (api_model, canonical_model):
- api_model: openai/<base> for API calls (Strix API is OpenAI-compatible)
- canonical_model: actual provider model name for litellm capability lookups
Non-strix models return the same name for both.
"""
if not model_name or not model_name.startswith("strix/"):
return model_name, model_name
base_model = model_name[6:]
api_model = f"openai/{base_model}"
canonical_model = STRIX_MODEL_MAP.get(base_model, api_model)
return api_model, canonical_model
def _truncate_to_first_function(content: str) -> str:
if not content:
return content
function_starts = [
match.start() for match in re.finditer(r"<function=|<invoke\s+name=", content)
]
if len(function_starts) >= 2:
second_function_start = function_starts[1]
return content[:second_function_start].rstrip()
return content
def parse_tool_invocations(content: str) -> list[dict[str, Any]] | None:
    """Extract ``{"toolName", "args"}`` records from canonical tool-call XML.

    The content is normalized and repaired first; parameter values are
    stripped and HTML-unescaped. Returns None when no complete
    ``<function=...>`` block is present.
    """
    content = normalize_tool_format(content)
    content = fix_incomplete_tool_call(content)
    fn_regex_pattern = r"<function=([^>]+)>\n?(.*?)</function>"
    fn_param_regex_pattern = r"<parameter=([^>]+)>(.*?)</parameter>"
    invocations: list[dict[str, Any]] = []
    for fn_match in re.finditer(fn_regex_pattern, content, re.DOTALL):
        name = fn_match.group(1)
        body = fn_match.group(2)
        args = {
            pm.group(1): html.unescape(pm.group(2).strip())
            for pm in re.finditer(fn_param_regex_pattern, body, re.DOTALL)
        }
        invocations.append({"toolName": name, "args": args})
    return invocations or None
def fix_incomplete_tool_call(content: str) -> str:
    """Fix incomplete tool calls by adding missing closing tag.

    Handles both ``<function=…>`` and ``<invoke name="…">`` formats; only a
    single unclosed opening tag is repaired.
    """
    open_count = content.count("<function=") + content.count("<invoke ")
    closed = "</function>" in content or "</invoke>" in content
    if open_count != 1 or closed:
        return content
    trimmed = content.rstrip()
    if trimmed.endswith("</"):
        return trimmed + "function>"
    return trimmed + "\n</function>"
def format_tool_call(tool_name: str, args: dict[str, Any]) -> str:
    """Render a tool call as canonical ``<function=...>`` XML (values are not escaped)."""
    lines = [f"<function={tool_name}>"]
    lines.extend(f"<parameter={key}>{value}</parameter>" for key, value in args.items())
    lines.append("</function>")
    return "\n".join(lines)
def clean_content(content: str) -> str:
    """Strip tool-call XML and internal agent markup, leaving displayable text."""
    if not content:
        return ""
    content = normalize_tool_format(content)
    content = fix_incomplete_tool_call(content)
    # Complete tool calls, then an unterminated one running to end-of-text.
    cleaned = re.sub(r"<function=[^>]+>.*?</function>", "", content, flags=re.DOTALL)
    cleaned = re.sub(r"<function=[^>]+>.*$", "", cleaned, flags=re.DOTALL)
    # A "<functio..."-style opening tag cut off at the end of the stream.
    partial_tag_pattern = r"<f(?:u(?:n(?:c(?:t(?:i(?:o(?:n(?:=(?:[^>]*)?)?)?)?)?)?)?)?)?$"
    cleaned = re.sub(partial_tag_pattern, "", cleaned)
    # Internal agent-to-agent markup the user should never see.
    for hidden in (
        r"<inter_agent_message>.*?</inter_agent_message>",
        r"<agent_completion_report>.*?</agent_completion_report>",
    ):
        cleaned = re.sub(hidden, "", cleaned, flags=re.DOTALL | re.IGNORECASE)
    cleaned = re.sub(r"\n\s*\n", "\n\n", cleaned)
    return cleaned.strip()
| {
"repo_id": "usestrix/strix",
"file_path": "strix/llm/utils.py",
"license": "Apache License 2.0",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/runtime/docker_runtime.py | import contextlib
import os
import secrets
import socket
import time
from pathlib import Path
from typing import cast
import docker
import httpx
from docker.errors import DockerException, ImageNotFound, NotFound
from docker.models.containers import Container
from requests.exceptions import ConnectionError as RequestsConnectionError
from requests.exceptions import Timeout as RequestsTimeout
from strix.config import Config
from . import SandboxInitializationError
from .runtime import AbstractRuntime, SandboxInfo
# Hostname through which the container can reach services on the Docker host.
HOST_GATEWAY_HOSTNAME = "host.docker.internal"
# Timeout (seconds) for Docker daemon API calls.
DOCKER_TIMEOUT = 60
# Fixed ports inside the container; mapped to random free host ports at startup.
CONTAINER_TOOL_SERVER_PORT = 48081
CONTAINER_CAIDO_PORT = 48080
class DockerRuntime(AbstractRuntime):
def __init__(self) -> None:
try:
self.client = docker.from_env(timeout=DOCKER_TIMEOUT)
except (DockerException, RequestsConnectionError, RequestsTimeout) as e:
raise SandboxInitializationError(
"Docker is not available",
"Please ensure Docker Desktop is installed and running.",
) from e
self._scan_container: Container | None = None
self._tool_server_port: int | None = None
self._tool_server_token: str | None = None
self._caido_port: int | None = None
def _find_available_port(self) -> int:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("", 0))
return cast("int", s.getsockname()[1])
def _get_scan_id(self, agent_id: str) -> str:
try:
from strix.telemetry.tracer import get_global_tracer
tracer = get_global_tracer()
if tracer and tracer.scan_config:
return str(tracer.scan_config.get("scan_id", "default-scan"))
except (ImportError, AttributeError):
pass
return f"scan-{agent_id.split('-')[0]}"
def _verify_image_available(self, image_name: str, max_retries: int = 3) -> None:
for attempt in range(max_retries):
try:
image = self.client.images.get(image_name)
if not image.id or not image.attrs:
raise ImageNotFound(f"Image {image_name} metadata incomplete") # noqa: TRY301
except (ImageNotFound, DockerException):
if attempt == max_retries - 1:
raise
time.sleep(2**attempt)
else:
return
def _recover_container_state(self, container: Container) -> None:
for env_var in container.attrs["Config"]["Env"]:
if env_var.startswith("TOOL_SERVER_TOKEN="):
self._tool_server_token = env_var.split("=", 1)[1]
break
port_bindings = container.attrs.get("NetworkSettings", {}).get("Ports", {})
port_key = f"{CONTAINER_TOOL_SERVER_PORT}/tcp"
if port_bindings.get(port_key):
self._tool_server_port = int(port_bindings[port_key][0]["HostPort"])
caido_port_key = f"{CONTAINER_CAIDO_PORT}/tcp"
if port_bindings.get(caido_port_key):
self._caido_port = int(port_bindings[caido_port_key][0]["HostPort"])
def _wait_for_tool_server(self, max_retries: int = 30, timeout: int = 5) -> None:
host = self._resolve_docker_host()
health_url = f"http://{host}:{self._tool_server_port}/health"
time.sleep(5)
for attempt in range(max_retries):
try:
with httpx.Client(trust_env=False, timeout=timeout) as client:
response = client.get(health_url)
if response.status_code == 200:
data = response.json()
if data.get("status") == "healthy":
return
except (httpx.ConnectError, httpx.TimeoutException, httpx.RequestError):
pass
time.sleep(min(2**attempt * 0.5, 5))
raise SandboxInitializationError(
"Tool server failed to start",
"Container initialization timed out. Please try again.",
)
    def _create_container(self, scan_id: str, max_retries: int = 2) -> Container:
        """Create (or recreate) the per-scan container and wait for its tool server.

        Retries the entire remove-create-wait sequence with exponential backoff.

        Raises:
            ValueError: when no STRIX_IMAGE is configured.
            SandboxInitializationError: when all attempts fail.
        """
        container_name = f"strix-scan-{scan_id}"
        image_name = Config.get("strix_image")
        if not image_name:
            raise ValueError("STRIX_IMAGE must be configured")
        self._verify_image_available(image_name)
        last_error: Exception | None = None
        for attempt in range(max_retries + 1):
            try:
                # Remove any stale container holding our deterministic name.
                with contextlib.suppress(NotFound):
                    existing = self.client.containers.get(container_name)
                    with contextlib.suppress(Exception):
                        existing.stop(timeout=5)
                        existing.remove(force=True)
                        time.sleep(1)
                # Fresh host ports and a fresh shared secret per attempt.
                self._tool_server_port = self._find_available_port()
                self._caido_port = self._find_available_port()
                self._tool_server_token = secrets.token_urlsafe(32)
                execution_timeout = Config.get("strix_sandbox_execution_timeout") or "120"
                container = self.client.containers.run(
                    image_name,
                    command="sleep infinity",
                    detach=True,
                    name=container_name,
                    hostname=container_name,
                    ports={
                        f"{CONTAINER_TOOL_SERVER_PORT}/tcp": self._tool_server_port,
                        f"{CONTAINER_CAIDO_PORT}/tcp": self._caido_port,
                    },
                    cap_add=["NET_ADMIN", "NET_RAW"],
                    labels={"strix-scan-id": scan_id},
                    environment={
                        "PYTHONUNBUFFERED": "1",
                        "TOOL_SERVER_PORT": str(CONTAINER_TOOL_SERVER_PORT),
                        "TOOL_SERVER_TOKEN": self._tool_server_token,
                        "STRIX_SANDBOX_EXECUTION_TIMEOUT": str(execution_timeout),
                        "HOST_GATEWAY": HOST_GATEWAY_HOSTNAME,
                    },
                    extra_hosts={HOST_GATEWAY_HOSTNAME: "host-gateway"},
                    tty=True,
                )
                self._scan_container = container
                self._wait_for_tool_server()
            except (DockerException, RequestsConnectionError, RequestsTimeout) as e:
                last_error = e
                if attempt < max_retries:
                    # Reset connection state so the next attempt starts clean.
                    self._tool_server_port = None
                    self._tool_server_token = None
                    self._caido_port = None
                    time.sleep(2**attempt)
            else:
                return container
        raise SandboxInitializationError(
            "Failed to create container",
            f"Container creation failed after {max_retries + 1} attempts: {last_error}",
        ) from last_error
    def _get_or_create_container(self, scan_id: str) -> Container:
        """Return a running container for *scan_id*, reusing or reviving when possible.

        Resolution order: cached handle -> lookup by name -> lookup by scan-id
        label -> create a fresh container.
        """
        container_name = f"strix-scan-{scan_id}"
        # 1. Reuse the cached handle when it is still alive and running.
        if self._scan_container:
            try:
                self._scan_container.reload()
                if self._scan_container.status == "running":
                    return self._scan_container
            except NotFound:
                # Cached container vanished; drop all derived connection state.
                self._scan_container = None
                self._tool_server_port = None
                self._tool_server_token = None
                self._caido_port = None
        # 2. Look the container up by its deterministic name, starting it if stopped.
        try:
            container = self.client.containers.get(container_name)
            container.reload()
            if container.status != "running":
                container.start()
                time.sleep(2)
            self._scan_container = container
            self._recover_container_state(container)
        except NotFound:
            pass
        else:
            return container
        # 3. Fall back to a label-based search.
        try:
            containers = self.client.containers.list(
                all=True, filters={"label": f"strix-scan-id={scan_id}"}
            )
            if containers:
                container = containers[0]
                if container.status != "running":
                    container.start()
                    time.sleep(2)
                self._scan_container = container
                self._recover_container_state(container)
                return container
        except DockerException:
            pass
        # 4. Nothing reusable: create from scratch.
        return self._create_container(scan_id)
def _copy_local_directory_to_container(
self, container: Container, local_path: str, target_name: str | None = None
) -> None:
import tarfile
from io import BytesIO
try:
local_path_obj = Path(local_path).resolve()
if not local_path_obj.exists() or not local_path_obj.is_dir():
return
tar_buffer = BytesIO()
with tarfile.open(fileobj=tar_buffer, mode="w") as tar:
for item in local_path_obj.rglob("*"):
if item.is_file():
rel_path = item.relative_to(local_path_obj)
arcname = Path(target_name) / rel_path if target_name else rel_path
tar.add(item, arcname=arcname)
tar_buffer.seek(0)
container.put_archive("/workspace", tar_buffer.getvalue())
container.exec_run(
"chown -R pentester:pentester /workspace && chmod -R 755 /workspace",
user="root",
)
except (OSError, DockerException):
pass
    async def create_sandbox(
        self,
        agent_id: str,
        existing_token: str | None = None,
        local_sources: list[dict[str, str]] | None = None,
    ) -> SandboxInfo:
        """Provision (or join) the shared scan container and register *agent_id*.

        Local source directories are copied into /workspace at most once per
        scan; the once-only guard is an instance attribute keyed by scan id.

        Raises:
            RuntimeError: when the container id or tool-server state is missing.
        """
        scan_id = self._get_scan_id(agent_id)
        container = self._get_or_create_container(scan_id)
        source_copied_key = f"_source_copied_{scan_id}"
        if local_sources and not hasattr(self, source_copied_key):
            for index, source in enumerate(local_sources, start=1):
                source_path = source.get("source_path")
                if not source_path:
                    continue
                # Prefer an explicit subdir name, then the directory's own name.
                target_name = (
                    source.get("workspace_subdir") or Path(source_path).name or f"target_{index}"
                )
                self._copy_local_directory_to_container(container, source_path, target_name)
            setattr(self, source_copied_key, True)
        if container.id is None:
            raise RuntimeError("Docker container ID is unexpectedly None")
        token = existing_token or self._tool_server_token
        if self._tool_server_port is None or self._caido_port is None or token is None:
            raise RuntimeError("Tool server not initialized")
        host = self._resolve_docker_host()
        api_url = f"http://{host}:{self._tool_server_port}"
        # Best-effort: registration failures are swallowed by _register_agent.
        await self._register_agent(api_url, agent_id, token)
        return {
            "workspace_id": container.id,
            "api_url": api_url,
            "auth_token": token,
            "tool_server_port": self._tool_server_port,
            "caido_port": self._caido_port,
            "agent_id": agent_id,
        }
async def _register_agent(self, api_url: str, agent_id: str, token: str) -> None:
try:
async with httpx.AsyncClient(trust_env=False) as client:
response = await client.post(
f"{api_url}/register_agent",
params={"agent_id": agent_id},
headers={"Authorization": f"Bearer {token}"},
timeout=30,
)
response.raise_for_status()
except httpx.RequestError:
pass
async def get_sandbox_url(self, container_id: str, port: int) -> str:
try:
self.client.containers.get(container_id)
return f"http://{self._resolve_docker_host()}:{port}"
except NotFound:
raise ValueError(f"Container {container_id} not found.") from None
def _resolve_docker_host(self) -> str:
docker_host = os.getenv("DOCKER_HOST", "")
if docker_host:
from urllib.parse import urlparse
parsed = urlparse(docker_host)
if parsed.scheme in ("tcp", "http", "https") and parsed.hostname:
return parsed.hostname
return "127.0.0.1"
async def destroy_sandbox(self, container_id: str) -> None:
try:
container = self.client.containers.get(container_id)
container.stop()
container.remove()
self._scan_container = None
self._tool_server_port = None
self._tool_server_token = None
self._caido_port = None
except (NotFound, DockerException):
pass
def cleanup(self) -> None:
if self._scan_container is not None:
container_name = self._scan_container.name
self._scan_container = None
self._tool_server_port = None
self._tool_server_token = None
self._caido_port = None
if container_name is None:
return
import subprocess
subprocess.Popen( # noqa: S603
["docker", "rm", "-f", container_name], # noqa: S607
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
start_new_session=True,
)
| {
"repo_id": "usestrix/strix",
"file_path": "strix/runtime/docker_runtime.py",
"license": "Apache License 2.0",
"lines": 296,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/runtime/runtime.py | from abc import ABC, abstractmethod
from typing import TypedDict
class SandboxInfo(TypedDict):
    """Connection details for a provisioned sandbox, returned by create_sandbox."""

    workspace_id: str  # container identifier
    api_url: str  # base URL of the in-sandbox tool server
    auth_token: str | None  # bearer token for the tool server, when available
    tool_server_port: int  # host port mapped to the tool server
    caido_port: int  # host port mapped for Caido
    agent_id: str  # agent this sandbox was provisioned for
class AbstractRuntime(ABC):
    """Interface every sandbox runtime backend must implement."""

    @abstractmethod
    async def create_sandbox(
        self,
        agent_id: str,
        existing_token: str | None = None,
        local_sources: list[dict[str, str]] | None = None,
    ) -> SandboxInfo:
        """Provision (or join) a sandbox for *agent_id* and return its details."""
        raise NotImplementedError

    @abstractmethod
    async def get_sandbox_url(self, container_id: str, port: int) -> str:
        """Return a host-reachable URL for *port* on the given container."""
        raise NotImplementedError

    @abstractmethod
    async def destroy_sandbox(self, container_id: str) -> None:
        """Tear down the given sandbox container."""
        raise NotImplementedError

    # Optional hook: not abstract, so backends without teardown needs may skip it.
    def cleanup(self) -> None:
        raise NotImplementedError
| {
"repo_id": "usestrix/strix",
"file_path": "strix/runtime/runtime.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
usestrix/strix:strix/runtime/tool_server.py | from __future__ import annotations
import argparse
import asyncio
import os
import signal
import sys
from typing import Any
import uvicorn
from fastapi import Depends, FastAPI, HTTPException, status
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from pydantic import BaseModel, ValidationError
# The tool server must never run on the host: refuse to start outside a sandbox.
SANDBOX_MODE = os.getenv("STRIX_SANDBOX_MODE", "false").lower() == "true"
if not SANDBOX_MODE:
    raise RuntimeError("Tool server should only run in sandbox mode (STRIX_SANDBOX_MODE=true)")

# CLI configuration: the runtime passes the shared secret and bind port at launch.
parser = argparse.ArgumentParser(description="Start Strix tool server")
parser.add_argument("--token", required=True, help="Authentication token")
parser.add_argument("--host", default="0.0.0.0", help="Host to bind to")  # nosec
parser.add_argument("--port", type=int, required=True, help="Port to bind to")
parser.add_argument(
    "--timeout",
    type=int,
    default=120,
    help="Hard timeout in seconds for each request execution (default: 120)",
)
args = parser.parse_args()
EXPECTED_TOKEN = args.token  # bearer token every request must present
REQUEST_TIMEOUT = args.timeout  # per-request execution budget in seconds

app = FastAPI()
security = HTTPBearer()
security_dependency = Depends(security)

# One in-flight task per agent id; a newer request cancels the older one.
agent_tasks: dict[str, asyncio.Task[Any]] = {}
def verify_token(credentials: HTTPAuthorizationCredentials) -> str:
    """Validate a Bearer credential against the launch token.

    Raises HTTP 401 on a missing/wrong scheme or a mismatched token; returns
    the raw token string on success.
    """

    def _unauthorized(detail: str) -> HTTPException:
        return HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=detail,
            headers={"WWW-Authenticate": "Bearer"},
        )

    if not credentials or credentials.scheme != "Bearer":
        raise _unauthorized("Invalid authentication scheme. Bearer token required.")
    if credentials.credentials != EXPECTED_TOKEN:
        raise _unauthorized("Invalid authentication token")
    return credentials.credentials
class ToolExecutionRequest(BaseModel):
    """Body of POST /execute: which tool to run, for which agent, with what args."""

    agent_id: str  # agent the execution is attributed to
    tool_name: str  # registry name of the tool to invoke
    kwargs: dict[str, Any]  # raw (possibly string-encoded) tool arguments
class ToolExecutionResponse(BaseModel):
    """Body of POST /execute responses: at most one of result/error is set."""

    result: Any | None = None  # tool return value on success
    error: str | None = None  # human-readable failure description
async def _run_tool(agent_id: str, tool_name: str, kwargs: dict[str, Any]) -> Any:
    """Look up *tool_name*, coerce its arguments, and run it on a worker thread."""
    from strix.tools.argument_parser import convert_arguments
    from strix.tools.context import set_current_agent_id
    from strix.tools.registry import get_tool_by_name

    set_current_agent_id(agent_id)
    tool = get_tool_by_name(tool_name)
    if not tool:
        raise ValueError(f"Tool '{tool_name}' not found")
    coerced = convert_arguments(tool, kwargs)
    # Tools are synchronous; run off-loop to keep the server responsive.
    return await asyncio.to_thread(tool, **coerced)
@app.post("/execute", response_model=ToolExecutionResponse)
async def execute_tool(
    request: ToolExecutionRequest, credentials: HTTPAuthorizationCredentials = security_dependency
) -> ToolExecutionResponse:
    """Run one tool for one agent, enforcing a single in-flight task per agent.

    A newer request for the same agent cancels the previous one. All failures
    are reported in-band through the ``error`` field rather than HTTP errors.
    """
    verify_token(credentials)
    agent_id = request.agent_id
    # Supersede any still-running task for this agent.
    if agent_id in agent_tasks:
        old_task = agent_tasks[agent_id]
        if not old_task.done():
            old_task.cancel()
    task = asyncio.create_task(
        asyncio.wait_for(
            _run_tool(agent_id, request.tool_name, request.kwargs), timeout=REQUEST_TIMEOUT
        )
    )
    agent_tasks[agent_id] = task
    try:
        result = await task
        return ToolExecutionResponse(result=result)
    except asyncio.CancelledError:
        return ToolExecutionResponse(error="Cancelled by newer request")
    except TimeoutError:
        return ToolExecutionResponse(error=f"Tool timed out after {REQUEST_TIMEOUT}s")
    except ValidationError as e:
        return ToolExecutionResponse(error=f"Invalid arguments: {e}")
    except (ValueError, RuntimeError, ImportError) as e:
        return ToolExecutionResponse(error=f"Tool execution error: {e}")
    except Exception as e:  # noqa: BLE001
        return ToolExecutionResponse(error=f"Unexpected error: {e}")
    finally:
        # Only clear the slot if it still belongs to this task; a newer request
        # may already have replaced it.
        if agent_tasks.get(agent_id) is task:
            del agent_tasks[agent_id]
@app.post("/register_agent")
async def register_agent(
    agent_id: str, credentials: HTTPAuthorizationCredentials = security_dependency
) -> dict[str, str]:
    """Acknowledge an agent joining this sandbox (auth-gated; keeps no state)."""
    verify_token(credentials)
    return {"status": "registered", "agent_id": agent_id}
@app.get("/health")
async def health_check() -> dict[str, Any]:
    """Unauthenticated liveness probe; also reports auth setup and tracked agents."""
    environment_label = "sandbox" if SANDBOX_MODE else "main"
    auth_label = "true" if EXPECTED_TOKEN else "false"
    return {
        "status": "healthy",
        "sandbox_mode": str(SANDBOX_MODE),
        "environment": environment_label,
        "auth_configured": auth_label,
        "active_agents": len(agent_tasks),
        "agents": list(agent_tasks.keys()),
    }
def signal_handler(_signum: int, _frame: Any) -> None:
    """On SIGTERM/SIGINT: ignore SIGPIPE, cancel all in-flight tool tasks, exit."""
    if hasattr(signal, "SIGPIPE"):
        signal.signal(signal.SIGPIPE, signal.SIG_IGN)
    for pending in agent_tasks.values():
        pending.cancel()
    sys.exit(0)
# Ignore broken-pipe signals where the platform defines them, and shut down
# cleanly (cancelling tool tasks) on SIGTERM/SIGINT.
if hasattr(signal, "SIGPIPE"):
    signal.signal(signal.SIGPIPE, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)

if __name__ == "__main__":
    uvicorn.run(app, host=args.host, port=args.port, log_level="info")
| {
"repo_id": "usestrix/strix",
"file_path": "strix/runtime/tool_server.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/tools/agents_graph/agents_graph_actions.py | import threading
from datetime import UTC, datetime
from typing import Any, Literal
from strix.tools.registry import register_tool
# In-process registry of the agent delegation graph.
# "nodes" maps agent_id -> metadata dict; "edges" records delegation/message links.
_agent_graph: dict[str, Any] = {
    "nodes": {},
    "edges": [],
}
_root_agent_id: str | None = None  # root agent id when known; may stay None
_agent_messages: dict[str, list[dict[str, Any]]] = {}  # agent_id -> inbox of message dicts
_running_agents: dict[str, threading.Thread] = {}  # agent_id -> worker thread while running
_agent_instances: dict[str, Any] = {}  # agent_id -> live agent instance
_agent_states: dict[str, Any] = {}  # agent_id -> AgentState used for stop/wait control
def _run_agent_in_thread(
    agent: Any, state: Any, inherited_messages: list[dict[str, Any]]
) -> dict[str, Any]:
    """Thread entry point: run one sub-agent's loop to completion.

    Seeds the conversation with optional inherited parent context plus a
    delegation prompt, runs ``agent.agent_loop`` on a private event loop, and
    records the outcome in the shared graph/bookkeeping dicts.
    """
    try:
        # Replay parent context between sentinel markers so the child treats it
        # as background rather than as its own conversation.
        if inherited_messages:
            state.add_message("user", "<inherited_context_from_parent>")
            for msg in inherited_messages:
                state.add_message(msg["role"], msg["content"])
            state.add_message("user", "</inherited_context_from_parent>")
        parent_info = _agent_graph["nodes"].get(state.parent_id, {})
        parent_name = parent_info.get("name", "Unknown Parent")
        context_status = (
            "inherited conversation context from your parent for background understanding"
            if inherited_messages
            else "started with a fresh context"
        )
        task_xml = f"""<agent_delegation>
<identity>
β οΈ You are NOT your parent agent. You are a NEW, SEPARATE sub-agent (not root).
Your Info: {state.agent_name} ({state.agent_id})
Parent Info: {parent_name} ({state.parent_id})
</identity>
<your_task>{state.task}</your_task>
<instructions>
- You have {context_status}
- Inherited context is for BACKGROUND ONLY - don't continue parent's work
- Maintain strict self-identity: never speak as or for your parent
- Do not merge your conversation with the parent's;
- Do not claim parent's actions or messages as your own
- Focus EXCLUSIVELY on your delegated task above
- Work independently with your own approach
- Use agent_finish when complete to report back to parent
- You are a SPECIALIST for this specific task
- You share the same container as other agents but have your own tool server instance
- All agents share /workspace directory and proxy history for better collaboration
- You can see files created by other agents and proxy traffic from previous work
- Build upon previous work but focus on your specific delegated task
</instructions>
</agent_delegation>"""
        state.add_message("user", task_xml)
        _agent_states[state.agent_id] = state
        _agent_graph["nodes"][state.agent_id]["state"] = state.model_dump()
        import asyncio

        # Each worker thread drives the async agent loop on its own event loop.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            result = loop.run_until_complete(agent.agent_loop(state.task))
        finally:
            loop.close()
    except Exception as e:
        # Record the failure in the graph before re-raising on this thread.
        _agent_graph["nodes"][state.agent_id]["status"] = "error"
        _agent_graph["nodes"][state.agent_id]["finished_at"] = datetime.now(UTC).isoformat()
        _agent_graph["nodes"][state.agent_id]["result"] = {"error": str(e)}
        _running_agents.pop(state.agent_id, None)
        _agent_instances.pop(state.agent_id, None)
        raise
    else:
        if state.stop_requested:
            _agent_graph["nodes"][state.agent_id]["status"] = "stopped"
        else:
            _agent_graph["nodes"][state.agent_id]["status"] = "completed"
        _agent_graph["nodes"][state.agent_id]["finished_at"] = datetime.now(UTC).isoformat()
        _agent_graph["nodes"][state.agent_id]["result"] = result
        _running_agents.pop(state.agent_id, None)
        _agent_instances.pop(state.agent_id, None)
        return {"result": result}
@register_tool(sandbox_execution=False)
def view_agent_graph(agent_state: Any) -> dict[str, Any]:
    """Return a text rendering of the agent delegation tree plus status counts."""
    try:
        structure_lines = ["=== AGENT GRAPH STRUCTURE ==="]

        def _build_tree(agent_id: str, depth: int = 0) -> None:
            # Render this node, then recurse into its delegation children.
            node = _agent_graph["nodes"][agent_id]
            indent = "  " * depth
            you_indicator = " β This is you" if agent_id == agent_state.agent_id else ""
            structure_lines.append(f"{indent}* {node['name']} ({agent_id}){you_indicator}")
            structure_lines.append(f"{indent}  Task: {node['task']}")
            structure_lines.append(f"{indent}  Status: {node['status']}")
            children = [
                edge["to"]
                for edge in _agent_graph["edges"]
                if edge["from"] == agent_id and edge["type"] == "delegation"
            ]
            if children:
                structure_lines.append(f"{indent}  Children:")
                for child_id in children:
                    _build_tree(child_id, depth + 2)

        # Prefer the recorded root; otherwise fall back to any parentless node,
        # then to an arbitrary node.
        root_agent_id = _root_agent_id
        if not root_agent_id and _agent_graph["nodes"]:
            for agent_id, node in _agent_graph["nodes"].items():
                if node.get("parent_id") is None:
                    root_agent_id = agent_id
                    break
            if not root_agent_id:
                root_agent_id = next(iter(_agent_graph["nodes"].keys()))
        if root_agent_id and root_agent_id in _agent_graph["nodes"]:
            _build_tree(root_agent_id)
        else:
            structure_lines.append("No agents in the graph yet")
        graph_structure = "\n".join(structure_lines)
        total_nodes = len(_agent_graph["nodes"])
        running_count = sum(
            1 for node in _agent_graph["nodes"].values() if node["status"] == "running"
        )
        waiting_count = sum(
            1 for node in _agent_graph["nodes"].values() if node["status"] == "waiting"
        )
        stopping_count = sum(
            1 for node in _agent_graph["nodes"].values() if node["status"] == "stopping"
        )
        completed_count = sum(
            1 for node in _agent_graph["nodes"].values() if node["status"] == "completed"
        )
        stopped_count = sum(
            1 for node in _agent_graph["nodes"].values() if node["status"] == "stopped"
        )
        failed_count = sum(
            1 for node in _agent_graph["nodes"].values() if node["status"] in ["failed", "error"]
        )
    except Exception as e:  # noqa: BLE001
        return {
            "error": f"Failed to view agent graph: {e}",
            "graph_structure": "Error retrieving graph structure",
        }
    else:
        return {
            "graph_structure": graph_structure,
            "summary": {
                "total_agents": total_nodes,
                "running": running_count,
                "waiting": waiting_count,
                "stopping": stopping_count,
                "completed": completed_count,
                "stopped": stopped_count,
                "failed": failed_count,
            },
        }
@register_tool(sandbox_execution=False)
def create_agent(
    agent_state: Any,
    task: str,
    name: str,
    inherit_context: bool = True,
    skills: str | None = None,
) -> dict[str, Any]:
    """Spawn a named sub-agent for *task* on a daemon thread.

    Validates the optional comma-separated skill list (max five, must exist in
    the skill registry), builds a fresh AgentState with the caller as parent,
    copies the parent's LLM timeout/scan-mode settings, then starts the agent
    via ``_run_agent_in_thread``. Returns immediately with the new agent's id.
    """
    try:
        parent_id = agent_state.agent_id
        skill_list = []
        if skills:
            skill_list = [s.strip() for s in skills.split(",") if s.strip()]
            if len(skill_list) > 5:
                return {
                    "success": False,
                    "error": (
                        "Cannot specify more than 5 skills for an agent (use comma-separated format)"
                    ),
                    "agent_id": None,
                }
        if skill_list:
            from strix.skills import get_all_skill_names, validate_skill_names

            validation = validate_skill_names(skill_list)
            if validation["invalid"]:
                available_skills = list(get_all_skill_names())
                return {
                    "success": False,
                    "error": (
                        f"Invalid skills: {validation['invalid']}. "
                        f"Available skills: {', '.join(available_skills)}"
                    ),
                    "agent_id": None,
                }
        from strix.agents import StrixAgent
        from strix.agents.state import AgentState
        from strix.llm.config import LLMConfig

        state = AgentState(task=task, agent_name=name, parent_id=parent_id, max_iterations=300)
        parent_agent = _agent_instances.get(parent_id)
        # Inherit LLM tuning from the parent when available.
        timeout = None
        scan_mode = "deep"
        if parent_agent and hasattr(parent_agent, "llm_config"):
            if hasattr(parent_agent.llm_config, "timeout"):
                timeout = parent_agent.llm_config.timeout
            if hasattr(parent_agent.llm_config, "scan_mode"):
                scan_mode = parent_agent.llm_config.scan_mode
        llm_config = LLMConfig(skills=skill_list, timeout=timeout, scan_mode=scan_mode)
        agent_config = {
            "llm_config": llm_config,
            "state": state,
        }
        if parent_agent and hasattr(parent_agent, "non_interactive"):
            agent_config["non_interactive"] = parent_agent.non_interactive
        agent = StrixAgent(agent_config)
        inherited_messages = []
        if inherit_context:
            inherited_messages = agent_state.get_conversation_history()
        _agent_instances[state.agent_id] = agent
        # Daemon thread: sub-agents must not block process shutdown.
        thread = threading.Thread(
            target=_run_agent_in_thread,
            args=(agent, state, inherited_messages),
            daemon=True,
            name=f"Agent-{name}-{state.agent_id}",
        )
        thread.start()
        _running_agents[state.agent_id] = thread
    except Exception as e:  # noqa: BLE001
        return {"success": False, "error": f"Failed to create agent: {e}", "agent_id": None}
    else:
        return {
            "success": True,
            "agent_id": state.agent_id,
            "message": f"Agent '{name}' created and started asynchronously",
            "agent_info": {
                "id": state.agent_id,
                "name": name,
                "status": "running",
                "parent_id": parent_id,
            },
        }
@register_tool(sandbox_execution=False)
def send_message_to_agent(
    agent_state: Any,
    target_agent_id: str,
    message: str,
    message_type: Literal["query", "instruction", "information"] = "information",
    priority: Literal["low", "normal", "high", "urgent"] = "normal",
) -> dict[str, Any]:
    """Deliver a message from the calling agent to another agent's inbox.

    Also records a "message" edge in the shared agent graph. Returns a status
    dict describing the delivery, or an error dict when the target is unknown
    or delivery fails.
    """
    try:
        if target_agent_id not in _agent_graph["nodes"]:
            return {
                "success": False,
                "error": f"Target agent '{target_agent_id}' not found in graph",
                "message_id": None,
            }

        from uuid import uuid4

        sender_id = agent_state.agent_id
        message_id = f"msg_{uuid4().hex[:8]}"
        envelope = {
            "id": message_id,
            "from": sender_id,
            "to": target_agent_id,
            "content": message,
            "message_type": message_type,
            "priority": priority,
            "timestamp": datetime.now(UTC).isoformat(),
            "delivered": False,
            "read": False,
        }
        _agent_messages.setdefault(target_agent_id, []).append(envelope)
        _agent_graph["edges"].append(
            {
                "from": sender_id,
                "to": target_agent_id,
                "type": "message",
                "message_id": message_id,
                "message_type": message_type,
                "priority": priority,
                "created_at": datetime.now(UTC).isoformat(),
            }
        )
        envelope["delivered"] = True

        target_name = _agent_graph["nodes"][target_agent_id]["name"]
        sender_name = _agent_graph["nodes"][sender_id]["name"]
        target_status = _agent_graph["nodes"][target_agent_id]["status"]
        return {
            "success": True,
            "message_id": message_id,
            "message": f"Message sent from '{sender_name}' to '{target_name}'",
            "delivery_status": "delivered",
            "target_agent": {
                "id": target_agent_id,
                "name": target_name,
                "status": target_status,
            },
        }
    except Exception as e:  # noqa: BLE001
        return {"success": False, "error": f"Failed to send message: {e}", "message_id": None}
@register_tool(sandbox_execution=False)
def agent_finish(
    agent_state: Any,
    result_summary: str,
    findings: list[str] | None = None,
    success: bool = True,
    report_to_parent: bool = True,
    final_recommendations: list[str] | None = None,
) -> dict[str, Any]:
    """Mark the calling sub-agent finished and optionally report to its parent.

    Records the outcome on the agent's graph node and, unless disabled, drops
    an XML completion report into the parent's message inbox.
    """
    try:
        # Only sub-agents may call this; the root agent has no parent to notify.
        if not hasattr(agent_state, "parent_id") or agent_state.parent_id is None:
            return {
                "agent_completed": False,
                "error": (
                    "This tool can only be used by subagents. "
                    "Root/main agents must use finish_scan instead."
                ),
                "parent_notified": False,
            }
        agent_id = agent_state.agent_id
        if agent_id not in _agent_graph["nodes"]:
            return {"agent_completed": False, "error": "Current agent not found in graph"}
        agent_node = _agent_graph["nodes"][agent_id]
        agent_node["status"] = "finished" if success else "failed"
        agent_node["finished_at"] = datetime.now(UTC).isoformat()
        agent_node["result"] = {
            "summary": result_summary,
            "findings": findings or [],
            "success": success,
            "recommendations": final_recommendations or [],
        }
        parent_notified = False
        if report_to_parent and agent_node["parent_id"]:
            parent_id = agent_node["parent_id"]
            if parent_id in _agent_graph["nodes"]:
                findings_xml = "\n".join(
                    f" <finding>{finding}</finding>" for finding in (findings or [])
                )
                recommendations_xml = "\n".join(
                    f" <recommendation>{rec}</recommendation>"
                    for rec in (final_recommendations or [])
                )
                report_message = f"""<agent_completion_report>
<agent_info>
<agent_name>{agent_node["name"]}</agent_name>
<agent_id>{agent_id}</agent_id>
<task>{agent_node["task"]}</task>
<status>{"SUCCESS" if success else "FAILED"}</status>
<completion_time>{agent_node["finished_at"]}</completion_time>
</agent_info>
<results>
<summary>{result_summary}</summary>
<findings>
{findings_xml}
</findings>
<recommendations>
{recommendations_xml}
</recommendations>
</results>
</agent_completion_report>"""
                if parent_id not in _agent_messages:
                    _agent_messages[parent_id] = []
                from uuid import uuid4

                # High-priority inbox entry so the parent sees the report promptly.
                _agent_messages[parent_id].append(
                    {
                        "id": f"report_{uuid4().hex[:8]}",
                        "from": agent_id,
                        "to": parent_id,
                        "content": report_message,
                        "message_type": "information",
                        "priority": "high",
                        "timestamp": datetime.now(UTC).isoformat(),
                        "delivered": True,
                        "read": False,
                    }
                )
                parent_notified = True
        _running_agents.pop(agent_id, None)
        return {
            "agent_completed": True,
            "parent_notified": parent_notified,
            "completion_summary": {
                "agent_id": agent_id,
                "agent_name": agent_node["name"],
                "task": agent_node["task"],
                "success": success,
                "findings_count": len(findings or []),
                "has_recommendations": bool(final_recommendations),
                "finished_at": agent_node["finished_at"],
            },
        }
    except Exception as e:  # noqa: BLE001
        return {
            "agent_completed": False,
            "error": f"Failed to complete agent: {e}",
            "parent_notified": False,
        }
def stop_agent(agent_id: str) -> dict[str, Any]:
    """Request a graceful stop of a running agent.

    Sets stop flags on the agent's state/instance, marks its graph node
    "stopping", and mirrors the status into the telemetry tracer when present.
    """
    try:
        node = _agent_graph["nodes"].get(agent_id)
        if node is None:
            return {
                "success": False,
                "error": f"Agent '{agent_id}' not found in graph",
                "agent_id": agent_id,
            }
        if node["status"] in ["completed", "error", "failed", "stopped"]:
            return {
                "success": True,
                "message": f"Agent '{node['name']}' was already stopped",
                "agent_id": agent_id,
                "previous_status": node["status"],
            }

        tracked_state = _agent_states.get(agent_id)
        if tracked_state is not None:
            tracked_state.request_stop()
        instance = _agent_instances.get(agent_id)
        if instance is not None:
            if hasattr(instance, "state"):
                instance.state.request_stop()
            if hasattr(instance, "cancel_current_execution"):
                instance.cancel_current_execution()

        node["status"] = "stopping"
        # Best-effort telemetry mirror; absence of the tracer is not an error.
        try:
            from strix.telemetry.tracer import get_global_tracer

            tracer = get_global_tracer()
            if tracer:
                tracer.update_agent_status(agent_id, "stopping")
        except (ImportError, AttributeError):
            pass

        node["result"] = {
            "summary": "Agent stop requested by user",
            "success": False,
            "stopped_by_user": True,
        }
        return {
            "success": True,
            "message": f"Stop request sent to agent '{node['name']}'",
            "agent_id": agent_id,
            "agent_name": node["name"],
            "note": "Agent will stop gracefully after current iteration",
        }
    except Exception as e:  # noqa: BLE001
        return {
            "success": False,
            "error": f"Failed to stop agent: {e}",
            "agent_id": agent_id,
        }
def send_user_message_to_agent(agent_id: str, message: str) -> dict[str, Any]:
    """Queue a high-priority instruction from the human user into an agent's inbox."""
    try:
        if agent_id not in _agent_graph["nodes"]:
            return {
                "success": False,
                "error": f"Agent '{agent_id}' not found in graph",
                "agent_id": agent_id,
            }

        from uuid import uuid4

        node = _agent_graph["nodes"][agent_id]
        inbox = _agent_messages.setdefault(agent_id, [])
        inbox.append(
            {
                "id": f"user_msg_{uuid4().hex[:8]}",
                "from": "user",
                "to": agent_id,
                "content": message,
                "message_type": "instruction",
                "priority": "high",
                "timestamp": datetime.now(UTC).isoformat(),
                "delivered": True,
                "read": False,
            }
        )
        return {
            "success": True,
            "message": f"Message sent to agent '{node['name']}'",
            "agent_id": agent_id,
            "agent_name": node["name"],
        }
    except Exception as e:  # noqa: BLE001
        return {
            "success": False,
            "error": f"Failed to send message to agent: {e}",
            "agent_id": agent_id,
        }
@register_tool(sandbox_execution=False)
def wait_for_message(
    agent_state: Any,
    reason: str = "Waiting for messages from other agents",
) -> dict[str, Any]:
    """Park the calling agent in a waiting state until a message resumes it."""
    try:
        agent_id = agent_state.agent_id
        agent_name = agent_state.agent_name
        agent_state.enter_waiting_state()
        node = _agent_graph["nodes"].get(agent_id)
        if node is not None:
            node["status"] = "waiting"
            node["waiting_reason"] = reason
        # Mirror the status into telemetry when available (runs even if the
        # agent has no graph node, matching the original control flow).
        try:
            from strix.telemetry.tracer import get_global_tracer

            tracer = get_global_tracer()
            if tracer:
                tracer.update_agent_status(agent_id, "waiting")
        except (ImportError, AttributeError):
            pass
    except Exception as e:  # noqa: BLE001
        return {"success": False, "error": f"Failed to enter waiting state: {e}", "status": "error"}
    return {
        "success": True,
        "status": "waiting",
        "message": f"Agent '{agent_name}' is now waiting for messages",
        "reason": reason,
        "agent_info": {
            "id": agent_id,
            "name": agent_name,
            "status": "waiting",
        },
        "resume_conditions": [
            "Message from another agent",
            "Message from user",
            "Direct communication",
            "Waiting timeout reached",
        ],
    }
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/agents_graph/agents_graph_actions.py",
"license": "Apache License 2.0",
"lines": 511,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/tools/argument_parser.py | import contextlib
import inspect
import json
import types
from collections.abc import Callable
from typing import Any, Union, get_args, get_origin
class ArgumentConversionError(Exception):
def __init__(self, message: str, param_name: str | None = None) -> None:
self.param_name = param_name
super().__init__(message)
def convert_arguments(func: Callable[..., Any], kwargs: dict[str, Any]) -> dict[str, Any]:
    """Coerce string-valued kwargs to the types annotated on *func*.

    Values that are not strings, are None, have no annotation, or belong to
    parameters *func* does not declare are passed through untouched.

    Raises:
        ArgumentConversionError: when signature inspection fails or a value
            cannot be converted to its annotated type.
    """
    try:
        parameters = inspect.signature(func).parameters
        converted: dict[str, Any] = {}
        for name, raw in kwargs.items():
            spec = parameters.get(name)
            annotation = spec.annotation if spec is not None else inspect.Parameter.empty
            needs_conversion = (
                spec is not None
                and annotation != inspect.Parameter.empty
                and raw is not None
                and isinstance(raw, str)
            )
            if not needs_conversion:
                converted[name] = raw
                continue
            try:
                converted[name] = convert_string_to_type(raw, annotation)
            except (ValueError, TypeError, json.JSONDecodeError) as e:
                raise ArgumentConversionError(
                    f"Failed to convert argument '{name}' to type {annotation}: {e}",
                    param_name=name,
                ) from e
    except (ValueError, TypeError, AttributeError) as e:
        raise ArgumentConversionError(f"Failed to process function arguments: {e}") from e
    return converted
def convert_string_to_type(value: str, param_type: Any) -> Any:
origin = get_origin(param_type)
if origin is Union or isinstance(param_type, types.UnionType):
args = get_args(param_type)
for arg_type in args:
if arg_type is not type(None):
with contextlib.suppress(ValueError, TypeError, json.JSONDecodeError):
return convert_string_to_type(value, arg_type)
return value
if hasattr(param_type, "__args__"):
args = getattr(param_type, "__args__", ())
if len(args) == 2 and type(None) in args:
non_none_type = args[0] if args[1] is type(None) else args[1]
with contextlib.suppress(ValueError, TypeError, json.JSONDecodeError):
return convert_string_to_type(value, non_none_type)
return value
return _convert_basic_types(value, param_type, origin)
def _convert_basic_types(value: str, param_type: Any, origin: Any = None) -> Any:
basic_type_converters: dict[Any, Callable[[str], Any]] = {
int: int,
float: float,
bool: _convert_to_bool,
str: str,
}
if param_type in basic_type_converters:
return basic_type_converters[param_type](value)
if list in (origin, param_type):
return _convert_to_list(value)
if dict in (origin, param_type):
return _convert_to_dict(value)
with contextlib.suppress(json.JSONDecodeError):
return json.loads(value)
return value
def _convert_to_bool(value: str) -> bool:
if value.lower() in ("true", "1", "yes", "on"):
return True
if value.lower() in ("false", "0", "no", "off"):
return False
return bool(value)
def _convert_to_list(value: str) -> list[Any]:
try:
parsed = json.loads(value)
if isinstance(parsed, list):
return parsed
except json.JSONDecodeError:
if "," in value:
return [item.strip() for item in value.split(",")]
return [value]
else:
return [parsed]
def _convert_to_dict(value: str) -> dict[str, Any]:
try:
parsed = json.loads(value)
if isinstance(parsed, dict):
return parsed
except json.JSONDecodeError:
return {}
else:
return {}
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/argument_parser.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/tools/browser/browser_actions.py | from typing import TYPE_CHECKING, Any, Literal, NoReturn
from strix.tools.registry import register_tool
if TYPE_CHECKING:
from .tab_manager import BrowserTabManager
# Closed set of action names accepted by `browser_action`. Grouped by the
# handler that services them: navigation (launch/goto/back/forward),
# interaction (click/type/scroll/hover/keys), tab management, and utilities.
BrowserAction = Literal[
    "launch",
    "goto",
    "click",
    "type",
    "scroll_down",
    "scroll_up",
    "back",
    "forward",
    "new_tab",
    "switch_tab",
    "close_tab",
    "wait",
    "execute_js",
    "double_click",
    "hover",
    "press_key",
    "save_pdf",
    "get_console_logs",
    "view_source",
    "close",
    "list_tabs",
]
def _validate_url(action_name: str, url: str | None) -> None:
if not url:
raise ValueError(f"url parameter is required for {action_name} action")
def _validate_coordinate(action_name: str, coordinate: str | None) -> None:
if not coordinate:
raise ValueError(f"coordinate parameter is required for {action_name} action")
def _validate_text(action_name: str, text: str | None) -> None:
if not text:
raise ValueError(f"text parameter is required for {action_name} action")
def _validate_tab_id(action_name: str, tab_id: str | None) -> None:
if not tab_id:
raise ValueError(f"tab_id parameter is required for {action_name} action")
def _validate_js_code(action_name: str, js_code: str | None) -> None:
if not js_code:
raise ValueError(f"js_code parameter is required for {action_name} action")
def _validate_duration(action_name: str, duration: float | None) -> None:
if duration is None:
raise ValueError(f"duration parameter is required for {action_name} action")
def _validate_key(action_name: str, key: str | None) -> None:
if not key:
raise ValueError(f"key parameter is required for {action_name} action")
def _validate_file_path(action_name: str, file_path: str | None) -> None:
if not file_path:
raise ValueError(f"file_path parameter is required for {action_name} action")
def _handle_navigation_actions(
manager: "BrowserTabManager",
action: str,
url: str | None = None,
tab_id: str | None = None,
) -> dict[str, Any]:
if action == "launch":
return manager.launch_browser(url)
if action == "goto":
_validate_url(action, url)
assert url is not None
return manager.goto_url(url, tab_id)
if action == "back":
return manager.back(tab_id)
if action == "forward":
return manager.forward(tab_id)
raise ValueError(f"Unknown navigation action: {action}")
def _handle_interaction_actions(
manager: "BrowserTabManager",
action: str,
coordinate: str | None = None,
text: str | None = None,
key: str | None = None,
tab_id: str | None = None,
) -> dict[str, Any]:
if action in {"click", "double_click", "hover"}:
_validate_coordinate(action, coordinate)
assert coordinate is not None
action_map = {
"click": manager.click,
"double_click": manager.double_click,
"hover": manager.hover,
}
return action_map[action](coordinate, tab_id)
if action in {"scroll_down", "scroll_up"}:
direction = "down" if action == "scroll_down" else "up"
return manager.scroll(direction, tab_id)
if action == "type":
_validate_text(action, text)
assert text is not None
return manager.type_text(text, tab_id)
if action == "press_key":
_validate_key(action, key)
assert key is not None
return manager.press_key(key, tab_id)
raise ValueError(f"Unknown interaction action: {action}")
def _raise_unknown_action(action: str) -> NoReturn:
raise ValueError(f"Unknown action: {action}")
def _handle_tab_actions(
manager: "BrowserTabManager",
action: str,
url: str | None = None,
tab_id: str | None = None,
) -> dict[str, Any]:
if action == "new_tab":
return manager.new_tab(url)
if action == "switch_tab":
_validate_tab_id(action, tab_id)
assert tab_id is not None
return manager.switch_tab(tab_id)
if action == "close_tab":
_validate_tab_id(action, tab_id)
assert tab_id is not None
return manager.close_tab(tab_id)
if action == "list_tabs":
return manager.list_tabs()
raise ValueError(f"Unknown tab action: {action}")
def _handle_utility_actions(
manager: "BrowserTabManager",
action: str,
duration: float | None = None,
js_code: str | None = None,
file_path: str | None = None,
tab_id: str | None = None,
clear: bool = False,
) -> dict[str, Any]:
if action == "wait":
_validate_duration(action, duration)
assert duration is not None
return manager.wait_browser(duration, tab_id)
if action == "execute_js":
_validate_js_code(action, js_code)
assert js_code is not None
return manager.execute_js(js_code, tab_id)
if action == "save_pdf":
_validate_file_path(action, file_path)
assert file_path is not None
return manager.save_pdf(file_path, tab_id)
if action == "get_console_logs":
return manager.get_console_logs(tab_id, clear)
if action == "view_source":
return manager.view_source(tab_id)
if action == "close":
return manager.close_browser()
raise ValueError(f"Unknown utility action: {action}")
@register_tool
def browser_action(
    action: BrowserAction,
    url: str | None = None,
    coordinate: str | None = None,
    text: str | None = None,
    tab_id: str | None = None,
    js_code: str | None = None,
    duration: float | None = None,
    key: str | None = None,
    file_path: str | None = None,
    clear: bool = False,
) -> dict[str, Any]:
    """Single entry point for all browser tool actions.

    Routes *action* to the matching handler group and returns the handler's
    result. Validation and runtime failures are caught and returned as an
    error payload rather than raised to the caller.
    """
    from .tab_manager import get_browser_tab_manager

    manager = get_browser_tab_manager()
    try:
        if action in {"launch", "goto", "back", "forward"}:
            return _handle_navigation_actions(manager, action, url, tab_id)
        if action in {
            "click",
            "type",
            "double_click",
            "hover",
            "press_key",
            "scroll_down",
            "scroll_up",
        }:
            return _handle_interaction_actions(manager, action, coordinate, text, key, tab_id)
        if action in {"new_tab", "switch_tab", "close_tab", "list_tabs"}:
            return _handle_tab_actions(manager, action, url, tab_id)
        if action in {
            "wait",
            "execute_js",
            "save_pdf",
            "get_console_logs",
            "view_source",
            "close",
        }:
            return _handle_utility_actions(
                manager, action, duration, js_code, file_path, tab_id, clear
            )
        _raise_unknown_action(action)
    except (ValueError, RuntimeError) as e:
        # Error payload mirrors the shape of a successful page-state result.
        return {
            "error": str(e),
            "tab_id": tab_id,
            "screenshot": "",
            "is_running": False,
        }
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/browser/browser_actions.py",
"license": "Apache License 2.0",
"lines": 199,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/tools/browser/browser_instance.py | import asyncio
import base64
import contextlib
import logging
import threading
from pathlib import Path
from typing import Any, cast
from playwright.async_api import Browser, BrowserContext, Page, Playwright, async_playwright
logger = logging.getLogger(__name__)
# Output-size guards: results returned to the agent are truncated so one
# action cannot flood the conversation context.
MAX_PAGE_SOURCE_LENGTH = 20_000
MAX_CONSOLE_LOG_LENGTH = 30_000  # total characters across all returned logs
MAX_INDIVIDUAL_LOG_LENGTH = 1_000  # per-message cap applied at capture time
MAX_CONSOLE_LOGS_COUNT = 200  # per-tab ring buffer size
MAX_JS_RESULT_LENGTH = 5_000
class _BrowserState:
    """Singleton state for the shared browser instance."""

    # Guards lazy creation of the loop thread and the Playwright browser.
    lock = threading.Lock()
    # Background event loop all Playwright coroutines are scheduled onto.
    event_loop: asyncio.AbstractEventLoop | None = None
    event_loop_thread: threading.Thread | None = None
    playwright: Playwright | None = None
    browser: Browser | None = None


# Process-wide shared state; every BrowserInstance binds to this.
_state = _BrowserState()
def _ensure_event_loop() -> None:
    """Start the shared background event loop thread (idempotent).

    The loop is created here, in the calling thread, *before* the worker
    thread starts, so `_state.event_loop` is fully assigned by the time
    this function returns and no polling is needed. (The previous version
    assigned the loop inside the worker thread and busy-waited on the
    assignment with a fresh ``threading.Event().wait(0.01)`` per
    iteration.) ``run_coroutine_threadsafe`` may be called before the loop
    starts running; the callback is queued until ``run_forever`` begins.
    Callers are expected to hold ``_state.lock`` (see ``_get_browser``).
    """
    if _state.event_loop is not None:
        return

    loop = asyncio.new_event_loop()

    def run_loop() -> None:
        # The daemon worker thread owns and runs the loop until exit.
        asyncio.set_event_loop(loop)
        loop.run_forever()

    _state.event_loop = loop
    _state.event_loop_thread = threading.Thread(target=run_loop, daemon=True)
    _state.event_loop_thread.start()
async def _create_browser() -> Browser:
    """Return a connected shared Chromium instance, (re)creating it if needed.

    Runs on the background event loop. If stale browser/playwright handles
    exist (e.g. the browser process died), they are torn down first so the
    module state never points at a dead process.
    """
    if _state.browser is not None and _state.browser.is_connected():
        return _state.browser
    # Teardown failures are ignored: the old process may already be gone.
    if _state.browser is not None:
        with contextlib.suppress(Exception):
            await _state.browser.close()
        _state.browser = None
    if _state.playwright is not None:
        with contextlib.suppress(Exception):
            await _state.playwright.stop()
        _state.playwright = None
    _state.playwright = await async_playwright().start()
    _state.browser = await _state.playwright.chromium.launch(
        headless=True,
        args=[
            "--no-sandbox",
            "--disable-dev-shm-usage",
            "--disable-gpu",
            "--disable-web-security",
        ],
    )
    return _state.browser
def _get_browser() -> tuple[asyncio.AbstractEventLoop, Browser]:
    """Return the shared (event loop, browser) pair, creating both lazily.

    Thread-safe: the whole check-and-create sequence runs under the module
    lock. Browser creation is scheduled onto the background loop and waited
    on synchronously with a 30s timeout.
    """
    with _state.lock:
        _ensure_event_loop()
        assert _state.event_loop is not None
        if _state.browser is None or not _state.browser.is_connected():
            future = asyncio.run_coroutine_threadsafe(_create_browser(), _state.event_loop)
            future.result(timeout=30)
        assert _state.browser is not None
        return _state.event_loop, _state.browser
class BrowserInstance:
    """One browser session (context + tabs) on the shared Chromium process.

    Every public method follows the same pattern: take ``_execution_lock``
    (serializing actions on this instance), then bridge to the shared
    background event loop via ``_run_async`` to execute the matching
    ``_``-prefixed coroutine. Tabs are tracked by synthetic ids
    ("tab_1", "tab_2", ...) and every action returns the resulting page
    state (base64 screenshot, url, title, viewport, tab listing).
    """

    def __init__(self) -> None:
        self.is_running = True
        # Serializes all browser actions issued through this instance.
        self._execution_lock = threading.Lock()
        self._loop: asyncio.AbstractEventLoop | None = None
        self._browser: Browser | None = None
        self.context: BrowserContext | None = None
        # tab_id -> Playwright Page
        self.pages: dict[str, Page] = {}
        self.current_page_id: str | None = None
        self._next_tab_id = 1
        # tab_id -> captured console log entries (bounded, see _setup_console_logging)
        self.console_logs: dict[str, list[dict[str, Any]]] = {}

    def _run_async(self, coro: Any) -> dict[str, Any]:
        # Bridge from the calling (sync) thread onto the shared event loop.
        if not self._loop or not self.is_running:
            raise RuntimeError("Browser instance is not running")
        future = asyncio.run_coroutine_threadsafe(coro, self._loop)
        return cast("dict[str, Any]", future.result(timeout=30))  # 30 second timeout

    async def _setup_console_logging(self, page: Page, tab_id: str) -> None:
        """Attach a console listener that records bounded log entries for the tab."""
        self.console_logs[tab_id] = []

        def handle_console(msg: Any) -> None:
            text = msg.text
            # Cap each message so one noisy log line can't dominate output.
            if len(text) > MAX_INDIVIDUAL_LOG_LENGTH:
                text = text[:MAX_INDIVIDUAL_LOG_LENGTH] + "... [TRUNCATED]"
            log_entry = {
                "type": msg.type,
                "text": text,
                "location": msg.location,
                "timestamp": asyncio.get_event_loop().time(),
            }
            self.console_logs[tab_id].append(log_entry)
            # Keep only the most recent MAX_CONSOLE_LOGS_COUNT entries.
            if len(self.console_logs[tab_id]) > MAX_CONSOLE_LOGS_COUNT:
                self.console_logs[tab_id] = self.console_logs[tab_id][-MAX_CONSOLE_LOGS_COUNT:]

        page.on("console", handle_console)

    async def _create_context(self, url: str | None = None) -> dict[str, Any]:
        """Create this instance's browser context and first tab, optionally navigating."""
        assert self._browser is not None
        self.context = await self._browser.new_context(
            viewport={"width": 1280, "height": 720},
            user_agent=(
                "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
                "(KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
            ),
        )
        page = await self.context.new_page()
        tab_id = f"tab_{self._next_tab_id}"
        self._next_tab_id += 1
        self.pages[tab_id] = page
        self.current_page_id = tab_id
        await self._setup_console_logging(page, tab_id)
        if url:
            await page.goto(url, wait_until="domcontentloaded")
        return await self._get_page_state(tab_id)

    async def _get_page_state(self, tab_id: str | None = None) -> dict[str, Any]:
        """Snapshot the tab: screenshot, url, title, viewport, and all-tab listing."""
        if not tab_id:
            tab_id = self.current_page_id
        if not tab_id or tab_id not in self.pages:
            raise ValueError(f"Tab '{tab_id}' not found")
        page = self.pages[tab_id]
        # Fixed settle delay before the screenshot, presumably to let
        # in-flight rendering finish — every action pays this 2s cost.
        await asyncio.sleep(2)
        screenshot_bytes = await page.screenshot(type="png", full_page=False)
        screenshot_b64 = base64.b64encode(screenshot_bytes).decode("utf-8")
        url = page.url
        title = await page.title()
        viewport = page.viewport_size
        all_tabs = {}
        for tid, tab_page in self.pages.items():
            all_tabs[tid] = {
                "url": tab_page.url,
                "title": await tab_page.title() if not tab_page.is_closed() else "Closed",
            }
        return {
            "screenshot": screenshot_b64,
            "url": url,
            "title": title,
            "viewport": viewport,
            "tab_id": tab_id,
            "all_tabs": all_tabs,
        }

    def launch(self, url: str | None = None) -> dict[str, Any]:
        """Bind to the shared browser and create this instance's context."""
        with self._execution_lock:
            if self.context is not None:
                raise ValueError("Browser is already launched")
            # Lazily attach to the process-wide loop/browser pair.
            self._loop, self._browser = _get_browser()
            return self._run_async(self._create_context(url))

    def goto(self, url: str, tab_id: str | None = None) -> dict[str, Any]:
        with self._execution_lock:
            return self._run_async(self._goto(url, tab_id))

    async def _goto(self, url: str, tab_id: str | None = None) -> dict[str, Any]:
        if not tab_id:
            tab_id = self.current_page_id
        if not tab_id or tab_id not in self.pages:
            raise ValueError(f"Tab '{tab_id}' not found")
        page = self.pages[tab_id]
        await page.goto(url, wait_until="domcontentloaded")
        return await self._get_page_state(tab_id)

    def click(self, coordinate: str, tab_id: str | None = None) -> dict[str, Any]:
        with self._execution_lock:
            return self._run_async(self._click(coordinate, tab_id))

    async def _click(self, coordinate: str, tab_id: str | None = None) -> dict[str, Any]:
        if not tab_id:
            tab_id = self.current_page_id
        if not tab_id or tab_id not in self.pages:
            raise ValueError(f"Tab '{tab_id}' not found")
        try:
            # coordinate is "x,y" in page pixels
            x, y = map(int, coordinate.split(","))
        except ValueError as e:
            raise ValueError(f"Invalid coordinate format: {coordinate}. Use 'x,y'") from e
        page = self.pages[tab_id]
        await page.mouse.click(x, y)
        return await self._get_page_state(tab_id)

    def type_text(self, text: str, tab_id: str | None = None) -> dict[str, Any]:
        with self._execution_lock:
            return self._run_async(self._type_text(text, tab_id))

    async def _type_text(self, text: str, tab_id: str | None = None) -> dict[str, Any]:
        if not tab_id:
            tab_id = self.current_page_id
        if not tab_id or tab_id not in self.pages:
            raise ValueError(f"Tab '{tab_id}' not found")
        page = self.pages[tab_id]
        # Types into whatever element currently has focus.
        await page.keyboard.type(text)
        return await self._get_page_state(tab_id)

    def scroll(self, direction: str, tab_id: str | None = None) -> dict[str, Any]:
        with self._execution_lock:
            return self._run_async(self._scroll(direction, tab_id))

    async def _scroll(self, direction: str, tab_id: str | None = None) -> dict[str, Any]:
        if not tab_id:
            tab_id = self.current_page_id
        if not tab_id or tab_id not in self.pages:
            raise ValueError(f"Tab '{tab_id}' not found")
        page = self.pages[tab_id]
        # Scrolling is done via PageDown/PageUp key presses.
        if direction == "down":
            await page.keyboard.press("PageDown")
        elif direction == "up":
            await page.keyboard.press("PageUp")
        else:
            raise ValueError(f"Invalid scroll direction: {direction}")
        return await self._get_page_state(tab_id)

    def back(self, tab_id: str | None = None) -> dict[str, Any]:
        with self._execution_lock:
            return self._run_async(self._back(tab_id))

    async def _back(self, tab_id: str | None = None) -> dict[str, Any]:
        if not tab_id:
            tab_id = self.current_page_id
        if not tab_id or tab_id not in self.pages:
            raise ValueError(f"Tab '{tab_id}' not found")
        page = self.pages[tab_id]
        await page.go_back(wait_until="domcontentloaded")
        return await self._get_page_state(tab_id)

    def forward(self, tab_id: str | None = None) -> dict[str, Any]:
        with self._execution_lock:
            return self._run_async(self._forward(tab_id))

    async def _forward(self, tab_id: str | None = None) -> dict[str, Any]:
        if not tab_id:
            tab_id = self.current_page_id
        if not tab_id or tab_id not in self.pages:
            raise ValueError(f"Tab '{tab_id}' not found")
        page = self.pages[tab_id]
        await page.go_forward(wait_until="domcontentloaded")
        return await self._get_page_state(tab_id)

    def new_tab(self, url: str | None = None) -> dict[str, Any]:
        with self._execution_lock:
            return self._run_async(self._new_tab(url))

    async def _new_tab(self, url: str | None = None) -> dict[str, Any]:
        if not self.context:
            raise ValueError("Browser not launched")
        page = await self.context.new_page()
        tab_id = f"tab_{self._next_tab_id}"
        self._next_tab_id += 1
        self.pages[tab_id] = page
        # A new tab immediately becomes the current tab.
        self.current_page_id = tab_id
        await self._setup_console_logging(page, tab_id)
        if url:
            await page.goto(url, wait_until="domcontentloaded")
        return await self._get_page_state(tab_id)

    def switch_tab(self, tab_id: str) -> dict[str, Any]:
        with self._execution_lock:
            return self._run_async(self._switch_tab(tab_id))

    async def _switch_tab(self, tab_id: str) -> dict[str, Any]:
        if tab_id not in self.pages:
            raise ValueError(f"Tab '{tab_id}' not found")
        self.current_page_id = tab_id
        return await self._get_page_state(tab_id)

    def close_tab(self, tab_id: str) -> dict[str, Any]:
        with self._execution_lock:
            return self._run_async(self._close_tab(tab_id))

    async def _close_tab(self, tab_id: str) -> dict[str, Any]:
        if tab_id not in self.pages:
            raise ValueError(f"Tab '{tab_id}' not found")
        if len(self.pages) == 1:
            raise ValueError("Cannot close the last tab")
        page = self.pages.pop(tab_id)
        await page.close()
        if tab_id in self.console_logs:
            del self.console_logs[tab_id]
        # If the current tab was closed, fall back to an arbitrary survivor.
        if self.current_page_id == tab_id:
            self.current_page_id = next(iter(self.pages.keys()))
        return await self._get_page_state(self.current_page_id)

    def wait(self, duration: float, tab_id: str | None = None) -> dict[str, Any]:
        with self._execution_lock:
            return self._run_async(self._wait(duration, tab_id))

    async def _wait(self, duration: float, tab_id: str | None = None) -> dict[str, Any]:
        # NOTE: _run_async times out at 30s, which bounds usable durations
        # (the state snapshot itself adds ~2s).
        await asyncio.sleep(duration)
        return await self._get_page_state(tab_id)

    def execute_js(self, js_code: str, tab_id: str | None = None) -> dict[str, Any]:
        with self._execution_lock:
            return self._run_async(self._execute_js(js_code, tab_id))

    async def _execute_js(self, js_code: str, tab_id: str | None = None) -> dict[str, Any]:
        if not tab_id:
            tab_id = self.current_page_id
        if not tab_id or tab_id not in self.pages:
            raise ValueError(f"Tab '{tab_id}' not found")
        page = self.pages[tab_id]
        try:
            result = await page.evaluate(js_code)
        except Exception as e:  # noqa: BLE001
            # JS errors are reported in-band rather than raised.
            result = {
                "error": True,
                "error_type": type(e).__name__,
                "error_message": str(e),
            }
        result_str = str(result)
        # Oversized results are replaced by a truncated string form.
        if len(result_str) > MAX_JS_RESULT_LENGTH:
            result = result_str[:MAX_JS_RESULT_LENGTH] + "... [JS result truncated at 5k chars]"
        state = await self._get_page_state(tab_id)
        state["js_result"] = result
        return state

    def get_console_logs(self, tab_id: str | None = None, clear: bool = False) -> dict[str, Any]:
        with self._execution_lock:
            return self._run_async(self._get_console_logs(tab_id, clear))

    async def _get_console_logs(
        self, tab_id: str | None = None, clear: bool = False
    ) -> dict[str, Any]:
        """Return (optionally clearing) the tab's captured console logs, size-bounded."""
        if not tab_id:
            tab_id = self.current_page_id
        if not tab_id or tab_id not in self.pages:
            raise ValueError(f"Tab '{tab_id}' not found")
        logs = self.console_logs.get(tab_id, [])
        total_length = sum(len(str(log)) for log in logs)
        if total_length > MAX_CONSOLE_LOG_LENGTH:
            # Keep the newest logs that fit within the character budget.
            truncated_logs: list[dict[str, Any]] = []
            current_length = 0
            for log in reversed(logs):
                log_length = len(str(log))
                if current_length + log_length <= MAX_CONSOLE_LOG_LENGTH:
                    truncated_logs.insert(0, log)
                    current_length += log_length
                else:
                    break
            if len(truncated_logs) < len(logs):
                truncation_notice = {
                    "type": "info",
                    "text": (
                        f"[TRUNCATED: {len(logs) - len(truncated_logs)} older logs "
                        f"removed to stay within {MAX_CONSOLE_LOG_LENGTH} character limit]"
                    ),
                    "location": {},
                    "timestamp": 0,
                }
                truncated_logs.insert(0, truncation_notice)
            logs = truncated_logs
        if clear:
            self.console_logs[tab_id] = []
        state = await self._get_page_state(tab_id)
        state["console_logs"] = logs
        return state

    def view_source(self, tab_id: str | None = None) -> dict[str, Any]:
        with self._execution_lock:
            return self._run_async(self._view_source(tab_id))

    async def _view_source(self, tab_id: str | None = None) -> dict[str, Any]:
        """Return the page HTML, middle-truncated to MAX_PAGE_SOURCE_LENGTH."""
        if not tab_id:
            tab_id = self.current_page_id
        if not tab_id or tab_id not in self.pages:
            raise ValueError(f"Tab '{tab_id}' not found")
        page = self.pages[tab_id]
        source = await page.content()
        original_length = len(source)
        if original_length > MAX_PAGE_SOURCE_LENGTH:
            # Keep head and tail, drop the middle, and say how much was cut.
            truncation_message = (
                f"\n\n<!-- [TRUNCATED: {original_length - MAX_PAGE_SOURCE_LENGTH} "
                "characters removed] -->\n\n"
            )
            available_space = MAX_PAGE_SOURCE_LENGTH - len(truncation_message)
            truncate_point = available_space // 2
            source = source[:truncate_point] + truncation_message + source[-truncate_point:]
        state = await self._get_page_state(tab_id)
        state["page_source"] = source
        return state

    def double_click(self, coordinate: str, tab_id: str | None = None) -> dict[str, Any]:
        with self._execution_lock:
            return self._run_async(self._double_click(coordinate, tab_id))

    async def _double_click(self, coordinate: str, tab_id: str | None = None) -> dict[str, Any]:
        if not tab_id:
            tab_id = self.current_page_id
        if not tab_id or tab_id not in self.pages:
            raise ValueError(f"Tab '{tab_id}' not found")
        try:
            x, y = map(int, coordinate.split(","))
        except ValueError as e:
            raise ValueError(f"Invalid coordinate format: {coordinate}. Use 'x,y'") from e
        page = self.pages[tab_id]
        await page.mouse.dblclick(x, y)
        return await self._get_page_state(tab_id)

    def hover(self, coordinate: str, tab_id: str | None = None) -> dict[str, Any]:
        with self._execution_lock:
            return self._run_async(self._hover(coordinate, tab_id))

    async def _hover(self, coordinate: str, tab_id: str | None = None) -> dict[str, Any]:
        if not tab_id:
            tab_id = self.current_page_id
        if not tab_id or tab_id not in self.pages:
            raise ValueError(f"Tab '{tab_id}' not found")
        try:
            x, y = map(int, coordinate.split(","))
        except ValueError as e:
            raise ValueError(f"Invalid coordinate format: {coordinate}. Use 'x,y'") from e
        page = self.pages[tab_id]
        await page.mouse.move(x, y)
        return await self._get_page_state(tab_id)

    def press_key(self, key: str, tab_id: str | None = None) -> dict[str, Any]:
        with self._execution_lock:
            return self._run_async(self._press_key(key, tab_id))

    async def _press_key(self, key: str, tab_id: str | None = None) -> dict[str, Any]:
        if not tab_id:
            tab_id = self.current_page_id
        if not tab_id or tab_id not in self.pages:
            raise ValueError(f"Tab '{tab_id}' not found")
        page = self.pages[tab_id]
        await page.keyboard.press(key)
        return await self._get_page_state(tab_id)

    def save_pdf(self, file_path: str, tab_id: str | None = None) -> dict[str, Any]:
        with self._execution_lock:
            return self._run_async(self._save_pdf(file_path, tab_id))

    async def _save_pdf(self, file_path: str, tab_id: str | None = None) -> dict[str, Any]:
        if not tab_id:
            tab_id = self.current_page_id
        if not tab_id or tab_id not in self.pages:
            raise ValueError(f"Tab '{tab_id}' not found")
        # Relative paths are resolved against /workspace (sandbox convention).
        if not Path(file_path).is_absolute():
            file_path = str(Path("/workspace") / file_path)
        page = self.pages[tab_id]
        await page.pdf(path=file_path)
        state = await self._get_page_state(tab_id)
        state["pdf_saved"] = file_path
        return state

    def close(self) -> None:
        """Close this instance's context and reset state; the shared browser survives."""
        with self._execution_lock:
            self.is_running = False
            if self._loop and self.context:
                # Bypass _run_async: is_running is already False.
                future = asyncio.run_coroutine_threadsafe(self._close_context(), self._loop)
                with contextlib.suppress(Exception):
                    future.result(timeout=5)
            self.pages.clear()
            self.console_logs.clear()
            self.current_page_id = None
            self.context = None

    async def _close_context(self) -> None:
        try:
            if self.context:
                await self.context.close()
        except (OSError, RuntimeError) as e:
            logger.warning(f"Error closing context: {e}")

    def is_alive(self) -> bool:
        """True while this context exists and the shared browser is still connected."""
        return (
            self.is_running
            and self.context is not None
            and self._browser is not None
            and self._browser.is_connected()
        )
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/browser/browser_instance.py",
"license": "Apache License 2.0",
"lines": 429,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/tools/browser/tab_manager.py | import atexit
import contextlib
import threading
from typing import Any
from strix.tools.context import get_current_agent_id
from .browser_instance import BrowserInstance
class BrowserTabManager:
def __init__(self) -> None:
self._browsers_by_agent: dict[str, BrowserInstance] = {}
self._lock = threading.Lock()
self._register_cleanup_handlers()
def _get_agent_browser(self) -> BrowserInstance | None:
agent_id = get_current_agent_id()
with self._lock:
return self._browsers_by_agent.get(agent_id)
def _set_agent_browser(self, browser: BrowserInstance | None) -> None:
agent_id = get_current_agent_id()
with self._lock:
if browser is None:
self._browsers_by_agent.pop(agent_id, None)
else:
self._browsers_by_agent[agent_id] = browser
def launch_browser(self, url: str | None = None) -> dict[str, Any]:
with self._lock:
agent_id = get_current_agent_id()
if agent_id in self._browsers_by_agent:
raise ValueError("Browser is already launched")
try:
browser = BrowserInstance()
result = browser.launch(url)
self._browsers_by_agent[agent_id] = browser
result["message"] = "Browser launched successfully"
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to launch browser: {e}") from e
else:
return result
def goto_url(self, url: str, tab_id: str | None = None) -> dict[str, Any]:
browser = self._get_agent_browser()
if browser is None:
raise ValueError("Browser not launched")
try:
result = browser.goto(url, tab_id)
result["message"] = f"Navigated to {url}"
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to navigate to URL: {e}") from e
else:
return result
def click(self, coordinate: str, tab_id: str | None = None) -> dict[str, Any]:
browser = self._get_agent_browser()
if browser is None:
raise ValueError("Browser not launched")
try:
result = browser.click(coordinate, tab_id)
result["message"] = f"Clicked at {coordinate}"
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to click: {e}") from e
else:
return result
def type_text(self, text: str, tab_id: str | None = None) -> dict[str, Any]:
browser = self._get_agent_browser()
if browser is None:
raise ValueError("Browser not launched")
try:
result = browser.type_text(text, tab_id)
result["message"] = f"Typed text: {text[:50]}{'...' if len(text) > 50 else ''}"
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to type text: {e}") from e
else:
return result
def scroll(self, direction: str, tab_id: str | None = None) -> dict[str, Any]:
browser = self._get_agent_browser()
if browser is None:
raise ValueError("Browser not launched")
try:
result = browser.scroll(direction, tab_id)
result["message"] = f"Scrolled {direction}"
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to scroll: {e}") from e
else:
return result
def back(self, tab_id: str | None = None) -> dict[str, Any]:
browser = self._get_agent_browser()
if browser is None:
raise ValueError("Browser not launched")
try:
result = browser.back(tab_id)
result["message"] = "Navigated back"
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to go back: {e}") from e
else:
return result
def forward(self, tab_id: str | None = None) -> dict[str, Any]:
browser = self._get_agent_browser()
if browser is None:
raise ValueError("Browser not launched")
try:
result = browser.forward(tab_id)
result["message"] = "Navigated forward"
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to go forward: {e}") from e
else:
return result
def new_tab(self, url: str | None = None) -> dict[str, Any]:
browser = self._get_agent_browser()
if browser is None:
raise ValueError("Browser not launched")
try:
result = browser.new_tab(url)
result["message"] = f"Created new tab {result.get('tab_id', '')}"
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to create new tab: {e}") from e
else:
return result
def switch_tab(self, tab_id: str) -> dict[str, Any]:
browser = self._get_agent_browser()
if browser is None:
raise ValueError("Browser not launched")
try:
result = browser.switch_tab(tab_id)
result["message"] = f"Switched to tab {tab_id}"
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to switch tab: {e}") from e
else:
return result
def close_tab(self, tab_id: str) -> dict[str, Any]:
browser = self._get_agent_browser()
if browser is None:
raise ValueError("Browser not launched")
try:
result = browser.close_tab(tab_id)
result["message"] = f"Closed tab {tab_id}"
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to close tab: {e}") from e
else:
return result
def wait_browser(self, duration: float, tab_id: str | None = None) -> dict[str, Any]:
browser = self._get_agent_browser()
if browser is None:
raise ValueError("Browser not launched")
try:
result = browser.wait(duration, tab_id)
result["message"] = f"Waited {duration}s"
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to wait: {e}") from e
else:
return result
def execute_js(self, js_code: str, tab_id: str | None = None) -> dict[str, Any]:
browser = self._get_agent_browser()
if browser is None:
raise ValueError("Browser not launched")
try:
result = browser.execute_js(js_code, tab_id)
result["message"] = "JavaScript executed successfully"
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to execute JavaScript: {e}") from e
else:
return result
def double_click(self, coordinate: str, tab_id: str | None = None) -> dict[str, Any]:
browser = self._get_agent_browser()
if browser is None:
raise ValueError("Browser not launched")
try:
result = browser.double_click(coordinate, tab_id)
result["message"] = f"Double clicked at {coordinate}"
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to double click: {e}") from e
else:
return result
def hover(self, coordinate: str, tab_id: str | None = None) -> dict[str, Any]:
browser = self._get_agent_browser()
if browser is None:
raise ValueError("Browser not launched")
try:
result = browser.hover(coordinate, tab_id)
result["message"] = f"Hovered at {coordinate}"
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to hover: {e}") from e
else:
return result
def press_key(self, key: str, tab_id: str | None = None) -> dict[str, Any]:
browser = self._get_agent_browser()
if browser is None:
raise ValueError("Browser not launched")
try:
result = browser.press_key(key, tab_id)
result["message"] = f"Pressed key {key}"
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to press key: {e}") from e
else:
return result
def save_pdf(self, file_path: str, tab_id: str | None = None) -> dict[str, Any]:
browser = self._get_agent_browser()
if browser is None:
raise ValueError("Browser not launched")
try:
result = browser.save_pdf(file_path, tab_id)
result["message"] = f"Page saved as PDF: {file_path}"
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to save PDF: {e}") from e
else:
return result
def get_console_logs(self, tab_id: str | None = None, clear: bool = False) -> dict[str, Any]:
browser = self._get_agent_browser()
if browser is None:
raise ValueError("Browser not launched")
try:
result = browser.get_console_logs(tab_id, clear)
action_text = "cleared and retrieved" if clear else "retrieved"
logs = result.get("console_logs", [])
truncated = any(log.get("text", "").startswith("[TRUNCATED:") for log in logs)
truncated_text = " (truncated)" if truncated else ""
result["message"] = (
f"Console logs {action_text} for tab "
f"{result.get('tab_id', 'current')}{truncated_text}"
)
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to get console logs: {e}") from e
else:
return result
def view_source(self, tab_id: str | None = None) -> dict[str, Any]:
browser = self._get_agent_browser()
if browser is None:
raise ValueError("Browser not launched")
try:
result = browser.view_source(tab_id)
result["message"] = "Page source retrieved"
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to get page source: {e}") from e
else:
return result
def list_tabs(self) -> dict[str, Any]:
browser = self._get_agent_browser()
if browser is None:
return {"tabs": {}, "total_count": 0, "current_tab": None}
try:
tab_info = {}
for tid, tab_page in browser.pages.items():
try:
tab_info[tid] = {
"url": tab_page.url,
"title": "Unknown" if tab_page.is_closed() else "Active",
"is_current": tid == browser.current_page_id,
}
except (AttributeError, RuntimeError):
tab_info[tid] = {
"url": "Unknown",
"title": "Closed",
"is_current": False,
}
return {
"tabs": tab_info,
"total_count": len(tab_info),
"current_tab": browser.current_page_id,
}
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to list tabs: {e}") from e
def close_browser(self) -> dict[str, Any]:
agent_id = get_current_agent_id()
with self._lock:
browser = self._browsers_by_agent.pop(agent_id, None)
if browser is None:
raise ValueError("Browser not launched")
try:
browser.close()
except (OSError, ValueError, RuntimeError) as e:
raise RuntimeError(f"Failed to close browser: {e}") from e
else:
return {
"message": "Browser closed successfully",
"screenshot": "",
"is_running": False,
}
def cleanup_agent(self, agent_id: str) -> None:
with self._lock:
browser = self._browsers_by_agent.pop(agent_id, None)
if browser:
with contextlib.suppress(Exception):
browser.close()
def cleanup_dead_browser(self) -> None:
with self._lock:
dead_agents = []
for agent_id, browser in self._browsers_by_agent.items():
if not browser.is_alive():
dead_agents.append(agent_id)
for agent_id in dead_agents:
browser = self._browsers_by_agent.pop(agent_id)
with contextlib.suppress(Exception):
browser.close()
def close_all(self) -> None:
with self._lock:
browsers = list(self._browsers_by_agent.values())
self._browsers_by_agent.clear()
for browser in browsers:
with contextlib.suppress(Exception):
browser.close()
    def _register_cleanup_handlers(self) -> None:
        """Arrange for all registered browsers to be closed at interpreter exit."""
        atexit.register(self.close_all)
# Process-wide singleton: every tool call shares this one tab manager.
_browser_tab_manager = BrowserTabManager()


def get_browser_tab_manager() -> BrowserTabManager:
    """Return the process-wide BrowserTabManager singleton."""
    return _browser_tab_manager
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/browser/tab_manager.py",
"license": "Apache License 2.0",
"lines": 300,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/tools/executor.py | import inspect
import os
from typing import Any
import httpx
from strix.config import Config
from strix.telemetry import posthog
if os.getenv("STRIX_SANDBOX_MODE", "false").lower() == "false":
from strix.runtime import get_runtime
from .argument_parser import convert_arguments
from .registry import (
get_tool_by_name,
get_tool_names,
get_tool_param_schema,
needs_agent_state,
should_execute_in_sandbox,
)
# Server-side per-tool execution budget in seconds (configurable).
_SERVER_TIMEOUT = float(Config.get("strix_sandbox_execution_timeout") or "120")
# Client-side timeout is padded past the server's — presumably so the server's
# own timeout surfaces first; confirm against the tool server's behavior.
SANDBOX_EXECUTION_TIMEOUT = _SERVER_TIMEOUT + 30
SANDBOX_CONNECT_TIMEOUT = float(Config.get("strix_sandbox_connect_timeout") or "10")
async def execute_tool(tool_name: str, agent_state: Any | None = None, **kwargs: Any) -> Any:
    """Dispatch a tool call to the sandbox or run it in-process.

    Sandbox routing applies only when the tool is registered for sandbox
    execution and this process is not itself already running inside a sandbox
    (STRIX_SANDBOX_MODE).
    """
    inside_sandbox = os.getenv("STRIX_SANDBOX_MODE", "false").lower() == "true"
    if should_execute_in_sandbox(tool_name) and not inside_sandbox:
        return await _execute_tool_in_sandbox(tool_name, agent_state, **kwargs)
    return await _execute_tool_locally(tool_name, agent_state, **kwargs)
async def _execute_tool_in_sandbox(tool_name: str, agent_state: Any, **kwargs: Any) -> Any:
    """Run a tool inside the agent's sandbox via its HTTP tool server.

    Requires ``agent_state`` to carry a ``sandbox_id``, a ``sandbox_token``,
    and a ``sandbox_info`` dict containing ``tool_server_port``.

    Raises:
        ValueError: when any required sandbox field is missing.
        RuntimeError: on transport failures, non-2xx responses, or when the
            server reports an execution error in its JSON payload.
    """
    if not hasattr(agent_state, "sandbox_id") or not agent_state.sandbox_id:
        raise ValueError("Agent state with a valid sandbox_id is required for sandbox execution.")
    if not hasattr(agent_state, "sandbox_token") or not agent_state.sandbox_token:
        raise ValueError(
            "Agent state with a valid sandbox_token is required for sandbox execution."
        )
    if (
        not hasattr(agent_state, "sandbox_info")
        or "tool_server_port" not in agent_state.sandbox_info
    ):
        raise ValueError(
            "Agent state with a valid sandbox_info containing tool_server_port is required."
        )
    runtime = get_runtime()
    tool_server_port = agent_state.sandbox_info["tool_server_port"]
    server_url = await runtime.get_sandbox_url(agent_state.sandbox_id, tool_server_port)
    request_url = f"{server_url}/execute"
    agent_id = getattr(agent_state, "agent_id", "unknown")
    request_data = {
        "agent_id": agent_id,
        "tool_name": tool_name,
        "kwargs": kwargs,
    }
    headers = {
        "Authorization": f"Bearer {agent_state.sandbox_token}",
        "Content-Type": "application/json",
    }
    # Give the server its full execution budget plus margin (module constants).
    timeout = httpx.Timeout(
        timeout=SANDBOX_EXECUTION_TIMEOUT,
        connect=SANDBOX_CONNECT_TIMEOUT,
    )
    # trust_env=False keeps local HTTP(S)_PROXY settings from intercepting the call.
    async with httpx.AsyncClient(trust_env=False) as client:
        try:
            response = await client.post(
                request_url, json=request_data, headers=headers, timeout=timeout
            )
            response.raise_for_status()
            response_data = response.json()
            if response_data.get("error"):
                posthog.error("tool_execution_error", f"{tool_name}: {response_data['error']}")
                raise RuntimeError(f"Sandbox execution error: {response_data['error']}")
            return response_data.get("result")
        except httpx.HTTPStatusError as e:
            posthog.error("tool_http_error", f"{tool_name}: HTTP {e.response.status_code}")
            if e.response.status_code == 401:
                raise RuntimeError("Authentication failed: Invalid or missing sandbox token") from e
            raise RuntimeError(f"HTTP error calling tool server: {e.response.status_code}") from e
        except httpx.RequestError as e:
            error_type = type(e).__name__
            posthog.error("tool_request_error", f"{tool_name}: {error_type}")
            raise RuntimeError(f"Request error calling tool server: {error_type}") from e
async def _execute_tool_locally(tool_name: str, agent_state: Any | None, **kwargs: Any) -> Any:
    """Look up a registered tool and invoke it in-process.

    Injects ``agent_state`` when the tool declares it needs one, and awaits
    the result when the tool returns an awaitable.
    """
    tool_func = get_tool_by_name(tool_name)
    if not tool_func:
        raise ValueError(f"Tool '{tool_name}' not found")
    call_kwargs = convert_arguments(tool_func, kwargs)
    if not needs_agent_state(tool_name):
        outcome = tool_func(**call_kwargs)
    else:
        if agent_state is None:
            raise ValueError(f"Tool '{tool_name}' requires agent_state but none was provided.")
        outcome = tool_func(agent_state=agent_state, **call_kwargs)
    if inspect.isawaitable(outcome):
        return await outcome
    return outcome
def validate_tool_availability(tool_name: str | None) -> tuple[bool, str]:
    """Check that a tool name was provided and is registered.

    Returns ``(True, "")`` on success, otherwise ``(False, message)`` where
    the message lists all registered tools.
    """
    names = get_tool_names()
    if tool_name is not None and tool_name in names:
        return True, ""
    available = ", ".join(sorted(names))
    if tool_name is None:
        return False, f"Tool name is missing. Available tools: {available}"
    return False, f"Tool '{tool_name}' is not available. Available tools: {available}"
def _validate_tool_arguments(tool_name: str, kwargs: dict[str, Any]) -> str | None:
    """Validate kwargs against the tool's declared parameter schema.

    Returns an error message (with a schema hint appended) for unknown or
    missing/blank required parameters, or ``None`` when the arguments pass.
    """
    schema = get_tool_param_schema(tool_name)
    if not schema or not schema.get("has_params"):
        return None
    allowed: set[str] = schema.get("params", set())
    required: set[str] = schema.get("required", set())
    hint = _format_schema_hint(tool_name, required, allowed - required)

    unknown = set(kwargs) - allowed
    if unknown:
        names = ", ".join(sorted(unknown))
        return f"Tool '{tool_name}' received unknown parameter(s): {names}\n{hint}"

    # Absent keys come back as None from .get(), so one check covers both
    # "missing" and "explicitly empty".
    missing = sorted(p for p in required if kwargs.get(p) in (None, ""))
    if missing:
        return f"Tool '{tool_name}' missing required parameter(s): {', '.join(missing)}\n{hint}"
    return None
def _format_schema_hint(tool_name: str, required: set[str], optional: set[str]) -> str:
parts = [f"Valid parameters for '{tool_name}':"]
if required:
parts.append(f" Required: {', '.join(sorted(required))}")
if optional:
parts.append(f" Optional: {', '.join(sorted(optional))}")
return "\n".join(parts)
async def execute_tool_with_validation(
    tool_name: str | None, agent_state: Any | None = None, **kwargs: Any
) -> Any:
    """Validate the tool name and arguments, then execute the tool.

    Every failure — unknown tool, bad arguments, or an exception during
    execution — is reported as an ``"Error..."`` string rather than raised.
    """
    ok, availability_error = validate_tool_availability(tool_name)
    if not ok:
        return f"Error: {availability_error}"
    assert tool_name is not None
    schema_error = _validate_tool_arguments(tool_name, kwargs)
    if schema_error:
        return f"Error: {schema_error}"
    try:
        return await execute_tool(tool_name, agent_state, **kwargs)
    except Exception as e:  # noqa: BLE001
        detail = str(e)
        if len(detail) > 500:
            detail = detail[:500] + "... [truncated]"
        return f"Error executing {tool_name}: {detail}"
async def execute_tool_invocation(tool_inv: dict[str, Any], agent_state: Any | None = None) -> Any:
    """Run one parsed invocation dict (``toolName`` + ``args``) with validation."""
    return await execute_tool_with_validation(
        tool_inv.get("toolName"), agent_state, **tool_inv.get("args", {})
    )
def _check_error_result(result: Any) -> tuple[bool, Any]:
is_error = False
error_payload: Any = None
if (isinstance(result, dict) and "error" in result) or (
isinstance(result, str) and result.strip().lower().startswith("error:")
):
is_error = True
error_payload = result
return is_error, error_payload
def _update_tracer_with_result(
tracer: Any, execution_id: Any, is_error: bool, result: Any, error_payload: Any
) -> None:
if not tracer or not execution_id:
return
try:
if is_error:
tracer.update_tool_execution(execution_id, "error", error_payload)
else:
tracer.update_tool_execution(execution_id, "completed", result)
except (ConnectionError, RuntimeError) as e:
error_msg = str(e)
if tracer and execution_id:
tracer.update_tool_execution(execution_id, "error", error_msg)
raise
def _format_tool_result(tool_name: str, result: Any) -> tuple[str, list[dict[str, Any]]]:
    """Turn a raw tool result into an XML observation plus extracted images.

    A base64 screenshot, when present, is pulled out of the result and
    returned as an image-url content part; very long textual results are
    truncated in the middle.
    """
    images: list[dict[str, Any]] = []
    screenshot = extract_screenshot_from_result(result)
    if screenshot:
        images.append(
            {
                "type": "image_url",
                "image_url": {"url": f"data:image/png;base64,{screenshot}"},
            }
        )
        text_result = remove_screenshot_from_result(result)
    else:
        text_result = result

    if text_result is None:
        body = f"Tool {tool_name} executed successfully"
    else:
        body = str(text_result)
        if len(body) > 10000:
            # Keep head and tail; the middle of long output is rarely useful.
            body = body[:4000] + "\n\n... [middle content truncated] ...\n\n" + body[-4000:]

    observation = (
        f"<tool_result>\n<tool_name>{tool_name}</tool_name>\n"
        f"<result>{body}</result>\n</tool_result>"
    )
    return observation, images
async def _execute_single_tool(
    tool_inv: dict[str, Any],
    agent_state: Any | None,
    tracer: Any | None,
    agent_id: str,
) -> tuple[str, list[dict[str, Any]], bool]:
    """Execute one tool invocation with tracing.

    Returns ``(observation_xml, images, should_agent_finish)``; the last flag
    is set only when a successful ``finish_scan``/``agent_finish`` dict result
    asks the agent loop to stop.  Exceptions are recorded on the tracer and
    re-raised.
    """
    tool_name = tool_inv.get("toolName", "unknown")
    args = tool_inv.get("args", {})
    execution_id = None
    should_agent_finish = False
    if tracer:
        execution_id = tracer.log_tool_execution_start(agent_id, tool_name, args)
    try:
        result = await execute_tool_invocation(tool_inv, agent_state)
        is_error, error_payload = _check_error_result(result)
        # Only a successful dict result from the finish tools may end the agent.
        if (
            tool_name in ("finish_scan", "agent_finish")
            and not is_error
            and isinstance(result, dict)
        ):
            if tool_name == "finish_scan":
                should_agent_finish = result.get("scan_completed", False)
            elif tool_name == "agent_finish":
                should_agent_finish = result.get("agent_completed", False)
        _update_tracer_with_result(tracer, execution_id, is_error, result, error_payload)
    except (ConnectionError, RuntimeError, ValueError, TypeError, OSError) as e:
        error_msg = str(e)
        if tracer and execution_id:
            tracer.update_tool_execution(execution_id, "error", error_msg)
        raise
    observation_xml, images = _format_tool_result(tool_name, result)
    return observation_xml, images, should_agent_finish
def _get_tracer_and_agent_id(agent_state: Any | None) -> tuple[Any | None, str]:
try:
from strix.telemetry.tracer import get_global_tracer
tracer = get_global_tracer()
agent_id = agent_state.agent_id if agent_state else "unknown_agent"
except (ImportError, AttributeError):
tracer = None
agent_id = "unknown_agent"
return tracer, agent_id
async def process_tool_invocations(
    tool_invocations: list[dict[str, Any]],
    conversation_history: list[dict[str, Any]],
    agent_state: Any | None = None,
) -> bool:
    """Run each invocation in order and append the results to the conversation.

    Image results are attached as multimodal content parts.  Returns ``True``
    when any tool requested that the agent finish.
    """
    tracer, agent_id = _get_tracer_and_agent_id(agent_state)
    observations: list[str] = []
    images: list[dict[str, Any]] = []
    finish_requested = False
    for invocation in tool_invocations:
        xml, new_images, wants_finish = await _execute_single_tool(
            invocation, agent_state, tracer, agent_id
        )
        observations.append(xml)
        images.extend(new_images)
        finish_requested = finish_requested or wants_finish
    summary = "Tool Results:\n\n" + "\n\n".join(observations)
    if images:
        content: list[dict[str, Any]] = [{"type": "text", "text": summary}]
        content.extend(images)
        conversation_history.append({"role": "user", "content": content})
    else:
        conversation_history.append({"role": "user", "content": summary})
    return finish_requested
def extract_screenshot_from_result(result: Any) -> str | None:
if not isinstance(result, dict):
return None
screenshot = result.get("screenshot")
if isinstance(screenshot, str) and screenshot:
return screenshot
return None
def remove_screenshot_from_result(result: Any) -> Any:
    """Return a shallow copy of a result dict with any screenshot replaced by
    a placeholder; non-dict results pass through unchanged."""
    if not isinstance(result, dict):
        return result
    sanitized = dict(result)
    if "screenshot" in sanitized:
        sanitized["screenshot"] = "[Image data extracted - see attached image]"
    return sanitized
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/executor.py",
"license": "Apache License 2.0",
"lines": 276,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/tools/file_edit/file_edit_actions.py | import json
import re
from pathlib import Path
from typing import Any, cast
from strix.tools.registry import register_tool
def _parse_file_editor_output(output: str) -> dict[str, Any]:
try:
pattern = r"<oh_aci_output_[^>]+>\n(.*?)\n</oh_aci_output_[^>]+>"
match = re.search(pattern, output, re.DOTALL)
if match:
json_str = match.group(1)
data = json.loads(json_str)
return cast("dict[str, Any]", data)
return {"output": output, "error": None}
except (json.JSONDecodeError, AttributeError):
return {"output": output, "error": None}
@register_tool
def str_replace_editor(
    command: str,
    path: str,
    file_text: str | None = None,
    view_range: list[int] | None = None,
    old_str: str | None = None,
    new_str: str | None = None,
    insert_line: int | None = None,
) -> dict[str, Any]:
    """Proxy to openhands-aci's file editor, anchoring relative paths at /workspace.

    Returns ``{"content": ...}`` on success or ``{"error": ...}`` on failure.
    """
    from openhands_aci import file_editor

    try:
        if not Path(path).is_absolute():
            path = str(Path("/workspace") / Path(path))
        raw = file_editor(
            command=command,
            path=path,
            file_text=file_text,
            view_range=view_range,
            old_str=old_str,
            new_str=new_str,
            insert_line=insert_line,
        )
        parsed = _parse_file_editor_output(raw)
        if parsed.get("error"):
            return {"error": parsed["error"]}
        return {"content": parsed.get("output", raw)}
    except (OSError, ValueError) as e:
        return {"error": f"Error in {command} operation: {e!s}"}
@register_tool
def list_files(
    path: str,
    recursive: bool = False,
) -> dict[str, Any]:
    """List a directory's files and subdirectories, sorted.

    Relative paths are anchored at /workspace.  Recursive listings are capped
    at 500 entries by the underlying ``find | head`` pipeline.
    """
    from openhands_aci.utils.shell import run_shell_cmd

    try:
        if not Path(path).is_absolute():
            path = str(Path("/workspace") / Path(path))
        target = Path(path)
        if not target.exists():
            return {"error": f"Directory not found: {path}"}
        if not target.is_dir():
            return {"error": f"Path is not a directory: {path}"}
        cmd = f"find '{path}' -type f -o -type d | head -500" if recursive else f"ls -1a '{path}'"
        exit_code, stdout, stderr = run_shell_cmd(cmd)
        if exit_code != 0:
            return {"error": f"Error listing directory: {stderr}"}
        entries = stdout.strip().split("\n") if stdout.strip() else []
        files: list[str] = []
        dirs: list[str] = []
        for entry in entries:
            # `find` prints full paths; `ls` prints bare names relative to path.
            resolved = Path(entry) if recursive else Path(path) / entry
            if resolved.is_file():
                files.append(entry)
            elif resolved.is_dir():
                dirs.append(entry)
        return {
            "files": sorted(files),
            "directories": sorted(dirs),
            "total_files": len(files),
            "total_dirs": len(dirs),
            "path": path,
            "recursive": recursive,
        }
    except (OSError, ValueError) as e:
        return {"error": f"Error listing directory: {e!s}"}
@register_tool
def search_files(
    path: str,
    regex: str,
    file_pattern: str = "*",
) -> dict[str, Any]:
    """Search files under *path* with ripgrep.

    Relative paths are anchored at /workspace.  ``rg`` exit code 1 means
    "no matches" and is not treated as an error.
    """
    from openhands_aci.utils.shell import run_shell_cmd

    def _sq(value: str) -> str:
        # Make a value safe inside single quotes in the shell command below.
        return value.replace("'", "'\"'\"'")

    try:
        path_obj = Path(path)
        if not path_obj.is_absolute():
            path = str(Path("/workspace") / path_obj)
        if not Path(path).exists():
            return {"error": f"Directory not found: {path}"}
        # Escape every interpolated value, not only the regex: a single quote
        # in file_pattern or path would otherwise break out of the shell
        # quoting (command breakage / injection).
        cmd = (
            f"rg --line-number --glob '{_sq(file_pattern)}' "
            f"'{_sq(regex)}' '{_sq(path)}'"
        )
        exit_code, stdout, stderr = run_shell_cmd(cmd)
        if exit_code not in {0, 1}:
            return {"error": f"Error searching files: {stderr}"}
        return {"output": stdout if stdout else "No matches found"}
    except (OSError, ValueError) as e:
        return {"error": f"Error searching files: {e!s}"}
# ruff: noqa: TRY300
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/file_edit/file_edit_actions.py",
"license": "Apache License 2.0",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/tools/finish/finish_actions.py | from typing import Any
from strix.tools.registry import register_tool
def _validate_root_agent(agent_state: Any) -> dict[str, Any] | None:
if agent_state and hasattr(agent_state, "parent_id") and agent_state.parent_id is not None:
return {
"success": False,
"error": "finish_scan_wrong_agent",
"message": "This tool can only be used by the root/main agent",
"suggestion": "If you are a subagent, use agent_finish from agents_graph tool instead",
}
return None
def _check_active_agents(agent_state: Any = None) -> dict[str, Any] | None:
    """Return an error payload if other agents are still running or stopping.

    Inspects the shared in-memory agent graph, excluding the calling agent.
    Returns ``None`` when the scan may finish — including when the graph
    module is unavailable or the check itself fails (best-effort gate).
    """
    try:
        from strix.tools.agents_graph.agents_graph_actions import _agent_graph

        if agent_state and agent_state.agent_id:
            current_agent_id = agent_state.agent_id
        else:
            # Without a caller id we cannot exclude the caller from the scan,
            # so skip the check entirely.
            return None
        active_agents = []
        stopping_agents = []
        for agent_id, node in _agent_graph["nodes"].items():
            if agent_id == current_agent_id:
                continue
            status = node.get("status", "unknown")
            if status == "running":
                active_agents.append(
                    {
                        "id": agent_id,
                        "name": node.get("name", "Unknown"),
                        # Cap task text so the payload stays readable.
                        "task": node.get("task", "Unknown task")[:300],
                        "status": status,
                    }
                )
            elif status == "stopping":
                stopping_agents.append(
                    {
                        "id": agent_id,
                        "name": node.get("name", "Unknown"),
                        "task": node.get("task", "Unknown task")[:300],
                        "status": status,
                    }
                )
        if active_agents or stopping_agents:
            response: dict[str, Any] = {
                "success": False,
                "error": "agents_still_active",
                "message": "Cannot finish scan: agents are still active",
            }
            if active_agents:
                response["active_agents"] = active_agents
            if stopping_agents:
                response["stopping_agents"] = stopping_agents
            response["suggestions"] = [
                "Use wait_for_message to wait for all agents to complete",
                "Use send_message_to_agent if you need agents to complete immediately",
                "Check agent_status to see current agent states",
            ]
            response["total_active"] = len(active_agents) + len(stopping_agents)
            return response
    except ImportError:
        # Agent-graph machinery not present: nothing to block on.
        pass
    except Exception:
        import logging

        # Deliberately broad: a broken check must not prevent finishing.
        logging.exception("Error checking active agents")
    return None
@register_tool(sandbox_execution=False)
def finish_scan(
    executive_summary: str,
    methodology: str,
    technical_analysis: str,
    recommendations: str,
    agent_state: Any = None,
) -> dict[str, Any]:
    """Finalize the scan with the root agent's report sections.

    Refuses to run for subagents or while other agents are active, and
    requires all four report sections to be non-blank.  When a global tracer
    is available the sections are persisted on it; when it is not, the
    ``try`` body falls through without returning and the ``else`` branch
    reports success with a not-persisted warning.
    """
    validation_error = _validate_root_agent(agent_state)
    if validation_error:
        return validation_error
    active_agents_error = _check_active_agents(agent_state)
    if active_agents_error:
        return active_agents_error
    validation_errors = []
    if not executive_summary or not executive_summary.strip():
        validation_errors.append("Executive summary cannot be empty")
    if not methodology or not methodology.strip():
        validation_errors.append("Methodology cannot be empty")
    if not technical_analysis or not technical_analysis.strip():
        validation_errors.append("Technical analysis cannot be empty")
    if not recommendations or not recommendations.strip():
        validation_errors.append("Recommendations cannot be empty")
    if validation_errors:
        return {"success": False, "message": "Validation failed", "errors": validation_errors}
    try:
        from strix.telemetry.tracer import get_global_tracer

        tracer = get_global_tracer()
        if tracer:
            tracer.update_scan_final_fields(
                executive_summary=executive_summary.strip(),
                methodology=methodology.strip(),
                technical_analysis=technical_analysis.strip(),
                recommendations=recommendations.strip(),
            )
            vulnerability_count = len(tracer.vulnerability_reports)
            return {
                "success": True,
                "scan_completed": True,
                "message": "Scan completed successfully",
                "vulnerabilities_found": vulnerability_count,
            }
        import logging

        # No tracer available: log, then fall through to the else branch.
        logging.warning("Current tracer not available - scan results not stored")
    except (ImportError, AttributeError) as e:
        return {"success": False, "message": f"Failed to complete scan: {e!s}"}
    else:
        return {
            "success": True,
            "scan_completed": True,
            "message": "Scan completed (not persisted)",
            "warning": "Results could not be persisted - tracer unavailable",
        }
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/finish/finish_actions.py",
"license": "Apache License 2.0",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/tools/notes/notes_actions.py | import uuid
from datetime import UTC, datetime
from typing import Any
from strix.tools.registry import register_tool
# In-memory, process-local note store mapping note_id -> note fields.
# Lives only for the lifetime of this process; nothing here writes to disk.
_notes_storage: dict[str, dict[str, Any]] = {}
def _filter_notes(
    category: str | None = None,
    tags: list[str] | None = None,
    search_query: str | None = None,
) -> list[dict[str, Any]]:
    """Select notes matching a category, any of the tags, and a
    case-insensitive title/content substring search; newest first."""
    selected: list[dict[str, Any]] = []
    query = search_query.lower() if search_query else None
    for note_id, note in _notes_storage.items():
        if category and note.get("category") != category:
            continue
        if tags and not any(t in note.get("tags", []) for t in tags):
            continue
        if query is not None:
            haystacks = (note.get("title", ""), note.get("content", ""))
            if not any(query in text.lower() for text in haystacks):
                continue
        entry = dict(note)
        entry["note_id"] = note_id
        selected.append(entry)
    selected.sort(key=lambda n: n.get("created_at", ""), reverse=True)
    return selected
@register_tool(sandbox_execution=False)
def create_note(
    title: str,
    content: str,
    category: str = "general",
    tags: list[str] | None = None,
) -> dict[str, Any]:
    """Create a note in the in-memory store.

    Title and content must be non-blank and the category must be one of the
    known set.  Returns the short note id on success.
    """
    try:
        valid_categories = ["general", "findings", "methodology", "questions", "plan"]
        if not title or not title.strip():
            return {"success": False, "error": "Title cannot be empty", "note_id": None}
        if not content or not content.strip():
            return {"success": False, "error": "Content cannot be empty", "note_id": None}
        if category not in valid_categories:
            return {
                "success": False,
                "error": f"Invalid category. Must be one of: {', '.join(valid_categories)}",
                "note_id": None,
            }
        note_id = str(uuid.uuid4())[:5]
        now = datetime.now(UTC).isoformat()
        _notes_storage[note_id] = {
            "title": title.strip(),
            "content": content.strip(),
            "category": category,
            "tags": tags or [],
            "created_at": now,
            "updated_at": now,
        }
    except (ValueError, TypeError) as e:
        return {"success": False, "error": f"Failed to create note: {e}", "note_id": None}
    return {
        "success": True,
        "note_id": note_id,
        "message": f"Note '{title}' created successfully",
    }
@register_tool(sandbox_execution=False)
def list_notes(
    category: str | None = None,
    tags: list[str] | None = None,
    search: str | None = None,
) -> dict[str, Any]:
    """Return notes matching the optional category/tags/search filters."""
    try:
        matches = _filter_notes(category=category, tags=tags, search_query=search)
    except (ValueError, TypeError) as e:
        return {
            "success": False,
            "error": f"Failed to list notes: {e}",
            "notes": [],
            "total_count": 0,
        }
    return {
        "success": True,
        "notes": matches,
        "total_count": len(matches),
    }
@register_tool(sandbox_execution=False)
def update_note(
    note_id: str,
    title: str | None = None,
    content: str | None = None,
    tags: list[str] | None = None,
) -> dict[str, Any]:
    """Modify an existing note's title, content, and/or tags.

    Only fields that are provided change; ``updated_at`` is always bumped.
    """
    try:
        if note_id not in _notes_storage:
            return {"success": False, "error": f"Note with ID '{note_id}' not found"}
        note = _notes_storage[note_id]
        if title is not None:
            stripped_title = title.strip()
            if not stripped_title:
                return {"success": False, "error": "Title cannot be empty"}
            note["title"] = stripped_title
        if content is not None:
            stripped_content = content.strip()
            if not stripped_content:
                return {"success": False, "error": "Content cannot be empty"}
            note["content"] = stripped_content
        if tags is not None:
            note["tags"] = tags
        note["updated_at"] = datetime.now(UTC).isoformat()
        return {
            "success": True,
            "message": f"Note '{note['title']}' updated successfully",
        }
    except (ValueError, TypeError) as e:
        return {"success": False, "error": f"Failed to update note: {e}"}
@register_tool(sandbox_execution=False)
def delete_note(note_id: str) -> dict[str, Any]:
    """Remove a note from the in-memory store by id."""
    try:
        if note_id not in _notes_storage:
            return {"success": False, "error": f"Note with ID '{note_id}' not found"}
        removed = _notes_storage.pop(note_id)
    except (ValueError, TypeError) as e:
        return {"success": False, "error": f"Failed to delete note: {e}"}
    return {
        "success": True,
        "message": f"Note '{removed['title']}' deleted successfully",
    }
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/notes/notes_actions.py",
"license": "Apache License 2.0",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/tools/proxy/proxy_actions.py | from typing import Any, Literal
from strix.tools.registry import register_tool
RequestPart = Literal["request", "response"]
@register_tool
def list_requests(
    httpql_filter: str | None = None,
    start_page: int = 1,
    end_page: int = 1,
    page_size: int = 50,
    sort_by: Literal[
        "timestamp",
        "host",
        "method",
        "path",
        "status_code",
        "response_time",
        "response_size",
        "source",
    ] = "timestamp",
    sort_order: Literal["asc", "desc"] = "desc",
    scope_id: str | None = None,
) -> dict[str, Any]:
    """List captured proxy requests, optionally filtered, sorted, and paged."""
    from .proxy_manager import get_proxy_manager

    return get_proxy_manager().list_requests(
        httpql_filter, start_page, end_page, page_size, sort_by, sort_order, scope_id
    )
@register_tool
def view_request(
    request_id: str,
    part: RequestPart = "request",
    search_pattern: str | None = None,
    page: int = 1,
    page_size: int = 50,
) -> dict[str, Any]:
    """View a captured request or its response, with regex search or pagination."""
    from .proxy_manager import get_proxy_manager

    return get_proxy_manager().view_request(request_id, part, search_pattern, page, page_size)
@register_tool
def send_request(
    method: str,
    url: str,
    headers: dict[str, str] | None = None,
    body: str = "",
    timeout: int = 30,
) -> dict[str, Any]:
    """Send an HTTP request through the intercepting proxy so it is captured."""
    from .proxy_manager import get_proxy_manager

    return get_proxy_manager().send_simple_request(method, url, headers or {}, body, timeout)
@register_tool
def repeat_request(
    request_id: str,
    modifications: dict[str, Any] | None = None,
) -> dict[str, Any]:
    """Replay a captured request, optionally with modifications applied."""
    from .proxy_manager import get_proxy_manager

    return get_proxy_manager().repeat_request(request_id, modifications or {})
@register_tool
def scope_rules(
    action: Literal["get", "list", "create", "update", "delete"],
    allowlist: list[str] | None = None,
    denylist: list[str] | None = None,
    scope_id: str | None = None,
    scope_name: str | None = None,
) -> dict[str, Any]:
    """Manage proxy scope rules (CRUD over allow/deny host lists)."""
    from .proxy_manager import get_proxy_manager

    return get_proxy_manager().scope_rules(action, allowlist, denylist, scope_id, scope_name)
@register_tool
def list_sitemap(
    scope_id: str | None = None,
    parent_id: str | None = None,
    depth: Literal["DIRECT", "ALL"] = "DIRECT",
    page: int = 1,
) -> dict[str, Any]:
    """List sitemap entries, optionally under a parent node and within a scope."""
    from .proxy_manager import get_proxy_manager

    return get_proxy_manager().list_sitemap(scope_id, parent_id, depth, page)
@register_tool
def view_sitemap_entry(
    entry_id: str,
) -> dict[str, Any]:
    """Fetch a single sitemap entry by id."""
    from .proxy_manager import get_proxy_manager

    return get_proxy_manager().view_sitemap_entry(entry_id)
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/proxy/proxy_actions.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
usestrix/strix:strix/tools/proxy/proxy_manager.py | import base64
import os
import re
import time
from typing import TYPE_CHECKING, Any
from urllib.parse import parse_qs, urlencode, urlparse, urlunparse
import requests
from gql import Client, gql
from gql.transport.exceptions import TransportQueryError
from gql.transport.requests import RequestsHTTPTransport
from requests.exceptions import ProxyError, RequestException, Timeout
if TYPE_CHECKING:
from collections.abc import Callable
# Caido listens on this fixed port inside the container; the GraphQL API and
# the intercepting proxy are both reached through it (see ProxyManager.__init__).
CAIDO_PORT = 48080  # Fixed port inside container
class ProxyManager:
def __init__(self, auth_token: str | None = None):
host = "127.0.0.1"
self.base_url = f"http://{host}:{CAIDO_PORT}/graphql"
self.proxies = {
"http": f"http://{host}:{CAIDO_PORT}",
"https": f"http://{host}:{CAIDO_PORT}",
}
self.auth_token = auth_token or os.getenv("CAIDO_API_TOKEN")
def _get_client(self) -> Client:
transport = RequestsHTTPTransport(
url=self.base_url, headers={"Authorization": f"Bearer {self.auth_token}"}
)
return Client(transport=transport, fetch_schema_from_transport=False)
    def list_requests(
        self,
        httpql_filter: str | None = None,
        start_page: int = 1,
        end_page: int = 1,
        page_size: int = 50,
        sort_by: str = "timestamp",
        sort_order: str = "desc",
        scope_id: str | None = None,
    ) -> dict[str, Any]:
        """List captured requests from Caido over an inclusive page range.

        The page range is converted to a single offset/limit window, and the
        friendly ``sort_by`` names are mapped to Caido's order-enum values
        (unknown names fall back to CREATED_AT).  On failure a dict with an
        ``error`` key and an empty request list is returned instead of raising.
        """
        # Inclusive page range -> one offset/limit window for the API.
        offset = (start_page - 1) * page_size
        limit = (end_page - start_page + 1) * page_size
        # Friendly sort keys -> Caido RequestResponseOrderInput enum names.
        sort_mapping = {
            "timestamp": "CREATED_AT",
            "host": "HOST",
            "method": "METHOD",
            "path": "PATH",
            "status_code": "RESP_STATUS_CODE",
            "response_time": "RESP_ROUNDTRIP_TIME",
            "response_size": "RESP_LENGTH",
            "source": "SOURCE",
        }
        query = gql("""
            query GetRequests(
                $limit: Int, $offset: Int, $filter: HTTPQL,
                $order: RequestResponseOrderInput, $scopeId: ID
            ) {
                requestsByOffset(
                    limit: $limit, offset: $offset, filter: $filter,
                    order: $order, scopeId: $scopeId
                ) {
                    edges {
                        node {
                            id method host path query createdAt length isTls port
                            source alteration fileExtension
                            response { id statusCode length roundtripTime createdAt }
                        }
                    }
                    count { value }
                }
            }
        """)
        variables = {
            "limit": limit,
            "offset": offset,
            "filter": httpql_filter,
            "order": {
                "by": sort_mapping.get(sort_by, "CREATED_AT"),
                "ordering": sort_order.upper(),
            },
            "scopeId": scope_id,
        }
        try:
            result = self._get_client().execute(query, variable_values=variables)
            data = result.get("requestsByOffset", {})
            nodes = [edge["node"] for edge in data.get("edges", [])]
            # count may be null in the response; treat that as zero results.
            count_data = data.get("count") or {}
            return {
                "requests": nodes,
                "total_count": count_data.get("value", 0),
                "start_page": start_page,
                "end_page": end_page,
                "page_size": page_size,
                "offset": offset,
                "returned_count": len(nodes),
                "sort_by": sort_by,
                "sort_order": sort_order,
            }
        except (TransportQueryError, ValueError, KeyError) as e:
            return {"requests": [], "total_count": 0, "error": f"Error fetching requests: {e}"}
    def view_request(
        self,
        request_id: str,
        part: str = "request",
        search_pattern: str | None = None,
        page: int = 1,
        page_size: int = 50,
    ) -> dict[str, Any]:
        """Fetch one captured request (or its response) and decode its raw body.

        The API returns the raw payload as base64; it is decoded to UTF-8 with
        replacement characters.  When ``search_pattern`` is given the decoded
        content is regex-searched, otherwise it is paginated.  Errors are
        reported as ``{"error": ...}`` rather than raised.
        """
        queries = {
            "request": """query GetRequest($id: ID!) {
                request(id: $id) {
                    id method host path query createdAt length isTls port
                    source alteration edited raw
                }
            }""",
            "response": """query GetRequest($id: ID!) {
                request(id: $id) {
                    id response {
                        id statusCode length roundtripTime createdAt raw
                    }
                }
            }""",
        }
        if part not in queries:
            return {"error": f"Invalid part '{part}'. Use 'request' or 'response'"}
        try:
            result = self._get_client().execute(
                gql(queries[part]), variable_values={"id": request_id}
            )
            request_data = result.get("request", {})
            if not request_data:
                return {"error": f"Request {request_id} not found"}
            if part == "request":
                raw_content = request_data.get("raw")
            else:
                # The response sub-object may be null for unanswered requests.
                response_data = request_data.get("response") or {}
                raw_content = response_data.get("raw")
            if not raw_content:
                return {"error": "No content available"}
            content = base64.b64decode(raw_content).decode("utf-8", errors="replace")
            # Replace the base64 blob with the decoded text in the payload.
            if part == "response":
                request_data["response"]["raw"] = content
            else:
                request_data["raw"] = content
            return (
                self._search_content(request_data, content, search_pattern)
                if search_pattern
                else self._paginate_content(request_data, content, page, page_size)
            )
        except (TransportQueryError, ValueError, KeyError, UnicodeDecodeError) as e:
            return {"error": f"Failed to view request: {e}"}
def _search_content(
self, request_data: dict[str, Any], content: str, pattern: str
) -> dict[str, Any]:
try:
regex = re.compile(pattern, re.IGNORECASE | re.MULTILINE | re.DOTALL)
matches = []
for match in regex.finditer(content):
start, end = match.start(), match.end()
context_size = 120
before = re.sub(r"\s+", " ", content[max(0, start - context_size) : start].strip())[
-100:
]
after = re.sub(r"\s+", " ", content[end : end + context_size].strip())[:100]
matches.append(
{"match": match.group(), "before": before, "after": after, "position": start}
)
if len(matches) >= 20:
break
return {
"id": request_data.get("id"),
"matches": matches,
"total_matches": len(matches),
"search_pattern": pattern,
"truncated": len(matches) >= 20,
}
except re.error as e:
return {"error": f"Invalid regex: {e}"}
def _paginate_content(
self, request_data: dict[str, Any], content: str, page: int, page_size: int
) -> dict[str, Any]:
display_lines = []
for line in content.split("\n"):
if len(line) <= 80:
display_lines.append(line)
else:
display_lines.extend(
[
line[i : i + 80] + (" \\" if i + 80 < len(line) else "")
for i in range(0, len(line), 80)
]
)
total_lines = len(display_lines)
total_pages = (total_lines + page_size - 1) // page_size
page = max(1, min(page, total_pages))
start_line = (page - 1) * page_size
end_line = min(total_lines, start_line + page_size)
return {
"id": request_data.get("id"),
"content": "\n".join(display_lines[start_line:end_line]),
"page": page,
"total_pages": total_pages,
"showing_lines": f"{start_line + 1}-{end_line} of {total_lines}",
"has_more": page < total_pages,
}
def send_simple_request(
self,
method: str,
url: str,
headers: dict[str, str] | None = None,
body: str = "",
timeout: int = 30,
) -> dict[str, Any]:
if headers is None:
headers = {}
try:
start_time = time.time()
response = requests.request(
method=method,
url=url,
headers=headers,
data=body or None,
proxies=self.proxies,
timeout=timeout,
verify=False,
)
response_time = int((time.time() - start_time) * 1000)
body_content = response.text
if len(body_content) > 10000:
body_content = body_content[:10000] + "\n... [truncated]"
return {
"status_code": response.status_code,
"headers": dict(response.headers),
"body": body_content,
"response_time_ms": response_time,
"url": response.url,
"message": (
"Request sent through proxy - check list_requests() for captured traffic"
),
}
except (RequestException, ProxyError, Timeout) as e:
return {"error": f"Request failed: {type(e).__name__}", "details": str(e), "url": url}
def repeat_request(
self, request_id: str, modifications: dict[str, Any] | None = None
) -> dict[str, Any]:
if modifications is None:
modifications = {}
original = self.view_request(request_id, "request")
if "error" in original:
return {"error": f"Could not retrieve original request: {original['error']}"}
raw_content = original.get("content", "")
if not raw_content:
return {"error": "No raw request content found"}
request_components = self._parse_http_request(raw_content)
if "error" in request_components:
return request_components
full_url = self._build_full_url(request_components, modifications)
if "error" in full_url:
return full_url
modified_request = self._apply_modifications(
request_components, modifications, full_url["url"]
)
return self._send_modified_request(modified_request, request_id, modifications)
def _parse_http_request(self, raw_content: str) -> dict[str, Any]:
lines = raw_content.split("\n")
request_line = lines[0].strip().split(" ")
if len(request_line) < 2:
return {"error": "Invalid request line format"}
method, url_path = request_line[0], request_line[1]
headers = {}
body_start = 0
for i, line in enumerate(lines[1:], 1):
if line.strip() == "":
body_start = i + 1
break
if ":" in line:
key, value = line.split(":", 1)
headers[key.strip()] = value.strip()
body = "\n".join(lines[body_start:]).strip() if body_start < len(lines) else ""
return {"method": method, "url_path": url_path, "headers": headers, "body": body}
def _build_full_url(
self, components: dict[str, Any], modifications: dict[str, Any]
) -> dict[str, Any]:
headers = components["headers"]
host = headers.get("Host", "")
if not host:
return {"error": "No Host header found"}
protocol = (
"https" if ":443" in host or "https" in headers.get("Referer", "").lower() else "http"
)
full_url = f"{protocol}://{host}{components['url_path']}"
if "url" in modifications:
full_url = modifications["url"]
return {"url": full_url}
def _apply_modifications(
self, components: dict[str, Any], modifications: dict[str, Any], full_url: str
) -> dict[str, Any]:
headers = components["headers"].copy()
body = components["body"]
final_url = full_url
if "params" in modifications:
parsed = urlparse(final_url)
params = {k: v[0] if v else "" for k, v in parse_qs(parsed.query).items()}
params.update(modifications["params"])
final_url = urlunparse(parsed._replace(query=urlencode(params)))
if "headers" in modifications:
headers.update(modifications["headers"])
if "body" in modifications:
body = modifications["body"]
if "cookies" in modifications:
cookies = {}
if headers.get("Cookie"):
for cookie in headers["Cookie"].split(";"):
if "=" in cookie:
k, v = cookie.split("=", 1)
cookies[k.strip()] = v.strip()
cookies.update(modifications["cookies"])
headers["Cookie"] = "; ".join([f"{k}={v}" for k, v in cookies.items()])
return {
"method": components["method"],
"url": final_url,
"headers": headers,
"body": body,
}
def _send_modified_request(
self, request_data: dict[str, Any], request_id: str, modifications: dict[str, Any]
) -> dict[str, Any]:
try:
start_time = time.time()
response = requests.request(
method=request_data["method"],
url=request_data["url"],
headers=request_data["headers"],
data=request_data["body"] or None,
proxies=self.proxies,
timeout=30,
verify=False,
)
response_time = int((time.time() - start_time) * 1000)
response_body = response.text
truncated = len(response_body) > 10000
if truncated:
response_body = response_body[:10000] + "\n... [truncated]"
return {
"status_code": response.status_code,
"status_text": response.reason,
"headers": {
k: v
for k, v in response.headers.items()
if k.lower()
in ["content-type", "content-length", "server", "set-cookie", "location"]
},
"body": response_body,
"body_truncated": truncated,
"body_size": len(response.content),
"response_time_ms": response_time,
"url": response.url,
"original_request_id": request_id,
"modifications_applied": modifications,
"request": {
"method": request_data["method"],
"url": request_data["url"],
"headers": request_data["headers"],
"has_body": bool(request_data["body"]),
},
}
except ProxyError as e:
return {
"error": "Proxy connection failed - is Caido running?",
"details": str(e),
"original_request_id": request_id,
}
except (RequestException, Timeout) as e:
return {
"error": f"Failed to repeat request: {type(e).__name__}",
"details": str(e),
"original_request_id": request_id,
}
def _handle_scope_list(self) -> dict[str, Any]:
result = self._get_client().execute(
gql("query { scopes { id name allowlist denylist indexed } }")
)
scopes = result.get("scopes", [])
return {"scopes": scopes, "count": len(scopes)}
def _handle_scope_get(self, scope_id: str | None) -> dict[str, Any]:
if not scope_id:
return self._handle_scope_list()
result = self._get_client().execute(
gql(
"query GetScope($id: ID!) { scope(id: $id) { id name allowlist denylist indexed } }"
),
variable_values={"id": scope_id},
)
scope = result.get("scope")
if not scope:
return {"error": f"Scope {scope_id} not found"}
return {"scope": scope}
def _handle_scope_create(
self, scope_name: str, allowlist: list[str] | None, denylist: list[str] | None
) -> dict[str, Any]:
if not scope_name:
return {"error": "scope_name required for create"}
mutation = gql("""
mutation CreateScope($input: CreateScopeInput!) {
createScope(input: $input) {
scope { id name allowlist denylist indexed }
error {
... on InvalidGlobTermsUserError { code terms }
... on OtherUserError { code }
}
}
}
""")
result = self._get_client().execute(
mutation,
variable_values={
"input": {
"name": scope_name,
"allowlist": allowlist or [],
"denylist": denylist or [],
}
},
)
payload = result.get("createScope", {})
if payload.get("error"):
error = payload["error"]
return {"error": f"Invalid glob patterns: {error.get('terms', error.get('code'))}"}
return {"scope": payload.get("scope"), "message": "Scope created successfully"}
def _handle_scope_update(
self,
scope_id: str,
scope_name: str,
allowlist: list[str] | None,
denylist: list[str] | None,
) -> dict[str, Any]:
if not scope_id or not scope_name:
return {"error": "scope_id and scope_name required"}
mutation = gql("""
mutation UpdateScope($id: ID!, $input: UpdateScopeInput!) {
updateScope(id: $id, input: $input) {
scope { id name allowlist denylist indexed }
error {
... on InvalidGlobTermsUserError { code terms }
... on OtherUserError { code }
}
}
}
""")
result = self._get_client().execute(
mutation,
variable_values={
"id": scope_id,
"input": {
"name": scope_name,
"allowlist": allowlist or [],
"denylist": denylist or [],
},
},
)
payload = result.get("updateScope", {})
if payload.get("error"):
error = payload["error"]
return {"error": f"Invalid glob patterns: {error.get('terms', error.get('code'))}"}
return {"scope": payload.get("scope"), "message": "Scope updated successfully"}
def _handle_scope_delete(self, scope_id: str) -> dict[str, Any]:
if not scope_id:
return {"error": "scope_id required for delete"}
result = self._get_client().execute(
gql("mutation DeleteScope($id: ID!) { deleteScope(id: $id) { deletedId } }"),
variable_values={"id": scope_id},
)
payload = result.get("deleteScope", {})
if not payload.get("deletedId"):
return {"error": f"Failed to delete scope {scope_id}"}
return {"message": f"Scope {scope_id} deleted", "deletedId": payload["deletedId"]}
def scope_rules(
self,
action: str,
allowlist: list[str] | None = None,
denylist: list[str] | None = None,
scope_id: str | None = None,
scope_name: str | None = None,
) -> dict[str, Any]:
handlers: dict[str, Callable[[], dict[str, Any]]] = {
"list": self._handle_scope_list,
"get": lambda: self._handle_scope_get(scope_id),
"create": lambda: (
{"error": "scope_name required for create"}
if not scope_name
else self._handle_scope_create(scope_name, allowlist, denylist)
),
"update": lambda: (
{"error": "scope_id and scope_name required"}
if not scope_id or not scope_name
else self._handle_scope_update(scope_id, scope_name, allowlist, denylist)
),
"delete": lambda: (
{"error": "scope_id required for delete"}
if not scope_id
else self._handle_scope_delete(scope_id)
),
}
handler = handlers.get(action)
if not handler:
return {
"error": f"Unsupported action: {action}. Use 'get', 'list', 'create', "
f"'update', or 'delete'"
}
try:
result = handler()
except (TransportQueryError, ValueError, KeyError) as e:
return {"error": f"Scope operation failed: {e}"}
else:
return result
def list_sitemap(
self,
scope_id: str | None = None,
parent_id: str | None = None,
depth: str = "DIRECT",
page: int = 1,
page_size: int = 30,
) -> dict[str, Any]:
try:
skip_count = (page - 1) * page_size
if parent_id:
query = gql("""
query GetSitemapDescendants($parentId: ID!, $depth: SitemapDescendantsDepth!) {
sitemapDescendantEntries(parentId: $parentId, depth: $depth) {
edges {
node {
id kind label hasDescendants
request { method path response { statusCode } }
}
}
count { value }
}
}
""")
result = self._get_client().execute(
query, variable_values={"parentId": parent_id, "depth": depth}
)
data = result.get("sitemapDescendantEntries", {})
else:
query = gql("""
query GetSitemapRoots($scopeId: ID) {
sitemapRootEntries(scopeId: $scopeId) {
edges { node {
id kind label hasDescendants
metadata { ... on SitemapEntryMetadataDomain { isTls port } }
request { method path response { statusCode } }
} }
count { value }
}
}
""")
result = self._get_client().execute(query, variable_values={"scopeId": scope_id})
data = result.get("sitemapRootEntries", {})
all_nodes = [edge["node"] for edge in data.get("edges", [])]
count_data = data.get("count") or {}
total_count = count_data.get("value", 0)
paginated_nodes = all_nodes[skip_count : skip_count + page_size]
cleaned_nodes = []
for node in paginated_nodes:
cleaned = {
"id": node["id"],
"kind": node["kind"],
"label": node["label"],
"hasDescendants": node["hasDescendants"],
}
if node.get("metadata") and (
node["metadata"].get("isTls") is not None or node["metadata"].get("port")
):
cleaned["metadata"] = node["metadata"]
if node.get("request"):
req = node["request"]
cleaned_req = {}
if req.get("method"):
cleaned_req["method"] = req["method"]
if req.get("path"):
cleaned_req["path"] = req["path"]
response_data = req.get("response") or {}
if response_data.get("statusCode"):
cleaned_req["status"] = response_data["statusCode"]
if cleaned_req:
cleaned["request"] = cleaned_req
cleaned_nodes.append(cleaned)
total_pages = (total_count + page_size - 1) // page_size
return {
"entries": cleaned_nodes,
"page": page,
"page_size": page_size,
"total_pages": total_pages,
"total_count": total_count,
"has_more": page < total_pages,
"showing": (
f"{skip_count + 1}-{min(skip_count + page_size, total_count)} of {total_count}"
),
}
except (TransportQueryError, ValueError, KeyError) as e:
return {"error": f"Failed to fetch sitemap: {e}"}
def _process_sitemap_metadata(self, node: dict[str, Any]) -> dict[str, Any]:
cleaned = {
"id": node["id"],
"kind": node["kind"],
"label": node["label"],
"hasDescendants": node["hasDescendants"],
}
if node.get("metadata") and (
node["metadata"].get("isTls") is not None or node["metadata"].get("port")
):
cleaned["metadata"] = node["metadata"]
return cleaned
def _process_sitemap_request(self, req: dict[str, Any]) -> dict[str, Any] | None:
cleaned_req = {}
if req.get("method"):
cleaned_req["method"] = req["method"]
if req.get("path"):
cleaned_req["path"] = req["path"]
response_data = req.get("response") or {}
if response_data.get("statusCode"):
cleaned_req["status"] = response_data["statusCode"]
return cleaned_req if cleaned_req else None
def _process_sitemap_response(self, resp: dict[str, Any]) -> dict[str, Any]:
cleaned_resp = {}
if resp.get("statusCode"):
cleaned_resp["status"] = resp["statusCode"]
if resp.get("length"):
cleaned_resp["size"] = resp["length"]
if resp.get("roundtripTime"):
cleaned_resp["time_ms"] = resp["roundtripTime"]
return cleaned_resp
def view_sitemap_entry(self, entry_id: str) -> dict[str, Any]:
try:
query = gql("""
query GetSitemapEntry($id: ID!) {
sitemapEntry(id: $id) {
id kind label hasDescendants
metadata { ... on SitemapEntryMetadataDomain { isTls port } }
request { method path response { statusCode length roundtripTime } }
requests(first: 30, order: {by: CREATED_AT, ordering: DESC}) {
edges { node { method path response { statusCode length } } }
count { value }
}
}
}
""")
result = self._get_client().execute(query, variable_values={"id": entry_id})
entry = result.get("sitemapEntry")
if not entry:
return {"error": f"Sitemap entry {entry_id} not found"}
cleaned = self._process_sitemap_metadata(entry)
if entry.get("request"):
req = entry["request"]
cleaned_req = {}
if req.get("method"):
cleaned_req["method"] = req["method"]
if req.get("path"):
cleaned_req["path"] = req["path"]
if req.get("response"):
cleaned_req["response"] = self._process_sitemap_response(req["response"])
if cleaned_req:
cleaned["request"] = cleaned_req
requests_data = entry.get("requests", {})
request_nodes = [edge["node"] for edge in requests_data.get("edges", [])]
cleaned_requests = [
req
for req in (self._process_sitemap_request(node) for node in request_nodes)
if req is not None
]
count_data = requests_data.get("count") or {}
cleaned["related_requests"] = {
"requests": cleaned_requests,
"total_count": count_data.get("value", 0),
"showing": f"Latest {len(cleaned_requests)} requests",
}
return {"entry": cleaned} if cleaned else {"error": "Failed to process sitemap entry"} # noqa: TRY300
except (TransportQueryError, ValueError, KeyError) as e:
return {"error": f"Failed to fetch sitemap entry: {e}"}
    def close(self) -> None:
        """Shutdown hook; currently a no-op (no resources held here)."""
        pass
# Module-level singleton; created lazily on first get_proxy_manager() call.
_PROXY_MANAGER: ProxyManager | None = None


def get_proxy_manager() -> ProxyManager:
    """Return the process-wide ProxyManager, creating it on first use."""
    global _PROXY_MANAGER  # noqa: PLW0603
    if _PROXY_MANAGER is None:
        _PROXY_MANAGER = ProxyManager()
    return _PROXY_MANAGER
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/proxy/proxy_manager.py",
"license": "Apache License 2.0",
"lines": 684,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/tools/python/python_actions.py | from typing import Any, Literal
from strix.tools.registry import register_tool
# Actions accepted by python_action().
PythonAction = Literal["new_session", "execute", "close", "list_sessions"]
@register_tool
def python_action(
action: PythonAction,
code: str | None = None,
timeout: int = 30,
session_id: str | None = None,
) -> dict[str, Any]:
from .python_manager import get_python_session_manager
def _validate_code(action_name: str, code: str | None) -> None:
if not code:
raise ValueError(f"code parameter is required for {action_name} action")
def _validate_action(action_name: str) -> None:
raise ValueError(f"Unknown action: {action_name}")
manager = get_python_session_manager()
try:
match action:
case "new_session":
return manager.create_session(session_id, code, timeout)
case "execute":
_validate_code(action, code)
assert code is not None
return manager.execute_code(session_id, code, timeout)
case "close":
return manager.close_session(session_id)
case "list_sessions":
return manager.list_sessions()
case _:
_validate_action(action) # type: ignore[unreachable]
except (ValueError, RuntimeError) as e:
return {"stderr": str(e), "session_id": session_id, "stdout": "", "is_running": False}
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/python/python_actions.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
usestrix/strix:strix/tools/python/python_instance.py | import io
import sys
import threading
from typing import Any
from IPython.core.interactiveshell import InteractiveShell
# Caps applied to captured output so tool responses stay bounded.
MAX_STDOUT_LENGTH = 10_000
MAX_STDERR_LENGTH = 5_000
class PythonInstance:
    """One persistent IPython shell session with captured output and timeouts.

    Code runs on a worker thread so a hung cell can be abandoned after a
    timeout; stdout/stderr are redirected during execution and truncated so
    tool responses stay bounded.
    """

    def __init__(self, session_id: str) -> None:
        """Create the shell in /workspace and expose proxy helper functions."""
        self.session_id = session_id
        self.is_running = True
        # Serializes execute_code() calls; the shell is shared mutable state.
        self._execution_lock = threading.Lock()
        import os
        os.chdir("/workspace")
        self.shell = InteractiveShell()
        self.shell.init_completer()
        self.shell.init_history()
        self.shell.init_logger()
        self._setup_proxy_functions()

    def _setup_proxy_functions(self) -> None:
        """Inject the proxy tool functions into the shell's user namespace.

        Best-effort: silently skipped when the proxy tools cannot be imported.
        """
        try:
            from strix.tools.proxy import proxy_actions
            proxy_functions = [
                "list_requests",
                "list_sitemap",
                "repeat_request",
                "scope_rules",
                "send_request",
                "view_request",
                "view_sitemap_entry",
            ]
            proxy_dict = {name: getattr(proxy_actions, name) for name in proxy_functions}
            self.shell.user_ns.update(proxy_dict)
        except ImportError:
            pass

    def _validate_session(self) -> dict[str, Any] | None:
        """Return an error payload when the session is closed, else None."""
        if not self.is_running:
            return {
                "session_id": self.session_id,
                "stdout": "",
                "stderr": "Session is not running",
                "result": None,
            }
        return None

    def _truncate_output(self, content: str, max_length: int, suffix: str) -> str:
        """Clip *content* to *max_length* characters, appending *suffix* when clipped."""
        if len(content) > max_length:
            return content[:max_length] + suffix
        return content

    def _format_execution_result(
        self, execution_result: Any, stdout_content: str, stderr_content: str
    ) -> dict[str, Any]:
        """Build the result dict from the shell's execution result and captured streams.

        The cell's final expression value (``execution_result.result``) is
        appended to stdout as its repr; both streams are truncated.
        """
        stdout = self._truncate_output(
            stdout_content, MAX_STDOUT_LENGTH, "... [stdout truncated at 10k chars]"
        )
        if execution_result.result is not None:
            if stdout and not stdout.endswith("\n"):
                stdout += "\n"
            result_repr = repr(execution_result.result)
            result_repr = self._truncate_output(
                result_repr, MAX_STDOUT_LENGTH, "... [result truncated at 10k chars]"
            )
            stdout += result_repr
        # Re-truncate in case stdout plus the appended repr exceeds the cap.
        stdout = self._truncate_output(
            stdout, MAX_STDOUT_LENGTH, "... [output truncated at 10k chars]"
        )
        stderr_content = stderr_content if stderr_content else ""
        stderr_content = self._truncate_output(
            stderr_content, MAX_STDERR_LENGTH, "... [stderr truncated at 5k chars]"
        )
        # Surface errors recorded on the result object that produced no stderr.
        if (
            execution_result.error_before_exec or execution_result.error_in_exec
        ) and not stderr_content:
            stderr_content = "Execution error occurred"
        return {
            "session_id": self.session_id,
            "stdout": stdout,
            "stderr": stderr_content,
            "result": repr(execution_result.result)
            if execution_result.result is not None
            else None,
        }

    def _handle_execution_error(self, error: BaseException) -> dict[str, Any]:
        """Convert an exception into the standard result dict (stderr only)."""
        error_msg = str(error)
        error_msg = self._truncate_output(
            error_msg, MAX_STDERR_LENGTH, "... [error truncated at 5k chars]"
        )
        return {
            "session_id": self.session_id,
            "stdout": "",
            "stderr": error_msg,
            "result": None,
        }

    def execute_code(self, code: str, timeout: int = 30) -> dict[str, Any]:
        """Run *code* in the shell, enforcing *timeout* seconds.

        Execution happens on a daemon thread; on timeout the thread cannot be
        killed and is abandoned, and the streams are restored here instead.
        """
        session_error = self._validate_session()
        if session_error:
            return session_error
        with self._execution_lock:
            result_container: dict[str, Any] = {}
            stdout_capture = io.StringIO()
            stderr_capture = io.StringIO()
            cancelled = threading.Event()
            old_stdout, old_stderr = sys.stdout, sys.stderr

            def _run_code() -> None:
                # Runs on the worker thread; communicates via result_container.
                try:
                    sys.stdout = stdout_capture
                    sys.stderr = stderr_capture
                    execution_result = self.shell.run_cell(code, silent=False, store_history=True)
                    result_container["execution_result"] = execution_result
                    result_container["stdout"] = stdout_capture.getvalue()
                    result_container["stderr"] = stderr_capture.getvalue()
                except (KeyboardInterrupt, SystemExit) as e:
                    result_container["error"] = e
                except Exception as e:  # noqa: BLE001
                    result_container["error"] = e
                finally:
                    # After a timeout the main thread has already restored the
                    # streams; don't clobber them from the abandoned worker.
                    if not cancelled.is_set():
                        sys.stdout = old_stdout
                        sys.stderr = old_stderr

            exec_thread = threading.Thread(target=_run_code, daemon=True)
            exec_thread.start()
            exec_thread.join(timeout=timeout)
            if exec_thread.is_alive():
                cancelled.set()
                sys.stdout, sys.stderr = old_stdout, old_stderr
                return self._handle_execution_error(
                    TimeoutError(f"Code execution timed out after {timeout} seconds")
                )
            if "error" in result_container:
                return self._handle_execution_error(result_container["error"])
            if "execution_result" in result_container:
                return self._format_execution_result(
                    result_container["execution_result"],
                    result_container.get("stdout", ""),
                    result_container.get("stderr", ""),
                )
            return self._handle_execution_error(RuntimeError("Unknown execution error"))

    def close(self) -> None:
        """Mark the session stopped and reset the shell's namespace."""
        self.is_running = False
        self.shell.reset(new_session=False)

    def is_alive(self) -> bool:
        """Whether the session is still accepting code."""
        return self.is_running
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/python/python_instance.py",
"license": "Apache License 2.0",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/tools/python/python_manager.py | import atexit
import contextlib
import threading
from typing import Any
from strix.tools.context import get_current_agent_id
from .python_instance import PythonInstance
class PythonSessionManager:
    """Registry of PythonInstance sessions, partitioned per agent.

    Each agent (identified via get_current_agent_id()) gets an isolated
    session-id -> PythonInstance mapping; mutations are guarded by a single
    lock, and all sessions are closed at interpreter exit.
    """

    def __init__(self) -> None:
        # Maps agent_id -> {session_id -> PythonInstance}.
        self._sessions_by_agent: dict[str, dict[str, PythonInstance]] = {}
        self._lock = threading.Lock()
        self.default_session_id = "default"
        self._register_cleanup_handlers()

    def _get_agent_sessions(self) -> dict[str, PythonInstance]:
        """Return (creating on demand) the current agent's session mapping."""
        agent_id = get_current_agent_id()
        with self._lock:
            if agent_id not in self._sessions_by_agent:
                self._sessions_by_agent[agent_id] = {}
            return self._sessions_by_agent[agent_id]

    def create_session(
        self, session_id: str | None = None, initial_code: str | None = None, timeout: int = 30
    ) -> dict[str, Any]:
        """Create a session (optionally running *initial_code* in it).

        Raises ValueError when a session with the same id already exists.
        """
        if session_id is None:
            session_id = self.default_session_id
        sessions = self._get_agent_sessions()
        with self._lock:
            if session_id in sessions:
                raise ValueError(f"Python session '{session_id}' already exists")
            session = PythonInstance(session_id)
            sessions[session_id] = session
            if initial_code:
                result = session.execute_code(initial_code, timeout)
                result["message"] = (
                    f"Python session '{session_id}' created successfully with initial code"
                )
            else:
                result = {
                    "session_id": session_id,
                    "message": f"Python session '{session_id}' created successfully",
                }
            return result

    def execute_code(
        self, session_id: str | None = None, code: str | None = None, timeout: int = 30
    ) -> dict[str, Any]:
        """Run *code* in an existing session; raises ValueError when missing."""
        if session_id is None:
            session_id = self.default_session_id
        if not code:
            raise ValueError("No code provided for execution")
        sessions = self._get_agent_sessions()
        with self._lock:
            if session_id not in sessions:
                raise ValueError(f"Python session '{session_id}' not found")
            session = sessions[session_id]
            result = session.execute_code(code, timeout)
            result["message"] = f"Code executed in session '{session_id}'"
            return result

    def close_session(self, session_id: str | None = None) -> dict[str, Any]:
        """Close and remove a session; raises ValueError when missing."""
        if session_id is None:
            session_id = self.default_session_id
        sessions = self._get_agent_sessions()
        with self._lock:
            if session_id not in sessions:
                raise ValueError(f"Python session '{session_id}' not found")
            session = sessions.pop(session_id)
            session.close()
            return {
                "session_id": session_id,
                "message": f"Python session '{session_id}' closed successfully",
                "is_running": False,
            }

    def list_sessions(self) -> dict[str, Any]:
        """Summarize the current agent's sessions (running/alive flags)."""
        sessions = self._get_agent_sessions()
        with self._lock:
            session_info = {}
            for sid, session in sessions.items():
                session_info[sid] = {
                    "is_running": session.is_running,
                    "is_alive": session.is_alive(),
                }
            return {"sessions": session_info, "total_count": len(session_info)}

    def cleanup_agent(self, agent_id: str) -> None:
        """Close and forget every session belonging to *agent_id*.

        Close failures are deliberately suppressed (best-effort teardown).
        """
        with self._lock:
            sessions = self._sessions_by_agent.pop(agent_id, {})
            for session in sessions.values():
                with contextlib.suppress(Exception):
                    session.close()

    def cleanup_dead_sessions(self) -> None:
        """Drop sessions whose instances report not alive, across all agents."""
        with self._lock:
            for sessions in self._sessions_by_agent.values():
                dead_sessions = []
                for sid, session in sessions.items():
                    if not session.is_alive():
                        dead_sessions.append(sid)
                for sid in dead_sessions:
                    session = sessions.pop(sid)
                    with contextlib.suppress(Exception):
                        session.close()

    def close_all_sessions(self) -> None:
        """Close every session for every agent (registered as an atexit hook)."""
        with self._lock:
            all_sessions: list[PythonInstance] = []
            for sessions in self._sessions_by_agent.values():
                all_sessions.extend(sessions.values())
            self._sessions_by_agent.clear()
            for session in all_sessions:
                with contextlib.suppress(Exception):
                    session.close()

    def _register_cleanup_handlers(self) -> None:
        """Ensure sessions are torn down when the interpreter exits."""
        atexit.register(self.close_all_sessions)
# Shared manager instance; all agents' sessions live here.
_python_session_manager = PythonSessionManager()


def get_python_session_manager() -> PythonSessionManager:
    """Return the module-wide PythonSessionManager singleton."""
    return _python_session_manager
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/python/python_manager.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/tools/registry.py | import inspect
import logging
import os
from collections.abc import Callable
from functools import wraps
from inspect import signature
from pathlib import Path
from typing import Any
import defusedxml.ElementTree as DefusedET
from strix.utils.resource_paths import get_strix_resource_path
# Global registry state, populated by @register_tool at import time.
tools: list[dict[str, Any]] = []
_tools_by_name: dict[str, Callable[..., Any]] = {}
_tool_param_schemas: dict[str, dict[str, Any]] = {}
logger = logging.getLogger(__name__)
class ImplementedInClientSideOnlyError(Exception):
    """Raised when a tool exists only on the client side and cannot run here."""

    def __init__(
        self,
        message: str = "This tool is implemented in the client side only",
    ) -> None:
        super().__init__(message)
        self.message = message
def _process_dynamic_content(content: str) -> str:
if "{{DYNAMIC_SKILLS_DESCRIPTION}}" in content:
try:
from strix.skills import generate_skills_description
skills_description = generate_skills_description()
content = content.replace("{{DYNAMIC_SKILLS_DESCRIPTION}}", skills_description)
except ImportError:
logger.warning("Could not import skills utilities for dynamic schema generation")
content = content.replace(
"{{DYNAMIC_SKILLS_DESCRIPTION}}",
"List of skills to load for this agent (max 5). Skill discovery failed.",
)
return content
def _load_xml_schema(path: Path) -> Any:
if not path.exists():
return None
try:
content = path.read_text(encoding="utf-8")
content = _process_dynamic_content(content)
start_tag = '<tool name="'
end_tag = "</tool>"
tools_dict = {}
pos = 0
while True:
start_pos = content.find(start_tag, pos)
if start_pos == -1:
break
name_start = start_pos + len(start_tag)
name_end = content.find('"', name_start)
if name_end == -1:
break
tool_name = content[name_start:name_end]
end_pos = content.find(end_tag, name_end)
if end_pos == -1:
break
end_pos += len(end_tag)
tool_element = content[start_pos:end_pos]
tools_dict[tool_name] = tool_element
pos = end_pos
if pos >= len(content):
break
except (IndexError, ValueError, UnicodeError) as e:
logger.warning(f"Error loading schema file {path}: {e}")
return None
else:
return tools_dict
def _parse_param_schema(tool_xml: str) -> dict[str, Any]:
params: set[str] = set()
required: set[str] = set()
params_start = tool_xml.find("<parameters>")
params_end = tool_xml.find("</parameters>")
if params_start == -1 or params_end == -1:
return {"params": set(), "required": set(), "has_params": False}
params_section = tool_xml[params_start : params_end + len("</parameters>")]
try:
root = DefusedET.fromstring(params_section)
except DefusedET.ParseError:
return {"params": set(), "required": set(), "has_params": False}
for param in root.findall(".//parameter"):
name = param.attrib.get("name")
if not name:
continue
params.add(name)
if param.attrib.get("required", "false").lower() == "true":
required.add(name)
return {"params": params, "required": required, "has_params": bool(params or required)}
def _get_module_name(func: Callable[..., Any]) -> str:
module = inspect.getmodule(func)
if not module:
return "unknown"
module_name = module.__name__
if ".tools." in module_name:
parts = module_name.split(".tools.")[-1].split(".")
if len(parts) >= 1:
return parts[0]
return "unknown"
def _get_schema_path(func: Callable[..., Any]) -> Path | None:
module = inspect.getmodule(func)
if not module or not module.__name__:
return None
module_name = module.__name__
if ".tools." not in module_name:
return None
parts = module_name.split(".tools.")[-1].split(".")
if len(parts) < 2:
return None
folder = parts[0]
file_stem = parts[1]
schema_file = f"{file_stem}_schema.xml"
return get_strix_resource_path("tools", folder, schema_file)
def register_tool(
    func: Callable[..., Any] | None = None, *, sandbox_execution: bool = True
) -> Callable[..., Any]:
    """Decorator that records a callable in the global tool registry.

    Outside sandbox mode it also attaches the tool's XML schema and caches
    the parsed parameter schema. Usable bare (``@register_tool``) or with
    arguments (``@register_tool(sandbox_execution=False)``).
    """

    def decorator(f: Callable[..., Any]) -> Callable[..., Any]:
        entry: dict[str, Any] = {
            "name": f.__name__,
            "function": f,
            "module": _get_module_name(f),
            "sandbox_execution": sandbox_execution,
        }
        in_sandbox = os.getenv("STRIX_SANDBOX_MODE", "false").lower() == "true"
        if not in_sandbox:
            try:
                schema_path = _get_schema_path(f)
                xml_tools = _load_xml_schema(schema_path) if schema_path else None
                if xml_tools is not None and f.__name__ in xml_tools:
                    entry["xml_schema"] = xml_tools[f.__name__]
                else:
                    entry["xml_schema"] = (
                        f'<tool name="{f.__name__}">'
                        "<description>Schema not found for tool.</description>"
                        "</tool>"
                    )
            except (TypeError, FileNotFoundError) as e:
                logger.warning(f"Error loading schema for {f.__name__}: {e}")
                entry["xml_schema"] = (
                    f'<tool name="{f.__name__}">'
                    "<description>Error loading schema.</description>"
                    "</tool>"
                )
            schema_text = entry.get("xml_schema")
            _tool_param_schemas[str(entry["name"])] = _parse_param_schema(
                schema_text if isinstance(schema_text, str) else ""
            )
        tools.append(entry)
        _tools_by_name[str(entry["name"])] = f

        @wraps(f)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            return f(*args, **kwargs)

        return wrapper

    return decorator(func) if func is not None else decorator
def get_tool_by_name(name: str) -> Callable[..., Any] | None:
    """Look up a registered tool callable, or None when unknown."""
    return _tools_by_name.get(name, None)
def get_tool_names() -> list[str]:
    """Return the names of every registered tool."""
    return [*_tools_by_name]
def get_tool_param_schema(name: str) -> dict[str, Any] | None:
    """Return the cached parameter schema for *name*, if one was recorded."""
    return _tool_param_schemas.get(name, None)
def needs_agent_state(tool_name: str) -> bool:
    """True when the registered tool's signature declares an ``agent_state`` parameter."""
    tool_func = get_tool_by_name(tool_name)
    if tool_func is None:
        return False
    return "agent_state" in signature(tool_func).parameters
def should_execute_in_sandbox(tool_name: str) -> bool:
    """Whether *tool_name* should run in the sandbox (defaults to True if unregistered)."""
    matching = next((t for t in tools if t.get("name") == tool_name), None)
    if matching is None:
        return True
    return bool(matching.get("sandbox_execution", True))
def get_tools_prompt() -> str:
    """Render every registered tool's XML schema, grouped by module, for the LLM prompt."""
    grouped: dict[str, list[dict[str, Any]]] = {}
    for tool in tools:
        grouped.setdefault(tool.get("module", "unknown"), []).append(tool)
    sections = []
    for module, module_tools in sorted(grouped.items()):
        tag = f"{module}_tools"
        body_lines = [f"<{tag}>"]
        for tool in module_tools:
            schema = tool.get("xml_schema", "")
            if schema:
                # Indent each schema line under the module wrapper tag.
                body_lines.append("\n".join(f"    {line}" for line in schema.split("\n")))
        body_lines.append(f"</{tag}>")
        sections.append("\n".join(body_lines))
    return "\n\n".join(sections)
def clear_registry() -> None:
    """Drop every registered tool and all derived caches."""
    for store in (tools, _tools_by_name, _tool_param_schemas):
        store.clear()
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/registry.py",
"license": "Apache License 2.0",
"lines": 194,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/tools/reporting/reporting_actions.py | import contextlib
import re
from pathlib import PurePosixPath
from typing import Any
from strix.tools.registry import register_tool
_CVSS_FIELDS = (
"attack_vector",
"attack_complexity",
"privileges_required",
"user_interaction",
"scope",
"confidentiality",
"integrity",
"availability",
)
def parse_cvss_xml(xml_str: str) -> dict[str, str] | None:
if not xml_str or not xml_str.strip():
return None
result = {}
for field in _CVSS_FIELDS:
match = re.search(rf"<{field}>(.*?)</{field}>", xml_str, re.DOTALL)
if match:
result[field] = match.group(1).strip()
return result if result else None
def parse_code_locations_xml(xml_str: str) -> list[dict[str, Any]] | None:
if not xml_str or not xml_str.strip():
return None
locations = []
for loc_match in re.finditer(r"<location>(.*?)</location>", xml_str, re.DOTALL):
loc: dict[str, Any] = {}
loc_content = loc_match.group(1)
for field in (
"file",
"start_line",
"end_line",
"snippet",
"label",
"fix_before",
"fix_after",
):
field_match = re.search(rf"<{field}>(.*?)</{field}>", loc_content, re.DOTALL)
if field_match:
raw = field_match.group(1)
value = (
raw.strip("\n")
if field in ("snippet", "fix_before", "fix_after")
else raw.strip()
)
if field in ("start_line", "end_line"):
with contextlib.suppress(ValueError, TypeError):
loc[field] = int(value)
elif value:
loc[field] = value
if loc.get("file") and loc.get("start_line") is not None:
locations.append(loc)
return locations if locations else None
def _validate_file_path(path: str) -> str | None:
if not path or not path.strip():
return "file path cannot be empty"
p = PurePosixPath(path)
if p.is_absolute():
return f"file path must be relative, got absolute: '{path}'"
if ".." in p.parts:
return f"file path must not contain '..': '{path}'"
return None
def _validate_code_locations(locations: list[dict[str, Any]]) -> list[str]:
errors = []
for i, loc in enumerate(locations):
path_err = _validate_file_path(loc.get("file", ""))
if path_err:
errors.append(f"code_locations[{i}]: {path_err}")
start = loc.get("start_line")
if not isinstance(start, int) or start < 1:
errors.append(f"code_locations[{i}]: start_line must be a positive integer")
end = loc.get("end_line")
if end is None:
errors.append(f"code_locations[{i}]: end_line is required")
elif not isinstance(end, int) or end < 1:
errors.append(f"code_locations[{i}]: end_line must be a positive integer")
elif isinstance(start, int) and end < start:
errors.append(f"code_locations[{i}]: end_line ({end}) must be >= start_line ({start})")
return errors
def _extract_cve(cve: str) -> str:
match = re.search(r"CVE-\d{4}-\d{4,}", cve)
return match.group(0) if match else cve.strip()
def _validate_cve(cve: str) -> str | None:
if not re.match(r"^CVE-\d{4}-\d{4,}$", cve):
return f"invalid CVE format: '{cve}' (expected 'CVE-YYYY-NNNNN')"
return None
def _extract_cwe(cwe: str) -> str:
match = re.search(r"CWE-\d+", cwe)
return match.group(0) if match else cwe.strip()
def _validate_cwe(cwe: str) -> str | None:
if not re.match(r"^CWE-\d+$", cwe):
return f"invalid CWE format: '{cwe}' (expected 'CWE-NNN')"
return None
def calculate_cvss_and_severity(
    attack_vector: str,
    attack_complexity: str,
    privileges_required: str,
    user_interaction: str,
    scope: str,
    confidentiality: str,
    integrity: str,
    availability: str,
) -> tuple[float, str, str]:
    """Compute the CVSS 3.1 base score, severity label, and vector string.

    Every argument is a single-letter CVSS 3.1 metric value (e.g. "N",
    "L", "H") as validated by ``_validate_cvss_parameters``.  Returns
    ``(score, severity, vector)`` where severity is lower-cased.  On any
    failure — including the third-party ``cvss`` package being missing —
    the exception is logged and a conservative fallback of
    ``(7.5, "high", "")`` is returned.
    """
    try:
        # Third-party scorer; imported lazily so module import works without it.
        from cvss import CVSS3

        vector = (
            f"CVSS:3.1/AV:{attack_vector}/AC:{attack_complexity}/"
            f"PR:{privileges_required}/UI:{user_interaction}/S:{scope}/"
            f"C:{confidentiality}/I:{integrity}/A:{availability}"
        )
        c = CVSS3(vector)
        scores = c.scores()
        severities = c.severities()
        # Index 0 holds the *base* score/severity (temporal/environmental follow).
        base_score = scores[0]
        base_severity = severities[0]
        severity = base_severity.lower()
    except Exception:
        import logging

        logging.exception("Failed to calculate CVSS")
        # Conservative default: unscorable findings are treated as high severity.
        return 7.5, "high", ""
    else:
        return base_score, severity, vector
def _validate_required_fields(**kwargs: str | None) -> list[str]:
validation_errors: list[str] = []
required_fields = {
"title": "Title cannot be empty",
"description": "Description cannot be empty",
"impact": "Impact cannot be empty",
"target": "Target cannot be empty",
"technical_analysis": "Technical analysis cannot be empty",
"poc_description": "PoC description cannot be empty",
"poc_script_code": "PoC script/code is REQUIRED - provide the actual exploit/payload",
"remediation_steps": "Remediation steps cannot be empty",
}
for field_name, error_msg in required_fields.items():
value = kwargs.get(field_name)
if not value or not str(value).strip():
validation_errors.append(error_msg)
return validation_errors
def _validate_cvss_parameters(**kwargs: str) -> list[str]:
validation_errors: list[str] = []
cvss_validations = {
"attack_vector": ["N", "A", "L", "P"],
"attack_complexity": ["L", "H"],
"privileges_required": ["N", "L", "H"],
"user_interaction": ["N", "R"],
"scope": ["U", "C"],
"confidentiality": ["N", "L", "H"],
"integrity": ["N", "L", "H"],
"availability": ["N", "L", "H"],
}
for param_name, valid_values in cvss_validations.items():
value = kwargs.get(param_name)
if value not in valid_values:
validation_errors.append(
f"Invalid {param_name}: {value}. Must be one of: {valid_values}"
)
return validation_errors
@register_tool(sandbox_execution=False)
def create_vulnerability_report(  # noqa: PLR0912
    title: str,
    description: str,
    impact: str,
    target: str,
    technical_analysis: str,
    poc_description: str,
    poc_script_code: str,
    remediation_steps: str,
    cvss_breakdown: str,
    endpoint: str | None = None,
    method: str | None = None,
    cve: str | None = None,
    cwe: str | None = None,
    code_locations: str | None = None,
) -> dict[str, Any]:
    """Validate, dedupe, and persist a vulnerability report.

    Pipeline:
      1. Validate the required text fields, the CVSS breakdown XML,
         optional code locations, and CVE/CWE identifiers; all errors
         are collected and returned together.
      2. Compute the CVSS score/severity from the parsed breakdown.
      3. When a global tracer is available, run duplicate detection
         against existing reports and refuse likely duplicates;
         otherwise persist the report via the tracer.

    Returns a dict with ``success`` plus either report metadata
    (``report_id``, ``severity``, ``cvss_score``), validation
    ``errors``, or duplicate details.
    """
    validation_errors = _validate_required_fields(
        title=title,
        description=description,
        impact=impact,
        target=target,
        technical_analysis=technical_analysis,
        poc_description=poc_description,
        poc_script_code=poc_script_code,
        remediation_steps=remediation_steps,
    )
    parsed_cvss = parse_cvss_xml(cvss_breakdown)
    if not parsed_cvss:
        validation_errors.append("cvss: could not parse CVSS breakdown XML")
    else:
        validation_errors.extend(_validate_cvss_parameters(**parsed_cvss))
    parsed_locations = parse_code_locations_xml(code_locations) if code_locations else None
    if parsed_locations:
        validation_errors.extend(_validate_code_locations(parsed_locations))
    if cve:
        # Normalize free-form input (e.g. "see CVE-2024-1234") before validating.
        cve = _extract_cve(cve)
        cve_err = _validate_cve(cve)
        if cve_err:
            validation_errors.append(cve_err)
    if cwe:
        cwe = _extract_cwe(cwe)
        cwe_err = _validate_cwe(cwe)
        if cwe_err:
            validation_errors.append(cwe_err)
    if validation_errors:
        return {"success": False, "message": "Validation failed", "errors": validation_errors}
    # A parse failure would have been appended to validation_errors above.
    assert parsed_cvss is not None
    # NOTE(review): cvss_vector is computed but not stored with the report.
    cvss_score, severity, cvss_vector = calculate_cvss_and_severity(**parsed_cvss)
    try:
        # Lazy import: telemetry is optional at runtime.
        from strix.telemetry.tracer import get_global_tracer

        tracer = get_global_tracer()
        if tracer:
            from strix.llm.dedupe import check_duplicate

            # Compare the candidate against everything already reported.
            existing_reports = tracer.get_existing_vulnerabilities()
            candidate = {
                "title": title,
                "description": description,
                "impact": impact,
                "target": target,
                "technical_analysis": technical_analysis,
                "poc_description": poc_description,
                "poc_script_code": poc_script_code,
                "endpoint": endpoint,
                "method": method,
            }
            dedupe_result = check_duplicate(candidate, existing_reports)
            if dedupe_result.get("is_duplicate"):
                duplicate_id = dedupe_result.get("duplicate_id", "")
                duplicate_title = ""
                # Resolve the duplicate's title for the rejection message.
                for report in existing_reports:
                    if report.get("id") == duplicate_id:
                        duplicate_title = report.get("title", "Unknown")
                        break
                return {
                    "success": False,
                    "message": (
                        f"Potential duplicate of '{duplicate_title}' "
                        f"(id={duplicate_id[:8]}...). Do not re-report the same vulnerability."
                    ),
                    "duplicate_of": duplicate_id,
                    "duplicate_title": duplicate_title,
                    "confidence": dedupe_result.get("confidence", 0.0),
                    "reason": dedupe_result.get("reason", ""),
                }
            report_id = tracer.add_vulnerability_report(
                title=title,
                description=description,
                severity=severity,
                impact=impact,
                target=target,
                technical_analysis=technical_analysis,
                poc_description=poc_description,
                poc_script_code=poc_script_code,
                remediation_steps=remediation_steps,
                cvss=cvss_score,
                cvss_breakdown=parsed_cvss,
                endpoint=endpoint,
                method=method,
                cve=cve,
                cwe=cwe,
                code_locations=parsed_locations,
            )
            return {
                "success": True,
                "message": f"Vulnerability report '{title}' created successfully",
                "report_id": report_id,
                "severity": severity,
                "cvss_score": cvss_score,
            }
        import logging

        logging.warning("Current tracer not available - vulnerability report not stored")
    except (ImportError, AttributeError) as e:
        return {"success": False, "message": f"Failed to create vulnerability report: {e!s}"}
    else:
        # Tracer unavailable: the report was validated but never persisted.
        return {
            "success": True,
            "message": f"Vulnerability report '{title}' created (not persisted)",
            "warning": "Report could not be persisted - tracer unavailable",
        }
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/reporting/reporting_actions.py",
"license": "Apache License 2.0",
"lines": 281,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/tools/terminal/terminal_actions.py | from typing import Any
from strix.tools.registry import register_tool
@register_tool
def terminal_execute(
    command: str,
    is_input: bool = False,
    timeout: float | None = None,
    terminal_id: str | None = None,
    no_enter: bool = False,
) -> dict[str, Any]:
    """Run *command* in a managed terminal session.

    Delegates to the per-agent TerminalManager.  Known failures
    (ValueError/RuntimeError) are converted into an error-shaped result
    dict rather than raised, so the tool call always yields a result.
    """
    from .terminal_manager import get_terminal_manager

    try:
        return get_terminal_manager().execute_command(
            command=command,
            is_input=is_input,
            timeout=timeout,
            terminal_id=terminal_id,
            no_enter=no_enter,
        )
    except (ValueError, RuntimeError) as exc:
        return {
            "error": str(exc),
            "command": command,
            "terminal_id": terminal_id or "default",
            "content": "",
            "status": "error",
            "exit_code": None,
            "working_dir": None,
        }
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/terminal/terminal_actions.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
usestrix/strix:strix/tools/terminal/terminal_manager.py | import atexit
import contextlib
import threading
from typing import Any
from strix.tools.context import get_current_agent_id
from .terminal_session import TerminalSession
class TerminalManager:
    """Registry of terminal sessions, partitioned per agent.

    Sessions are keyed first by the current agent id (taken from tool
    context) and then by a caller-chosen terminal id.  Access to the
    session maps is serialized by a single non-reentrant lock; the
    TerminalSession objects themselves handle their own I/O.
    """

    def __init__(self) -> None:
        # agent_id -> {terminal_id -> TerminalSession}
        self._sessions_by_agent: dict[str, dict[str, TerminalSession]] = {}
        self._lock = threading.Lock()
        self.default_terminal_id = "default"
        self.default_timeout = 30.0
        self._register_cleanup_handlers()

    def _get_agent_sessions(self) -> dict[str, TerminalSession]:
        """Return (creating if absent) the session map for the current agent."""
        agent_id = get_current_agent_id()
        with self._lock:
            if agent_id not in self._sessions_by_agent:
                self._sessions_by_agent[agent_id] = {}
            return self._sessions_by_agent[agent_id]

    def execute_command(
        self,
        command: str,
        is_input: bool = False,
        timeout: float | None = None,
        terminal_id: str | None = None,
        no_enter: bool = False,
    ) -> dict[str, Any]:
        """Run *command* in the given terminal (the default when None).

        Returns a result dict carrying content/status/exit_code/working_dir.
        RuntimeError and OSError from the session are turned into
        error-shaped dicts rather than propagated.
        """
        if terminal_id is None:
            terminal_id = self.default_terminal_id
        session = self._get_or_create_session(terminal_id)
        try:
            # A falsy timeout (None or 0) falls back to the default.
            result = session.execute(command, is_input, timeout or self.default_timeout, no_enter)
            return {
                "content": result["content"],
                "command": command,
                "terminal_id": terminal_id,
                "status": result["status"],
                "exit_code": result.get("exit_code"),
                "working_dir": result.get("working_dir"),
            }
        except RuntimeError as e:
            return {
                "error": str(e),
                "command": command,
                "terminal_id": terminal_id,
                "content": "",
                "status": "error",
                "exit_code": None,
                "working_dir": None,
            }
        except OSError as e:
            return {
                "error": f"System error: {e}",
                "command": command,
                "terminal_id": terminal_id,
                "content": "",
                "status": "error",
                "exit_code": None,
                "working_dir": None,
            }

    def _get_or_create_session(self, terminal_id: str) -> TerminalSession:
        """Return the session for *terminal_id*, creating it on first use."""
        # Note: _get_agent_sessions acquires and releases the lock before
        # we re-acquire it here; the lock is not held across both steps.
        sessions = self._get_agent_sessions()
        with self._lock:
            if terminal_id not in sessions:
                sessions[terminal_id] = TerminalSession(terminal_id)
            return sessions[terminal_id]

    def close_session(self, terminal_id: str | None = None) -> dict[str, Any]:
        """Close one terminal session and report the outcome as a dict."""
        if terminal_id is None:
            terminal_id = self.default_terminal_id
        sessions = self._get_agent_sessions()
        with self._lock:
            if terminal_id not in sessions:
                return {
                    "terminal_id": terminal_id,
                    "message": f"Terminal '{terminal_id}' not found",
                    "status": "not_found",
                }
            # Remove from the map first so concurrent callers can't grab it.
            session = sessions.pop(terminal_id)
        try:
            session.close()
        except (RuntimeError, OSError) as e:
            return {
                "terminal_id": terminal_id,
                "error": f"Failed to close terminal '{terminal_id}': {e}",
                "status": "error",
            }
        else:
            return {
                "terminal_id": terminal_id,
                "message": f"Terminal '{terminal_id}' closed successfully",
                "status": "closed",
            }

    def list_sessions(self) -> dict[str, Any]:
        """Summarize the current agent's sessions (running state, cwd)."""
        sessions = self._get_agent_sessions()
        with self._lock:
            session_info: dict[str, dict[str, Any]] = {}
            for tid, session in sessions.items():
                session_info[tid] = {
                    "is_running": session.is_running(),
                    "working_dir": session.get_working_dir(),
                }
        return {"sessions": session_info, "total_count": len(session_info)}

    def cleanup_agent(self, agent_id: str) -> None:
        """Drop and close every session owned by *agent_id* (best-effort)."""
        with self._lock:
            sessions = self._sessions_by_agent.pop(agent_id, {})
        # Close outside the lock; errors are deliberately swallowed.
        for session in sessions.values():
            with contextlib.suppress(Exception):
                session.close()

    def cleanup_dead_sessions(self) -> None:
        """Reap sessions whose underlying process is no longer running."""
        with self._lock:
            for sessions in self._sessions_by_agent.values():
                dead_sessions: list[str] = []
                for tid, session in sessions.items():
                    if not session.is_running():
                        dead_sessions.append(tid)
                for tid in dead_sessions:
                    session = sessions.pop(tid)
                    with contextlib.suppress(Exception):
                        session.close()

    def close_all_sessions(self) -> None:
        """Close every session of every agent; used at interpreter exit."""
        with self._lock:
            all_sessions: list[TerminalSession] = []
            for sessions in self._sessions_by_agent.values():
                all_sessions.extend(sessions.values())
            self._sessions_by_agent.clear()
        # Close outside the lock to avoid blocking other threads.
        for session in all_sessions:
            with contextlib.suppress(Exception):
                session.close()

    def _register_cleanup_handlers(self) -> None:
        """Ensure all sessions are torn down when the process exits."""
        atexit.register(self.close_all_sessions)
# Process-wide singleton; its atexit hook (registered in __init__) closes
# all sessions on interpreter shutdown.
_terminal_manager = TerminalManager()


def get_terminal_manager() -> TerminalManager:
    """Return the process-wide TerminalManager singleton."""
    return _terminal_manager
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/terminal/terminal_manager.py",
"license": "Apache License 2.0",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
usestrix/strix:strix/tools/thinking/thinking_actions.py | from typing import Any
from strix.tools.registry import register_tool
@register_tool(sandbox_execution=False)
def think(thought: str) -> dict[str, Any]:
    """Record a free-form reasoning step; blank thoughts are rejected."""
    try:
        stripped = thought.strip() if thought else ""
        if not stripped:
            return {"success": False, "message": "Thought cannot be empty"}
        return {
            "success": True,
            "message": f"Thought recorded successfully with {len(stripped)} characters",
        }
    except (ValueError, TypeError) as exc:
        return {"success": False, "message": f"Failed to record thought: {exc!s}"}
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/thinking/thinking_actions.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
usestrix/strix:strix/tools/web_search/web_search_actions.py | import os
from typing import Any
import requests
from strix.tools.registry import register_tool
# Steering prompt sent as the system message on every Perplexity search
# request made by web_search() below.
SYSTEM_PROMPT = """You are assisting a cybersecurity agent specialized in vulnerability scanning
and security assessment running on Kali Linux. When responding to search queries:
1. Prioritize cybersecurity-relevant information including:
- Vulnerability details (CVEs, CVSS scores, impact)
- Security tools, techniques, and methodologies
- Exploit information and proof-of-concepts
- Security best practices and mitigations
- Penetration testing approaches
- Web application security findings
2. Provide technical depth appropriate for security professionals
3. Include specific versions, configurations, and technical details when available
4. Focus on actionable intelligence for security assessment
5. Cite reliable security sources (NIST, OWASP, CVE databases, security vendors)
6. When providing commands or installation instructions, prioritize Kali Linux compatibility
and use apt package manager or tools pre-installed in Kali
7. Be detailed and specific - avoid general answers. Always include concrete code examples,
command-line instructions, configuration snippets, or practical implementation steps
when applicable
Structure your response to be comprehensive yet concise, emphasizing the most critical
security implications and details."""
@register_tool(sandbox_execution=False)
def web_search(query: str) -> dict[str, Any]:
    """Run a security-focused web search via the Perplexity chat API.

    Requires the PERPLEXITY_API_KEY environment variable.  Returns a dict
    with ``success`` and either the answer ``content`` or an error
    ``message``; every failure mode is mapped to a result dict, nothing
    is raised to the caller.
    """
    try:
        api_key = os.getenv("PERPLEXITY_API_KEY")
        if not api_key:
            return {
                "success": False,
                "message": "PERPLEXITY_API_KEY environment variable not set",
                "results": [],
            }
        url = "https://api.perplexity.ai/chat/completions"
        headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
        payload = {
            "model": "sonar-reasoning",
            "messages": [
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": query},
            ],
        }
        # Long timeout: the reasoning model can take minutes on hard queries.
        response = requests.post(url, headers=headers, json=payload, timeout=300)
        response.raise_for_status()
        response_data = response.json()
        # OpenAI-compatible response shape: first choice's message content.
        content = response_data["choices"][0]["message"]["content"]
    except requests.exceptions.Timeout:
        return {"success": False, "message": "Request timed out", "results": []}
    except requests.exceptions.RequestException as e:
        return {"success": False, "message": f"API request failed: {e!s}", "results": []}
    except KeyError as e:
        return {
            "success": False,
            "message": f"Unexpected API response format: missing {e!s}",
            "results": [],
        }
    except Exception as e:  # noqa: BLE001
        return {"success": False, "message": f"Web search failed: {e!s}", "results": []}
    else:
        return {
            "success": True,
            "query": query,
            "content": content,
            "message": "Web search completed successfully",
        }
| {
"repo_id": "usestrix/strix",
"file_path": "strix/tools/web_search/web_search_actions.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:tests/test_chromadb_persistence_fix.py | """
Test for ChromaDB persistence fix.
This test verifies that ChromaDB collections can be retrieved without triggering
unnecessary embedding function initialization/model downloads.
"""
import pytest
import tempfile
import shutil
import asyncio
from vanna.integrations.chromadb import ChromaAgentMemory
from vanna.core.user import User
from vanna.core.tool import ToolContext
@pytest.fixture
def test_user():
    """Test user for context.

    A minimal User in the 'user' group, shared by the tests below.
    """
    return User(
        id="test_user",
        username="test",
        email="test@example.com",
        group_memberships=["user"],
    )
def create_test_context(test_user, agent_memory):
    """Build a minimal ToolContext wired to the given user and agent memory."""
    return ToolContext(
        user=test_user,
        conversation_id="test_conv",
        request_id="test_req",
        agent_memory=agent_memory,
        metadata={},
    )
@pytest.mark.asyncio
async def test_chromadb_collection_retrieval_without_embedding_function(test_user):
    """
    Test that existing ChromaDB collections can be retrieved without
    initializing the embedding function (avoiding model downloads).

    This test simulates the real-world scenario where:
    1. A collection is created with an embedding function (first app run)
    2. The app restarts and retrieves the existing collection
    3. The embedding function should NOT be re-initialized on retrieval
    """
    try:
        import chromadb
        from chromadb.config import Settings
    except ImportError:
        pytest.skip("ChromaDB not installed")
    temp_dir = tempfile.mkdtemp()
    try:
        # Session 1: Create a collection using ChromaAgentMemory (simulating first app run)
        # This will create the collection with an embedding function
        memory1 = ChromaAgentMemory(
            persist_directory=tem_dir if False else temp_dir, collection_name="test_collection"
        )
        context = create_test_context(test_user, memory1)
        # Save some memories (this will create the collection)
        # We need to add explicit embeddings to avoid model download in test environment
        collection = memory1._get_collection()
        # 384 matches the all-MiniLM-L6-v2 embedding dimension used by default.
        collection.add(
            ids=["mem1", "mem2"],
            documents=["test question 1", "test question 2"],
            embeddings=[[0.1] * 384, [0.2] * 384],
            metadatas=[
                {
                    "question": "test question 1",
                    "tool_name": "test_tool",
                    "args_json": "{}",
                    "timestamp": "2024-01-01T00:00:00",
                    "success": True,
                    "metadata_json": "{}",
                },
                {
                    "question": "test question 2",
                    "tool_name": "test_tool",
                    "args_json": "{}",
                    "timestamp": "2024-01-01T00:01:00",
                    "success": True,
                    "metadata_json": "{}",
                },
            ],
        )
        # Clean up references to simulate app restart
        del collection
        del memory1
        # Session 2: Create new ChromaAgentMemory instance (simulating app restart)
        # This should retrieve the existing collection WITHOUT calling _get_embedding_function
        memory2 = ChromaAgentMemory(
            persist_directory=temp_dir, collection_name="test_collection"
        )
        # Mock _get_embedding_function to verify it's not called
        original_get_ef = memory2._get_embedding_function

        def mock_get_ef():
            pytest.fail(
                "_get_embedding_function was called when retrieving existing collection"
            )

        memory2._get_embedding_function = mock_get_ef
        # This should retrieve the existing collection without calling _get_embedding_function
        collection2 = memory2._get_collection()
        # Restore original method
        memory2._get_embedding_function = original_get_ef
        # Verify collection was retrieved successfully
        assert collection2 is not None
        assert collection2.name == "test_collection"
        assert collection2.count() == 2
        # Test that we can use public API methods on the retrieved collection
        context2 = create_test_context(test_user, memory2)
        recent = await memory2.get_recent_memories(context=context2, limit=10)
        assert len(recent) == 2
        assert recent[0].question in ["test question 1", "test question 2"]
    finally:
        shutil.rmtree(temp_dir, ignore_errors=True)
@pytest.mark.asyncio
async def test_chromadb_collection_creation_with_embedding_function():
    """
    Test that NEW ChromaDB collections are created WITH the embedding function.

    Complements the retrieval test above: creation is the one path where
    _get_embedding_function MUST be invoked.
    """
    try:
        from vanna.integrations.chromadb import ChromaAgentMemory
    except ImportError:
        pytest.skip("ChromaDB not installed")
    temp_dir = tempfile.mkdtemp()
    try:
        # Test: Create ChromaAgentMemory for a non-existent collection
        memory = ChromaAgentMemory(
            persist_directory=temp_dir, collection_name="new_collection"
        )
        # Track if _get_embedding_function was called
        get_ef_called = []
        original_get_ef = memory._get_embedding_function

        def tracking_get_ef():
            get_ef_called.append(True)
            return original_get_ef()

        memory._get_embedding_function = tracking_get_ef
        # This should create a new collection and SHOULD call _get_embedding_function
        collection = memory._get_collection()
        # Restore original
        memory._get_embedding_function = original_get_ef
        # Verify collection was created
        assert collection is not None
        assert collection.name == "new_collection"
        # Verify _get_embedding_function was called
        assert get_ef_called, (
            "_get_embedding_function should be called when creating new collection"
        )
    finally:
        shutil.rmtree(temp_dir, ignore_errors=True)
if __name__ == "__main__":
    # Run tests directly (outside pytest).  Bug fix: the retrieval test takes
    # a `test_user` argument — pytest injects it via the fixture, but a direct
    # call does not, so the original `asyncio.run(test_...())` raised
    # TypeError.  Build the same user the fixture would provide.
    direct_user = User(
        id="test_user",
        username="test",
        email="test@example.com",
        group_memberships=["user"],
    )
    asyncio.run(test_chromadb_collection_retrieval_without_embedding_function(direct_user))
    asyncio.run(test_chromadb_collection_creation_with_embedding_function())
| {
"repo_id": "vanna-ai/vanna",
"file_path": "tests/test_chromadb_persistence_fix.py",
"license": "MIT License",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vanna-ai/vanna:examples/chromadb_gpu_example.py | """
Example: Using ChromaDB AgentMemory with GPU acceleration
This example demonstrates how to use ChromaAgentMemory with intelligent
device selection for GPU acceleration when available.
"""
from vanna.integrations.chromadb import (
ChromaAgentMemory,
get_device,
create_sentence_transformer_embedding_function
)
def example_default_usage():
    """Example 1: Use default embedding function (no GPU, no sentence-transformers required)."""
    print("Example 1: Default ChromaDB embedding (CPU-only, no extra dependencies)")
    # No embedding_function argument: ChromaAgentMemory falls back to its default.
    memory = ChromaAgentMemory(
        persist_directory="./chroma_memory_default"
    )
    print("β ChromaAgentMemory created with default embedding function")
    print()
def example_auto_gpu():
    """Example 2: let the library pick the best device for SentenceTransformers."""
    print("Example 2: Automatic GPU detection")

    # Probe for the best available backend.
    device = get_device()
    print(f"Detected device: {device}")

    # The factory resolves the device itself when none is passed explicitly.
    agent_memory = ChromaAgentMemory(
        persist_directory="./chroma_memory_gpu",
        embedding_function=create_sentence_transformer_embedding_function(),
    )
    print(f"β ChromaAgentMemory created with SentenceTransformer on {device}")
    print()
def example_explicit_cuda():
    """Example 3: Explicitly use CUDA (fails if CUDA is unavailable)."""
    print("Example 3: Explicitly request CUDA")
    # Explicitly request CUDA instead of relying on auto-detection.
    embedding_fn = create_sentence_transformer_embedding_function(device="cuda")
    memory = ChromaAgentMemory(
        persist_directory="./chroma_memory_cuda",
        embedding_function=embedding_fn
    )
    print("β ChromaAgentMemory created with SentenceTransformer on CUDA")
    print()
def example_custom_model_gpu():
    """Example 4: Use a larger embedding model (device still auto-detected)."""
    print("Example 4: Custom model with GPU acceleration")
    # Larger, more accurate model; trade-off is slower embedding.
    embedding_fn = create_sentence_transformer_embedding_function(
        model_name="sentence-transformers/all-mpnet-base-v2"
    )
    memory = ChromaAgentMemory(
        persist_directory="./chroma_memory_large",
        embedding_function=embedding_fn
    )
    print("β ChromaAgentMemory created with all-mpnet-base-v2 model")
    print()
def example_manual_chromadb():
    """Example 5: wire up a ChromaDB embedding function by hand."""
    print("Example 5: Manual ChromaDB embedding function configuration")

    from chromadb.utils import embedding_functions

    # Resolve the device first, then hand it straight to ChromaDB's own
    # SentenceTransformer wrapper instead of using the vanna factory.
    device = get_device()
    manual_embedder = embedding_functions.SentenceTransformerEmbeddingFunction(
        model_name="sentence-transformers/all-MiniLM-L6-v2",
        device=device,
    )

    memory = ChromaAgentMemory(
        persist_directory="./chroma_memory_manual",
        embedding_function=manual_embedder,
    )
    print(f"β ChromaAgentMemory created with manual configuration on {device}")
    print()
if __name__ == "__main__":
    print("=" * 70)
    print("ChromaDB AgentMemory GPU Acceleration Examples")
    print("=" * 70)
    print()
    # Example 1: Default (no GPU, no sentence-transformers needed)
    example_default_usage()
    # Examples 2-5 require sentence-transformers to be installed
    try:
        import sentence_transformers

        example_auto_gpu()
        # Only run CUDA example if CUDA is available
        device = get_device()
        if device == "cuda":
            example_explicit_cuda()
        example_custom_model_gpu()
        example_manual_chromadb()
    except ImportError:
        print("β οΈ sentence-transformers not installed")
        print(" Install with: pip install sentence-transformers")
        print(" Examples 2-5 require this package for GPU acceleration")
    print()
    print("=" * 70)
    print("Summary:")
    print("- Example 1 works without sentence-transformers (CPU only)")
    print("- Examples 2-5 require sentence-transformers for GPU support")
    print("- GPU acceleration automatically detected when available")
    print("=" * 70)
| {
"repo_id": "vanna-ai/vanna",
"file_path": "examples/chromadb_gpu_example.py",
"license": "MIT License",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
vanna-ai/vanna:examples/transform_args_example.py | """
Example demonstrating how to use ToolRegistry.transform_args for user-specific
argument transformation, such as applying row-level security (RLS) to SQL queries.
This example shows:
1. Creating a custom ToolRegistry subclass that overrides transform_args
2. Applying RLS transformation to SQL queries based on user context
3. Rejecting tool execution when validation fails
"""
from typing import Union
from pydantic import BaseModel
from vanna.core import ToolRegistry
from vanna.core.tool import Tool, ToolContext, ToolRejection, ToolResult
from vanna.core.user import User
# Example: SQL execution tool arguments
class SQLExecutionArgs(BaseModel):
    """Arguments accepted by the SQL execution tool."""

    query: str  # raw SQL text to execute
    database: str = "default"  # logical database name to run against
class SQLExecutionTool(Tool[SQLExecutionArgs]):
    """Minimal example tool that 'executes' SQL (implementation stubbed)."""

    @property
    def name(self) -> str:
        return "execute_sql"

    @property
    def description(self) -> str:
        return "Execute a SQL query against the database"

    def get_args_schema(self):
        return SQLExecutionArgs

    async def execute(self, context: ToolContext, args: SQLExecutionArgs) -> ToolResult:
        # Execute the SQL query (implementation not shown) — this demo only
        # echoes a truncated form of the query back to the LLM.
        return ToolResult(
            success=True,
            result_for_llm=f"Executed query: {args.query[:50]}...",
        )
class RLSToolRegistry(ToolRegistry):
    """Custom ToolRegistry that applies row-level security to SQL queries."""

    async def transform_args(
        self,
        tool: Tool,
        args,
        user: User,
        context: ToolContext,
    ) -> Union[SQLExecutionArgs, ToolRejection]:
        """Apply row-level security transformation to SQL queries.

        Returns either transformed args (possibly unchanged) or a
        ToolRejection that aborts the tool call with a reason.
        """
        # Only transform SQL execution tools
        if tool.name == "execute_sql" and isinstance(args, SQLExecutionArgs):
            original_query = args.query.strip()
            # Example 1: Reject queries that try to access restricted tables
            if "restricted_table" in original_query.lower():
                return ToolRejection(
                    reason="Access to 'restricted_table' is not permitted for your user group"
                )
            # Example 2: Apply RLS by modifying the WHERE clause
            # This is a simplified example - real RLS would be more sophisticated
            # (naive string surgery breaks on subqueries, CTEs, quoted text, etc.)
            if "SELECT" in original_query.upper() and "users" in original_query.lower():
                # Add a WHERE clause to filter by user's organization
                user_org_id = user.metadata.get("organization_id")
                if user_org_id:
                    # Simple RLS: append WHERE clause for organization filtering
                    if "WHERE" in original_query.upper():
                        # Splice the org filter in front of the first WHERE's
                        # conditions (count=1 keeps later WHEREs untouched).
                        transformed_query = original_query.replace(
                            "WHERE",
                            f"WHERE organization_id = {user_org_id} AND",
                            1
                        )
                    else:
                        # Add WHERE clause before ORDER BY, LIMIT, etc.
                        transformed_query = original_query.rstrip(";")
                        transformed_query += f" WHERE organization_id = {user_org_id}"
                    # Return transformed arguments
                    return args.model_copy(update={"query": transformed_query})
            # Example 3: Validate required parameters
            if not args.database:
                return ToolRejection(
                    reason="Database parameter is required for SQL execution"
                )
        # For all other tools or if no transformation needed, pass through
        return args
# Usage example
async def example_usage():
    """Demonstrate using the RLS-enabled ToolRegistry."""
    from vanna.capabilities.agent_memory import AgentMemory

    # Create registry and register tool
    registry = RLSToolRegistry()
    sql_tool = SQLExecutionTool()
    registry.register_local_tool(sql_tool, access_groups=[])
    # Create a user with organization context
    # NOTE(review): this passes `user_id=` while other examples construct
    # User with `id=` — confirm which keyword the User model accepts.
    user = User(
        user_id="user123",
        metadata={"organization_id": 42}
    )
    # Create tool context
    context = ToolContext(
        user=user,
        conversation_id="conv123",
        request_id="req123",
        agent_memory=AgentMemory(),
    )
    # Example 1: Query that will be transformed with RLS
    from vanna.core.tool import ToolCall

    tool_call = ToolCall(
        id="call1",
        name="execute_sql",
        arguments={
            "query": "SELECT * FROM users",
            "database": "production"
        }
    )
    result = await registry.execute(tool_call, context)
    print(f"Result: {result.result_for_llm}")
    # The query will be transformed to: SELECT * FROM users WHERE organization_id = 42
    # Example 2: Query that will be rejected
    tool_call_rejected = ToolCall(
        id="call2",
        name="execute_sql",
        arguments={
            "query": "SELECT * FROM restricted_table",
            "database": "production"
        }
    )
    result = await registry.execute(tool_call_rejected, context)
    print(f"Rejected: {result.error}")
    # Will return: "Access to 'restricted_table' is not permitted for your user group"
# Will return: "Access to 'restricted_table' is not permitted for your user group"
if __name__ == "__main__":
    # Run the async demo when executed as a script.
    import asyncio

    asyncio.run(example_usage())
| {
"repo_id": "vanna-ai/vanna",
"file_path": "examples/transform_args_example.py",
"license": "MIT License",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
vanna-ai/vanna:frontends/webcomponent/test_backend.py | #!/usr/bin/env python3
"""
Comprehensive test backend for vanna-webcomponent validation.
This backend exercises all component types and update patterns to validate
that nothing breaks during webcomponent pruning.
Usage:
python test_backend.py --mode rapid # Fast stress test
python test_backend.py --mode realistic # Realistic conversation flow
"""
import argparse
import asyncio
import json
import sys
import time
import traceback
import uuid
from datetime import datetime
from typing import AsyncGenerator, Dict, Any, Optional
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse, FileResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
import os
# Add vanna to path
sys.path.insert(0, "../vanna/src")
from vanna.core.rich_component import RichComponent, ComponentLifecycle
from vanna.components.rich import (
RichTextComponent,
StatusCardComponent,
ProgressDisplayComponent,
ProgressBarComponent,
NotificationComponent,
StatusIndicatorComponent,
ButtonComponent,
ButtonGroupComponent,
CardComponent,
TaskListComponent,
Task,
BadgeComponent,
IconTextComponent,
DataFrameComponent,
ChartComponent,
ArtifactComponent,
LogViewerComponent,
LogEntry,
StatusBarUpdateComponent,
TaskTrackerUpdateComponent,
ChatInputUpdateComponent,
TaskOperation,
)
from vanna.servers.base.models import ChatStreamChunk
# Request/Response models
class ChatRequest(BaseModel):
    """Chat request matching vanna API."""

    # Raw user message; "/test" (or any text containing "test") triggers the
    # comprehensive component test in chat_sse.
    message: str
    # Optional correlation IDs; chat_sse generates UUIDs when these are omitted.
    conversation_id: Optional[str] = None
    request_id: Optional[str] = None
    # Arbitrary caller-supplied context. NOTE: pydantic copies field defaults
    # per-instance, so the mutable {} default is safe here.
    request_context: Dict[str, Any] = {}
class UiComponent(BaseModel):
    """UI component wrapper."""

    # The rich component payload to be rendered by the webcomponent frontend.
    rich_component: RichComponent
# Test state
# Module-level mutable state shared by all requests; fine for this
# single-process test server, not safe for multi-worker deployments.
test_state: Dict[str, Any] = {
    "mode": "realistic",  # "rapid" or "realistic"; overwritten from --mode at startup
    "component_ids": {},  # Track component IDs for updates
    "action_count": 0,  # number of button actions handled so far
}
async def yield_chunk(component: RichComponent, conversation_id: str, request_id: str) -> ChatStreamChunk:
    """Wrap a rich component in a ChatStreamChunk envelope for streaming.

    Despite the name this is a plain coroutine that *returns* one chunk; the
    caller is responsible for yielding it into the SSE stream.
    """
    payload = component.serialize_for_frontend()
    return ChatStreamChunk(
        rich=payload,
        simple=None,
        conversation_id=conversation_id,
        request_id=request_id,
        timestamp=time.time(),
    )
async def delay(mode: str, short: float = 0.1, long: float = 0.5):
    """Pause between streamed chunks; the duration depends on the test mode.

    "realistic" sleeps `long`, "rapid" sleeps `short`; any other mode value
    does not sleep at all.
    """
    durations = {"realistic": long, "rapid": short}
    if mode in durations:
        await asyncio.sleep(durations[mode])
async def test_text_component(conversation_id: str, request_id: str, mode: str) -> AsyncGenerator[ChatStreamChunk, None]:
    """Test text component with markdown: initial render, then an in-place update."""
    text_id = str(uuid.uuid4())
    # Remember the id so later turns can address the same rendered component.
    test_state["component_ids"]["text"] = text_id
    # Create with comprehensive markdown
    text = RichTextComponent(
        id=text_id,
        content="""# Test Text Component
This component demonstrates **markdown rendering** with various formatting:
## Formatting Examples
- **Bold text** for emphasis
- *Italic text* for style
- `inline code` for snippets
- ~~Strikethrough~~ for deletions
### Lists
1. First ordered item
2. Second ordered item
3. Third ordered item
### Code Block
```python
def hello():
    return "Markdown works!"
```
> Blockquote to test quote rendering
This validates that markdown is properly parsed and displayed.""",
        markdown=True,
    )
    yield await yield_chunk(text, conversation_id, request_id)
    await delay(mode)
    # Update with simpler markdown — same id, so the frontend should replace
    # the existing component rather than append a new one.
    text_updated = text.update(content="""# Updated Text Component
Text has been **successfully updated** with new markdown content!
- Update operation works β
- Markdown still renders β""")
    yield await yield_chunk(text_updated, conversation_id, request_id)
    await delay(mode)
async def test_status_card(conversation_id: str, request_id: str, mode: str) -> AsyncGenerator[ChatStreamChunk, None]:
    """Test status card through its pending -> running -> completed states."""
    card_id = str(uuid.uuid4())
    test_state["component_ids"]["status_card"] = card_id
    # Create - pending
    status_card = StatusCardComponent(
        id=card_id,
        title="Status Card Test",
        status="pending",
        description="Testing status card component...",
        icon="β³",
        collapsible=True,
        collapsed=False,
    )
    yield await yield_chunk(status_card, conversation_id, request_id)
    await delay(mode)
    # Update to running
    status_card_running = status_card.set_status("running", "Processing test...")
    yield await yield_chunk(status_card_running, conversation_id, request_id)
    await delay(mode)
    # Update to completed
    status_card_done = status_card.set_status("completed", "Test completed successfully!")
    status_card_done.icon = "β
"
    yield await yield_chunk(status_card_done, conversation_id, request_id)
    await delay(mode)
async def test_progress_display(conversation_id: str, request_id: str, mode: str) -> AsyncGenerator[ChatStreamChunk, None]:
    """Test progress display component stepping 0% -> 50% -> 100%."""
    progress_id = str(uuid.uuid4())
    test_state["component_ids"]["progress_display"] = progress_id
    # Create at 0% (values are fractions in [0, 1], not percentages)
    progress = ProgressDisplayComponent(
        id=progress_id,
        label="Test Progress",
        value=0.0,
        description="Starting test...",
        status="info",
        animated=True,
    )
    yield await yield_chunk(progress, conversation_id, request_id)
    await delay(mode, 0.05, 0.3)
    # Update to 50%
    progress_half = progress.update_progress(0.5, "Halfway there...")
    yield await yield_chunk(progress_half, conversation_id, request_id)
    await delay(mode, 0.05, 0.3)
    # Update to 100%, switching the visual status to success
    progress_done = progress.update_progress(1.0, "Complete!")
    progress_done.status = "success"
    yield await yield_chunk(progress_done, conversation_id, request_id)
    await delay(mode)
async def test_card_component(conversation_id: str, request_id: str, mode: str) -> AsyncGenerator[ChatStreamChunk, None]:
    """Test card component with markdown body and interactive action buttons."""
    card_id = str(uuid.uuid4())
    test_state["component_ids"]["card"] = card_id
    # Create card with markdown content and buttons; "action" values are
    # slash-messages routed back through chat_sse to handle_action_message.
    card = CardComponent(
        id=card_id,
        title="Test Card with Markdown",
        content="""# Card Content
This card demonstrates **markdown rendering** within cards:
- Interactive action buttons
- Collapsible sections
- Status indicators
- `Formatted text`
Click the buttons below to test interactivity!""",
        icon="π",
        status="info",
        markdown=True,
        collapsible=True,
        collapsed=False,
        actions=[
            {"label": "Test Action", "action": "/test-action", "variant": "primary"},
            {"label": "Cancel", "action": "/cancel", "variant": "secondary"},
        ],
    )
    yield await yield_chunk(card, conversation_id, request_id)
    await delay(mode)
    # Update card status and content (same id — in-place replacement)
    card_updated = card.update(
        status="success",
        content="""# Card Updated Successfully!
The card content has been **updated** with:
- New status (success)
- New markdown content
- Same action buttons
β Update operation verified""",
        markdown=True
    )
    yield await yield_chunk(card_updated, conversation_id, request_id)
    await delay(mode)
async def test_task_list(conversation_id: str, request_id: str, mode: str) -> AsyncGenerator[ChatStreamChunk, None]:
    """Test task list component: initial list, then mutated task statuses."""
    task_list_id = str(uuid.uuid4())
    test_state["component_ids"]["task_list"] = task_list_id
    # Create task list
    tasks = [
        Task(title="Setup development environment", description="Install dependencies and configure tools", status="completed", progress=1.0),
        Task(title="Write test suite", description="Create comprehensive component tests", status="in_progress", progress=0.7),
        Task(title="Run validation", description="Validate all components render correctly", status="pending"),
        Task(title="Prune webcomponent", description="Remove unused code and cruft", status="pending"),
    ]
    task_list = TaskListComponent(
        id=task_list_id,
        title="Webcomponent Validation Workflow",
        tasks=tasks,
        show_progress=True,
        show_timestamps=True,
    )
    yield await yield_chunk(task_list, conversation_id, request_id)
    await delay(mode)
    # Update task statuses (mutates the shared Task objects in place)
    tasks[1].status = "completed"
    tasks[1].progress = 1.0
    tasks[2].status = "in_progress"
    tasks[2].progress = 0.3
    # Re-send a component with the same id; lifecycle=UPDATE marks it as a
    # replacement of the already-rendered list rather than a new component.
    task_list_updated = TaskListComponent(
        id=task_list_id,
        title="Webcomponent Validation Workflow (Updated)",
        tasks=tasks,
        show_progress=True,
        show_timestamps=True,
    )
    task_list_updated.lifecycle = ComponentLifecycle.UPDATE
    yield await yield_chunk(task_list_updated, conversation_id, request_id)
    await delay(mode)
async def test_progress_bar(conversation_id: str, request_id: str, mode: str) -> AsyncGenerator[ChatStreamChunk, None]:
    """Test progress bar component: create at 30%, then update to 80%."""
    bar_id = str(uuid.uuid4())
    test_state["component_ids"]["progress_bar"] = bar_id
    # Create (value is a fraction in [0, 1])
    bar = ProgressBarComponent(
        id=bar_id,
        value=0.3,
        label="Loading",
        status="info",
    )
    yield await yield_chunk(bar, conversation_id, request_id)
    await delay(mode, 0.05, 0.2)
    # Update
    bar_updated = bar.update(value=0.8, status="success")
    yield await yield_chunk(bar_updated, conversation_id, request_id)
    await delay(mode)
async def test_notification(conversation_id: str, request_id: str, mode: str) -> AsyncGenerator[ChatStreamChunk, None]:
    """Emit one notification per severity level to exercise all styles."""
    levels = ("info", "success", "warning", "error")
    for level in levels:
        component = NotificationComponent(
            id=str(uuid.uuid4()),
            message=f"This is a {level} notification",
            level=level,
            title=f"{level.capitalize()} Test",
        )
        yield await yield_chunk(component, conversation_id, request_id)
        await delay(mode, 0.05, 0.2)
async def test_status_indicator(conversation_id: str, request_id: str, mode: str) -> AsyncGenerator[ChatStreamChunk, None]:
    """Test status indicator: pulsing "running" state, then static success."""
    indicator_id = str(uuid.uuid4())
    test_state["component_ids"]["status_indicator"] = indicator_id
    # Create with pulse animation enabled
    indicator = StatusIndicatorComponent(
        id=indicator_id,
        status="running",
        message="Processing...",
        pulse=True,
    )
    yield await yield_chunk(indicator, conversation_id, request_id)
    await delay(mode)
    # Update to success and stop the pulse
    indicator_success = indicator.update(status="success", message="Done!", pulse=False)
    yield await yield_chunk(indicator_success, conversation_id, request_id)
    await delay(mode)
async def test_badge(conversation_id: str, request_id: str, mode: str) -> AsyncGenerator[ChatStreamChunk, None]:
    """Render a single primary badge to verify the badge renderer."""
    badge_component = BadgeComponent(
        id=str(uuid.uuid4()),
        text="Test Badge",
        variant="primary",
    )
    chunk = await yield_chunk(badge_component, conversation_id, request_id)
    yield chunk
    await delay(mode)
async def test_icon_text(conversation_id: str, request_id: str, mode: str) -> AsyncGenerator[ChatStreamChunk, None]:
    """Render a single icon+text row to verify the icon_text renderer."""
    component = IconTextComponent(
        id=str(uuid.uuid4()),
        icon="π§",
        text="Tool Icon Test",
    )
    chunk = await yield_chunk(component, conversation_id, request_id)
    yield chunk
    await delay(mode)
async def test_buttons(conversation_id: str, request_id: str, mode: str) -> AsyncGenerator[ChatStreamChunk, None]:
    """Test button and button_group components.

    Button "action" values are slash-messages the frontend posts back to
    chat_sse, which routes them to handle_action_message.
    """
    # Single button — no explicit id; presumably auto-generated by the
    # component base class (TODO confirm).
    button = ButtonComponent(
        label="Single Button",
        action="/button-test",
        variant="primary",
        icon="π",
    )
    yield await yield_chunk(button, conversation_id, request_id)
    await delay(mode, 0.05, 0.2)
    # Button group
    button_group = ButtonGroupComponent(
        buttons=[
            {"label": "Option 1", "action": "/option1", "variant": "primary"},
            {"label": "Option 2", "action": "/option2", "variant": "secondary"},
            {"label": "Option 3", "action": "/option3", "variant": "success"},
        ],
        orientation="horizontal",
    )
    yield await yield_chunk(button_group, conversation_id, request_id)
    await delay(mode)
async def test_dataframe(conversation_id: str, request_id: str, mode: str) -> AsyncGenerator[ChatStreamChunk, None]:
    """Test dataframe component: initial table, then an updated row set."""
    dataframe_id = str(uuid.uuid4())
    test_state["component_ids"]["dataframe"] = dataframe_id
    # Create sample data (list of homogeneous row dicts)
    sample_data = [
        {"id": 1, "name": "Alice", "age": 30, "city": "New York", "salary": 75000},
        {"id": 2, "name": "Bob", "age": 25, "city": "San Francisco", "salary": 85000},
        {"id": 3, "name": "Charlie", "age": 35, "city": "Chicago", "salary": 70000},
        {"id": 4, "name": "Diana", "age": 28, "city": "Boston", "salary": 80000},
        {"id": 5, "name": "Eve", "age": 32, "city": "Seattle", "salary": 90000},
    ]
    dataframe = DataFrameComponent.from_records(
        records=sample_data,
        title="π Employee Data",
        description="""Sample employee dataset demonstrating **DataFrame** features:
- **Searchable**: Try searching for names or cities
- **Sortable**: Click column headers to sort
- **Exportable**: Export to CSV/Excel
- **Paginated**: Navigate through rows
*5 employees across different cities*""",
        id=dataframe_id,
        searchable=True,
        sortable=True,
        exportable=True,
    )
    yield await yield_chunk(dataframe, conversation_id, request_id)
    await delay(mode)
    # Update with more data — same id plus lifecycle=UPDATE so the frontend
    # replaces the existing table instead of appending a second one.
    updated_data = sample_data + [
        {"id": 6, "name": "Frank", "age": 29, "city": "Austin", "salary": 78000},
    ]
    dataframe_updated = DataFrameComponent.from_records(
        records=updated_data,
        title="π Employee Data (Updated)",
        description="""Dataset **updated** with new employee!
β Added Frank from Austin
β Now showing 6 employees
β Update operation verified""",
        id=dataframe_id,
    )
    dataframe_updated.lifecycle = ComponentLifecycle.UPDATE
    yield await yield_chunk(dataframe_updated, conversation_id, request_id)
    await delay(mode)
async def test_chart(conversation_id: str, request_id: str, mode: str) -> AsyncGenerator[ChatStreamChunk, None]:
    """Test chart component: a bar chart, then an in-place swap to a line chart.

    The `data` dicts use Plotly's figure schema ("data" traces + "layout").
    """
    chart_id = str(uuid.uuid4())
    test_state["component_ids"]["chart"] = chart_id
    # Create a simple bar chart
    chart_data = {
        "data": [
            {
                "x": ["Product A", "Product B", "Product C", "Product D"],
                "y": [20, 35, 30, 25],
                "type": "bar",
                "name": "Sales",
                "marker": {"color": "#667eea"},
            }
        ],
        "layout": {
            "title": "Product Sales",
            "xaxis": {"title": "Products"},
            "yaxis": {"title": "Sales (units)"},
        },
    }
    chart = ChartComponent(
        id=chart_id,
        chart_type="bar",
        data=chart_data,
        title="Sales Chart",
    )
    yield await yield_chunk(chart, conversation_id, request_id)
    await delay(mode)
    # Update to line chart (same id + lifecycle=UPDATE replaces the bar chart)
    line_chart_data = {
        "data": [
            {
                "x": ["Jan", "Feb", "Mar", "Apr", "May"],
                "y": [10, 15, 13, 17, 21],
                "type": "scatter",
                "mode": "lines+markers",
                "name": "Revenue",
                "line": {"color": "#10b981", "width": 3},
            }
        ],
        "layout": {
            "title": "Monthly Revenue Trend",
            "xaxis": {"title": "Month"},
            "yaxis": {"title": "Revenue ($1000s)"},
        },
    }
    chart_updated = ChartComponent(
        id=chart_id,
        chart_type="line",
        data=line_chart_data,
        title="Revenue Chart",
    )
    chart_updated.lifecycle = ComponentLifecycle.UPDATE
    yield await yield_chunk(chart_updated, conversation_id, request_id)
    await delay(mode)
async def test_artifact(conversation_id: str, request_id: str, mode: str) -> AsyncGenerator[ChatStreamChunk, None]:
    """Test artifact component by rendering inline SVG content."""
    artifact_id = str(uuid.uuid4())
    test_state["component_ids"]["artifact"] = artifact_id
    # Create SVG artifact (raw markup passed through to the frontend)
    svg_content = '''<svg width="200" height="200" xmlns="http://www.w3.org/2000/svg">
<circle cx="100" cy="100" r="80" fill="#667eea" opacity="0.8"/>
<circle cx="100" cy="100" r="60" fill="#764ba2" opacity="0.6"/>
<circle cx="100" cy="100" r="40" fill="#f093fb" opacity="0.4"/>
<text x="100" y="105" text-anchor="middle" fill="white" font-size="20" font-weight="bold">
Test SVG
</text>
</svg>'''
    artifact = ArtifactComponent(
        id=artifact_id,
        content=svg_content,
        artifact_type="svg",
        title="SVG Circle Visualization",
        description="Concentric circles demonstration",
        fullscreen_capable=True,
    )
    yield await yield_chunk(artifact, conversation_id, request_id)
    await delay(mode)
async def test_log_viewer(conversation_id: str, request_id: str, mode: str) -> AsyncGenerator[ChatStreamChunk, None]:
    """Test log viewer component: seed entries, then append one per level.

    add_entry appears to return a new/updated component each time (the result
    is rebound), so each yield streams the viewer's latest state.
    """
    log_id = str(uuid.uuid4())
    test_state["component_ids"]["log_viewer"] = log_id
    # Create initial log viewer with entries
    log_viewer = LogViewerComponent(
        id=log_id,
        title="System Logs",
        entries=[
            LogEntry(message="System started", level="info"),
            LogEntry(message="Loading configuration...", level="info"),
            LogEntry(message="Configuration loaded successfully", level="info"),
        ],
        searchable=True,
        auto_scroll=True,
    )
    yield await yield_chunk(log_viewer, conversation_id, request_id)
    await delay(mode, 0.05, 0.3)
    # Add warning
    log_viewer = log_viewer.add_entry("Memory usage at 75%", level="warning")
    yield await yield_chunk(log_viewer, conversation_id, request_id)
    await delay(mode, 0.05, 0.3)
    # Add error with structured context data
    log_viewer = log_viewer.add_entry("Connection timeout", level="error", data={"host": "api.example.com", "port": 443})
    yield await yield_chunk(log_viewer, conversation_id, request_id)
    await delay(mode, 0.05, 0.3)
    # Add success
    log_viewer = log_viewer.add_entry("Reconnected successfully", level="info")
    yield await yield_chunk(log_viewer, conversation_id, request_id)
    await delay(mode)
async def test_ui_state_updates(conversation_id: str, request_id: str, mode: str) -> AsyncGenerator[ChatStreamChunk, None]:
    """Test UI state update components (status bar, sidebar task tracker, chat input)."""
    # Status bar update
    status_bar = StatusBarUpdateComponent(
        message="Running comprehensive component test...",
        status="info",
    )
    yield await yield_chunk(status_bar, conversation_id, request_id)
    await delay(mode, 0.1, 0.3)
    # Task tracker - add tasks to sidebar
    task1 = Task(
        title="Validate Text Components",
        description="Test text, markdown, and formatting",
        status="completed",
        progress=1.0,
    )
    task_tracker_add1 = TaskTrackerUpdateComponent.add_task(task1)
    yield await yield_chunk(task_tracker_add1, conversation_id, request_id)
    await delay(mode, 0.1, 0.3)
    task2 = Task(
        title="Validate Data Components",
        description="Test DataFrame, Chart, Code blocks",
        status="in_progress",
        progress=0.6,
    )
    task_tracker_add2 = TaskTrackerUpdateComponent.add_task(task2)
    yield await yield_chunk(task_tracker_add2, conversation_id, request_id)
    await delay(mode, 0.1, 0.3)
    task3 = Task(
        title="Validate Interactive Components",
        description="Test buttons, actions, and UI state",
        status="pending",
    )
    task_tracker_add3 = TaskTrackerUpdateComponent.add_task(task3)
    yield await yield_chunk(task_tracker_add3, conversation_id, request_id)
    await delay(mode, 0.1, 0.3)
    # Update task 2 to completed, addressing it by the id assigned at creation
    task_tracker_update = TaskTrackerUpdateComponent(
        operation=TaskOperation.UPDATE_TASK,
        task_id=task2.id,
        status="completed",
        progress=1.0,
    )
    yield await yield_chunk(task_tracker_update, conversation_id, request_id)
    await delay(mode, 0.1, 0.3)
    # Update status bar
    status_bar_complete = StatusBarUpdateComponent(
        message="All components validated successfully!",
        status="success",
    )
    yield await yield_chunk(status_bar_complete, conversation_id, request_id)
    await delay(mode, 0.1, 0.3)
    # Chat input update - change placeholder
    chat_input = ChatInputUpdateComponent(
        placeholder="Type a message to test chat input updates...",
        disabled=False,
    )
    yield await yield_chunk(chat_input, conversation_id, request_id)
    await delay(mode)
async def run_comprehensive_test(conversation_id: str, request_id: str, mode: str) -> AsyncGenerator[ChatStreamChunk, None]:
    """Run all component tests in sequence, bracketed by intro/outro messages.

    Each test_* sub-generator is drained in order so components stream to the
    frontend one at a time with mode-dependent pacing.
    """
    # Introduction
    intro = RichTextComponent(
        content=f"""# π§ͺ Comprehensive Component Test
**Mode**: {mode}
## Test Coverage
This test validates **16 component types** supported by the webcomponent:
- β
Component creation
- β
Incremental updates
- β
Markdown rendering
- β
Interactive actions
- β
Data visualization
### Component Categories
1. **Primitive**: Text, Badge, Icon Text
2. **Feedback**: Status Card, Progress, Notifications, Logs
3. **Data**: Card, Task List, DataFrame, Chart, Code
4. **Specialized**: Artifact (SVG/HTML)
5. **Interactive**: Buttons with actions
Watch the sidebar checklist as components render! β‘οΈ""",
        markdown=True,
    )
    yield await yield_chunk(intro, conversation_id, request_id)
    await delay(mode)
    # Run all tests, forwarding every chunk each sub-test produces
    async for chunk in test_text_component(conversation_id, request_id, mode):
        yield chunk
    async for chunk in test_status_card(conversation_id, request_id, mode):
        yield chunk
    async for chunk in test_progress_display(conversation_id, request_id, mode):
        yield chunk
    async for chunk in test_card_component(conversation_id, request_id, mode):
        yield chunk
    async for chunk in test_task_list(conversation_id, request_id, mode):
        yield chunk
    async for chunk in test_progress_bar(conversation_id, request_id, mode):
        yield chunk
    async for chunk in test_notification(conversation_id, request_id, mode):
        yield chunk
    async for chunk in test_status_indicator(conversation_id, request_id, mode):
        yield chunk
    async for chunk in test_badge(conversation_id, request_id, mode):
        yield chunk
    async for chunk in test_icon_text(conversation_id, request_id, mode):
        yield chunk
    async for chunk in test_buttons(conversation_id, request_id, mode):
        yield chunk
    async for chunk in test_dataframe(conversation_id, request_id, mode):
        yield chunk
    async for chunk in test_chart(conversation_id, request_id, mode):
        yield chunk
    async for chunk in test_artifact(conversation_id, request_id, mode):
        yield chunk
    async for chunk in test_log_viewer(conversation_id, request_id, mode):
        yield chunk
    # NOTE: Table, Container, and CodeBlock components are defined in vanna Python package
    # but NOT supported by the webcomponent (no renderers). Skipping these tests.
    # These are candidates for removal from the vanna package.
    async for chunk in test_ui_state_updates(conversation_id, request_id, mode):
        yield chunk
    # Completion message
    done = StatusCardComponent(
        title="β
Test Suite Complete",
        status="completed",
        description=f"""All **16 component types** successfully rendered in **{mode}** mode!
**Validated:**
- Component creation & updates
- Markdown rendering
- Interactive buttons
- Data visualization
- UI state management
Check the sidebar for the complete checklist.""",
        icon="β
",
    )
    yield await yield_chunk(done, conversation_id, request_id)
async def handle_action_message(message: str, conversation_id: str, request_id: str) -> AsyncGenerator[ChatStreamChunk, None]:
    """Handle button action messages (slash-commands posted back by the UI).

    Increments the global action counter and streams a notification plus a
    confirmation card so interactivity is visible end-to-end.
    """
    test_state["action_count"] += 1
    response = NotificationComponent(
        message=f"Action received: {message}",
        level="success",
        title=f"Action #{test_state['action_count']}",
    )
    yield await yield_chunk(response, conversation_id, request_id)
    # Also show a card with details
    card = CardComponent(
        title="Action Handler Response",
        content=f"Received action: `{message}`\n\nThis confirms button interactivity is working!",
        icon="π―",
        status="success",
    )
    yield await yield_chunk(card, conversation_id, request_id)
# FastAPI app
app = FastAPI(title="Vanna Webcomponent Test Backend")
# CORS — wide-open ("*" origins with credentials) is acceptable only because
# this is a local test harness; do not copy this configuration to production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Mount static files (static directory for webcomponent); skipped silently
# when the directory does not exist so the API still works standalone.
static_path = os.path.join(os.path.dirname(__file__), "static")
if os.path.exists(static_path):
    app.mount("/static", StaticFiles(directory=static_path), name="static")
@app.post("/api/vanna/v2/chat_sse")
async def chat_sse(chat_request: ChatRequest) -> StreamingResponse:
    """SSE endpoint for streaming chat.

    Routes the message to the action handler (slash-commands), the
    comprehensive test, or a simple echo, and streams each chunk as an SSE
    "data:" frame, terminated by a "[DONE]" sentinel.
    """
    conversation_id = chat_request.conversation_id or str(uuid.uuid4())
    request_id = chat_request.request_id or str(uuid.uuid4())
    message = chat_request.message.strip()
    async def generate() -> AsyncGenerator[str, None]:
        """Generate SSE stream."""
        try:
            # Handle button actions ("/test" is excluded — it runs the suite below)
            if message.startswith("/") and message != "/test":
                async for chunk in handle_action_message(message, conversation_id, request_id):
                    yield f"data: {chunk.model_dump_json()}\n\n"
            # Handle test command or initial message
            elif message == "/test" or "test" in message.lower():
                async for chunk in run_comprehensive_test(conversation_id, request_id, test_state["mode"]):
                    yield f"data: {chunk.model_dump_json()}\n\n"
            # Default response: echo with a hint
            else:
                response = RichTextComponent(
                    content=f"You said: {message}\n\nType `/test` to run the comprehensive component test.",
                    markdown=True,
                )
                chunk = await yield_chunk(response, conversation_id, request_id)
                yield f"data: {chunk.model_dump_json()}\n\n"
            yield "data: [DONE]\n\n"
        except Exception as e:
            # Surface the full traceback to the client — useful here since
            # this backend exists purely to debug the webcomponent.
            error_message = f"{str(e)}\n\nTraceback:\n{traceback.format_exc()}"
            print(f"ERROR in chat_sse: {error_message}")  # Log to console
            error_chunk = {
                "type": "error",
                "data": {"message": error_message},
                "conversation_id": conversation_id,
                "request_id": request_id,
            }
            yield f"data: {json.dumps(error_chunk)}\n\n"
    return StreamingResponse(
        generate(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            # Disables response buffering in nginx-style reverse proxies so
            # SSE frames are delivered immediately.
            "X-Accel-Buffering": "no",
        },
    )
@app.get("/health")
async def health():
    """Liveness probe; also reports the currently active test mode."""
    payload = {"status": "ok"}
    payload["mode"] = test_state["mode"]
    return payload
@app.get("/")
async def root():
    """Serve the test HTML page if present, else a JSON index of endpoints."""
    html_path = os.path.join(os.path.dirname(__file__), "test-comprehensive.html")
    if os.path.exists(html_path):
        return FileResponse(html_path)
    # Fallback when the HTML harness is not checked out alongside this script
    return {
        "message": "Vanna Webcomponent Test Backend",
        "mode": test_state["mode"],
        "endpoints": {
            "chat": "POST /api/vanna/v2/chat_sse",
            "health": "GET /health",
        },
    }
if __name__ == "__main__":
    # CLI entry point: parse options, stash the mode in shared state, and
    # start uvicorn serving this module's `app`.
    parser = argparse.ArgumentParser(description="Test backend for vanna-webcomponent")
    parser.add_argument(
        "--mode",
        choices=["rapid", "realistic"],
        default="realistic",
        help="Test mode: rapid (fast) or realistic (with delays)",
    )
    parser.add_argument("--host", default="0.0.0.0", help="Host to bind to")
    parser.add_argument("--port", type=int, default=5555, help="Port to bind to")
    args = parser.parse_args()
    # The delay() helper and endpoints read the mode from this shared dict.
    test_state["mode"] = args.mode
    print(f"Starting test backend in {args.mode} mode...")
    print(f"Server running at http://{args.host}:{args.port}")
    print("Send message '/test' to run comprehensive component test")
    import uvicorn
    uvicorn.run(app, host=args.host, port=args.port)
| {
"repo_id": "vanna-ai/vanna",
"file_path": "frontends/webcomponent/test_backend.py",
"license": "MIT License",
"lines": 726,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vanna-ai/vanna:src/evals/benchmarks/llm_comparison.py | """
LLM Comparison Benchmark
This script compares different LLMs on SQL generation tasks.
Run from repository root:
PYTHONPATH=. python evals/benchmarks/llm_comparison.py
"""
import asyncio
import os
from pathlib import Path
from vanna import Agent
from vanna.core.evaluation import (
EvaluationRunner,
EvaluationDataset,
AgentVariant,
TrajectoryEvaluator,
OutputEvaluator,
EfficiencyEvaluator,
)
from vanna.integrations.anthropic import AnthropicLlmService
from vanna.integrations.local import MemoryConversationStore
from vanna.core.registry import ToolRegistry
def get_sql_tools() -> ToolRegistry:
    """Get SQL-related tools for testing.

    In a real scenario, this would return actual SQL tools.
    For this benchmark, we'll use a placeholder.
    """
    # TODO: Add actual SQL tools
    registry = ToolRegistry()
    return registry
async def compare_llms():
    """Compare different LLMs on SQL generation tasks.

    Loads the basic SQL-generation dataset, builds one AgentVariant per
    Anthropic model, runs them through the EvaluationRunner in parallel, and
    writes HTML/CSV reports under evals/results/.
    """
    print("=" * 80)
    print("LLM COMPARISON BENCHMARK - SQL Generation")
    print("=" * 80)
    print()
    # Load test dataset (path is resolved relative to this file, so the
    # script works regardless of the current working directory)
    dataset_path = (
        Path(__file__).parent.parent / "datasets" / "sql_generation" / "basic.yaml"
    )
    print(f"Loading dataset from: {dataset_path}")
    dataset = EvaluationDataset.from_yaml(str(dataset_path))
    print(f"Loaded dataset: {dataset.name}")
    print(f"Test cases: {len(dataset.test_cases)}")
    print()
    # Get API keys — falls back to a placeholder so the script can at least
    # start (actual API calls will fail without a real key).
    anthropic_key = os.getenv("ANTHROPIC_API_KEY")
    if not anthropic_key:
        print("β οΈ ANTHROPIC_API_KEY not set. Using placeholder.")
        anthropic_key = "test-key"
    # Create agent variants — one registry instance is shared by all variants
    print("Creating agent variants...")
    tool_registry = get_sql_tools()
    variants = [
        AgentVariant(
            name="claude-sonnet-4",
            agent=Agent(
                llm_service=AnthropicLlmService(
                    api_key=anthropic_key, model="claude-sonnet-4-20250514"
                ),
                tool_registry=tool_registry,
                conversation_store=MemoryConversationStore(),
            ),
            metadata={
                "provider": "anthropic",
                "model": "claude-sonnet-4-20250514",
                "version": "2025-05-14",
            },
        ),
        AgentVariant(
            name="claude-opus-4",
            agent=Agent(
                llm_service=AnthropicLlmService(
                    api_key=anthropic_key, model="claude-opus-4-20250514"
                ),
                tool_registry=tool_registry,
                conversation_store=MemoryConversationStore(),
            ),
            metadata={
                "provider": "anthropic",
                "model": "claude-opus-4-20250514",
                "version": "2025-05-14",
            },
        ),
    ]
    print(f"Created {len(variants)} variants:")
    for v in variants:
        print(f" - {v.name}")
    print()
    # Create evaluators
    evaluators = [
        TrajectoryEvaluator(),
        OutputEvaluator(),
        EfficiencyEvaluator(
            max_execution_time_ms=10000,
            max_tokens=5000,
        ),
    ]
    print(f"Using {len(evaluators)} evaluators:")
    for e in evaluators:
        print(f" - {e.name}")
    print()
    # Create runner with high concurrency for I/O bound tasks
    runner = EvaluationRunner(
        evaluators=evaluators,
        max_concurrency=20,  # Run 20 test cases concurrently
    )
    # Run comparison
    print("Running comparison (all variants in parallel)...")
    print(
        f"Total executions: {len(variants)} variants Γ {len(dataset.test_cases)} test cases = {len(variants) * len(dataset.test_cases)}"
    )
    print()
    comparison = await runner.compare_agents(variants, dataset.test_cases)
    # Print results
    print()
    comparison.print_summary()
    # Show winner along each axis
    print(f"π Best by score: {comparison.get_best_variant('score')}")
    print(f"β‘ Best by speed: {comparison.get_best_variant('speed')}")
    print(f"β
Best by pass rate: {comparison.get_best_variant('pass_rate')}")
    print()
    # Save reports
    output_dir = Path(__file__).parent.parent / "results"
    output_dir.mkdir(exist_ok=True)
    html_path = output_dir / "llm_comparison.html"
    csv_path = output_dir / "llm_comparison.csv"
    comparison.save_html(str(html_path))
    comparison.save_csv(str(csv_path))
    print(f"π Reports saved:")
    print(f" - HTML: {html_path}")
    print(f" - CSV: {csv_path}")
async def main():
    """Run the LLM comparison benchmark, reporting any failure.

    Catches all exceptions at this top-level boundary and prints them rather
    than letting the process die with an unformatted traceback.
    """
    try:
        await compare_llms()
    except Exception as e:
        print(f"β Error running benchmark: {e}")
        import traceback
        # Fix: the previous code also called traceback.print_stack(), which
        # dumps the *current* call stack (not the exception's) and duplicated
        # the output confusingly. print_exc() alone shows the exception's
        # actual traceback.
        traceback.print_exc()
if __name__ == "__main__":
    # Script entry point: run the async benchmark to completion.
    asyncio.run(main())
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/evals/benchmarks/llm_comparison.py",
"license": "MIT License",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/capabilities/agent_memory/base.py | """
Agent memory capability interface for tool usage learning.
This module contains the abstract base class for agent memory operations,
following the same pattern as the FileSystem interface.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Dict, List, Optional
if TYPE_CHECKING:
from vanna.core.tool import ToolContext
from .models import (
ToolMemorySearchResult,
TextMemory,
TextMemorySearchResult,
ToolMemory,
)
class AgentMemory(ABC):
    """Abstract base class for agent memory operations.

    Defines two parallel memory channels: structured *tool usage* memories
    (question + tool + args) and free-form *text* memories, each with save,
    search, recent-listing, and delete operations. Every method takes a
    ToolContext, which presumably scopes memories per user/conversation —
    confirm against concrete implementations.
    """

    @abstractmethod
    async def save_tool_usage(
        self,
        question: str,
        tool_name: str,
        args: Dict[str, Any],
        context: "ToolContext",
        success: bool = True,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Save a tool usage pattern for future reference."""
        pass

    @abstractmethod
    async def save_text_memory(
        self, content: str, context: "ToolContext"
    ) -> "TextMemory":
        """Save a free-form text memory and return the stored record."""
        pass

    @abstractmethod
    async def search_similar_usage(
        self,
        question: str,
        context: "ToolContext",
        *,
        limit: int = 10,
        similarity_threshold: float = 0.7,
        tool_name_filter: Optional[str] = None,
    ) -> List[ToolMemorySearchResult]:
        """Search for similar tool usage patterns based on a question.

        Keyword-only knobs: `limit` caps result count, `similarity_threshold`
        filters weak matches, and `tool_name_filter` restricts to one tool.
        """
        pass

    @abstractmethod
    async def search_text_memories(
        self,
        query: str,
        context: "ToolContext",
        *,
        limit: int = 10,
        similarity_threshold: float = 0.7,
    ) -> List["TextMemorySearchResult"]:
        """Search stored text memories based on a query."""
        pass

    @abstractmethod
    async def get_recent_memories(
        self, context: "ToolContext", limit: int = 10
    ) -> List[ToolMemory]:
        """Get recently added memories. Returns most recent memories first."""
        pass

    @abstractmethod
    async def get_recent_text_memories(
        self, context: "ToolContext", limit: int = 10
    ) -> List["TextMemory"]:
        """Fetch recently stored text memories."""
        pass

    @abstractmethod
    async def delete_by_id(self, context: "ToolContext", memory_id: str) -> bool:
        """Delete a memory by its ID. Returns True if deleted, False if not found."""
        pass

    @abstractmethod
    async def delete_text_memory(self, context: "ToolContext", memory_id: str) -> bool:
        """Delete a text memory by its ID. Returns True if deleted, False if not found."""
        pass

    @abstractmethod
    async def clear_memories(
        self,
        context: "ToolContext",
        tool_name: Optional[str] = None,
        before_date: Optional[str] = None,
    ) -> int:
        """Clear stored memories (tool or text). Returns number of memories deleted.

        Optional filters narrow the purge to one tool and/or entries older
        than `before_date` (string format is implementation-defined).
        """
        pass
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/capabilities/agent_memory/base.py",
"license": "MIT License",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/capabilities/agent_memory/models.py | """
Memory storage models and types.
"""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel
class ToolMemory(BaseModel):
    """Represents a stored tool usage memory."""

    memory_id: Optional[str] = None  # NOTE(review): presumably assigned by the store on save — confirm
    question: str  # natural-language question that triggered the tool call
    tool_name: str  # name of the tool that was invoked
    args: Dict[str, Any]  # arguments the tool was called with
    timestamp: Optional[str] = None  # when the usage was recorded; format not enforced here
    success: bool = True  # whether the invocation succeeded
    metadata: Optional[Dict[str, Any]] = None  # optional extra details about the call
class TextMemory(BaseModel):
    """Represents a stored free-form text memory."""

    memory_id: Optional[str] = None  # NOTE(review): presumably assigned by the store on save — confirm
    content: str  # the free-form text that was saved
    timestamp: Optional[str] = None  # when the memory was stored; format not enforced here
class ToolMemorySearchResult(BaseModel):
    """Represents a search result from tool memory storage."""

    memory: ToolMemory  # the matched memory record
    similarity_score: float  # NOTE(review): presumably higher means more similar — confirm scale
    rank: int  # position of this hit within the result list
class TextMemorySearchResult(BaseModel):
    """Represents a search result from text memory storage."""

    memory: TextMemory  # the matched text memory record
    similarity_score: float  # NOTE(review): presumably higher means more similar — confirm scale
    rank: int  # position of this hit within the result list
class MemoryStats(BaseModel):
    """Memory storage statistics."""

    total_memories: int  # number of stored memories
    unique_tools: int  # count of distinct tool names seen
    unique_questions: int  # count of distinct questions seen
    success_rate: float  # NOTE(review): presumably a fraction in [0, 1] — confirm
    most_used_tools: Dict[str, int]  # tool name -> usage count
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/capabilities/agent_memory/models.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/capabilities/file_system/base.py | """
File system capability interface.
This module contains the abstract base class for file system operations.
"""
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, List, Optional
from .models import CommandResult, FileSearchMatch
if TYPE_CHECKING:
from vanna.core.tool import ToolContext
class FileSystem(ABC):
    """Contract for file-system access used by tools.

    Implementations provide listing, reading, writing, existence checks,
    content search, and shell execution.
    """

    @abstractmethod
    async def list_files(self, directory: str, context: "ToolContext") -> List[str]:
        """Return the files contained in ``directory``."""
        ...

    @abstractmethod
    async def read_file(self, filename: str, context: "ToolContext") -> str:
        """Return the contents of ``filename``."""
        ...

    @abstractmethod
    async def write_file(
        self,
        filename: str,
        content: str,
        context: "ToolContext",
        overwrite: bool = False,
    ) -> None:
        """Write ``content`` to ``filename``."""
        ...

    @abstractmethod
    async def exists(self, path: str, context: "ToolContext") -> bool:
        """Return True when ``path`` names an existing file or directory."""
        ...

    @abstractmethod
    async def is_directory(self, path: str, context: "ToolContext") -> bool:
        """Return True when ``path`` is a directory."""
        ...

    @abstractmethod
    async def search_files(
        self,
        query: str,
        context: "ToolContext",
        *,
        max_results: int = 20,
        include_content: bool = False,
    ) -> List[FileSearchMatch]:
        """Find files matching ``query`` within the accessible namespace."""
        ...

    @abstractmethod
    async def run_bash(
        self,
        command: str,
        context: "ToolContext",
        *,
        timeout: Optional[float] = None,
    ) -> CommandResult:
        """Execute a bash ``command`` within the accessible namespace."""
        ...
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/capabilities/file_system/base.py",
"license": "MIT License",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/capabilities/file_system/models.py | """
File system capability models.
This module contains data models for file system operations.
"""
from dataclasses import dataclass
from typing import Optional
@dataclass
class FileSearchMatch:
    """Represents a single search result within a file system."""

    path: str  # location of the matched file
    snippet: Optional[str] = None  # optional excerpt of matching content
@dataclass
class CommandResult:
    """Represents the result of executing a shell command."""

    stdout: str  # captured standard output
    stderr: str  # captured standard error
    returncode: int  # process exit status; 0 conventionally means success
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/capabilities/file_system/models.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/capabilities/sql_runner/base.py | """
SQL runner capability interface.
This module contains the abstract base class for SQL execution.
"""
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
import pandas as pd
from .models import RunSqlToolArgs
if TYPE_CHECKING:
from vanna.core.tool import ToolContext
class SqlRunner(ABC):
    """Abstraction over SQL execution backends."""

    @abstractmethod
    async def run_sql(
        self, args: RunSqlToolArgs, context: "ToolContext"
    ) -> pd.DataFrame:
        """Run the SQL query described by ``args``.

        Args:
            args: SQL query arguments.
            context: Tool execution context.

        Returns:
            DataFrame containing the query results.

        Raises:
            Exception: If query execution fails.
        """
        ...
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/capabilities/sql_runner/base.py",
"license": "MIT License",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/capabilities/sql_runner/models.py | """
SQL runner capability models.
This module contains data models for SQL execution.
"""
from pydantic import BaseModel, Field
class RunSqlToolArgs(BaseModel):
    """Arguments for run_sql tool."""

    # The Field description doubles as the field's schema documentation.
    sql: str = Field(description="SQL query to execute")
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/capabilities/sql_runner/models.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/components/base.py | """
UI components base - re-exports UiComponent from core.
UiComponent lives in core/ because it's a fundamental return type for tools.
This module provides backward compatibility by re-exporting it here.
"""
# Re-export UiComponent from core for backward compatibility
from ..core.components import UiComponent
__all__ = ["UiComponent"]
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/components/base.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/components/rich/containers/card.py | """Card component for displaying structured information."""
from typing import Any, Dict, List, Optional
from pydantic import Field
from ....core.rich_component import RichComponent, ComponentType
class CardComponent(RichComponent):
    """Card component for displaying structured information."""

    type: ComponentType = ComponentType.CARD
    title: str  # card heading
    content: str  # main body text
    subtitle: Optional[str] = None  # secondary heading under the title
    icon: Optional[str] = None
    status: Optional[str] = None  # "success", "warning", "error", "info"
    actions: List[Dict[str, Any]] = Field(default_factory=list)  # NOTE(review): action dict shape not defined here — confirm against renderer
    collapsible: bool = False
    collapsed: bool = False  # initial collapsed state when collapsible
    markdown: bool = False  # Whether content should be rendered as markdown
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/components/rich/containers/card.py",
"license": "MIT License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/components/rich/data/chart.py | """Chart component for data visualization."""
from typing import Any, Dict, Optional, Union
from pydantic import Field
from ....core.rich_component import RichComponent, ComponentType
class ChartComponent(RichComponent):
    """Chart component for data visualization."""

    type: ComponentType = ComponentType.CHART
    chart_type: str  # "line", "bar", "pie", "scatter", etc.
    data: Dict[str, Any]  # Chart data in format expected by frontend
    title: Optional[str] = None
    width: Optional[Union[str, int]] = None  # CSS size string or pixel count
    height: Optional[Union[str, int]] = None  # CSS size string or pixel count
    config: Dict[str, Any] = Field(default_factory=dict)  # Chart-specific config
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/components/rich/data/chart.py",
"license": "MIT License",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/components/rich/data/dataframe.py | """DataFrame component for displaying tabular data."""
from typing import Any, Dict, List, Optional
from pydantic import Field
from ....core.rich_component import RichComponent, ComponentType
class DataFrameComponent(RichComponent):
    """DataFrame component specifically for displaying tabular data from SQL queries and similar sources."""

    type: ComponentType = ComponentType.DATAFRAME
    rows: List[Dict[str, Any]] = Field(default_factory=list)  # List of row dictionaries
    columns: List[str] = Field(default_factory=list)  # Column names in display order
    title: Optional[str] = None
    description: Optional[str] = None
    row_count: int = 0  # auto-derived from rows in __init__ unless passed explicitly
    column_count: int = 0  # auto-derived from columns in __init__ unless passed explicitly
    # Display options
    max_rows_displayed: int = 100  # Limit rows shown in UI
    searchable: bool = True
    sortable: bool = True
    filterable: bool = True
    exportable: bool = True  # Allow export to CSV/Excel
    # Styling options
    striped: bool = True
    bordered: bool = True
    compact: bool = False
    # Pagination
    paginated: bool = True
    page_size: int = 25
    # Data types for better formatting (optional)
    column_types: Dict[str, str] = Field(
        default_factory=dict
    )  # column_name -> "string"|"number"|"date"|"boolean"

    def __init__(self, **kwargs: Any) -> None:
        """Initialize the component and derive row/column counts.

        Counts and the column list are only auto-computed when the caller
        did not supply them, so explicit values always win.
        """
        # Set defaults before calling super().__init__
        if "rows" not in kwargs:
            kwargs["rows"] = []
        if "columns" not in kwargs:
            kwargs["columns"] = []
        if "column_types" not in kwargs:
            kwargs["column_types"] = {}
        super().__init__(**kwargs)
        # Auto-calculate counts if not provided
        if self.rows and len(self.rows) > 0:
            if "row_count" not in kwargs:
                self.row_count = len(self.rows)
            # Infer column order from the first row when none was given.
            if not self.columns and self.rows:
                self.columns = list(self.rows[0].keys())
            if "column_count" not in kwargs:
                self.column_count = len(self.columns)
        else:
            if "row_count" not in kwargs:
                self.row_count = 0
            if "column_count" not in kwargs:
                self.column_count = len(self.columns) if self.columns else 0

    @classmethod
    def from_records(
        cls,
        records: List[Dict[str, Any]],
        title: Optional[str] = None,
        description: Optional[str] = None,
        **kwargs: Any,
    ) -> "DataFrameComponent":
        """Create a DataFrame component from a list of record dictionaries."""
        # Column order comes from the first record; empty input yields no columns.
        columns = list(records[0].keys()) if records else []
        # Ensure we pass the required arguments correctly
        component_data = {
            "rows": records,
            "columns": columns,
            "row_count": len(records),
            "column_count": len(columns),
            "column_types": {},  # Initialize empty dict
        }
        if title is not None:
            component_data["title"] = title
        if description is not None:
            component_data["description"] = description
        # Merge with any additional kwargs; caller-supplied keys override the above.
        component_data.update(kwargs)
        return cls(**component_data)
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/components/rich/data/dataframe.py",
"license": "MIT License",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
vanna-ai/vanna:src/vanna/components/rich/feedback/badge.py | """Badge component for displaying status or labels."""
from typing import Optional
from ....core.rich_component import RichComponent, ComponentType
class BadgeComponent(RichComponent):
    """Simple badge/pill component for displaying status or labels."""

    type: ComponentType = ComponentType.BADGE
    text: str  # label shown inside the badge
    variant: str = (
        "default"  # "default", "primary", "success", "warning", "error", "info"
    )
    size: str = "medium"  # "small", "medium", "large"
    icon: Optional[str] = None  # optional leading icon
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/components/rich/feedback/badge.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/components/rich/feedback/icon_text.py | """Icon with text component."""
from ....core.rich_component import RichComponent, ComponentType
class IconTextComponent(RichComponent):
    """Simple component for displaying an icon with text."""

    type: ComponentType = ComponentType.ICON_TEXT
    icon: str  # icon shown next to the text
    text: str
    variant: str = "default"  # "default", "primary", "secondary", "muted"
    size: str = "medium"  # "small", "medium", "large"
    alignment: str = "left"  # "left", "center", "right"
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/components/rich/feedback/icon_text.py",
"license": "MIT License",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/components/rich/feedback/log_viewer.py | """Log viewer component."""
import uuid
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
from ....core.rich_component import RichComponent, ComponentType
class LogEntry(BaseModel):
    """Log entry for tool execution.

    Attributes carry a naive UTC ISO-8601 timestamp, a severity level,
    the log message, and optional structured data.
    """

    # datetime.utcnow() is deprecated since Python 3.12; derive the same
    # naive-UTC ISO string from an aware UTC datetime instead.
    timestamp: str = Field(
        default_factory=lambda: datetime.now(timezone.utc)
        .replace(tzinfo=None)
        .isoformat()
    )
    level: str = "info"  # "debug", "info", "warning", "error"
    message: str
    data: Optional[Dict[str, Any]] = None  # optional structured payload
class LogViewerComponent(RichComponent):
    """Generic log viewer for displaying timestamped entries."""

    type: ComponentType = ComponentType.LOG_VIEWER
    title: str = "Logs"
    entries: List[LogEntry] = Field(default_factory=list)
    max_entries: int = 100
    searchable: bool = True
    show_timestamps: bool = True
    auto_scroll: bool = True

    def add_entry(
        self, message: str, level: str = "info", data: Optional[Dict[str, Any]] = None
    ) -> "LogViewerComponent":
        """Return a copy of this viewer with one more entry appended.

        At most ``max_entries`` of the newest entries are kept.
        """
        entry = LogEntry(message=message, level=level, data=data)
        # Slicing from the right keeps only the newest max_entries items
        # while preserving chronological order.
        trimmed = (self.entries + [entry])[-self.max_entries :]
        return self.update(entries=trimmed)
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/components/rich/feedback/log_viewer.py",
"license": "MIT License",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/components/rich/feedback/notification.py | """Notification component for alerts and messages."""
from typing import Any, Dict, List, Optional
from pydantic import Field
from ....core.rich_component import RichComponent, ComponentType
class NotificationComponent(RichComponent):
    """Notification component for alerts and messages."""

    type: ComponentType = ComponentType.NOTIFICATION
    message: str  # notification body text
    title: Optional[str] = None
    level: str = "info"  # "success", "info", "warning", "error"
    icon: Optional[str] = None
    dismissible: bool = True  # whether the user can close it
    auto_dismiss: bool = False
    auto_dismiss_delay: int = 5000  # milliseconds
    actions: List[Dict[str, Any]] = Field(default_factory=list)  # NOTE(review): action dict shape not defined here — confirm against renderer
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/components/rich/feedback/notification.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/components/rich/feedback/progress.py | """Progress components for displaying progress indicators."""
from typing import Any, Dict, Optional
from ....core.rich_component import RichComponent, ComponentType
class ProgressBarComponent(RichComponent):
    """Progress bar with status and value."""

    type: ComponentType = ComponentType.PROGRESS_BAR
    value: float  # 0.0 to 1.0
    label: Optional[str] = None
    show_percentage: bool = True
    status: Optional[str] = None  # "success", "warning", "error"
    animated: bool = False
class ProgressDisplayComponent(RichComponent):
    """Generic progress display for any long-running process."""

    type: ComponentType = ComponentType.PROGRESS_DISPLAY
    label: str
    value: float = 0.0  # 0.0 to 1.0
    description: Optional[str] = None
    status: Optional[str] = None  # "info", "success", "warning", "error"
    show_percentage: bool = True
    animated: bool = False
    indeterminate: bool = False

    def update_progress(
        self, value: float, description: Optional[str] = None
    ) -> "ProgressDisplayComponent":
        """Return a copy with the progress value (clamped to [0, 1]) updated."""
        clamped = max(0.0, min(1.0, value))
        if description is None:
            return self.update(value=clamped)
        return self.update(value=clamped, description=description)
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/components/rich/feedback/progress.py",
"license": "MIT License",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/components/rich/feedback/status_card.py | """Status card component for displaying process status."""
from typing import Any, Dict, List, Optional
from pydantic import Field
from ....core.rich_component import RichComponent, ComponentType
class StatusCardComponent(RichComponent):
    """Generic status card that can display any process status."""

    type: ComponentType = ComponentType.STATUS_CARD
    title: str
    status: str  # "pending", "running", "completed", "failed", "success", "warning", "error"
    description: Optional[str] = None
    icon: Optional[str] = None
    metadata: Dict[str, Any] = Field(default_factory=dict)
    actions: List[Dict[str, Any]] = Field(default_factory=list)
    collapsible: bool = False
    collapsed: bool = False

    def set_status(
        self, status: str, description: Optional[str] = None
    ) -> "StatusCardComponent":
        """Return a copy with ``status`` (and optionally ``description``) replaced."""
        if description is None:
            return self.update(status=status)
        return self.update(status=status, description=description)
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/components/rich/feedback/status_card.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/components/rich/feedback/status_indicator.py | """Status indicator component."""
from typing import Optional
from ....core.rich_component import RichComponent, ComponentType
class StatusIndicatorComponent(RichComponent):
    """Status indicator with icon and message."""

    type: ComponentType = ComponentType.STATUS_INDICATOR
    status: str  # "success", "warning", "error", "info", "loading"
    message: str
    icon: Optional[str] = None
    pulse: bool = False  # whether the indicator animates with a pulse
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/components/rich/feedback/status_indicator.py",
"license": "MIT License",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/components/rich/interactive/button.py | """Button component for interactive actions."""
from typing import Any, Dict, List, Literal, Optional
from ....core.rich_component import ComponentType, RichComponent
class ButtonComponent(RichComponent):
    """Clickable button rendered in the UI.

    When clicked, the button's ``action`` value is sent as a message to
    the chat input.

    Args:
        label: Text displayed on the button.
        action: Message/command sent when the button is clicked.
        variant: Visual style variant.
        size: Button size.
        icon: Optional emoji or icon.
        icon_position: Which side of the label the icon appears on.
        disabled: Whether the button is disabled.

    Example:
        ButtonComponent(
            label="Generate Report",
            action="/report sales",
            variant="primary",
        )
    """

    def __init__(
        self,
        label: str,
        action: str,
        variant: Literal[
            "primary", "secondary", "success", "warning", "error", "ghost", "link"
        ] = "primary",
        size: Literal["small", "medium", "large"] = "medium",
        icon: Optional[str] = None,
        icon_position: Literal["left", "right"] = "left",
        disabled: bool = False,
    ):
        # Assemble the render payload, then delegate to the base component.
        payload: Dict[str, Any] = {
            "label": label,
            "action": action,
            "variant": variant,
            "size": size,
            "icon": icon,
            "icon_position": icon_position,
            "disabled": disabled,
        }
        super().__init__(type=ComponentType.BUTTON, data=payload)
class ButtonGroupComponent(RichComponent):
    """Group of buttons with consistent styling.

    Args:
        buttons: List of button data dictionaries.
        orientation: Layout direction.
        spacing: Gap between buttons.
        alignment: Button alignment within the group.
        full_width: Whether buttons should stretch to fill the width.

    Example:
        ButtonGroupComponent(
            buttons=[
                {"label": "Yes", "action": "/confirm yes", "variant": "success"},
                {"label": "No", "action": "/confirm no", "variant": "error"},
            ],
            orientation="horizontal",
            spacing="medium",
        )
    """

    def __init__(
        self,
        buttons: List[Dict[str, Any]],
        orientation: Literal["horizontal", "vertical"] = "horizontal",
        spacing: Literal["small", "medium", "large"] = "medium",
        alignment: Literal["start", "center", "end", "stretch"] = "start",
        full_width: bool = False,
    ):
        # Assemble the render payload, then delegate to the base component.
        payload: Dict[str, Any] = {
            "buttons": buttons,
            "orientation": orientation,
            "spacing": spacing,
            "alignment": alignment,
            "full_width": full_width,
        }
        super().__init__(type=ComponentType.BUTTON_GROUP, data=payload)
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/components/rich/interactive/button.py",
"license": "MIT License",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/components/rich/interactive/task_list.py | """Task list component for interactive task tracking."""
import uuid
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
from ....core.rich_component import RichComponent, ComponentType
class Task(BaseModel):
    """Individual task in a task list."""

    # Unique identifier generated per task instance.
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    title: str
    description: Optional[str] = None
    status: str = "pending"  # "pending", "in_progress", "completed", "error"
    progress: Optional[float] = None  # 0.0 to 1.0
    # datetime.utcnow() is deprecated since Python 3.12; derive the same
    # naive-UTC ISO string from an aware UTC datetime instead.
    created_at: str = Field(
        default_factory=lambda: datetime.now(timezone.utc)
        .replace(tzinfo=None)
        .isoformat()
    )
    completed_at: Optional[str] = None  # set when the task finishes
    metadata: Dict[str, Any] = Field(default_factory=dict)
class TaskListComponent(RichComponent):
    """Interactive task list with progress tracking."""

    type: ComponentType = ComponentType.TASK_LIST
    title: str = "Tasks"
    tasks: List[Task] = Field(default_factory=list)
    show_progress: bool = True
    allow_reorder: bool = False
    show_timestamps: bool = True
    filter_status: Optional[str] = None  # Filter by task status

    def add_task(self, task: Task) -> "TaskListComponent":
        """Return a copy of this component with ``task`` appended."""
        new_tasks = self.tasks + [task]
        return self.update(tasks=new_tasks)

    def update_task(self, task_id: str, **updates: Any) -> "TaskListComponent":
        """Return a copy with the task matching ``task_id`` updated.

        An unknown ``task_id`` leaves the list unchanged.
        """
        new_tasks = []
        for task in self.tasks:
            if task.id == task_id:
                # Rebuild the task from its dumped fields plus the updates.
                task_data = task.model_dump()
                task_data.update(updates)
                new_tasks.append(Task(**task_data))
            else:
                new_tasks.append(task)
        return self.update(tasks=new_tasks)

    def complete_task(self, task_id: str) -> "TaskListComponent":
        """Mark the task matching ``task_id`` as completed.

        Stamps ``completed_at`` with a naive UTC ISO-8601 timestamp;
        datetime.utcnow() is deprecated since Python 3.12, so the value
        is derived from an aware UTC datetime instead.
        """
        return self.update_task(
            task_id,
            status="completed",
            completed_at=datetime.now(timezone.utc).replace(tzinfo=None).isoformat(),
            progress=1.0,
        )
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/components/rich/interactive/task_list.py",
"license": "MIT License",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/components/rich/interactive/ui_state.py | """UI state update components for controlling interface elements."""
from enum import Enum
from typing import Any, Optional
from .task_list import Task
from ....core.rich_component import RichComponent, ComponentType
class StatusBarUpdateComponent(RichComponent):
    """Update for the status bar shown above the chat input."""

    type: ComponentType = ComponentType.STATUS_BAR_UPDATE
    status: str  # "idle", "working", "success", "error"
    message: str
    detail: Optional[str] = None

    def __init__(self, **kwargs: Any) -> None:
        """Create the update, pinning every instance to one stable id."""
        if "id" not in kwargs:
            kwargs["id"] = "vanna-status-bar"
        super().__init__(**kwargs)
class TaskOperation(str, Enum):
    """Operations for task tracker updates."""

    ADD_TASK = "add_task"  # add a new task to the tracker
    UPDATE_TASK = "update_task"  # modify an existing task
    REMOVE_TASK = "remove_task"  # remove a task by id
    CLEAR_TASKS = "clear_tasks"  # drop every tracked task
class TaskTrackerUpdateComponent(RichComponent):
    """Update for the task tracker shown in the sidebar."""

    type: ComponentType = ComponentType.TASK_TRACKER_UPDATE
    operation: TaskOperation
    task: Optional[Task] = None  # payload for ADD_TASK
    task_id: Optional[str] = None  # target for UPDATE_TASK and REMOVE_TASK
    status: Optional[str] = None  # UPDATE_TASK only
    progress: Optional[float] = None  # UPDATE_TASK only
    detail: Optional[str] = None  # UPDATE_TASK only

    def __init__(self, **kwargs: Any) -> None:
        """Create the update, pinning every instance to one stable id."""
        if "id" not in kwargs:
            kwargs["id"] = "vanna-task-tracker"
        super().__init__(**kwargs)

    @classmethod
    def add_task(cls, task: Task) -> "TaskTrackerUpdateComponent":
        """Build an update that adds ``task`` to the tracker."""
        return cls(operation=TaskOperation.ADD_TASK, task=task)

    @classmethod
    def update_task(
        cls,
        task_id: str,
        status: Optional[str] = None,
        progress: Optional[float] = None,
        detail: Optional[str] = None,
    ) -> "TaskTrackerUpdateComponent":
        """Build an update that modifies the task with ``task_id``."""
        return cls(
            operation=TaskOperation.UPDATE_TASK,
            task_id=task_id,
            status=status,
            progress=progress,
            detail=detail,
        )

    @classmethod
    def remove_task(cls, task_id: str) -> "TaskTrackerUpdateComponent":
        """Build an update that removes the task with ``task_id``."""
        return cls(operation=TaskOperation.REMOVE_TASK, task_id=task_id)

    @classmethod
    def clear_tasks(cls) -> "TaskTrackerUpdateComponent":
        """Build an update that clears every tracked task."""
        return cls(operation=TaskOperation.CLEAR_TASKS)
class ChatInputUpdateComponent(RichComponent):
    """Update for the chat input's state and appearance."""

    type: ComponentType = ComponentType.CHAT_INPUT_UPDATE
    placeholder: Optional[str] = None
    disabled: Optional[bool] = None
    value: Optional[str] = None  # Set input text value
    focus: Optional[bool] = None  # Focus/unfocus the input

    def __init__(self, **kwargs: Any) -> None:
        """Create the update, pinning every instance to one stable id."""
        if "id" not in kwargs:
            kwargs["id"] = "vanna-chat-input"
        super().__init__(**kwargs)
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/components/rich/interactive/ui_state.py",
"license": "MIT License",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
vanna-ai/vanna:src/vanna/components/rich/specialized/artifact.py | """Artifact component for interactive content."""
import uuid
from typing import Optional
from pydantic import Field
from ....core.rich_component import RichComponent, ComponentType
class ArtifactComponent(RichComponent):
    """Component for displaying interactive artifacts that can be rendered externally."""

    type: ComponentType = ComponentType.ARTIFACT
    # Random short id generated per artifact instance.
    artifact_id: str = Field(default_factory=lambda: f"artifact_{uuid.uuid4().hex[:8]}")
    content: str  # HTML/SVG/JS content
    artifact_type: str  # "html", "svg", "visualization", "interactive", "d3", "threejs"
    title: Optional[str] = None
    description: Optional[str] = None
    editable: bool = True
    fullscreen_capable: bool = True
    external_renderable: bool = True  # whether an external renderer may display it
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/components/rich/specialized/artifact.py",
"license": "MIT License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/components/rich/text.py | """Rich text component."""
from typing import Optional
from ...core.rich_component import RichComponent, ComponentType
class RichTextComponent(RichComponent):
    """Rich text component with formatting options."""

    type: ComponentType = ComponentType.TEXT
    content: str  # the text to display
    markdown: bool = False  # render content as markdown when True
    code_language: Optional[str] = None  # For syntax highlighting
    font_size: Optional[str] = None
    font_weight: Optional[str] = None
    text_align: Optional[str] = None
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/components/rich/text.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/components/simple/image.py | """Simple image component."""
from typing import Optional
from pydantic import Field
from ...core.simple_component import SimpleComponent, SimpleComponentType
class SimpleImageComponent(SimpleComponent):
    """A simple image component."""

    type: SimpleComponentType = SimpleComponentType.IMAGE
    # `...` marks the field as required in pydantic.
    url: str = Field(..., description="The URL of the image to display.")
    alt_text: Optional[str] = Field(
        default=None, description="Alternative text for the image."
    )
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/components/simple/image.py",
"license": "MIT License",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/components/simple/link.py | """Simple link component."""
from typing import Optional
from pydantic import Field
from ...core.simple_component import SimpleComponent, SimpleComponentType
class SimpleLinkComponent(SimpleComponent):
    """A simple link component."""

    type: SimpleComponentType = SimpleComponentType.LINK
    # `...` marks the field as required in pydantic.
    url: str = Field(..., description="The URL the link points to.")
    text: Optional[str] = Field(
        default=None, description="The display text for the link."
    )
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/components/simple/link.py",
"license": "MIT License",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/components/simple/text.py | """Simple text component."""
from pydantic import Field
from ...core.simple_component import SimpleComponent, SimpleComponentType
class SimpleTextComponent(SimpleComponent):
    """A simple text component."""

    type: SimpleComponentType = SimpleComponentType.TEXT
    # `...` marks the field as required in pydantic.
    text: str = Field(..., description="The text content to display.")
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/components/simple/text.py",
"license": "MIT License",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/core/_compat.py | """
Compatibility shims for different Python versions.
This module provides compatibility utilities for features that vary across
Python versions.
"""
try:
from enum import StrEnum # Py 3.11+
except ImportError: # Py < 3.11
from enum import Enum
class StrEnum(str, Enum): # type: ignore[no-redef]
"""Minimal backport of StrEnum for Python < 3.11."""
pass
__all__ = ["StrEnum"]
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/_compat.py",
"license": "MIT License",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/core/agent/agent.py | """
Agent implementation for the Vanna Agents framework.
This module provides the main Agent class that orchestrates the interaction
between LLM services, tools, and conversation storage.
"""
import traceback
import uuid
from typing import TYPE_CHECKING, AsyncGenerator, List, Optional
from vanna.components import (
UiComponent,
SimpleTextComponent,
RichTextComponent,
StatusBarUpdateComponent,
TaskTrackerUpdateComponent,
ChatInputUpdateComponent,
StatusCardComponent,
Task,
)
from .config import AgentConfig
from vanna.core.storage import ConversationStore
from vanna.core.llm import LlmService
from vanna.core.system_prompt import SystemPromptBuilder
from vanna.core.storage import Conversation, Message
from vanna.core.llm import LlmMessage, LlmRequest, LlmResponse
from vanna.core.tool import ToolCall, ToolContext, ToolResult, ToolSchema
from vanna.core.user import User
from vanna.core.registry import ToolRegistry
from vanna.core.system_prompt import DefaultSystemPromptBuilder
from vanna.core.lifecycle import LifecycleHook
from vanna.core.middleware import LlmMiddleware
from vanna.core.workflow import WorkflowHandler, DefaultWorkflowHandler
from vanna.core.recovery import ErrorRecoveryStrategy, RecoveryActionType
from vanna.core.enricher import ToolContextEnricher
from vanna.core.enhancer import LlmContextEnhancer, DefaultLlmContextEnhancer
from vanna.core.filter import ConversationFilter
from vanna.core.observability import ObservabilityProvider
from vanna.core.user.resolver import UserResolver
from vanna.core.user.request_context import RequestContext
from vanna.core.agent.config import UiFeature
from vanna.core.audit import AuditLogger
from vanna.capabilities.agent_memory import AgentMemory
import logging
logger = logging.getLogger(__name__)
logger.info("Loaded vanna.core.agent.agent module")
if TYPE_CHECKING:
pass
class Agent:
"""Main agent implementation.
The Agent class orchestrates LLM interactions, tool execution, and conversation
management. It provides 7 extensibility points for customization:
- lifecycle_hooks: Hook into message and tool execution lifecycle
- llm_middlewares: Intercept and transform LLM requests/responses
- error_recovery_strategy: Handle errors with retry logic
- context_enrichers: Add data to tool execution context
- llm_context_enhancer: Enhance LLM system prompts and messages with context
- conversation_filters: Filter conversation history before LLM calls
- observability_provider: Collect telemetry and monitoring data
Example:
agent = Agent(
llm_service=AnthropicLlmService(api_key="..."),
tool_registry=registry,
conversation_store=store,
lifecycle_hooks=[QuotaCheckHook()],
llm_middlewares=[CachingMiddleware()],
llm_context_enhancer=DefaultLlmContextEnhancer(agent_memory),
observability_provider=LoggingProvider()
)
"""
def __init__(
self,
llm_service: LlmService,
tool_registry: ToolRegistry,
user_resolver: UserResolver,
agent_memory: AgentMemory,
conversation_store: Optional[ConversationStore] = None,
config: AgentConfig = AgentConfig(),
system_prompt_builder: SystemPromptBuilder = DefaultSystemPromptBuilder(),
lifecycle_hooks: List[LifecycleHook] = [],
llm_middlewares: List[LlmMiddleware] = [],
workflow_handler: Optional[WorkflowHandler] = None,
error_recovery_strategy: Optional[ErrorRecoveryStrategy] = None,
context_enrichers: List[ToolContextEnricher] = [],
llm_context_enhancer: Optional[LlmContextEnhancer] = None,
conversation_filters: List[ConversationFilter] = [],
observability_provider: Optional[ObservabilityProvider] = None,
audit_logger: Optional[AuditLogger] = None,
):
self.llm_service = llm_service
self.tool_registry = tool_registry
self.user_resolver = user_resolver
self.agent_memory = agent_memory
# Import here to avoid circular dependency
if conversation_store is None:
from vanna.integrations.local import MemoryConversationStore
conversation_store = MemoryConversationStore()
self.conversation_store = conversation_store
self.config = config
self.system_prompt_builder = system_prompt_builder
self.lifecycle_hooks = lifecycle_hooks
self.llm_middlewares = llm_middlewares
# Use DefaultWorkflowHandler if none provided
if workflow_handler is None:
workflow_handler = DefaultWorkflowHandler()
self.workflow_handler = workflow_handler
self.error_recovery_strategy = error_recovery_strategy
self.context_enrichers = context_enrichers
# Use DefaultLlmContextEnhancer if none provided
if llm_context_enhancer is None:
llm_context_enhancer = DefaultLlmContextEnhancer(agent_memory)
self.llm_context_enhancer = llm_context_enhancer
self.conversation_filters = conversation_filters
self.observability_provider = observability_provider
self.audit_logger = audit_logger
# Wire audit logger into tool registry
if self.audit_logger and self.config.audit_config.enabled:
self.tool_registry.audit_logger = self.audit_logger
self.tool_registry.audit_config = self.config.audit_config
logger.info("Initialized Agent")
async def send_message(
self,
request_context: RequestContext,
message: str,
*,
conversation_id: Optional[str] = None,
) -> AsyncGenerator[UiComponent, None]:
"""
Process a user message and yield UI components with error handling.
Args:
request_context: Request context for user resolution (includes metadata)
message: User's message content
conversation_id: Optional conversation ID; if None, creates new conversation
Yields:
UiComponent instances for UI updates
"""
try:
# Delegate to internal method
async for component in self._send_message(
request_context, message, conversation_id=conversation_id
):
yield component
except Exception as e:
# Log full stack trace
stack_trace = traceback.format_exc()
logger.error(
f"Error in send_message (conversation_id={conversation_id}): {e}\n{stack_trace}",
exc_info=True,
)
# Log to observability provider if available
if self.observability_provider:
try:
error_span = await self.observability_provider.create_span(
"agent.send_message.error",
attributes={
"error_type": type(e).__name__,
"error_message": str(e),
"conversation_id": conversation_id or "none",
},
)
await self.observability_provider.end_span(error_span)
await self.observability_provider.record_metric(
"agent.error.count",
1.0,
"count",
tags={"error_type": type(e).__name__},
)
except Exception as obs_error:
logger.error(
f"Failed to log error to observability provider: {obs_error}",
exc_info=True,
)
# Yield error component to UI (simple, user-friendly message)
error_description = "An unexpected error occurred while processing your message. Please try again."
if conversation_id:
error_description += f"\n\nConversation ID: {conversation_id}"
yield UiComponent(
rich_component=StatusCardComponent(
title="Error Processing Message",
status="error",
description=error_description,
icon="β οΈ",
),
simple_component=SimpleTextComponent(
text=f"Error: An unexpected error occurred. Please try again.{f' (Conversation ID: {conversation_id})' if conversation_id else ''}"
),
)
# Update status bar to show error state
yield UiComponent( # type: ignore
rich_component=StatusBarUpdateComponent(
status="error",
message="Error occurred",
detail="An unexpected error occurred while processing your message",
)
)
# Re-enable chat input so user can try again
yield UiComponent( # type: ignore
rich_component=ChatInputUpdateComponent(
placeholder="Try again...", disabled=False
)
)
async def _send_message(
self,
request_context: RequestContext,
message: str,
*,
conversation_id: Optional[str] = None,
) -> AsyncGenerator[UiComponent, None]:
"""
Internal method to process a user message and yield UI components.
Args:
request_context: Request context for user resolution (includes metadata)
message: User's message content
conversation_id: Optional conversation ID; if None, creates new conversation
Yields:
UiComponent instances for UI updates
"""
# Resolve user from request context with observability
user_resolution_span = None
if self.observability_provider:
user_resolution_span = await self.observability_provider.create_span(
"agent.user_resolution",
attributes={"has_context": request_context is not None},
)
user = await self.user_resolver.resolve_user(request_context)
if self.observability_provider and user_resolution_span:
user_resolution_span.set_attribute("user_id", user.id)
await self.observability_provider.end_span(user_resolution_span)
if user_resolution_span.duration_ms():
await self.observability_provider.record_metric(
"agent.user_resolution.duration",
user_resolution_span.duration_ms() or 0,
"ms",
)
# Check if this is a starter UI request (empty message or explicit metadata flag)
is_starter_request = (not message.strip()) or request_context.metadata.get(
"starter_ui_request", False
)
if is_starter_request and self.workflow_handler:
# Handle starter UI request with observability
starter_span = None
if self.observability_provider:
starter_span = await self.observability_provider.create_span(
"agent.workflow_handler.starter_ui", attributes={"user_id": user.id}
)
try:
# Load or create conversation for context
if conversation_id is None:
conversation_id = str(uuid.uuid4())
conversation = await self.conversation_store.get_conversation(
conversation_id, user
)
if not conversation:
# Create empty conversation (will be saved if workflow produces components)
conversation = Conversation(
id=conversation_id, user=user, messages=[]
)
# Get starter UI from workflow handler
components = await self.workflow_handler.get_starter_ui(
self, user, conversation
)
if self.observability_provider and starter_span:
starter_span.set_attribute("has_components", components is not None)
starter_span.set_attribute(
"component_count", len(components) if components else 0
)
if components:
# Yield the starter UI components
for component in components:
yield component
# Yield finalization components
yield UiComponent( # type: ignore
rich_component=StatusBarUpdateComponent(
status="idle",
message="Ready",
detail="Choose an option or type a message",
)
)
yield UiComponent( # type: ignore
rich_component=ChatInputUpdateComponent(
placeholder="Ask a question...", disabled=False
)
)
if self.observability_provider and starter_span:
await self.observability_provider.end_span(starter_span)
if starter_span.duration_ms():
await self.observability_provider.record_metric(
"agent.workflow_handler.starter_ui.duration",
starter_span.duration_ms() or 0,
"ms",
)
# Save the conversation if it was newly created
if self.config.auto_save_conversations:
await self.conversation_store.update_conversation(conversation)
return # Exit without calling LLM
except Exception as e:
logger.error(f"Error generating starter UI: {e}", exc_info=True)
if self.observability_provider and starter_span:
starter_span.set_attribute("error", str(e))
await self.observability_provider.end_span(starter_span)
# Fall through to normal processing on error
# Don't process actual empty messages (that aren't starter requests)
if not message.strip():
return
# Create observability span for entire message processing
message_span = None
if self.observability_provider:
message_span = await self.observability_provider.create_span(
"agent.send_message",
attributes={
"user_id": user.id,
"conversation_id": conversation_id or "new",
},
)
# Run before_message hooks with observability
modified_message = message
for hook in self.lifecycle_hooks:
hook_span = None
if self.observability_provider:
hook_span = await self.observability_provider.create_span(
"agent.hook.before_message",
attributes={"hook": hook.__class__.__name__},
)
hook_result = await hook.before_message(user, modified_message)
if hook_result is not None:
modified_message = hook_result
if self.observability_provider and hook_span:
hook_span.set_attribute("modified_message", hook_result is not None)
await self.observability_provider.end_span(hook_span)
if hook_span.duration_ms():
await self.observability_provider.record_metric(
"agent.hook.duration",
hook_span.duration_ms() or 0,
"ms",
tags={
"hook": hook.__class__.__name__,
"phase": "before_message",
},
)
# Use the potentially modified message
message = modified_message
# Generate conversation ID and request ID if not provided
if conversation_id is None:
conversation_id = str(uuid.uuid4())
request_id = str(uuid.uuid4())
# Update status to working
yield UiComponent( # type: ignore
rich_component=StatusBarUpdateComponent(
status="working",
message="Processing your request...",
detail="Analyzing query",
)
)
# Load or create conversation with observability (but don't add message yet)
conversation_span = None
if self.observability_provider:
conversation_span = await self.observability_provider.create_span(
"agent.conversation.load",
attributes={"conversation_id": conversation_id, "user_id": user.id},
)
conversation = await self.conversation_store.get_conversation(
conversation_id, user
)
is_new_conversation = conversation is None
if not conversation:
# Create empty conversation (will add message after workflow handler check)
conversation = Conversation(id=conversation_id, user=user, messages=[])
if self.observability_provider and conversation_span:
conversation_span.set_attribute("is_new", is_new_conversation)
conversation_span.set_attribute("message_count", len(conversation.messages))
await self.observability_provider.end_span(conversation_span)
if conversation_span.duration_ms():
await self.observability_provider.record_metric(
"agent.conversation.load.duration",
conversation_span.duration_ms() or 0,
"ms",
tags={"is_new": str(is_new_conversation)},
)
# Try workflow handler before adding message to conversation
if self.workflow_handler:
trigger_span = None
if self.observability_provider:
trigger_span = await self.observability_provider.create_span(
"agent.workflow_handler.try_handle",
attributes={"user_id": user.id, "conversation_id": conversation_id},
)
try:
workflow_result = await self.workflow_handler.try_handle(
self, user, conversation, message
)
if self.observability_provider and trigger_span:
trigger_span.set_attribute(
"should_skip_llm", workflow_result.should_skip_llm
)
if workflow_result.should_skip_llm:
# Workflow handled the message, short-circuit LLM
# Apply conversation mutation if provided
if workflow_result.conversation_mutation:
await workflow_result.conversation_mutation(conversation)
# Stream components
if workflow_result.components:
if isinstance(workflow_result.components, list):
for component in workflow_result.components:
yield component
else:
# AsyncGenerator
async for component in workflow_result.components:
yield component
# Finalize response (status bar + chat input)
yield UiComponent( # type: ignore
rich_component=StatusBarUpdateComponent(
status="idle",
message="Workflow complete",
detail="Ready for next message",
)
)
yield UiComponent( # type: ignore
rich_component=ChatInputUpdateComponent(
placeholder="Ask a question...", disabled=False
)
)
# Save conversation if auto-save enabled
if self.config.auto_save_conversations:
await self.conversation_store.update_conversation(conversation)
if self.observability_provider and trigger_span:
await self.observability_provider.end_span(trigger_span)
# Exit without calling LLM
return
except Exception as e:
logger.error(f"Error in workflow handler: {e}", exc_info=True)
if self.observability_provider and trigger_span:
trigger_span.set_attribute("error", str(e))
await self.observability_provider.end_span(trigger_span)
# Fall through to normal LLM processing on error
finally:
if self.observability_provider and trigger_span:
await self.observability_provider.end_span(trigger_span)
# Persist new conversation to store before adding message
if is_new_conversation:
await self.conversation_store.update_conversation(conversation)
# Not triggered, add user message to conversation now
conversation.add_message(Message(role="user", content=message))
# Add initial task
context_task = Task(
title="Load conversation context",
description="Reading message history and user context",
status="pending",
)
yield UiComponent( # type: ignore
rich_component=TaskTrackerUpdateComponent.add_task(context_task)
)
# Collect available UI features for auditing
ui_features_available = []
for feature_name in self.config.ui_features.feature_group_access.keys():
if self.config.ui_features.can_user_access_feature(feature_name, user):
ui_features_available.append(feature_name)
# Create context with observability provider and UI features
context = ToolContext(
user=user,
conversation_id=conversation_id,
request_id=request_id,
agent_memory=self.agent_memory,
observability_provider=self.observability_provider,
metadata={"ui_features_available": ui_features_available},
)
# Enrich context with additional data with observability
for enricher in self.context_enrichers:
enrichment_span = None
if self.observability_provider:
enrichment_span = await self.observability_provider.create_span(
"agent.context.enrichment",
attributes={"enricher": enricher.__class__.__name__},
)
context = await enricher.enrich_context(context)
if self.observability_provider and enrichment_span:
await self.observability_provider.end_span(enrichment_span)
if enrichment_span.duration_ms():
await self.observability_provider.record_metric(
"agent.enrichment.duration",
enrichment_span.duration_ms() or 0,
"ms",
tags={"enricher": enricher.__class__.__name__},
)
# Get available tools for user with observability
schema_span = None
if self.observability_provider:
schema_span = await self.observability_provider.create_span(
"agent.tool_schemas.fetch", attributes={"user_id": user.id}
)
tool_schemas = await self.tool_registry.get_schemas(user)
if self.observability_provider and schema_span:
schema_span.set_attribute("schema_count", len(tool_schemas))
await self.observability_provider.end_span(schema_span)
if schema_span.duration_ms():
await self.observability_provider.record_metric(
"agent.tool_schemas.duration",
schema_span.duration_ms() or 0,
"ms",
tags={"schema_count": str(len(tool_schemas))},
)
# Update task status to completed
yield UiComponent( # type: ignore
rich_component=TaskTrackerUpdateComponent.update_task(
context_task.id, status="completed"
)
)
# Build system prompt with observability
prompt_span = None
if self.observability_provider:
prompt_span = await self.observability_provider.create_span(
"agent.system_prompt.build",
attributes={"tool_count": len(tool_schemas)},
)
system_prompt = await self.system_prompt_builder.build_system_prompt(
user, tool_schemas
)
# Enhance system prompt with LLM context enhancer
if self.llm_context_enhancer and system_prompt is not None:
enhancement_span = None
if self.observability_provider:
enhancement_span = await self.observability_provider.create_span(
"agent.llm_context.enhance_system_prompt",
attributes={
"enhancer": self.llm_context_enhancer.__class__.__name__
},
)
system_prompt = await self.llm_context_enhancer.enhance_system_prompt(
system_prompt, message, user
)
if self.observability_provider and enhancement_span:
await self.observability_provider.end_span(enhancement_span)
if enhancement_span.duration_ms():
await self.observability_provider.record_metric(
"agent.llm_context.enhance_system_prompt.duration",
enhancement_span.duration_ms() or 0,
"ms",
tags={"enhancer": self.llm_context_enhancer.__class__.__name__},
)
if self.observability_provider and prompt_span:
prompt_span.set_attribute(
"prompt_length", len(system_prompt) if system_prompt else 0
)
await self.observability_provider.end_span(prompt_span)
if prompt_span.duration_ms():
await self.observability_provider.record_metric(
"agent.system_prompt.duration", prompt_span.duration_ms() or 0, "ms"
)
# Build LLM request
request = await self._build_llm_request(
conversation, tool_schemas, user, system_prompt
)
# Process with tool loop
tool_iterations = 0
while tool_iterations < self.config.max_tool_iterations:
if self.config.include_thinking_indicators and tool_iterations == 0:
# TODO: Yield thinking indicator
pass
# Get LLM response
if self.config.stream_responses:
response = await self._handle_streaming_response(request)
else:
response = await self._send_llm_request(request)
# Handle tool calls
if response.is_tool_call():
tool_iterations += 1
# First, add the assistant message with tool_calls to the conversation
# This is required for OpenAI API - tool messages must follow assistant messages with tool_calls
assistant_message = Message(
role="assistant",
content=response.content or "", # Ensure content is not None
tool_calls=response.tool_calls,
)
conversation.add_message(assistant_message)
if response.content is not None:
# Yield any partial content from the assistant before tool execution
has_tool_invocation_message_in_chat = (
self.config.ui_features.can_user_access_feature(
UiFeature.UI_FEATURE_SHOW_TOOL_INVOCATION_MESSAGE_IN_CHAT,
user,
)
)
if has_tool_invocation_message_in_chat:
yield UiComponent(
rich_component=RichTextComponent(
content=response.content, markdown=True
),
simple_component=SimpleTextComponent(text=response.content),
)
# Update status to executing tools
yield UiComponent( # type: ignore
rich_component=StatusBarUpdateComponent(
status="working",
message="Executing tools...",
detail=f"Running {len(response.tool_calls or [])} tools",
)
)
else:
# Yield as a status update instead
yield UiComponent( # type: ignore
rich_component=StatusBarUpdateComponent(
status="working", message=response.content, detail=""
)
)
# Collect all tool results first
tool_results = []
for i, tool_call in enumerate(response.tool_calls or []):
# Add task for this tool execution
tool_task = Task(
title=f"Execute {tool_call.name}",
description=f"Running tool with provided arguments",
status="in_progress",
)
has_tool_names_access = (
self.config.ui_features.can_user_access_feature(
UiFeature.UI_FEATURE_SHOW_TOOL_NAMES, user
)
)
# Audit UI feature access check
if (
self.audit_logger
and self.config.audit_config.enabled
and self.config.audit_config.log_ui_feature_checks
):
await self.audit_logger.log_ui_feature_access(
user=user,
feature_name=UiFeature.UI_FEATURE_SHOW_TOOL_NAMES,
access_granted=has_tool_names_access,
required_groups=self.config.ui_features.feature_group_access.get(
UiFeature.UI_FEATURE_SHOW_TOOL_NAMES, []
),
conversation_id=conversation.id,
request_id=request_id,
)
if has_tool_names_access:
yield UiComponent( # type: ignore
rich_component=TaskTrackerUpdateComponent.add_task(
tool_task
)
)
response_str = response.content
# Use primitive StatusCard instead of semantic ToolExecutionComponent
tool_status_card = StatusCardComponent(
title=f"Executing {tool_call.name}",
status="running",
description=f"Running tool with {len(tool_call.arguments)} arguments",
icon="βοΈ",
metadata=tool_call.arguments,
)
has_tool_args_access = (
self.config.ui_features.can_user_access_feature(
UiFeature.UI_FEATURE_SHOW_TOOL_ARGUMENTS, user
)
)
# Audit UI feature access check
if (
self.audit_logger
and self.config.audit_config.enabled
and self.config.audit_config.log_ui_feature_checks
):
await self.audit_logger.log_ui_feature_access(
user=user,
feature_name=UiFeature.UI_FEATURE_SHOW_TOOL_ARGUMENTS,
access_granted=has_tool_args_access,
required_groups=self.config.ui_features.feature_group_access.get(
UiFeature.UI_FEATURE_SHOW_TOOL_ARGUMENTS, []
),
conversation_id=conversation.id,
request_id=request_id,
)
if has_tool_args_access:
yield UiComponent(
rich_component=tool_status_card,
simple_component=SimpleTextComponent(
text=response_str or ""
),
)
# Run before_tool hooks with observability
tool = await self.tool_registry.get_tool(tool_call.name)
if tool:
for hook in self.lifecycle_hooks:
hook_span = None
if self.observability_provider:
hook_span = (
await self.observability_provider.create_span(
"agent.hook.before_tool",
attributes={
"hook": hook.__class__.__name__,
"tool": tool_call.name,
},
)
)
await hook.before_tool(tool, context)
if self.observability_provider and hook_span:
await self.observability_provider.end_span(hook_span)
if hook_span.duration_ms():
await self.observability_provider.record_metric(
"agent.hook.duration",
hook_span.duration_ms() or 0,
"ms",
tags={
"hook": hook.__class__.__name__,
"phase": "before_tool",
"tool": tool_call.name,
},
)
# Execute tool with observability
tool_exec_span = None
if self.observability_provider:
tool_exec_span = await self.observability_provider.create_span(
"agent.tool.execute",
attributes={
"tool": tool_call.name,
"arg_count": len(tool_call.arguments),
},
)
result = await self.tool_registry.execute(tool_call, context)
if self.observability_provider and tool_exec_span:
tool_exec_span.set_attribute("success", result.success)
if not result.success:
tool_exec_span.set_attribute(
"error", result.error or "unknown"
)
await self.observability_provider.end_span(tool_exec_span)
if tool_exec_span.duration_ms():
await self.observability_provider.record_metric(
"agent.tool.duration",
tool_exec_span.duration_ms() or 0,
"ms",
tags={
"tool": tool_call.name,
"success": str(result.success),
},
)
# Run after_tool hooks with observability
for hook in self.lifecycle_hooks:
hook_span = None
if self.observability_provider:
hook_span = await self.observability_provider.create_span(
"agent.hook.after_tool",
attributes={
"hook": hook.__class__.__name__,
"tool": tool_call.name,
},
)
modified_result = await hook.after_tool(result)
if modified_result is not None:
result = modified_result
if self.observability_provider and hook_span:
hook_span.set_attribute(
"modified_result", modified_result is not None
)
await self.observability_provider.end_span(hook_span)
if hook_span.duration_ms():
await self.observability_provider.record_metric(
"agent.hook.duration",
hook_span.duration_ms() or 0,
"ms",
tags={
"hook": hook.__class__.__name__,
"phase": "after_tool",
"tool": tool_call.name,
},
)
# Update status card to show completion
final_status = "success" if result.success else "error"
final_description = (
f"Tool completed successfully"
if result.success
else f"Tool failed: {result.error or 'Unknown error'}"
)
has_tool_args_access_2 = (
self.config.ui_features.can_user_access_feature(
UiFeature.UI_FEATURE_SHOW_TOOL_ARGUMENTS, user
)
)
# Audit UI feature access check
if (
self.audit_logger
and self.config.audit_config.enabled
and self.config.audit_config.log_ui_feature_checks
):
await self.audit_logger.log_ui_feature_access(
user=user,
feature_name=UiFeature.UI_FEATURE_SHOW_TOOL_ARGUMENTS,
access_granted=has_tool_args_access_2,
required_groups=self.config.ui_features.feature_group_access.get(
UiFeature.UI_FEATURE_SHOW_TOOL_ARGUMENTS, []
),
conversation_id=conversation.id,
request_id=request_id,
)
if has_tool_args_access_2:
yield UiComponent(
rich_component=tool_status_card.set_status(
final_status, final_description
),
simple_component=SimpleTextComponent(
text=final_description
),
)
has_tool_names_access_2 = (
self.config.ui_features.can_user_access_feature(
UiFeature.UI_FEATURE_SHOW_TOOL_NAMES, user
)
)
# Audit UI feature access check
if (
self.audit_logger
and self.config.audit_config.enabled
and self.config.audit_config.log_ui_feature_checks
):
await self.audit_logger.log_ui_feature_access(
user=user,
feature_name=UiFeature.UI_FEATURE_SHOW_TOOL_NAMES,
access_granted=has_tool_names_access_2,
required_groups=self.config.ui_features.feature_group_access.get(
UiFeature.UI_FEATURE_SHOW_TOOL_NAMES, []
),
conversation_id=conversation.id,
request_id=request_id,
)
if has_tool_names_access_2:
# Update tool task to completed
yield UiComponent( # type: ignore
rich_component=TaskTrackerUpdateComponent.update_task(
tool_task.id,
status="completed",
detail=f"Tool {'completed successfully' if result.success else 'return an error'}",
)
)
# Yield tool result
if result.ui_component:
# For errors, check if user has access to see error details
if not result.success:
has_tool_error_access = (
self.config.ui_features.can_user_access_feature(
UiFeature.UI_FEATURE_SHOW_TOOL_ERROR, user
)
)
# Audit UI feature access check
if (
self.audit_logger
and self.config.audit_config.enabled
and self.config.audit_config.log_ui_feature_checks
):
await self.audit_logger.log_ui_feature_access(
user=user,
feature_name=UiFeature.UI_FEATURE_SHOW_TOOL_ERROR,
access_granted=has_tool_error_access,
required_groups=self.config.ui_features.feature_group_access.get(
UiFeature.UI_FEATURE_SHOW_TOOL_ERROR, []
),
conversation_id=conversation.id,
request_id=request_id,
)
if has_tool_error_access:
yield result.ui_component
else:
# Success results are always shown if they exist
yield result.ui_component
# Collect tool result data
tool_results.append(
{
"tool_call_id": tool_call.id,
"content": (
result.result_for_llm
if result.success
else result.error or "Tool execution failed"
),
}
)
# Add tool responses to conversation
# For APIs that need all tool results in one message, this helps
for tool_result in tool_results:
tool_response_message = Message(
role="tool",
content=tool_result["content"],
tool_call_id=tool_result["tool_call_id"],
)
conversation.add_message(tool_response_message)
# Rebuild request with tool responses
request = await self._build_llm_request(
conversation, tool_schemas, user, system_prompt
)
else:
# Update status to idle and set completion message
yield UiComponent( # type: ignore
rich_component=StatusBarUpdateComponent(
status="idle",
message="Response complete",
detail="Ready for next message",
)
)
# Update chat input placeholder
yield UiComponent( # type: ignore
rich_component=ChatInputUpdateComponent(
placeholder="Ask a follow-up question...", disabled=False
)
)
# Yield final text response
if response.content:
# Add assistant response to conversation
conversation.add_message(
Message(role="assistant", content=response.content)
)
yield UiComponent(
rich_component=RichTextComponent(
content=response.content, markdown=True
),
simple_component=SimpleTextComponent(text=response.content),
)
break
# Check if we hit the tool iteration limit
if tool_iterations >= self.config.max_tool_iterations:
# The loop exited due to hitting the limit, not due to a natural completion
logger.warning(
f"Tool iteration limit reached: {tool_iterations}/{self.config.max_tool_iterations}"
)
# Update status bar to show warning
yield UiComponent( # type: ignore
rich_component=StatusBarUpdateComponent(
status="warning",
message="Tool limit reached",
detail=f"Stopped after {tool_iterations} tool executions. The task may be incomplete.",
)
)
# Provide detailed warning message to user
warning_message = f"""β οΈ **Tool Execution Limit Reached**
The agent stopped after executing {tool_iterations} tools (the configured maximum). The task may not be fully complete.
You can:
- Ask me to continue where I left off
- Adjust the `max_tool_iterations` setting if you need more tool calls
- Break the task into smaller steps"""
yield UiComponent(
rich_component=RichTextComponent(
content=warning_message, markdown=True
),
simple_component=SimpleTextComponent(
text=f"Tool limit reached after {tool_iterations} executions. Task may be incomplete."
),
)
# Update chat input to suggest follow-up
yield UiComponent( # type: ignore
rich_component=ChatInputUpdateComponent(
placeholder="Continue the task or ask me something else...",
disabled=False,
)
)
# Save conversation if configured
if self.config.auto_save_conversations:
save_span = None
if self.observability_provider:
save_span = await self.observability_provider.create_span(
"agent.conversation.save",
attributes={
"conversation_id": conversation_id,
"message_count": len(conversation.messages),
},
)
await self.conversation_store.update_conversation(conversation)
if self.observability_provider and save_span:
await self.observability_provider.end_span(save_span)
if save_span.duration_ms():
await self.observability_provider.record_metric(
"agent.conversation.save.duration",
save_span.duration_ms() or 0,
"ms",
)
# Run after_message hooks with observability
for hook in self.lifecycle_hooks:
hook_span = None
if self.observability_provider:
hook_span = await self.observability_provider.create_span(
"agent.hook.after_message",
attributes={"hook": hook.__class__.__name__},
)
await hook.after_message(conversation)
if self.observability_provider and hook_span:
await self.observability_provider.end_span(hook_span)
if hook_span.duration_ms():
await self.observability_provider.record_metric(
"agent.hook.duration",
hook_span.duration_ms() or 0,
"ms",
tags={
"hook": hook.__class__.__name__,
"phase": "after_message",
},
)
# End observability span and record metrics
if self.observability_provider and message_span:
message_span.set_attribute("tool_iterations", tool_iterations)
# Track if we hit the tool iteration limit
hit_tool_limit = tool_iterations >= self.config.max_tool_iterations
message_span.set_attribute("hit_tool_limit", hit_tool_limit)
if hit_tool_limit:
message_span.set_attribute("incomplete_response", True)
logger.info(
f"Tool limit reached - marking response as potentially incomplete"
)
await self.observability_provider.end_span(message_span)
if message_span.duration_ms():
await self.observability_provider.record_metric(
"agent.message.duration",
message_span.duration_ms() or 0,
"ms",
tags={"user_id": user.id, "hit_tool_limit": str(hit_tool_limit)},
)
async def get_available_tools(self, user: User) -> List[ToolSchema]:
"""Get tools available to the user."""
return await self.tool_registry.get_schemas(user)
async def _build_llm_request(
self,
conversation: Conversation,
tool_schemas: List[ToolSchema],
user: User,
system_prompt: Optional[str] = None,
) -> LlmRequest:
"""Build LLM request from conversation and tools."""
# Apply conversation filters with observability
filtered_messages = conversation.messages
for filter in self.conversation_filters:
filter_span = None
if self.observability_provider:
filter_span = await self.observability_provider.create_span(
"agent.conversation.filter",
attributes={
"filter": filter.__class__.__name__,
"message_count_before": len(filtered_messages),
},
)
filtered_messages = await filter.filter_messages(filtered_messages)
if self.observability_provider and filter_span:
filter_span.set_attribute("message_count_after", len(filtered_messages))
await self.observability_provider.end_span(filter_span)
if filter_span.duration_ms():
await self.observability_provider.record_metric(
"agent.filter.duration",
filter_span.duration_ms() or 0,
"ms",
tags={"filter": filter.__class__.__name__},
)
messages = []
for msg in filtered_messages:
llm_msg = LlmMessage(
role=msg.role,
content=msg.content,
tool_calls=msg.tool_calls,
tool_call_id=msg.tool_call_id,
)
messages.append(llm_msg)
# Enhance messages with LLM context enhancer
if self.llm_context_enhancer:
enhancement_span = None
if self.observability_provider:
enhancement_span = await self.observability_provider.create_span(
"agent.llm_context.enhance_user_messages",
attributes={
"enhancer": self.llm_context_enhancer.__class__.__name__,
"message_count": len(messages),
},
)
messages = await self.llm_context_enhancer.enhance_user_messages(
messages, user
)
if self.observability_provider and enhancement_span:
enhancement_span.set_attribute("message_count_after", len(messages))
await self.observability_provider.end_span(enhancement_span)
if enhancement_span.duration_ms():
await self.observability_provider.record_metric(
"agent.llm_context.enhance_user_messages.duration",
enhancement_span.duration_ms() or 0,
"ms",
tags={"enhancer": self.llm_context_enhancer.__class__.__name__},
)
return LlmRequest(
messages=messages,
tools=tool_schemas if tool_schemas else None,
user=user,
temperature=self.config.temperature,
max_tokens=self.config.max_tokens,
stream=self.config.stream_responses,
system_prompt=system_prompt,
)
async def _send_llm_request(self, request: LlmRequest) -> LlmResponse:
"""Send LLM request with middleware and observability."""
# Apply before_llm_request middlewares with observability
for middleware in self.llm_middlewares:
mw_span = None
if self.observability_provider:
mw_span = await self.observability_provider.create_span(
"agent.middleware.before_llm",
attributes={"middleware": middleware.__class__.__name__},
)
request = await middleware.before_llm_request(request)
if self.observability_provider and mw_span:
await self.observability_provider.end_span(mw_span)
if mw_span.duration_ms():
await self.observability_provider.record_metric(
"agent.middleware.duration",
mw_span.duration_ms() or 0,
"ms",
tags={
"middleware": middleware.__class__.__name__,
"phase": "before_llm",
},
)
# Create observability span for LLM call
llm_span = None
if self.observability_provider:
llm_span = await self.observability_provider.create_span(
"llm.request",
attributes={
"model": getattr(self.llm_service, "model", "unknown"),
"stream": request.stream,
},
)
# Send request
response = await self.llm_service.send_request(request)
# End span and record metrics
if self.observability_provider and llm_span:
await self.observability_provider.end_span(llm_span)
if llm_span.duration_ms():
await self.observability_provider.record_metric(
"llm.request.duration", llm_span.duration_ms() or 0, "ms"
)
# Apply after_llm_response middlewares with observability
for middleware in self.llm_middlewares:
mw_span = None
if self.observability_provider:
mw_span = await self.observability_provider.create_span(
"agent.middleware.after_llm",
attributes={"middleware": middleware.__class__.__name__},
)
response = await middleware.after_llm_response(request, response)
if self.observability_provider and mw_span:
await self.observability_provider.end_span(mw_span)
if mw_span.duration_ms():
await self.observability_provider.record_metric(
"agent.middleware.duration",
mw_span.duration_ms() or 0,
"ms",
tags={
"middleware": middleware.__class__.__name__,
"phase": "after_llm",
},
)
return response
async def _handle_streaming_response(self, request: LlmRequest) -> LlmResponse:
"""Handle streaming response from LLM."""
# Apply before_llm_request middlewares with observability
for middleware in self.llm_middlewares:
mw_span = None
if self.observability_provider:
mw_span = await self.observability_provider.create_span(
"agent.middleware.before_llm",
attributes={
"middleware": middleware.__class__.__name__,
"stream": True,
},
)
request = await middleware.before_llm_request(request)
if self.observability_provider and mw_span:
await self.observability_provider.end_span(mw_span)
if mw_span.duration_ms():
await self.observability_provider.record_metric(
"agent.middleware.duration",
mw_span.duration_ms() or 0,
"ms",
tags={
"middleware": middleware.__class__.__name__,
"phase": "before_llm",
"stream": "true",
},
)
accumulated_content = ""
accumulated_tool_calls = []
# Create span for streaming
stream_span = None
if self.observability_provider:
stream_span = await self.observability_provider.create_span(
"llm.stream",
attributes={"model": getattr(self.llm_service, "model", "unknown")},
)
async for chunk in self.llm_service.stream_request(request):
if chunk.content:
accumulated_content += chunk.content
# Could yield intermediate TextChunk here
if chunk.tool_calls:
accumulated_tool_calls.extend(chunk.tool_calls)
# End streaming span
if self.observability_provider and stream_span:
stream_span.set_attribute("content_length", len(accumulated_content))
stream_span.set_attribute("tool_call_count", len(accumulated_tool_calls))
await self.observability_provider.end_span(stream_span)
if stream_span.duration_ms():
await self.observability_provider.record_metric(
"llm.stream.duration", stream_span.duration_ms() or 0, "ms"
)
response = LlmResponse(
content=accumulated_content if accumulated_content else None,
tool_calls=accumulated_tool_calls if accumulated_tool_calls else None,
)
# Apply after_llm_response middlewares with observability
for middleware in self.llm_middlewares:
mw_span = None
if self.observability_provider:
mw_span = await self.observability_provider.create_span(
"agent.middleware.after_llm",
attributes={
"middleware": middleware.__class__.__name__,
"stream": True,
},
)
response = await middleware.after_llm_response(request, response)
if self.observability_provider and mw_span:
await self.observability_provider.end_span(mw_span)
if mw_span.duration_ms():
await self.observability_provider.record_metric(
"agent.middleware.duration",
mw_span.duration_ms() or 0,
"ms",
tags={
"middleware": middleware.__class__.__name__,
"phase": "after_llm",
"stream": "true",
},
)
return response
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/agent/agent.py",
"license": "MIT License",
"lines": 1227,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
vanna-ai/vanna:src/vanna/core/agent/config.py | """
Agent configuration.
This module contains configuration models that control agent behavior.
"""
from typing import TYPE_CHECKING, Dict, List, Optional
from pydantic import BaseModel, Field
from .._compat import StrEnum
if TYPE_CHECKING:
from ..user import User
class UiFeature(StrEnum):
    """Well-known UI features that can be gated by group membership.

    The string values double as the keys used in
    ``UiFeatures.feature_group_access`` (StrEnum makes them plain str keys).
    """
    UI_FEATURE_SHOW_TOOL_NAMES = "tool_names"
    UI_FEATURE_SHOW_TOOL_ARGUMENTS = "tool_arguments"
    UI_FEATURE_SHOW_TOOL_ERROR = "tool_error"
    UI_FEATURE_SHOW_TOOL_INVOCATION_MESSAGE_IN_CHAT = "tool_invocation_message_in_chat"
    UI_FEATURE_SHOW_MEMORY_DETAILED_RESULTS = "memory_detailed_results"
# Optional: you can also define defaults if you want a shared baseline
# Default allow-lists consumed by UiFeatures.feature_group_access: each
# feature maps to the groups that may see it (empty list would mean "everyone").
DEFAULT_UI_FEATURES: Dict[str, List[str]] = {
    UiFeature.UI_FEATURE_SHOW_TOOL_NAMES: ["admin", "user"],
    UiFeature.UI_FEATURE_SHOW_TOOL_ARGUMENTS: ["admin"],
    UiFeature.UI_FEATURE_SHOW_TOOL_ERROR: ["admin"],
    UiFeature.UI_FEATURE_SHOW_TOOL_INVOCATION_MESSAGE_IN_CHAT: ["admin"],
    UiFeature.UI_FEATURE_SHOW_MEMORY_DETAILED_RESULTS: ["admin"],
}
class UiFeatures(BaseModel):
    """Group-gated UI feature flags, mirroring the tool access-control model.

    ``feature_group_access`` maps each feature name to the groups allowed to
    use it. An empty group list opens the feature to every user; access for a
    non-empty list is decided by set intersection, exactly as for tools.
    """

    # Custom features for extensibility
    feature_group_access: Dict[str, List[str]] = Field(
        default_factory=lambda: DEFAULT_UI_FEATURES.copy(),
        description="Which groups can access UI features",
    )

    def can_user_access_feature(self, feature_name: str, user: "User") -> bool:
        """Return True when *user* may use the UI feature *feature_name*.

        Unknown features are denied outright; a feature with an empty
        allow-list is open to all; otherwise the user must belong to at
        least one of the feature's allowed groups.
        """
        if feature_name not in self.feature_group_access:
            # Unregistered feature names are treated as access denied.
            return False
        allowed_groups = self.feature_group_access[feature_name]
        if not allowed_groups:
            # Empty allow-list means the feature is unrestricted.
            return True
        # Grant access when the user's groups intersect the allow-list
        # (same semantics as the tool access-control check).
        return not set(allowed_groups).isdisjoint(user.group_memberships)

    def register_feature(self, name: str, access_groups: List[str]) -> None:
        """Register (or overwrite) a custom feature and its allowed groups."""
        self.feature_group_access[name] = access_groups
class AuditConfig(BaseModel):
    """Configuration for audit logging.

    Each flag toggles one category of audit event emission (see AuditLogger
    for the emitting side). Defaults log everything except the noisy UI
    feature checks and full AI response bodies.
    """
    enabled: bool = Field(default=True, description="Enable audit logging")
    log_tool_access_checks: bool = Field(
        default=True, description="Log tool access permission checks"
    )
    log_tool_invocations: bool = Field(
        default=True, description="Log tool invocations with parameters"
    )
    log_tool_results: bool = Field(
        default=True, description="Log tool execution results"
    )
    log_ui_feature_checks: bool = Field(
        default=False, description="Log UI feature access checks (can be noisy)"
    )
    log_ai_responses: bool = Field(
        default=True, description="Log AI-generated responses"
    )
    include_full_ai_responses: bool = Field(
        default=False,
        description="Include full AI response text in logs (privacy concern)",
    )
    sanitize_tool_parameters: bool = Field(
        default=True, description="Sanitize sensitive parameters (passwords, tokens)"
    )
class AgentConfig(BaseModel):
    """Configuration for agent behavior.

    Bundles runtime knobs for the agent loop plus nested UI-feature and
    audit configuration.
    """
    # Upper bound on tool-call rounds per user message; must be positive.
    max_tool_iterations: int = Field(default=10, gt=0)
    # Stream LLM responses chunk-by-chunk instead of waiting for completion.
    stream_responses: bool = Field(default=True)
    # Persist the conversation via the conversation store after each message.
    auto_save_conversations: bool = Field(default=True)
    include_thinking_indicators: bool = Field(default=True)
    # Sampling temperature forwarded to the LLM request (0.0-2.0).
    temperature: float = Field(default=0.7, ge=0.0, le=2.0)
    # None lets the LLM service use its own default token limit.
    max_tokens: Optional[int] = Field(default=None, gt=0)
    ui_features: UiFeatures = Field(default_factory=UiFeatures)
    audit_config: AuditConfig = Field(default_factory=AuditConfig)
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/agent/config.py",
"license": "MIT License",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
vanna-ai/vanna:src/vanna/core/audit/base.py | """
Base audit logger interface.
Audit loggers enable tracking user actions, tool invocations, and access control
decisions for security, compliance, and debugging.
"""
import hashlib
from abc import ABC, abstractmethod
from datetime import datetime
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from .models import (
AiResponseEvent,
AuditEvent,
ToolAccessCheckEvent,
ToolInvocationEvent,
ToolResultEvent,
UiFeatureAccessCheckEvent,
)
if TYPE_CHECKING:
from ..tool.models import ToolCall, ToolContext, ToolResult
from ..user.models import User
class AuditLogger(ABC):
    """Abstract base class for audit logging implementations.

    Implementations can:
    - Write to files (JSON, CSV, etc.)
    - Send to databases (Postgres, MongoDB, etc.)
    - Stream to cloud services (CloudWatch, Datadog, etc.)
    - Send to SIEM systems (Splunk, Elastic, etc.)

    Example:
        class PostgresAuditLogger(AuditLogger):
            async def log_event(self, event: AuditEvent) -> None:
                await self.db.execute(
                    "INSERT INTO audit_log (...) VALUES (...)",
                    event.model_dump()
                )

        agent = Agent(
            llm_service=...,
            audit_logger=PostgresAuditLogger(db_pool)
        )
    """

    @abstractmethod
    async def log_event(self, event: AuditEvent) -> None:
        """Log a single audit event.

        Args:
            event: The audit event to log

        Raises:
            Exception: If logging fails critically
        """
        pass

    async def log_tool_access_check(
        self,
        user: "User",
        tool_name: str,
        access_granted: bool,
        required_groups: List[str],
        context: "ToolContext",
        reason: Optional[str] = None,
    ) -> None:
        """Convenience method for logging tool access checks.

        Args:
            user: User attempting to access the tool
            tool_name: Name of the tool being accessed
            access_granted: Whether access was granted
            required_groups: Groups required to access the tool
            context: Tool execution context
            reason: Optional reason for denial
        """
        event = ToolAccessCheckEvent(
            user_id=user.id,
            username=user.username,
            user_email=user.email,
            user_groups=user.group_memberships,
            conversation_id=context.conversation_id,
            request_id=context.request_id,
            tool_name=tool_name,
            access_granted=access_granted,
            required_groups=required_groups,
            reason=reason,
        )
        await self.log_event(event)

    async def log_tool_invocation(
        self,
        user: "User",
        tool_call: "ToolCall",
        ui_features: List[str],
        context: "ToolContext",
        sanitize_parameters: bool = True,
    ) -> None:
        """Convenience method for logging tool invocations.

        Args:
            user: User invoking the tool
            tool_call: Tool call information
            ui_features: List of UI features available to the user
            context: Tool execution context
            sanitize_parameters: Whether to sanitize sensitive parameters
        """
        parameters = tool_call.arguments.copy()
        sanitized = False
        if sanitize_parameters:
            parameters, sanitized = self._sanitize_parameters(parameters)
        event = ToolInvocationEvent(
            user_id=user.id,
            username=user.username,
            user_email=user.email,
            user_groups=user.group_memberships,
            conversation_id=context.conversation_id,
            request_id=context.request_id,
            tool_call_id=tool_call.id,
            tool_name=tool_call.name,
            parameters=parameters,
            parameters_sanitized=sanitized,
            ui_features_available=ui_features,
        )
        await self.log_event(event)

    async def log_tool_result(
        self,
        user: "User",
        tool_call: "ToolCall",
        result: "ToolResult",
        context: "ToolContext",
    ) -> None:
        """Convenience method for logging tool results.

        Args:
            user: User who invoked the tool
            tool_call: Tool call information
            result: Tool execution result
            context: Tool execution context
        """
        event = ToolResultEvent(
            user_id=user.id,
            username=user.username,
            user_email=user.email,
            user_groups=user.group_memberships,
            conversation_id=context.conversation_id,
            request_id=context.request_id,
            tool_call_id=tool_call.id,
            tool_name=tool_call.name,
            success=result.success,
            error=result.error,
            execution_time_ms=result.metadata.get("execution_time_ms", 0.0),
            result_size_bytes=(
                len(result.result_for_llm.encode("utf-8"))
                if result.result_for_llm
                else 0
            ),
            ui_component_type=(
                result.ui_component.__class__.__name__ if result.ui_component else None
            ),
        )
        await self.log_event(event)

    async def log_ui_feature_access(
        self,
        user: "User",
        feature_name: str,
        access_granted: bool,
        required_groups: List[str],
        conversation_id: str,
        request_id: str,
    ) -> None:
        """Convenience method for logging UI feature access checks.

        Args:
            user: User attempting to access the feature
            feature_name: Name of the UI feature
            access_granted: Whether access was granted
            required_groups: Groups required to access the feature
            conversation_id: Conversation identifier
            request_id: Request identifier
        """
        event = UiFeatureAccessCheckEvent(
            user_id=user.id,
            username=user.username,
            user_email=user.email,
            user_groups=user.group_memberships,
            conversation_id=conversation_id,
            request_id=request_id,
            feature_name=feature_name,
            access_granted=access_granted,
            required_groups=required_groups,
        )
        await self.log_event(event)

    async def log_ai_response(
        self,
        user: "User",
        conversation_id: str,
        request_id: str,
        response_text: str,
        tool_calls: List["ToolCall"],
        model_info: Optional[Dict[str, Any]] = None,
        include_full_text: bool = False,
    ) -> None:
        """Convenience method for logging AI responses.

        Args:
            user: User receiving the response
            conversation_id: Conversation identifier
            request_id: Request identifier
            response_text: The AI-generated response text
            tool_calls: List of tool calls in the response
            model_info: Optional model configuration info
            include_full_text: Whether to include full response text
        """
        # Hash is always recorded so integrity can be verified even when the
        # full text is withheld for privacy.
        response_hash = hashlib.sha256(response_text.encode("utf-8")).hexdigest()
        event = AiResponseEvent(
            user_id=user.id,
            username=user.username,
            user_email=user.email,
            user_groups=user.group_memberships,
            conversation_id=conversation_id,
            request_id=request_id,
            response_length_chars=len(response_text),
            response_text=response_text if include_full_text else None,
            response_hash=response_hash,
            model_name=model_info.get("model") if model_info else None,
            temperature=model_info.get("temperature") if model_info else None,
            tool_calls_count=len(tool_calls),
            tool_names=[tc.name for tc in tool_calls],
        )
        await self.log_event(event)

    async def query_events(
        self,
        filters: Optional[Dict[str, Any]] = None,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
        limit: int = 100,
    ) -> List[AuditEvent]:
        """Query audit events (optional, for implementations that support it).

        Args:
            filters: Filter criteria (user_id, event_type, etc.)
            start_time: Filter events after this time
            end_time: Filter events before this time
            limit: Maximum number of events to return

        Returns:
            List of matching audit events

        Raises:
            NotImplementedError: If query not supported by implementation
        """
        raise NotImplementedError("Query not supported by this implementation")

    def _sanitize_parameters(
        self, parameters: Dict[str, Any]
    ) -> tuple[Dict[str, Any], bool]:
        """Sanitize sensitive data from parameters.

        Redaction is applied recursively: keys matching a sensitive pattern
        are replaced with "[REDACTED]" at every nesting level, including
        dicts nested inside lists. (The previous implementation only checked
        top-level keys, leaking nested credentials into audit logs.)

        Args:
            parameters: Raw parameters dict

        Returns:
            Tuple of (sanitized_parameters, was_sanitized)
        """
        # Common sensitive field patterns (substring match on lowercased key).
        sensitive_patterns = (
            "password",
            "secret",
            "token",
            "api_key",
            "apikey",
            "credential",
            "auth",
            "private_key",
            "access_key",
        )
        was_sanitized = False

        def _scrub_dict(data: Dict[str, Any]) -> Dict[str, Any]:
            nonlocal was_sanitized
            scrubbed: Dict[str, Any] = {}
            for key, value in data.items():
                key_lower = key.lower()
                if any(pattern in key_lower for pattern in sensitive_patterns):
                    scrubbed[key] = "[REDACTED]"
                    was_sanitized = True
                elif isinstance(value, dict):
                    scrubbed[key] = _scrub_dict(value)
                elif isinstance(value, list):
                    scrubbed[key] = [
                        _scrub_dict(item) if isinstance(item, dict) else item
                        for item in value
                    ]
                else:
                    scrubbed[key] = value
            return scrubbed

        return _scrub_dict(parameters), was_sanitized
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/audit/base.py",
"license": "MIT License",
"lines": 264,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
vanna-ai/vanna:src/vanna/core/audit/models.py | """
Audit event models.
This module contains data models for audit logging events.
"""
import uuid
from datetime import datetime
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
from .._compat import StrEnum
class AuditEventType(StrEnum):
    """Types of audit events.

    String values are the wire/storage representation written by AuditLogger
    implementations.
    """
    # Access control events
    TOOL_ACCESS_CHECK = "tool_access_check"
    UI_FEATURE_ACCESS_CHECK = "ui_feature_access_check"
    # Tool execution events
    TOOL_INVOCATION = "tool_invocation"
    TOOL_RESULT = "tool_result"
    # Conversation events
    MESSAGE_RECEIVED = "message_received"
    AI_RESPONSE_GENERATED = "ai_response_generated"
    CONVERSATION_CREATED = "conversation_created"
    # Security events
    ACCESS_DENIED = "access_denied"
    AUTHENTICATION_ATTEMPT = "authentication_attempt"
class AuditEvent(BaseModel):
    """Base audit event with common fields.

    All concrete event types (tool access/invocation/result, UI feature
    checks, AI responses) extend this with their own payload fields.
    """
    # Random UUID per event; default factory runs at construction time.
    event_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    event_type: AuditEventType
    # NOTE(review): datetime.utcnow produces a naive datetime and is
    # deprecated since Python 3.12 — consider datetime.now(timezone.utc),
    # but confirm downstream consumers tolerate aware timestamps first.
    timestamp: datetime = Field(default_factory=datetime.utcnow)
    # User context
    user_id: str
    username: Optional[str] = None
    user_email: Optional[str] = None
    user_groups: List[str] = Field(default_factory=list)
    # Request context
    conversation_id: str
    request_id: str
    remote_addr: Optional[str] = None
    # Event-specific data
    details: Dict[str, Any] = Field(default_factory=dict)
    # Privacy/redaction markers
    contains_pii: bool = False
    redacted_fields: List[str] = Field(default_factory=list)
class ToolAccessCheckEvent(AuditEvent):
    """Audit event for tool access permission checks.

    Emitted whether access was granted or denied; see access_granted.
    """
    event_type: AuditEventType = AuditEventType.TOOL_ACCESS_CHECK
    tool_name: str
    access_granted: bool
    required_groups: List[str] = Field(default_factory=list)
    # Optional human-readable reason, typically populated on denial.
    reason: Optional[str] = None
class ToolInvocationEvent(AuditEvent):
    """Audit event for actual tool executions."""
    event_type: AuditEventType = AuditEventType.TOOL_INVOCATION
    tool_call_id: str
    tool_name: str
    # Parameters with sanitization support
    parameters: Dict[str, Any] = Field(default_factory=dict)
    # True when AuditLogger._sanitize_parameters redacted anything.
    parameters_sanitized: bool = False
    # UI context at invocation time
    ui_features_available: List[str] = Field(default_factory=list)
class ToolResultEvent(AuditEvent):
    """Audit event for tool execution results."""
    event_type: AuditEventType = AuditEventType.TOOL_RESULT
    tool_call_id: str
    tool_name: str
    success: bool
    # Error message when success is False.
    error: Optional[str] = None
    execution_time_ms: float = 0.0
    # Result metadata (without full content for size)
    result_size_bytes: Optional[int] = None
    ui_component_type: Optional[str] = None
class UiFeatureAccessCheckEvent(AuditEvent):
    """Audit event for UI feature access checks."""
    event_type: AuditEventType = AuditEventType.UI_FEATURE_ACCESS_CHECK
    feature_name: str
    access_granted: bool
    required_groups: List[str] = Field(default_factory=list)
class AiResponseEvent(AuditEvent):
    """Audit event for AI-generated responses."""
    event_type: AuditEventType = AuditEventType.AI_RESPONSE_GENERATED
    # Response metadata
    response_length_chars: int
    response_length_tokens: Optional[int] = None
    # Full text (optional, configurable)
    response_text: Optional[str] = None
    response_hash: str  # SHA256 for integrity verification
    # Model info
    model_name: Optional[str] = None
    temperature: Optional[float] = None
    # Tool calls in response
    tool_calls_count: int = 0
    tool_names: List[str] = Field(default_factory=list)
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/audit/models.py",
"license": "MIT License",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/core/component_manager.py | """
Component state management and update protocol for rich components.
"""
import uuid
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Set, Union
from pydantic import BaseModel, Field
from ..components.rich import ComponentLifecycle, RichComponent
class UpdateOperation(str, Enum):
    """Types of component update operations.

    Values are serialized as-is to the frontend (see
    ComponentUpdate.serialize_for_frontend).
    """
    CREATE = "create"
    UPDATE = "update"
    REPLACE = "replace"
    REMOVE = "remove"
    # NOTE(review): REORDER and BULK_UPDATE are declared but not produced by
    # ComponentTree/ComponentManager in this module — confirm frontend usage.
    REORDER = "reorder"
    BULK_UPDATE = "bulk_update"
class Position(BaseModel):
    """Position specification for component placement."""
    # NOTE(review): index is not consulted by ComponentTree._find_parent in
    # this module — presumably interpreted by the frontend; confirm.
    index: Optional[int] = None
    # ID of an existing component the new one is placed relative to.
    anchor_id: Optional[str] = None
    relation: str = "after"  # "before", "after", "inside", "replace"
class ComponentUpdate(BaseModel):
    """Represents a change to the component tree."""
    operation: UpdateOperation
    target_id: str  # Component being affected
    component: Optional[RichComponent] = None  # New/updated component data
    updates: Optional[Dict[str, Any]] = None  # Partial updates for UPDATE operation
    position: Optional[Position] = None  # For positioning operations
    # NOTE(review): naive UTC ISO string via deprecated utcnow; parsed back in
    # ComponentManager.get_updates_since — keep formats in sync if changed.
    timestamp: str = Field(default_factory=lambda: datetime.utcnow().isoformat())
    batch_id: Optional[str] = None  # For grouping related updates
    def serialize_for_frontend(self) -> Dict[str, Any]:
        """Return update payload with nested components normalized."""
        payload = self.model_dump()
        # Normalise enum values for the frontend contract.
        payload["operation"] = self.operation.value
        if self.component:
            payload["component"] = self.component.serialize_for_frontend()
        return payload
class ComponentNode(BaseModel):
    """A single node in the component tree.

    Holds one RichComponent plus its child nodes; ``parent_id`` is the ID of
    the parent node's component (None for the root).
    """

    component: RichComponent
    children: List["ComponentNode"] = Field(default_factory=list)
    parent_id: Optional[str] = None

    def find_child(self, component_id: str) -> Optional["ComponentNode"]:
        """Depth-first search of the subtree for *component_id* (excludes self)."""
        for node in self.children:
            if node.component.id == component_id:
                return node
            match = node.find_child(component_id)
            if match is not None:
                return match
        return None

    def remove_child(self, component_id: str) -> bool:
        """Remove the first matching descendant; True if anything was removed."""
        for index, node in enumerate(self.children):
            if node.component.id == component_id:
                del self.children[index]
                return True
            if node.remove_child(component_id):
                return True
        return False

    def get_all_ids(self) -> Set[str]:
        """Collect the IDs of this node and every descendant."""
        collected = {self.component.id}
        for node in self.children:
            collected |= node.get_all_ids()
        return collected
class ComponentTree(BaseModel):
    """Hierarchical structure for managing component layout.

    ``flat_index`` mirrors the node hierarchy for O(1) lookup by component ID
    and must be kept consistent with the tree on every mutation.
    """
    root: Optional[ComponentNode] = None
    flat_index: Dict[str, ComponentNode] = Field(default_factory=dict)
    def add_component(
        self, component: RichComponent, position: Optional[Position] = None
    ) -> ComponentUpdate:
        """Add a component to the tree and return the resulting CREATE update.

        The first component added becomes the root; later ones attach under
        the parent resolved from *position* (falling back to the root).
        """
        node = ComponentNode(component=component)
        self.flat_index[component.id] = node
        if self.root is None:
            self.root = node
        else:
            parent_node = self._find_parent(position)
            if parent_node is not None:
                node.parent_id = parent_node.component.id
                parent_node.children.append(node)
        return ComponentUpdate(
            operation=UpdateOperation.CREATE,
            target_id=component.id,
            component=component,
            position=position,
        )
    def update_component(
        self, component_id: str, updates: Dict[str, Any]
    ) -> Optional[ComponentUpdate]:
        """Update a component's properties; None when the ID is unknown."""
        node = self.flat_index.get(component_id)
        if not node:
            return None
        # Rebuild the component from merged fields so model validation reruns.
        component_data = node.component.model_dump()
        component_data.update(updates)
        component_data["lifecycle"] = ComponentLifecycle.UPDATE
        component_data["timestamp"] = datetime.utcnow().isoformat()
        updated_component = node.component.__class__(**component_data)
        node.component = updated_component
        return ComponentUpdate(
            operation=UpdateOperation.UPDATE,
            target_id=component_id,
            component=updated_component,
            updates=updates,
        )
    def replace_component(
        self, old_id: str, new_component: RichComponent
    ) -> Optional[ComponentUpdate]:
        """Replace one component with another; None when *old_id* is unknown."""
        old_node = self.flat_index.get(old_id)
        if not old_node:
            return None
        # Update the component in place
        old_node.component = new_component
        if new_component.id != old_id:
            # BUGFIX: children previously kept parent_id pointing at the
            # replaced ID, so anchor-relative placement under this node
            # silently fell back to the root after a replace with a new ID.
            for child in old_node.children:
                child.parent_id = new_component.id
        # Update index
        del self.flat_index[old_id]
        self.flat_index[new_component.id] = old_node
        return ComponentUpdate(
            operation=UpdateOperation.REPLACE, target_id=old_id, component=new_component
        )
    def remove_component(self, component_id: str) -> Optional[ComponentUpdate]:
        """Remove a component and its entire subtree; None when unknown."""
        node = self.flat_index.get(component_id)
        if not node:
            return None
        # Remove from parent (or drop the whole tree when removing the root).
        if self.root and self.root.component.id == component_id:
            self.root = None
        else:
            if self.root:
                self.root.remove_child(component_id)
        # Remove from flat index (including all children)
        removed_ids = node.get_all_ids()
        for removed_id in removed_ids:
            self.flat_index.pop(removed_id, None)
        return ComponentUpdate(operation=UpdateOperation.REMOVE, target_id=component_id)
    def get_component(self, component_id: str) -> Optional[RichComponent]:
        """Get a component by ID, or None when not present."""
        node = self.flat_index.get(component_id)
        return node.component if node else None
    def _find_parent(self, position: Optional[Position]) -> Optional[ComponentNode]:
        """Resolve the parent node for a new component.

        Falls back to the root whenever the position, its anchor, or the
        anchor's parent cannot be resolved.
        """
        if not position or not position.anchor_id:
            return self.root
        anchor_node = self.flat_index.get(position.anchor_id)
        if not anchor_node:
            return self.root
        if position.relation == "inside":
            return anchor_node
        elif position.relation in ["before", "after", "replace"]:
            # Siblings attach to the anchor's parent, not the anchor itself.
            if anchor_node.parent_id:
                parent_node = self.flat_index.get(anchor_node.parent_id)
                return parent_node if parent_node else self.root
            else:
                return self.root
        else:
            return self.root
class ComponentManager:
"""Manages component lifecycle and state updates."""
def __init__(self) -> None:
self.components: Dict[str, RichComponent] = {}
self.component_tree = ComponentTree()
self.update_history: List[ComponentUpdate] = []
self.active_batch: Optional[str] = None
def emit(self, component: RichComponent) -> Optional[ComponentUpdate]:
"""Emit a component with smart lifecycle management."""
if component.id in self.components:
# Existing component - determine if this is an update or replace
existing = self.components[component.id]
if component.lifecycle == ComponentLifecycle.UPDATE:
# Extract changes
old_data = existing.model_dump()
new_data = component.model_dump()
updates = {k: v for k, v in new_data.items() if old_data.get(k) != v}
update = self.component_tree.update_component(component.id, updates)
else:
# Replace
update = self.component_tree.replace_component(component.id, component)
else:
# New component - always append
update = self.component_tree.add_component(component, None)
if update:
self.components[component.id] = component
self.update_history.append(update)
if self.active_batch:
update.batch_id = self.active_batch
return update
def update_component(
self, component_id: str, **updates: Any
) -> Optional[ComponentUpdate]:
"""Update specific fields of an existing component."""
update = self.component_tree.update_component(component_id, updates)
if update and update.component:
self.components[component_id] = update.component
self.update_history.append(update)
if self.active_batch:
update.batch_id = self.active_batch
return update
def replace_component(
self, old_id: str, new_component: RichComponent
) -> Optional[ComponentUpdate]:
"""Replace one component with another."""
update = self.component_tree.replace_component(old_id, new_component)
if update:
self.components.pop(old_id, None)
self.components[new_component.id] = new_component
self.update_history.append(update)
if self.active_batch:
update.batch_id = self.active_batch
return update
def remove_component(self, component_id: str) -> Optional[ComponentUpdate]:
"""Remove a component and handle cleanup."""
update = self.component_tree.remove_component(component_id)
if update:
self.components.pop(component_id, None)
self.update_history.append(update)
if self.active_batch:
update.batch_id = self.active_batch
return update
def get_component(self, component_id: str) -> Optional[RichComponent]:
"""Get a component by ID."""
return self.components.get(component_id)
def get_all_components(self) -> List[RichComponent]:
"""Get all components in the manager."""
return list(self.components.values())
def start_batch(self) -> str:
"""Start a batch of related updates."""
self.active_batch = str(uuid.uuid4())
return self.active_batch
def end_batch(self) -> Optional[str]:
"""End the current batch."""
batch_id = self.active_batch
self.active_batch = None
return batch_id
def get_updates_since(
self, timestamp: Optional[str] = None
) -> List[ComponentUpdate]:
"""Get all updates since a given timestamp."""
if not timestamp:
return self.update_history.copy()
try:
cutoff = datetime.fromisoformat(timestamp.replace("Z", "+00:00"))
return [
update
for update in self.update_history
if datetime.fromisoformat(update.timestamp.replace("Z", "+00:00"))
> cutoff
]
except ValueError:
return self.update_history.copy()
def clear_history(self) -> None:
    """Forget all recorded component updates."""
    del self.update_history[:]
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/component_manager.py",
"license": "MIT License",
"lines": 260,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
vanna-ai/vanna:src/vanna/core/components.py | """
UI component base class.
This module defines the UiComponent class which is the return type for tool executions.
It's placed in core/ because it's a fundamental type that tools return, not just a UI concern.
"""
from datetime import datetime, timezone
from typing import Any, Optional

from pydantic import BaseModel, Field, model_validator
class UiComponent(BaseModel):
    """Base class for UI components streamed to client.

    This wraps both rich and simple component representations,
    allowing tools to return structured UI updates.

    Note: We use Any for component types to avoid circular dependencies.
    Type validation happens at runtime through validators.
    """

    # Creation time as an ISO-8601 string. datetime.utcnow() is deprecated
    # (Python 3.12+) and produces a naive datetime; use an explicit
    # timezone-aware UTC clock instead.
    timestamp: str = Field(
        default_factory=lambda: datetime.now(timezone.utc).isoformat()
    )
    rich_component: Any = Field(
        ..., description="Rich component for advanced rendering"
    )
    simple_component: Optional[Any] = Field(
        None, description="Simple component for basic rendering"
    )

    @model_validator(mode="after")
    def validate_components(self) -> "UiComponent":
        """Validate that components are the correct types at runtime.

        Raises:
            ValueError: If rich_component is not a RichComponent, or if
                simple_component is neither None nor a SimpleComponent.
        """
        # Import from core - clean imports, no circular dependency
        from .rich_component import RichComponent
        from .simple_component import SimpleComponent

        if not isinstance(self.rich_component, RichComponent):
            raise ValueError(
                f"rich_component must be a RichComponent, got {type(self.rich_component)}"
            )
        if self.simple_component is not None and not isinstance(
            self.simple_component, SimpleComponent
        ):
            raise ValueError(
                f"simple_component must be a SimpleComponent or None, got {type(self.simple_component)}"
            )
        return self

    # Allow non-pydantic component instances as field values.
    model_config = {"arbitrary_types_allowed": True}
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/components.py",
"license": "MIT License",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/core/enhancer/base.py | """
LLM context enhancer interface.
LLM context enhancers allow you to add additional context to the system prompt
and user messages before LLM calls.
"""
from abc import ABC
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..user.models import User
from ..llm.models import LlmMessage
class LlmContextEnhancer(ABC):
    """Hook for injecting extra context into LLM prompts and messages.

    Subclass to customize how a conversation is enriched before it reaches
    the model, for example:
    - augmenting the system prompt with context relevant to the user's
      opening message
    - enriching user messages with retrieved knowledge (memory / RAG)
    - injecting examples, documentation, or temporal context

    Example:
        class MemoryBasedEnhancer(LlmContextEnhancer):
            def __init__(self, agent_memory):
                self.agent_memory = agent_memory

            async def enhance_system_prompt(self, system_prompt, user_message, user):
                examples = await self.agent_memory.search_similar(user_message)
                return system_prompt + "\\n\\nRelevant examples:\\n" + examples

        agent = Agent(
            llm_service=...,
            llm_context_enhancer=MemoryBasedEnhancer(agent_memory),
        )
    """

    async def enhance_system_prompt(
        self, system_prompt: str, user_message: str, user: "User"
    ) -> str:
        """Return a (possibly augmented) system prompt.

        Invoked once per conversation turn, before any tool calls, with the
        user's initial message so implementations can tailor the prompt.

        Args:
            system_prompt: The prompt as originally configured.
            user_message: The user's opening message for this turn.
            user: The requesting user.

        Returns:
            The system prompt to send; the default implementation returns
            it unchanged.
        """
        return system_prompt

    async def enhance_user_messages(
        self, messages: list["LlmMessage"], user: "User"
    ) -> list["LlmMessage"]:
        """Return a (possibly modified) list of messages for the LLM.

        Called before every LLM request, including after tool calls — be
        careful not to re-add the same context on each iteration.

        Args:
            messages: The messages queued for the request.
            user: The requesting user.

        Returns:
            The message list to send; unchanged by default.
        """
        return messages
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/enhancer/base.py",
"license": "MIT License",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/core/enhancer/default.py | """
Default LLM context enhancer implementation using AgentMemory.
This implementation enriches the system prompt with relevant memories
based on the user's initial message.
"""
from typing import TYPE_CHECKING, List, Optional
from .base import LlmContextEnhancer
if TYPE_CHECKING:
from ..user.models import User
from ..llm.models import LlmMessage
from ...capabilities.agent_memory import AgentMemory, TextMemorySearchResult
class DefaultLlmContextEnhancer(LlmContextEnhancer):
    """Memory-backed implementation of ``LlmContextEnhancer``.

    Looks up text memories relevant to the user's opening message and
    appends them to the system prompt. User messages pass through
    untouched.

    Example:
        agent = Agent(
            llm_service=...,
            agent_memory=agent_memory,
            llm_context_enhancer=DefaultLlmContextEnhancer(agent_memory),
        )
    """

    def __init__(self, agent_memory: Optional["AgentMemory"] = None):
        """Store the memory backend; with None, enhancement is a no-op."""
        self.agent_memory = agent_memory

    async def enhance_system_prompt(
        self, system_prompt: str, user_message: str, user: "User"
    ) -> str:
        """Append relevant memories to the system prompt.

        Any failure during the memory lookup is logged and swallowed so a
        memory outage never breaks the request; the unmodified prompt is
        returned in that case.

        Args:
            system_prompt: The original system prompt.
            user_message: The initial user message used as the search query.
            user: The user making the request.

        Returns:
            The system prompt, possibly extended with memory snippets.
        """
        if not self.agent_memory:
            return system_prompt
        try:
            # Imported lazily to avoid a circular dependency.
            import uuid

            from ..tool import ToolContext

            # Throwaway context just to drive the memory search.
            search_context = ToolContext(
                user=user,
                conversation_id="temp",
                request_id=str(uuid.uuid4()),
                agent_memory=self.agent_memory,
            )
            results: List[
                "TextMemorySearchResult"
            ] = await self.agent_memory.search_text_memories(
                query=user_message, context=search_context, limit=5
            )
            if not results:
                return system_prompt
            # Render each memory as a bullet under a dedicated section.
            section = "\n\n## Relevant Context from Memory\n\n"
            section += "The following domain knowledge and context from prior interactions may be relevant:\n\n"
            for hit in results:
                section += f"β’ {hit.memory.content}\n"
            return system_prompt + section
        except Exception as e:
            # Degrade gracefully: memory problems must not fail the request.
            import logging

            logging.getLogger(__name__).warning(
                f"Failed to enhance system prompt with memories: {e}"
            )
            return system_prompt

    async def enhance_user_messages(
        self, messages: list["LlmMessage"], user: "User"
    ) -> list["LlmMessage"]:
        """Pass messages through unchanged (override to customize).

        Args:
            messages: The list of messages.
            user: The user making the request.

        Returns:
            The original list of messages, unmodified.
        """
        return messages
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/enhancer/default.py",
"license": "MIT License",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/core/enricher/base.py | """
Base context enricher interface.
Context enrichers allow you to add additional data to the ToolContext
before tools are executed.
"""
from abc import ABC
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..tool.models import ToolContext
class ToolContextEnricher(ABC):
    """Hook for attaching extra data to a ToolContext before tools run.

    Typical uses: user preferences from a database, session state,
    temporal context (timezone, current date), user history or profile
    data, environment-specific configuration. Implementations usually
    write into ``context.metadata``.

    Example:
        class UserPreferencesEnricher(ToolContextEnricher):
            def __init__(self, db):
                self.db = db

            async def enrich_context(self, context):
                prefs = await self.db.get_user_preferences(context.user.id)
                context.metadata["preferences"] = prefs
                context.metadata["timezone"] = prefs.get("timezone", "UTC")
                return context

        agent = AgentRunner(
            llm_service=...,
            context_enrichers=[UserPreferencesEnricher(db), SessionEnricher()],
        )
    """

    async def enrich_context(self, context: "ToolContext") -> "ToolContext":
        """Return the context, optionally enriched with extra data.

        Args:
            context: The tool execution context (typically mutated in
                place via ``context.metadata``).

        Returns:
            The enriched context; returned unchanged by default.
        """
        return context
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/enricher/base.py",
"license": "MIT License",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/core/errors.py | """
Exception classes for the Vanna Agents framework.
This module defines all custom exceptions used throughout the framework.
"""
class AgentError(Exception):
    """Root of the framework's exception hierarchy."""


class ToolExecutionError(AgentError):
    """A tool raised or otherwise failed while executing."""


class ToolNotFoundError(AgentError):
    """The requested tool is not present in the registry."""


class PermissionError(AgentError):
    """The user lacks the permissions required for the operation.

    NOTE: shadows the builtin ``PermissionError`` within this module;
    import it explicitly where both are needed.
    """


class ConversationNotFoundError(AgentError):
    """No conversation exists with the given identifier."""


class LlmServiceError(AgentError):
    """Communication with the LLM service failed."""


class ValidationError(AgentError):
    """Supplied data failed validation."""
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/errors.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/core/evaluation/base.py | """
Core evaluation abstractions for the Vanna Agents framework.
This module provides the base classes and models for evaluating agent behavior,
including test cases, expected outcomes, and evaluation results.
"""
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional

from pydantic import BaseModel, Field

from vanna.core import User, UiComponent
class ExpectedOutcome(BaseModel):
    """Defines what we expect from the agent for a test case.

    Provides multiple ways to specify expectations:
    - tools_called: List of tool names that should be called
    - tools_not_called: List of tool names that should NOT be called
    - final_answer_contains: Keywords/phrases that should appear in output
    - final_answer_not_contains: Keywords/phrases that should NOT appear
    - min_components: Minimum number of UI components expected
    - max_execution_time_ms: Maximum allowed execution time
    - custom_validators: documented for future use — no such field exists yet
    """

    # Tool-trajectory expectations; None means "not checked"
    tools_called: Optional[List[str]] = None
    tools_not_called: Optional[List[str]] = None
    # Substring checks applied to the agent's final answer
    final_answer_contains: Optional[List[str]] = None
    final_answer_not_contains: Optional[List[str]] = None
    # Bounds on the number of emitted UI components
    min_components: Optional[int] = None
    max_components: Optional[int] = None
    # Wall-clock budget for the run, in milliseconds
    max_execution_time_ms: Optional[float] = None
    # Free-form extras for categorization/filtering
    metadata: Dict[str, Any] = {}
class TestCase(BaseModel):
    """A single evaluation test case.

    Attributes:
        id: Unique identifier for the test case
        user: User context for the test
        message: The message to send to the agent
        conversation_id: Optional conversation ID for multi-turn tests
        expected_outcome: What we expect the agent to do/produce
        metadata: Additional metadata for categorization/filtering
    """

    id: str
    user: User
    message: str
    # Reuse an existing conversation for multi-turn scenarios
    conversation_id: Optional[str] = None
    # None means "run the agent but apply no expectations"
    expected_outcome: Optional[ExpectedOutcome] = None
    # Free-form tags; used by EvaluationDataset.filter_by_metadata
    metadata: Dict[str, Any] = {}
@dataclass
class AgentResult:
    """Everything captured from running an agent on one test case.

    Recorded so evaluators can inspect the run after the fact.
    """

    test_case_id: str
    components: List[UiComponent]
    tool_calls: List[Dict[str, Any]] = field(default_factory=list)
    llm_requests: List[Dict[str, Any]] = field(default_factory=list)
    execution_time_ms: float = 0.0
    total_tokens: int = 0
    error: Optional[str] = None
    metadata: Dict[str, Any] = field(default_factory=dict)

    def get_final_answer(self) -> str:
        """Join the content of every text component with newlines."""
        fragments = []
        for wrapper in self.components:
            if not hasattr(wrapper, "rich_component"):
                continue
            rich = wrapper.rich_component
            if not (hasattr(rich, "type") and rich.type.value == "text"):
                continue
            # Content may live in the data dict or as a direct attribute.
            text = rich.data.get("content") or getattr(rich, "content", "")
            if text:
                fragments.append(text)
        return "\n".join(fragments)

    def get_tool_names_called(self) -> List[str]:
        """Names of the tools invoked during the run, in call order."""
        return [entry.get("tool_name", "") for entry in self.tool_calls]
class EvaluationResult(BaseModel):
    """Result of evaluating a single test case.

    Attributes:
        test_case_id: ID of the test case evaluated
        evaluator_name: Name of the evaluator that produced this result
        passed: Whether the test case passed
        score: Score from 0.0 to 1.0
        reasoning: Explanation of the evaluation
        metrics: Additional metrics captured during evaluation
        timestamp: When the evaluation was performed
    """

    test_case_id: str
    evaluator_name: str
    passed: bool
    score: float  # 0.0 to 1.0
    reasoning: str
    metrics: Dict[str, Any] = {}
    # BUG FIX: `timestamp: datetime = datetime.now()` evaluated the clock
    # once at import time, so every result shared the module-load
    # timestamp. A default_factory is evaluated per instance instead.
    timestamp: datetime = Field(default_factory=datetime.now)
@dataclass
class TestCaseResult:
    """Complete result for a single test case including all evaluations."""

    test_case: TestCase
    agent_result: AgentResult
    evaluations: List[EvaluationResult]
    execution_time_ms: float

    def overall_passed(self) -> bool:
        """True only when every evaluator passed this case."""
        for evaluation in self.evaluations:
            if not evaluation.passed:
                return False
        return True

    def overall_score(self) -> float:
        """Mean evaluator score, or 0.0 when there are no evaluations."""
        if not self.evaluations:
            return 0.0
        total = sum(evaluation.score for evaluation in self.evaluations)
        return total / len(self.evaluations)
@dataclass
class AgentVariant:
    """One agent configuration under evaluation (e.g. a different LLM).

    Used when comparing agent configurations, especially different
    models or model versions.

    Attributes:
        name: Human-readable label for this variant
        agent: The agent instance to evaluate
        metadata: Extra details (model name, provider, config, etc)
    """

    name: str
    agent: Any  # Agent type - avoiding circular import
    metadata: Dict[str, Any] = field(default_factory=dict)
class Evaluator(ABC):
    """Base class for evaluating agent behavior.

    Evaluators examine the agent's execution and determine if it
    met expectations. Multiple evaluators can be composed to check
    different aspects (trajectory, output quality, efficiency, etc).
    """

    @property
    @abstractmethod
    def name(self) -> str:
        """Name of this evaluator (reported as EvaluationResult.evaluator_name)."""
        pass

    @abstractmethod
    async def evaluate(
        self,
        test_case: TestCase,
        agent_result: AgentResult,
    ) -> EvaluationResult:
        """Evaluate a single test case execution.

        Args:
            test_case: The test case that was executed
            agent_result: The result from running the agent

        Returns:
            EvaluationResult with pass/fail, score, and reasoning
        """
        pass
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/evaluation/base.py",
"license": "MIT License",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/core/evaluation/dataset.py | """
Dataset loaders for evaluation test cases.
This module provides utilities for loading test case datasets from
YAML and JSON files.
"""
import json
import yaml
from typing import Any, Dict, List
from pathlib import Path
from .base import TestCase, ExpectedOutcome
from vanna.core import User
class EvaluationDataset:
    """Collection of test cases with metadata.

    Round-trips to both YAML and JSON; ``save_*`` emits the same
    structure that ``from_*`` accepts.

    Example YAML format:
        dataset:
          name: "SQL Generation Tasks"
          description: "Test cases for SQL generation"
          test_cases:
            - id: "sql_001"
              user_id: "test_user"
              message: "Show me total sales by region"
              expected_outcome:
                tools_called: ["generate_sql", "execute_query"]
                final_answer_contains: ["SELECT", "GROUP BY", "region"]
    """

    def __init__(self, name: str, test_cases: List[TestCase], description: str = ""):
        """Initialize evaluation dataset.

        Args:
            name: Name of the dataset
            test_cases: List of test cases
            description: Optional description
        """
        self.name = name
        self.test_cases = test_cases
        self.description = description

    @classmethod
    def from_yaml(cls, path: str) -> "EvaluationDataset":
        """Load dataset from YAML file.

        Args:
            path: Path to YAML file

        Returns:
            EvaluationDataset instance
        """
        with open(path, "r") as f:
            data = yaml.safe_load(f)
        return cls._from_dict(data)

    @classmethod
    def from_json(cls, path: str) -> "EvaluationDataset":
        """Load dataset from JSON file.

        Args:
            path: Path to JSON file

        Returns:
            EvaluationDataset instance
        """
        with open(path, "r") as f:
            data = json.load(f)
        return cls._from_dict(data)

    @classmethod
    def _from_dict(cls, data: Dict[str, Any]) -> "EvaluationDataset":
        """Create dataset from dictionary.

        Args:
            data: Dictionary with dataset structure

        Returns:
            EvaluationDataset instance
        """
        # Accept both {"dataset": {...}} wrappers and a bare mapping.
        dataset_config = data.get("dataset", data)
        name = dataset_config.get("name", "Unnamed Dataset")
        description = dataset_config.get("description", "")
        test_cases = []
        for tc_data in dataset_config.get("test_cases", []):
            test_case = cls._parse_test_case(tc_data)
            test_cases.append(test_case)
        return cls(name=name, test_cases=test_cases, description=description)

    @classmethod
    def _parse_test_case(cls, data: Dict[str, Any]) -> TestCase:
        """Parse a single test case from dictionary.

        Args:
            data: Test case dictionary

        Returns:
            TestCase instance
        """
        # Create user; defaults keep minimal test specs terse
        user_id = data.get("user_id", "test_user")
        user = User(
            id=user_id,
            username=data.get("username", user_id),
            email=data.get("email", f"{user_id}@example.com"),
            group_memberships=data.get("user_groups", []),
        )
        # Parse expected outcome if present
        expected_outcome = None
        if "expected_outcome" in data:
            outcome_data = data["expected_outcome"]
            expected_outcome = ExpectedOutcome(
                tools_called=outcome_data.get("tools_called"),
                tools_not_called=outcome_data.get("tools_not_called"),
                final_answer_contains=outcome_data.get("final_answer_contains"),
                final_answer_not_contains=outcome_data.get("final_answer_not_contains"),
                min_components=outcome_data.get("min_components"),
                max_components=outcome_data.get("max_components"),
                max_execution_time_ms=outcome_data.get("max_execution_time_ms"),
                metadata=outcome_data.get("metadata", {}),
            )
        return TestCase(
            id=data["id"],
            user=user,
            message=data["message"],
            conversation_id=data.get("conversation_id"),
            expected_outcome=expected_outcome,
            metadata=data.get("metadata", {}),
        )

    def save_yaml(self, path: str) -> None:
        """Save dataset to YAML file.

        Args:
            path: Path to save YAML file
        """
        data = self._to_dict()
        with open(path, "w") as f:
            yaml.dump(data, f, default_flow_style=False, sort_keys=False)

    def save_json(self, path: str) -> None:
        """Save dataset to JSON file.

        Args:
            path: Path to save JSON file
        """
        data = self._to_dict()
        with open(path, "w") as f:
            json.dump(data, f, indent=2)

    def _to_dict(self) -> Dict[str, Any]:
        """Convert dataset to dictionary.

        Returns:
            Dictionary representation
        """
        return {
            "dataset": {
                "name": self.name,
                "description": self.description,
                "test_cases": [self._test_case_to_dict(tc) for tc in self.test_cases],
            }
        }

    def _test_case_to_dict(self, test_case: TestCase) -> Dict[str, Any]:
        """Convert test case to dictionary.

        Args:
            test_case: TestCase to convert

        Returns:
            Dictionary representation
        """
        data: Dict[str, Any] = {
            "id": test_case.id,
            "user_id": test_case.user.id,
            "username": test_case.user.username,
            "email": test_case.user.email,
            "user_groups": test_case.user.group_memberships,
            "message": test_case.message,
        }
        if test_case.conversation_id:
            data["conversation_id"] = test_case.conversation_id
        if test_case.expected_outcome:
            # Emit only the expectation fields that were actually set
            outcome = test_case.expected_outcome
            outcome_dict: Dict[str, Any] = {}
            if outcome.tools_called:
                outcome_dict["tools_called"] = outcome.tools_called
            if outcome.tools_not_called:
                outcome_dict["tools_not_called"] = outcome.tools_not_called
            if outcome.final_answer_contains:
                outcome_dict["final_answer_contains"] = outcome.final_answer_contains
            if outcome.final_answer_not_contains:
                outcome_dict["final_answer_not_contains"] = (
                    outcome.final_answer_not_contains
                )
            if outcome.min_components is not None:
                outcome_dict["min_components"] = outcome.min_components
            if outcome.max_components is not None:
                outcome_dict["max_components"] = outcome.max_components
            if outcome.max_execution_time_ms is not None:
                outcome_dict["max_execution_time_ms"] = outcome.max_execution_time_ms
            if outcome.metadata:
                outcome_dict["metadata"] = outcome.metadata
            if outcome_dict:
                data["expected_outcome"] = outcome_dict
        if test_case.metadata:
            data["metadata"] = test_case.metadata
        return data

    def filter_by_metadata(self, **kwargs: Any) -> "EvaluationDataset":
        """Filter test cases by metadata fields.

        Args:
            **kwargs: Metadata fields to match

        Returns:
            New EvaluationDataset with filtered test cases
        """
        filtered = [
            tc
            for tc in self.test_cases
            if all(tc.metadata.get(k) == v for k, v in kwargs.items())
        ]
        return EvaluationDataset(
            name=f"{self.name} (filtered)",
            test_cases=filtered,
            description=f"Filtered from: {self.description}",
        )

    def __len__(self) -> int:
        """Get number of test cases."""
        return len(self.test_cases)

    def __repr__(self) -> str:
        """String representation."""
        return (
            f"EvaluationDataset(name='{self.name}', test_cases={len(self.test_cases)})"
        )
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/evaluation/dataset.py",
"license": "MIT License",
"lines": 207,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
vanna-ai/vanna:src/vanna/core/evaluation/evaluators.py | """
Built-in evaluators for common evaluation tasks.
This module provides ready-to-use evaluators for:
- Trajectory evaluation (tools called, order, efficiency)
- Output evaluation (content matching, quality)
- LLM-as-judge evaluation (custom criteria)
- Efficiency evaluation (time, tokens, cost)
"""
from typing import Dict, Any, Optional
from datetime import datetime
from .base import Evaluator, TestCase, AgentResult, EvaluationResult
from vanna.core import LlmService
class TrajectoryEvaluator(Evaluator):
    """Evaluate the path the agent took (tools called, order, etc).

    Verifies that every expected tool was invoked and no forbidden tool
    was, which helps confirm the agent's reasoning and planning.
    """

    @property
    def name(self) -> str:
        return "trajectory"

    async def evaluate(
        self, test_case: TestCase, agent_result: AgentResult
    ) -> EvaluationResult:
        """Score the tool-call trajectory against the expected outcome."""
        if agent_result.error:
            return EvaluationResult(
                test_case_id=test_case.id,
                evaluator_name=self.name,
                passed=False,
                score=0.0,
                reasoning=f"Agent execution failed: {agent_result.error}",
            )
        expected = test_case.expected_outcome
        if not expected:
            return EvaluationResult(
                test_case_id=test_case.id,
                evaluator_name=self.name,
                passed=True,
                score=1.0,
                reasoning="No expected outcome specified, passing by default",
            )
        tools_called = agent_result.get_tool_names_called()
        issues = []
        score = 1.0
        # Each missing required tool costs an equal share of 0.5
        for required in expected.tools_called or []:
            if required not in tools_called:
                issues.append(f"Expected tool '{required}' was not called")
                score -= 0.5 / len(expected.tools_called)
        # Each forbidden tool that ran costs an equal share of 0.5
        for forbidden in expected.tools_not_called or []:
            if forbidden in tools_called:
                issues.append(f"Unexpected tool '{forbidden}' was called")
                score -= 0.5 / len(expected.tools_not_called)
        score = max(0.0, min(1.0, score))
        passed = score >= 0.7  # 70% threshold
        reasoning = "Trajectory evaluation: "
        if issues:
            reasoning += "; ".join(issues)
        else:
            reasoning += "All expected tools called, no unexpected tools"
        return EvaluationResult(
            test_case_id=test_case.id,
            evaluator_name=self.name,
            passed=passed,
            score=score,
            reasoning=reasoning,
            metrics={
                "tools_called": tools_called,
                "num_tools_called": len(tools_called),
                "issues": issues,
            },
        )
class OutputEvaluator(Evaluator):
    """Evaluate the final output quality.

    Performs case-insensitive substring checks: required phrases must be
    present in the final answer and forbidden phrases absent.
    """

    @property
    def name(self) -> str:
        return "output"

    async def evaluate(
        self, test_case: TestCase, agent_result: AgentResult
    ) -> EvaluationResult:
        """Score the agent's final answer against content expectations."""
        if agent_result.error:
            return EvaluationResult(
                test_case_id=test_case.id,
                evaluator_name=self.name,
                passed=False,
                score=0.0,
                reasoning=f"Agent execution failed: {agent_result.error}",
            )
        expected = test_case.expected_outcome
        if not expected:
            return EvaluationResult(
                test_case_id=test_case.id,
                evaluator_name=self.name,
                passed=True,
                score=1.0,
                reasoning="No expected outcome specified, passing by default",
            )
        final_answer = agent_result.get_final_answer().lower()
        issues = []
        score = 1.0
        # Required phrases must appear (both sides lowercased)
        for required in expected.final_answer_contains or []:
            if required.lower() not in final_answer:
                issues.append(
                    f"Expected content '{required}' not found in output"
                )
                score -= 0.5 / len(expected.final_answer_contains)
        # Forbidden phrases must be absent
        for forbidden in expected.final_answer_not_contains or []:
            if forbidden.lower() in final_answer:
                issues.append(
                    f"Forbidden content '{forbidden}' found in output"
                )
                score -= 0.5 / len(expected.final_answer_not_contains)
        score = max(0.0, min(1.0, score))
        passed = score >= 0.7  # 70% threshold
        reasoning = "Output evaluation: "
        if issues:
            reasoning += "; ".join(issues)
        else:
            reasoning += "All expected content present, no forbidden content"
        return EvaluationResult(
            test_case_id=test_case.id,
            evaluator_name=self.name,
            passed=passed,
            score=score,
            reasoning=reasoning,
            metrics={
                "output_length": len(final_answer),
                "issues": issues,
            },
        )
class LLMAsJudgeEvaluator(Evaluator):
    """Use an LLM to judge agent performance based on custom criteria.

    This evaluator uses a separate LLM to assess the quality of the
    agent's output based on natural language criteria.
    """

    def __init__(self, judge_llm: LlmService, criteria: str):
        """Initialize LLM-as-judge evaluator.

        Args:
            judge_llm: The LLM service to use for judging
            criteria: Natural language description of what to evaluate
        """
        self.judge_llm = judge_llm
        self.criteria = criteria

    @property
    def name(self) -> str:
        """Name of this evaluator."""
        return "llm_judge"

    async def evaluate(
        self, test_case: TestCase, agent_result: AgentResult
    ) -> EvaluationResult:
        """Evaluate using LLM as judge.

        Sends the query, the agent's final answer, and the configured
        criteria to the judge LLM, then parses SCORE/PASSED/REASONING
        lines from its reply. Judge failures yield a failed result rather
        than raising.
        """
        if agent_result.error:
            return EvaluationResult(
                test_case_id=test_case.id,
                evaluator_name=self.name,
                passed=False,
                score=0.0,
                reasoning=f"Agent execution failed: {agent_result.error}",
            )
        final_answer = agent_result.get_final_answer()
        # Build prompt for judge
        judge_prompt = f"""You are evaluating an AI agent's response to a user query.
User Query: {test_case.message}
Agent's Response:
{final_answer}
Evaluation Criteria:
{self.criteria}
Please evaluate the response and provide:
1. A score from 0.0 to 1.0 (where 1.0 is perfect)
2. Whether it passes (score >= 0.7)
3. Brief reasoning for your evaluation
Respond in this format:
SCORE: <number>
PASSED: <yes/no>
REASONING: <your explanation>
"""
        try:
            # Call judge LLM
            from vanna.core.llm import LlmRequest, LlmMessage

            request = LlmRequest(
                user=test_case.user,
                messages=[LlmMessage(role="user", content=judge_prompt)],
                temperature=0.0,  # Deterministic judging
            )
            response = await self.judge_llm.send_request(request)
            judgment = response.content or ""
            # Parse response
            score = self._parse_score(judgment)
            passed = self._parse_passed(judgment)
            reasoning = self._parse_reasoning(judgment)
            return EvaluationResult(
                test_case_id=test_case.id,
                evaluator_name=self.name,
                passed=passed,
                score=score,
                reasoning=reasoning,
                metrics={"judge_response": judgment},
            )
        except Exception as e:
            # Any judge/transport error becomes a failed evaluation
            return EvaluationResult(
                test_case_id=test_case.id,
                evaluator_name=self.name,
                passed=False,
                score=0.0,
                reasoning=f"LLM judge evaluation failed: {str(e)}",
            )

    def _parse_score(self, judgment: str) -> float:
        """Parse score from judge response (first 'SCORE:' line; 0.5 fallback)."""
        try:
            for line in judgment.split("\n"):
                if line.startswith("SCORE:"):
                    score_str = line.replace("SCORE:", "").strip()
                    return float(score_str)
        except Exception:
            pass
        return 0.5  # Default if parsing fails

    def _parse_passed(self, judgment: str) -> bool:
        """Parse pass/fail from judge response (missing line means fail)."""
        for line in judgment.split("\n"):
            if line.startswith("PASSED:"):
                passed_str = line.replace("PASSED:", "").strip().lower()
                return passed_str in ["yes", "true", "pass"]
        return False

    def _parse_reasoning(self, judgment: str) -> str:
        """Parse reasoning from judge response."""
        for line in judgment.split("\n"):
            if line.startswith("REASONING:"):
                return line.replace("REASONING:", "").strip()
        return judgment  # Return full judgment if no reasoning line found
class EfficiencyEvaluator(Evaluator):
    """Evaluate resource usage (time, tokens, cost).

    Flags runs that exceed the configured time/token/cost budgets.
    """

    def __init__(
        self,
        max_execution_time_ms: Optional[float] = None,
        max_tokens: Optional[int] = None,
        max_cost_usd: Optional[float] = None,
    ):
        """Initialize efficiency evaluator.

        Args:
            max_execution_time_ms: Maximum allowed execution time in milliseconds
            max_tokens: Maximum allowed token usage
            max_cost_usd: Maximum allowed cost in USD (accepted, not yet enforced)
        """
        self.max_execution_time_ms = max_execution_time_ms
        self.max_tokens = max_tokens
        self.max_cost_usd = max_cost_usd

    @property
    def name(self) -> str:
        return "efficiency"

    async def evaluate(
        self, test_case: TestCase, agent_result: AgentResult
    ) -> EvaluationResult:
        """Score resource usage against evaluator- and case-level budgets."""
        issues = []
        score = 1.0
        # Evaluator-level wall-clock budget
        if self.max_execution_time_ms:
            if agent_result.execution_time_ms > self.max_execution_time_ms:
                issues.append(
                    f"Execution time {agent_result.execution_time_ms:.0f}ms "
                    f"exceeded limit {self.max_execution_time_ms:.0f}ms"
                )
                score -= 0.33
        # Evaluator-level token budget
        if self.max_tokens:
            if agent_result.total_tokens > self.max_tokens:
                issues.append(
                    f"Token usage {agent_result.total_tokens} exceeded limit {self.max_tokens}"
                )
                score -= 0.33
        # Cost budget is accepted but not yet enforced (needs cost metadata)
        # Per-test-case time budget, if the test case specifies one
        expected = test_case.expected_outcome
        if expected and expected.max_execution_time_ms:
            if agent_result.execution_time_ms > expected.max_execution_time_ms:
                issues.append(
                    f"Execution time {agent_result.execution_time_ms:.0f}ms "
                    f"exceeded test case limit {expected.max_execution_time_ms:.0f}ms"
                )
                score -= 0.34
        score = max(0.0, min(1.0, score))
        passed = score >= 0.7
        reasoning = "Efficiency evaluation: "
        reasoning += "; ".join(issues) if issues else "Within resource limits"
        return EvaluationResult(
            test_case_id=test_case.id,
            evaluator_name=self.name,
            passed=passed,
            score=score,
            reasoning=reasoning,
            metrics={
                "execution_time_ms": agent_result.execution_time_ms,
                "total_tokens": agent_result.total_tokens,
                "issues": issues,
            },
        )
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/evaluation/evaluators.py",
"license": "MIT License",
"lines": 312,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
vanna-ai/vanna:src/vanna/core/evaluation/report.py | """
Evaluation reporting with HTML, CSV, and console output.
This module provides classes for generating evaluation reports,
including comparison reports for evaluating multiple agent variants.
"""
import csv
from typing import List, Dict, Optional, Any
from dataclasses import dataclass, field
from datetime import datetime
from .base import TestCaseResult, AgentVariant, Evaluator, TestCase
@dataclass
class EvaluationReport:
    """Aggregated evaluation results for one agent.

    Attributes:
        agent_name: Name of the agent evaluated
        results: Per-test-case results
        evaluators: Evaluators that produced the results
        metadata: Extra information about the agent/run
        timestamp: When the evaluation was run
    """

    agent_name: str
    results: List[TestCaseResult]
    evaluators: List[Evaluator]
    metadata: Dict[str, Any] = field(default_factory=dict)
    timestamp: datetime = field(default_factory=datetime.now)

    def pass_rate(self) -> float:
        """Fraction of test cases that passed overall (0.0 to 1.0)."""
        if not self.results:
            return 0.0
        passing = [r for r in self.results if r.overall_passed()]
        return len(passing) / len(self.results)

    def average_score(self) -> float:
        """Mean overall score across test cases (0.0 when empty)."""
        if not self.results:
            return 0.0
        total = sum(r.overall_score() for r in self.results)
        return total / len(self.results)

    def average_time(self) -> float:
        """Mean execution time per test case, in milliseconds."""
        if not self.results:
            return 0.0
        total_ms = sum(r.execution_time_ms for r in self.results)
        return total_ms / len(self.results)

    def total_tokens(self) -> int:
        """Total tokens consumed across every test case."""
        return sum(r.agent_result.total_tokens for r in self.results)

    def get_failures(self) -> List[TestCaseResult]:
        """Return the subset of results that did not pass."""
        return [r for r in self.results if not r.overall_passed()]

    def print_summary(self) -> None:
        """Print a human-readable summary to stdout."""
        bar = "=" * 80
        print(f"\n{bar}")
        print(f"EVALUATION REPORT: {self.agent_name}")
        print(f"{bar}")
        print(f"Timestamp: {self.timestamp.isoformat()}")
        print(f"Test Cases: {len(self.results)}")
        print(f"Pass Rate: {self.pass_rate():.1%}")
        print(f"Average Score: {self.average_score():.2f}")
        print(f"Average Time: {self.average_time():.0f}ms")
        print(f"Total Tokens: {self.total_tokens()}")
        print(f"{bar}\n")
        failures = self.get_failures()
        if not failures:
            return
        print(f"FAILURES ({len(failures)}):")
        for failure in failures:
            print(f"\n  Test Case: {failure.test_case.id}")
            print(f"  Message: {failure.test_case.message}")
            print(f"  Score: {failure.overall_score():.2f}")
            for evaluation in failure.evaluations:
                if evaluation.passed:
                    continue
                print(
                    f"    [{evaluation.evaluator_name}] {evaluation.reasoning}"
                )
@dataclass
class ComparisonReport:
    """Report comparing multiple agent variants.

    This is the primary report type for LLM comparison use cases.

    Attributes:
        variants: List of agent variants compared
        reports: Dict mapping variant name to EvaluationReport
        test_cases: Test cases used for comparison
        timestamp: When the comparison was run
    """

    variants: List[AgentVariant]
    reports: Dict[str, EvaluationReport]
    test_cases: List[TestCase]
    timestamp: datetime = field(default_factory=datetime.now)

    def print_summary(self) -> None:
        """Print comparison summary to console."""
        print("\n" + "=" * 80)
        print("AGENT COMPARISON SUMMARY")
        print("=" * 80)
        print(f"Timestamp: {self.timestamp.isoformat()}")
        print(f"Variants: {len(self.variants)}")
        print(f"Test Cases: {len(self.test_cases)}")
        # Table of results
        print(
            f"\n{'Agent':<25} {'Pass Rate':<12} {'Avg Score':<12} {'Avg Time':<12} {'Tokens':<12}"
        )
        print("-" * 80)
        for variant_name, report in self.reports.items():
            print(
                f"{variant_name:<25} "
                f"{report.pass_rate():<12.1%} "
                f"{report.average_score():<12.2f} "
                f"{report.average_time():<12.0f} "
                f"{report.total_tokens():<12,}"
            )
        print("=" * 80 + "\n")

    def get_best_variant(self, metric: str = "score") -> str:
        """Get the best performing variant by metric.

        Args:
            metric: Metric to optimize ('score', 'speed', 'pass_rate')

        Returns:
            Name of the best variant

        Raises:
            ValueError: If metric is not one of the supported names.
        """
        if metric == "score":
            return max(self.reports.items(), key=lambda x: x[1].average_score())[0]
        elif metric == "speed":
            # Lower average time is better, hence min() here.
            return min(self.reports.items(), key=lambda x: x[1].average_time())[0]
        elif metric == "pass_rate":
            return max(self.reports.items(), key=lambda x: x[1].pass_rate())[0]
        else:
            raise ValueError(f"Unknown metric: {metric}")

    def save_csv(self, path: str) -> None:
        """Save detailed CSV for further analysis.

        Each row represents one test case × one variant combination.
        """
        with open(path, "w", newline="") as f:
            writer = csv.writer(f)
            # Header
            writer.writerow(
                [
                    "variant",
                    "test_case_id",
                    "test_message",
                    "passed",
                    "score",
                    "execution_time_ms",
                    "tokens",
                    "error",
                    "evaluator_scores",
                ]
            )
            # Data rows
            for variant_name, report in self.reports.items():
                for result in report.results:
                    evaluator_scores = {
                        e.evaluator_name: e.score for e in result.evaluations
                    }
                    writer.writerow(
                        [
                            variant_name,
                            result.test_case.id,
                            result.test_case.message[:50],  # Truncate
                            result.overall_passed(),
                            result.overall_score(),
                            result.execution_time_ms,
                            result.agent_result.total_tokens,
                            result.agent_result.error or "",
                            str(evaluator_scores),
                        ]
                    )

    def save_html(self, path: str) -> None:
        """Save interactive HTML comparison report.

        Generates a rich HTML report with:
        - Summary statistics
        - Charts comparing variants
        - Side-by-side test case results
        """
        html = self._generate_html()
        with open(path, "w") as f:
            f.write(html)

    def _generate_html(self) -> str:
        """Generate HTML content for report."""
        # Build HTML report
        html_parts = [
            "<!DOCTYPE html>",
            "<html>",
            "<head>",
            "<title>Agent Comparison Report</title>",
            "<style>",
            "body { font-family: Arial, sans-serif; margin: 20px; }",
            "h1 { color: #333; }",
            "table { border-collapse: collapse; width: 100%; margin: 20px 0; }",
            "th, td { border: 1px solid #ddd; padding: 12px; text-align: left; }",
            "th { background-color: #4CAF50; color: white; }",
            "tr:nth-child(even) { background-color: #f2f2f2; }",
            ".passed { color: green; font-weight: bold; }",
            ".failed { color: red; font-weight: bold; }",
            ".best { background-color: #d4edda !important; }",
            "</style>",
            "</head>",
            "<body>",
            "<h1>Agent Comparison Report</h1>",
            f"<p>Generated: {self.timestamp.isoformat()}</p>",
            f"<p>Variants: {len(self.variants)} | Test Cases: {len(self.test_cases)}</p>",
        ]
        # Summary table; the best-scoring variant's row gets the highlight class.
        html_parts.append("<h2>Summary</h2>")
        html_parts.append("<table>")
        html_parts.append(
            "<tr><th>Agent</th><th>Pass Rate</th><th>Avg Score</th><th>Avg Time (ms)</th><th>Total Tokens</th></tr>"
        )
        best_by_score = self.get_best_variant("score")
        for variant_name, report in self.reports.items():
            row_class = "best" if variant_name == best_by_score else ""
            html_parts.append(
                f"<tr class='{row_class}'>"
                f"<td>{variant_name}</td>"
                f"<td>{report.pass_rate():.1%}</td>"
                f"<td>{report.average_score():.2f}</td>"
                f"<td>{report.average_time():.0f}</td>"
                f"<td>{report.total_tokens():,}</td>"
                f"</tr>"
            )
        html_parts.append("</table>")
        # Test case details
        html_parts.append("<h2>Test Case Details</h2>")
        for i, test_case in enumerate(self.test_cases):
            html_parts.append(f"<h3>Test Case {i + 1}: {test_case.id}</h3>")
            html_parts.append(f"<p><strong>Message:</strong> {test_case.message}</p>")
            html_parts.append("<table>")
            html_parts.append(
                "<tr><th>Variant</th><th>Result</th><th>Score</th><th>Time (ms)</th></tr>"
            )
            for variant_name, report in self.reports.items():
                # Find this variant's result for the current test case, if any.
                result = next(
                    (r for r in report.results if r.test_case.id == test_case.id), None
                )
                if result:
                    passed_class = "passed" if result.overall_passed() else "failed"
                    passed_text = "PASS" if result.overall_passed() else "FAIL"
                    html_parts.append(
                        "<tr>"
                        f"<td>{variant_name}</td>"
                        f"<td class='{passed_class}'>{passed_text}</td>"
                        f"<td>{result.overall_score():.2f}</td>"
                        f"<td>{result.execution_time_ms:.0f}</td>"
                        "</tr>"
                    )
            html_parts.append("</table>")
        html_parts.append("</body>")
        html_parts.append("</html>")
        return "\n".join(html_parts)
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/evaluation/report.py",
"license": "MIT License",
"lines": 243,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
vanna-ai/vanna:src/vanna/core/evaluation/runner.py | """
Evaluation runner with parallel execution support.
This module provides the EvaluationRunner class that executes test cases
against agents with configurable parallelism for efficient evaluation,
especially when comparing multiple LLMs or model versions.
"""
import asyncio
from typing import Any, List, Dict, Optional, AsyncGenerator, TYPE_CHECKING
from datetime import datetime
from .base import (
TestCase,
AgentResult,
TestCaseResult,
AgentVariant,
Evaluator,
)
from vanna.core import UiComponent
from vanna.core.user.request_context import RequestContext
from vanna.core.observability import ObservabilityProvider
if TYPE_CHECKING:
from vanna import Agent
from .report import EvaluationReport, ComparisonReport
class EvaluationRunner:
    """Run evaluations with parallel execution support.

    The primary use case is comparing multiple agent variants (e.g., different LLMs)
    on the same set of test cases. The runner executes test cases in parallel with
    configurable concurrency to handle I/O-bound LLM operations efficiently.

    Example:
        >>> runner = EvaluationRunner(
        ...     evaluators=[TrajectoryEvaluator(), OutputEvaluator()],
        ...     max_concurrency=20
        ... )
        >>> comparison = await runner.compare_agents(
        ...     agent_variants=[claude_variant, gpt_variant],
        ...     test_cases=dataset.test_cases
        ... )
    """

    def __init__(
        self,
        evaluators: List[Evaluator],
        max_concurrency: int = 10,
        observability_provider: Optional[ObservabilityProvider] = None,
    ):
        """Initialize the evaluation runner.

        Args:
            evaluators: List of evaluators to apply to each test case
            max_concurrency: Maximum number of concurrent test case executions
            observability_provider: Optional observability for tracking eval runs
        """
        self.evaluators = evaluators
        self.max_concurrency = max_concurrency
        self.observability = observability_provider
        # A single shared semaphore caps in-flight test cases across all variants.
        self._semaphore = asyncio.Semaphore(max_concurrency)

    async def run_evaluation(
        self,
        agent: "Agent",
        test_cases: List[TestCase],
    ) -> "EvaluationReport":
        """Run evaluation on a single agent.

        Args:
            agent: The agent to evaluate
            test_cases: List of test cases to run

        Returns:
            EvaluationReport with results for all test cases
        """
        from .report import EvaluationReport

        results = await self._run_test_cases_parallel(agent, test_cases)
        return EvaluationReport(
            agent_name="agent",
            results=results,
            evaluators=self.evaluators,
            timestamp=datetime.now(),
        )

    async def compare_agents(
        self,
        agent_variants: List[AgentVariant],
        test_cases: List[TestCase],
    ) -> "ComparisonReport":
        """Compare multiple agent variants on same test cases.

        This is the PRIMARY use case for LLM comparison. Runs all variants
        in parallel for maximum efficiency with I/O-bound LLM calls.

        Args:
            agent_variants: List of agent variants to compare
            test_cases: Test cases to run on each variant

        Returns:
            ComparisonReport with results for all variants
        """
        from .report import ComparisonReport

        # Create span for overall comparison
        if self.observability:
            span = await self.observability.create_span(
                "agent_comparison",
                attributes={
                    "num_variants": len(agent_variants),
                    "num_test_cases": len(test_cases),
                },
            )

        # Run all variants in parallel
        tasks = [
            self._run_agent_variant(variant, test_cases) for variant in agent_variants
        ]
        variant_reports = await asyncio.gather(*tasks)

        if self.observability:
            await self.observability.end_span(span)

        return ComparisonReport(
            variants=agent_variants,
            reports=dict(zip([v.name for v in agent_variants], variant_reports)),
            test_cases=test_cases,
            timestamp=datetime.now(),
        )

    async def compare_agents_streaming(
        self,
        agent_variants: List[AgentVariant],
        test_cases: List[TestCase],
    ) -> AsyncGenerator[tuple[str, TestCaseResult, int, int], None]:
        """Stream comparison results as they complete.

        Useful for long-running evaluations where you want to see
        progress updates in real-time (e.g., for UI display).

        Args:
            agent_variants: Agent variants to compare
            test_cases: Test cases to run

        Yields:
            Tuples of (variant_name, result, completed_count, total_count)

        Note:
            Assumes every worker eventually enqueues one result per test
            case; a worker raising before enqueueing would stall the loop.
            _run_single_test_case wraps agent errors into results, so this
            holds for agent-level failures.
        """
        queue: asyncio.Queue[tuple[str, TestCaseResult]] = asyncio.Queue()

        async def worker(variant: AgentVariant) -> None:
            """Worker that runs test cases for one variant."""
            results = await self._run_test_cases_parallel(variant.agent, test_cases)
            for result in results:
                await queue.put((variant.name, result))

        # Start all workers
        workers = [asyncio.create_task(worker(v)) for v in agent_variants]

        # Yield results as they arrive
        completed = 0
        total = len(agent_variants) * len(test_cases)
        while completed < total:
            variant_name, result = await queue.get()
            completed += 1
            yield variant_name, result, completed, total

        # Wait for all workers to complete
        await asyncio.gather(*workers)

    async def _run_agent_variant(
        self,
        variant: AgentVariant,
        test_cases: List[TestCase],
    ) -> "EvaluationReport":
        """Run a single agent variant on all test cases.

        Args:
            variant: The agent variant to evaluate
            test_cases: Test cases to run

        Returns:
            EvaluationReport for this variant
        """
        from .report import EvaluationReport

        if self.observability:
            span = await self.observability.create_span(
                f"variant_{variant.name}",
                attributes={
                    "variant": variant.name,
                    "num_test_cases": len(test_cases),
                    **variant.metadata,
                },
            )

        results = await self._run_test_cases_parallel(variant.agent, test_cases)

        if self.observability:
            await self.observability.end_span(span)

        return EvaluationReport(
            agent_name=variant.name,
            results=results,
            evaluators=self.evaluators,
            metadata=variant.metadata,
            timestamp=datetime.now(),
        )

    async def _run_test_cases_parallel(
        self,
        agent: "Agent",
        test_cases: List[TestCase],
    ) -> List[TestCaseResult]:
        """Run test cases in parallel with concurrency limit.

        Args:
            agent: The agent to run test cases on
            test_cases: Test cases to execute

        Returns:
            List of TestCaseResult, one per test case
        """
        tasks = [
            self._run_single_test_case(agent, test_case) for test_case in test_cases
        ]
        return await asyncio.gather(*tasks)

    async def _run_single_test_case(
        self,
        agent: "Agent",
        test_case: TestCase,
    ) -> TestCaseResult:
        """Run a single test case with semaphore to limit concurrency.

        Args:
            agent: The agent to execute
            test_case: The test case to run

        Returns:
            TestCaseResult with agent execution and evaluations
        """
        async with self._semaphore:
            # Execute agent. Use the running loop's clock directly:
            # asyncio.get_event_loop() inside a coroutine has been
            # deprecated since Python 3.10.
            start_time = asyncio.get_running_loop().time()
            agent_result = await self._execute_agent(agent, test_case)
            execution_time = asyncio.get_running_loop().time() - start_time

            # Run evaluators
            eval_results = []
            for evaluator in self.evaluators:
                eval_result = await evaluator.evaluate(test_case, agent_result)
                eval_results.append(eval_result)

            return TestCaseResult(
                test_case=test_case,
                agent_result=agent_result,
                evaluations=eval_results,
                execution_time_ms=execution_time * 1000,
            )

    async def _execute_agent(
        self,
        agent: "Agent",
        test_case: TestCase,
    ) -> AgentResult:
        """Execute agent and capture full trajectory.

        Args:
            agent: The agent to execute
            test_case: The test case to run

        Returns:
            AgentResult with all captured data
        """
        components: List[UiComponent] = []
        tool_calls: List[Dict[str, Any]] = []
        error: Optional[str] = None

        try:
            # Create request context with user info from test case
            # This allows the agent's UserResolver to resolve the correct user
            request_context = RequestContext(
                cookies={"user_id": test_case.user.id},
                headers={},
                metadata={"test_case_user": test_case.user},
            )
            async for component in agent.send_message(
                request_context=request_context,
                message=test_case.message,
                conversation_id=test_case.conversation_id,
            ):
                components.append(component)
        except Exception as e:
            # Capture the failure on the result rather than propagating, so
            # one broken run doesn't abort the whole evaluation batch.
            error = str(e)

        # TODO: Extract tool calls and LLM requests from observability
        # For now, these will be empty unless we hook into observability
        return AgentResult(
            test_case_id=test_case.id,
            components=components,
            tool_calls=tool_calls,
            llm_requests=[],
            error=error,
        )
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/evaluation/runner.py",
"license": "MIT License",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
vanna-ai/vanna:src/vanna/core/filter/base.py | """
Base conversation filter interface.
Conversation filters allow you to transform conversation history before
it's sent to the LLM for processing.
"""
from abc import ABC
from typing import TYPE_CHECKING, List
if TYPE_CHECKING:
from ..storage import Message
class ConversationFilter(ABC):
    """Transforms conversation history before it reaches the LLM.

    Subclasses can drop, rewrite, or reorder messages in order to:
    - Strip sensitive content
    - Summarize or truncate long histories
    - Enforce context-window budgets
    - Remove duplicate or low-value messages
    - Prioritize recent or relevant messages

    Example:
        class LastNFilter(ConversationFilter):
            def __init__(self, n: int = 20):
                self.n = n

            async def filter_messages(self, messages):
                # Keep only the n most recent messages.
                return messages[-self.n:]

        agent = AgentRunner(
            llm_service=...,
            conversation_filters=[
                SensitiveDataFilter(),
                LastNFilter(n=20),
            ]
        )
    """

    async def filter_messages(self, messages: List["Message"]) -> List["Message"]:
        """Return the (possibly transformed) message list.

        Args:
            messages: Conversation messages in chronological order

        Returns:
            Filtered/transformed list of messages

        Note:
            Filters are applied in registration order, so each filter sees
            the output of the previous one. The base implementation passes
            the list through unchanged.
        """
        return messages
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/filter/base.py",
"license": "MIT License",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/core/lifecycle/base.py | """
Base lifecycle hook interface.
Lifecycle hooks allow you to intercept and customize agent behavior
at key points in the execution flow.
"""
from abc import ABC
from typing import TYPE_CHECKING, Any, Optional
if TYPE_CHECKING:
from ..user.models import User
from ..tool import Tool
from ..tool.models import ToolContext, ToolResult
class LifecycleHook(ABC):
    """Extension point for intercepting agent execution.

    Override any subset of these callbacks to observe or influence the
    agent at well-defined points: message arrival, message completion,
    and tool execution. Typical uses are logging, telemetry, quota or
    rate-limit enforcement, validation, and result post-processing.
    All defaults are no-ops.

    Example:
        class AuditHook(LifecycleHook):
            async def before_message(self, user: User, message: str) -> Optional[str]:
                print(f"User {user.username} sent: {message}")
                return None  # keep the message unchanged

        agent = AgentRunner(
            llm_service=...,
            lifecycle_hooks=[AuditHook(), QuotaCheckHook()]
        )
    """

    async def before_message(self, user: "User", message: str) -> Optional[str]:
        """Run before a user message is processed.

        Args:
            user: User sending the message
            message: Original message content

        Returns:
            A replacement message string, or None to keep the original

        Raises:
            AgentError: To halt message processing (e.g., quota exceeded)
        """
        return None

    async def after_message(self, result: Any) -> None:
        """Run once the message has been fully processed.

        Args:
            result: Final result from message processing
        """

    async def before_tool(self, tool: "Tool[Any]", context: "ToolContext") -> None:
        """Run just before a tool executes.

        Args:
            tool: Tool about to be executed
            context: Tool execution context

        Raises:
            AgentError: To prevent tool execution
        """

    async def after_tool(self, result: "ToolResult") -> Optional["ToolResult"]:
        """Run just after a tool executes.

        Args:
            result: Result from tool execution

        Returns:
            A replacement ToolResult, or None to keep the original
        """
        return None
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/lifecycle/base.py",
"license": "MIT License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/core/llm/base.py | """
LLM domain interface.
This module contains the abstract base class for LLM services.
"""
from abc import ABC, abstractmethod
from typing import Any, AsyncGenerator, List
from .models import LlmRequest, LlmResponse, LlmStreamChunk
class LlmService(ABC):
    """Abstract interface for communicating with an LLM backend."""

    @abstractmethod
    async def send_request(self, request: LlmRequest) -> LlmResponse:
        """Send one request and return the complete response."""
        ...

    @abstractmethod
    async def stream_request(
        self, request: LlmRequest
    ) -> AsyncGenerator[LlmStreamChunk, None]:
        """Stream a request to the LLM, yielding chunks as they arrive.

        Args:
            request: The LLM request to stream

        Yields:
            LlmStreamChunk instances as they arrive
        """
        # The unreachable yield marks this as an async generator, giving
        # subclasses and callers the correct calling convention.
        raise NotImplementedError
        yield  # pragma: no cover - makes this an async generator

    @abstractmethod
    async def validate_tools(self, tools: List[Any]) -> List[str]:
        """Check tool schemas and return a list of error descriptions."""
        ...
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/llm/base.py",
"license": "MIT License",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/core/llm/models.py | """
LLM domain models.
This module contains data models for LLM communication.
"""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
from ..tool.models import ToolCall
from ..user.models import User
class LlmMessage(BaseModel):
    """A single chat message in provider-agnostic LLM wire format.

    Mirrors the common role/content structure of chat-completion APIs,
    with optional tool-call fields for tool-using turns.
    """

    role: str = Field(description="Message role")
    content: str = Field(description="Message content")
    # Tool invocations requested by this message (typically on assistant
    # turns); None when the message requests no tools.
    tool_calls: Optional[List[ToolCall]] = Field(default=None)
    # Identifier linking a tool-result message back to the originating
    # call; None for ordinary messages.
    tool_call_id: Optional[str] = Field(default=None)
class LlmRequest(BaseModel):
    """Request to LLM service.

    Bundles the conversation, available tool schemas, and sampling
    parameters for a single call to an LlmService implementation.
    """

    messages: List[LlmMessage] = Field(description="Messages to send")
    tools: Optional[List[Any]] = Field(
        default=None, description="Available tools"
    )  # Will be ToolSchema but avoiding circular import
    user: User = Field(description="User making the request")
    stream: bool = Field(default=False, description="Whether to stream response")
    # Sampling temperature; pydantic constrains it to [0.0, 2.0].
    temperature: float = Field(default=0.7, ge=0.0, le=2.0)
    # Optional cap on generated tokens; must be positive when provided.
    max_tokens: Optional[int] = Field(default=None, gt=0)
    system_prompt: Optional[str] = Field(
        default=None, description="System prompt for the LLM"
    )
    # Free-form extras carried alongside the request.
    metadata: Dict[str, Any] = Field(default_factory=dict)
class LlmResponse(BaseModel):
    """Response returned by an LLM service call.

    Either ``content`` (plain text), ``tool_calls`` (requested tool
    invocations), or both may be populated.
    """

    content: Optional[str] = None
    tool_calls: Optional[List[ToolCall]] = None
    finish_reason: Optional[str] = None
    usage: Optional[Dict[str, int]] = None
    metadata: Dict[str, Any] = Field(default_factory=dict)

    def is_tool_call(self) -> bool:
        """Return True when the response requests at least one tool call."""
        # bool() covers both None and an empty list.
        return bool(self.tool_calls)
class LlmStreamChunk(BaseModel):
    """One incremental chunk of a streaming LLM response.

    Fields are optional because a chunk may carry only part of the
    payload: a text delta, tool-call data, or the final finish reason.
    """

    content: Optional[str] = None
    tool_calls: Optional[List[ToolCall]] = None
    finish_reason: Optional[str] = None
    metadata: Dict[str, Any] = Field(default_factory=dict)
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/llm/models.py",
"license": "MIT License",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/core/middleware/base.py | """
Base LLM middleware interface.
Middleware allows you to intercept and transform LLM requests and responses
for caching, monitoring, content filtering, and more.
"""
from abc import ABC
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..llm import LlmRequest, LlmResponse
class LlmMiddleware(ABC):
    """Intercepts LLM traffic on its way in and out.

    Override either hook to observe or rewrite requests and responses,
    e.g. for caching, logging, cost tracking, content filtering, or
    fallback strategies. The base implementations pass everything
    through unchanged.

    Example:
        class ResponseCacheMiddleware(LlmMiddleware):
            def __init__(self):
                self.cache = {}

            async def after_llm_response(self, request, response):
                # Remember the response for identical future requests.
                self.cache[self._compute_key(request)] = response
                return response

        agent = AgentRunner(
            llm_service=...,
            llm_middlewares=[ResponseCacheMiddleware(), LoggingMiddleware()]
        )
    """

    async def before_llm_request(self, request: "LlmRequest") -> "LlmRequest":
        """Hook invoked before the request is sent to the LLM.

        Args:
            request: The outgoing LLM request

        Returns:
            The (possibly modified) request to send
        """
        return request

    async def after_llm_response(
        self, request: "LlmRequest", response: "LlmResponse"
    ) -> "LlmResponse":
        """Hook invoked after the LLM has responded.

        Args:
            request: The request that was sent
            response: The response that was received

        Returns:
            The (possibly modified) response to use
        """
        return response
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/middleware/base.py",
"license": "MIT License",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/core/observability/base.py | """
Base observability provider interface.
Observability providers allow you to collect telemetry data about
agent execution for monitoring and debugging.
"""
from abc import ABC
from typing import Any, Dict, Optional
from .models import Span, Metric
class ObservabilityProvider(ABC):
    """Pluggable telemetry backend for agent execution.

    Override any subset of these hooks to ship metrics and traces to a
    monitoring system (Prometheus, OpenTelemetry, ...). The defaults are
    safe no-ops backed by the in-process Span model, so subclasses only
    implement what they need.

    Example:
        class PrometheusProvider(ObservabilityProvider):
            def __init__(self, registry):
                self.registry = registry
                self.request_counter = Counter(
                    'agent_requests_total',
                    'Total agent requests',
                    registry=registry
                )

            async def record_metric(self, name, value, unit="", tags=None):
                if name == "agent.request":
                    self.request_counter.inc()

        agent = AgentRunner(
            llm_service=...,
            observability_provider=PrometheusProvider(registry)
        )
    """

    async def record_metric(
        self,
        name: str,
        value: float,
        unit: str = "",
        tags: Optional[Dict[str, str]] = None,
    ) -> None:
        """Record a single metric measurement.

        Args:
            name: Metric name (e.g., "agent.request.duration")
            value: Measured value
            unit: Unit of measurement (e.g., "ms", "tokens")
            tags: Optional labels attached to the measurement
        """
        # Base implementation discards the measurement.
        return None

    async def create_span(
        self, name: str, attributes: Optional[Dict[str, Any]] = None
    ) -> Span:
        """Open a new tracing span.

        Args:
            name: Span name/operation
            attributes: Initial span attributes

        Returns:
            Span object tracking the operation; finish it via end_span()
            (or span.end()).
        """
        initial_attributes = attributes or {}
        return Span(name=name, attributes=initial_attributes)

    async def end_span(self, span: Span) -> None:
        """Close a span, stamping its end time.

        Args:
            span: The span to end
        """
        span.end()
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/observability/base.py",
"license": "MIT License",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/core/observability/models.py | """
Observability models for spans and metrics.
"""
import time
from typing import Any, Dict, Optional
from uuid import uuid4
from pydantic import BaseModel, Field
class Span(BaseModel):
    """A timed unit of work for distributed tracing."""

    id: str = Field(default_factory=lambda: str(uuid4()), description="Span ID")
    name: str = Field(description="Span name/operation")
    start_time: float = Field(default_factory=time.time, description="Start timestamp")
    end_time: Optional[float] = Field(default=None, description="End timestamp")
    attributes: Dict[str, Any] = Field(
        default_factory=dict, description="Span attributes"
    )
    parent_id: Optional[str] = Field(default=None, description="Parent span ID")

    def end(self) -> None:
        """Stamp the end time; repeated calls keep the first stamp."""
        if self.end_time is not None:
            return
        self.end_time = time.time()

    def duration_ms(self) -> Optional[float]:
        """Elapsed time in milliseconds, or None while the span is open."""
        if self.end_time is None:
            return None
        elapsed_seconds = self.end_time - self.start_time
        return elapsed_seconds * 1000

    def set_attribute(self, key: str, value: Any) -> None:
        """Store (or overwrite) one attribute on the span."""
        self.attributes[key] = value
class Metric(BaseModel):
    """Represents a metric measurement.

    A single point-in-time observation (e.g., a request duration) with an
    optional unit and tags for aggregation in a monitoring backend.
    """

    name: str = Field(description="Metric name")
    value: float = Field(description="Metric value")
    unit: str = Field(default="", description="Unit of measurement")
    tags: Dict[str, str] = Field(default_factory=dict, description="Metric tags")
    # Defaults to the wall-clock time at model creation.
    timestamp: float = Field(default_factory=time.time, description="Measurement time")
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/observability/models.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/core/recovery/base.py | """
Base error recovery strategy interface.
Recovery strategies allow you to customize how the agent handles errors
during tool execution and LLM communication.
"""
from abc import ABC
from typing import TYPE_CHECKING
from .models import RecoveryAction, RecoveryActionType
if TYPE_CHECKING:
from ..tool.models import ToolContext
from ..llm import LlmRequest
class ErrorRecoveryStrategy(ABC):
    """Policy object deciding how the agent reacts to failures.

    Override either hook to add retries, backoff, fallbacks, graceful
    degradation, or external error reporting. The default policy fails
    fast on every error.

    Example:
        class ExponentialBackoffStrategy(ErrorRecoveryStrategy):
            async def handle_tool_error(
                self, error: Exception, context: ToolContext, attempt: int
            ) -> RecoveryAction:
                if attempt < 3:
                    delay = (2 ** attempt) * 1000  # Exponential backoff
                    return RecoveryAction(
                        action=RecoveryActionType.RETRY,
                        retry_delay_ms=delay,
                        message=f"Retrying after {delay}ms"
                    )
                return RecoveryAction(
                    action=RecoveryActionType.FAIL,
                    message="Max retries exceeded"
                )

        agent = AgentRunner(
            llm_service=...,
            error_recovery_strategy=ExponentialBackoffStrategy()
        )
    """

    async def handle_tool_error(
        self, error: Exception, context: "ToolContext", attempt: int = 1
    ) -> RecoveryAction:
        """Decide what to do after a tool execution failure.

        Args:
            error: The exception that occurred
            context: Tool execution context
            attempt: Current attempt number (1-indexed)

        Returns:
            RecoveryAction indicating how to proceed
        """
        # Default policy: fail fast.
        return RecoveryAction(
            action=RecoveryActionType.FAIL, message=f"Tool error: {str(error)}"
        )

    async def handle_llm_error(
        self, error: Exception, request: "LlmRequest", attempt: int = 1
    ) -> RecoveryAction:
        """Decide what to do after an LLM communication failure.

        Args:
            error: The exception that occurred
            request: The LLM request that failed
            attempt: Current attempt number (1-indexed)

        Returns:
            RecoveryAction indicating how to proceed
        """
        # Default policy: fail fast.
        return RecoveryAction(
            action=RecoveryActionType.FAIL, message=f"LLM error: {str(error)}"
        )
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/recovery/base.py",
"license": "MIT License",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/core/recovery/models.py | """
Recovery action models for error handling.
"""
from enum import Enum
from typing import Any, Optional
from pydantic import BaseModel, Field
class RecoveryActionType(str, Enum):
    """Types of recovery actions.

    Inherits from ``str`` so members serialize directly as their values.
    """
    RETRY = "retry"        # Try the failed operation again.
    FAIL = "fail"          # Give up and surface the error.
    FALLBACK = "fallback"  # Substitute a fallback value instead.
    SKIP = "skip"          # Skip the failed operation entirely.
class RecoveryAction(BaseModel):
    """Action to take when recovering from an error."""
    action: RecoveryActionType = Field(description="Type of recovery action")
    # Only meaningful when action is RETRY.
    retry_delay_ms: Optional[int] = Field(
        default=None, description="Delay before retry in milliseconds"
    )
    # Only meaningful when action is FALLBACK.
    fallback_value: Optional[Any] = Field(
        default=None, description="Fallback value to use"
    )
    # Optional human-readable explanation of the decision.
    message: Optional[str] = Field(
        default=None, description="Message to include with action"
    )
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/recovery/models.py",
"license": "MIT License",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/core/registry.py | """
Tool registry for the Vanna Agents framework.
This module provides the ToolRegistry class for managing and executing tools.
"""
import time
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, TypeVar, Union
from .tool import Tool, ToolCall, ToolContext, ToolRejection, ToolResult, ToolSchema
from .user import User
if TYPE_CHECKING:
from .audit import AuditLogger
from .agent.config import AuditConfig
T = TypeVar("T")
class _LocalToolWrapper(Tool[T]):
    """Delegating wrapper that overrides a tool's access groups.

    Everything except ``access_groups`` is forwarded verbatim to the
    wrapped tool.
    """

    def __init__(self, wrapped_tool: Tool[T], access_groups: List[str]):
        # The real tool; all behaviour is delegated to it.
        self._delegate = wrapped_tool
        # Groups allowed to use the tool, replacing the delegate's own list.
        self._groups = access_groups

    @property
    def name(self) -> str:
        """Name of the underlying tool."""
        return self._delegate.name

    @property
    def description(self) -> str:
        """Description of the underlying tool."""
        return self._delegate.description

    @property
    def access_groups(self) -> List[str]:
        """The overriding access-group list supplied at wrap time."""
        return self._groups

    def get_args_schema(self) -> Type[T]:
        """Argument model of the underlying tool."""
        return self._delegate.get_args_schema()

    async def execute(self, context: ToolContext, args: T) -> ToolResult:
        """Run the underlying tool unchanged."""
        return await self._delegate.execute(context, args)
class ToolRegistry:
    """Registry for managing tools.

    Holds the tool catalogue, enforces group-based access control, validates
    and transforms tool-call arguments, and optionally emits audit events
    around access checks, invocations, and results.
    """
    def __init__(
        self,
        audit_logger: Optional["AuditLogger"] = None,
        audit_config: Optional["AuditConfig"] = None,
    ) -> None:
        """Create an empty registry.

        Args:
            audit_logger: Optional sink for audit events; when None, all
                auditing below is skipped.
            audit_config: Flags selecting which audit events are emitted;
                a default AuditConfig is created when not supplied.
        """
        self._tools: Dict[str, Tool[Any]] = {}
        self.audit_logger = audit_logger
        if audit_config is not None:
            self.audit_config = audit_config
        else:
            # Imported lazily here — presumably to avoid a circular import
            # with the agent package; TODO confirm.
            from .agent.config import AuditConfig
            self.audit_config = AuditConfig()
    def register_local_tool(self, tool: Tool[Any], access_groups: List[str]) -> None:
        """Register a local tool with optional access group restrictions.

        Args:
            tool: The tool to register
            access_groups: List of groups that can access this tool.
                If None or empty, tool is accessible to all users.
                NOTE(review): the annotation says List[str]; callers appear
                able to pass an empty list for "no restriction" — confirm
                whether None is actually supported.

        Raises:
            ValueError: If a tool with the same name is already registered.
        """
        if tool.name in self._tools:
            raise ValueError(f"Tool '{tool.name}' already registered")
        if access_groups:
            # Wrap the tool with access groups
            wrapped_tool = _LocalToolWrapper(tool, access_groups)
            self._tools[tool.name] = wrapped_tool
        else:
            # No access restrictions, register as-is
            self._tools[tool.name] = tool
    async def get_tool(self, name: str) -> Optional[Tool[Any]]:
        """Get a tool by name, or None when not registered."""
        return self._tools.get(name)
    async def list_tools(self) -> List[str]:
        """List all registered tool names."""
        return list(self._tools.keys())
    async def get_schemas(self, user: Optional[User] = None) -> List[ToolSchema]:
        """Get schemas for all tools accessible to user.

        When ``user`` is None, schemas for every registered tool are
        returned without permission filtering.
        """
        schemas = []
        for tool in self._tools.values():
            if user is None or await self._validate_tool_permissions(tool, user):
                schemas.append(tool.get_schema())
        return schemas
    async def _validate_tool_permissions(self, tool: Tool[Any], user: User) -> bool:
        """Validate if user has access to tool based on group membership.

        Checks for intersection between user's group memberships and tool's access groups.
        If tool has no access groups specified, it's accessible to all users.
        """
        tool_access_groups = tool.access_groups
        if not tool_access_groups:
            return True
        user_groups = set(user.group_memberships)
        tool_groups = set(tool_access_groups)
        # Grant access if any group in user.group_memberships exists in tool.access_groups
        return bool(user_groups & tool_groups)
    async def transform_args(
        self,
        tool: Tool[T],
        args: T,
        user: User,
        context: ToolContext,
    ) -> Union[T, ToolRejection]:
        """Transform and validate tool arguments based on user context.

        This method allows per-user transformation of tool arguments, such as:
        - Applying row-level security (RLS) to SQL queries
        - Filtering available options based on user permissions
        - Validating required arguments are present
        - Redacting sensitive fields

        The default implementation performs no transformation (NoOp).
        Subclasses can override this method to implement custom transformation logic.

        Args:
            tool: The tool being executed
            args: Already Pydantic-validated arguments
            user: The user executing the tool
            context: Full execution context

        Returns:
            Either:
            - Transformed arguments (may be unchanged if no transformation needed)
            - ToolRejection with explanation of why args were rejected
        """
        return args  # Default: no transformation (NoOp)
    async def execute(
        self,
        tool_call: ToolCall,
        context: ToolContext,
    ) -> ToolResult:
        """Execute a tool call with validation.

        Pipeline (each stage short-circuits with a failed ToolResult):
        lookup -> group access check -> Pydantic argument validation ->
        per-user argument transformation -> audit events -> timed execution.
        Exceptions raised by the tool are caught and converted to a failed
        ToolResult rather than propagated.
        """
        tool = await self.get_tool(tool_call.name)
        if not tool:
            msg = f"Tool '{tool_call.name}' not found"
            return ToolResult(
                success=False,
                result_for_llm=msg,
                ui_component=None,
                error=msg,
            )
        # Validate group access
        if not await self._validate_tool_permissions(tool, context.user):
            msg = f"Insufficient group access for tool '{tool_call.name}'"
            # Audit access denial
            if (
                self.audit_logger
                and self.audit_config
                and self.audit_config.log_tool_access_checks
            ):
                await self.audit_logger.log_tool_access_check(
                    user=context.user,
                    tool_name=tool_call.name,
                    access_granted=False,
                    required_groups=tool.access_groups,
                    context=context,
                    reason=msg,
                )
            return ToolResult(
                success=False,
                result_for_llm=msg,
                ui_component=None,
                error=msg,
            )
        # Validate and parse arguments
        try:
            args_model = tool.get_args_schema()
            validated_args = args_model.model_validate(tool_call.arguments)
        except Exception as e:
            msg = f"Invalid arguments: {str(e)}"
            return ToolResult(
                success=False,
                result_for_llm=msg,
                ui_component=None,
                error=msg,
            )
        # Transform/validate arguments based on user context
        transform_result = await self.transform_args(
            tool=tool,
            args=validated_args,
            user=context.user,
            context=context,
        )
        if isinstance(transform_result, ToolRejection):
            return ToolResult(
                success=False,
                result_for_llm=transform_result.reason,
                ui_component=None,
                error=transform_result.reason,
            )
        # Use transformed arguments for execution
        final_args = transform_result
        # Audit successful access check
        if (
            self.audit_logger
            and self.audit_config
            and self.audit_config.log_tool_access_checks
        ):
            await self.audit_logger.log_tool_access_check(
                user=context.user,
                tool_name=tool_call.name,
                access_granted=True,
                required_groups=tool.access_groups,
                context=context,
            )
        # Audit tool invocation
        if (
            self.audit_logger
            and self.audit_config
            and self.audit_config.log_tool_invocations
        ):
            # Get UI features if available from context
            ui_features = context.metadata.get("ui_features_available", [])
            await self.audit_logger.log_tool_invocation(
                user=context.user,
                tool_call=tool_call,
                ui_features=ui_features,
                context=context,
                sanitize_parameters=self.audit_config.sanitize_tool_parameters,
            )
        # Execute tool with context-first signature
        try:
            start_time = time.perf_counter()
            result = await tool.execute(context, final_args)
            execution_time_ms = (time.perf_counter() - start_time) * 1000
            # Add execution time to metadata
            result.metadata["execution_time_ms"] = execution_time_ms
            # Audit tool result
            if (
                self.audit_logger
                and self.audit_config
                and self.audit_config.log_tool_results
            ):
                await self.audit_logger.log_tool_result(
                    user=context.user,
                    tool_call=tool_call,
                    result=result,
                    context=context,
                )
            return result
        except Exception as e:
            msg = f"Execution failed: {str(e)}"
            return ToolResult(
                success=False,
                result_for_llm=msg,
                ui_component=None,
                error=msg,
            )
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/registry.py",
"license": "MIT License",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
vanna-ai/vanna:src/vanna/core/rich_component.py | """
Base classes for rich UI components.
This module provides the base RichComponent class and supporting enums
for the component system.
"""
import uuid
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, TypeVar
from pydantic import BaseModel, Field
# Type variable for self-returning methods
T = TypeVar("T", bound="RichComponent")
class ComponentType(str, Enum):
    """Types of rich UI components.

    String-valued so ``.value`` can be sent to the frontend as-is
    (see ``RichComponent.serialize_for_frontend``).
    """
    # Basic components
    TEXT = "text"
    CARD = "card"
    CONTAINER = "container"
    # Primitive UI components (domain-agnostic)
    STATUS_CARD = "status_card"
    PROGRESS_DISPLAY = "progress_display"
    LOG_VIEWER = "log_viewer"
    BADGE = "badge"
    ICON_TEXT = "icon_text"
    # Interactive components
    TASK_LIST = "task_list"
    PROGRESS_BAR = "progress_bar"
    BUTTON = "button"
    BUTTON_GROUP = "button_group"
    # Data components
    TABLE = "table"
    DATAFRAME = "dataframe"
    CHART = "chart"
    CODE_BLOCK = "code_block"
    # Status components
    STATUS_INDICATOR = "status_indicator"
    NOTIFICATION = "notification"
    ALERT = "alert"
    # Artifact components
    ARTIFACT = "artifact"
    # UI state components
    STATUS_BAR_UPDATE = "status_bar_update"
    TASK_TRACKER_UPDATE = "task_tracker_update"
    CHAT_INPUT_UPDATE = "chat_input_update"
    # Legacy (deprecated - use primitives instead)
    TOOL_EXECUTION = "tool_execution"
class ComponentLifecycle(str, Enum):
    """Component lifecycle operations.

    Tells the frontend how to apply a component payload.
    """
    CREATE = "create"    # Render a new component.
    UPDATE = "update"    # Update an existing component in place.
    REPLACE = "replace"  # Swap out an existing component entirely.
    REMOVE = "remove"    # Remove the component from the UI.
class RichComponent(BaseModel):
    """Base class for all rich UI components.

    Components are immutable-by-convention: mutation helpers like
    ``update``/``hide``/``show`` return new instances rather than
    modifying in place.
    """
    # Stable identifier used by the frontend to track the component.
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    # Which renderer the frontend should use for this component.
    type: ComponentType
    # How the frontend should apply this payload (create/update/...).
    lifecycle: ComponentLifecycle = ComponentLifecycle.CREATE
    # Optional component-specific payload; merged during serialization.
    data: Dict[str, Any] = Field(default_factory=dict)
    children: List[str] = Field(default_factory=list)  # Child component IDs
    # ISO-8601 creation/update time.
    # NOTE(review): datetime.utcnow() is naive (no tz offset) and deprecated
    # in Python 3.12+; switching to datetime.now(timezone.utc) would change
    # the emitted string — confirm frontend expectations before changing.
    timestamp: str = Field(default_factory=lambda: datetime.utcnow().isoformat())
    visible: bool = True
    interactive: bool = False
    def update(self: T, **kwargs: Any) -> T:
        """Create an updated copy of this component.

        The copy carries the given field overrides, lifecycle UPDATE, and a
        refreshed timestamp; the original instance is left untouched.
        """
        updated_data = self.model_dump()
        updated_data.update(kwargs)
        updated_data["lifecycle"] = ComponentLifecycle.UPDATE
        updated_data["timestamp"] = datetime.utcnow().isoformat()
        return self.__class__(**updated_data)
    def hide(self: T) -> T:
        """Create a hidden copy of this component."""
        return self.update(visible=False)
    def show(self: T) -> T:
        """Create a visible copy of this component."""
        return self.update(visible=True)
    def serialize_for_frontend(self) -> Dict[str, Any]:
        """Normalize component payload for the frontend renderer.

        The frontend expects component-specific fields to live under the
        ``data`` key while the shared metadata (``id``, ``type``, layout hints,
        etc.) remains at the top level. Pydantic's ``model_dump`` keeps
        component attributes at the top level, so we remap them here before
        streaming them across the wire.

        Returns:
            A dict with shared metadata at the top level, all other fields
            folded under ``data``, and enum fields rendered as plain strings.
        """
        # Base fields that should remain at the top level of the payload.
        shared_fields = {
            "id",
            "type",
            "lifecycle",
            "children",
            "timestamp",
            "visible",
            "interactive",
        }
        raw = self.model_dump()
        payload: Dict[str, Any] = {}
        # Preserve any existing data payload so implementations can opt-in to
        # advanced usage without losing information.
        raw_data = raw.get("data")
        if raw_data is not None and isinstance(raw_data, dict):
            # Copy so we never mutate the dict returned by model_dump.
            component_data: Dict[str, Any] = raw_data.copy()
        else:
            # Handle case where data might be a sequence or other type, or None
            component_data = {}
        for key, value in raw.items():
            if key in shared_fields:
                payload[key] = value
            elif key == "data":
                # For most components, skip the base data field
                continue
            elif (
                key == "rows"
                and hasattr(self, "type")
                and self.type.value == "dataframe"
            ):
                # For DataFrame components, the 'rows' field contains the actual row data
                # which should be included in the component_data as 'data' for the frontend
                component_data["data"] = value
            else:
                # Any subclass-declared field is component-specific payload.
                component_data[key] = value
        payload["data"] = component_data
        # Ensure enums are serialized as primitive values for the frontend.
        payload["type"] = self.type.value
        payload["lifecycle"] = self.lifecycle.value
        return payload
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/rich_component.py",
"license": "MIT License",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
vanna-ai/vanna:src/vanna/core/simple_component.py | """Base classes for simple UI components."""
from typing import Any, Dict, Optional
from pydantic import BaseModel, Field
from enum import Enum
class SimpleComponentType(str, Enum):
    """Kinds of simple UI components; string-valued for direct serialization."""
    TEXT = "text"
    IMAGE = "image"
    LINK = "link"
class SimpleComponent(BaseModel):
    """A simple UI component with basic attributes."""
    # What the component renders as (text, image, or link).
    type: SimpleComponentType = Field(..., description="Type of the component.")
    # Optional finer-grained semantic label for categorization.
    semantic_type: Optional[str] = Field(
        default=None, description="Semantic type for better categorization."
    )
    # Arbitrary extra data attached to the component.
    metadata: Optional[Dict[str, Any]] = Field(
        default=None, description="Additional metadata for the component."
    )
    def serialize_for_frontend(self) -> Dict[str, Any]:
        """Serialize simple component for API consumption.

        Returns:
            A plain dict from ``model_dump`` with all fields at the top level.
        """
        return self.model_dump()
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/simple_component.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/core/storage/base.py | """
Storage domain interface.
This module contains the abstract base class for conversation storage.
"""
from abc import ABC, abstractmethod
from typing import List, Optional
from .models import Conversation
from ..user.models import User
class ConversationStore(ABC):
    """Abstract base class for conversation storage.

    All operations that take a ``user`` are scoped to that user, so one
    user cannot read or mutate another user's conversations.
    """
    @abstractmethod
    async def create_conversation(
        self, conversation_id: str, user: User, initial_message: str
    ) -> Conversation:
        """Create a new conversation with the specified ID.

        Args:
            conversation_id: Identifier to assign to the new conversation.
            user: Owner of the conversation.
            initial_message: First message content to seed the conversation.

        Returns:
            The newly created Conversation.
        """
        pass
    @abstractmethod
    async def get_conversation(
        self, conversation_id: str, user: User
    ) -> Optional[Conversation]:
        """Get conversation by ID, scoped to user.

        Returns:
            The Conversation, or None when not found for this user.
        """
        pass
    @abstractmethod
    async def update_conversation(self, conversation: Conversation) -> None:
        """Update conversation with new messages."""
        pass
    @abstractmethod
    async def delete_conversation(self, conversation_id: str, user: User) -> bool:
        """Delete conversation.

        Returns:
            Whether a conversation was actually deleted.
        """
        pass
    @abstractmethod
    async def list_conversations(
        self, user: User, limit: int = 50, offset: int = 0
    ) -> List[Conversation]:
        """List conversations for user.

        Args:
            user: Owner whose conversations are listed.
            limit: Maximum number of conversations to return.
            offset: Number of conversations to skip (for pagination).
        """
        pass
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/storage/base.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/core/storage/models.py | """
Storage domain models.
This module contains data models for conversation storage.
"""
from datetime import datetime
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
from ..tool.models import ToolCall
from ..user.models import User
class Message(BaseModel):
    """Single message in a conversation."""
    role: str = Field(description="Message role (user/assistant/system/tool)")
    content: str = Field(description="Message content")
    # NOTE(review): datetime.utcnow yields naive timestamps and is deprecated
    # in Python 3.12+; consider datetime.now(timezone.utc), but confirm
    # stored-data compatibility first.
    timestamp: datetime = Field(default_factory=datetime.utcnow)
    # Free-form per-message metadata.
    metadata: Dict[str, Any] = Field(default_factory=dict)
    # Tool calls issued by this message, if any.
    tool_calls: Optional[List[ToolCall]] = Field(default=None)
    tool_call_id: Optional[str] = Field(
        default=None, description="ID if this is a tool response"
    )
class Conversation(BaseModel):
    """Conversation containing multiple messages."""
    id: str = Field(description="Unique conversation identifier")
    user: User = Field(description="User this conversation belongs to")
    messages: List[Message] = Field(
        default_factory=list, description="Messages in conversation"
    )
    # NOTE(review): naive UTC timestamps via deprecated datetime.utcnow;
    # see Message.timestamp for the same concern.
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    metadata: Dict[str, Any] = Field(
        default_factory=dict, description="Additional conversation metadata"
    )
    def add_message(self, message: Message) -> None:
        """Add a message to the conversation.

        Appends in order and refreshes ``updated_at``.
        """
        self.messages.append(message)
        self.updated_at = datetime.utcnow()
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/storage/models.py",
"license": "MIT License",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/core/system_prompt/base.py | """
System prompt builder interface.
This module contains the abstract base class for system prompt builders.
"""
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, List, Optional
if TYPE_CHECKING:
from ..tool.models import ToolSchema
from ..user.models import User
class SystemPromptBuilder(ABC):
    """Abstract base class for system prompt builders.

    Subclasses should implement the build_system_prompt method to generate
    system prompts based on user context and available tools.
    """
    @abstractmethod
    async def build_system_prompt(
        self, user: "User", tools: List["ToolSchema"]
    ) -> Optional[str]:
        """
        Build a system prompt based on user context and available tools.

        Args:
            user: The user making the request
            tools: List of tools available to the user

        Returns:
            System prompt string, or None if no system prompt should be used
        """
        pass
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/system_prompt/base.py",
"license": "MIT License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/core/system_prompt/default.py | """
Default system prompt builder implementation with memory workflow support.
This module provides a default implementation of the SystemPromptBuilder interface
that automatically includes memory workflow instructions when memory tools are available.
"""
from typing import TYPE_CHECKING, List, Optional
from datetime import datetime
from .base import SystemPromptBuilder
if TYPE_CHECKING:
from ..tool.models import ToolSchema
from ..user.models import User
class DefaultSystemPromptBuilder(SystemPromptBuilder):
    """Default system prompt builder with automatic memory workflow integration.

    Dynamically generates system prompts that include memory workflow
    instructions when memory tools (search_saved_correct_tool_uses and
    save_question_tool_args) are available.
    """
    def __init__(self, base_prompt: Optional[str] = None):
        """Initialize with an optional base prompt.

        Args:
            base_prompt: Optional base system prompt. If provided, it is
                returned verbatim and all dynamic prompt generation
                (including memory instructions) is skipped.
        """
        self.base_prompt = base_prompt
    async def build_system_prompt(
        self, user: "User", tools: List["ToolSchema"]
    ) -> Optional[str]:
        """
        Build a system prompt with memory workflow instructions.

        Args:
            user: The user making the request (currently unused by this
                implementation)
            tools: List of tools available to the user

        Returns:
            System prompt string with memory workflow instructions if applicable
        """
        # A fixed base prompt short-circuits all dynamic generation.
        if self.base_prompt is not None:
            return self.base_prompt
        # Check which memory tools are available
        tool_names = [tool.name for tool in tools]
        has_search = "search_saved_correct_tool_uses" in tool_names
        has_save = "save_question_tool_args" in tool_names
        has_text_memory = "save_text_memory" in tool_names
        # Get today's date
        today_date = datetime.now().strftime("%Y-%m-%d")
        # Base system prompt
        prompt_parts = [
            f"You are Vanna, an AI data analyst assistant created to help users with data analysis tasks. Today's date is {today_date}.",
            "",
            "Response Guidelines:",
            "- Any summary of what you did or observations should be the final step.",
            "- Use the available tools to help the user accomplish their goals.",
            "- When you execute a query, that raw result is shown to the user outside of your response so YOU DO NOT need to include it in your response. Focus on summarizing and interpreting the results.",
        ]
        if tools:
            prompt_parts.append(
                f"\nYou have access to the following tools: {', '.join(tool_names)}"
            )
        # Add memory workflow instructions based on available tools
        if has_search or has_save or has_text_memory:
            prompt_parts.append("\n" + "=" * 60)
            prompt_parts.append("MEMORY SYSTEM:")
            prompt_parts.append("=" * 60)
        if has_search or has_save:
            prompt_parts.append("\n1. TOOL USAGE MEMORY (Structured Workflow):")
            prompt_parts.append("-" * 50)
        if has_search:
            # Instruct the model to consult saved patterns before any tool call.
            prompt_parts.extend(
                [
                    "",
                    "β’ BEFORE executing any tool (run_sql, visualize_data, or calculator), you MUST first call search_saved_correct_tool_uses with the user's question to check if there are existing successful patterns for similar questions.",
                    "",
                    "β’ Review the search results (if any) to inform your approach before proceeding with other tool calls.",
                ]
            )
        if has_save:
            # Instruct the model to persist successful tool invocations.
            prompt_parts.extend(
                [
                    "",
                    "β’ AFTER successfully executing a tool that produces correct and useful results, you MUST call save_question_tool_args to save the successful pattern for future use.",
                ]
            )
        if has_search or has_save:
            # Conditional expressions below contribute "" placeholders when a
            # tool is missing; those are stripped by the filter at the end.
            prompt_parts.extend(
                [
                    "",
                    "Example workflow:",
                    " β’ User asks a question",
                    f' β’ First: Call search_saved_correct_tool_uses(question="user\'s question")'
                    if has_search
                    else "",
                    " β’ Then: Execute the appropriate tool(s) based on search results and the question",
                    f' β’ Finally: If successful, call save_question_tool_args(question="user\'s question", tool_name="tool_used", args={{the args you used}})'
                    if has_save
                    else "",
                    "",
                    "Do NOT skip the search step, even if you think you know how to answer. Do NOT forget to save successful executions."
                    if has_search
                    else "",
                    "",
                    "The only exceptions to searching first are:",
                    ' β’ When the user is explicitly asking about the tools themselves (like "list the tools")',
                    " β’ When the user is testing or asking you to demonstrate the save/search functionality itself",
                ]
            )
        if has_text_memory:
            prompt_parts.extend(
                [
                    "",
                    "2. TEXT MEMORY (Domain Knowledge & Context):",
                    "-" * 50,
                    "",
                    "β’ save_text_memory: Save important context about the database, schema, or domain",
                    "",
                    "Use text memory to save:",
                    " β’ Database schema details (column meanings, data types, relationships)",
                    " β’ Company-specific terminology and definitions",
                    " β’ Query patterns or best practices for this database",
                    " β’ Domain knowledge about the business or data",
                    " β’ User preferences for queries or visualizations",
                    "",
                    "DO NOT save:",
                    " β’ Information already captured in tool usage memory",
                    " β’ One-time query results or temporary observations",
                    "",
                    "Examples:",
                    ' β’ save_text_memory(content="The status column uses 1 for active, 0 for inactive")',
                    ' β’ save_text_memory(content="MRR means Monthly Recurring Revenue in our schema")',
                    " β’ save_text_memory(content=\"Always exclude test accounts where email contains 'test'\")",
                ]
            )
        if has_search or has_save or has_text_memory:
            # Remove empty strings from the list
            # NOTE(review): this strips ALL "" entries, including blank lines
            # added intentionally as spacing above, not just the placeholder
            # "" values from the conditional expressions — confirm intended.
            prompt_parts = [part for part in prompt_parts if part != ""]
        return "\n".join(prompt_parts)
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/system_prompt/default.py",
"license": "MIT License",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
vanna-ai/vanna:src/vanna/core/tool/base.py | """
Tool domain interface.
This module contains the abstract base class for tools.
"""
from abc import ABC, abstractmethod
from typing import Generic, List, Type, TypeVar
from .models import ToolContext, ToolResult, ToolSchema
# Type variable for tool argument types
T = TypeVar("T")
class Tool(ABC, Generic[T]):
    """Abstract base class for tools.

    ``T`` is the argument model type returned by ``get_args_schema``.
    Concrete tools implement ``name``, ``description``, ``get_args_schema``
    and ``execute``; ``get_schema`` derives the LLM-facing schema from them.
    """

    @property
    @abstractmethod
    def name(self) -> str:
        """Unique name for this tool."""
        ...

    @property
    @abstractmethod
    def description(self) -> str:
        """Description of what this tool does."""
        ...

    @property
    def access_groups(self) -> List[str]:
        """Groups permitted to access this tool; empty list means no restriction."""
        return []

    @abstractmethod
    def get_args_schema(self) -> Type[T]:
        """Return the Pydantic model for arguments."""
        ...

    @abstractmethod
    async def execute(self, context: ToolContext, args: T) -> ToolResult:
        """Execute the tool with validated arguments.

        Args:
            context: Execution context containing user, conversation_id, and request_id
            args: Arguments already validated against ``get_args_schema``

        Returns:
            ToolResult with success status, result for LLM, and optional UI component
        """
        ...

    def get_schema(self) -> ToolSchema:
        """Generate the tool schema advertised to the LLM."""
        from typing import Any, cast

        model_cls = self.get_args_schema()
        # Argument types without Pydantic's model_json_schema fall back to
        # an empty parameter schema.
        if hasattr(model_cls, "model_json_schema"):
            parameters = cast(Any, model_cls).model_json_schema()
        else:
            parameters = {}
        return ToolSchema(
            name=self.name,
            description=self.description,
            parameters=parameters,
            access_groups=self.access_groups,
        )
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/tool/base.py",
"license": "MIT License",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/core/tool/models.py | """
Tool domain models.
This module contains data models for tool execution.
"""
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from pydantic import BaseModel, Field
# Import AgentMemory at runtime for Pydantic model resolution
from vanna.capabilities.agent_memory import AgentMemory
if TYPE_CHECKING:
from ..components import UiComponent
from ..user.models import User
from ..observability import ObservabilityProvider
class ToolCall(BaseModel):
    """Represents a tool call from the LLM.

    Captures the raw, not-yet-validated invocation emitted by the model;
    arguments are validated later against the tool's schema.
    """
    id: str = Field(description="Unique identifier for this tool call")
    name: str = Field(description="Name of the tool to execute")
    # Raw argument mapping exactly as produced by the LLM (unvalidated).
    arguments: Dict[str, Any] = Field(description="Raw arguments from LLM")
class ToolContext(BaseModel):
    """Context passed to all tool executions."""
    user: "User"  # Forward reference to avoid circular import
    # Conversation this tool call belongs to.
    conversation_id: str
    request_id: str = Field(description="Unique request identifier for tracing")
    agent_memory: AgentMemory = Field(
        description="Agent memory for tool usage learning"
    )
    # Free-form extras (e.g. "ui_features_available" read by ToolRegistry).
    metadata: Dict[str, Any] = Field(default_factory=dict)
    observability_provider: Optional["ObservabilityProvider"] = Field(
        default=None,
        description="Optional observability provider for metrics and spans",
    )
    # NOTE(review): pydantic v2 prefers model_config = ConfigDict(...);
    # the User model in this codebase already uses ConfigDict — consider
    # migrating this legacy class-based Config for consistency.
    class Config:
        # Needed because AgentMemory is not a pydantic-native type.
        arbitrary_types_allowed = True
class ToolResult(BaseModel):
    """Result from tool execution.

    Changes:
    - `result_for_llm`: string that will be sent back to the LLM.
    - `ui_component`: optional UI payload for rendering in clients.
    """
    success: bool = Field(description="Whether execution succeeded")
    result_for_llm: str = Field(description="String content to send back to the LLM")
    ui_component: Optional["UiComponent"] = Field(
        default=None, description="Optional UI component for rendering"
    )
    error: Optional[str] = Field(default=None, description="Error message if failed")
    # Free-form extras; ToolRegistry records "execution_time_ms" here.
    metadata: Dict[str, Any] = Field(default_factory=dict)
class ToolSchema(BaseModel):
    """Schema describing a tool for LLM consumption."""
    name: str = Field(description="Tool name")
    description: str = Field(description="What this tool does")
    # JSON Schema derived from the tool's argument model.
    parameters: Dict[str, Any] = Field(description="JSON Schema of parameters")
    # Empty list means the tool is unrestricted.
    access_groups: List[str] = Field(
        default_factory=list, description="Groups permitted to access this tool"
    )
class ToolRejection(BaseModel):
    """Indicates tool execution should be rejected with a message.

    Used by transform_args to reject tool execution when arguments
    cannot be appropriately transformed for the user's context.
    """
    # Shown to the LLM and recorded as the ToolResult error.
    reason: str = Field(
        description="Explanation of why the tool execution was rejected"
    )
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/tool/models.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
vanna-ai/vanna:src/vanna/core/user/base.py | """
User domain interface.
This module contains the abstract base class for user services.
"""
from abc import ABC, abstractmethod
from typing import Any, Dict, Optional
from .models import User
class UserService(ABC):
    """Service for user management and authentication."""
    @abstractmethod
    async def get_user(self, user_id: str) -> Optional[User]:
        """Get user by ID.

        Returns:
            The User, or None when no user matches ``user_id``.
        """
        pass
    @abstractmethod
    async def authenticate(self, credentials: Dict[str, Any]) -> Optional[User]:
        """Authenticate user and return User object if successful.

        Args:
            credentials: Implementation-defined credential mapping
                (e.g. token or username/password fields).

        Returns:
            The authenticated User, or None on failure.
        """
        pass
    @abstractmethod
    async def has_permission(self, user: User, permission: str) -> bool:
        """Check if user has specific permission."""
        pass
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/user/base.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/core/user/models.py | """
User domain models.
This module contains data models for user management.
"""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, ConfigDict, Field
class User(BaseModel):
    """User model for authentication and scoping."""
    id: str = Field(description="Unique user identifier")
    username: Optional[str] = Field(default=None, description="Username")
    email: Optional[str] = Field(default=None, description="User email")
    metadata: Dict[str, Any] = Field(
        default_factory=dict, description="Additional user metadata"
    )
    # Group names used for tool access control checks.
    group_memberships: List[str] = Field(
        default_factory=list, description="Groups the user belongs to"
    )
    # Unknown extra fields are retained rather than rejected.
    model_config = ConfigDict(extra="allow")
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/user/models.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vanna-ai/vanna:src/vanna/core/user/request_context.py | """
Request context for user resolution.
This module provides the RequestContext model for passing web request
information to UserResolver implementations.
"""
from typing import Any, Dict, Optional
from pydantic import BaseModel, Field
class RequestContext(BaseModel):
    """Structured view of an incoming web request used for user resolution.

    Bundles cookies, headers, query parameters, and framework metadata so
    ``UserResolver`` implementations do not need to unpack raw request dicts.
    """

    cookies: Dict[str, str] = Field(default_factory=dict, description="Request cookies")
    headers: Dict[str, str] = Field(default_factory=dict, description="Request headers")
    remote_addr: Optional[str] = Field(default=None, description="Remote IP address")
    query_params: Dict[str, str] = Field(
        default_factory=dict, description="Query parameters"
    )
    metadata: Dict[str, Any] = Field(
        default_factory=dict, description="Additional framework-specific metadata"
    )

    def get_cookie(self, name: str, default: Optional[str] = None) -> Optional[str]:
        """Return the value of cookie *name*, or *default* when absent."""
        return self.cookies.get(name, default)

    def get_header(self, name: str, default: Optional[str] = None) -> Optional[str]:
        """Return the value of header *name* (case-insensitive), or *default*."""
        wanted = name.lower()
        # First header whose lowercased key matches wins, as in a linear scan.
        return next(
            (value for key, value in self.headers.items() if key.lower() == wanted),
            default,
        )
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/user/request_context.py",
"license": "MIT License",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/core/user/resolver.py | """
User resolver interface for web request authentication.
This module provides the abstract base class for resolving web requests
to authenticated User objects.
"""
from abc import ABC, abstractmethod
from .models import User
from .request_context import RequestContext
class UserResolver(ABC):
    """Maps an incoming web request to an authenticated ``User``.

    Concrete resolvers extract identity from the request context (cookies,
    headers, tokens, ...) and build the corresponding ``User`` object.
    """

    @abstractmethod
    async def resolve_user(self, request_context: RequestContext) -> User:
        """Resolve and return the authenticated user for *request_context*.

        Implementations may raise to signal an authentication failure.
        """
        ...
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/user/resolver.py",
"license": "MIT License",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/core/validation.py | """
Development utilities for validating Pydantic models.
This module provides utilities that can be used during development
and testing to catch forward reference issues early.
"""
from typing import Any, Dict, List, Tuple, Type
from pydantic import BaseModel
import importlib
import inspect
def validate_pydantic_models_in_package(package_name: str) -> Dict[str, Any]:
    """
    Validate every Pydantic model found in a package for completeness.

    Intended for tests and development scripts, to surface forward-reference
    problems before they cause runtime errors.

    Args:
        package_name: Dotted package name to scan (e.g. 'vanna.core')

    Returns:
        Dictionary with validation results
    """
    report: Dict[str, Any] = {
        "total_models": 0,
        "incomplete_models": [],
        "models": {},
        "summary": "",
    }
    try:
        root = importlib.import_module(package_name)

        # Collect (name, module) pairs: every importable submodule when this
        # is a real package, otherwise just the module itself.
        modules = []
        if hasattr(root, "__path__"):
            import pkgutil

            for _, mod_name, _ in pkgutil.iter_modules(
                root.__path__, package_name + "."
            ):
                try:
                    modules.append((mod_name, importlib.import_module(mod_name)))
                except ImportError:
                    continue
        else:
            modules = [(package_name, root)]

        for mod_name, mod in modules:
            for cls_name, candidate in inspect.getmembers(mod):
                is_model = (
                    inspect.isclass(candidate)
                    and issubclass(candidate, BaseModel)
                    and candidate is not BaseModel
                )
                if not is_model:
                    continue
                key = f"{mod_name}.{cls_name}"
                report["total_models"] += 1

                # Record any fields whose annotation is still an unresolved
                # forward reference.
                pending_refs: List[Tuple[str, str]] = [
                    (fname, finfo.annotation.__forward_arg__)
                    for fname, finfo in candidate.model_fields.items()
                    if finfo.annotation is not None
                    and hasattr(finfo.annotation, "__forward_arg__")
                ]

                # A model that cannot emit a JSON schema is incomplete.
                try:
                    candidate.model_json_schema()
                    complete, failure = True, None
                except Exception as e:
                    complete, failure = False, str(e)
                    report["incomplete_models"].append(key)

                report["models"][key] = {
                    "class": candidate,
                    "forward_references": pending_refs,
                    "is_complete": complete,
                    "error": failure,
                }

        # Human-readable one-line verdict.
        broken = report["incomplete_models"]
        total = report["total_models"]
        if not broken:
            report["summary"] = (
                f"β All {total} Pydantic models are complete and valid!"
            )
        else:
            report["summary"] = (
                f"β {len(broken)} of {total} models are incomplete: "
                f"{', '.join(broken)}"
            )
    except Exception as e:
        report["summary"] = f"Error validating package {package_name}: {e}"
    return report
def check_models_health() -> bool:
    """
    Quick health check for all core Pydantic models.

    Scans each core model package and prints one status line per package.

    Returns:
        True if all models are healthy, False otherwise
    """
    core_packages = [
        "vanna.core.tool.models",
        "vanna.core.user.models",
        "vanna.core.llm.models",
        "vanna.core.storage.models",
        "vanna.core.agent.models",
    ]
    all_healthy = True
    for package in core_packages:
        try:
            results = validate_pydantic_models_in_package(package)
            if results["incomplete_models"]:
                print(f"β Issues in {package}: {results['incomplete_models']}")
                all_healthy = False
            else:
                # Fixed: this print's string literal was split across two lines
                # (a syntax error); rejoined onto one line.
                # NOTE(review): a package that fails to import reports 0 models
                # and still counts as healthy here — confirm whether import
                # failures should flip all_healthy.
                print(f"β {package}: {results['total_models']} models OK")
        except Exception as e:
            print(f"β Error checking {package}: {e}")
            all_healthy = False
    return all_healthy
if __name__ == "__main__":
    # Run the model health check as a standalone developer script.
    print("Checking Pydantic model health across core packages...")
    print("=" * 60)
    healthy = check_models_health()
    print("=" * 60)
    if healthy:
        print("π All Pydantic models are healthy!")
    else:
        # On failure, print remediation guidance for forward-reference issues.
        print("β οΈ Some models need attention.")
        print("\nTo fix forward reference issues:")
        print("1. Ensure all referenced classes are imported")
        print("2. Call model_rebuild() after imports")
        print("3. Use proper TYPE_CHECKING imports for circular deps")
        print("\nNote: You can also catch these issues at development time using:")
        print(" - mypy static type checking")
        print(" - This validation script in your test suite")
        print(" - Pre-commit hooks")
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/validation.py",
"license": "MIT License",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
vanna-ai/vanna:src/vanna/core/workflow/base.py | """
Base workflow handler interface.
Workflow triggers allow you to execute deterministic workflows in response to
user messages before they are sent to the LLM. This is useful for:
- Command handling (e.g., /help, /reset)
- Pattern-based routing (e.g., report generation)
- State-based workflows (e.g., onboarding flows)
- Quota enforcement with custom responses
"""
from abc import ABC, abstractmethod
from typing import (
TYPE_CHECKING,
Optional,
Union,
List,
AsyncGenerator,
Callable,
Awaitable,
)
from dataclasses import dataclass
if TYPE_CHECKING:
from ..user.models import User
from ..storage import Conversation
from ...components import UiComponent
from ..agent.agent import Agent
@dataclass
class WorkflowResult:
    """Outcome of a workflow handler's attempt to process a message.

    Attributes:
        should_skip_llm: True when the workflow fully handled the message and
            LLM processing should be skipped; False to continue normal
            agent/LLM processing.
        components: Optional UI components streamed back to the user — either
            a concrete list or an async generator for streaming responses.
        conversation_mutation: Optional async callback applied to the
            conversation (e.g. clearing messages, adding system events).
    """

    should_skip_llm: bool
    components: Optional[
        Union[List["UiComponent"], AsyncGenerator["UiComponent", None]]
    ] = None
    conversation_mutation: Optional[Callable[["Conversation"], Awaitable[None]]] = None
class WorkflowHandler(ABC):
    """Hook for executing deterministic workflows before LLM processing.

    The agent consults this handler after user resolution and conversation
    loading, but before the message is added to history or sent to the LLM.
    Typical uses: slash commands (/help, /reset), pattern-based routing,
    state-driven flows (onboarding, surveys), quota enforcement with custom
    responses, deterministic report generation, and starter UI for new
    conversations.

    Observability: when a WorkflowHandler is configured, the agent wraps
    calls in an "agent.workflow_handler" span so handler performance and
    outcomes can be monitored.
    """

    @abstractmethod
    async def try_handle(
        self, agent: "Agent", user: "User", conversation: "Conversation", message: str
    ) -> WorkflowResult:
        """Decide whether to handle *message* with a deterministic workflow.

        Called for every user message before it reaches the LLM. Inspect the
        message, user, and conversation state to choose between handling the
        message yourself or letting normal agent processing continue.

        Args:
            agent: Agent instance — gives access to tool_registry, config,
                and observability_provider for tool execution and logging.
            user: The sender, including ID, permissions, and metadata.
            conversation: Current conversation context, including history.
            message: The user's raw message content.

        Returns:
            WorkflowResult. When should_skip_llm=True: the message is NOT
            added to history, the result's components are streamed to the
            user, any conversation_mutation runs, and the LLM is skipped.
            When should_skip_llm=False: the message is added to history and
            normal agent processing (LLM call, tool execution, etc.) runs.
        """
        ...

    async def get_starter_ui(
        self, agent: "Agent", user: "User", conversation: "Conversation"
    ) -> Optional[List["UiComponent"]]:
        """Return UI components to show when a new conversation starts.

        Called by the frontend/server when initializing a new conversation,
        before any user messages are sent. Override to provide welcome text,
        starter buttons, or quick actions — possibly generated dynamically
        from the agent's tool registry or the user's permissions.

        Args:
            agent: Agent instance (tool_registry, config, observability).
            user: The user starting the conversation.
            conversation: The new conversation (typically empty).

        Returns:
            List of UI components to display, or None for no starter UI.
        """
        return None
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/workflow/base.py",
"license": "MIT License",
"lines": 211,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
vanna-ai/vanna:src/vanna/core/workflow/default.py | """
Default workflow handler implementation with setup health checking.
This module provides a default implementation of the WorkflowHandler interface
that provides a smart starter UI based on available tools and setup status.
"""
from typing import TYPE_CHECKING, List, Optional, Dict, Any
import traceback
import uuid
from .base import WorkflowHandler, WorkflowResult
if TYPE_CHECKING:
from ..agent.agent import Agent
from ..user.models import User
from ..storage import Conversation
# Import components at module level to avoid circular imports
from vanna.components import (
UiComponent,
RichTextComponent,
StatusCardComponent,
ButtonComponent,
ButtonGroupComponent,
SimpleTextComponent,
CardComponent,
)
# Note: StatusCardComponent and ButtonGroupComponent are kept for /status command compatibility
class DefaultWorkflowHandler(WorkflowHandler):
"""Default workflow handler that provides setup health checking and starter UI.
This handler provides a starter UI that:
- Checks if run_sql tool is available (critical)
- Checks if memory tools are available (warning if missing)
- Checks if visualization tools are available
- Provides appropriate setup guidance based on what's missing
"""
def __init__(self, welcome_message: Optional[str] = None):
"""Initialize with optional custom welcome message.
Args:
welcome_message: Optional custom welcome message. If not provided,
generates one based on available tools.
"""
self.welcome_message = welcome_message
async def try_handle(
self, agent: "Agent", user: "User", conversation: "Conversation", message: str
) -> WorkflowResult:
"""Handle basic commands, but mostly passes through to LLM."""
# Handle basic help command
if message.strip().lower() in ["/help", "help", "/h"]:
# Check if user is admin
is_admin = "admin" in user.group_memberships
help_content = (
"## π€ Vanna AI Assistant\n\n"
"I'm your AI data analyst! Here's what I can help you with:\n\n"
"**π¬ Natural Language Queries**\n"
'- "Show me sales data for last quarter"\n'
'- "Which customers have the highest orders?"\n'
'- "Create a chart of revenue by month"\n\n'
"**π§ Commands**\n"
"- `/help` - Show this help message\n"
)
if is_admin:
help_content += (
"\n**π Admin Commands**\n"
"- `/status` - Check setup status\n"
"- `/memories` - View and manage recent memories\n"
"- `/delete [id]` - Delete a memory by ID\n"
)
help_content += "\n\nJust ask me anything about your data in plain English!"
return WorkflowResult(
should_skip_llm=True,
components=[
UiComponent(
rich_component=RichTextComponent(
content=help_content,
markdown=True,
),
simple_component=None,
)
],
)
# Handle status check command (admin-only)
if message.strip().lower() in ["/status", "status"]:
# Check if user is admin
if "admin" not in user.group_memberships:
return WorkflowResult(
should_skip_llm=True,
components=[
UiComponent(
rich_component=RichTextComponent(
content="# π Access Denied\n\n"
"The `/status` command is only available to administrators.\n\n"
"If you need access to system status information, please contact your system administrator.",
markdown=True,
),
simple_component=None,
)
],
)
return await self._generate_status_check(agent, user)
# Handle get recent memories command (admin-only)
if message.strip().lower() in [
"/memories",
"memories",
"/recent_memories",
"recent_memories",
]:
# Check if user is admin
if "admin" not in user.group_memberships:
return WorkflowResult(
should_skip_llm=True,
components=[
UiComponent(
rich_component=RichTextComponent(
content="# π Access Denied\n\n"
"The `/memories` command is only available to administrators.\n\n"
"If you need access to memory management features, please contact your system administrator.",
markdown=True,
),
simple_component=None,
)
],
)
return await self._get_recent_memories(agent, user, conversation)
# Handle delete memory command (admin-only)
if message.strip().lower().startswith("/delete "):
# Check if user is admin
if "admin" not in user.group_memberships:
return WorkflowResult(
should_skip_llm=True,
components=[
UiComponent(
rich_component=RichTextComponent(
content="# π Access Denied\n\n"
"The `/delete` command is only available to administrators.\n\n"
"If you need access to memory management features, please contact your system administrator.",
markdown=True,
),
simple_component=None,
)
],
)
memory_id = message.strip()[8:].strip() # Extract ID after "/delete "
return await self._delete_memory(agent, user, conversation, memory_id)
# Don't handle other messages, pass to LLM
return WorkflowResult(should_skip_llm=False)
async def get_starter_ui(
self, agent: "Agent", user: "User", conversation: "Conversation"
) -> Optional[List[UiComponent]]:
"""Generate starter UI based on available tools and setup status."""
# Get available tools
tools = await agent.tool_registry.get_schemas(user)
tool_names = [tool.name for tool in tools]
# Analyze setup
setup_analysis = self._analyze_setup(tool_names)
# Check if user is admin (has 'admin' in group memberships)
is_admin = "admin" in user.group_memberships
# Generate single concise card
if self.welcome_message:
# Use custom welcome message
return [
UiComponent(
rich_component=RichTextComponent(
content=self.welcome_message, markdown=True
),
simple_component=None,
)
]
else:
# Generate role-aware welcome card
return [self._generate_starter_card(setup_analysis, is_admin)]
def _generate_starter_card(
self, analysis: Dict[str, Any], is_admin: bool
) -> UiComponent:
"""Generate a single concise starter card based on role and setup status."""
if is_admin:
# Admin view: includes setup status and memory management
return self._generate_admin_starter_card(analysis)
else:
# User view: simple welcome message
return self._generate_user_starter_card(analysis)
def _generate_admin_starter_card(self, analysis: Dict[str, Any]) -> UiComponent:
"""Generate admin starter card with setup info and memory management."""
# Build concise content
if not analysis["has_sql"]:
title = "Admin: Setup Required"
content = "**π Admin View** - You have admin privileges and will see additional system information.\n\n**Vanna AI** requires a SQL connection to function.\n\nPlease configure a SQL tool to get started."
status = "error"
icon = "β οΈ"
elif analysis["is_complete"]:
title = "Admin: System Ready"
content = "**π Admin View** - You have admin privileges and will see additional system information.\n\n**Vanna AI** is fully configured and ready.\n\n"
content += "**Setup:** SQL β | Memory β | Visualization β"
status = "success"
icon = "β
"
else:
title = "Admin: System Ready"
content = "**π Admin View** - You have admin privileges and will see additional system information.\n\n**Vanna AI** is ready to query your database.\n\n"
setup_items = []
setup_items.append("SQL β")
setup_items.append("Memory β" if analysis["has_memory"] else "Memory β")
setup_items.append("Viz β" if analysis["has_viz"] else "Viz β")
content += f"**Setup:** {' | '.join(setup_items)}"
status = "warning" if not analysis["has_memory"] else "success"
icon = "β οΈ" if not analysis["has_memory"] else "β
"
# Add memory management info for admins
actions: List[Dict[str, Any]] = []
if analysis["has_sql"]:
actions.append(
{
"label": "π‘ Help",
"action": "/help",
"variant": "secondary",
}
)
if analysis["has_memory"]:
content += "\n\n**Memory Management:** Tool and text memories are available. As an admin, you can view and manage these memories to help me learn from successful queries."
actions.append(
{
"label": "π§ View Memories",
"action": "/memories",
"variant": "secondary",
}
)
return UiComponent(
rich_component=CardComponent(
title=title,
content=content,
icon=icon,
status=status,
actions=actions,
markdown=True,
),
simple_component=None,
)
def _generate_user_starter_card(self, analysis: Dict[str, Any]) -> UiComponent:
"""Generate simple user starter view using RichTextComponent."""
if not analysis["has_sql"]:
content = (
"# β οΈ Setup Required\n\n"
"Vanna AI requires configuration before it can help you analyze data."
)
else:
content = (
"# π Welcome to Vanna AI\n\n"
"I'm your AI data analyst assistant. Ask me anything about your data in plain English!\n\n"
"Type `/help` to see what I can do."
)
return UiComponent(
rich_component=RichTextComponent(content=content, markdown=True),
simple_component=None,
)
def _analyze_setup(self, tool_names: List[str]) -> Dict[str, Any]:
"""Analyze the current tool setup and return status."""
# Critical tools
has_sql = any(
name in tool_names
for name in ["run_sql", "sql_query", "execute_sql", "query_sql"]
)
# Memory tools (important but not critical)
has_search = "search_saved_correct_tool_uses" in tool_names
has_save = "save_question_tool_args" in tool_names
has_memory = has_search and has_save
# Visualization tools (nice to have)
has_viz = any(
name in tool_names
for name in [
"visualize_data",
"create_chart",
"plot_data",
"generate_chart",
]
)
# Other useful tools
has_calculator = any(
name in tool_names for name in ["calculator", "calc", "calculate"]
)
# Determine overall status
is_complete = has_sql and has_memory and has_viz
is_functional = has_sql
return {
"has_sql": has_sql,
"has_memory": has_memory,
"has_search": has_search,
"has_save": has_save,
"has_viz": has_viz,
"has_calculator": has_calculator,
"is_complete": is_complete,
"is_functional": is_functional,
"tool_count": len(tool_names),
"tool_names": tool_names,
}
def _generate_setup_status_cards(
self, analysis: Dict[str, Any]
) -> List[UiComponent]:
"""Generate status cards showing setup health (used by /status command)."""
cards = []
# SQL Tool Status (Critical)
if analysis["has_sql"]:
sql_card = StatusCardComponent(
title="SQL Connection",
status="success",
description="Database connection configured and ready",
icon="β
",
)
else:
sql_card = StatusCardComponent(
title="SQL Connection",
status="error",
description="No SQL tool detected - this is required for data analysis",
icon="β",
)
cards.append(UiComponent(rich_component=sql_card, simple_component=None))
# Memory Tools Status (Important)
if analysis["has_memory"]:
memory_card = StatusCardComponent(
title="Memory System",
status="success",
description="Search and save tools configured - I can learn from successful queries",
icon="π§ ",
)
elif analysis["has_search"] or analysis["has_save"]:
memory_card = StatusCardComponent(
title="Memory System",
status="warning",
description="Partial memory setup - both search and save tools recommended",
icon="β οΈ",
)
else:
memory_card = StatusCardComponent(
title="Memory System",
status="warning",
description="Memory tools not configured - I won't remember successful patterns",
icon="β οΈ",
)
cards.append(UiComponent(rich_component=memory_card, simple_component=None))
# Visualization Status (Nice to have)
if analysis["has_viz"]:
viz_card = StatusCardComponent(
title="Visualization",
status="success",
description="Chart creation tools available",
icon="π",
)
else:
viz_card = StatusCardComponent(
title="Visualization",
status="info",
description="No visualization tools - results will be text/tables only",
icon="π",
)
cards.append(UiComponent(rich_component=viz_card, simple_component=None))
return cards
def _generate_setup_guidance(
self, analysis: Dict[str, Any]
) -> Optional[UiComponent]:
"""Generate setup guidance based on what's missing (used by /status command)."""
if not analysis["has_sql"]:
# Critical guidance - need SQL
content = (
"## π¨ Setup Required\n\n"
"To get started with Vanna AI, you need to configure a SQL connection tool:\n\n"
"```python\n"
"from vanna.tools import RunSqlTool\n\n"
"# Add SQL tool to your agent\n"
"tool_registry.register(RunSqlTool(\n"
' connection_string="your-database-connection"\n'
"))\n"
"```\n\n"
"**Next Steps:**\n"
"1. Configure your database connection\n"
"2. Add memory tools for learning\n"
"3. Add visualization tools for charts"
)
else:
# Improvement suggestions
suggestions = []
if not analysis["has_memory"]:
suggestions.append(
"**π§ Add Memory Tools** - Help me learn from successful queries:\n"
"```python\n"
"from vanna.tools import SearchSavedCorrectToolUses, SaveQuestionToolArgs\n"
"tool_registry.register(SearchSavedCorrectToolUses())\n"
"tool_registry.register(SaveQuestionToolArgs())\n"
"```"
)
if not analysis["has_viz"]:
suggestions.append(
"**π Add Visualization** - Create charts and graphs:\n"
"```python\n"
"from vanna.tools import VisualizeDataTool\n"
"tool_registry.register(VisualizeDataTool())\n"
"```"
)
if suggestions:
content = "## π‘ Suggested Improvements\n\n" + "\n\n".join(suggestions)
else:
return None # No guidance needed
return UiComponent(
rich_component=RichTextComponent(content=content, markdown=True),
simple_component=None,
)
async def _generate_status_check(
self, agent: "Agent", user: "User"
) -> WorkflowResult:
"""Generate a detailed status check response."""
# Get available tools
tools = await agent.tool_registry.get_schemas(user)
tool_names = [tool.name for tool in tools]
analysis = self._analyze_setup(tool_names)
# Generate status report
status_content = "# π Setup Status Report\n\n"
if analysis["is_complete"]:
status_content += (
"π **Excellent!** Your Vanna AI setup is complete and optimized.\n\n"
)
elif analysis["is_functional"]:
status_content += (
"β
**Good!** Your setup is functional with room for improvement.\n\n"
)
else:
status_content += (
"β οΈ **Action Required** - Your setup needs configuration.\n\n"
)
status_content += f"**Tools Detected:** {analysis['tool_count']} total\n\n"
# Tool breakdown
status_content += "## Tool Status\n\n"
status_content += f"- **SQL Connection:** {'β
Available' if analysis['has_sql'] else 'β Missing (Required)'}\n"
status_content += f"- **Memory System:** {'β
Complete' if analysis['has_memory'] else 'β οΈ Incomplete' if analysis['has_search'] or analysis['has_save'] else 'β Missing'}\n"
status_content += f"- **Visualization:** {'β
Available' if analysis['has_viz'] else 'π Text/Tables Only'}\n"
status_content += f"- **Calculator:** {'β
Available' if analysis['has_calculator'] else 'β Not Available'}\n\n"
if analysis["tool_names"]:
status_content += (
f"**Available Tools:** {', '.join(sorted(analysis['tool_names']))}"
)
components = [
UiComponent(
rich_component=RichTextComponent(content=status_content, markdown=True),
simple_component=None,
)
]
# Add status cards
components.extend(self._generate_setup_status_cards(analysis))
# Add guidance if needed
guidance = self._generate_setup_guidance(analysis)
if guidance:
components.append(guidance)
return WorkflowResult(should_skip_llm=True, components=components)
async def _get_recent_memories(
self, agent: "Agent", user: "User", conversation: "Conversation"
) -> WorkflowResult:
"""Get and display recent memories from agent memory."""
try:
# Check if agent has memory capability
if not hasattr(agent, "agent_memory") or agent.agent_memory is None:
return WorkflowResult(
should_skip_llm=True,
components=[
UiComponent(
rich_component=RichTextComponent(
content="# β οΈ No Memory System\n\n"
"Agent memory is not configured. Recent memories are not available.\n\n"
"To enable memory, configure an AgentMemory implementation in your agent setup.",
markdown=True,
),
simple_component=None,
)
],
)
# Create tool context
from vanna.core.tool import ToolContext
context = ToolContext(
user=user,
conversation_id=conversation.id,
request_id=str(uuid.uuid4()),
agent_memory=agent.agent_memory,
)
# Get both tool memories and text memories
tool_memories = await agent.agent_memory.get_recent_memories(
context=context, limit=10
)
# Try to get text memories (may not be implemented in all memory backends)
text_memories = []
try:
text_memories = await agent.agent_memory.get_recent_text_memories(
context=context, limit=10
)
except (AttributeError, NotImplementedError):
# Text memories not supported by this implementation
pass
if not tool_memories and not text_memories:
return WorkflowResult(
should_skip_llm=True,
components=[
UiComponent(
rich_component=RichTextComponent(
content="# π§ Recent Memories\n\n"
"No recent memories found. As you use tools and ask questions, "
"successful patterns will be saved here for future reference.",
markdown=True,
),
simple_component=None,
)
],
)
components = []
# Header
total_count = len(tool_memories) + len(text_memories)
header_content = f"# π§ Recent Memories\n\nFound {total_count} recent memor{'y' if total_count == 1 else 'ies'}"
components.append(
UiComponent(
rich_component=RichTextComponent(
content=header_content, markdown=True
),
simple_component=None,
)
)
# Display text memories
if text_memories:
components.append(
UiComponent(
rich_component=RichTextComponent(
content=f"## π Text Memories ({len(text_memories)})",
markdown=True,
),
simple_component=None,
)
)
for memory in text_memories:
# Create card with delete button
card_content = f"**Content:** {memory.content}\n\n"
if memory.timestamp:
card_content += f"**Timestamp:** {memory.timestamp}\n\n"
card_content += f"**ID:** `{memory.memory_id}`"
card = CardComponent(
title="Text Memory",
content=card_content,
icon="π",
actions=[
{
"label": "ποΈ Delete",
"action": f"/delete {memory.memory_id}",
"variant": "error",
}
],
)
components.append(
UiComponent(rich_component=card, simple_component=None)
)
# Display tool memories
if tool_memories:
components.append(
UiComponent(
rich_component=RichTextComponent(
content=f"## π§ Tool Memories ({len(tool_memories)})",
markdown=True,
),
simple_component=None,
)
)
for tool_memory in tool_memories:
# Create card with delete button
card_content = f"**Question:** {tool_memory.question}\n\n"
card_content += f"**Tool:** {tool_memory.tool_name}\n\n"
card_content += f"**Arguments:** `{tool_memory.args}`\n\n"
card_content += f"**Success:** {'β
Yes' if tool_memory.success else 'β No'}\n\n"
if tool_memory.timestamp:
card_content += f"**Timestamp:** {tool_memory.timestamp}\n\n"
card_content += f"**ID:** `{tool_memory.memory_id}`"
card = CardComponent(
title=f"Tool: {tool_memory.tool_name}",
content=card_content,
markdown=True,
icon="π§",
status="success" if tool_memory.success else "error",
actions=[
{
"label": "ποΈ Delete",
"action": f"/delete {tool_memory.memory_id}",
"variant": "error",
}
],
)
components.append(
UiComponent(rich_component=card, simple_component=None)
)
return WorkflowResult(should_skip_llm=True, components=components)
except Exception as e:
traceback.print_exc()
return WorkflowResult(
should_skip_llm=True,
components=[
UiComponent(
rich_component=RichTextComponent(
content=f"# β Error Retrieving Memories\n\n"
f"Failed to get recent memories: {str(e)}\n\n"
f"This may indicate an issue with the agent memory configuration.",
markdown=True,
),
simple_component=None,
)
],
)
async def _delete_memory(
self, agent: "Agent", user: "User", conversation: "Conversation", memory_id: str
) -> WorkflowResult:
"""Delete a memory by its ID."""
try:
# Check if agent has memory capability
if not hasattr(agent, "agent_memory") or agent.agent_memory is None:
return WorkflowResult(
should_skip_llm=True,
components=[
UiComponent(
rich_component=RichTextComponent(
content="# β οΈ No Memory System\n\n"
"Agent memory is not configured. Cannot delete memories.",
markdown=True,
),
simple_component=None,
)
],
)
if not memory_id:
return WorkflowResult(
should_skip_llm=True,
components=[
UiComponent(
rich_component=RichTextComponent(
content="# β οΈ Invalid Command\n\n"
"Please provide a memory ID to delete.\n\n"
"Usage: `/delete [memory_id]`",
markdown=True,
),
simple_component=None,
)
],
)
# Create tool context
from vanna.core.tool import ToolContext
context = ToolContext(
user=user,
conversation_id=conversation.id,
request_id=str(uuid.uuid4()),
agent_memory=agent.agent_memory,
)
# Try to delete as a tool memory first
deleted = await agent.agent_memory.delete_by_id(context, memory_id)
# If not found as tool memory, try as text memory
if not deleted:
try:
deleted = await agent.agent_memory.delete_text_memory(
context, memory_id
)
except (AttributeError, NotImplementedError):
# Text memory deletion not supported by this implementation
pass
if deleted:
return WorkflowResult(
should_skip_llm=True,
components=[
UiComponent(
rich_component=RichTextComponent(
content=f"# β
Memory Deleted\n\n"
f"Successfully deleted memory with ID: `{memory_id}`\n\n"
f"You can view remaining memories using `/memories`.",
markdown=True,
),
simple_component=None,
)
],
)
else:
return WorkflowResult(
should_skip_llm=True,
components=[
UiComponent(
rich_component=RichTextComponent(
content=f"# β Memory Not Found\n\n"
f"Could not find memory with ID: `{memory_id}`\n\n"
f"Use `/memories` to see available memory IDs.",
markdown=True,
),
simple_component=None,
)
],
)
except Exception as e:
traceback.print_exc()
return WorkflowResult(
should_skip_llm=True,
components=[
UiComponent(
rich_component=RichTextComponent(
content=f"# β Error Deleting Memory\n\n"
f"Failed to delete memory: {str(e)}\n\n"
f"This may indicate an issue with the agent memory configuration.",
markdown=True,
),
simple_component=None,
)
],
)
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/core/workflow/default.py",
"license": "MIT License",
"lines": 699,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
vanna-ai/vanna:src/vanna/examples/__main__.py | """
Interactive example runner for Vanna Agents.
"""
import sys
import importlib
def main() -> None:
    """Run an example interactively.

    Invoked as ``python -m vanna.examples <example_name>``; with no
    argument it prints the list of runnable examples instead.
    """
    if len(sys.argv) < 2:
        # No example selected -> show the menu and bail out.
        menu = [
            "Available examples:",
            "  python -m vanna.examples mock_quickstart",
            "  python -m vanna.examples mock_custom_tool",
            "  python -m vanna.examples anthropic_quickstart",
            "  python -m vanna.examples openai_quickstart",
            "  python -m vanna.examples mock_quota_example",
            "  python -m vanna.examples mock_rich_components_demo",
            "",
            "Usage: python -m vanna.examples <example_name>",
        ]
        print("\n".join(menu))
        return

    example_name = sys.argv[1]
    try:
        module = importlib.import_module(f"vanna.examples.{example_name}")
        if hasattr(module, "run_interactive"):
            # An interactive entry point takes precedence when present.
            module.run_interactive()
        elif hasattr(module, "main"):
            import asyncio

            entry = module.main
            # Async examples are driven through asyncio.run(); sync ones
            # are called directly.
            if asyncio.iscoroutinefunction(entry):
                asyncio.run(entry())
            else:
                entry()
        else:
            print(f"Example '{example_name}' does not have a main function")
    except ImportError:
        print(f"Example '{example_name}' not found")
    except Exception as e:
        print(f"Error running example '{example_name}': {e}")
# Allow "python -m vanna.examples <name>" / direct execution of this module.
if __name__ == "__main__":
    main()
| {
"repo_id": "vanna-ai/vanna",
"file_path": "src/vanna/examples/__main__.py",
"license": "MIT License",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.