sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-jira-issue/tests/test_tools_jira_issue.py | """Unit tests for JiraIssueToolSpec."""
from unittest.mock import Mock, patch
import pytest
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.tools.jira_issue.base import JiraIssueToolSpec
def test_class():
    """JiraIssueToolSpec must inherit (directly or indirectly) from BaseToolSpec."""
    mro_names = {base.__name__ for base in JiraIssueToolSpec.__mro__}
    assert BaseToolSpec.__name__ in mro_names
class TestJiraIssueToolSpec:
    """Test suite for JiraIssueToolSpec."""

    @pytest.fixture
    def mock_jira(self):
        """Create a mock JIRA client."""
        # Patch JIRA where the tool spec imports it, so constructing the tool
        # spec never contacts a real Jira server.
        with patch("llama_index.tools.jira_issue.base.JIRA") as mock:
            yield mock

    @pytest.fixture
    def jira_tool_spec(self, mock_jira):
        """Create a JiraIssueToolSpec instance with mocked JIRA client."""
        mock_jira.return_value = Mock()
        return JiraIssueToolSpec(
            email="test@example.com",
            api_key="test-api-key",
            server_url="https://test.atlassian.net",
        )

    def test_init_with_missing_credentials(self):
        """Test that initialization fails with missing credentials."""
        with pytest.raises(Exception, match="Please provide Jira credentials"):
            JiraIssueToolSpec(email="", api_key="", server_url="")

    def test_search_issues_success(self, jira_tool_spec):
        """Test successful issue search."""
        # Mock issue objects
        mock_issue1 = Mock()
        mock_issue1.key = "PROJ-123"
        mock_issue1.fields.summary = "Test Issue 1"
        mock_issue1.fields.status.name = "In Progress"
        mock_issue1.fields.assignee = Mock(displayName="John Doe")

        mock_issue2 = Mock()
        mock_issue2.key = "PROJ-124"
        mock_issue2.fields.summary = "Test Issue 2"
        mock_issue2.fields.status.name = "To Do"
        # Unassigned issue: exercises the None-assignee branch.
        mock_issue2.fields.assignee = None

        jira_tool_spec.jira.search_issues.return_value = [mock_issue1, mock_issue2]
        result = jira_tool_spec.search_issues("project = PROJ")
        assert result["error"] is False
        assert result["message"] == "Issues found"
        assert len(result["issues"]) == 2
        assert result["issues"][0]["key"] == "PROJ-123"
        assert result["issues"][0]["assignee"] == "John Doe"
        assert result["issues"][1]["assignee"] is None

    def test_search_issues_no_results(self, jira_tool_spec):
        """Test issue search with no results."""
        jira_tool_spec.jira.search_issues.return_value = []
        result = jira_tool_spec.search_issues("project = NONEXISTENT")
        # An empty result set is reported as an error by the tool spec.
        assert result["error"] is True
        assert result["message"] == "No issues found."

    def test_search_issues_failure(self, jira_tool_spec):
        """Test failed issue search."""
        jira_tool_spec.jira.search_issues.side_effect = Exception("Invalid JQL")
        result = jira_tool_spec.search_issues("invalid jql")
        assert result["error"] is True
        assert "Failed to search issues: Invalid JQL" in result["message"]

    def test_create_issue_success(self, jira_tool_spec):
        """Test successful issue creation."""
        mock_issue = Mock(key="KAN-123")
        jira_tool_spec.jira.create_issue.return_value = mock_issue
        result = jira_tool_spec.create_issue(
            project_key="KAN",
            summary="New Test Issue",
            description="Test description",
            issue_type="Task",
        )
        assert result["error"] is False
        assert result["message"] == "Issue KAN-123 created successfully."
        assert result["issue_key"] == "KAN-123"
        # Verify the create_issue was called with correct parameters
        jira_tool_spec.jira.create_issue.assert_called_once_with(
            project="KAN",
            summary="New Test Issue",
            description="Test description",
            issuetype={"name": "Task"},
        )

    def test_create_issue_failure(self, jira_tool_spec):
        """Test failed issue creation."""
        jira_tool_spec.jira.create_issue.side_effect = Exception("Project not found")
        result = jira_tool_spec.create_issue(project_key="INVALID")
        assert result["error"] is True
        assert "Failed to create new issue: Project not found" in result["message"]

    def test_add_comment_to_issue_success(self, jira_tool_spec):
        """Test successful comment addition."""
        mock_issue = Mock()
        jira_tool_spec.jira.issue.return_value = mock_issue
        result = jira_tool_spec.add_comment_to_issue("KAN-123", "Test comment")
        assert result["error"] is False
        assert result["message"] == "Comment added to issue KAN-123."
        # The comment must be attached to the issue object fetched above.
        jira_tool_spec.jira.add_comment.assert_called_once_with(
            mock_issue, "Test comment"
        )

    def test_add_comment_to_issue_failure(self, jira_tool_spec):
        """Test failed comment addition."""
        jira_tool_spec.jira.issue.side_effect = Exception("Issue not found")
        result = jira_tool_spec.add_comment_to_issue("INVALID-123", "Test comment")
        assert result["error"] is True
        assert "Failed to add comment to issue INVALID-123" in result["message"]

    def test_update_issue_summary_success(self, jira_tool_spec):
        """Test successful summary update."""
        mock_issue = Mock()
        jira_tool_spec.jira.issue.return_value = mock_issue
        result = jira_tool_spec.update_issue_summary(
            "KAN-123", "Updated Summary", notify=True
        )
        assert result["error"] is False
        assert result["message"] == "Issue KAN-123 summary updated."
        # notify flag must be forwarded to the underlying update call.
        mock_issue.update.assert_called_once_with(
            summary="Updated Summary", notify=True
        )

    def test_update_issue_summary_failure(self, jira_tool_spec):
        """Test failed summary update."""
        jira_tool_spec.jira.issue.side_effect = Exception("Permission denied")
        result = jira_tool_spec.update_issue_summary("KAN-123", "Updated Summary")
        assert result["error"] is True
        assert "Failed to update issue KAN-123: Permission denied" in result["message"]

    def test_update_issue_assignee_success(self, jira_tool_spec):
        """Test successful assignee update."""
        mock_user = Mock()
        mock_user.displayName = "John Doe"
        mock_user.accountId = "12345"
        jira_tool_spec.jira.search_users.return_value = [mock_user]
        mock_issue = Mock()
        jira_tool_spec.jira.issue.return_value = mock_issue
        result = jira_tool_spec.update_issue_assignee("KAN-123", "John Doe")
        assert result["error"] is False
        assert result["message"] == "Issue KAN-123 successfully assigned to John Doe"
        # Jira Cloud assigns by accountId, not by display name.
        mock_issue.update.assert_called_once_with(assignee={"accountId": "12345"})

    def test_update_issue_assignee_user_not_found(self, jira_tool_spec):
        """Test assignee update when user is not found."""
        jira_tool_spec.jira.search_users.return_value = []
        result = jira_tool_spec.update_issue_assignee("KAN-123", "Unknown User")
        assert result["error"] is True
        assert "User with full name 'Unknown User' not found" in result["message"]

    def test_update_issue_assignee_failure(self, jira_tool_spec):
        """Test failed assignee update."""
        jira_tool_spec.jira.search_users.side_effect = Exception("API Error")
        result = jira_tool_spec.update_issue_assignee("KAN-123", "John Doe")
        assert result["error"] is True
        assert (
            "An error occurred while updating the assignee: API Error"
            in result["message"]
        )

    def test_update_issue_status_success(self, jira_tool_spec):
        """Test successful status update."""
        mock_issue = Mock()
        jira_tool_spec.jira.issue.return_value = mock_issue
        # Transitions as returned by the Jira API: id + human-readable name.
        jira_tool_spec.jira.transitions.return_value = [
            {"id": "1", "name": "To Do"},
            {"id": "2", "name": "In Progress"},
            {"id": "3", "name": "Done"},
        ]
        result = jira_tool_spec.update_issue_status("KAN-123", "Done")
        assert result["error"] is False
        assert result["message"] == "Issue KAN-123 status updated to Done."
        # The transition must be performed by id, not by name.
        jira_tool_spec.jira.transition_issue.assert_called_once_with(mock_issue, "3")

    def test_update_issue_status_invalid_transition(self, jira_tool_spec):
        """Test status update with invalid transition."""
        mock_issue = Mock()
        jira_tool_spec.jira.issue.return_value = mock_issue
        jira_tool_spec.jira.transitions.return_value = [
            {"id": "1", "name": "To Do"},
            {"id": "2", "name": "In Progress"},
        ]
        result = jira_tool_spec.update_issue_status("KAN-123", "Done")
        assert result["error"] is True
        assert "Status 'Done' not available for issue KAN-123" in result["message"]
        assert "Available transitions: ['To Do', 'In Progress']" in result["message"]

    def test_update_issue_status_failure(self, jira_tool_spec):
        """Test failed status update."""
        jira_tool_spec.jira.issue.side_effect = Exception("Issue not found")
        result = jira_tool_spec.update_issue_status("INVALID-123", "Done")
        assert result["error"] is True
        assert "Failed to update status for issue INVALID-123" in result["message"]

    def test_update_issue_due_date_success(self, jira_tool_spec):
        """Test successful due date update."""
        mock_issue = Mock()
        jira_tool_spec.jira.issue.return_value = mock_issue
        result = jira_tool_spec.update_issue_due_date("KAN-123", "2024-12-31")
        assert result["error"] is False
        assert result["message"] == "Issue KAN-123 due date updated."
        mock_issue.update.assert_called_once_with(duedate="2024-12-31")

    def test_update_issue_due_date_clear(self, jira_tool_spec):
        """Test clearing due date."""
        mock_issue = Mock()
        jira_tool_spec.jira.issue.return_value = mock_issue
        # Passing None clears the due date rather than erroring.
        result = jira_tool_spec.update_issue_due_date("KAN-123", None)
        assert result["error"] is False
        assert result["message"] == "Issue KAN-123 due date cleared."
        mock_issue.update.assert_called_once_with(duedate=None)

    def test_update_issue_due_date_invalid_format(self, jira_tool_spec):
        """Test due date update with invalid date format."""
        result = jira_tool_spec.update_issue_due_date("KAN-123", "31-12-2024")
        assert result["error"] is True
        assert result["message"] == "Invalid date format. Use YYYY-MM-DD."

    def test_update_issue_due_date_failure(self, jira_tool_spec):
        """Test failed due date update."""
        jira_tool_spec.jira.issue.side_effect = Exception("Permission denied")
        result = jira_tool_spec.update_issue_due_date("KAN-123", "2024-12-31")
        assert result["error"] is True
        assert "Failed to update due date for issue KAN-123" in result["message"]

    def test_delete_issue_success(self, jira_tool_spec):
        """Test successful issue deletion."""
        mock_issue = Mock()
        jira_tool_spec.jira.issue.return_value = mock_issue
        result = jira_tool_spec.delete_issue("KAN-123")
        assert result["error"] is False
        assert result["message"] == "Issue KAN-123 deleted successfully."
        mock_issue.delete.assert_called_once()

    def test_delete_issue_failure(self, jira_tool_spec):
        """Test failed issue deletion."""
        jira_tool_spec.jira.issue.side_effect = Exception("Issue not found")
        result = jira_tool_spec.delete_issue("INVALID-123")
        assert result["error"] is True
        assert "Failed to delete issue INVALID-123" in result["message"]
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-jira-issue/tests/test_tools_jira_issue.py",
"license": "MIT License",
"lines": 220,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-web/tests/test_async_web.py | import pytest
from llama_index.readers.web import AsyncWebPageReader
@pytest.fixture()
def url() -> str:
    """Return the live page URL used by the async web reader test."""
    target = "https://llamaindex.ai/"
    return target
def test_async_web_reader(url: str) -> None:
    """Load one live page and verify each document gets a UUID-style id distinct from the URL."""
    documents = AsyncWebPageReader().load_data(urls=[url])
    assert len(documents) > 0
    first_id = documents[0].id_
    assert isinstance(first_id, str)
    assert first_id != url
    # 36 characters is the canonical string form of a UUID4.
    assert len(first_id) == 36
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-web/tests/test_async_web.py",
"license": "MIT License",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-s3/llama_index/vector_stores/s3/base.py | """
S3 vector store index.
An index that is built on top of an existing S3Vectors collection.
"""
import asyncio
import boto3
import logging
import time
from typing import Any, List, Optional, Tuple
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.schema import BaseNode, TextNode
from llama_index.core.utils import iter_batch
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
MetadataFilter,
MetadataFilters,
FilterOperator,
FilterCondition,
)
from llama_index.core.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
# Module-level logger for this integration.
logger = logging.getLogger(__name__)
class S3VectorStore(BasePydanticVectorStore):
    """
    S3 Vector Store.

    Uses the S3Vectors service to store and query vectors directly in S3.
    It is recommended to create a vector bucket in S3 first.

    Args:
        index_name (str): The name of the index.
        bucket_name_or_arn (str): The name or ARN of the vector bucket.
        data_type (str): The data type of the vectors. Only supports "float32" for now.
        insert_batch_size (int): The batch size for inserting vectors.
        sync_session (Optional[boto3.Session]): The session to use for the synchronous client.

    Examples:
        `pip install llama-index-vector-stores-s3`

        ```python
        from llama_index.vector_stores.s3 import S3VectorStore

        vector_store = S3VectorStore.create_index_from_bucket(
            bucket_name_or_arn="my-vector-bucket",
            index_name="my-index",
            dimension=1536,
        )
        ```
    """

    stores_text: bool = True
    flat_metadata: bool = False

    index_name_or_arn: str = Field(description="The name or ARN of the index.")
    bucket_name_or_arn: str = Field(description="The name or ARN of the bucket.")
    data_type: str = Field(description="The data type of the vectors.")
    insert_batch_size: int = Field(description="The batch size for inserting vectors.")
    text_field: Optional[str] = Field(
        default=None, description="The field to use as the text field in the metadata."
    )
    distance_metric: str = Field(
        default="cosine", description="The distance metric used by the index."
    )

    _session: boto3.Session = PrivateAttr()

    def __init__(
        self,
        index_name_or_arn: str,
        bucket_name_or_arn: str,
        data_type: str = "float32",
        insert_batch_size: int = 500,
        text_field: Optional[str] = None,
        distance_metric: str = "cosine",
        sync_session: Optional[boto3.Session] = None,
        async_session: Optional[Any] = None,
    ) -> None:
        """Init params."""
        if async_session is not None:
            raise NotImplementedError(
                "Async sessions are not supported yet by aioboto3/aiobotocore"
            )
        # S3Vectors accepts at most 500 vectors per put_vectors request.
        if insert_batch_size > 500:
            raise ValueError("Insert batch size must be less than or equal to 500")

        super().__init__(
            index_name_or_arn=index_name_or_arn,
            bucket_name_or_arn=bucket_name_or_arn,
            data_type=data_type,
            insert_batch_size=insert_batch_size,
            text_field=text_field,
            distance_metric=distance_metric,
        )
        self._session = sync_session or boto3.Session()

    @classmethod
    def create_index_from_bucket(
        cls,
        bucket_name_or_arn: str,
        index_name: str,
        dimension: int,
        distance_metric: str = "cosine",
        data_type: str = "float32",
        insert_batch_size: int = 500,
        non_filterable_metadata_keys: Optional[List[str]] = None,
        sync_session: Optional[boto3.Session] = None,
        async_session: Optional[Any] = None,
    ) -> "S3VectorStore":
        """
        Create an index in S3Vectors and return a vector store bound to it.

        Args:
            bucket_name_or_arn: Name or ARN of an existing vector bucket.
            index_name: Name of the index to create.
            dimension: Dimensionality of the stored embeddings.
            distance_metric: Distance metric used by the index.
            data_type: Vector element type (only "float32" is supported).
            insert_batch_size: Batch size used by `add` (max 500).
            non_filterable_metadata_keys: Metadata keys excluded from filtering.
            sync_session: Optional boto3 session to use.
            async_session: Unsupported; must be None.

        Returns:
            A configured S3VectorStore instance.
        """
        # node content and node type should never be filterable by default
        non_filterable_metadata_keys = non_filterable_metadata_keys or []
        if "_node_content" not in non_filterable_metadata_keys:
            non_filterable_metadata_keys.append("_node_content")
        if "_node_type" not in non_filterable_metadata_keys:
            non_filterable_metadata_keys.append("_node_type")

        bucket_name, bucket_arn = cls.get_name_or_arn(bucket_name_or_arn)
        sync_session = sync_session or boto3.Session()

        kwargs = {
            "indexName": index_name,
            "dimension": dimension,
            "dataType": data_type,
            "distanceMetric": distance_metric,
            "metadataConfiguration": {
                "nonFilterableMetadataKeys": non_filterable_metadata_keys,
            },
        }
        if bucket_arn is not None:
            kwargs["vectorBucketArn"] = bucket_arn
        else:
            kwargs["vectorBucketName"] = bucket_name

        sync_session.client("s3vectors").create_index(**kwargs)
        return cls(
            sync_session=sync_session,
            async_session=async_session,
            data_type=data_type,
            index_name_or_arn=index_name,
            bucket_name_or_arn=bucket_name_or_arn,
            insert_batch_size=insert_batch_size,
            distance_metric=distance_metric,
        )

    @classmethod
    def class_name(cls) -> str:
        return "S3VectorStore"

    @staticmethod
    def get_name_or_arn(name_or_arn: str) -> Tuple[Optional[str], Optional[str]]:
        """
        Split an identifier into a (name, arn) pair.

        Exactly one element of the returned tuple is non-None: the second when
        the input looks like an ARN, otherwise the first.
        """
        if "arn:" in name_or_arn:
            return None, name_or_arn
        return name_or_arn, None

    def _index_kwargs(self) -> dict:
        """Return the indexName/indexArn request argument for s3vectors calls."""
        index_name, index_arn = self.get_name_or_arn(self.index_name_or_arn)
        if index_arn is not None:
            return {"indexArn": index_arn}
        return {"indexName": index_name}

    def _parse_response(self, response: dict) -> List[BaseNode]:
        """
        Parse a list/get/query response from S3Vectors into nodes.

        When `text_field` is unset, nodes are rebuilt from the serialized
        `_node_content` metadata; otherwise a plain TextNode is created using
        the configured metadata key as its text.
        """
        if self.text_field is None:
            return [
                metadata_dict_to_node(v["metadata"])
                for v in response.get("vectors", [])
            ]
        else:
            nodes = []
            for v in response.get("vectors", []):
                if self.text_field not in v["metadata"]:
                    raise ValueError(
                        f"Text field {self.text_field} not found in returned metadata"
                    )
                text = v["metadata"].pop(self.text_field)
                nodes.append(TextNode(text=text, metadata=v["metadata"]))
            return nodes

    def get_nodes(
        self,
        node_ids: Optional[List[str]] = None,
        filters: Optional[MetadataFilters] = None,
    ) -> List[BaseNode]:
        """
        Get nodes from the index.

        Args:
            node_ids (Optional[List[str]]): List of node IDs to retrieve.
            filters (Optional[MetadataFilters]): Metadata filters to apply.

        Returns:
            List[BaseNode]: List of nodes retrieved from the index.

        Raises:
            ValueError: If node_ids is not provided.
            NotImplementedError: If filters are provided (unsupported).
        """
        if node_ids is None:
            raise ValueError("node_ids is required")
        if filters is not None:
            raise NotImplementedError("Filters are not supported yet")

        response = self._session.client("s3vectors").get_vectors(
            keys=node_ids,
            vectorBucketName=self.bucket_name_or_arn,
            returnMetadata=True,
            **self._index_kwargs(),
        )
        return self._parse_response(response)

    async def aget_nodes(
        self,
        node_ids: Optional[List[str]] = None,
        filters: Optional[MetadataFilters] = None,
    ) -> List[BaseNode]:
        """
        Asynchronous method to get nodes from the index.

        Args:
            node_ids (Optional[List[str]]): List of node IDs to retrieve.
            filters (Optional[MetadataFilters]): Metadata filters to apply.

        Returns:
            List[BaseNode]: List of nodes retrieved from the index.
        """
        # No async client is available yet; run the sync call in a thread.
        return await asyncio.to_thread(
            self.get_nodes, node_ids=node_ids, filters=filters
        )

    def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
        """
        Add nodes to index.

        Args:
            nodes: List[BaseNode]: list of nodes with embeddings

        Returns:
            List of node IDs that were inserted.
        """
        client = self._session.client("s3vectors")

        # Crude client-side throttle: at most 5 put_vectors requests per
        # one-second window (poor-man's token bucket). Fix over the previous
        # version: the window start is advanced on refill, otherwise the
        # throttle was effectively disabled after the first second.
        window_start = time.time()
        available_requests = 5

        added_ids: List[str] = []
        for node_batch in iter_batch(nodes, self.insert_batch_size):
            vectors = []
            for node in node_batch:
                node_metadata = node_to_metadata_dict(node)
                # delete fields that aren't used to save space
                node_metadata.pop("document_id", None)
                node_metadata.pop("doc_id", None)
                node_metadata.pop("embedding", None)
                vectors.append(
                    {
                        "key": str(node.id_),
                        "data": {"float32": node.embedding},
                        "metadata": {**node_metadata},
                    }
                )

            client.put_vectors(
                vectors=vectors,
                vectorBucketName=self.bucket_name_or_arn,
                **self._index_kwargs(),
            )

            # Refill or consume a token.
            if time.time() - window_start > 1:
                window_start = time.time()
                available_requests = 5
            else:
                available_requests -= 1
                if available_requests == 0:
                    time.sleep(1)
                    window_start = time.time()
                    available_requests = 5

            added_ids.extend([v["key"] for v in vectors])
        return added_ids

    async def async_add(self, nodes: List[BaseNode], **kwargs: Any) -> List[str]:
        """
        Asynchronous method to add nodes to the S3Vectors index.

        Args:
            nodes: List[BaseNode]: List of nodes with embeddings.

        Returns:
            List of node IDs that were added to the index.
        """
        return await asyncio.to_thread(self.add, nodes, **kwargs)

    def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """
        Delete nodes using with ref_doc_id.

        Pages through all vectors in the index, deleting those whose metadata
        references the given document.

        Args:
            ref_doc_id (str): The doc_id of the document to delete.
            delete_kwargs (Any): Additional arguments to pass to the list_vectors method.
        """
        client = self._session.client("s3vectors")
        next_token = None
        while True:
            kwargs = {
                "vectorBucketName": self.bucket_name_or_arn,
                "returnMetadata": True,
                **self._index_kwargs(),
                **delete_kwargs,
            }
            # Fix: the previous version never forwarded the pagination token,
            # which re-listed the first page forever on multi-page indexes.
            if next_token is not None:
                kwargs["nextToken"] = next_token
            response = client.list_vectors(**kwargs)

            nodes_to_delete = [
                v["key"]
                for v in response.get("vectors", [])
                # .get() tolerates vectors written without a ref_doc_id.
                if v["metadata"].get("ref_doc_id") == ref_doc_id
            ]
            # Skip the API call when this page had no matches.
            if nodes_to_delete:
                client.delete_vectors(
                    vectorBucketName=self.bucket_name_or_arn,
                    keys=nodes_to_delete,
                    **self._index_kwargs(),
                )

            next_token = response.get("nextToken")
            if next_token is None:
                break

    async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """
        Asynchronous method to delete nodes using with ref_doc_id.

        Args:
            ref_doc_id (str): The doc_id of the document to delete.
        """
        return await asyncio.to_thread(self.delete, ref_doc_id, **delete_kwargs)

    def delete_nodes(
        self,
        node_ids: Optional[List[str]] = None,
        filters: Optional[MetadataFilters] = None,
        **delete_kwargs: Any,
    ) -> None:
        """
        Delete nodes using with node_ids.

        Args:
            node_ids (Optional[List[str]): List of node IDs to delete.
            filters (Optional[MetadataFilters]): Metadata filters to apply.

        Raises:
            NotImplementedError: If filters are provided (unsupported).
            ValueError: If node_ids is not provided.
        """
        if filters is not None:
            raise NotImplementedError("Deleting by filters is not supported yet")
        if node_ids is None:
            raise ValueError("node_ids is required")

        self._session.client("s3vectors").delete_vectors(
            vectorBucketName=self.bucket_name_or_arn,
            keys=node_ids,
            **self._index_kwargs(),
        )

    async def adelete_nodes(
        self,
        node_ids: Optional[List[str]] = None,
        filters: Optional[MetadataFilters] = None,
        **delete_kwargs: Any,
    ) -> None:
        """
        Asynchronous method to delete nodes using with node_ids.

        Args:
            node_ids (Optional[List[str]): List of node IDs to delete.
            filters (Optional[MetadataFilters]): Metadata filters to apply.
        """
        return await asyncio.to_thread(
            self.delete_nodes, node_ids=node_ids, filters=filters, **delete_kwargs
        )

    def clear(self) -> None:
        """
        Clear the index.

        Note: this deletes the index itself, not just its vectors.
        """
        self._session.client("s3vectors").delete_index(
            vectorBucketName=self.bucket_name_or_arn,
            **self._index_kwargs(),
        )

    async def aclear(self) -> None:
        """
        Asynchronous method to clear the index.
        """
        return await asyncio.to_thread(self.clear)

    @property
    def client(self) -> Any:
        """Return a boto3 S3Vectors client bound to the configured session."""
        return self._session.client("s3vectors")

    def query(
        self,
        query: VectorStoreQuery,
        **kwargs: Any,
    ) -> VectorStoreQueryResult:
        """
        Query index for top k most similar nodes.

        Args:
            query (VectorStoreQuery): query

        Raises:
            NotImplementedError: For any query mode other than DEFAULT.
        """
        if query.mode != VectorStoreQueryMode.DEFAULT:
            raise NotImplementedError(
                "Only DEFAULT query mode is supported for S3VectorStore"
            )

        request = {
            "vectorBucketName": self.bucket_name_or_arn,
            "queryVector": {self.data_type: query.query_embedding},
            "topK": query.similarity_top_k,
            "returnDistance": True,
            "returnMetadata": True,
            **self._index_kwargs(),
        }
        # Only include the filter key when a filter was actually provided.
        metadata_filter = self._build_filter(query.filters)
        if metadata_filter is not None:
            request["filter"] = metadata_filter

        response = self._session.client("s3vectors").query_vectors(**request)
        nodes = self._parse_response(response)
        return VectorStoreQueryResult(
            nodes=nodes,
            similarities=self._convert_distances_to_similarities(
                response.get("vectors", [])
            ),
            ids=[v["key"] for v in response.get("vectors", [])],
        )

    async def aquery(
        self, query: VectorStoreQuery, **kwargs: Any
    ) -> VectorStoreQueryResult:
        """
        Asynchronous method to query index for top k most similar nodes.

        Args:
            query (VectorStoreQuery): query
        """
        return await asyncio.to_thread(self.query, query, **kwargs)

    def _build_filter(self, filters: Optional[MetadataFilters]) -> Optional[dict]:
        """
        Build an S3Vectors filter document from LlamaIndex metadata filters.

        Returns None when no filters are given.
        """
        if filters is None:
            return None

        def _convert_single_filter(filter_obj) -> dict:
            """Convert a single MetadataFilter to S3 Vectors format."""
            if not isinstance(filter_obj, MetadataFilter):
                raise ValueError(f"Expected MetadataFilter, got {type(filter_obj)}")

            key = filter_obj.key
            value = filter_obj.value
            operator = filter_obj.operator

            # Map LlamaIndex operators to S3 Vectors operators
            operator_map = {
                FilterOperator.EQ: "$eq",
                FilterOperator.NE: "$ne",
                FilterOperator.GT: "$gt",
                FilterOperator.GTE: "$gte",
                FilterOperator.LT: "$lt",
                FilterOperator.LTE: "$lte",
                FilterOperator.IN: "$in",
                FilterOperator.NIN: "$nin",
            }

            if operator == FilterOperator.IS_EMPTY:
                # For IS_EMPTY, we use $exists with false
                return {key: {"$exists": False}}
            elif operator in operator_map:
                return {key: {operator_map[operator]: value}}
            else:
                # Unsupported operators - for now, we'll raise an error
                # Could potentially map TEXT_MATCH, ANY, ALL, CONTAINS if S3 Vectors supports them
                raise ValueError(f"Unsupported filter operator: {operator}")

        def _convert_filters_recursively(filters_obj) -> dict:
            """Recursively convert MetadataFilters to S3 Vectors format."""
            if isinstance(filters_obj, MetadataFilter):
                return _convert_single_filter(filters_obj)
            elif isinstance(filters_obj, MetadataFilters):
                filter_list = []
                for f in filters_obj.filters:
                    converted_filter = _convert_filters_recursively(f)
                    filter_list.append(converted_filter)

                # Handle the condition
                if len(filter_list) == 1:
                    # A single clause needs no boolean combinator.
                    return filter_list[0]
                elif filters_obj.condition == FilterCondition.AND:
                    return {"$and": filter_list}
                elif filters_obj.condition == FilterCondition.OR:
                    return {"$or": filter_list}
                elif filters_obj.condition == FilterCondition.NOT:
                    # S3 Vectors doesn't have explicit $not
                    # We would need to implement a custom filter that negates the logic
                    raise ValueError(
                        "NOT condition is not supported for S3 Vectors filters"
                    )
                else:
                    raise ValueError(
                        f"Unexpected filter condition: {filters_obj.condition}"
                    )
            else:
                raise ValueError(f"Unexpected filter type: {type(filters_obj)}")

        return _convert_filters_recursively(filters)

    def _convert_distances_to_similarities(self, vectors: List[dict]) -> List[float]:
        """
        Convert distances to similarity scores (0-1 scale, where 1 is most similar).

        Args:
            vectors: List of vector results containing distance values

        Returns:
            List of similarity scores normalized to [0, 1] where 1 is most similar
        """
        similarities = []
        for vector in vectors:
            distance = float(vector.get("distance", 0))
            if self.distance_metric.lower() == "cosine":
                # Cosine distance is typically in range [0, 2] where 0 is most similar
                # Convert to similarity: similarity = 1 - (distance / 2)
                # But if distance is already normalized to [0, 1], use: similarity = 1 - distance
                similarity = max(0.0, min(1.0, 1.0 - distance))
            elif self.distance_metric.lower() == "euclidean":
                # Euclidean distance ranges from 0 to infinity
                # Use: similarity = 1 / (1 + distance) which maps [0, ∞) to (0, 1]
                similarity = 1.0 / (1.0 + distance)
            else:
                # For unknown metrics, assume cosine-like behavior
                similarity = max(0.0, min(1.0, 1.0 - distance))
            similarities.append(similarity)
        return similarities
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-s3/llama_index/vector_stores/s3/base.py",
"license": "MIT License",
"lines": 508,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-s3/tests/test_s3_vector_store.py | import boto3
import os
from llama_index.core.vector_stores.types import VectorStoreQuery
import pytest
import uuid
from typing import List
from llama_index.core.schema import TextNode
from llama_index.core.vector_stores.types import MetadataFilters, MetadataFilter
from llama_index.vector_stores.s3 import S3VectorStore
# Integration-test configuration, read from the environment so the suite can
# target a real S3Vectors bucket in CI.
bucket_name = os.getenv("S3_BUCKET_NAME", "test-bucket")
aws_access_key_id = os.getenv("AWS_ACCESS_KEY_ID")
aws_secret_access_key = os.getenv("AWS_SECRET_ACCESS_KEY")
aws_session_token = os.getenv("AWS_SESSION_TOKEN")
region_name = os.getenv("AWS_REGION", "us-east-2")
# Skip every test in this module unless real credentials are provided.
should_skip = not all([aws_access_key_id, aws_secret_access_key])
@pytest.fixture
def vector_store():
    """Yield an S3VectorStore backed by a uniquely named index, removing the index afterwards."""
    aws_session = boto3.Session(
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        aws_session_token=aws_session_token,
        region_name=region_name,
    )
    unique_index = str(uuid.uuid4())
    store = S3VectorStore.create_index_from_bucket(
        bucket_name_or_arn=bucket_name,
        index_name=unique_index,
        dimension=1536,
        sync_session=aws_session,
    )
    try:
        yield store
    finally:
        # Always drop the throwaway index, even if the test failed.
        aws_session.client("s3vectors").delete_index(
            vectorBucketName=bucket_name, indexName=unique_index
        )
@pytest.fixture
def nodes() -> List[TextNode]:
    """Three tiny nodes with fixed 1536-dim embeddings for deterministic queries."""
    embeddings = [
        [0.1] + [0.0] * 1535,
        [0.0] * 1535 + [0.5],
        [0.9] + [0.3] * 1535,
    ]
    return [
        TextNode(
            id_=str(idx),
            text=f"Hello, world {idx}!",
            metadata={"key": str(idx)},
            embedding=vector,
        )
        for idx, vector in enumerate(embeddings, start=1)
    ]
@pytest.mark.skipif(should_skip, reason="AWS credentials not set")
def test_basic_flow(vector_store: S3VectorStore, nodes: List[TextNode]):
    """Insert nodes, fetch them back by id, then run a top-2 similarity query."""
    vector_store.add(nodes)

    fetched = vector_store.get_nodes(node_ids=["1", "2", "3"])
    assert len(fetched) == 3

    results = vector_store.query(
        VectorStoreQuery(
            query_embedding=[0.1] + [0.0] * 1535,
            similarity_top_k=2,
        )
    )
    assert len(results.nodes) == 2
    # Node 1 is closest to the query vector, node 3 second-closest.
    assert results.nodes[0].text == "Hello, world 1!"
    assert results.nodes[1].text == "Hello, world 3!"
    assert results.nodes[0].metadata["key"] == "1"
    assert results.nodes[1].metadata["key"] == "3"
    assert results.similarities[0] > results.similarities[1]
@pytest.mark.skipif(should_skip, reason="AWS credentials not set")
@pytest.mark.asyncio
async def test_async_flow(vector_store: S3VectorStore, nodes: List[TextNode]):
    """Exercise the async add/get/query code paths end to end."""
    await vector_store.async_add(nodes)

    fetched = await vector_store.aget_nodes(node_ids=["1", "2", "3"])
    assert len(fetched) == 3

    results = await vector_store.aquery(
        VectorStoreQuery(
            query_embedding=[0.1] + [0.0] * 1535,
            similarity_top_k=2,
        )
    )
    assert len(results.nodes) == 2
    # Same expected ordering as the sync flow: node 1 first, node 3 second.
    assert results.nodes[0].text == "Hello, world 1!"
    assert results.nodes[1].text == "Hello, world 3!"
    assert results.nodes[0].metadata["key"] == "1"
    assert results.nodes[1].metadata["key"] == "3"
    assert results.similarities[0] > results.similarities[1]
@pytest.mark.skipif(should_skip, reason="AWS credentials not set")
def test_text_field(vector_store: S3VectorStore, nodes: List[TextNode]):
    """Vectors written with a custom text metadata key are readable via text_field."""
    raw_vectors = []
    for node in nodes:
        raw_vectors.append(
            {
                "key": node.id_,
                "metadata": {
                    "my_text": node.text,
                    **node.metadata,
                },
                "data": {"float32": node.embedding},
            }
        )

    aws_session = boto3.Session(
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        aws_session_token=aws_session_token,
        region_name=region_name,
    )
    # populate with custom vectors
    aws_session.client("s3vectors").put_vectors(
        vectorBucketName=bucket_name,
        indexName=vector_store.index_name_or_arn,
        vectors=raw_vectors,
    )

    vector_store.text_field = "my_text"
    results = vector_store.get_nodes(node_ids=["1", "2", "3"])
    assert len(results) == 3
@pytest.mark.skipif(should_skip, reason="AWS credentials not set")
@pytest.mark.asyncio
async def test_filtering(vector_store: S3VectorStore, nodes: List[TextNode]):
    """A metadata filter on key="1" must narrow a top-2 query to a single node."""
    await vector_store.async_add(nodes)

    query = VectorStoreQuery(
        filters=MetadataFilters(filters=[MetadataFilter(key="key", value="1")]),
        query_embedding=[0.1] + [0.0] * 1535,
        similarity_top_k=2,
    )
    results = await vector_store.aquery(query)

    assert len(results.nodes) == 1
    assert results.nodes[0].text == "Hello, world 1!"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-s3/tests/test_s3_vector_store.py",
"license": "MIT License",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-service-now/llama_index/readers/service_now/base.py | import logging
import os
from typing import Callable, Dict, List, Optional
import uuid
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.core.instrumentation import get_dispatcher
from pysnc import GlideRecord, ServiceNowClient
from pysnc.auth import ServiceNowPasswordGrantFlow
import requests
from pydantic import BaseModel, Field, model_validator, ConfigDict
from .event import (
FileType,
SNOWKBTotalPagesEvent,
SNOWKBPageFetchStartEvent,
SNOWKBPageFetchCompletedEvent,
SNOWKBPageFailedEvent,
SNOWKBAttachmentProcessingStartEvent,
SNOWKBAttachmentProcessedEvent,
SNOWKBAttachmentSkippedEvent,
SNOWKBAttachmentFailedEvent,
)
# Module-level logger used as the fallback when callers do not inject their own.
internal_logger = logging.getLogger(__name__)
# LlamaIndex instrumentation dispatcher; all SNOWKB* events are fired through it.
dispatcher = get_dispatcher(__name__)
# Workflow state applied when load_data() is called with a falsy status.
DEFAULT_WORKFLOW_STATE = "Published"
class CustomParserManager(BaseModel):
    """
    Manager for custom file parsers with validation and file processing capabilities.
    Validates that custom parsers are provided for processing different file types.
    At minimum, an HTML parser must be provided for processing article bodies.
    """

    # Mapping of FileType -> parser used to convert raw content to markdown.
    custom_parsers: Dict[FileType, BaseReader] = Field(
        description="Dictionary mapping FileType enum values to BaseReader instances"
    )
    # Scratch directory where content is written before being handed to a parser.
    custom_folder: str = Field(
        description="Folder path for temporary files during parsing"
    )
    logger: logging.Logger = Field(
        default_factory=lambda: logging.getLogger(__name__),
        description="Logger instance",
    )
    model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow")

    @model_validator(mode="after")
    def validate_model(self):
        """Validate the configured parsers and scratch folder after model init."""
        # Validate that HTML parser is provided (required for article body processing)
        if FileType.HTML not in self.custom_parsers:
            raise ValueError(
                "HTML parser is required in custom_parsers for processing article bodies. "
                "Please provide a parser for FileType.HTML."
            )
        # Ensure custom_folder exists and is writable
        try:
            os.makedirs(self.custom_folder, exist_ok=True)
            # Test write permissions by round-tripping a small probe file
            test_file = os.path.join(self.custom_folder, ".test_write")
            with open(test_file, "w") as f:
                f.write("test")
            os.remove(test_file)
        except (OSError, PermissionError) as e:
            raise ValueError(
                f"Custom folder '{self.custom_folder}' is not accessible or writable: {e}"
            )
        return self

    @staticmethod
    def validate_recommended_parsers(
        custom_parsers: Dict[FileType, BaseReader], logger=None
    ) -> List[str]:
        """
        Validate that custom parsers are provided for recommended file types.
        Args:
            custom_parsers: Dictionary of file type to parser mappings
            logger: Optional logger for warnings
        Returns:
            List of missing recommended file types
        """
        recommended_types = [
            FileType.PDF,
            FileType.HTML,
            FileType.DOCUMENT,
            FileType.TEXT,
            FileType.SPREADSHEET,
            FileType.PRESENTATION,
        ]
        missing_types = [
            file_type.value
            for file_type in recommended_types
            if file_type not in custom_parsers
        ]
        if missing_types and logger:
            logger.warning(
                f"Recommended custom parsers missing for file types: {', '.join(missing_types)}"
            )
        return missing_types

    def __remove_custom_file(self, file_path: str):
        """Best-effort removal of a temporary file; logs instead of raising."""
        try:
            if os.path.exists(file_path):
                os.remove(file_path)
        except Exception as e:
            self.logger.error(f"Error removing file {file_path}: {e}")

    def __write_and_parse(
        self,
        file_type: FileType,
        content,
        extension: str,
        mode: str,
        encoding: Optional[str],
        error_noun: str,
    ) -> str:
        """
        Shared implementation behind the two public process_* methods: write
        *content* to a uniquely-named temp file, run the registered parser on
        it, and always clean the temp file up afterwards.
        Args:
            file_type: Parser to use (must already exist in custom_parsers)
            content: Bytes or text to write to the temp file
            extension: Extension for the temp file name
            mode: open() mode ("wb" for bytes, "w" for text)
            encoding: Text encoding, or None for binary mode
            error_noun: "file" or "text", used in the wrapped error message
        Returns:
            Processed markdown text
        Raises:
            ValueError: On parser failure, empty output, or temp-file I/O errors
        """
        file_name = f"{uuid.uuid4().hex}.{extension}"
        custom_file_path = os.path.join(self.custom_folder, file_name)
        try:
            if encoding is None:
                with open(custom_file_path, mode) as f:
                    f.write(content)
            else:
                with open(custom_file_path, mode, encoding=encoding) as f:
                    f.write(content)
            markdown_text = ""
            try:
                documents = self.custom_parsers[file_type].load_data(
                    file_path=custom_file_path
                )
                if not documents:
                    raise ValueError(
                        f"Custom parser for {file_type} returned no documents"
                    )
                markdown_text = "\n".join(doc.text for doc in documents)
                if not markdown_text.strip():
                    raise ValueError(
                        f"Custom parser for {file_type} returned empty content"
                    )
            except Exception as e:
                # Normalize every parser failure to ValueError with context.
                raise ValueError(
                    f"Error processing {error_noun} with custom parser for {file_type}: {e}"
                )
            finally:
                # Temp file is removed whether parsing succeeded or not.
                self.__remove_custom_file(custom_file_path)
            return markdown_text
        except (OSError, PermissionError) as e:
            raise ValueError(f"Error creating temporary file for parsing: {e}")

    def process_with_custom_parser(
        self, file_type: FileType, file_content: bytes, extension: str
    ) -> str:
        """
        Process binary file content with a custom parser (required).
        Args:
            file_type: The type of file to process
            file_content: The binary file content to process
            extension: The file extension
        Returns:
            Processed markdown text
        Raises:
            ValueError: If no custom parser found for file type or content is empty
        """
        if file_type not in self.custom_parsers:
            raise ValueError(f"No custom parser found for file type: {file_type}")
        if not file_content:
            raise ValueError("File content cannot be empty")
        return self.__write_and_parse(
            file_type, file_content, extension, "wb", None, "file"
        )

    def process_text_with_custom_parser(
        self, file_type: FileType, text_content: str, extension: str
    ) -> str:
        """
        Process text content with a custom parser (required).
        Args:
            file_type: The type of file to process
            text_content: The text content to process
            extension: The file extension
        Returns:
            Processed markdown text
        Raises:
            ValueError: If no custom parser found for file type or content is empty
        """
        if file_type not in self.custom_parsers:
            raise ValueError(f"No custom parser found for file type: {file_type}")
        if not text_content:
            raise ValueError("Text content cannot be empty")
        return self.__write_and_parse(
            file_type, text_content, extension, "w", "utf-8", "text"
        )
class SnowKBReader(BaseReader):
    """
    ServiceNow Knowledge Base reader using PySNC with username/password or password grant flow.
    This reader requires custom parsers for processing different file types. At minimum,
    an HTML parser must be provided for processing article bodies. Additional parsers
    can be provided for other file types as needed.
    The reader uses LlamaIndex's standard instrumentation event system to provide detailed
    tracking of the loading process. Events are fired at various stages during knowledge base
    article retrieval and attachment processing, allowing for monitoring and debugging.
    Required file types:
    - FileType.HTML: For HTML content (required for article body processing)
    Recommended file types to provide parsers for:
    - FileType.PDF: For PDF documents
    - FileType.DOCUMENT: For Word documents (.docx)
    - FileType.TEXT: For plain text files
    - FileType.SPREADSHEET: For Excel files (.xlsx)
    - FileType.PRESENTATION: For PowerPoint files (.pptx)
    Args:
        instance: ServiceNow instance name (without .service-now.com)
        custom_parsers: Dictionary mapping FileType enum values to BaseReader instances.
            This is REQUIRED and must include at least FileType.HTML.
            Each parser must implement the load_data method.
        username: ServiceNow username for authentication (required)
        password: ServiceNow password for authentication (required)
        client_id: OAuth client ID for ServiceNow (optional, but if provided, client_secret is required)
        client_secret: OAuth client secret for ServiceNow (optional, but if provided, client_id is required)
        process_attachment_callback: Optional callback to filter attachments (content_type: str, size_bytes: int, file_name: str) -> tuple[bool, str]
        process_document_callback: Optional callback to filter documents (kb_number: str) -> bool
        custom_folder: Folder for temporary files during parsing
        fail_on_error: Whether to fail on parsing errors or continue
        kb_table: ServiceNow table name for knowledge base articles
        logger: Optional logger instance
    Authentication:
        - Basic auth: Provide username and password only
        - OAuth flow: Provide username, password, client_id, and client_secret
    Events:
        The reader fires various events during processing using LlamaIndex's standard
        instrumentation system. Available events include page fetch events, attachment
        processing events, and error events. Use get_dispatcher() to subscribe to events.
    Raises:
        ValueError: If required parameters are missing or invalid, or if HTML parser is not provided
        TypeError: If custom_parsers types are incorrect
    """

    def __init__(
        self,
        instance: str,
        custom_parsers: Dict[FileType, BaseReader],
        username: Optional[str] = None,
        password: Optional[str] = None,
        client_id: Optional[str] = None,
        client_secret: Optional[str] = None,
        # NOTE(review): annotation widened from two to three args to match the
        # actual call in handle_attachment: (content_type, size_bytes, file_name).
        process_attachment_callback: Optional[
            Callable[[str, int, str], tuple[bool, str]]
        ] = None,
        process_document_callback: Optional[Callable[[str], bool]] = None,
        custom_folder: Optional[str] = None,
        fail_on_error: bool = True,
        kb_table: str = "kb_knowledge",
        logger: Optional[logging.Logger] = None,
    ):
        # Validate required parameters
        if not instance:
            raise ValueError("instance parameter is required")
        if custom_parsers is None:
            raise ValueError("custom_parsers parameter is required and cannot be None")
        if not custom_parsers:
            raise ValueError("custom_parsers parameter is required and cannot be empty")
        if not isinstance(custom_parsers, dict):
            raise TypeError("custom_parsers must be a dictionary")
        # Validate custom_parsers dictionary - ensure it has at least one parser
        if len(custom_parsers) == 0:
            raise ValueError("custom_parsers must contain at least one parser")
        # Validate each custom parser
        for file_type, parser in custom_parsers.items():
            if not isinstance(file_type, FileType):
                raise TypeError(
                    f"custom_parsers keys must be FileType enum values, got {type(file_type)}"
                )
            if not isinstance(parser, BaseReader):
                raise TypeError(
                    f"custom_parsers values must be BaseReader instances, got {type(parser)} for {file_type}"
                )
            # Validate that parser has required load_data method
            if not hasattr(parser, "load_data") or not callable(parser.load_data):
                raise TypeError(
                    f"custom_parsers[{file_type}] must have a callable 'load_data' method"
                )
        # Validate authentication parameters
        # Username and password are always required
        if not username:
            raise ValueError("username parameter is required")
        if not password:
            raise ValueError("password parameter is required")
        # If client_id is provided, client_secret must also be provided (for OAuth flow)
        if client_id is not None and client_secret is None:
            raise ValueError("client_secret is required when client_id is provided")
        if client_secret is not None and client_id is None:
            raise ValueError("client_id is required when client_secret is provided")
        self.instance = instance
        self.username = username
        self.password = password
        self.client_id = client_id
        self.client_secret = client_secret
        self.custom_parsers = custom_parsers
        # Default scratch directory lives under the current working directory.
        self.custom_folder = custom_folder or os.path.join(
            os.getcwd(), "custom_parsers"
        )
        # Validate recommended parsers and warn if missing
        self.logger = logger or internal_logger
        CustomParserManager.validate_recommended_parsers(custom_parsers, self.logger)
        # Ensure custom_folder exists and is writable
        try:
            os.makedirs(self.custom_folder, exist_ok=True)
            # Test write permissions
            test_file = os.path.join(self.custom_folder, ".test_write")
            with open(test_file, "w") as f:
                f.write("test")
            os.remove(test_file)
        except (OSError, PermissionError) as e:
            raise ValueError(
                f"Custom folder '{self.custom_folder}' is not accessible or writable: {e}"
            )
        self.process_attachment_callback = process_attachment_callback
        self.process_document_callback = process_document_callback
        self.fail_on_error = fail_on_error
        self.kb_table = kb_table
        # Set by initialize_client() immediately below.
        self.pysnc_client = None
        self.initialize_client()
        self.custom_parser_manager = CustomParserManager(
            custom_parsers=custom_parsers,
            custom_folder=self.custom_folder,
            logger=self.logger,
        )

    def _format_attachment_header(self, attachment: dict) -> str:
        """Formats the attachment title as a markdown header."""
        return f"# {attachment['file_name']}\n"

    def initialize_client(self):
        """Initialize a new ServiceNowClient instance with fresh credentials."""
        try:
            self.logger.info("Initializing ServiceNow client")
            instance = self.instance
            user = self.username
            password = self.password
            # Use OAuth flow if client_id and client_secret are provided, otherwise use basic auth
            if self.client_id and self.client_secret:
                client_id = self.client_id
                client_secret = self.client_secret
                self.pysnc_client = ServiceNowClient(
                    instance,
                    ServiceNowPasswordGrantFlow(
                        user, password, client_id, client_secret
                    ),
                )
            else:
                # Basic authentication with username and password
                self.pysnc_client = ServiceNowClient(instance, (user, password))
        except Exception as e:
            self.logger.error(f"Error initializing ServiceNow client: {e}")
            raise ValueError(f"Error initializing ServiceNow client: {e}")

    def load_data(
        self,
        article_sys_id: Optional[str] = None,
        numbers: Optional[List[str]] = None,
        status: str = "Published",
    ) -> List[Document]:
        """
        Load a KB article by sys_id or number using PySNC. Returns a list with one Document.

        Args:
            article_sys_id: sys_id of a single article to load (takes precedence).
            numbers: KB numbers to load when article_sys_id is not given.
            status: Workflow-state filter; falls back to DEFAULT_WORKFLOW_STATE when falsy.

        Raises:
            ValueError: If neither article_sys_id nor numbers is provided, or if
                no matching article is found. Per-article errors re-raise only
                when fail_on_error is True; otherwise the article is skipped
                after firing SNOWKBPageFailedEvent.
        """
        gr = self.pysnc_client.GlideRecord(self.kb_table)
        if article_sys_id:
            gr.add_query("sys_id", article_sys_id)
        elif numbers:
            gr.add_query("number", "IN", ",".join(numbers))
        else:
            raise ValueError("Must provide article_sys_id or number")
        # Handle latest field: include records where latest is true OR latest field is not present/empty
        latest_condition = gr.add_query("latest", "true")
        latest_condition.add_or_condition("latest", "ISEMPTY")
        gr.add_query(
            "workflow_state", status or DEFAULT_WORKFLOW_STATE
        )  # Include only published articles
        gr.query()
        if not gr.has_next():
            self.logger.error(
                f"No KB article found for sys_id {article_sys_id} or numbers {numbers}"
            )
            raise ValueError(
                f"No KB article found for sys_id {article_sys_id} or numbers {numbers}"
            )
        docs = []
        total_pages = gr.get_row_count()
        self.logger.info(
            f"Found {total_pages} KB articles matching criteria: sys_id={article_sys_id}, numbers={numbers}, status={status}"
        )
        dispatcher.event(SNOWKBTotalPagesEvent(total_pages=total_pages))
        while gr.next():
            try:
                kb_number = gr.number.get_value()
                dispatcher.event(SNOWKBPageFetchStartEvent(page_id=kb_number))
                # Check if document should be processed using callback
                if self.process_document_callback:
                    should_process = self.process_document_callback(kb_number)
                    if not should_process:
                        self.logger.info(
                            f"Skipping document {kb_number} based on process_document_callback"
                        )
                        continue
                # Process article text and attachments
                # NOTE(review): gr.article_body is used without .get_value(),
                # unlike the gr.text fallback — presumably the parser accepts a
                # GlideElement here; confirm against pysnc behavior.
                txt_lm = (
                    gr.article_body
                    if hasattr(gr, "article_body") and gr.article_body
                    else gr.text.get_value()
                )
                attachments = self.handle_attachments(
                    gr.sys_id.get_value(), kb_number=gr.number.get_value()
                )
                try:
                    article_markdown = (
                        self.custom_parser_manager.process_text_with_custom_parser(
                            FileType.HTML, txt_lm, "html"
                        )
                    )
                except ValueError as e:
                    self.logger.error(
                        f"Error processing article HTML with custom parser: {e}"
                    )
                    if self.fail_on_error:
                        raise
                    article_markdown = txt_lm  # Fallback to original text
                # Article body first, then each attachment under a "# <name>" header.
                complete_text = (
                    article_markdown
                    + "\n\n"
                    + "\n".join(
                        self._format_attachment_header(attach) + attach["markdown_text"]
                        for attach in attachments
                        if "markdown_text" in attach
                    )
                )
                display_number = (
                    gr.get_display_value("display_number")
                    if hasattr(gr, "display_number")
                    else None
                )
                sys_updated_on = (
                    gr.get_value("sys_updated_on")
                    if hasattr(gr, "sys_updated_on")
                    else None
                )
                kb_number = gr.get_value("number") if hasattr(gr, "number") else None
                kb_status = (
                    gr.workflow_state.get_display_value()
                    if hasattr(gr, "workflow_state")
                    else "Unknown"
                )
                doc = Document(
                    text=complete_text,
                    extra_info={
                        "title": gr.short_description.get_display_value()
                        if hasattr(gr, "short_description")
                        else "No Title",
                        "page_id": kb_number,
                        "status": kb_status,
                        "version": display_number,
                        "sys_updated_on": sys_updated_on,
                        "kb_number": kb_number,
                    },
                )
                metadata = {
                    "version": display_number,
                    "sys_updated_on": sys_updated_on,
                    "kb_number": kb_number,
                }
                dispatcher.event(
                    SNOWKBPageFetchCompletedEvent(
                        page_id=kb_number,
                        document=doc,
                        metadata=metadata,
                    )
                )
                docs.append(doc)
            except Exception as e:
                self.logger.error(
                    f"Error processing KB article {gr.number.get_value()}: {e}"
                )
                dispatcher.event(
                    SNOWKBPageFailedEvent(
                        page_id=gr.number.get_value(),
                        error=str(e),
                    )
                )
                if self.fail_on_error:
                    raise
        return docs

    def _get_attachment_data(self, gr_attach: GlideRecord, page_id: str) -> dict:
        """Helper method to get attachment data for events."""
        # f-strings coerce GlideElement values to plain strings for the event payload.
        return {
            "page_id": page_id,
            "attachment_id": f"{gr_attach.get_value('sys_id')}",
            "attachment_name": f"{gr_attach.get_value('file_name')}",
            "attachment_type": f"{gr_attach.get_value('content_type')}",
            "attachment_size": int(f"{gr_attach.get_value('size_bytes')}"),
            "attachment_link": f"https://{self.instance}.service-now.com/sys_attachment.do?sys_id={gr_attach.get_value('sys_id')}",
        }

    def handle_attachment(self, gr_attach: GlideRecord, kb_number: str) -> dict:
        """
        Process a single attachment GlideRecord and return its info dict.

        Returns an empty dict when the attachment is skipped (callback veto,
        missing parser, download or parse failure with fail_on_error=False).
        """
        if not hasattr(gr_attach, "file_name") or not hasattr(
            gr_attach, "content_type"
        ):
            self.logger.error(
                "Invalid GlideRecord for attachment, missing required fields."
            )
            return {}
        attachment_id = f"{gr_attach.get_value('sys_id')}"
        size_bytes = int(f"{gr_attach.get_value('size_bytes')}")
        file_name = f"{gr_attach.get_value('file_name')}"
        content_type = f"{gr_attach.get_value('content_type')}"
        self.logger.info(f"Processing attachment {file_name}")
        attachment_data = self._get_attachment_data(gr_attach, kb_number)
        dispatcher.event(SNOWKBAttachmentProcessingStartEvent(**attachment_data))
        # Caller-supplied filter decides whether to download at all.
        if self.process_attachment_callback:
            can_process, message = self.process_attachment_callback(
                content_type, size_bytes, file_name
            )
            if not can_process:
                attachment_data = self._get_attachment_data(gr_attach, kb_number)
                dispatcher.event(
                    SNOWKBAttachmentSkippedEvent(**attachment_data, reason=message)
                )
                self.logger.info(f"Skipping attachment {file_name}: {message}")
                return {}
        try:
            res: requests.Response = self._download_attachment_content(gr_attach.sys_id)
            if not res or not getattr(res, "ok", False):
                self.logger.error(
                    f"Failed to download attachment content for {file_name}"
                )
                return {}
            else:
                file_content = res.content
                file_type = self.get_File_type(file_name)
                # Check if parser is available for this file type
                if file_type not in self.custom_parsers:
                    self.logger.warning(
                        f"No custom parser available for file type {file_type.value} (file: {file_name}). Skipping attachment."
                    )
                    attachment_data = self._get_attachment_data(gr_attach, kb_number)
                    dispatcher.event(
                        SNOWKBAttachmentSkippedEvent(
                            **attachment_data, reason=f"No parser for {file_type.value}"
                        )
                    )
                    return {}  # Skip this attachment if no parser available
                try:
                    markdown_text = self.custom_parser_manager.process_with_custom_parser(
                        file_type, file_content, file_name.split(".")[-1]
                    )
                except ValueError as e:
                    self.logger.error(
                        f"Error processing attachment {file_name} with custom parser: {e}"
                    )
                    attachment_data = self._get_attachment_data(gr_attach, kb_number)
                    dispatcher.event(
                        SNOWKBAttachmentFailedEvent(**attachment_data, error=str(e))
                    )
                    if self.fail_on_error:
                        raise
                    return {}  # Skip this attachment if custom parser fails
                self.logger.debug(markdown_text)
                attachment_data = self._get_attachment_data(gr_attach, kb_number)
                dispatcher.event(SNOWKBAttachmentProcessedEvent(**attachment_data))
                return {
                    "file_name": file_name,
                    "content_type": content_type,
                    "size_bytes": size_bytes,
                    "markdown_text": markdown_text,
                    "sys_id": gr_attach.sys_id,
                }
        except Exception as e:
            self.logger.error(f"Error processing attachment {file_name}: {e}")
            attachment_data = self._get_attachment_data(gr_attach, kb_number)
            dispatcher.event(
                SNOWKBAttachmentFailedEvent(**attachment_data, error=str(e))
            )
            return {}

    def handle_attachments(self, sys_id: str, kb_number: str) -> list:
        """
        Download all attachments for a given KB article sys_id. Returns a list of attachment info dicts.

        Entries without a "markdown_text" key (skipped/failed attachments) are
        filtered out; download errors are logged and yield an empty list.
        """
        attachments = []
        try:
            gr_attach = self.pysnc_client.GlideRecord("sys_attachment")
            gr_attach.add_query("table_sys_id", sys_id)
            gr_attach.add_query("table_name", self.kb_table)
            gr_attach.query()
            while gr_attach.next():
                attachment_info = self.handle_attachment(gr_attach, kb_number)
                if "markdown_text" in attachment_info:
                    attachments.append(attachment_info)
        except Exception as e:
            self.logger.error(f"Error downloading attachments: {e}")
        return attachments

    def get_File_type(self, file_name: str) -> FileType:
        """
        Determine the file type based on the file name extension.
        """
        ext = os.path.splitext(file_name)[1].lower()
        if ext in [".jpg", ".jpeg", ".png", ".gif"]:
            return FileType.IMAGE
        elif ext in [".pdf"]:
            return FileType.PDF
        elif ext in [".txt"]:
            return FileType.TEXT
        elif ext in [".csv"]:
            return FileType.CSV
        elif ext in [".html"]:
            return FileType.HTML
        elif ext in [".docx"]:
            return FileType.DOCUMENT
        elif ext in [".xlsx"]:
            return FileType.SPREADSHEET
        elif ext in [".pptx"]:
            return FileType.PRESENTATION
        elif ext in [".md"]:
            return FileType.MARKDOWN
        else:
            return FileType.UNKNOWN

    # NOTE(review): return annotation corrected — callers read .ok and .content
    # from the result, i.e. a requests.Response, not raw bytes.
    def _download_attachment_content(self, sys_id: str) -> Optional[requests.Response]:
        """
        Download attachment content using PySNC's attachment.get_file method.
        """
        try:
            if hasattr(self.pysnc_client, "attachment_api") and hasattr(
                self.pysnc_client.attachment_api, "get_file"
            ):
                return self.pysnc_client.attachment_api.get_file(sys_id)
            else:
                self.logger.error(
                    "self.pysnc_client.attachment_api.get_file is not available. Please check your PySNC version."
                )
                return None
        except Exception as e:
            self.logger.error(f"Attachment download failed for {sys_id}: {e}")
            return None
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-service-now/llama_index/readers/service_now/base.py",
"license": "MIT License",
"lines": 636,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-service-now/llama_index/readers/service_now/event.py | from llama_index.core.instrumentation.events.base import BaseEvent
from typing import Dict, Any
from enum import Enum
from llama_index.core.schema import Document
from pydantic import Field
class FileType(Enum):
    """Logical file categories used to route content to a registered custom parser."""

    IMAGE = "image"
    DOCUMENT = "document"  # Word documents (.docx)
    TEXT = "text"
    HTML = "html"  # Required parser type: KB article bodies are HTML
    CSV = "csv"
    MARKDOWN = "md"
    SPREADSHEET = "spreadsheet"  # Excel files (.xlsx)
    PRESENTATION = "presentation"  # PowerPoint files (.pptx)
    PDF = "pdf"
    UNKNOWN = "unknown"  # Fallback for unrecognized extensions
# ServiceNow Knowledge Base Reader Events
# All events use LlamaIndex's standard instrumentation event system
# and inherit from BaseEvent for consistent event handling across the framework
class SNOWKBTotalPagesEvent(BaseEvent):
    """Event fired when total pages to process is determined."""

    # Row count of the GlideRecord query, emitted once before iteration starts.
    total_pages: int = Field(description="Total number of pages to process")
class SNOWKBPageFetchStartEvent(BaseEvent):
    """Event fired when page data fetch starts."""

    # page_id is the KB number (e.g. "KB0010001"), not the sys_id.
    page_id: str = Field(description="ID of the page being fetched")
class SNOWKBPageFetchCompletedEvent(BaseEvent):
    """Event fired when page data fetch completes successfully."""

    page_id: str = Field(description="ID of the page that was fetched")
    # Fully assembled Document (article body + attachment markdown).
    document: Document = Field(description="The processed document")
    metadata: Dict[str, Any] = Field(
        default_factory=dict, description="Additional metadata"
    )
class SNOWKBPageSkippedEvent(BaseEvent):
    """Event fired when a page is skipped."""

    # NOTE(review): not emitted by SnowKBReader in this module — kept for API completeness.
    page_id: str = Field(description="ID of the page that was skipped")
    reason: str = Field(description="Reason why the page was skipped")
class SNOWKBPageFailedEvent(BaseEvent):
    """Event fired when page processing fails."""

    page_id: str = Field(description="ID of the page that failed")
    # Stringified exception from the failed article.
    error: str = Field(description="Error message")
class SNOWKBAttachmentProcessingStartEvent(BaseEvent):
    """Event fired when attachment processing starts."""

    # Field set mirrors SnowKBReader._get_attachment_data().
    page_id: str = Field(description="ID of the parent page")
    attachment_id: str = Field(description="ID of the attachment")
    attachment_name: str = Field(description="Name of the attachment")
    attachment_type: str = Field(description="MIME type of the attachment")
    attachment_size: int = Field(description="Size of the attachment in bytes")
    attachment_link: str = Field(description="Link to the attachment")
class SNOWKBAttachmentProcessedEvent(BaseEvent):
    """Event fired when attachment processing completes successfully."""

    # Field set mirrors SnowKBReader._get_attachment_data().
    page_id: str = Field(description="ID of the parent page")
    attachment_id: str = Field(description="ID of the attachment")
    attachment_name: str = Field(description="Name of the attachment")
    attachment_type: str = Field(description="MIME type of the attachment")
    attachment_size: int = Field(description="Size of the attachment in bytes")
    attachment_link: str = Field(description="Link to the attachment")
class SNOWKBAttachmentSkippedEvent(BaseEvent):
    """Event fired when an attachment is skipped."""

    page_id: str = Field(description="ID of the parent page")
    attachment_id: str = Field(description="ID of the attachment")
    attachment_name: str = Field(description="Name of the attachment")
    attachment_type: str = Field(description="MIME type of the attachment")
    attachment_size: int = Field(description="Size of the attachment in bytes")
    attachment_link: str = Field(description="Link to the attachment")
    # Either the callback's veto message or "No parser for <type>".
    reason: str = Field(description="Reason why the attachment was skipped")
class SNOWKBAttachmentFailedEvent(BaseEvent):
    """Event fired when attachment processing fails."""

    page_id: str = Field(description="ID of the parent page")
    attachment_id: str = Field(description="ID of the attachment")
    attachment_name: str = Field(description="Name of the attachment")
    attachment_type: str = Field(description="MIME type of the attachment")
    attachment_size: int = Field(description="Size of the attachment in bytes")
    attachment_link: str = Field(description="Link to the attachment")
    # Stringified exception from download or parsing.
    error: str = Field(description="Error message")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-service-now/llama_index/readers/service_now/event.py",
"license": "MIT License",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-service-now/tests/test_snow_kb_reader.py | """Tests for ServiceNow Knowledge Base Reader."""
import sys
from unittest.mock import MagicMock, patch
import pytest
from llama_index.core.schema import Document
from llama_index.core.readers.base import BaseReader
class MockCustomParser(BaseReader):
    """Stand-in parser that ignores the file and returns canned content."""

    def load_data(self, file_path: str):
        canned = Document(text="Mocked parsed content")
        return [canned]
class MockServiceNowClient:
    """Mock ServiceNow client for testing."""

    def __init__(self, *args, **kwargs):
        # attachment_api.get_file returns canned binary content.
        self.attachment_api = MagicMock()
        self.attachment_api.get_file = MagicMock(return_value=b"mock file content")

    def GlideRecord(self, table):
        """Mock GlideRecord for ServiceNow table operations."""
        record = MagicMock()
        record.add_query = MagicMock()
        record.query = MagicMock()
        record.has_next = MagicMock(return_value=True)
        # Iteration yields exactly one article, then stops.
        record.next = MagicMock(side_effect=[True, False])
        record.get_row_count = MagicMock(return_value=1)
        # Canned KB-article field values: attr -> (accessor name, value).
        canned_fields = {
            "number": ("get_value", "KB0010001"),
            "sys_id": ("get_value", "test_sys_id"),
            "short_description": ("get_display_value", "Test KB Article"),
            "text": ("get_value", "<p>Test article content</p>"),
            "workflow_state": ("get_display_value", "Published"),
        }
        for attr, (accessor, value) in canned_fields.items():
            field = MagicMock()
            setattr(field, accessor, MagicMock(return_value=value))
            setattr(record, attr, field)
        record.get_value = MagicMock(return_value="test_value")
        record.get_display_value = MagicMock(return_value="test_display_value")
        return record
class MockPasswordGrantFlow:
    """Mock password grant flow for ServiceNow authentication."""

    def __init__(self, *args, **kwargs):
        # Accepts any credentials; authentication is a no-op in tests.
        pass
@pytest.fixture
def mock_pysnc_imports():
    """Mock pysnc imports for testing."""
    fake_modules = {"pysnc": MagicMock(), "pysnc.auth": MagicMock()}
    # patch.dict restores sys.modules when the fixture is torn down.
    with patch.dict("sys.modules", fake_modules):
        pysnc_module = sys.modules["pysnc"]
        pysnc_module.ServiceNowClient = MockServiceNowClient
        pysnc_module.GlideRecord = MagicMock()
        sys.modules["pysnc.auth"].ServiceNowPasswordGrantFlow = MockPasswordGrantFlow
        yield
@pytest.fixture
def snow_reader(mock_pysnc_imports):
    """Fixture to create a SnowKBReader instance with mocked dependencies."""
    patched_client = patch(
        "llama_index.readers.service_now.base.ServiceNowClient", MockServiceNowClient
    )
    patched_flow = patch(
        "llama_index.readers.service_now.base.ServiceNowPasswordGrantFlow",
        MockPasswordGrantFlow,
    )
    with patched_client, patched_flow:
        from llama_index.readers.service_now import SnowKBReader
        from llama_index.readers.service_now.base import FileType

        # HTML parser is mandatory; PDF/DOCUMENT cover common attachment types.
        parsers = {
            FileType.HTML: MockCustomParser(),
            FileType.PDF: MockCustomParser(),
            FileType.DOCUMENT: MockCustomParser(),
        }
        return SnowKBReader(
            instance="test.service-now.com",
            custom_parsers=parsers,
            username="test_user",
            password="test_pass",
            client_id="test_client_id",
            client_secret="test_client_secret",
        )
class TestSnowKBReader:
"""Test class for ServiceNow Knowledge Base Reader."""
def test_initialization(self, mock_pysnc_imports):
"""Test that SnowKBReader initializes correctly."""
with patch(
"llama_index.readers.service_now.base.ServiceNowClient",
MockServiceNowClient,
):
with patch(
"llama_index.readers.service_now.base.ServiceNowPasswordGrantFlow",
MockPasswordGrantFlow,
):
from llama_index.readers.service_now import SnowKBReader
from llama_index.readers.service_now.base import FileType
custom_parsers = {
FileType.HTML: MockCustomParser(), # Required
FileType.PDF: MockCustomParser(),
}
reader = SnowKBReader(
instance="test.service-now.com",
custom_parsers=custom_parsers,
username="test_user",
password="test_pass",
client_id="test_client_id",
client_secret="test_client_secret",
)
assert reader.instance == "test.service-now.com"
assert reader.username == "test_user"
assert reader.password == "test_pass"
assert reader.client_id == "test_client_id"
assert reader.client_secret == "test_client_secret"
assert reader.kb_table == "kb_knowledge"
assert reader.pysnc_client is not None
assert reader.custom_parsers == custom_parsers
def test_initialization_missing_credentials(self):
"""Test that SnowKBReader raises error when missing required credentials."""
from llama_index.readers.service_now import SnowKBReader
from llama_index.readers.service_now.base import FileType
custom_parsers = {
FileType.HTML: MockCustomParser(), # Required
FileType.PDF: MockCustomParser(),
}
with pytest.raises(ValueError, match="username parameter is required"):
SnowKBReader(instance="test.service-now.com", custom_parsers=custom_parsers)
def test_load_data_by_sys_id(self, snow_reader):
"""Test loading KB article by sys_id."""
with patch.object(snow_reader, "load_data") as mock_load_data:
mock_doc = Document(
text="Test content",
metadata={
"title": "Test KB Article",
"page_id": "KB0010001",
"status": "Published",
},
)
mock_load_data.return_value = [mock_doc]
result = snow_reader.load_data(article_sys_id="test_sys_id")
assert len(result) == 1
assert result[0].text == "Test content"
assert result[0].metadata["title"] == "Test KB Article"
mock_load_data.assert_called_once_with(article_sys_id="test_sys_id")
def test_load_data_by_numbers(self, snow_reader):
"""Test loading KB articles by numbers."""
with patch.object(snow_reader, "load_data") as mock_load_data:
mock_doc = Document(
text="Test content",
metadata={
"title": "Test KB Article",
"page_id": "KB0010001",
"status": "Published",
},
)
mock_load_data.return_value = [mock_doc]
result = snow_reader.load_data(numbers=["KB0010001", "KB0010002"])
assert len(result) == 1
mock_load_data.assert_called_once_with(numbers=["KB0010001", "KB0010002"])
    def test_load_data_no_parameters(self, snow_reader):
        """Test that load_data raises error when no parameters provided."""
        # NOTE(review): the ValueError is injected via side_effect, so this
        # asserts the mock's behavior, not the reader's own argument
        # validation — consider calling the real load_data here instead.
        with pytest.raises(ValueError, match="Must provide article_sys_id or number"):
            with patch.object(snow_reader, "load_data") as mock_load_data:
                mock_load_data.side_effect = ValueError(
                    "Must provide article_sys_id or number"
                )
                snow_reader.load_data()
    def test_get_documents_with_attachments(self, snow_reader):
        """Test getting documents with attachment processing."""
        # Attachment discovery is stubbed to yield one already-parsed PDF.
        with patch.object(snow_reader, "handle_attachments") as mock_handle_attachments:
            mock_handle_attachments.return_value = [
                {"file_name": "test.pdf", "markdown_text": "PDF content"}
            ]
            # The HTML body parser is stubbed too, so load_data only has to
            # assemble the final document text from the pieces.
            with patch.object(
                snow_reader.custom_parser_manager, "process_text_with_custom_parser"
            ) as mock_process:
                mock_process.return_value = "Processed HTML content"
                result = snow_reader.load_data(article_sys_id="test_sys_id")
                assert len(result) == 1
                # Expected text layout: parsed body, then a "# <file>" header,
                # then the attachment's markdown.
                assert "Processed HTML content" in result[0].text
                assert "# test.pdf" in result[0].text
                assert "PDF content" in result[0].text
    def test_handle_attachments(self, snow_reader):
        """Test attachment handling functionality."""
        with patch.object(snow_reader.pysnc_client, "GlideRecord") as mock_gr_class:
            mock_gr = MagicMock()
            # One record in the result set: the first next() yields it, the
            # second ends the iteration.
            mock_gr.next.side_effect = [True, False]
            mock_gr_class.return_value = mock_gr
            with patch.object(
                snow_reader, "handle_attachment"
            ) as mock_handle_attachment:
                mock_handle_attachment.return_value = {
                    "file_name": "test.pdf",
                    "markdown_text": "PDF content",
                }
                result = snow_reader.handle_attachments("test_sys_id", "KB0010001")
                assert len(result) == 1
                assert result[0]["file_name"] == "test.pdf"
                # The GlideRecord query must be scoped to this article and to
                # the reader's KB table.
                mock_gr.add_query.assert_any_call("table_sys_id", "test_sys_id")
                mock_gr.add_query.assert_any_call("table_name", "kb_knowledge")
def test_get_file_type(self, snow_reader):
"""Test file type detection."""
from llama_index.readers.service_now.base import FileType
assert snow_reader.get_File_type("test.pdf") == FileType.PDF
assert snow_reader.get_File_type("test.jpg") == FileType.IMAGE
assert snow_reader.get_File_type("test.docx") == FileType.DOCUMENT
assert snow_reader.get_File_type("test.xlsx") == FileType.SPREADSHEET
assert snow_reader.get_File_type("test.txt") == FileType.TEXT
assert snow_reader.get_File_type("test.html") == FileType.HTML
assert snow_reader.get_File_type("test.csv") == FileType.CSV
assert snow_reader.get_File_type("test.md") == FileType.MARKDOWN
assert snow_reader.get_File_type("test.unknown") == FileType.UNKNOWN
def test_download_attachment_content(self, snow_reader):
"""Test attachment content download."""
result = snow_reader._download_attachment_content("test_sys_id")
assert result == b"mock file content"
snow_reader.pysnc_client.attachment_api.get_file.assert_called_once_with(
"test_sys_id"
)
def test_download_attachment_content_failure(self, snow_reader):
"""Test attachment download failure handling."""
snow_reader.pysnc_client.attachment_api.get_file.side_effect = Exception(
"Download failed"
)
result = snow_reader._download_attachment_content("test_sys_id")
assert result is None
def test_custom_kb_table(self, mock_pysnc_imports):
"""Test initialization with custom KB table."""
with patch(
"llama_index.readers.service_now.base.ServiceNowClient",
MockServiceNowClient,
):
with patch(
"llama_index.readers.service_now.base.ServiceNowPasswordGrantFlow",
MockPasswordGrantFlow,
):
from llama_index.readers.service_now import SnowKBReader
from llama_index.readers.service_now.base import FileType
custom_parsers = {
FileType.HTML: MockCustomParser(), # Required
FileType.PDF: MockCustomParser(),
}
reader = SnowKBReader(
instance="test.service-now.com",
custom_parsers=custom_parsers,
username="test_user",
password="test_pass",
client_id="test_client_id",
client_secret="test_client_secret",
kb_table="custom_kb_table",
)
assert reader.kb_table == "custom_kb_table"
def test_fail_on_error_false(self, mock_pysnc_imports):
"""Test that fail_on_error=False allows processing to continue on errors."""
with patch(
"llama_index.readers.service_now.base.ServiceNowClient",
MockServiceNowClient,
):
with patch(
"llama_index.readers.service_now.base.ServiceNowPasswordGrantFlow",
MockPasswordGrantFlow,
):
from llama_index.readers.service_now import SnowKBReader
from llama_index.readers.service_now.base import FileType
custom_parsers = {
FileType.HTML: MockCustomParser(), # Required
FileType.PDF: MockCustomParser(),
}
reader = SnowKBReader(
instance="test.service-now.com",
custom_parsers=custom_parsers,
username="test_user",
password="test_pass",
client_id="test_client_id",
client_secret="test_client_secret",
fail_on_error=False,
)
assert reader.fail_on_error is False
def test_event_system_integration(self, snow_reader):
"""Test that LlamaIndex event system integration is working."""
from llama_index.readers.service_now.event import (
SNOWKBPageFetchStartEvent,
SNOWKBPageFetchCompletedEvent,
)
# Test that events can be imported and are proper BaseEvent subclasses
assert hasattr(SNOWKBPageFetchStartEvent, "model_fields")
assert hasattr(SNOWKBPageFetchCompletedEvent, "model_fields")
# Test event creation
start_event = SNOWKBPageFetchStartEvent(page_id="KB0010001")
assert start_event.page_id == "KB0010001"
    @patch("os.path.exists")
    @patch("os.remove")
    def test_custom_parser_manager_file_cleanup(
        self, mock_remove, mock_exists, snow_reader
    ):
        """Test that custom parser manager cleans up temporary files."""
        # Decorators apply bottom-up, so mock_remove (os.remove) comes first.
        mock_exists.return_value = True
        # Invoke the name-mangled private helper directly
        # (CustomParserManager.__remove_custom_file).
        snow_reader.custom_parser_manager._CustomParserManager__remove_custom_file(
            "test_file.txt"
        )
        # The helper must check for existence before deleting.
        mock_exists.assert_called_once_with("test_file.txt")
        mock_remove.assert_called_once_with("test_file.txt")
def test_format_attachment_header(self, snow_reader):
"""Test attachment header formatting."""
attachment = {"file_name": "test_document.pdf"}
result = snow_reader._format_attachment_header(attachment)
assert result == "# test_document.pdf\n"
def test_initialize_client_with_valid_credentials(self, mock_pysnc_imports):
"""Test client initialization with valid credentials."""
with patch(
"llama_index.readers.service_now.base.ServiceNowClient",
MockServiceNowClient,
):
with patch(
"llama_index.readers.service_now.base.ServiceNowPasswordGrantFlow",
MockPasswordGrantFlow,
):
from llama_index.readers.service_now import SnowKBReader
from llama_index.readers.service_now.base import FileType
custom_parsers = {
FileType.HTML: MockCustomParser(), # Required
FileType.PDF: MockCustomParser(),
}
reader = SnowKBReader(
instance="test.service-now.com",
custom_parsers=custom_parsers,
username="test_user",
password="test_pass",
client_id="test_client_id",
client_secret="test_client_secret",
)
# Test that client was initialized
assert reader.pysnc_client is not None
def test_custom_parsers_integration(self, mock_pysnc_imports):
"""Test integration with custom parsers."""
with patch(
"llama_index.readers.service_now.base.ServiceNowClient",
MockServiceNowClient,
):
with patch(
"llama_index.readers.service_now.base.ServiceNowPasswordGrantFlow",
MockPasswordGrantFlow,
):
from llama_index.readers.service_now import SnowKBReader
from llama_index.readers.service_now.base import FileType
# Mock custom parser (use the actual MockCustomParser class instead of MagicMock)
custom_parsers = {
FileType.HTML: MockCustomParser(), # Required
FileType.PDF: MockCustomParser(),
}
reader = SnowKBReader(
instance="test.service-now.com",
custom_parsers=custom_parsers,
username="test_user",
password="test_pass",
client_id="test_client_id",
client_secret="test_client_secret",
)
assert reader.custom_parsers == custom_parsers
assert FileType.PDF in reader.custom_parsers
def test_process_callbacks(self, mock_pysnc_imports):
"""Test process callbacks functionality."""
with patch(
"llama_index.readers.service_now.base.ServiceNowClient",
MockServiceNowClient,
):
with patch(
"llama_index.readers.service_now.base.ServiceNowPasswordGrantFlow",
MockPasswordGrantFlow,
):
from llama_index.readers.service_now import SnowKBReader
from llama_index.readers.service_now.base import FileType
def process_attachment_callback(
file_name: str, size: int
) -> tuple[bool, str]:
return True, "Processing"
def process_document_callback(kb_number: str) -> bool:
return True
custom_parsers = {
FileType.HTML: MockCustomParser(), # Required
FileType.PDF: MockCustomParser(),
}
reader = SnowKBReader(
instance="test.service-now.com",
custom_parsers=custom_parsers,
username="test_user",
password="test_pass",
client_id="test_client_id",
client_secret="test_client_secret",
process_attachment_callback=process_attachment_callback,
process_document_callback=process_document_callback,
)
assert reader.process_attachment_callback is not None
assert reader.process_document_callback is not None
# Test callback execution
result = reader.process_attachment_callback("test.pdf", 1000)
assert result == (True, "Processing")
result = reader.process_document_callback("KB0010001")
assert result is True
def test_custom_parser_validation(self, mock_pysnc_imports):
"""Test custom parser validation."""
with patch(
"llama_index.readers.service_now.base.ServiceNowClient",
MockServiceNowClient,
):
with patch(
"llama_index.readers.service_now.base.ServiceNowPasswordGrantFlow",
MockPasswordGrantFlow,
):
from llama_index.readers.service_now import SnowKBReader
from llama_index.readers.service_now.base import FileType
custom_parsers = {
FileType.HTML: MockCustomParser(), # Required
FileType.PDF: MockCustomParser(),
}
reader = SnowKBReader(
instance="test.service-now.com",
custom_parsers=custom_parsers,
username="test_user",
password="test_pass",
client_id="test_client_id",
client_secret="test_client_secret",
)
assert reader.custom_parsers[FileType.PDF] is not None
# Test parsing with custom parser
mock_parser = reader.custom_parsers[FileType.PDF]
result = mock_parser.load_data("test.pdf")
assert len(result) == 1
assert result[0].text == "Mocked parsed content"
    def test_smoke_test_instantiation(self, mock_pysnc_imports):
        """Smoke test to verify SnowKBReader can be instantiated correctly."""
        from llama_index.readers.service_now import SnowKBReader
        from llama_index.readers.service_now.base import FileType

        custom_parsers = {
            FileType.PDF: MockCustomParser(),
            FileType.HTML: MockCustomParser(),
            FileType.DOCUMENT: MockCustomParser(),
        }
        # This should create without errors (though it will fail on ServiceNow connection)
        try:
            reader = SnowKBReader(
                instance="test.service-now.com",
                custom_parsers=custom_parsers,
                username="test_user",
                password="test_password",
                client_id="test_client_id",
                client_secret="test_client_secret",
            )
            # Verify basic properties are set correctly
            assert reader.instance == "test.service-now.com"
            assert reader.username == "test_user"
            assert reader.password == "test_password"
            assert reader.client_id == "test_client_id"
            assert reader.client_secret == "test_client_secret"
            assert reader.custom_parsers == custom_parsers
            assert reader.kb_table == "kb_knowledge"
            assert reader.fail_on_error is True
            assert reader.pysnc_client is not None
            assert reader.custom_parser_manager is not None
            # Verify the custom parsers are working
            assert FileType.PDF in reader.custom_parsers
            assert FileType.HTML in reader.custom_parsers
            assert FileType.DOCUMENT in reader.custom_parsers
        except Exception as e:
            # We expect a ServiceNow connection error in test environment.
            # NOTE(review): matching on the message text is brittle; a typed
            # exception raised by the reader would make this check sturdier.
            if "ServiceNow client" in str(e) or "Instance name not well-formed" in str(
                e
            ):
                # This is expected since we can't actually connect to ServiceNow in tests
                pass
            else:
                # Any other error is unexpected and should fail the test
                pytest.fail(f"Unexpected error during SnowKBReader instantiation: {e}")
    def test_smoke_test_with_minimal_config(self, mock_pysnc_imports):
        """Smoke test with minimal configuration."""
        from llama_index.readers.service_now import SnowKBReader
        from llama_index.readers.service_now.base import FileType

        # Test with minimal required configuration
        custom_parsers = {
            FileType.HTML: MockCustomParser()  # HTML parser is required for article body processing
        }
        try:
            # No client_id/client_secret: basic username+password auth path.
            reader = SnowKBReader(
                instance="test.service-now.com",
                custom_parsers=custom_parsers,
                username="test_user",
                password="test_password",
            )
            # Verify minimal config is set correctly
            assert reader.instance == "test.service-now.com"
            assert reader.username == "test_user"
            assert reader.password == "test_password"
            assert reader.client_id is None
            assert reader.client_secret is None
            assert len(reader.custom_parsers) == 1
            assert FileType.HTML in reader.custom_parsers
        except Exception as e:
            # We expect a ServiceNow connection error in test environment.
            # NOTE(review): message-text matching is brittle; see note in
            # test_smoke_test_instantiation.
            if "ServiceNow client" in str(e) or "Instance name not well-formed" in str(
                e
            ):
                # This is expected since we can't actually connect to ServiceNow in tests
                pass
            else:
                # Any other error is unexpected and should fail the test
                pytest.fail(
                    f"Unexpected error during minimal SnowKBReader instantiation: {e}"
                )
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-service-now/tests/test_snow_kb_reader.py",
"license": "MIT License",
"lines": 505,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-cloudflare-ai-gateway/llama_index/llms/cloudflare_ai_gateway/base.py | """
Cloudflare AI Gateway LLM integration.
This module provides integration with Cloudflare AI Gateway, allowing you to
use multiple AI models from different providers with automatic fallback.
"""
import json
from typing import Any, Dict, List, Optional, Sequence, Union
import logging
import httpx
from llama_index.core.llms import LLM
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
)
from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr
from llama_index.core.callbacks import CallbackManager
from llama_index.core.llms.callbacks import (
llm_chat_callback,
llm_completion_callback,
)
from llama_index.core.base.llms.types import LLMMetadata
from .providers import get_provider_config
logger = logging.getLogger(__name__)
class CloudflareAIGatewayError(Exception):
    """Base exception for Cloudflare AI Gateway errors.

    All gateway-specific errors raised by this module derive from this class,
    so callers can catch it to handle any gateway failure uniformly.
    """
class CloudflareAIGatewayUnauthorizedError(CloudflareAIGatewayError):
    """Raised when AI Gateway authentication fails (HTTP 401, error code 2009)."""
class CloudflareAIGatewayDoesNotExistError(CloudflareAIGatewayError):
    """Raised when the named AI Gateway does not exist (HTTP 400, error code 2001)."""
class CloudflareAIGatewayOptions(BaseModel):
    """Options for Cloudflare AI Gateway requests.

    These are translated into ``cf-*`` request headers when a request is sent
    through the gateway (see ``CloudflareAIGateway._parse_options_to_headers``).
    """

    cache_key: Optional[str] = Field(default=None, description="Custom cache key")
    cache_ttl: Optional[int] = Field(
        default=None, ge=0, description="Cache time-to-live in seconds"
    )
    skip_cache: bool = Field(default=False, description="Bypass caching")
    metadata: Optional[Dict[str, Union[str, int, bool, None]]] = Field(
        default=None, description="Custom metadata for the request"
    )
    collect_log: Optional[bool] = Field(
        default=None, description="Enable/disable log collection"
    )
    event_id: Optional[str] = Field(default=None, description="Custom event identifier")
    request_timeout_ms: Optional[int] = Field(
        default=None, ge=0, description="Request timeout in milliseconds"
    )
class AIGatewayClientWrapper:
    """Wrapper for HTTP clients that intercepts requests and routes through AI Gateway."""

    def __init__(self, gateway_instance, original_client, llm_instance):
        self.gateway = gateway_instance
        self.original_client = original_client
        self.llm = llm_instance
        # Resolve the provider adapter for this LLM up front so that an
        # unsupported provider fails at wrap time, not mid-request.
        self.provider_config = get_provider_config(llm_instance)
        if not self.provider_config:
            raise CloudflareAIGatewayError(
                f"Unsupported provider for LLM: {type(self.llm).__name__}"
            )

    def __getattr__(self, name):
        """Delegate attribute access to the original client."""
        return getattr(self.original_client, name)

    def post(self, url: str, **kwargs):
        """Intercept POST requests and route through AI Gateway."""
        gateway_request = self._transform_request(url, kwargs)
        return self.gateway._make_ai_gateway_request(gateway_request)

    def _transform_request(self, url: str, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Transform request for AI Gateway format."""
        # The provider's native JSON body is forwarded untouched under
        # "query"; AI Gateway normalizes provider-specific formats itself.
        return {
            "provider": self.provider_config.name,
            "endpoint": self.provider_config.transform_endpoint(url),
            "headers": kwargs.get("headers", {}),
            "query": kwargs.get("json", {}),
        }
class CloudflareAIGateway(LLM):
    """
    Cloudflare AI Gateway LLM.

    This class intercepts requests to multiple LLM providers and routes them through
    Cloudflare AI Gateway for automatic fallback and load balancing.

    The key concept is that you provide multiple LLM instances (from different providers),
    and this class intercepts their requests, transforms them for AI Gateway, and
    delegates the actual LLM functionality to the first available provider.

    Args:
        llms: List of LLM instances to use (will be tried in order)
        account_id: Your Cloudflare account ID
        gateway: The name of your AI Gateway
        api_key: Your Cloudflare API key (optional if using binding)
        binding: Cloudflare AI Gateway binding (alternative to account_id/gateway/api_key)
        options: Request-level options for AI Gateway
        max_retries: Maximum number of retries for API calls
        timeout: Timeout for API requests in seconds
        callback_manager: Callback manager for observability
        default_headers: Default headers for API requests
        http_client: Custom httpx client
        async_http_client: Custom async httpx client
    """

    llms: List[LLM] = Field(
        description="List of LLM instances to use (will be tried in order)"
    )
    account_id: Optional[str] = Field(
        default=None, description="Your Cloudflare account ID"
    )
    gateway: Optional[str] = Field(
        default=None, description="The name of your AI Gateway"
    )
    api_key: Optional[str] = Field(default=None, description="Your Cloudflare API key")
    binding: Optional[Any] = Field(
        default=None, description="Cloudflare AI Gateway binding"
    )
    options: Optional[CloudflareAIGatewayOptions] = Field(
        default=None, description="Request-level options for AI Gateway"
    )
    max_retries: int = Field(
        default=3, description="Maximum number of retries for API calls", ge=0
    )
    timeout: float = Field(
        default=60.0, description="Timeout for API requests in seconds", ge=0
    )
    default_headers: Optional[Dict[str, str]] = Field(
        default=None, description="Default headers for API requests"
    )
    http_client: Optional[httpx.Client] = Field(
        default=None, description="Custom httpx client"
    )
    async_http_client: Optional[httpx.AsyncClient] = Field(
        default=None, description="Custom async httpx client"
    )

    _client: Optional[httpx.Client] = PrivateAttr()
    _aclient: Optional[httpx.AsyncClient] = PrivateAttr()
    # Index of the LLM currently being used; advanced by _try_next_llm.
    _current_llm_index: int = PrivateAttr(default=0)
    # Original (unwrapped) HTTP clients, keyed by LLM position in `llms`.
    _original_clients: Dict[int, Any] = PrivateAttr(default_factory=dict)
    _original_async_clients: Dict[int, Any] = PrivateAttr(default_factory=dict)

    def __init__(
        self,
        llms: List[LLM],
        account_id: Optional[str] = None,
        gateway: Optional[str] = None,
        api_key: Optional[str] = None,
        binding: Optional[Any] = None,
        options: Optional[CloudflareAIGatewayOptions] = None,
        max_retries: int = 3,
        timeout: float = 60.0,
        callback_manager: Optional[CallbackManager] = None,
        default_headers: Optional[Dict[str, str]] = None,
        http_client: Optional[httpx.Client] = None,
        async_http_client: Optional[httpx.AsyncClient] = None,
        **kwargs: Any,
    ) -> None:
        # Validate configuration: either a binding object or the full
        # account_id + gateway + api_key triple is required.
        if not llms:
            raise ValueError("At least one LLM must be provided")

        if binding is None:
            if not account_id or not gateway:
                raise ValueError(
                    "Either binding or account_id+gateway must be provided"
                )
            if not api_key:
                raise ValueError("api_key is required when not using binding")

        super().__init__(
            llms=llms,
            account_id=account_id,
            gateway=gateway,
            api_key=api_key,
            binding=binding,
            options=options,
            max_retries=max_retries,
            timeout=timeout,
            callback_manager=callback_manager,
            default_headers=default_headers,
            http_client=http_client,
            async_http_client=async_http_client,
            **kwargs,
        )

        self._client = http_client
        self._aclient = async_http_client

        # Inject AI Gateway client into each LLM
        self._inject_ai_gateway_clients()

    def _inject_ai_gateway_clients(self) -> None:
        """Inject AI Gateway client into each LLM to intercept requests."""
        for i, llm in enumerate(self.llms):
            # Store original client if it exists, then swap in the wrapper so
            # the LLM's POSTs are rerouted through the gateway.
            if hasattr(llm, "_client") and llm._client is not None:
                self._original_clients[i] = llm._client
                llm._client = AIGatewayClientWrapper(self, llm._client, llm)

            # Store original async client if it exists
            if hasattr(llm, "_aclient") and llm._aclient is not None:
                self._original_async_clients[i] = llm._aclient
                llm._aclient = AIGatewayClientWrapper(self, llm._aclient, llm)

    def _get_client(self) -> httpx.Client:
        """Get (lazily creating) the HTTP client used to call the gateway."""
        if self._client is None:
            self._client = httpx.Client(
                timeout=self.timeout,
                headers=self.default_headers,
            )
        return self._client

    def _get_aclient(self) -> httpx.AsyncClient:
        """Get (lazily creating) the async HTTP client used to call the gateway."""
        if self._aclient is None:
            self._aclient = httpx.AsyncClient(
                timeout=self.timeout,
                headers=self.default_headers,
            )
        return self._aclient

    def _parse_options_to_headers(
        self, options: Optional[CloudflareAIGatewayOptions]
    ) -> Dict[str, str]:
        """Translate request-level options into Cloudflare AI Gateway headers."""
        headers = {}
        if options is None:
            return headers

        # FIX: cache_key was declared on CloudflareAIGatewayOptions but never
        # forwarded; send it as the documented cf-aig-cache-key header.
        if options.cache_key:
            headers["cf-aig-cache-key"] = options.cache_key
        if options.skip_cache:
            headers["cf-skip-cache"] = "true"
        if options.cache_ttl is not None:
            headers["cf-cache-ttl"] = str(options.cache_ttl)
        if options.metadata:
            headers["cf-aig-metadata"] = json.dumps(options.metadata)
        if options.collect_log is not None:
            headers["cf-aig-collect-log"] = str(options.collect_log).lower()
        if options.event_id:
            headers["cf-aig-event-id"] = options.event_id
        if options.request_timeout_ms is not None:
            headers["cf-aig-request-timeout-ms"] = str(options.request_timeout_ms)

        return headers

    def _get_current_llm(self) -> LLM:
        """Get the current LLM to use."""
        if not self.llms:
            raise CloudflareAIGatewayError("No LLMs configured")
        return self.llms[self._current_llm_index % len(self.llms)]

    def _try_next_llm(self) -> None:
        """Advance to the next LLM; raise once every LLM has been exhausted."""
        self._current_llm_index += 1
        if self._current_llm_index >= len(self.llms):
            raise CloudflareAIGatewayError("All LLMs failed")

    def _make_ai_gateway_request(self, request_body: Dict[str, Any]) -> httpx.Response:
        """Make request to AI Gateway."""
        if self.binding is not None:
            # Use binding - this would need to be implemented based on the binding interface
            raise NotImplementedError("Binding support not yet implemented")
        else:
            # Use API
            headers = self._parse_options_to_headers(self.options)
            headers.update(
                {
                    "Content-Type": "application/json",
                    "cf-aig-authorization": f"Bearer {self.api_key}",
                }
            )

            url = (
                f"https://gateway.ai.cloudflare.com/v1/{self.account_id}/{self.gateway}"
            )

            client = self._get_client()
            response = client.post(url, json=request_body, headers=headers)

            # Handle response
            self._handle_ai_gateway_response(response)
            return response

    def _handle_ai_gateway_response(self, response: httpx.Response) -> None:
        """Handle AI Gateway response and check for errors.

        Raises:
            CloudflareAIGatewayDoesNotExistError: gateway name is unknown (code 2001).
            CloudflareAIGatewayUnauthorizedError: bad/missing gateway api_key (code 2009).
            CloudflareAIGatewayError: any other non-200 response.
        """
        if response.status_code == 400:
            try:
                result = response.json()
                if (
                    not result.get("success")
                    and result.get("error")
                    and result["error"][0].get("code") == 2001
                ):
                    raise CloudflareAIGatewayDoesNotExistError(
                        "This AI gateway does not exist"
                    )
            except (ValueError, KeyError, IndexError):
                # Body was not the expected JSON error envelope; fall through
                # to the generic error below.
                pass
            raise CloudflareAIGatewayError(f"Bad request: {response.text}")
        elif response.status_code == 401:
            try:
                result = response.json()
                if (
                    not result.get("success")
                    and result.get("error")
                    and result["error"][0].get("code") == 2009
                ):
                    raise CloudflareAIGatewayUnauthorizedError(
                        "Your AI Gateway has authentication active, but you didn't provide a valid apiKey"
                    )
            except (ValueError, KeyError, IndexError):
                pass
            raise CloudflareAIGatewayError("Unauthorized")
        elif response.status_code != 200:
            raise CloudflareAIGatewayError(
                f"Request failed with status {response.status_code}: {response.text}"
            )

    @classmethod
    def class_name(cls) -> str:
        return "CloudflareAIGateway"

    @property
    def metadata(self) -> LLMMetadata:
        """Get LLM metadata from the current LLM."""
        current_llm = self._get_current_llm()
        return current_llm.metadata

    @llm_chat_callback()
    def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        """Chat with the AI Gateway by delegating to the current LLM."""
        # Loop terminates because _try_next_llm raises once all LLMs failed.
        while True:
            try:
                current_llm = self._get_current_llm()
                return current_llm.chat(messages, **kwargs)
            except Exception as e:
                # Try next LLM on failure
                logger.warning(
                    f"It seems that the current LLM is not working with the AI Gateway. Error: {e}"
                )
                self._try_next_llm()
                continue

    @llm_chat_callback()
    def stream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseGen:
        """Stream chat with the AI Gateway by delegating to the current LLM."""
        # Only failures while *creating* the generator trigger fallback;
        # errors raised mid-stream propagate to the consumer.
        while True:
            try:
                current_llm = self._get_current_llm()
                return current_llm.stream_chat(messages, **kwargs)
            except Exception as e:
                # Try next LLM on failure
                logger.warning(
                    f"It seems that the current LLM is not working with the AI Gateway. Error: {e}"
                )
                self._try_next_llm()
                continue

    @llm_completion_callback()
    def complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        """Complete a prompt using the AI Gateway by delegating to the current LLM."""
        while True:
            try:
                current_llm = self._get_current_llm()
                return current_llm.complete(prompt, formatted, **kwargs)
            except Exception as e:
                # Try next LLM on failure
                logger.warning(
                    f"It seems that the current LLM is not working with the AI Gateway. Error: {e}"
                )
                self._try_next_llm()
                continue

    @llm_completion_callback()
    def stream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseGen:
        """Stream complete a prompt using the AI Gateway by delegating to the current LLM."""
        while True:
            try:
                current_llm = self._get_current_llm()
                return current_llm.stream_complete(prompt, formatted, **kwargs)
            except Exception as e:
                # Try next LLM on failure
                logger.warning(
                    f"It seems that the current LLM is not working with the AI Gateway. Error: {e}"
                )
                self._try_next_llm()
                continue

    @llm_chat_callback()
    async def achat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponse:
        """Async chat with the AI Gateway by delegating to the current LLM."""
        while True:
            try:
                current_llm = self._get_current_llm()
                return await current_llm.achat(messages, **kwargs)
            except Exception as e:
                # Try next LLM on failure
                logger.warning(
                    f"It seems that the current LLM is not working with the AI Gateway. Error: {e}"
                )
                self._try_next_llm()
                continue

    @llm_chat_callback()
    async def astream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseAsyncGen:
        """Async stream chat with the AI Gateway by delegating to the current LLM."""
        while True:
            try:
                current_llm = self._get_current_llm()
                # FIX: await the delegate call so callers receive the async
                # generator itself rather than an un-awaited coroutine; this
                # also lets setup failures trigger the fallback below.
                return await current_llm.astream_chat(messages, **kwargs)
            except Exception as e:
                # Try next LLM on failure
                logger.warning(
                    f"It seems that the current LLM is not working with the AI Gateway. Error: {e}"
                )
                self._try_next_llm()
                continue

    @llm_completion_callback()
    async def acomplete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        """Async complete a prompt using the AI Gateway by delegating to the current LLM."""
        while True:
            try:
                current_llm = self._get_current_llm()
                return await current_llm.acomplete(prompt, formatted, **kwargs)
            except Exception as e:
                # Try next LLM on failure
                logger.warning(
                    f"It seems that the current LLM is not working with the AI Gateway. Error: {e}"
                )
                self._try_next_llm()
                continue

    @llm_completion_callback()
    async def astream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseAsyncGen:
        """Async stream complete a prompt using the AI Gateway by delegating to the current LLM."""
        while True:
            try:
                current_llm = self._get_current_llm()
                # FIX: await so the async generator (not a coroutine) is
                # returned, mirroring astream_chat.
                return await current_llm.astream_complete(prompt, formatted, **kwargs)
            except Exception as e:
                # Try next LLM on failure
                logger.warning(
                    f"It seems that the current LLM is not working with the AI Gateway. Error: {e}"
                )
                self._try_next_llm()
                continue
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-cloudflare-ai-gateway/llama_index/llms/cloudflare_ai_gateway/base.py",
"license": "MIT License",
"lines": 439,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-cloudflare-ai-gateway/tests/test_cloudflare_ai_gateway.py | import os
import pytest
from dotenv import load_dotenv
from llama_index.core.llms import ChatMessage
from llama_index.llms.cloudflare_ai_gateway import CloudflareAIGateway
# Load .env file
load_dotenv()
@pytest.mark.skipif(
    not all(
        [
            os.getenv("OPENAI_API_KEY"),
            os.getenv("ANTHROPIC_API_KEY"),
            os.getenv("CLOUDFLARE_ACCOUNT_ID"),
            os.getenv("CLOUDFLARE_API_KEY"),
            os.getenv("CLOUDFLARE_GATEWAY"),
        ]
    ),
    reason="Missing required environment variables for real test",
)
def test_real_cloudflare_ai_gateway_with_openai_and_claude():
    """Real test using OpenAI and Claude with Cloudflare AI Gateway fallback.

    Live integration test: it makes real network calls and is skipped unless
    all provider and Cloudflare credentials are present in the environment.
    """
    # Imported lazily so test collection doesn't require the optional packages.
    from llama_index.llms.openai import OpenAI
    from llama_index.llms.anthropic import Anthropic

    # Create real LLM instances
    openai_llm = OpenAI(
        model="gpt-4o-mini",
        api_key=os.getenv("OPENAI_API_KEY"),
    )

    anthropic_llm = Anthropic(
        model="claude-3-5-sonnet-20241022",
        api_key=os.getenv("ANTHROPIC_API_KEY"),
    )

    # Create Cloudflare AI Gateway LLM with fallback order: OpenAI first, then Claude
    llm = CloudflareAIGateway(
        llms=[openai_llm, anthropic_llm],  # Try OpenAI first, then Claude
        account_id=os.getenv("CLOUDFLARE_ACCOUNT_ID"),
        gateway=os.getenv("CLOUDFLARE_GATEWAY"),
        api_key=os.getenv("CLOUDFLARE_API_KEY"),
    )

    # Test chat - Cloudflare AI Gateway will try OpenAI first, then Claude if needed
    messages = [ChatMessage(role="user", content="What is 2+2?")]
    response = llm.chat(messages)

    assert response.message.content is not None
    assert len(response.message.content) > 0
    assert response.message.role == "assistant"

    # Test completion - same fallback behavior
    completion_response = llm.complete("Write a short sentence about AI.")

    assert completion_response.text is not None
    assert len(completion_response.text) > 0

    print("OpenAI/Claude fallback test successful!")
    print(f"Chat response: {response.message.content}")
    print(f"Completion response: {completion_response.text}")
    print(
        "Note: Cloudflare AI Gateway automatically tried OpenAI first, then Claude if needed"
    )
@pytest.mark.skipif(
    not all(
        [
            os.getenv("OPENAI_API_KEY"),
            os.getenv("ANTHROPIC_API_KEY"),
            os.getenv("CLOUDFLARE_ACCOUNT_ID"),
            os.getenv("CLOUDFLARE_API_KEY"),
            os.getenv("CLOUDFLARE_GATEWAY"),
        ]
    ),
    reason="Missing required environment variables for real test",
)
def test_cloudflare_ai_gateway_fallback_when_openai_fails():
    """Test Cloudflare AI Gateway fallback when OpenAI fails.

    Live integration test: OpenAI gets a deliberately invalid key so the
    gateway must fall back to the (valid) Anthropic provider.
    """
    from llama_index.llms.openai import OpenAI
    from llama_index.llms.anthropic import Anthropic

    # Create real LLM instances
    openai_llm = OpenAI(
        model="gpt-4o-mini",
        api_key="invalid-openai-key",  # Invalid key to simulate OpenAI failure
    )

    anthropic_llm = Anthropic(
        model="claude-3-5-sonnet-20241022",
        api_key=os.getenv("ANTHROPIC_API_KEY"),  # Valid Claude key
    )

    # Create Cloudflare AI Gateway LLM with fallback order: OpenAI first, then Claude
    llm = CloudflareAIGateway(
        llms=[openai_llm, anthropic_llm],  # Try OpenAI first (will fail), then Claude
        account_id=os.getenv("CLOUDFLARE_ACCOUNT_ID"),
        gateway=os.getenv("CLOUDFLARE_GATEWAY"),
        api_key=os.getenv("CLOUDFLARE_API_KEY"),
    )

    # Test chat - OpenAI should fail, then fallback to Claude
    messages = [ChatMessage(role="user", content="What is 2+2?")]
    response = llm.chat(messages)

    assert response.message.content is not None
    assert len(response.message.content) > 0
    assert response.message.role == "assistant"

    # Test completion - same fallback behavior
    completion_response = llm.complete("Write a short sentence about AI.")

    assert completion_response.text is not None
    assert len(completion_response.text) > 0

    print("Fallback test successful!")
    print(f"Chat response (from Claude): {response.message.content}")
    print(f"Completion response (from Claude): {completion_response.text}")
    print(
        "Note: OpenAI failed with invalid key, but Claude handled the request successfully"
    )
@pytest.mark.skipif(
    not all(
        [
            os.getenv("OPENAI_API_KEY"),
            os.getenv("ANTHROPIC_API_KEY"),
            os.getenv("CLOUDFLARE_ACCOUNT_ID"),
            os.getenv("CLOUDFLARE_API_KEY"),
            os.getenv("CLOUDFLARE_GATEWAY"),
        ]
    ),
    reason="Missing required environment variables for real test",
)
def test_cloudflare_ai_gateway_fallback_when_both_fail():
    """Test Cloudflare AI Gateway when both providers fail."""
    from llama_index.llms.openai import OpenAI
    from llama_index.llms.anthropic import Anthropic

    # Both providers get bogus credentials, so every attempt must fail.
    broken_openai = OpenAI(
        model="gpt-4o-mini",
        api_key="invalid-openai-key",
    )
    broken_claude = Anthropic(
        model="claude-3-5-sonnet-20241022",
        api_key="invalid-anthropic-key",
    )
    gateway_llm = CloudflareAIGateway(
        llms=[broken_openai, broken_claude],
        account_id=os.getenv("CLOUDFLARE_ACCOUNT_ID"),
        gateway=os.getenv("CLOUDFLARE_GATEWAY"),
        api_key=os.getenv("CLOUDFLARE_API_KEY"),
    )

    # With no working provider left, chat must surface an error to the caller.
    with pytest.raises(Exception):  # Should raise an error when both providers fail
        gateway_llm.chat([ChatMessage(role="user", content="What is 2+2?")])
    print("Both providers failed as expected - error handling works correctly")
@pytest.mark.skipif(
    not all(
        [
            os.getenv("CLOUDFLARE_ACCOUNT_ID"),
            os.getenv("CLOUDFLARE_API_KEY"),
            os.getenv("CLOUDFLARE_GATEWAY"),
        ]
    ),
    reason="Missing required Cloudflare environment variables",
)
def test_cloudflare_ai_gateway_connection():
    """Test basic Cloudflare AI Gateway connection."""
    import httpx

    cf_account = os.getenv("CLOUDFLARE_ACCOUNT_ID")
    cf_key = os.getenv("CLOUDFLARE_API_KEY")
    cf_gateway = os.getenv("CLOUDFLARE_GATEWAY")

    print("Testing connection to Cloudflare AI Gateway:")
    print(f"Account ID: {cf_account}")
    print(f"Gateway: {cf_gateway}")
    # Only a short key prefix is ever shown.
    if cf_key:
        print(f"API Key: {cf_key[:10]}...")
    else:
        print("None")

    # Hit the gateway's universal endpoint directly with one probe request.
    gateway_url = f"https://gateway.ai.cloudflare.com/v1/{cf_account}/{cf_gateway}"
    request_headers = {
        "Content-Type": "application/json",
        "cf-aig-authorization": f"Bearer {cf_key}",
    }
    probe_payload = [
        {
            "endpoint": "chat/completions",
            "headers": {"Content-Type": "application/json"},
            "provider": "openai",
            "query": {
                "model": "gpt-4o-mini",
                "messages": [{"role": "user", "content": "Hello"}],
                "max_tokens": 10,
            },
        }
    ]
    try:
        with httpx.Client(timeout=30.0) as http_client:
            resp = http_client.post(
                gateway_url, json=probe_payload, headers=request_headers
            )
            print(f"Response status: {resp.status_code}")
            print(f"Response headers: {dict(resp.headers)}")
            if resp.status_code == 200:
                print("[PASS] Cloudflare AI Gateway connection successful!")
                payload = resp.json()
                print(f"Response: {payload}")
            elif resp.status_code == 401:
                print(
                    "[FAIL] Authentication failed - check your API key and permissions"
                )
                print(f"Response: {resp.text}")
            elif resp.status_code == 404:
                print(
                    "[FAIL] Gateway not found - check your account ID and gateway name"
                )
                print(f"Response: {resp.text}")
            else:
                print(f"[FAIL] Unexpected status code: {resp.status_code}")
                print(f"Response: {resp.text}")
    except Exception as err:
        print(f"[FAIL] Connection error: {err}")
        raise
@pytest.mark.skipif(
    not all(
        [
            os.getenv("OPENAI_API_KEY"),
            os.getenv("CLOUDFLARE_ACCOUNT_ID"),
            os.getenv("CLOUDFLARE_API_KEY"),
            os.getenv("CLOUDFLARE_GATEWAY"),
        ]
    ),
    reason="Missing required environment variables for comprehensive test",
)
def test_cloudflare_ai_gateway_comprehensive_methods():
    """Comprehensive test of all Cloudflare AI Gateway methods with single OpenAI LLM.

    Exercises chat, complete, both streaming variants, the metadata property,
    class_name, a system-message conversation, and per-call kwargs
    (temperature, max_tokens) against a live gateway, then prints a summary
    table of (test name, status, truncated sample) rows.
    """
    from llama_index.llms.openai import OpenAI

    # Create single OpenAI LLM instance
    openai_llm = OpenAI(
        model="gpt-4o-mini",
        api_key=os.getenv("OPENAI_API_KEY"),
    )
    # Create Cloudflare AI Gateway LLM with single OpenAI LLM
    llm = CloudflareAIGateway(
        llms=[openai_llm],  # Single OpenAI LLM
        account_id=os.getenv("CLOUDFLARE_ACCOUNT_ID"),
        gateway=os.getenv("CLOUDFLARE_GATEWAY"),
        api_key=os.getenv("CLOUDFLARE_API_KEY"),
    )
    # Store test results as (test name, status, truncated sample) tuples
    test_results = []
    # Test 1: Basic chat method
    print("Testing chat method...")
    messages = [ChatMessage(role="user", content="What is 2+2?")]
    chat_response = llm.chat(messages)
    assert chat_response.message.content is not None
    assert len(chat_response.message.content) > 0
    assert chat_response.message.role == "assistant"
    test_results.append(
        ("Basic Chat", "PASS", chat_response.message.content[:50] + "...")
    )
    # Test 2: Basic completion method
    print("Testing completion method...")
    completion_response = llm.complete("Write a short sentence about AI.")
    assert completion_response.text is not None
    assert len(completion_response.text) > 0
    test_results.append(
        ("Basic Completion", "PASS", completion_response.text[:50] + "...")
    )
    # Test 3: Stream chat method
    print("Testing stream chat method...")
    stream_chat_response = llm.stream_chat(messages)
    stream_chat_content = ""
    # NOTE(review): chunk/delta shape appears to vary (object with .content,
    # plain str, or chunk-level .content) — the branches below cover all three.
    for chunk in stream_chat_response:
        if hasattr(chunk, "delta") and chunk.delta:
            if hasattr(chunk.delta, "content") and chunk.delta.content:
                stream_chat_content += chunk.delta.content
            elif isinstance(chunk.delta, str):
                stream_chat_content += chunk.delta
        elif hasattr(chunk, "content") and chunk.content:
            stream_chat_content += chunk.content
    assert len(stream_chat_content) > 0
    test_results.append(("Stream Chat", "PASS", stream_chat_content[:50] + "..."))
    # Test 4: Stream completion method
    print("Testing stream completion method...")
    stream_completion_response = llm.stream_complete(
        "Write a short sentence about technology."
    )
    stream_completion_content = ""
    # Same defensive accumulation as above, plus a bare-str chunk fallback.
    for chunk in stream_completion_response:
        if hasattr(chunk, "delta") and chunk.delta:
            if isinstance(chunk.delta, str):
                stream_completion_content += chunk.delta
            elif hasattr(chunk.delta, "content") and chunk.delta.content:
                stream_completion_content += chunk.delta.content
        elif hasattr(chunk, "content") and chunk.content:
            stream_completion_content += chunk.content
        elif isinstance(chunk, str):
            stream_completion_content += chunk
    assert len(stream_completion_content) > 0
    test_results.append(
        ("Stream Completion", "PASS", stream_completion_content[:50] + "...")
    )
    # Test 5: Metadata property
    print("Testing metadata property...")
    metadata = llm.metadata
    assert metadata is not None
    test_results.append(("Metadata", "PASS", str(metadata.model_name)))
    # Test 6: Class name
    print("Testing class name...")
    class_name = llm.class_name()
    assert class_name == "CloudflareAIGateway"
    test_results.append(("Class Name", "PASS", class_name))
    # Test 7: Chat with system message
    print("Testing chat with system message...")
    system_messages = [
        ChatMessage(
            role="system",
            content="You are a helpful assistant that always responds with 'Hello from AI Gateway!'",
        ),
        ChatMessage(role="user", content="What should you say?"),
    ]
    system_chat_response = llm.chat(system_messages)
    assert system_chat_response.message.content is not None
    assert len(system_chat_response.message.content) > 0
    test_results.append(
        ("System Chat", "PASS", system_chat_response.message.content[:50] + "...")
    )
    # Test 8: Completion with temperature parameter
    print("Testing completion with temperature parameter...")
    temp_completion_response = llm.complete("Write a creative story.", temperature=0.8)
    assert temp_completion_response.text is not None
    assert len(temp_completion_response.text) > 0
    test_results.append(
        ("Temperature Completion", "PASS", temp_completion_response.text[:50] + "...")
    )
    # Test 9: Chat with max_tokens parameter
    print("Testing chat with max_tokens parameter...")
    max_tokens_messages = [
        ChatMessage(role="user", content="Explain quantum computing in detail.")
    ]
    max_tokens_chat_response = llm.chat(max_tokens_messages, max_tokens=50)
    assert max_tokens_chat_response.message.content is not None
    assert len(max_tokens_chat_response.message.content) > 0
    test_results.append(
        (
            "Max Tokens Chat",
            "PASS",
            max_tokens_chat_response.message.content[:50] + "...",
        )
    )
    # Print results table
    print("\n" + "=" * 80)
    print("CLOUDFLARE AI GATEWAY COMPREHENSIVE TEST RESULTS")
    print("=" * 80)
    print(f"{'Test Method':<25} {'Status':<10} {'Sample Response'}")
    print("-" * 80)
    for test_name, status, sample in test_results:
        print(f"{test_name:<25} {status:<10} {sample}")
    print("-" * 80)
    print(f"Total Tests: {len(test_results)} | All Tests: PASS")
    print("=" * 80)
    print("🎉 Cloudflare AI Gateway is working correctly with all methods!")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-cloudflare-ai-gateway/tests/test_cloudflare_ai_gateway.py",
"license": "MIT License",
"lines": 344,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-confluence/llama_index/readers/confluence/event.py | from enum import Enum
from llama_index.core.schema import Document
from llama_index.core.instrumentation.events.base import BaseEvent
class FileType(Enum):
    """Canonical categories for Confluence attachments/files handled by the reader."""

    IMAGE = "image"
    DOCUMENT = "document"
    TEXT = "text"
    HTML = "html"
    CSV = "csv"
    MARKDOWN = "md"  # NOTE: value is the "md" extension, unlike the other full-word values
    SPREADSHEET = "spreadsheet"
    PRESENTATION = "presentation"
    PDF = "pdf"
    UNKNOWN = "unknown"  # fallback when the type cannot be determined
# LlamaIndex instrumentation events
class TotalPagesToProcessEvent(BaseEvent):
    """Event emitted when the total number of pages to process is determined."""

    # Number of Confluence pages the reader will attempt to process.
    total_pages: int

    @classmethod
    def class_name(cls) -> str:
        """Return the stable identifier used by the instrumentation layer."""
        return "TotalPagesToProcessEvent"
class PageDataFetchStartedEvent(BaseEvent):
    """Event emitted when processing of a page begins."""

    # Confluence page identifier being processed.
    page_id: str

    @classmethod
    def class_name(cls) -> str:
        """Return the stable identifier used by the instrumentation layer."""
        return "PageDataFetchStartedEvent"
class PageDataFetchCompletedEvent(BaseEvent):
    """Event emitted when a page is successfully processed."""

    # Confluence page identifier that finished processing.
    page_id: str
    # The Document produced from the page content.
    document: Document

    @classmethod
    def class_name(cls) -> str:
        """Return the stable identifier used by the instrumentation layer."""
        return "PageDataFetchCompletedEvent"
class PageSkippedEvent(BaseEvent):
    """Event emitted when a page is skipped due to callback decision."""

    # Confluence page identifier that was skipped.
    page_id: str

    @classmethod
    def class_name(cls) -> str:
        """Return the stable identifier used by the instrumentation layer."""
        return "PageSkippedEvent"
class PageFailedEvent(BaseEvent):
    """Event emitted when page processing fails."""

    # Confluence page identifier that failed.
    page_id: str
    # Human-readable description of the failure.
    error: str

    @classmethod
    def class_name(cls) -> str:
        """Return the stable identifier used by the instrumentation layer."""
        return "PageFailedEvent"
class AttachmentProcessingStartedEvent(BaseEvent):
    """Event emitted when attachment processing begins."""

    # Page the attachment belongs to.
    page_id: str
    # Attachment identity and descriptive metadata.
    attachment_id: str
    attachment_name: str
    attachment_type: str
    attachment_size: int  # size in bytes — TODO confirm unit against caller
    attachment_link: str

    @classmethod
    def class_name(cls) -> str:
        """Return the stable identifier used by the instrumentation layer."""
        return "AttachmentProcessingStartedEvent"
class AttachmentProcessedEvent(BaseEvent):
    """Event emitted when an attachment is successfully processed."""

    # Page the attachment belongs to.
    page_id: str
    # Attachment identity and descriptive metadata.
    attachment_id: str
    attachment_name: str
    attachment_type: str
    attachment_size: int  # size in bytes — TODO confirm unit against caller
    attachment_link: str

    @classmethod
    def class_name(cls) -> str:
        """Return the stable identifier used by the instrumentation layer."""
        return "AttachmentProcessedEvent"
class AttachmentSkippedEvent(BaseEvent):
    """Event emitted when an attachment is skipped."""

    # Page the attachment belongs to.
    page_id: str
    # Attachment identity and descriptive metadata.
    attachment_id: str
    attachment_name: str
    attachment_type: str
    attachment_size: int  # size in bytes — TODO confirm unit against caller
    attachment_link: str
    # Why the attachment was skipped (e.g. callback decision).
    reason: str

    @classmethod
    def class_name(cls) -> str:
        """Return the stable identifier used by the instrumentation layer."""
        return "AttachmentSkippedEvent"
class AttachmentFailedEvent(BaseEvent):
    """Event emitted when attachment processing fails."""

    # Page the attachment belongs to.
    page_id: str
    # Attachment identity and descriptive metadata.
    attachment_id: str
    attachment_name: str
    attachment_type: str
    attachment_size: int  # size in bytes — TODO confirm unit against caller
    attachment_link: str
    # Human-readable description of the failure.
    error: str

    @classmethod
    def class_name(cls) -> str:
        """Return the stable identifier used by the instrumentation layer."""
        return "AttachmentFailedEvent"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-confluence/llama_index/readers/confluence/event.py",
"license": "MIT License",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-confluence/tests/run_basic_tests.py | #!/usr/bin/env python3
"""
Simple test runner to verify the new ConfluenceReader features.
Run this script to test the new functionality without requiring pytest installation.
"""
import sys
import os
import traceback
from unittest.mock import MagicMock
# Add the package to the path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
def run_basic_tests() -> bool:
    """Run basic tests for new features without pytest dependency.

    Executes five sequential checks (custom-folder validation, custom parsers,
    callbacks, event system, error handling), printing progress as it goes.

    Returns:
        True if every check passed, False on the first failure.
    """
    print("Testing ConfluenceReader new features...")
    # Import inside the function so a broken install is reported as a test
    # failure rather than crashing the script at import time.
    try:
        from llama_index.readers.confluence import ConfluenceReader
        from llama_index.readers.confluence.event import FileType
        from llama_index.core.instrumentation import get_dispatcher
        from llama_index.core.instrumentation.event_handlers import BaseEventHandler

        print("✓ Successfully imported ConfluenceReader and events")
    except ImportError as e:
        print(f"✗ Failed to import: {e}")
        return False
    # Test 1: Custom folder validation — custom_folder alone must be rejected.
    print("\n1. Testing custom folder validation...")
    try:
        ConfluenceReader(
            base_url="https://example.atlassian.net/wiki",
            api_token="test_token",
            custom_folder="/tmp/test",
        )
        print(
            "✗ Should have raised ValueError for custom_folder without custom_parsers"
        )
        return False
    except ValueError as e:
        # Only the expected validation message counts as success.
        if "custom_folder can only be used when custom_parsers are provided" in str(e):
            print(
                "✓ Correctly raised ValueError for custom_folder without custom_parsers"
            )
        else:
            print(f"✗ Wrong error message: {e}")
            return False
    except Exception as e:
        print(f"✗ Unexpected error: {e}")
        return False
    # Test 2: Custom parsers with folder — both options together must work.
    print("\n2. Testing custom parsers with custom folder...")
    try:
        mock_parser = MagicMock()
        reader = ConfluenceReader(
            base_url="https://example.atlassian.net/wiki",
            api_token="test_token",
            custom_parsers={FileType.PDF: mock_parser},
            custom_folder="/tmp/test",
        )
        assert reader.custom_folder == "/tmp/test"
        assert reader.custom_parser_manager is not None
        print("✓ Custom parsers with custom folder works correctly")
    except Exception as e:
        print(f"✗ Failed: {e}")
        traceback.print_exc()
        return False
    # Test 3: Callbacks — both callback kinds are stored and behave as written.
    print("\n3. Testing callback functionality...")
    try:

        def attachment_filter(
            media_type: str, file_size: int, title: str
        ) -> tuple[bool, str]:
            if file_size > 1000000:
                return False, "Too large"
            return True, ""

        def document_filter(page_id: str) -> bool:
            return page_id != "skip_me"

        reader = ConfluenceReader(
            base_url="https://example.atlassian.net/wiki",
            api_token="test_token",
            process_attachment_callback=attachment_filter,
            process_document_callback=document_filter,
        )
        # Test callbacks
        should_process, reason = attachment_filter(
            "application/pdf", 2000000, "large.pdf"
        )
        assert should_process is False
        assert reason == "Too large"
        assert document_filter("normal_page") is True
        assert document_filter("skip_me") is False
        print("✓ Callbacks work correctly")
    except Exception as e:
        print(f"✗ Failed: {e}")
        traceback.print_exc()
        return False
    # Test 4: Event system — handlers can be registered and the reader
    # participates in the instrumentation span machinery.
    print("\n4. Testing event system...")
    try:
        reader = ConfluenceReader(
            base_url="https://example.atlassian.net/wiki", api_token="test_token"
        )
        events_received = []

        class TestEventHandler(BaseEventHandler):
            def handle(self, event):
                events_received.append(event.class_name())

        # Test that event system can be used
        dispatcher = get_dispatcher(__name__)
        event_handler = TestEventHandler()
        dispatcher.add_event_handler(event_handler)
        # Test that ConfluenceReader inherits from DispatcherSpanMixin
        from llama_index.core.instrumentation import DispatcherSpanMixin

        assert isinstance(reader, DispatcherSpanMixin)
        print("✓ Event system structure is correct")
        # Clean up so the handler does not leak into later runs.
        if event_handler in dispatcher.event_handlers:
            dispatcher.event_handlers.remove(event_handler)
    except Exception as e:
        print(f"✗ Failed: {e}")
        traceback.print_exc()
        return False
    # Test 5: Error handling — fail_on_error default and explicit override.
    print("\n5. Testing error handling...")
    try:
        reader1 = ConfluenceReader(
            base_url="https://example.atlassian.net/wiki", api_token="test_token"
        )
        assert reader1.fail_on_error is True  # Default
        reader2 = ConfluenceReader(
            base_url="https://example.atlassian.net/wiki",
            api_token="test_token",
            fail_on_error=False,
        )
        assert reader2.fail_on_error is False
        print("✓ Error handling settings work correctly")
    except Exception as e:
        print(f"✗ Failed: {e}")
        traceback.print_exc()
        return False
    print("\n🎉 All basic tests passed!")
    return True
if __name__ == "__main__":
    # Exit code mirrors the overall result: 0 on success, 1 on any failure.
    if run_basic_tests():
        print("\n✅ All tests passed successfully!")
        sys.exit(0)
    else:
        print("\n❌ Some tests failed")
        sys.exit(1)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-confluence/tests/run_basic_tests.py",
"license": "MIT License",
"lines": 146,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-confluence/tests/test_integration.py | """Integration test demonstrating all new ConfluenceReader features working together."""
from unittest.mock import MagicMock, patch
import tempfile
from llama_index.readers.confluence import ConfluenceReader
from llama_index.readers.confluence.event import (
FileType,
TotalPagesToProcessEvent,
PageDataFetchStartedEvent,
PageDataFetchCompletedEvent,
PageSkippedEvent,
PageFailedEvent,
AttachmentProcessingStartedEvent,
AttachmentProcessedEvent,
AttachmentSkippedEvent,
AttachmentFailedEvent,
)
from llama_index.core.schema import Document
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.instrumentation.event_handlers import BaseEventHandler
class TestIntegration:
    """Integration tests for all new features working together.

    Covers the combined use of custom parsers, callbacks, custom folder,
    fail_on_error, and the dispatcher-based event system.
    """

    @patch("llama_index.readers.confluence.html_parser.HtmlTextParser")
    def test_full_feature_integration(self, mock_html_parser_class):
        """Test all new features working together in a realistic scenario."""
        # The HTML parser is patched so page bodies convert to a fixed string.
        mock_text_maker = MagicMock()
        mock_text_maker.convert.return_value = "processed text content"
        mock_html_parser_class.return_value = mock_text_maker
        # Setup custom parser
        mock_parser = MagicMock()
        mock_parser.load_data.return_value = [
            Document(text="custom parsed content", doc_id="custom")
        ]

        # Setup callbacks
        def attachment_filter(
            media_type: str, file_size: int, title: str
        ) -> tuple[bool, str]:
            # Reject by filename keyword first, then by size.
            if "skip" in title.lower():
                return False, "Filename contains 'skip'"
            if file_size > 5000000:  # 5MB
                return False, "File too large"
            return True, ""

        def document_filter(page_id: str) -> bool:
            # Drafts (ids prefixed "draft_") are excluded from processing.
            return not page_id.startswith("draft_")

        # Setup event tracking using new event system
        events_log = []

        class TestEventHandler(BaseEventHandler):
            def handle(self, event):
                # Record a minimal summary of every event seen.
                events_log.append(
                    {
                        "class_name": event.class_name(),
                        "page_id": getattr(event, "page_id", None),
                        "attachment_name": getattr(event, "attachment_name", None),
                    }
                )

        # Create reader with all new features
        with tempfile.TemporaryDirectory() as temp_dir:
            reader = ConfluenceReader(
                base_url="https://example.atlassian.net/wiki",
                api_token="test_token",
                custom_parsers={FileType.PDF: mock_parser},
                custom_folder=temp_dir,
                process_attachment_callback=attachment_filter,
                process_document_callback=document_filter,
                fail_on_error=False,
            )
            # Subscribe to events using new event system
            dispatcher = get_dispatcher("llama_index.readers.confluence.base")
            event_handler = TestEventHandler()
            dispatcher.add_event_handler(event_handler)
            # Mock confluence client so no network calls happen.
            reader.confluence = MagicMock()
            # Test document processing
            normal_page = {
                "id": "normal_page",
                "title": "Normal Page",
                "status": "current",
                "body": {"export_view": {"value": "<p>Content</p>"}},
                "_links": {"webui": "/pages/123"},
            }
            draft_page = {
                "id": "draft_page_001",
                "title": "Draft Page",
                "status": "draft",
                "body": {"export_view": {"value": "<p>Draft content</p>"}},
                "_links": {"webui": "/pages/456"},
            }
            # Process normal page (should succeed)
            result1 = reader.process_page(normal_page, False, mock_text_maker)
            assert result1 is not None
            assert result1.doc_id == "normal_page"
            # Process draft page (should be skipped by callback)
            result2 = reader.process_page(draft_page, False, mock_text_maker)
            assert result2 is None  # Skipped by document callback
            # Verify events were logged
            assert len(events_log) >= 2  # At least page started and skipped events
            # Check that we have the expected event types
            event_class_names = [event["class_name"] for event in events_log]
            assert "PageDataFetchStartedEvent" in event_class_names
            assert "PageSkippedEvent" in event_class_names
            # Verify custom folder is set correctly
            assert reader.custom_folder == temp_dir
            assert reader.custom_parser_manager is not None
            # Verify callbacks are working
            should_process, reason = reader.process_attachment_callback(
                "application/pdf", 1000, "normal.pdf"
            )
            assert should_process is True
            should_process, reason = reader.process_attachment_callback(
                "application/pdf", 1000, "skip_this.pdf"
            )
            assert should_process is False
            assert "skip" in reason.lower()
            assert reader.process_document_callback("normal_page") is True
            assert (
                reader.process_document_callback("draft_something") is False
            )
            # Clean up: detach our handler from the shared dispatcher.
            if event_handler in dispatcher.event_handlers:
                dispatcher.event_handlers.remove(event_handler)

    def test_event_system_with_realistic_simulation(self):
        """Test event system with a realistic event flow simulation.

        Events are emitted manually (no Confluence calls) to verify that
        type-filtered handlers each see exactly the events they subscribe to.
        """
        reader = ConfluenceReader(
            base_url="https://example.atlassian.net/wiki", api_token="test_token"
        )
        # Track different types of events separately
        page_events = []
        attachment_events = []
        error_events = []

        class PageEventHandler(BaseEventHandler):
            def handle(self, event):
                if isinstance(
                    event,
                    (
                        PageDataFetchStartedEvent,
                        PageDataFetchCompletedEvent,
                        PageSkippedEvent,
                    ),
                ):
                    page_events.append(event)

        class AttachmentEventHandler(BaseEventHandler):
            def handle(self, event):
                if isinstance(
                    event,
                    (
                        AttachmentProcessingStartedEvent,
                        AttachmentProcessedEvent,
                        AttachmentSkippedEvent,
                    ),
                ):
                    attachment_events.append(event)

        class ErrorEventHandler(BaseEventHandler):
            def handle(self, event):
                if isinstance(event, (PageFailedEvent, AttachmentFailedEvent)):
                    error_events.append(event)

        # Subscribe to different event types using new event system
        dispatcher = get_dispatcher("llama_index.readers.confluence.base")
        page_handler = PageEventHandler()
        attachment_handler = AttachmentEventHandler()
        error_handler = ErrorEventHandler()
        dispatcher.add_event_handler(page_handler)
        dispatcher.add_event_handler(attachment_handler)
        dispatcher.add_event_handler(error_handler)
        # Simulate a realistic processing flow by manually emitting events
        # 1. Start processing pages
        dispatcher.event(TotalPagesToProcessEvent(total_pages=3))
        # 2. Process first page successfully
        dispatcher.event(PageDataFetchStartedEvent(page_id="page1"))
        dispatcher.event(
            AttachmentProcessingStartedEvent(
                page_id="page1",
                attachment_id="att1",
                attachment_name="doc1.pdf",
                attachment_type=FileType.PDF,
                attachment_size=1024,
                attachment_link="http://example.com/att1",
            )
        )
        dispatcher.event(
            AttachmentProcessedEvent(
                page_id="page1",
                attachment_id="att1",
                attachment_name="doc1.pdf",
                attachment_type=FileType.PDF,
                attachment_size=1024,
                attachment_link="http://example.com/att1",
            )
        )
        dispatcher.event(
            PageDataFetchCompletedEvent(
                page_id="page1", document=Document(text="content1", doc_id="page1")
            )
        )
        # 3. Skip second page
        dispatcher.event(PageSkippedEvent(page_id="page2"))
        # 4. Fail to process third page
        dispatcher.event(PageDataFetchStartedEvent(page_id="page3"))
        dispatcher.event(PageFailedEvent(page_id="page3", error="Network timeout"))
        # Verify event counts
        assert len(page_events) == 4  # 2 started, 1 completed, 1 skipped
        assert len(attachment_events) == 2  # 1 started, 1 processed
        assert len(error_events) == 1  # 1 page failed
        # Verify event content
        page_event_types = [type(event).__name__ for event in page_events]
        assert "PageDataFetchStartedEvent" in page_event_types
        assert "PageDataFetchCompletedEvent" in page_event_types
        assert "PageSkippedEvent" in page_event_types
        attachment_event_types = [type(event).__name__ for event in attachment_events]
        assert "AttachmentProcessingStartedEvent" in attachment_event_types
        assert "AttachmentProcessedEvent" in attachment_event_types
        error_event_types = [type(event).__name__ for event in error_events]
        assert "PageFailedEvent" in error_event_types
        # Clean up: detach every handler we registered.
        for handler in [page_handler, attachment_handler, error_handler]:
            if handler in dispatcher.event_handlers:
                dispatcher.event_handlers.remove(handler)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-confluence/tests/test_integration.py",
"license": "MIT License",
"lines": 214,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-confluence/tests/test_new_features.py | """Tests for new ConfluenceReader features: callbacks, custom parsers, event system."""
from unittest.mock import Mock, MagicMock, patch
import pytest
import os
from llama_index.readers.confluence import ConfluenceReader
from llama_index.readers.confluence.event import (
FileType,
PageDataFetchStartedEvent,
AttachmentProcessedEvent,
AttachmentFailedEvent,
)
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.instrumentation.event_handlers import BaseEventHandler
class TestCustomParsersAndFolder:
    """Validate the interplay of ``custom_parsers`` and ``custom_folder``."""

    def test_custom_folder_without_parsers_raises_error(self):
        """A custom_folder on its own must be rejected at construction time."""
        with pytest.raises(
            ValueError,
            match="custom_folder can only be used when custom_parsers are provided",
        ):
            ConfluenceReader(
                base_url="https://example.atlassian.net/wiki",
                api_token="test_token",
                custom_folder="/tmp/test",
            )

    def test_custom_parsers_with_custom_folder(self):
        """Supplying both options wires up the parser manager and folder."""
        parser_map = {FileType.PDF: MagicMock()}
        reader = ConfluenceReader(
            base_url="https://example.atlassian.net/wiki",
            api_token="test_token",
            custom_parsers=parser_map,
            custom_folder="/tmp/test",
        )
        assert reader.custom_parsers == parser_map
        assert reader.custom_folder == "/tmp/test"
        assert reader.custom_parser_manager is not None

    def test_custom_parsers_with_default_folder(self):
        """Without an explicit folder, the current working directory is used."""
        parser_map = {FileType.PDF: MagicMock()}
        reader = ConfluenceReader(
            base_url="https://example.atlassian.net/wiki",
            api_token="test_token",
            custom_parsers=parser_map,
        )
        assert reader.custom_parsers == parser_map
        assert reader.custom_folder == os.getcwd()
        assert reader.custom_parser_manager is not None

    def test_no_custom_parsers_no_folder(self):
        """With neither option, no folder or parser manager is configured."""
        reader = ConfluenceReader(
            base_url="https://example.atlassian.net/wiki", api_token="test_token"
        )
        assert reader.custom_parsers == {}
        assert reader.custom_folder is None
        assert reader.custom_parser_manager is None
class TestCallbacks:
    """Test callback functionality.

    Verifies that attachment/document callbacks are stored on the reader,
    behave as written, and are actually consulted during page processing.
    """

    def test_attachment_callback_functionality(self):
        """Test that attachment callback is properly stored and functional."""

        def attachment_filter(
            media_type: str, file_size: int, title: str
        ) -> tuple[bool, str]:
            # Returns (should_process, skip_reason).
            if file_size > 1000000:  # 1MB limit
                return False, "File too large"
            if media_type in ["application/zip"]:
                return False, "Unsupported file type"
            return True, ""

        reader = ConfluenceReader(
            base_url="https://example.atlassian.net/wiki",
            api_token="test_token",
            process_attachment_callback=attachment_filter,
        )
        assert reader.process_attachment_callback == attachment_filter
        # Test callback behavior
        should_process, reason = attachment_filter(
            "application/pdf", 500000, "small.pdf"
        )
        assert should_process is True
        assert reason == ""
        should_process, reason = attachment_filter(
            "application/pdf", 2000000, "large.pdf"
        )
        assert should_process is False
        assert reason == "File too large"
        should_process, reason = attachment_filter(
            "application/zip", 500000, "archive.zip"
        )
        assert should_process is False
        assert reason == "Unsupported file type"

    def test_document_callback_functionality(self):
        """Test that document callback is properly stored and functional."""
        excluded_pages = ["page1", "page2"]

        def document_filter(page_id: str) -> bool:
            # True means "process this page".
            return page_id not in excluded_pages

        reader = ConfluenceReader(
            base_url="https://example.atlassian.net/wiki",
            api_token="test_token",
            process_document_callback=document_filter,
        )
        assert reader.process_document_callback == document_filter
        # Test callback behavior
        assert document_filter("normal_page") is True
        assert document_filter("page1") is False
        assert document_filter("page2") is False

    @patch("llama_index.readers.confluence.html_parser.HtmlTextParser")
    def test_document_callback_in_process_page(self, mock_html_parser_class):
        """Test that document callback is used during page processing."""
        # Patch HTML conversion so process_page needs no real parser.
        mock_text_maker = MagicMock()
        mock_text_maker.convert.return_value = "processed text"
        mock_html_parser_class.return_value = mock_text_maker

        def document_filter(page_id: str) -> bool:
            return page_id != "skip_page"

        reader = ConfluenceReader(
            base_url="https://example.atlassian.net/wiki",
            api_token="test_token",
            process_document_callback=document_filter,
        )
        reader.confluence = MagicMock()  # Mock the confluence client
        # Test normal page processing
        normal_page = {
            "id": "normal_page",
            "title": "Normal Page",
            "status": "current",
            "body": {"export_view": {"value": "<p>Content</p>"}},
            "_links": {"webui": "/pages/123"},
        }
        result = reader.process_page(normal_page, False, mock_text_maker)
        assert result is not None
        assert result.doc_id == "normal_page"
        # Test skipped page: the callback vetoes it, so no Document is returned.
        skip_page = {
            "id": "skip_page",
            "title": "Skip Page",
            "status": "current",
            "body": {"export_view": {"value": "<p>Content</p>"}},
            "_links": {"webui": "/pages/456"},
        }
        result = reader.process_page(skip_page, False, mock_text_maker)
        assert result is None
class TestEventSystem:
"""Test event system functionality."""
def test_event_system_subscription_and_notification(self):
"""Test that event system can handle event subscriptions and notifications."""
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki", api_token="test_token"
)
events_received = []
class TestEventHandler(BaseEventHandler):
def handle(self, event):
events_received.append(event)
class PageEventHandler(BaseEventHandler):
def handle(self, event):
if isinstance(event, PageDataFetchStartedEvent):
events_received.append(f"PAGE_EVENT: {event.page_id}")
# Subscribe to events using new event system
dispatcher = get_dispatcher("test_new_features_subscription")
general_handler = TestEventHandler()
page_handler = PageEventHandler()
dispatcher.add_event_handler(general_handler)
dispatcher.add_event_handler(page_handler)
# Create and emit a page event
page_event = PageDataFetchStartedEvent(page_id="test_page")
dispatcher.event(page_event)
# Check that both handlers received the event
assert len(events_received) == 2
assert "PAGE_EVENT: test_page" in events_received
assert any(
isinstance(event, PageDataFetchStartedEvent) for event in events_received
)
# Clean up
for handler in [general_handler, page_handler]:
if handler in dispatcher.event_handlers:
dispatcher.event_handlers.remove(handler)
def test_event_system_attachment_events(self):
"""Test event system with attachment events."""
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki", api_token="test_token"
)
attachment_events = []
class AttachmentEventHandler(BaseEventHandler):
def handle(self, event):
if isinstance(event, (AttachmentProcessedEvent, AttachmentFailedEvent)):
attachment_events.append(event)
dispatcher = get_dispatcher("test_new_features_attachment")
attachment_handler = AttachmentEventHandler()
dispatcher.add_event_handler(attachment_handler)
# Test attachment processed event
processed_event = AttachmentProcessedEvent(
page_id="page123",
attachment_id="att456",
attachment_name="document.pdf",
attachment_type=FileType.PDF,
attachment_size=1024,
attachment_link="http://example.com/att456",
)
dispatcher.event(processed_event)
# Test attachment failed event
failed_event = AttachmentFailedEvent(
page_id="page123",
attachment_id="att789",
attachment_name="broken.pdf",
attachment_type=FileType.PDF,
attachment_size=2048,
attachment_link="http://example.com/att789",
error="Processing failed",
)
dispatcher.event(failed_event)
assert len(attachment_events) == 2
assert any(
isinstance(event, AttachmentProcessedEvent) for event in attachment_events
)
assert any(
isinstance(event, AttachmentFailedEvent) for event in attachment_events
)
# Clean up
if attachment_handler in dispatcher.event_handlers:
dispatcher.event_handlers.remove(attachment_handler)
    def test_event_system_handler_removal(self):
        """Test event system handler removal functionality."""
        # NOTE(review): `reader` is unused below; verify whether construction
        # has a required side effect or can be dropped.
        reader = ConfluenceReader(
            base_url="https://example.atlassian.net/wiki", api_token="test_token"
        )
        events_received = []
        # Handler that records every event it sees.
        class TestEventHandler(BaseEventHandler):
            def handle(self, event):
                events_received.append(event)
        # Add and then remove event handler
        dispatcher = get_dispatcher("test_new_features_removal")
        event_handler = TestEventHandler()
        dispatcher.add_event_handler(event_handler)
        if event_handler in dispatcher.event_handlers:
            dispatcher.event_handlers.remove(event_handler)
        # Create and emit event
        page_event = PageDataFetchStartedEvent(page_id="test_page")
        dispatcher.event(page_event)
        # Should not receive any events since we removed the handler
        assert len(events_received) == 0
class TestErrorHandling:
    """Tests for the reader's fail_on_error configuration flag."""

    def test_fail_on_error_default_true(self):
        """Omitting fail_on_error must leave it at its default of True."""
        reader = ConfluenceReader(
            base_url="https://example.atlassian.net/wiki", api_token="test_token"
        )
        assert reader.fail_on_error is True

    def test_fail_on_error_explicit_false(self):
        """Passing fail_on_error=False must be honored."""
        init_kwargs = {
            "base_url": "https://example.atlassian.net/wiki",
            "api_token": "test_token",
            "fail_on_error": False,
        }
        assert ConfluenceReader(**init_kwargs).fail_on_error is False

    def test_fail_on_error_explicit_true(self):
        """Passing fail_on_error=True explicitly must be honored."""
        init_kwargs = {
            "base_url": "https://example.atlassian.net/wiki",
            "api_token": "test_token",
            "fail_on_error": True,
        }
        assert ConfluenceReader(**init_kwargs).fail_on_error is True
class TestLogging:
    """Tests for logger injection and the default logger fallback."""

    def test_custom_logger(self):
        """A logger supplied by the caller must be stored verbatim."""
        import logging

        supplied = logging.getLogger("test_confluence_logger")
        reader = ConfluenceReader(
            base_url="https://example.atlassian.net/wiki",
            api_token="test_token",
            logger=supplied,
        )
        assert reader.logger == supplied

    def test_default_logger(self):
        """Without an explicit logger, an internal one must be created."""
        reader = ConfluenceReader(
            base_url="https://example.atlassian.net/wiki", api_token="test_token"
        )
        assert reader.logger is not None
        # The fallback logger must expose the standard logging API.
        for method_name in ("info", "error", "warning"):
            assert hasattr(reader.logger, method_name)
class TestChildPageFetching:
    """
    Tests the logic for fetching child pages, specifically handling the
    difference between cloud and on-premise Confluence instances.
    """
    def test_on_prem_folder_call_is_never_made(self):
        """
        On-premise mode: Ensures the fix prevents calls for 'folder' children.
        """
        from llama_index.readers.confluence import ConfluenceReader
        import requests
        # Simulate an on-prem server: requesting 'folder' children raises the
        # HTTP error real on-prem servers return; 'page' children of root
        # return one child on the first page and nothing afterwards.
        def side_effect(page_id, type, start=0, limit=50):
            if type == "folder":
                raise requests.exceptions.HTTPError(
                    "No ContentTypeBinding found for type: folder"
                )
            if page_id == "root" and start == 0:
                return ["p1"]
            return []
        with patch("atlassian.Confluence") as MockConfluence:
            mock_inst = Mock()
            mock_inst.get_child_id_list = Mock(side_effect=side_effect)
            mock_inst.cloud = False
            MockConfluence.return_value = mock_inst
            reader = ConfluenceReader(
                base_url="http://onprem", cloud=False, api_token="t"
            )
            # Would raise HTTPError if the reader queried folder children on-prem.
            res = reader._dfs_page_ids("root", type="page")
            assert set(res) == {"root", "p1"}
    def test_mixed_children_recursion_in_cloud(self):
        """
        Cloud mode: Verifies correct handling of pages and folders.
        """
        from llama_index.readers.confluence import ConfluenceReader
        # root has one page child (p1) and one folder child (f1); the folder
        # in turn contains a page (p2). Pagination beyond start=0 is empty.
        def side_effect(page_id, type, start=0, limit=50):
            if start > 0:
                return []
            if page_id == "root" and type == "page":
                return ["p1"]
            if page_id == "root" and type == "folder":
                return ["f1"]
            if page_id == "f1" and type == "page":
                return ["p2"]
            return []
        with patch("atlassian.Confluence") as MockConfluence:
            mock_inst = Mock()
            mock_inst.get_child_id_list = Mock(side_effect=side_effect)
            mock_inst.cloud = True
            MockConfluence.return_value = mock_inst
            reader = ConfluenceReader(
                base_url="https://cloud", cloud=True, api_token="t"
            )
            res = reader._dfs_page_ids("root", type="page")
            # Pages inside the folder are collected, but the folder id itself
            # is not part of the result.
            expected_ids = {"root", "p1", "p2"}
            assert set(res) == expected_ids
            assert "f1" not in res
    def test_max_num_results_is_respected(self):
        """
        Ensures the recursive search stops correctly when the limit is reached.
        """
        from llama_index.readers.confluence import ConfluenceReader
        # root has four page children; the traversal must stop after three
        # results total (root itself counts toward the limit).
        def side_effect(page_id, type, start=0, limit=50):
            if page_id == "root" and start == 0 and type == "page":
                return ["p1", "p2", "p3", "p4"]
            return []
        with patch("atlassian.Confluence") as MockConfluence:
            mock_inst = Mock()
            mock_inst.get_child_id_list = Mock(side_effect=side_effect)
            mock_inst.cloud = False
            MockConfluence.return_value = mock_inst
            reader = ConfluenceReader(
                base_url="http://onprem", cloud=False, api_token="t"
            )
            res = reader._dfs_page_ids("root", type="page", max_num_results=3)
            assert len(res) == 3
            assert set(res) == {"root", "p1", "p2"}
    def test_paging_behavior_helper_function(self):
        """
        Tests that the _get_data_with_paging helper function works correctly.
        """
        from llama_index.readers.confluence import ConfluenceReader
        # Serve five ids through whatever (start, limit) window is requested,
        # so the helper must keep paging until an empty page is returned.
        def paged_side_effect(page_id, type, start=0, limit=50):
            full_data = ["p1", "p2", "p3", "p4", "p5"]
            return full_data[start : start + limit]
        with patch("atlassian.Confluence") as MockConfluence:
            mock_inst = Mock()
            mock_inst.get_child_id_list = Mock(side_effect=paged_side_effect)
            mock_inst.cloud = True
            MockConfluence.return_value = mock_inst
            reader = ConfluenceReader(
                base_url="https://cloud", cloud=True, api_token="t"
            )
            all_ids = reader._get_data_with_paging(
                paged_function=reader.confluence.get_child_id_list,
                page_id="root",
                type="page",
            )
            assert all_ids == ["p1", "p2", "p3", "p4", "p5"]
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-confluence/tests/test_new_features.py",
"license": "MIT License",
"lines": 385,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/agent/workflow/test_function_call.py | from unittest.mock import AsyncMock, MagicMock
import pytest
from llama_index.core.agent.workflow import BaseWorkflowAgent
from llama_index.core.agent.workflow.workflow_events import (
AgentInput,
AgentOutput,
ToolCallResult,
)
from llama_index.core.llms import ChatMessage
from llama_index.core.memory import BaseMemory
from llama_index.core.tools import FunctionTool, ToolOutput
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.events import StopEvent
class TestWorkflowAgent(BaseWorkflowAgent):
    """Minimal concrete BaseWorkflowAgent used as a test double."""

    async def take_step(self, ctx, llm_input, tools, memory):
        """Return a canned AgentOutput that makes no tool calls."""
        canned_response = ChatMessage(role="assistant", content="test response")
        return AgentOutput(
            response=canned_response,
            tool_calls=[],
            raw="test",
            current_agent_name=self.name,
        )

    async def handle_tool_call_results(self, ctx, results, memory):
        """No-op: tool results are ignored by this test double."""

    async def finalize(self, ctx, output, memory):
        """Pass the output through unchanged."""
        return output
@pytest.fixture
def mock_context():
    """Build a Context mock with an async store and a sync collect_events."""
    ctx = MagicMock(spec=Context)
    store = AsyncMock()
    store.get = AsyncMock()
    store.set = AsyncMock()
    ctx.store = store
    ctx.collect_events = MagicMock()
    return ctx
@pytest.fixture
def mock_memory():
    """Provide a BaseMemory mock whose aget() yields no prior messages."""
    mem = MagicMock(spec=BaseMemory)
    mem.aget = AsyncMock(return_value=[])
    return mem
@pytest.fixture
def test_agent():
    """Instantiate the TestWorkflowAgent double with no tools."""
    agent_kwargs = {
        "name": "test_agent",
        "description": "Test agent for testing",
        "tools": [],
        "llm": None,  # None selects the default LLM
    }
    return TestWorkflowAgent(**agent_kwargs)
@pytest.mark.asyncio
async def test_aggregate_tool_results_return_direct_non_handoff_no_error_stops(
    mock_context, mock_memory, test_agent
):
    """
    Test that when return_direct tool is NOT 'handoff' and has NO error,
    the workflow stops execution by returning StopEvent (lines 564-569)
    """
    # Arrange
    tool_output = ToolOutput(
        content="Tool executed successfully",
        tool_name="direct_tool",
        raw_input={"param": "value"},
        raw_output="success",
        is_error=False,
    )
    return_direct_tool = ToolCallResult(
        tool_name="direct_tool",  # NOT 'handoff'
        tool_kwargs={"param": "value"},
        tool_id="tool_123",
        tool_output=tool_output,
        return_direct=True,
    )
    # Mock context store responses: canned values for known keys, the
    # supplied default for everything else.
    mock_context.store.get.side_effect = lambda key, default=None: {
        "num_tool_calls": 1,
        "current_tool_calls": [],
        "memory": mock_memory,
        "user_msg_str": "test message",
    }.get(key, default)
    # Mock collect_events to return our tool call result
    mock_context.collect_events.return_value = [return_direct_tool]
    # Act
    result = await test_agent.aggregate_tool_results(mock_context, return_direct_tool)
    # Assert
    # The method should return StopEvent when condition is met
    assert isinstance(result, StopEvent)
    assert result.result is not None
    assert result.result.current_agent_name == "test_agent"
    # Verify that current_tool_calls was cleared (line 567)
    mock_context.store.set.assert_any_call("current_tool_calls", [])
@pytest.mark.asyncio
async def test_aggregate_tool_results_return_direct_handoff_does_not_stop(
    mock_context, mock_memory, test_agent
):
    """
    Test that when return_direct tool is 'handoff',
    the workflow does NOT stop execution (condition fails at line 564)
    """
    # Arrange
    tool_output = ToolOutput(
        content="Handing off to another agent",
        tool_name="handoff",
        raw_input={"target_agent": "other_agent"},
        raw_output="handoff_success",
        is_error=False,
    )
    handoff_tool = ToolCallResult(
        tool_name="handoff",  # IS 'handoff'
        tool_kwargs={"target_agent": "other_agent"},
        tool_id="tool_456",
        tool_output=tool_output,
        return_direct=True,
    )
    # Mock context store responses: canned values for known keys.
    mock_context.store.get.side_effect = lambda key, default=None: {
        "num_tool_calls": 1,
        "current_tool_calls": [],
        "memory": mock_memory,
        "user_msg_str": "test message",
    }.get(key, default)
    # Mock collect_events to return our handoff tool call result
    mock_context.collect_events.return_value = [handoff_tool]
    # Act
    result = await test_agent.aggregate_tool_results(mock_context, handoff_tool)
    # Assert
    # The method should NOT return StopEvent for handoff tools
    assert not isinstance(result, StopEvent)
    # Should return AgentInput to continue the workflow
    assert isinstance(result, AgentInput)
    # Verify that current_tool_calls was NOT cleared specifically for the return_direct condition
    # Note: current_tool_calls might be set for other reasons, but not the lines 564-569 condition
    calls_to_current_tool_calls = [
        call
        for call in mock_context.store.set.call_args_list
        if call[0][0] == "current_tool_calls" and call[0][1] == []
    ]
    # Should not find the specific call that clears current_tool_calls due to lines 564-569
    assert len(calls_to_current_tool_calls) == 0
@pytest.mark.asyncio
async def test_aggregate_tool_results_return_direct_with_error_does_not_stop(
    mock_context, mock_memory, test_agent
):
    """
    Test that when return_direct tool has an error,
    the workflow does NOT stop execution (condition fails at line 565)
    """
    # Arrange
    tool_output = ToolOutput(
        content="Tool execution failed",
        tool_name="error_tool",
        raw_input={"param": "value"},
        raw_output="error_occurred",
        is_error=True,  # HAS ERROR
    )
    error_tool = ToolCallResult(
        tool_name="error_tool",  # NOT 'handoff'
        tool_kwargs={"param": "value"},
        tool_id="tool_789",
        tool_output=tool_output,
        return_direct=True,
    )
    # Mock context store responses: canned values for known keys.
    mock_context.store.get.side_effect = lambda key, default=None: {
        "num_tool_calls": 1,
        "current_tool_calls": [],
        "memory": mock_memory,
        "user_msg_str": "test message",
    }.get(key, default)
    # Mock collect_events to return our error tool call result
    mock_context.collect_events.return_value = [error_tool]
    # Act
    result = await test_agent.aggregate_tool_results(mock_context, error_tool)
    # Assert
    # The method should NOT return StopEvent for tools with errors
    assert not isinstance(result, StopEvent)
    # Should return AgentInput to continue the workflow
    assert isinstance(result, AgentInput)
    # Verify that current_tool_calls was NOT cleared specifically for the return_direct condition
    calls_to_current_tool_calls = [
        call
        for call in mock_context.store.set.call_args_list
        if call[0][0] == "current_tool_calls" and call[0][1] == []
    ]
    # Should not find the specific call that clears current_tool_calls due to lines 564-569
    assert len(calls_to_current_tool_calls) == 0
@pytest.mark.asyncio
async def test_aggregate_tool_results_return_direct_handoff_with_error_does_not_stop(
    mock_context, mock_memory, test_agent
):
    """
    Test that when return_direct tool is 'handoff' AND has an error,
    the workflow does NOT stop execution (condition fails on both counts)
    """
    # Arrange
    tool_output = ToolOutput(
        content="Handoff failed",
        tool_name="handoff",
        raw_input={"target_agent": "other_agent"},
        raw_output="handoff_error",
        is_error=True,  # HAS ERROR
    )
    handoff_error_tool = ToolCallResult(
        tool_name="handoff",  # IS 'handoff'
        tool_kwargs={"target_agent": "other_agent"},
        tool_id="tool_999",
        tool_output=tool_output,
        return_direct=True,
    )
    # Mock context store responses: canned values for known keys.
    mock_context.store.get.side_effect = lambda key, default=None: {
        "num_tool_calls": 1,
        "current_tool_calls": [],
        "memory": mock_memory,
        "user_msg_str": "test message",
    }.get(key, default)
    # Mock collect_events to return our handoff error tool call result
    mock_context.collect_events.return_value = [handoff_error_tool]
    # Act
    result = await test_agent.aggregate_tool_results(mock_context, handoff_error_tool)
    # Assert
    # The method should NOT return StopEvent for handoff tools with errors
    assert not isinstance(result, StopEvent)
    # Should return AgentInput to continue the workflow
    assert isinstance(result, AgentInput)
@pytest.mark.asyncio
async def test_aggregate_tool_results_context_store_operations_for_successful_return_direct(
    mock_context, mock_memory, test_agent
):
    """
    Test that context store operations are performed correctly when condition is met (lines 567-568)
    """
    # Arrange
    tool_output = ToolOutput(
        content="Success",
        tool_name="success_tool",
        raw_input={},
        raw_output="ok",
        is_error=False,
    )
    success_tool = ToolCallResult(
        tool_name="success_tool",
        tool_kwargs={},
        tool_id="tool_success",
        tool_output=tool_output,
        return_direct=True,
    )
    # Mock context store responses; unlike the other tests,
    # current_tool_calls starts non-empty so clearing it is observable.
    existing_tool_calls = [success_tool]
    mock_context.store.get.side_effect = lambda key, default=None: {
        "num_tool_calls": 1,
        "current_tool_calls": existing_tool_calls,
        "memory": mock_memory,
        "user_msg_str": "test message",
    }.get(key, default)
    # Mock collect_events to return our success tool call result
    mock_context.collect_events.return_value = [success_tool]
    # Act
    result = await test_agent.aggregate_tool_results(mock_context, success_tool)
    # Assert
    # Verify StopEvent was returned
    assert isinstance(result, StopEvent)
    assert result.result is not None
    # Verify context store was called correctly (line 567)
    mock_context.store.set.assert_any_call("current_tool_calls", [])
    # Verify the result contains the correct information
    assert result.result.current_agent_name == "test_agent"
    assert result.result.response.content == "Success"
@pytest.mark.asyncio
async def test_aggregate_tool_results_multiple_tools_one_return_direct_eligible(
    mock_context, mock_memory, test_agent
):
    """
    Test that when multiple tools are called but only one is eligible for return_direct stop,
    the workflow stops correctly
    """
    # Arrange
    # First tool - return_direct but is handoff (should not stop)
    handoff_output = ToolOutput(
        content="Handoff",
        tool_name="handoff",
        raw_input={},
        raw_output="handoff",
        is_error=False,
    )
    handoff_tool = ToolCallResult(
        tool_name="handoff",
        tool_kwargs={},
        tool_id="tool_handoff",
        tool_output=handoff_output,
        return_direct=True,
    )
    # Second tool - return_direct and eligible to stop
    success_output = ToolOutput(
        content="Success",
        tool_name="success_tool",
        raw_input={},
        raw_output="success",
        is_error=False,
    )
    success_tool = ToolCallResult(
        tool_name="success_tool",
        tool_kwargs={},
        tool_id="tool_success",
        tool_output=success_output,
        return_direct=True,
    )
    # Mock context store responses; num_tool_calls=2 matches the two events
    # collect_events will deliver below.
    mock_context.store.get.side_effect = lambda key, default=None: {
        "num_tool_calls": 2,
        "current_tool_calls": [],
        "memory": mock_memory,
        "user_msg_str": "test message",
    }.get(key, default)
    # Mock collect_events to return both tool call results
    mock_context.collect_events.return_value = [success_tool, handoff_tool]
    # Act
    result = await test_agent.aggregate_tool_results(mock_context, success_tool)
    # Assert
    # Should return StopEvent because one tool is eligible
    assert isinstance(result, StopEvent)
    # Verify context store was called to clear current_tool_calls (line 567)
    mock_context.store.set.assert_any_call("current_tool_calls", [])
@pytest.mark.asyncio
async def test_aggregate_tool_results_boolean_logic_verification():
    """
    Test the exact boolean logic used in lines 564-565
    """
    # Each case: (tool_name, is_error, should_stop_execution).
    # Only a non-'handoff' tool without an error may stop the workflow.
    test_cases = [
        ("handoff", False, False),
        ("handoff", True, False),
        ("other_tool", False, True),
        ("other_tool", True, False),
        ("", False, True),
        ("", True, False),
    ]
    for tool_name, is_error, should_stop in test_cases:
        case_output = ToolOutput(
            content="test",
            tool_name=tool_name,
            raw_input={},
            raw_output="test",
            is_error=is_error,
        )
        case_result = ToolCallResult(
            tool_name=tool_name,
            tool_kwargs={},
            tool_id="test_id",
            tool_output=case_output,
            return_direct=True,
        )
        # Mirror of the production condition from lines 564-565.
        condition_result = (
            case_result.tool_name != "handoff"
            and not case_result.tool_output.is_error
        )
        assert condition_result == should_stop, (
            f"Boolean logic failed for tool_name='{tool_name}', is_error={is_error}. "
            f"Expected {should_stop}, got {condition_result}"
        )
@pytest.mark.asyncio
async def test_call_tool_with_exception(mock_context, test_agent):
    """
    Test that when a tool raises an exception, _call_tool catches it
    and returns a ToolOutput with is_error=True and the exception.
    """
    # Arrange
    # A tool whose body always raises; _call_tool must not propagate this.
    def error_function(x: int) -> str:
        raise ValueError("This is a test error")
    error_tool = FunctionTool.from_defaults(error_function)
    tool_input = {"x": 1}
    # Act
    tool_output = await test_agent._call_tool(mock_context, error_tool, tool_input)
    # Assert
    # The raised exception is captured on the output rather than re-raised.
    assert tool_output.is_error is True
    assert isinstance(tool_output.exception, ValueError)
    assert str(tool_output.exception) == "This is a test error"
    assert tool_output.tool_name == "error_function"
    assert tool_output.raw_input == tool_input
if __name__ == "__main__":
    # Allow running this module directly: invokes pytest verbosely on this file.
    pytest.main([__file__, "-v"])
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/agent/workflow/test_function_call.py",
"license": "MIT License",
"lines": 386,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/agent/workflow/test_return_direct_e2e.py | import os
import pytest
# Detect common truthy CI environment-variable values.
CI = os.getenv("CI", "").lower() in ("1", "true", "yes")
# Probe for the optional OpenAI integration; tests below are skipped when
# it is not installed.
try:
    from llama_index.llms.openai import OpenAI  # noqa: F401
    has_openai = True
except ImportError:
    has_openai = False
@pytest.mark.skipif(CI or not has_openai, reason="Skipping in CI environment")
@pytest.mark.asyncio
async def test_return_direct_e2e():
    """End-to-end check that a return_direct tool error is surfaced to the
    LLM, retried, and eventually succeeds. Requires a live OpenAI key."""
    from llama_index.core.agent.workflow import FunctionAgent, ToolCallResult
    from llama_index.core.tools import FunctionTool
    from llama_index.llms.openai import OpenAI
    # Skip when the key is missing or is the obvious "sk-aaa..." placeholder.
    if not os.getenv("OPENAI_API_KEY") or os.getenv("OPENAI_API_KEY") == "sk-" + (
        "a" * 48
    ):
        pytest.skip("OPENAI_API_KEY is not defined")
    llm = OpenAI()
    # NOTE: this docstring is runtime-significant — FunctionTool sends it to
    # the LLM as the tool description, so it must not be reworded casually.
    def return_direct_tool(previously_failed: bool = False):
        """
        Test Tool to check return direct function
        Args:
            previously_failed (bool, optional): Set to true if previous calls return an error. Defaults to False.
        Raises:
            RuntimeError: Raised if Tool Call Error, Retry this tool with previously_failed=True if this happened
        Returns:
            str: Tool Success Status
        """
        if not previously_failed:
            raise RuntimeError(
                "Tool Failed, run again with `previously_failed=True` to success"
            )
        return "Tool Run is Success"
    agent = FunctionAgent(
        name="e2e_test_agent",
        description="Test agent for testing",
        tools=[FunctionTool.from_defaults(fn=return_direct_tool, return_direct=True)],
        llm=llm,
        system_prompt="You are a test agent for testing function call. Execute the tools requested as-is so that the results can be evaluated",
    )
    handler = agent.run(
        "Run the return_direct_tool(False), observe the output. if error, retry based on the error message",
    )
    # Track that the agent both hit the failure path and then recovered.
    ever_error = False
    ever_succeed = False
    async for ev in handler.stream_events():
        if isinstance(ev, ToolCallResult):
            if ev.tool_output.is_error:
                assert (
                    ev.tool_output.raw_output
                    == "Tool Failed, run again with `previously_failed=True` to success"
                )
                ever_error = True
            else:
                assert ev.tool_output.raw_output == "Tool Run is Success"
                ever_succeed = True
    result = await handler
    assert result
    assert ever_error
    assert ever_succeed
if __name__ == "__main__":
    # Allow running this module directly: invokes pytest verbosely on this file.
    pytest.main([__file__, "-v"])
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/agent/workflow/test_return_direct_e2e.py",
"license": "MIT License",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-moorcheh/llama_index/vector_stores/moorcheh/base.py | # Importing required libraries and modules
import logging
import os
import uuid
import warnings
from typing import Any, Callable, ClassVar, List, Literal, Optional

# LlamaIndex internals for schema and vector store support
from llama_index.core.base.embeddings.base_sparse import BaseSparseEmbedding
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.llms import LLM
from llama_index.core.schema import BaseNode, MetadataMode, TextNode
from llama_index.core.vector_stores.types import (
    BasePydanticVectorStore,
    VectorStoreQuery,
    VectorStoreQueryMode,
    VectorStoreQueryResult,
)

# Sparse embedding helper used by MoorchehVectorStore.__init__
from llama_index.vector_stores.moorcheh.utils import DefaultMoorchehSparseEmbedding

# Moorcheh SDK for backend vector storage
from moorcheh_sdk import MoorchehClient, MoorchehError
# Keys used when assembling upload payloads for the Moorcheh API.
ID_KEY = "id"
VECTOR_KEY = "values"
SPARSE_VECTOR_KEY = "sparse_values"
METADATA_KEY = "metadata"
# Logger for debug/info/error output
logger = logging.getLogger(__name__)
class MoorchehVectorStore(BasePydanticVectorStore):
    """
    Moorcheh Vector Store.

    Embeddings and docs are stored within a Moorcheh namespace. During query
    time, the index uses Moorcheh to query for the top k most similar nodes.

    Args:
        api_key (Optional[str]): API key for Moorcheh.
            If not provided, will look for MOORCHEH_API_KEY environment variable.
        namespace (Optional[str]): Namespace name to use for this vector store.
        namespace_type (Optional[str]): Type of namespace - "text" or "vector".
        vector_dimension (Optional[int]): Vector dimension for a vector namespace.
        add_sparse_vector (Optional[bool]): Whether to attach sparse vectors to
            uploaded items (enables hybrid search).
        tokenizer (Optional[Callable]): Tokenizer for the default sparse
            embedding model; ignored when ``sparse_embedding_model`` is given.
        ai_model (Optional[str]): Model id used by Moorcheh's built-in
            generative-answer endpoint.
        batch_size (int): Batch size for uploads. Defaults to 64.
        sparse_embedding_model (Optional[BaseSparseEmbedding]): Explicit sparse
            embedding model.
    """

    # Default values and capabilities
    DEFAULT_NAMESPACE: ClassVar[str] = "llamaindex_default"
    DEFAULT_EMBED_BATCH_SIZE: ClassVar[int] = 64  # customize as needed
    stores_text: bool = True
    flat_metadata: bool = True

    # Pydantic fields (validated configuration)
    api_key: Optional[str]
    namespace: Optional[str]
    namespace_type: Optional[Literal["text", "vector"]] = None
    vector_dimension: Optional[int]
    add_sparse_vector: Optional[bool]
    ai_model: Optional[str]
    batch_size: int
    sparse_embedding_model: Optional[BaseSparseEmbedding] = None

    # Private attributes — declared so pydantic permits assignment in __init__.
    _client: MoorchehClient = PrivateAttr()
    _sparse_embedding_model: Optional[BaseSparseEmbedding] = PrivateAttr(default=None)

    def __init__(
        self,
        api_key: Optional[str] = None,
        namespace: Optional[str] = None,
        namespace_type: Optional[str] = "text",
        vector_dimension: Optional[int] = None,
        add_sparse_vector: Optional[bool] = False,
        tokenizer: Optional[Callable] = None,
        ai_model: Optional[str] = "anthropic.claude-3-7-sonnet-20250219-v1:0",
        batch_size: int = 64,
        sparse_embedding_model: Optional[BaseSparseEmbedding] = None,
    ) -> None:
        # Resolve the sparse embedding model only when sparse vectors are
        # requested; otherwise it is forced to None.
        if add_sparse_vector:
            if sparse_embedding_model is None:
                if tokenizer is not None:
                    sparse_embedding_model = DefaultMoorchehSparseEmbedding(
                        tokenizer=tokenizer
                    )
                else:
                    sparse_embedding_model = DefaultMoorchehSparseEmbedding()
        else:
            sparse_embedding_model = None

        super().__init__(
            api_key=api_key,
            namespace=namespace,
            namespace_type=namespace_type,
            vector_dimension=vector_dimension,
            add_sparse_vector=add_sparse_vector,
            batch_size=batch_size,
            sparse_embedding_model=sparse_embedding_model,
            ai_model=ai_model,
        )

        # Fallback to env var if API key not provided
        if not self.api_key:
            self.api_key = os.getenv("MOORCHEH_API_KEY")
        if not self.api_key:
            raise ValueError("`api_key` is required for Moorcheh client initialization")
        if not self.namespace:
            raise ValueError(
                "`namespace` is required for Moorcheh client initialization"
            )

        # Initialize Moorcheh client
        logger.debug("Initializing MoorchehClient")
        self._client = MoorchehClient(api_key=self.api_key)
        self.is_embedding_query = False
        self._sparse_embedding_model = sparse_embedding_model

        # Ensure the target namespace exists, creating it on first use.
        # (The original logged with literal "{e}"/"{self.namespace}" strings;
        # these are now real f-strings.)
        logger.debug("Listing namespaces...")
        try:
            namespaces_response = self._client.list_namespaces()
            namespaces = [
                ns["namespace_name"]
                for ns in namespaces_response.get("namespaces", [])
            ]
            logger.debug("Found namespaces.")
        except Exception as e:
            logger.error(f"Failed to list namespaces: {e}")
            raise

        if self.namespace in namespaces:
            logger.debug(
                f"Namespace '{self.namespace}' already exists. No action required."
            )
        else:
            logger.debug(f"Namespace '{self.namespace}' not found. Creating it.")
            try:
                self._client.create_namespace(
                    namespace_name=self.namespace,
                    type=self.namespace_type,
                    vector_dimension=self.vector_dimension,
                )
                logger.debug(f"Namespace '{self.namespace}' created.")
            except Exception as e:
                logger.error(f"Failed to create namespace: {e}")
                raise

    @property
    def client(self) -> MoorchehClient:
        """Return initialized Moorcheh client."""
        return self._client

    @classmethod
    def class_name(cls) -> str:
        """Return class name."""
        return "MoorchehVectorStore"

    def add(
        self,
        nodes: List[BaseNode],
        **add_kwargs: Any,
    ) -> List[str]:
        """Add nodes to Moorcheh, dispatching on the namespace type."""
        if not nodes:
            return []
        if self.namespace_type == "text":
            return self._add_text_nodes(nodes, **add_kwargs)
        return self._add_vector_nodes(nodes, **add_kwargs)

    def _add_text_nodes(self, nodes: List[BaseNode], **kwargs: Any) -> List[str]:
        """Upload text documents to a text namespace; returns the node ids."""
        documents = []
        ids = []
        sparse_inputs = []
        for node in nodes:
            node_id = node.node_id or str(uuid.uuid4())
            ids.append(node_id)
            document = {
                "id": node_id,
                "text": node.get_content(metadata_mode=MetadataMode.NONE),
            }
            # Add metadata if present
            if node.metadata:
                document["metadata"] = node.metadata
            if self.add_sparse_vector and self._sparse_embedding_model is not None:
                sparse_inputs.append(node.get_content(metadata_mode=MetadataMode.EMBED))
            documents.append(document)
        # When sparse vectors are enabled, one input is collected per node,
        # so embeddings[i] lines up with documents[i].
        if sparse_inputs:
            sparse_vectors = self._sparse_embedding_model.get_text_embedding_batch(
                sparse_inputs
            )
            for i, sparse_vector in enumerate(sparse_vectors):
                documents[i][SPARSE_VECTOR_KEY] = {
                    "indices": list(sparse_vector.keys()),
                    "values": list(sparse_vector.values()),
                }
        # Upload in batches to bound individual request size.
        for i in range(0, len(documents), self.batch_size):
            batch = documents[i : i + self.batch_size]
            try:
                self._client.upload_documents(
                    namespace_name=self.namespace, documents=batch
                )
                logger.debug(f"Uploaded batch of {len(batch)} documents")
            except MoorchehError as e:
                logger.error(f"Error uploading documents batch: {e}")
                raise
        logger.info(
            f"Added {len(documents)} text documents to namespace {self.namespace}"
        )
        return ids

    def _add_vector_nodes(self, nodes: List[BaseNode], **kwargs: Any) -> List[str]:
        """Upload embedded nodes to a vector namespace; returns the node ids."""
        vectors = []
        ids = []
        sparse_inputs = []
        if all(node.embedding is None for node in nodes):
            raise ValueError("No embeddings could be found within your nodes")
        for node in nodes:
            if node.embedding is None:
                warnings.warn(
                    f"Node {node.node_id} has no embedding for vector namespace",
                    UserWarning,
                )
            node_id = node.node_id or str(uuid.uuid4())
            ids.append(node_id)
            vector = {
                "id": node_id,
                "vector": node.embedding,
            }
            # Keep the node text retrievable by storing it in the metadata.
            metadata = dict(node.metadata) if node.metadata else {}
            metadata["text"] = metadata.pop(
                "text", node.get_content(metadata_mode=MetadataMode.NONE)
            )
            vector["metadata"] = metadata
            if self.add_sparse_vector and self._sparse_embedding_model is not None:
                sparse_inputs.append(node.get_content(metadata_mode=MetadataMode.EMBED))
            vectors.append(vector)
        if sparse_inputs:
            sparse_vectors = self._sparse_embedding_model.get_text_embedding_batch(
                sparse_inputs
            )
            for i, sparse_vector in enumerate(sparse_vectors):
                # BUG FIX: this previously wrote to `documents[i]`, a name that
                # does not exist in this method (NameError at runtime).
                vectors[i][SPARSE_VECTOR_KEY] = {
                    "indices": list(sparse_vector.keys()),
                    "values": list(sparse_vector.values()),
                }
        # Upload in batches to bound individual request size.
        for i in range(0, len(vectors), self.batch_size):
            batch = vectors[i : i + self.batch_size]
            try:
                self._client.upload_vectors(
                    namespace_name=self.namespace, vectors=batch
                )
                logger.debug(f"Uploaded batch of {len(batch)} vectors")
            except MoorchehError as e:
                logger.error(f"Error uploading vectors batch: {e}")
                raise
        logger.info(f"Added {len(vectors)} vectors to namespace {self.namespace}")
        return ids

    def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """
        Delete nodes using with ref_doc_id.

        Args:
            ref_doc_id (str): The doc_id of the document to delete.

        """
        try:
            # The deletion endpoint depends on the namespace type.
            if self.namespace_type == "text":
                self._client.delete_documents(
                    namespace_name=self.namespace, ids=[ref_doc_id]
                )
            else:
                self._client.delete_vectors(
                    namespace_name=self.namespace, ids=[ref_doc_id]
                )
            logger.info(
                f"Deleted document {ref_doc_id} from namespace {self.namespace}"
            )
        except MoorchehError as e:
            logger.error(f"Error deleting document {ref_doc_id}: {e}")
            raise

    def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
        """
        Query Moorcheh vector store.

        Args:
            query (VectorStoreQuery): query object

        Returns:
            VectorStoreQueryResult: query result

        Raises:
            ValueError: if neither query_str nor query_embedding is provided,
                or if a sparse/hybrid query lacks query_str.

        """
        # Compute a sparse query vector for SPARSE/HYBRID modes.
        # NOTE(review): the resulting `moorcheh_sparse_vector` is currently not
        # forwarded to the search call — confirm whether the SDK supports it.
        moorcheh_sparse_vector = None
        if (
            query.mode in (VectorStoreQueryMode.SPARSE, VectorStoreQueryMode.HYBRID)
            and self._sparse_embedding_model is not None
        ):
            if query.query_str is None:
                raise ValueError(
                    "query_str must be specified if mode is SPARSE or HYBRID."
                )
            sparse_vector = self._sparse_embedding_model.get_query_embedding(
                query.query_str
            )
            if query.alpha is not None:
                # Scale sparse weights by (1 - alpha) for hybrid blending.
                moorcheh_sparse_vector = {
                    "indices": list(sparse_vector.keys()),
                    "values": [v * (1 - query.alpha) for v in sparse_vector.values()],
                }
            else:
                moorcheh_sparse_vector = {
                    "indices": list(sparse_vector.keys()),
                    "values": list(sparse_vector.values()),
                }

        # Prepare search parameters
        search_kwargs = {
            "namespaces": [self.namespace],
            "top_k": query.similarity_top_k,
        }
        # The backend accepts either a text query or an embedding under "query".
        if query.query_str is not None:
            search_kwargs["query"] = query.query_str
        elif query.query_embedding is not None:
            search_kwargs["query"] = query.query_embedding
        else:
            raise ValueError("Either query_str or query_embedding must be provided")

        # TODO: Add metadata filter support when available in Moorcheh SDK
        if query.filters is not None:
            logger.warning(
                "Metadata filters are not yet supported by Moorcheh integration"
            )

        try:
            # Execute search
            search_result = self._client.search(**search_kwargs)
            # Parse results
            nodes = []
            similarities = []
            ids = []
            results = search_result.get("results", [])
            for result in results:
                node_id = result.get("id")
                score = result.get("score", 0.0)
                if node_id is None:
                    logger.warning("Found result with no ID, skipping")
                    continue
                ids.append(node_id)
                similarities.append(score)
                # Extract text and metadata
                if self.namespace_type == "text":
                    text = result.get("text", "")
                    metadata = result.get("metadata", {})
                else:
                    # For vector namespace, text is stored in metadata
                    metadata = result.get("metadata", {})
                    text = metadata.pop("text", "")  # Remove text from metadata
                nodes.append(
                    TextNode(
                        text=text,
                        id_=node_id,
                        metadata=metadata,
                    )
                )
            return VectorStoreQueryResult(
                nodes=nodes,
                similarities=similarities,
                ids=ids,
            )
        except MoorchehError as e:
            logger.error(f"Error executing query: {e}")
            raise

    def get_generative_answer(
        self,
        query: str,
        top_k: int = 5,
        ai_model: str = "anthropic.claude-3-7-sonnet-20250219-v1:0",
        llm: Optional[LLM] = None,
        **kwargs: Any,
    ) -> str:
        """
        Get a generative AI answer using Moorcheh's built-in RAG capability.

        This method leverages Moorcheh's information-theoretic approach
        to provide context-aware answers directly from the API. When a
        LlamaIndex ``llm`` is supplied, retrieval is done locally via
        :meth:`query` and the answer is produced by that LLM instead.

        Args:
            query (str): The query string.
            top_k (int): Number of top results to use for context.
            ai_model (str): Model id for Moorcheh's generative endpoint.
            llm (Optional[LLM]): Optional LlamaIndex LLM to generate the
                answer locally from retrieved context.
            **kwargs: Additional keyword arguments passed to Moorcheh.

        Returns:
            str: Generated answer string.

        """
        try:
            if llm:
                # Local RAG path: retrieve with this store, answer with `llm`.
                vs_query = VectorStoreQuery(query_str=query, similarity_top_k=top_k)
                result = self.query(vs_query)
                context = "\n\n".join([node.text for node in result.nodes])
                prompt = f"""Use the context below to answer the question. Context: {context} Question: {query} Answer:"""
                return llm.complete(prompt).text
            else:
                # Remote RAG path: let Moorcheh retrieve and generate.
                result = self._client.get_generative_answer(
                    namespace=self.namespace,
                    query=query,
                    top_k=top_k,
                    ai_model=ai_model,
                    **kwargs,
                )
                return result.get("answer", "")
        except MoorchehError as e:
            logger.error(f"Error getting generative answer: {e}")
            raise
if __name__ == "__main__":
    # Smoke check when the module is executed directly; no side effects beyond print.
    print("MoorchehVectorStore loaded successfully.")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-moorcheh/llama_index/vector_stores/moorcheh/base.py",
"license": "MIT License",
"lines": 400,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-moorcheh/llama_index/vector_stores/moorcheh/utils.py | from collections import Counter
from typing import Callable, List
from llama_index.core.bridge.pydantic import Field
from llama_index.core.base.embeddings.base_sparse import (
BaseSparseEmbedding,
SparseEmbedding,
)
def get_default_tokenizer() -> Callable:
try:
from transformers import BertTokenizerFast
except ImportError:
raise ImpotError(
"In order to run `llama-index-vector-stores-moorcheh` you need to have `transformers` installed. Please run `pip install transformers`"
)
orig_tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
def _tokenizer(texts: List[str]) -> List[List[int]]:
return orig_tokenizer(texts, padding=True, truncation=True, max_length=512)[
"input_ids"
]
return _tokenizer
class DefaultMoorchehSparseEmbedding(BaseSparseEmbedding):
    """Default Moorcheh sparse embedding.

    Produces bag-of-words sparse vectors mapping each token id to its
    occurrence count (as a float) within the tokenized input.
    """

    tokenizer: Callable = Field(
        default_factory=get_default_tokenizer,
        description="A callable that returns token input ids.",
    )

    def build_sparse_embeddings(
        self, input_batch: List[List[int]]
    ) -> List[SparseEmbedding]:
        """Convert each token-id list into a token-id -> frequency mapping."""
        return [
            {token_id: float(count) for token_id, count in Counter(ids).items()}
            for ids in input_batch
        ]

    def _get_query_embedding(self, query: str) -> SparseEmbedding:
        """Embed the input query synchronously."""
        input_ids = self.tokenizer([query])[0]
        return self.build_sparse_embeddings([input_ids])[0]

    async def _aget_query_embedding(self, query: str) -> SparseEmbedding:
        """Embed the input query asynchronously (delegates to the sync path)."""
        return self._get_query_embedding(query)

    def _get_text_embedding(self, text: str) -> SparseEmbedding:
        """Embed the input text synchronously."""
        return self._get_query_embedding(text)

    async def _aget_text_embedding(self, text: str) -> SparseEmbedding:
        """Embed the input text asynchronously (delegates to the sync path)."""
        return self._get_query_embedding(text)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-moorcheh/llama_index/vector_stores/moorcheh/utils.py",
"license": "MIT License",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-moorcheh/tests/test_vector_stores_moorcheh.py | # Test.py
import os
import pytest
import time
import uuid
from typing import List
from llama_index.core import StorageContext, VectorStoreIndex
from llama_index.core.embeddings import MockEmbedding
from llama_index.core.schema import TextNode
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilter,
MetadataFilters,
FilterCondition,
FilterOperator,
)
# Import your custom vector store class
from llama_index.vector_stores.moorcheh import MoorchehVectorStore
# Maximum seconds to wait for eventual consistency (currently unused in this module).
MAX_WAIT_TIME = 60
# Dimensionality shared by MockEmbedding and every test vector below.
EMBED_DIM = 1536
MOORCHEH_API_KEY = os.environ.get("MOORCHEH_API_KEY", None)
# Live-API tests are skipped when no key is configured.
should_skip = not MOORCHEH_API_KEY
def test_class():
    """MoorchehVectorStore must inherit from BasePydanticVectorStore."""
    mro_names = [base.__name__ for base in MoorchehVectorStore.__mro__]
    assert BasePydanticVectorStore.__name__ in mro_names
@pytest.fixture
def nodes():
    """Three sample nodes; the third deliberately uses a *string* metadata value."""
    samples = [
        ("Hello, world 1!", 1, 0.3),
        ("Hello, world 2!", 2, 0.5),
        ("Hello, world 3!", "3", 0.7),
    ]
    return [
        TextNode(
            text=text,
            metadata={"some_key": key},
            embedding=[weight] * EMBED_DIM,
        )
        for text, key, weight in samples
    ]
@pytest.fixture
def vector_store():
    """Fresh MoorchehVectorStore bound to a unique per-test namespace."""
    unique_ns = f"test-ns-{uuid.uuid4().hex[:8]}"
    return MoorchehVectorStore(
        api_key=MOORCHEH_API_KEY,
        namespace=unique_ns,
        namespace_type="vector",
        vector_dimension=EMBED_DIM,
        batch_size=100,
    )
@pytest.fixture
def index_with_nodes(vector_store, nodes: List[TextNode]):
    """Index the sample nodes into the store and let ingestion settle."""
    ctx = StorageContext.from_defaults(vector_store=vector_store)
    built = VectorStoreIndex(
        nodes=nodes,
        storage_context=ctx,
        embed_model=MockEmbedding(embed_dim=EMBED_DIM),
    )
    time.sleep(2)  # give the backend a moment to ingest
    return built
@pytest.mark.skipif(should_skip, reason="MOORCHEH_API_KEY not set")
def test_basic_e2e(index_with_nodes: VectorStoreIndex):
    """A plain retrieval round-trip returns at least one node."""
    retrieved = index_with_nodes.as_retriever().retrieve("Hello, world 1!")
    assert len(retrieved) >= 1
@pytest.mark.skipif(should_skip, reason="MOORCHEH_API_KEY not set")
def test_retrieval_with_filters(index_with_nodes: VectorStoreIndex):
    """Exercise EQ/GT/IN operators plus an OR-combined filter pair."""
    cases = [
        (
            MetadataFilters(
                filters=[
                    MetadataFilter(
                        key="some_key", value=1, operator=FilterOperator.EQ
                    ),
                    MetadataFilter(
                        key="some_key", value=2, operator=FilterOperator.EQ
                    ),
                ],
                condition=FilterCondition.OR,
            ),
            2,
        ),
        (
            MetadataFilters(
                filters=[
                    MetadataFilter(
                        key="some_key", value=1, operator=FilterOperator.GT
                    ),
                ],
            ),
            1,
        ),
        (
            MetadataFilters(
                filters=[
                    MetadataFilter(
                        key="some_key", value=[1, 2], operator=FilterOperator.IN
                    ),
                ],
            ),
            2,
        ),
        (
            MetadataFilters(
                filters=[
                    MetadataFilter(
                        key="some_key", value="3", operator=FilterOperator.EQ
                    ),
                ],
            ),
            1,
        ),
    ]
    for filters, expected_count in cases:
        retriever = index_with_nodes.as_retriever(filters=filters)
        assert len(retriever.retrieve("Hello, world 1!")) == expected_count
@pytest.mark.skipif(should_skip, reason="MOORCHEH_API_KEY not set")
def test_empty_retrieval(vector_store):
    """Retrieval over an index with no documents yields an empty list."""
    empty_index = VectorStoreIndex(
        nodes=[],  # no documents at all
        storage_context=StorageContext.from_defaults(vector_store=vector_store),
        embed_model=MockEmbedding(embed_dim=EMBED_DIM),
    )
    assert empty_index.as_retriever().retrieve("Nonexistent") == []
@pytest.mark.skipif(should_skip, reason="MOORCHEH_API_KEY not set")
def test_namespace_isolation(nodes):
    """Documents written to one namespace must not leak into another."""

    def _build_index(namespace, subset):
        # One store + index per namespace.
        store = MoorchehVectorStore(
            api_key=MOORCHEH_API_KEY,
            namespace=namespace,
            namespace_type="vector",
            vector_dimension=EMBED_DIM,
        )
        return VectorStoreIndex(
            nodes=subset,
            storage_context=StorageContext.from_defaults(vector_store=store),
            embed_model=MockEmbedding(embed_dim=EMBED_DIM),
        )

    index1 = _build_index(f"ns1-{uuid.uuid4().hex[:6]}", nodes[:1])
    index2 = _build_index(f"ns2-{uuid.uuid4().hex[:6]}", nodes[1:])
    time.sleep(2)  # let ingestion settle
    res1 = index1.as_retriever().retrieve("Hello")
    res2 = index2.as_retriever().retrieve("Hello")
    assert all("1" in n.text for n in res1)
    assert all("2" in n.text or "3" in n.text for n in res2)
@pytest.mark.skipif(should_skip, reason="MOORCHEH_API_KEY not set")
def test_missing_metadata_handling():
    """Nodes without metadata are stored and retrieved like annotated ones."""
    docs = [
        TextNode(
            text="A node with metadata",
            metadata={"key": "val"},
            embedding=[0.1] * EMBED_DIM,
        ),
        TextNode(text="A node without metadata", embedding=[0.1] * EMBED_DIM),
    ]
    store = MoorchehVectorStore(
        api_key=MOORCHEH_API_KEY,
        namespace=f"missing-meta-{uuid.uuid4().hex[:6]}",
        namespace_type="vector",
        vector_dimension=EMBED_DIM,
    )
    index = VectorStoreIndex(
        nodes=docs,
        storage_context=StorageContext.from_defaults(vector_store=store),
        embed_model=MockEmbedding(embed_dim=EMBED_DIM),
    )
    time.sleep(2)
    assert len(index.as_retriever().retrieve("A node")) == 2
@pytest.mark.skipif(should_skip, reason="MOORCHEH_API_KEY not set")
def test_negative_filter_ops(index_with_nodes: VectorStoreIndex):
    """NE and NOT_IN must exclude the matching nodes from results."""
    ne_filter = MetadataFilters(
        filters=[
            MetadataFilter(key="some_key", value=2, operator=FilterOperator.NE),
        ]
    )
    excluded_texts = [
        n.text
        for n in index_with_nodes.as_retriever(filters=ne_filter).retrieve("Hello")
    ]
    assert "Hello, world 2!" not in excluded_texts

    not_in_filter = MetadataFilters(
        filters=[
            MetadataFilter(
                key="some_key", value=[2, "3"], operator=FilterOperator.NOT_IN
            ),
        ]
    )
    remaining_texts = [
        n.text
        for n in index_with_nodes.as_retriever(filters=not_in_filter).retrieve("Hello")
    ]
    assert "Hello, world 2!" not in remaining_texts
    assert "Hello, world 3!" not in remaining_texts
@pytest.mark.skipif(should_skip, reason="MOORCHEH_API_KEY not set")
def test_large_batch_insert():
    """200 nodes inserted with batch_size=50 are retrievable afterwards."""
    docs = [
        TextNode(text=f"Node {i}", embedding=[float(i / 100)] * EMBED_DIM)
        for i in range(200)
    ]
    store = MoorchehVectorStore(
        api_key=MOORCHEH_API_KEY,
        namespace=f"batchtest-{uuid.uuid4().hex[:6]}",
        namespace_type="vector",
        vector_dimension=EMBED_DIM,
        batch_size=50,
    )
    index = VectorStoreIndex(
        nodes=docs,
        storage_context=StorageContext.from_defaults(vector_store=store),
        embed_model=MockEmbedding(embed_dim=EMBED_DIM),
    )
    time.sleep(5)  # larger ingest, longer settle
    assert len(index.as_retriever().retrieve("Node")) >= 10  # fuzzy tolerance
# Test class inheritance
# NOTE(review): this redefines ``test_class`` from earlier in the module; the
# earlier definition is silently shadowed and never collected by pytest.
def test_class():
    assert BasePydanticVectorStore.__name__ in [
        b.__name__ for b in MoorchehVectorStore.__mro__
    ]
# Fixtures
# NOTE(review): duplicate of the ``nodes`` fixture defined earlier in this
# module — this definition shadows the earlier one.
@pytest.fixture
def nodes():
    # Three sample nodes; the third uses a string metadata value on purpose.
    return [
        TextNode(
            text="Hello, world 1!",
            metadata={"some_key": 1},
            embedding=[0.3] * EMBED_DIM,
        ),
        TextNode(
            text="Hello, world 2!",
            metadata={"some_key": 2},
            embedding=[0.5] * EMBED_DIM,
        ),
        TextNode(
            text="Hello, world 3!",
            metadata={"some_key": "3"},
            embedding=[0.7] * EMBED_DIM,
        ),
    ]
# NOTE(review): duplicate of the ``vector_store`` fixture defined earlier in
# this module — this definition shadows the earlier one.
@pytest.fixture
def vector_store():
    # Unique namespace per test run to avoid cross-test contamination.
    return MoorchehVectorStore(
        api_key=MOORCHEH_API_KEY,
        namespace=f"test-ns-{uuid.uuid4().hex[:8]}",
        namespace_type="vector",
        vector_dimension=EMBED_DIM,
        batch_size=100,
    )
# NOTE(review): duplicate of the ``index_with_nodes`` fixture defined earlier
# in this module — this definition shadows the earlier one.
@pytest.fixture
def index_with_nodes(vector_store, nodes: List[TextNode]):
    index = VectorStoreIndex(
        nodes=nodes,
        storage_context=StorageContext.from_defaults(vector_store=vector_store),
        embed_model=MockEmbedding(embed_dim=EMBED_DIM),
    )
    # Allow asynchronous ingestion on the backend to settle.
    time.sleep(2)
    return index
# Core Tests
# NOTE(review): duplicate of ``test_basic_e2e`` defined earlier in this
# module — this definition shadows the earlier one.
@pytest.mark.skipif(should_skip, reason="MOORCHEH_API_KEY not set")
def test_basic_e2e(index_with_nodes: VectorStoreIndex):
    results = index_with_nodes.as_retriever().retrieve("Hello, world 1!")
    assert len(results) >= 1
# NOTE(review): duplicate of ``test_retrieval_with_filters`` defined earlier
# in this module — this definition shadows the earlier one.
@pytest.mark.skipif(should_skip, reason="MOORCHEH_API_KEY not set")
def test_retrieval_with_filters(index_with_nodes: VectorStoreIndex):
    # OR of two EQ filters -> matches nodes 1 and 2.
    f1 = MetadataFilters(
        filters=[
            MetadataFilter(key="some_key", value=1, operator=FilterOperator.EQ),
            MetadataFilter(key="some_key", value=2, operator=FilterOperator.EQ),
        ],
        condition=FilterCondition.OR,
    )
    assert len(index_with_nodes.as_retriever(filters=f1).retrieve("Hello")) == 2
    # GT 1 -> only node 2 (node 3's value is the string "3").
    f2 = MetadataFilters(
        filters=[
            MetadataFilter(key="some_key", value=1, operator=FilterOperator.GT),
        ]
    )
    assert len(index_with_nodes.as_retriever(filters=f2).retrieve("Hello")) == 1
    # IN [1, 2] -> nodes 1 and 2.
    f3 = MetadataFilters(
        filters=[
            MetadataFilter(key="some_key", value=[1, 2], operator=FilterOperator.IN),
        ]
    )
    assert len(index_with_nodes.as_retriever(filters=f3).retrieve("Hello")) == 2
    # EQ on the string "3" -> only node 3.
    f4 = MetadataFilters(
        filters=[
            MetadataFilter(key="some_key", value="3", operator=FilterOperator.EQ),
        ]
    )
    assert len(index_with_nodes.as_retriever(filters=f4).retrieve("Hello")) == 1
# Additional Tests
# NOTE(review): duplicate of ``test_empty_retrieval`` defined earlier in this
# module — this definition shadows the earlier one.
@pytest.mark.skipif(should_skip, reason="MOORCHEH_API_KEY not set")
def test_empty_retrieval(vector_store):
    index = VectorStoreIndex(
        nodes=[],
        storage_context=StorageContext.from_defaults(vector_store=vector_store),
        embed_model=MockEmbedding(embed_dim=EMBED_DIM),
    )
    assert index.as_retriever().retrieve("Nothing") == []
@pytest.mark.skipif(should_skip, reason="MOORCHEH_API_KEY not set")
def test_namespace_isolation(nodes):
    """Writes to two distinct namespaces stay isolated from each other.

    Fixes: retrieval previously went through ``store1.as_retriever()`` /
    ``store2.as_retriever()`` — vector stores expose no ``as_retriever``;
    that is an index method — and the built indexes were discarded. The
    indexes are now kept and used for retrieval, matching the earlier
    ``test_namespace_isolation`` in this module (which this one shadows).
    """
    ns1, ns2 = f"ns1-{uuid.uuid4().hex[:6]}", f"ns2-{uuid.uuid4().hex[:6]}"
    store1 = MoorchehVectorStore(
        api_key=MOORCHEH_API_KEY,
        namespace=ns1,
        namespace_type="vector",
        vector_dimension=EMBED_DIM,
    )
    store2 = MoorchehVectorStore(
        api_key=MOORCHEH_API_KEY,
        namespace=ns2,
        namespace_type="vector",
        vector_dimension=EMBED_DIM,
    )
    index1 = VectorStoreIndex(
        nodes=nodes[:1],
        storage_context=StorageContext.from_defaults(vector_store=store1),
        embed_model=MockEmbedding(embed_dim=EMBED_DIM),
    )
    index2 = VectorStoreIndex(
        nodes=nodes[1:],
        storage_context=StorageContext.from_defaults(vector_store=store2),
        embed_model=MockEmbedding(embed_dim=EMBED_DIM),
    )
    time.sleep(2)  # let ingestion settle
    r1 = index1.as_retriever().retrieve("Hello")
    r2 = index2.as_retriever().retrieve("Hello")
    assert all("1" in n.text for n in r1)
    assert all("2" in n.text or "3" in n.text for n in r2)
@pytest.mark.skipif(should_skip, reason="MOORCHEH_API_KEY not set")
def test_duplicate_upsert_behavior(vector_store):
    """Inserting the same node id twice still leaves it retrievable.

    Fixes: ``VectorStoreIndex`` and ``MockEmbedding`` were called with
    positional arguments (``storage_context``/``embed_model``/``embed_dim``
    are keyword parameters), and retrieval was attempted via
    ``vector_store.as_retriever()`` — vector stores expose no such method;
    retrieval now goes through the built index.
    """
    node = TextNode(
        id_="fixed-id",
        text="Duplicate node",
        metadata={"key": "val"},
        embedding=[0.1] * EMBED_DIM,
    )
    index = None
    # Insert the identical node twice to exercise upsert semantics.
    for _ in range(2):
        index = VectorStoreIndex(
            nodes=[node],
            storage_context=StorageContext.from_defaults(vector_store=vector_store),
            embed_model=MockEmbedding(embed_dim=EMBED_DIM),
        )
    time.sleep(2)
    assert len(index.as_retriever().retrieve("Duplicate")) >= 1
@pytest.mark.skipif(should_skip, reason="MOORCHEH_API_KEY not set")
def test_conflicting_filters(index_with_nodes):
    """AND-ing two mutually exclusive EQ filters matches nothing."""
    impossible = MetadataFilters(
        filters=[
            MetadataFilter(key="some_key", value=1, operator=FilterOperator.EQ),
            MetadataFilter(key="some_key", value=2, operator=FilterOperator.EQ),
        ],
        condition=FilterCondition.AND,
    )
    matches = index_with_nodes.as_retriever(filters=impossible).retrieve("Hello")
    assert len(matches) == 0
@pytest.mark.skipif(should_skip, reason="MOORCHEH_API_KEY not set")
def test_similarity_vs_exact(index_with_nodes):
    """A fuzzy, non-exact query still surfaces semantically close nodes."""
    matches = index_with_nodes.as_retriever().retrieve("world")
    assert len(matches) > 0
@pytest.mark.skipif(should_skip, reason="MOORCHEH_API_KEY not set")
def test_filter_missing_metadata_key(index_with_nodes):
    """Filtering on a key absent from every node returns no results."""
    absent_key_filter = MetadataFilters(
        filters=[
            MetadataFilter(
                key="nonexistent", value="missing", operator=FilterOperator.EQ
            )
        ]
    )
    retriever = index_with_nodes.as_retriever(filters=absent_key_filter)
    assert len(retriever.retrieve("Hello")) == 0
@pytest.mark.skipif(should_skip, reason="MOORCHEH_API_KEY not set")
def test_large_metadata_dict(vector_store):
    """A node carrying 100 metadata keys round-trips through the store.

    Fixes: keyword arguments for ``VectorStoreIndex``/``MockEmbedding`` (the
    positional slots are not ``storage_context``/``embed_model``/``embed_dim``)
    and retrieval through the index — the raw vector store has no
    ``as_retriever`` method.
    """
    node = TextNode(
        text="Lots of metadata",
        metadata={f"key{i}": f"value{i}" for i in range(100)},
        embedding=[0.4] * EMBED_DIM,
    )
    index = VectorStoreIndex(
        nodes=[node],
        storage_context=StorageContext.from_defaults(vector_store=vector_store),
        embed_model=MockEmbedding(embed_dim=EMBED_DIM),
    )
    time.sleep(2)
    assert len(index.as_retriever().retrieve("metadata")) >= 1
@pytest.mark.skipif(should_skip, reason="MOORCHEH_API_KEY not set")
def test_large_batch_insert():
    """200 nodes inserted with batch_size=50 are retrievable afterwards.

    Fixes: keyword arguments for ``VectorStoreIndex``/``MockEmbedding`` and
    retrieval via the built index — the raw vector store has no
    ``as_retriever`` method. NOTE(review): this definition also shadows the
    earlier ``test_large_batch_insert`` in this module.
    """
    nodes = [
        TextNode(text=f"Node {i}", embedding=[float(i / 100)] * EMBED_DIM)
        for i in range(200)
    ]
    store = MoorchehVectorStore(
        api_key=MOORCHEH_API_KEY,
        namespace=f"batchtest-{uuid.uuid4().hex[:6]}",
        namespace_type="vector",
        vector_dimension=EMBED_DIM,
        batch_size=50,
    )
    index = VectorStoreIndex(
        nodes=nodes,
        storage_context=StorageContext.from_defaults(vector_store=store),
        embed_model=MockEmbedding(embed_dim=EMBED_DIM),
    )
    time.sleep(5)
    assert len(index.as_retriever().retrieve("Node")) >= 10
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-moorcheh/tests/test_vector_stores_moorcheh.py",
"license": "MIT License",
"lines": 427,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-web/tests/test_simple_webreader.py | import pytest
from llama_index.readers.web import SimpleWebPageReader
@pytest.fixture()
def url() -> str:
    """Stable documentation page used as the fetch target."""
    target = "https://docs.llamaindex.ai/en/stable/module_guides/workflow/"
    return target
def test_simple_web_reader(url: str) -> None:
    """Loading a page yields documents whose ids are UUID strings, not the URL."""
    docs = SimpleWebPageReader().load_data(urls=[url])
    assert len(docs) > 0
    first = docs[0]
    assert isinstance(first.id_, str)
    assert first.id_ != url
    assert len(first.id_) == 36  # canonical UUID string length
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-web/tests/test_simple_webreader.py",
"license": "MIT License",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/agent/utils/test_agent_utils.py | import pytest
from typing import Any, List, Type, Optional, Dict
from typing_extensions import override
from pydantic import BaseModel
from llama_index.core.llms import ChatMessage, TextBlock
from llama_index.core.types import Model
from llama_index.core.llms import (
LLMMetadata,
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
LLM,
)
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.tools import ToolSelection
from llama_index.core.agent.utils import (
messages_to_xml_format,
generate_structured_response,
)
class Structure(BaseModel):
    """Minimal structured-output schema used by the tests below."""

    hello: str
    world: int
class TestLLM(LLM):
    """Deterministic stub LLM: replays canned chat responses and a fixed
    structured-output JSON payload.

    NOTE(review): the ``Test*`` name with an ``__init__`` typically triggers a
    pytest "cannot collect test class" warning — confirm whether a rename or
    ``__test__ = False`` is wanted.
    """

    def __init__(self, responses: List[ChatMessage], structured_response: str):
        super().__init__()
        # Underscore attributes bypass pydantic field validation on LLM.
        self._responses = responses
        self._structured_response = structured_response
        self._response_index = 0
    @property
    def metadata(self) -> LLMMetadata:
        # Advertise function-calling support so agent code takes the tool path.
        return LLMMetadata(is_function_calling_model=True)
    async def astream_chat(
        self, messages: List[ChatMessage], **kwargs: Any
    ) -> ChatResponseAsyncGen:
        # Round-robin over the canned responses; empty list yields nothing.
        response_msg = None
        if self._responses:
            response_msg = self._responses[self._response_index]
            self._response_index = (self._response_index + 1) % len(self._responses)
        async def _gen():
            if response_msg:
                yield ChatResponse(
                    message=response_msg,
                    delta=response_msg.content,
                    raw={"content": response_msg.content},
                )
        return _gen()
    async def astream_chat_with_tools(
        self, tools: List[Any], chat_history: List[ChatMessage], **kwargs: Any
    ) -> ChatResponseAsyncGen:
        # Tools are ignored; behaves exactly like astream_chat.
        response_msg = None
        if self._responses:
            response_msg = self._responses[self._response_index]
            self._response_index = (self._response_index + 1) % len(self._responses)
        async def _gen():
            if response_msg:
                yield ChatResponse(
                    message=response_msg,
                    delta=response_msg.content,
                    raw={"content": response_msg.content},
                )
        return _gen()
    def get_tool_calls_from_response(
        self, response: ChatResponse, **kwargs: Any
    ) -> List[ToolSelection]:
        # No canned message carries tool_calls, so this is effectively [].
        return response.message.additional_kwargs.get("tool_calls", [])
    @override
    async def astructured_predict(
        self,
        output_cls: Type[Model],
        prompt: PromptTemplate,
        llm_kwargs: Optional[Dict[str, Any]] = None,
        **prompt_args: Any,
    ) -> Model:
        return output_cls.model_validate_json(self._structured_response)
    @override
    async def structured_predict(
        self,
        output_cls: Type[Model],
        prompt: PromptTemplate,
        llm_kwargs: Optional[Dict[str, Any]] = None,
        **prompt_args: Any,
    ) -> Model:
        # NOTE(review): declared ``async`` although the base ``structured_predict``
        # appears to be the sync variant — confirm this override is intentional.
        return output_cls.model_validate_json(self._structured_response)
    # Remaining abstract LLM methods are stubbed out as no-ops.
    async def achat(self, *args, **kwargs):
        pass
    def chat(self, *args, **kwargs):
        pass
    def stream_chat(self, *args, **kwargs):
        pass
    def complete(self, *args, **kwargs):
        pass
    async def acomplete(self, *args, **kwargs):
        pass
    def stream_complete(self, *args, **kwargs):
        pass
    async def astream_complete(self, *args, **kwargs):
        pass
    def _prepare_chat_with_tools(self, *args, **kwargs):
        return {}
@pytest.fixture
def chat_messages() -> List[ChatMessage]:
    """Four-turn user/assistant conversation used across the XML tests."""
    turns = [
        ("user", "hello"),
        ("assistant", "hello back"),
        ("user", "how are you?"),
        ("assistant", "I am good, thank you."),
    ]
    return [
        ChatMessage(role=role, blocks=[TextBlock(text=text)]) for role, text in turns
    ]
@pytest.fixture()
def chat_messages_sys(chat_messages: List[ChatMessage]) -> List[ChatMessage]:
    """Same conversation, prefixed with a system message."""
    system_msg = ChatMessage(role="system", content="You are a helpful assistant.")
    return [system_msg, *chat_messages]
@pytest.fixture
def xml_string() -> str:
    # Expected XML rendering of ``chat_messages`` plus the trailing formatting
    # instruction; must stay byte-identical to what messages_to_xml_format emits.
    return "<current_conversation>\n\t<user>\n\t\t<message>hello</message>\n\t</user>\n\t<assistant>\n\t\t<message>hello back</message>\n\t</assistant>\n\t<user>\n\t\t<message>how are you?</message>\n\t</user>\n\t<assistant>\n\t\t<message>I am good, thank you.</message>\n\t</assistant>\n</current_conversation>\n\nGiven the conversation, format the output according to the provided schema."
@pytest.fixture
def xml_string_sys() -> str:
    # Same expected XML as ``xml_string`` but including the system turn.
    return "<current_conversation>\n\t<system>\n\t\t<message>You are a helpful assistant.</message>\n\t</system>\n\t<user>\n\t\t<message>hello</message>\n\t</user>\n\t<assistant>\n\t\t<message>hello back</message>\n\t</assistant>\n\t<user>\n\t\t<message>how are you?</message>\n\t</user>\n\t<assistant>\n\t\t<message>I am good, thank you.</message>\n\t</assistant>\n</current_conversation>\n\nGiven the conversation, format the output according to the provided schema."
@pytest.fixture
def structured_response() -> str:
    """JSON payload conforming to the ``Structure`` schema."""
    instance = Structure(hello="test", world=1)
    return instance.model_dump_json()
def test_messages_to_xml(chat_messages: List[ChatMessage], xml_string: str) -> None:
    """The whole conversation collapses into a single XML ChatMessage."""
    formatted = messages_to_xml_format(chat_messages)
    assert len(formatted) == 1
    assert isinstance(formatted[0], ChatMessage)
    joined = "".join(block.text for block in formatted[0].blocks)
    assert joined == xml_string
def test_messages_to_xml_sys(
    chat_messages_sys: List[ChatMessage], xml_string_sys: str
) -> None:
    """A leading system message is kept separate from the XML conversation body."""
    formatted = messages_to_xml_format(chat_messages_sys)
    assert len(formatted) == 2
    assert isinstance(formatted[0], ChatMessage)
    assert formatted[0].role == "system"
    assert formatted[0].content == "You are a helpful assistant."
    joined = "".join(block.text for block in formatted[1].blocks)
    assert joined == xml_string_sys
@pytest.mark.asyncio
async def test_generate_structured_response(
    chat_messages: List[ChatMessage], structured_response: str
) -> None:
    """generate_structured_response round-trips the stub LLM's JSON payload."""
    stub_llm = TestLLM(
        responses=[ChatMessage(role="assistant", content="Hello World!")],
        structured_response=structured_response,
    )
    result = await generate_structured_response(
        messages=chat_messages, llm=stub_llm, output_cls=Structure
    )
    expected = Structure.model_validate_json(structured_response)
    assert Structure.model_validate(result) == expected
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/agent/utils/test_agent_utils.py",
"license": "MIT License",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/agent/workflow/test_agent_with_structured_output.py | import pytest
import os
from typing import Any, List, Type, Optional, Dict
from typing_extensions import override
from pydantic import BaseModel, Field
from llama_index.core.types import Model
from llama_index.core.llms import (
LLMMetadata,
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
LLM,
)
from llama_index.core.agent.workflow import (
AgentWorkflow,
AgentOutput,
AgentStreamStructuredOutput,
)
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.tools import ToolSelection
from llama_index.core.agent.workflow import FunctionAgent
# Live OpenAI tests below are skipped unless an API key is configured.
skip_condition = os.getenv("OPENAI_API_KEY", None) is None
class TestLLM(LLM):
    """Deterministic stub LLM: replays canned chat responses and a fixed
    structured-output JSON payload.

    NOTE(review): the ``Test*`` name with an ``__init__`` typically triggers a
    pytest "cannot collect test class" warning — confirm whether a rename or
    ``__test__ = False`` is wanted.
    """

    def __init__(self, responses: List[ChatMessage], structured_response: str):
        super().__init__()
        # Underscore attributes bypass pydantic field validation on LLM.
        self._responses = responses
        self._structured_response = structured_response
        self._response_index = 0
    @property
    def metadata(self) -> LLMMetadata:
        # Advertise function-calling support so agent code takes the tool path.
        return LLMMetadata(is_function_calling_model=True)
    async def astream_chat(
        self, messages: List[ChatMessage], **kwargs: Any
    ) -> ChatResponseAsyncGen:
        # Round-robin over the canned responses; empty list yields nothing.
        response_msg = None
        if self._responses:
            response_msg = self._responses[self._response_index]
            self._response_index = (self._response_index + 1) % len(self._responses)
        async def _gen():
            if response_msg:
                yield ChatResponse(
                    message=response_msg,
                    delta=response_msg.content,
                    raw={"content": response_msg.content},
                )
        return _gen()
    async def astream_chat_with_tools(
        self, tools: List[Any], chat_history: List[ChatMessage], **kwargs: Any
    ) -> ChatResponseAsyncGen:
        # Tools are ignored; behaves exactly like astream_chat.
        response_msg = None
        if self._responses:
            response_msg = self._responses[self._response_index]
            self._response_index = (self._response_index + 1) % len(self._responses)
        async def _gen():
            if response_msg:
                yield ChatResponse(
                    message=response_msg,
                    delta=response_msg.content,
                    raw={"content": response_msg.content},
                )
        return _gen()
    def get_tool_calls_from_response(
        self, response: ChatResponse, **kwargs: Any
    ) -> List[ToolSelection]:
        # No canned message carries tool_calls, so this is effectively [].
        return response.message.additional_kwargs.get("tool_calls", [])
    @override
    async def astructured_predict(
        self,
        output_cls: Type[Model],
        prompt: PromptTemplate,
        llm_kwargs: Optional[Dict[str, Any]] = None,
        **prompt_args: Any,
    ) -> Model:
        return output_cls.model_validate_json(self._structured_response)
    @override
    async def structured_predict(
        self,
        output_cls: Type[Model],
        prompt: PromptTemplate,
        llm_kwargs: Optional[Dict[str, Any]] = None,
        **prompt_args: Any,
    ) -> Model:
        # NOTE(review): declared ``async`` although the base ``structured_predict``
        # appears to be the sync variant — confirm this override is intentional.
        return output_cls.model_validate_json(self._structured_response)
    # Remaining abstract LLM methods are stubbed out as no-ops.
    async def achat(self, *args, **kwargs):
        pass
    def chat(self, *args, **kwargs):
        pass
    def stream_chat(self, *args, **kwargs):
        pass
    def complete(self, *args, **kwargs):
        pass
    async def acomplete(self, *args, **kwargs):
        pass
    def stream_complete(self, *args, **kwargs):
        pass
    async def astream_complete(self, *args, **kwargs):
        pass
    def _prepare_chat_with_tools(self, *args, **kwargs):
        return {}
class Structure(BaseModel):
    """Minimal structured-output schema the stub LLM returns."""

    hello: str
    world: int
@pytest.fixture()
def function_agent_output_cls():
    """FunctionAgent configured with ``output_cls`` for structured output."""
    stub_llm = TestLLM(
        responses=[
            ChatMessage(role="assistant", content="Success with the FunctionAgent")
        ],
        structured_response='{"hello":"hello","world":1}',
    )
    return FunctionAgent(
        name="retriever",
        description="Manages data retrieval",
        system_prompt="You are a retrieval assistant.",
        llm=stub_llm,
        output_cls=Structure,
    )
def structured_function_fn(*args, **kwargs) -> dict:
    """Synchronous structured-output hook returning a fixed payload."""
    payload = Structure(hello="bonjour", world=2)
    return payload.model_dump()
async def astructured_function_fn(*args, **kwargs) -> dict:
    """Asynchronous structured-output hook returning a fixed payload."""
    payload = Structure(hello="guten tag", world=3)
    return payload.model_dump()
@pytest.fixture()
def function_agent_struct_fn():
    """FunctionAgent using the synchronous ``structured_output_fn`` hook."""
    stub_llm = TestLLM(
        responses=[
            ChatMessage(role="assistant", content="Success with the FunctionAgent")
        ],
        structured_response='{"hello":"hello","world":1}',
    )
    return FunctionAgent(
        name="retriever",
        description="Manages data retrieval",
        system_prompt="You are a retrieval assistant.",
        llm=stub_llm,
        structured_output_fn=structured_function_fn,
    )
@pytest.fixture()
def function_agent_astruct_fn():
    """FunctionAgent using the asynchronous ``structured_output_fn`` hook."""
    stub_llm = TestLLM(
        responses=[
            ChatMessage(role="assistant", content="Success with the FunctionAgent")
        ],
        structured_response='{"hello":"hello","world":1}',
    )
    return FunctionAgent(
        name="retriever",
        description="Manages data retrieval",
        system_prompt="You are a retrieval assistant.",
        llm=stub_llm,
        structured_output_fn=astructured_function_fn,
    )
@pytest.mark.asyncio
async def test_output_cls_agent(function_agent_output_cls: FunctionAgent):
    """An agent with output_cls streams a structured-output event and result."""
    handler = function_agent_output_cls.run(user_msg="test")
    saw_structured_stream = False
    async for event in handler.stream_events():
        saw_structured_stream = saw_structured_stream or isinstance(
            event, AgentStreamStructuredOutput
        )
    assert saw_structured_stream
    response = await handler
    assert "Success with the FunctionAgent" in str(response.response)
    assert response.get_pydantic_model(Structure) == Structure(hello="hello", world=1)
@pytest.mark.asyncio
async def test_structured_fn_agent(function_agent_struct_fn: FunctionAgent):
    """The sync structured_output_fn overrides the LLM's structured payload."""
    handler = function_agent_struct_fn.run(user_msg="test")
    saw_structured_stream = False
    async for event in handler.stream_events():
        saw_structured_stream = saw_structured_stream or isinstance(
            event, AgentStreamStructuredOutput
        )
    assert saw_structured_stream
    response = await handler
    assert "Success with the FunctionAgent" in str(response.response)
    assert response.get_pydantic_model(Structure) == Structure(hello="bonjour", world=2)
@pytest.mark.asyncio
async def test_astructured_fn_agent(function_agent_astruct_fn: FunctionAgent):
    """The async structured_output_fn overrides the LLM's structured payload.

    Fix: ``streaming_event`` was never initialized before the event loop, so
    the test raised ``UnboundLocalError`` instead of a clean assertion failure
    whenever no ``AgentStreamStructuredOutput`` event was emitted (the sibling
    tests above initialize it to ``False``).
    """
    handler = function_agent_astruct_fn.run(user_msg="test")
    streaming_event = False
    async for event in handler.stream_events():
        if isinstance(event, AgentStreamStructuredOutput):
            streaming_event = True
    assert streaming_event
    response = await handler
    assert "Success with the FunctionAgent" in str(response.response)
    assert response.get_pydantic_model(Structure) == Structure(
        hello="guten tag", world=3
    )
@pytest.mark.asyncio
async def test_structured_output_agentworkflow(
    function_agent_output_cls: FunctionAgent,
) -> None:
    """An AgentWorkflow with output_cls emits a structured stream event."""
    workflow = AgentWorkflow(
        agents=[function_agent_output_cls],
        root_agent=function_agent_output_cls.name,
        output_cls=Structure,
    )
    handler = workflow.run(user_msg="test")
    saw_structured_stream = False
    async for event in handler.stream_events():
        saw_structured_stream = saw_structured_stream or isinstance(
            event, AgentStreamStructuredOutput
        )
    assert saw_structured_stream
    response = await handler
    assert "Success with the FunctionAgent" in str(response.response)
    assert response.get_pydantic_model(Structure) == Structure(hello="hello", world=1)
@pytest.mark.asyncio
async def test_structured_output_fn_agentworkflow(
    function_agent_output_cls: FunctionAgent,
) -> None:
    """A workflow-level sync structured_output_fn overrides the agent output."""
    workflow = AgentWorkflow(
        agents=[function_agent_output_cls],
        root_agent=function_agent_output_cls.name,
        structured_output_fn=structured_function_fn,
    )
    handler = workflow.run(user_msg="test")
    async for _ in handler.stream_events():
        pass  # drain events; only the final result matters here
    response = await handler
    assert "Success with the FunctionAgent" in str(response.response)
    assert response.get_pydantic_model(Structure) == Structure(hello="bonjour", world=2)
@pytest.mark.asyncio
async def test_astructured_output_fn_agentworkflow(
    function_agent_output_cls: FunctionAgent,
) -> None:
    """A workflow-level async structured_output_fn overrides the agent output."""
    workflow = AgentWorkflow(
        agents=[function_agent_output_cls],
        root_agent=function_agent_output_cls.name,
        structured_output_fn=astructured_function_fn,
    )
    handler = workflow.run(user_msg="test")
    async for _ in handler.stream_events():
        pass  # drain events; only the final result matters here
    response = await handler
    assert "Success with the FunctionAgent" in str(response.response)
    assert response.get_pydantic_model(Structure) == Structure(
        hello="guten tag", world=3
    )
@pytest.mark.asyncio
@pytest.mark.skipif(condition=skip_condition, reason="OPENAI_API_KEY is not available.")
async def test_multi_agent_openai() -> None:
    # Live integration test: needs a real OpenAI key and network access.
    # A dispatcher agent hands off to a calculator agent, and the workflow
    # must coerce the final answer into the MathResult schema.
    from llama_index.llms.openai import OpenAI
    class MathResult(BaseModel):
        operation: str = Field(description="The operation performed")
        result: int = Field(description="The result of the operation")
    main_agent = FunctionAgent(
        llm=OpenAI(model="gpt-4.1"),
        name="MainAgent",
        description="Useful for dispatching tasks.",
        system_prompt="You are the MainAgent. Your task is to distribute tasks to other agents. You must always dispatch tasks to secondary agents. You must never perform tasks yourself.",
        tools=[],
        can_handoff_to=["CalculatorAgent"],
    )
    def multiply(x: int, j: int) -> int:
        """
        Multiply two numbers together.
        Args:
            x (int): first factor
            j (int): second factor
        Returns:
            int: the result of the multiplication
        """
        return x * j
    multiplication_agent = FunctionAgent(
        llm=OpenAI(model="gpt-4.1"),
        name="CalculatorAgent",
        description="Useful for performing operations.",
        system_prompt="You are the CalculatorAgent. Your task is to calculate the results of an operation, if needed using the `multiply`tool you are provided with.",
        tools=[multiply],
    )
    workflow = AgentWorkflow(
        agents=[main_agent, multiplication_agent],
        root_agent=main_agent.name,
        output_cls=MathResult,
    )
    result = await workflow.run(user_msg="What is 30 multiplied by 60?")
    assert isinstance(result, AgentOutput)
    assert isinstance(result.structured_response, dict)
    assert isinstance(result.get_pydantic_model(MathResult), MathResult)
    assert result.get_pydantic_model(MathResult).result == 1800
@pytest.mark.asyncio
async def test_from_tools_or_functions() -> None:
    """from_tools_or_functions supports both the output_cls and structured_output_fn paths."""

    def multiply(x: int, j: int) -> int:
        """
        Multiply two numbers together.

        Args:
            x (int): first factor
            j (int): second factor

        Returns:
            int: the result of the multiplication

        """
        return x * j

    # Path 1: structured output via output_cls — the LLM's structured_response
    # is surfaced unchanged.
    wf: AgentWorkflow = AgentWorkflow.from_tools_or_functions(
        tools_or_functions=[multiply],
        system_prompt="You are an agent.",
        output_cls=Structure,
        llm=TestLLM(
            responses=[
                ChatMessage(role="assistant", content="Success with the workflow!")
            ],
            structured_response=Structure(hello="hello", world=3).model_dump_json(),
        ),
    )
    response = await wf.run(user_msg="Hello world!")
    assert "Success with the workflow!" in str(response.response)
    assert response.get_pydantic_model(Structure) == Structure(hello="hello", world=3)

    # Path 2: structured output via structured_output_fn — the callback output
    # overrides whatever the LLM returned as structured_response.
    wf1: AgentWorkflow = AgentWorkflow.from_tools_or_functions(
        tools_or_functions=[multiply],
        system_prompt="You are an agent.",
        structured_output_fn=structured_function_fn,
        llm=TestLLM(
            responses=[
                ChatMessage(role="assistant", content="Success with the workflow!")
            ],
            structured_response=Structure(hello="hello", world=3).model_dump_json(),
        ),
    )
    response = await wf1.run(user_msg="Hello world!")
    assert "Success with the workflow!" in str(response.response)
    assert response.get_pydantic_model(Structure) == Structure(hello="bonjour", world=2)
@pytest.mark.asyncio
@pytest.mark.skipif(condition=skip_condition, reason="OPENAI_API_KEY is not available.")
async def test_multi_agent_openai_from_tools() -> None:
    """Live-API test: from_tools_or_functions builds a single-agent workflow with a structured output_cls."""
    from llama_index.llms.openai import OpenAI

    class MathResult(BaseModel):
        # Schema the workflow's output_cls must populate.
        operation: str = Field(description="The operation performed")
        result: int = Field(description="The result of the operation")

    def multiply(x: int, j: int) -> int:
        """
        Multiply two numbers together.

        Args:
            x (int): first factor
            j (int): second factor

        Returns:
            int: the result of the multiplication

        """
        return x * j

    multiplication_wf: AgentWorkflow = AgentWorkflow.from_tools_or_functions(
        llm=OpenAI(model="gpt-4.1"),
        system_prompt="You are the CalculatorAgent. Your task is to calculate the results of an operation, if needed using the `multiply`tool you are provided with.",
        tools_or_functions=[multiply],
        output_cls=MathResult,
    )

    result = await multiplication_wf.run(user_msg="What is 30 multiplied by 60?")
    # structured_response is stored as a plain dict; get_pydantic_model re-validates it.
    assert isinstance(result, AgentOutput)
    assert isinstance(result.structured_response, dict)
    assert isinstance(result.get_pydantic_model(MathResult), MathResult)
    assert result.get_pydantic_model(MathResult).result == 1800
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/agent/workflow/test_agent_with_structured_output.py",
"license": "MIT License",
"lines": 345,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/vector_stores/test_utils.py | from typing import Any
import pytest
from llama_index.core.schema import (
BaseNode,
Document,
MediaResource,
Node,
NodeRelationship,
TextNode,
ImageNode,
IndexNode,
)
from llama_index.core.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
@pytest.fixture
def source_node():
    """A plain Document used as the source/ref doc for the other node fixtures."""
    return Document(id_="source_node", text="Hello, world!")
@pytest.fixture
def text_node(source_node: Document):
    """A TextNode whose ref_doc_id points at the source_node fixture."""
    return TextNode(
        id_="text_node", text="Hello, world!", ref_doc_id=source_node.ref_doc_id
    )
@pytest.fixture
def image_node():
    """An ImageNode referencing an image file by path (file need not be read here)."""
    return ImageNode(id_="image_node", image="tests/data/images/dog.jpg")
@pytest.fixture
def index_node():
    """An IndexNode carrying both text and a referenced index_id."""
    return IndexNode(id_="index_node", text="Hello, world!", index_id="123")
@pytest.fixture
def multimedia_node():
    """A modern Node storing its text in a MediaResource rather than a plain field."""
    return Node(
        id_="multimedia_node", text_resource=MediaResource(text="Hello, world!")
    )
def test_text_node_serdes(text_node: TextNode, source_node: Document):
    """A TextNode round-trips through node_to_metadata_dict / metadata_dict_to_node."""
    text_node.relationships[NodeRelationship.SOURCE] = (
        source_node.as_related_node_info()
    )

    metadata = node_to_metadata_dict(text_node)

    # All three legacy ref-doc aliases must point at the source document.
    for alias in ("document_id", "ref_doc_id", "doc_id"):
        assert metadata[alias] == source_node.node_id
    assert "text_node" in metadata["_node_content"]
    assert metadata["_node_type"] == text_node.class_name()

    restored = metadata_dict_to_node(metadata)
    assert isinstance(restored, TextNode)
    assert restored.text == text_node.text
def test_image_node_serdes(image_node: ImageNode):
    """An ImageNode survives serialization to and from a metadata dict."""
    metadata = node_to_metadata_dict(image_node)
    assert "image_node" in metadata["_node_content"]
    assert metadata["_node_type"] == image_node.class_name()

    restored = metadata_dict_to_node(metadata)
    assert isinstance(restored, ImageNode)
    assert restored.image == image_node.image
def test_index_node_serdes(index_node: IndexNode):
    """An IndexNode keeps both its text and index_id across a serde round-trip."""
    metadata = node_to_metadata_dict(index_node)
    assert "index_node" in metadata["_node_content"]
    assert metadata["_node_type"] == index_node.class_name()

    restored = metadata_dict_to_node(metadata)
    assert isinstance(restored, IndexNode)
    assert restored.text == index_node.text
    assert restored.index_id == index_node.index_id
def test_multimedia_node_serdes(multimedia_node: Node):
    """A Node with a MediaResource text payload round-trips through the metadata dict."""
    metadata: dict[str, Any] = node_to_metadata_dict(multimedia_node)
    assert "multimedia_node" in metadata["_node_content"]
    assert metadata["_node_type"] == multimedia_node.class_name()

    restored: BaseNode = metadata_dict_to_node(metadata)
    assert isinstance(restored, Node)

    # The text payload must come back inside a MediaResource, not a bare field.
    resource = restored.text_resource
    assert resource is not None
    assert isinstance(resource, MediaResource)
    assert resource.text is not None
    assert resource.text == multimedia_node.text_resource.text
def test_flat_metadata_serdes(text_node: TextNode):
    """Nested metadata is rejected when flat_metadata=True is requested."""
    text_node.metadata = {"key": {"subkey": "value"}}
    with pytest.raises(ValueError):
        node_to_metadata_dict(text_node, flat_metadata=True)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/vector_stores/test_utils.py",
"license": "MIT License",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/postprocessor/llama-index-postprocessor-flashrank-rerank/llama_index/postprocessor/flashrank_rerank/base.py | from typing import Any
from typing_extensions import override
from flashrank import Ranker, RerankRequest
import llama_index.core.instrumentation as instrument
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle
from llama_index.core.instrumentation.events import BaseEvent
# Module-level dispatcher used to emit the rerank instrumentation events below.
dispatcher = instrument.get_dispatcher(__name__)
class FlashRerankingQueryEvent(BaseEvent):
    """Instrumentation event emitted just before the FlashRank rerank call."""

    nodes: list[NodeWithScore] = Field(..., description="Nodes to rerank.")
    model_name: str = Field(..., description="Model name.")
    query_str: str = Field(..., description="Query string.")
    top_k: int = Field(..., description="Top k nodes to return.")
class FlashRerankEndEvent(BaseEvent):
    """Instrumentation event emitted with the final, truncated rerank result."""

    nodes: list[NodeWithScore] = Field(..., description="Nodes to rerank.")
class FlashRankRerank(BaseNodePostprocessor):
    """Node postprocessor that reranks retrieved nodes with a local FlashRank model."""

    model: str = Field(
        description="FlashRank model name.", default="ms-marco-TinyBERT-L-2-v2"
    )
    top_n: int = Field(
        description="Number of nodes to return sorted by score.", default=20
    )
    max_length: int = Field(
        description="Maximum length of passage text passed to the reranker.",
        default=512,
    )

    # Underlying flashrank.Ranker, built once after pydantic validation.
    _reranker: Ranker = PrivateAttr()

    @override
    def model_post_init(self, context: Any, /) -> None:  # pyright: ignore[reportAny]
        self._reranker = Ranker(model_name=self.model, max_length=self.max_length)

    @classmethod
    @override
    def class_name(cls) -> str:
        return "FlashRankRerank"

    @dispatcher.span
    @override
    def _postprocess_nodes(
        self,
        nodes: list[NodeWithScore],
        query_bundle: QueryBundle | None = None,
    ) -> list[NodeWithScore]:
        """
        Rerank ``nodes`` against the query and return the top ``top_n``.

        Raises:
            ValueError: if no query bundle is supplied, or if the reranker
                returns a different number of scores than nodes.
        """
        if query_bundle is None:
            raise ValueError("Missing query bundle in extra info.")
        if len(nodes) == 0:
            return []

        query_and_nodes: RerankRequest = RerankRequest(
            query=query_bundle.query_str,
            passages=[
                {
                    "id": node.node.id_,
                    "text": node.node.get_content(metadata_mode=MetadataMode.EMBED),
                }
                for node in nodes
            ],
        )

        # Emit a custom instrumentation event (BaseEvent subclass defined above)
        # before handing off to FlashRank.
        dispatcher.event(
            FlashRerankingQueryEvent(
                nodes=nodes,
                model_name=self.model,
                query_str=query_bundle.query_str,
                top_k=self.top_n,
            )
        )

        scores = self._reranker.rerank(query_and_nodes)
        scores_by_id = {score["id"]: score["score"] for score in scores}

        if len(scores) != len(nodes):
            msg = "Number of scores and nodes do not match."
            raise ValueError(msg)

        # Overwrite retrieval scores with rerank scores, then sort descending
        # (falsy/None scores sort with key 0) and truncate to top_n.
        for node in nodes:
            node.score = scores_by_id[node.node.id_]

        new_nodes = sorted(nodes, key=lambda x: -x.score if x.score else 0)[
            : self.top_n
        ]
        dispatcher.event(FlashRerankEndEvent(nodes=new_nodes))
        return new_nodes
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/postprocessor/llama-index-postprocessor-flashrank-rerank/llama_index/postprocessor/flashrank_rerank/base.py",
"license": "MIT License",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/postprocessor/llama-index-postprocessor-flashrank-rerank/tests/test_postprocessor_flashrank_rerank.py | from llama_index.core.schema import (
MediaResource,
Node,
NodeWithScore,
QueryBundle,
)
from llama_index.postprocessor.flashrank_rerank import FlashRankRerank
def test_init():
    """A FlashRankRerank can be constructed with all defaults."""
    assert FlashRankRerank() is not None
def test_postprocess_nodes():
    """Reranking reorders nodes by query relevance, overriding bad retrieval scores."""
    reranker = FlashRankRerank()
    query_bundle = QueryBundle(
        query_str="I'm visiting New York City, what is the best place to get Bagels?"
    )
    # Simulate bad results from a poor embedding model / query:
    # the most relevant passage (node_two) has the lowest retrieval score.
    node_one = NodeWithScore(
        node=Node(
            text_resource=MediaResource(text="I'm just a Node i am only a Node"),
            id_="1",
        ),
        score=0.9,
    )
    node_two = NodeWithScore(
        node=Node(
            text_resource=MediaResource(
                text="The best place to get Bagels in New York City"
            ),
            id_="2",
        ),
        score=0.3,
    )
    node_three = NodeWithScore(
        node=Node(
            text_resource=MediaResource(
                text="A Latte without milk is just an espresso"
            ),
            id_="3",
        ),
        score=1.0,
    )
    nodes = [node_one, node_two, node_three]

    reranked_nodes = reranker.postprocess_nodes(nodes, query_bundle)

    # The reranker should surface the on-topic passage first.
    assert reranked_nodes is not None
    assert len(reranked_nodes) == 3
    assert reranked_nodes[0] == node_two
    assert reranked_nodes[1] == node_one
    assert reranked_nodes[2] == node_three
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/postprocessor/llama-index-postprocessor-flashrank-rerank/tests/test_postprocessor_flashrank_rerank.py",
"license": "MIT License",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/storage/docstore/llama-index-storage-docstore-duckdb/llama_index/storage/docstore/duckdb/base.py | from typing import Optional
from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.core.storage.docstore.types import DEFAULT_BATCH_SIZE
from llama_index.storage.kvstore.duckdb import DuckDBKVStore
class DuckDBDocumentStore(KVDocumentStore):
    """
    DuckDB Document (Node) store.

    A DuckDB store for Document and Node objects.

    Args:
        duckdb_kvstore (DuckDBKVStore): DuckDB key-value store
        namespace (str): namespace for the docstore
        batch_size (int): number of documents written per batch

    """

    def __init__(
        self,
        duckdb_kvstore: DuckDBKVStore,
        namespace: Optional[str] = None,
        batch_size: int = DEFAULT_BATCH_SIZE,
    ) -> None:
        """Init a DuckDBDocumentStore."""
        super().__init__(duckdb_kvstore, namespace=namespace, batch_size=batch_size)
        # Suffix the collection with "/doc" so a docstore and an index store
        # sharing the same DuckDB KV table do not collide.
        self._node_collection = f"{self._namespace}/doc"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/docstore/llama-index-storage-docstore-duckdb/llama_index/storage/docstore/duckdb/base.py",
"license": "MIT License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/storage/docstore/llama-index-storage-docstore-duckdb/tests/test_storage_docstore_duckdb.py | from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.storage.docstore.duckdb import DuckDBDocumentStore
def test_class():
names_of_base_classes = [b.__name__ for b in DuckDBDocumentStore.__mro__]
assert KVDocumentStore.__name__ in names_of_base_classes
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/docstore/llama-index-storage-docstore-duckdb/tests/test_storage_docstore_duckdb.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/storage/index_store/llama-index-storage-index-store-duckdb/llama_index/storage/index_store/duckdb/base.py | from typing import Optional
from llama_index.core.storage.index_store.keyval_index_store import (
KVIndexStore,
DEFAULT_COLLECTION_SUFFIX,
)
from llama_index.storage.kvstore.duckdb import DuckDBKVStore
class DuckDBIndexStore(KVIndexStore):
    """
    DuckDB Index store.

    Args:
        duckdb_kvstore (DuckDBKVStore): DuckDB key-value store
        namespace (str): namespace for the index store
        collection_suffix (str): optional custom suffix for the collection name

    """

    def __init__(
        self,
        duckdb_kvstore: DuckDBKVStore,
        namespace: Optional[str] = None,
        collection_suffix: Optional[str] = None,
    ) -> None:
        """Init a DuckDBIndexStore."""
        super().__init__(
            duckdb_kvstore, namespace=namespace, collection_suffix=collection_suffix
        )
        # When no custom suffix was given, rename the default collection to
        # "<namespace>/index" so it cannot collide with the DuckDB docstore.
        if self._collection.endswith(DEFAULT_COLLECTION_SUFFIX):
            self._collection = f"{self._namespace}/index"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/index_store/llama-index-storage-index-store-duckdb/llama_index/storage/index_store/duckdb/base.py",
"license": "MIT License",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/storage/index_store/llama-index-storage-index-store-duckdb/tests/test_storage_index_store_duckdb.py | from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.index_store.duckdb import DuckDBIndexStore
def test_class():
names_of_base_classes = [b.__name__ for b in DuckDBIndexStore.__mro__]
assert KVIndexStore.__name__ in names_of_base_classes
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/index_store/llama-index-storage-index-store-duckdb/tests/test_storage_index_store_duckdb.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/storage/kvstore/llama-index-storage-kvstore-duckdb/llama_index/storage/kvstore/duckdb/base.py | import asyncio
import json
import logging
import threading
from pathlib import Path
from typing import Any, Optional
from typing_extensions import override
import duckdb
import pyarrow
from duckdb import (
ColumnExpression,
ConstantExpression,
Expression,
)
from llama_index.core.storage.kvstore.types import (
DEFAULT_COLLECTION,
BaseKVStore,
)
# Module logger and the default number of rows written per put_all batch.
logger = logging.getLogger(__name__)

DEFAULT_BATCH_SIZE = 128
class DuckDBTableIncorrectColumnsError(Exception):
    """Raised when an existing DuckDB table lacks the columns this store requires."""

    def __init__(
        self, table_name: str, expected_columns: list[str], actual_columns: list[str]
    ):
        message = (
            f"Table {table_name} has incorrect columns. "
            f"Expected {expected_columns}, got {actual_columns}."
        )
        super().__init__(message)
        # Keep the raw details available for programmatic inspection.
        self.table_name = table_name
        self.expected_columns = expected_columns
        self.actual_columns = actual_columns
class DuckDBKVStore(BaseKVStore):
    """
    DuckDB-backed key-value store.

    Values are stored as JSON rows in a single table keyed by
    ``(key, collection)``, so writes on an existing key are upserts.

    Args:
        database_name (str): DuckDB database name; ":memory:" (the default)
            gives a transient in-memory database, otherwise a file of this
            name is created under ``persist_dir``.
        table_name (str): name of the key-value table.
        persist_dir (str): directory holding the database file; created on
            demand for non-in-memory databases.
        client (Optional[duckdb.DuckDBPyConnection]): existing connection to
            reuse; a cursor is taken so the caller's connection is untouched.

    Raises:
        DuckDBTableIncorrectColumnsError: if a pre-existing table with this
            name is missing the required ``key``/``value`` columns.

    Examples:
        >>> from llama_index.storage.kvstore.duckdb import DuckDBKVStore
        >>> duckdb_kv_store = DuckDBKVStore(database_name=":memory:")

    """

    database_name: str
    table_name: str
    persist_dir: str
    # Process-wide base connection; each thread derives its own cursor from it.
    _shared_conn: Optional[duckdb.DuckDBPyConnection] = None
    # NOTE(review): never read anywhere in this class; kept for compatibility.
    _is_initialized: bool = False

    def __init__(
        self,
        database_name: str = ":memory:",
        table_name: str = "keyvalue",
        # https://duckdb.org/docs/extensions/full_text_search
        persist_dir: str = "./storage",
        client: Optional[duckdb.DuckDBPyConnection] = None,
        **kwargs: Any,  # noqa: ARG002
    ) -> None:
        """Init params."""
        if client is not None:
            # Use a cursor so this store never mutates the caller's connection.
            self._shared_conn = client.cursor()
        self.database_name = database_name
        self.table_name = table_name
        self.persist_dir = persist_dir
        self._thread_local = threading.local()
        _ = self._initialize_table(self.client, self.table_name)

    @classmethod
    def from_vector_store(
        cls, duckdb_vector_store, table_name: str = "keyvalue"
    ) -> "DuckDBKVStore":
        """
        Load a DuckDBKVStore from a DuckDB Client.

        Args:
            duckdb_vector_store (DuckDBVectorStore): vector store whose
                database/connection should be shared with this KV store.
            table_name (str): name of the key-value table.

        """
        from llama_index.vector_stores.duckdb.base import DuckDBVectorStore

        assert isinstance(duckdb_vector_store, DuckDBVectorStore)
        return cls(
            database_name=duckdb_vector_store.database_name,
            table_name=table_name,
            persist_dir=duckdb_vector_store.persist_dir,
            client=duckdb_vector_store.client,
        )

    @property
    def client(self) -> duckdb.DuckDBPyConnection:
        """Return a connection cursor bound to the current thread."""
        if self._shared_conn is None:
            self._shared_conn = self._connect(self.database_name, self.persist_dir)
        # DuckDB connections should not be shared across threads; hand each
        # thread its own cursor over the shared base connection.
        if not hasattr(self._thread_local, "conn") or self._thread_local.conn is None:
            self._thread_local.conn = self._shared_conn.cursor()
        return self._thread_local.conn

    @classmethod
    def _connect(
        cls, database_name: str, persist_dir: str
    ) -> duckdb.DuckDBPyConnection:
        """Connect to the DuckDB database -- create the data persistence directory if it doesn't exist."""
        database_connection = database_name
        if database_name != ":memory:":
            persist_path = Path(persist_dir)
            if not persist_path.exists():
                persist_path.mkdir(parents=True, exist_ok=True)
            database_connection = str(persist_path / database_name)
        return duckdb.connect(database_connection)

    @property
    def table(self) -> duckdb.DuckDBPyRelation:
        """Return the table for the connection to the DuckDB database."""
        return self.client.table(self.table_name)

    @classmethod
    def _initialize_table(
        cls, conn: duckdb.DuckDBPyConnection, table_name: str
    ) -> duckdb.DuckDBPyRelation:
        """Initialize the DuckDB Database, extensions, and documents table."""
        # The JSON extension needs a home directory to install into.
        home_dir = Path.home()
        conn.execute(f"SET home_directory='{home_dir}';")
        conn.install_extension("json")
        conn.load_extension("json")
        _ = (
            conn.begin()
            .execute(f"""
                CREATE TABLE IF NOT EXISTS {table_name} (
                    key VARCHAR,
                    collection VARCHAR,
                    value JSON,
                    PRIMARY KEY (key, collection)
                );
                CREATE INDEX IF NOT EXISTS collection_idx ON {table_name} (collection);
            """)
            .commit()
        )
        # Guard against a pre-existing table with an incompatible schema.
        table = conn.table(table_name)
        required_columns = ["key", "value"]
        table_columns = table.describe().columns
        for column in required_columns:
            if column not in table_columns:
                raise DuckDBTableIncorrectColumnsError(
                    table_name, required_columns, table_columns
                )
        return table

    @override
    def put(self, key: str, val: dict, collection: str = DEFAULT_COLLECTION) -> None:
        """
        Put a key-value pair into the store.

        Args:
            key (str): key
            val (dict): value
            collection (str): collection name

        """
        self.put_all([(key, val)], collection)

    @override
    async def aput(
        self, key: str, val: dict, collection: str = DEFAULT_COLLECTION
    ) -> None:
        """
        Put a key-value pair into the store.

        Args:
            key (str): key
            val (dict): value
            collection (str): collection name

        """
        await asyncio.to_thread(self.put, key, val, collection)

    @override
    def put_all(
        self,
        kv_pairs: list[tuple[str, dict]],
        collection: str = DEFAULT_COLLECTION,
        batch_size: int = DEFAULT_BATCH_SIZE,
    ) -> None:
        """
        Put a dictionary of key-value pairs into the store.

        Args:
            kv_pairs (List[Tuple[str, dict]]): key-value pairs
            collection (str): collection name
            batch_size (int): maximum number of rows per INSERT statement

        """
        if len(kv_pairs) == 0:
            return

        # Fix: honor batch_size (previously accepted but ignored) so very
        # large writes are chunked instead of building one giant Arrow table.
        for start in range(0, len(kv_pairs), batch_size):
            rows = [
                {"key": key, "collection": collection, "value": json.dumps(value)}
                for key, value in kv_pairs[start : start + batch_size]
            ]
            # Referenced by name in the SQL below via DuckDB's replacement scan.
            arrow_table = pyarrow.Table.from_pylist(rows)
            _ = self.client.sql(
                query=f"""
                INSERT OR REPLACE INTO {self.table.alias}
                SELECT * from arrow_table;
                """,
            )

    @override
    async def aput_all(
        self,
        kv_pairs: list[tuple[str, dict]],
        collection: str = DEFAULT_COLLECTION,
        batch_size: int = DEFAULT_BATCH_SIZE,
    ) -> None:
        """
        Put a dictionary of key-value pairs into the store.

        Args:
            kv_pairs (List[Tuple[str, dict]]): key-value pairs
            collection (str): collection name
            batch_size (int): maximum number of rows per INSERT statement

        """
        await asyncio.to_thread(self.put_all, kv_pairs, collection, batch_size)

    @override
    def get(self, key: str, collection: str = DEFAULT_COLLECTION) -> Optional[dict]:
        """
        Get a value from the store.

        Args:
            key (str): key
            collection (str): collection name

        """
        expression: Expression = (
            ColumnExpression("collection")
            .__eq__(ConstantExpression(collection))
            .__and__(ColumnExpression("key").__eq__(ConstantExpression(key)))
        )
        row_result = self.table.filter(filter_expr=expression).fetchone()
        if row_result is None:
            return None
        # Row layout is (key, collection, value); value is a JSON string.
        return json.loads(row_result[2])

    @override
    async def aget(
        self, key: str, collection: str = DEFAULT_COLLECTION
    ) -> Optional[dict]:
        """
        Get a value from the store.

        Args:
            key (str): key
            collection (str): collection name

        """
        return await asyncio.to_thread(self.get, key, collection)

    @override
    def get_all(self, collection: str = DEFAULT_COLLECTION) -> dict[str, dict]:
        """Get all values from the store."""
        filter_expr: Expression = ColumnExpression("collection").__eq__(
            ConstantExpression(collection)
        )
        table: pyarrow.Table = self.table.filter(
            filter_expr=filter_expr
        ).fetch_arrow_table()
        as_list = table.to_pylist()
        return {row["key"]: json.loads(row["value"]) for row in as_list}

    @override
    async def aget_all(self, collection: str = DEFAULT_COLLECTION) -> dict[str, dict]:
        """Get all values from the store."""
        # Fix: offload to a worker thread like the other async methods instead
        # of running the blocking query on the event loop.
        return await asyncio.to_thread(self.get_all, collection)

    @override
    def delete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
        """
        Delete a value from the store.

        Args:
            key (str): key
            collection (str): collection name

        """
        filter_expression = (
            ColumnExpression("collection")
            .__eq__(ConstantExpression(collection))
            .__and__(ColumnExpression("key").__eq__(ConstantExpression(key)))
        )
        # str(Expression) renders valid SQL for the WHERE clause.
        command = f"DELETE FROM {self.table.alias} WHERE {filter_expression}"
        _ = self.client.execute(command)
        return True

    @override
    async def adelete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
        """
        Delete a value from the store.

        Args:
            key (str): key
            collection (str): collection name

        """
        # Fix: offload to a worker thread like aput/aget for consistency.
        return await asyncio.to_thread(self.delete, key, collection)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/kvstore/llama-index-storage-kvstore-duckdb/llama_index/storage/kvstore/duckdb/base.py",
"license": "MIT License",
"lines": 269,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/storage/kvstore/llama-index-storage-kvstore-duckdb/tests/test_storage_kvstore_duckdb.py | import os
from llama_index.core.storage.kvstore.types import BaseKVStore
from llama_index.storage.kvstore.duckdb import DuckDBKVStore
from llama_index.vector_stores.duckdb.base import DuckDBVectorStore
import pytest
def test_class():
    """DuckDBKVStore must inherit from BaseKVStore."""
    ancestor_names = {base.__name__ for base in DuckDBKVStore.mro()}
    assert BaseKVStore.__name__ in ancestor_names
def test_init():
    """Default construction targets an in-memory database with default names."""
    store = DuckDBKVStore()
    assert (store.database_name, store.table_name, store.persist_dir) == (
        ":memory:",
        "keyvalue",
        "./storage",
    )
    assert store.client is not None
    assert store.table is not None
def test_from_vector_store():
    """A KV store built from a vector store shares its database settings and connection."""
    vector_store = DuckDBVectorStore()
    kv_store = DuckDBKVStore.from_vector_store(duckdb_vector_store=vector_store)
    assert kv_store.database_name == vector_store.database_name
    assert kv_store.table_name == "keyvalue"
    assert kv_store.persist_dir == vector_store.persist_dir
    assert kv_store.client is not None
    # Round-trip one record through the shared connection.
    kv_store.put("id_1", {"name": "John Doe", "text": "Hello, world!"})
    results = kv_store.get_all()
    assert results["id_1"] == {"name": "John Doe", "text": "Hello, world!"}
def memory_store():
    """Build a throwaway in-memory KV store."""
    return DuckDBKVStore()
def disk_store():
    """Build a disk-backed KV store, deleting any database file left by a prior run."""
    stale_db = "./storage/persisted.duckdb"
    if os.path.exists(stale_db):
        os.remove(stale_db)
    return DuckDBKVStore(database_name="persisted.duckdb", persist_dir="./storage")
@pytest.mark.parametrize("persistent", ["memory", "disk"])
class TestStore:
    """Exercises the KV API against both in-memory and disk-backed stores."""

    @pytest.fixture
    def kv_store(self, persistent: str) -> DuckDBKVStore:
        # Fresh store per test, selected by the class-level parametrization.
        if persistent == "memory":
            return memory_store()
        return disk_store()

    def test_put(self, kv_store: DuckDBKVStore):
        """A single put succeeds."""
        key = "id_1"
        value = {"name": "John Doe", "text": "Hello, world!"}
        _ = kv_store.put(key, value)

    def test_put_all(self, kv_store: DuckDBKVStore):
        """A bulk put of several pairs succeeds."""
        kv_pairs = [
            ("id_1", {"name": "John Doe", "text": "Hello, world!"}),
            ("id_2", {"name": "Jane Doe", "text": "Hello, world!"}),
        ]
        _ = kv_store.put_all(kv_pairs)

    def test_put_all_empty(self, kv_store: DuckDBKVStore):
        """A bulk put of zero pairs is a no-op, not an error."""
        kv_pairs = []
        _ = kv_store.put_all(kv_pairs)

    def test_put_twice(self, kv_store: DuckDBKVStore):
        """Writing the same key twice upserts: the latest value wins."""
        key = "id_1"
        value = {"name": "John Doe", "text": "Hello, world!"}
        value_updated = {"name": "Jane Doe", "text": "Hello, world!"}
        _ = kv_store.put(key, value)
        _ = kv_store.put(key, value_updated)
        assert kv_store.get(key) == value_updated

    def test_put_get(self, kv_store: DuckDBKVStore):
        """A stored value round-trips through get."""
        key = "id_1"
        value = {"name": "John Doe", "text": "Hello, world!"}
        _ = kv_store.put(key, value)
        assert kv_store.get(key) == value

    def test_put_get_collection(self, kv_store: DuckDBKVStore):
        """Values written to a named collection are read back from it."""
        key = "id_1"
        value = {"name": "John Doe", "text": "Hello, world!"}
        _ = kv_store.put(key, value, collection="collection_1")
        assert kv_store.get(key, collection="collection_1") == value

    def test_put_get_all(self, kv_store: DuckDBKVStore):
        """get_all returns every pair in the default collection."""
        key_1 = "id_1"
        value_1 = {"name": "John Doe", "text": "Hello, world!"}
        key_2 = "id_2"
        value_2 = {"name": "Jane Doe", "text": "Hello, world!"}
        _ = kv_store.put(key_1, value_1)
        _ = kv_store.put(key_2, value_2)
        results = kv_store.get_all()
        assert results[key_1] == value_1
        assert results[key_2] == value_2

    def test_delete(self, kv_store: DuckDBKVStore):
        """Deleting a key removes it from the store."""
        key = "id_1"
        value = {"name": "John Doe", "text": "Hello, world!"}
        _ = kv_store.put(key, value)
        assert kv_store.get(key) == value
        _ = kv_store.delete(key)
        assert kv_store.get(key) is None

    def test_delete_collection(self, kv_store: DuckDBKVStore):
        """Deleting a key in a named collection removes it from that collection."""
        key = "id_1"
        value = {"name": "John Doe", "text": "Hello, world!"}
        _ = kv_store.put(key, value, collection="collection_1")
        assert kv_store.get(key, collection="collection_1") == value
        _ = kv_store.delete(key, collection="collection_1")
        assert kv_store.get(key, collection="collection_1") is None

    @pytest.mark.asyncio
    async def test_async(self, kv_store: DuckDBKVStore):
        """The async API mirrors the sync one (put/get/put_all/delete/get_all)."""
        key = "id_1"
        value = {"name": "John Doe", "text": "Hello, world!"}
        _ = await kv_store.aput(key, value)
        assert await kv_store.aget(key) == value
        new_key = "id_2"
        new_value = {"name": "Jane Doe", "text": "Hello, world!"}
        # The duplicated pair exercises upsert behavior within one bulk write.
        _ = await kv_store.aput_all([(new_key, new_value), (new_key, new_value)])
        assert await kv_store.aget_all() == {key: value, new_key: new_value}
        _ = await kv_store.adelete(key)
        assert await kv_store.aget(key) is None
        assert await kv_store.aget_all() == {new_key: new_value}
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/kvstore/llama-index-storage-kvstore-duckdb/tests/test_storage_kvstore_duckdb.py",
"license": "MIT License",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-nvidia/tests/test_unknown_models.py | import pytest
from unittest.mock import patch, MagicMock
from llama_index.llms.nvidia import NVIDIA
@pytest.mark.parametrize("is_chat_model", [True, False])
def test_unknown_model_is_chat_model_parameter(is_chat_model) -> None:
    """Test that is_chat_model parameter is respected for unknown models."""
    fake_model = MagicMock()
    fake_model.id = "nvidia/llama-3.3-nemotron-super-49b-v2"

    with patch.object(NVIDIA, "available_models", [fake_model]):
        client = NVIDIA(
            model="nvidia/llama-3.3-nemotron-super-49b-v2",
            is_chat_model=is_chat_model,
            api_key="fake-key",
        )
        # Whatever the caller passed must be preserved verbatim.
        assert client.is_chat_model is is_chat_model
def test_unknown_model_default_is_chat_model() -> None:
    """Test that default (no parameter) defaults to False for unknown models."""
    fake_model = MagicMock()
    fake_model.id = "nvidia/llama-3.3-nemotron-super-49b-v2"

    with patch.object(NVIDIA, "available_models", [fake_model]):
        client = NVIDIA(
            model="nvidia/llama-3.3-nemotron-super-49b-v2", api_key="fake-key"
        )
        # Should default to False for unknown models
        assert client.is_chat_model is False
def test_known_model_not_overridden() -> None:
    """Test that known models are not overridden by user-provided is_chat_model parameter."""
    fake_model = MagicMock()
    fake_model.id = "mistralai/mistral-7b-instruct-v0.2"

    with patch.object(NVIDIA, "available_models", [fake_model]):
        client = NVIDIA(
            model="mistralai/mistral-7b-instruct-v0.2",
            is_chat_model=False,
            api_key="fake-key",
        )
        # Known models keep their catalog setting; the caller's False is ignored.
        assert client.is_chat_model is True
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-nvidia/tests/test_unknown_models.py",
"license": "MIT License",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/agent/workflow/test_events.py | import pytest
import json
from typing import Tuple
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import ToolSelection
from llama_index.core.bridge.pydantic import BaseModel, ValidationError
from llama_index.core.agent.workflow.workflow_events import (
AgentWorkflowStartEvent,
AgentOutput,
PydanticConversionWarning,
AgentStreamStructuredOutput,
)
from llama_index.core.memory import Memory
@pytest.fixture()
def example_agent_output() -> dict:
    """Raw payload in the shape AgentOutput.model_validate should accept."""
    return {
        "response": ChatMessage(role="user", content="30 times 2 is 60."),
        "tool_calls": [
            ToolSelection(
                tool_id="1", tool_name="multiply", tool_kwargs={"i": 30, "j": 2}
            )
        ],
        "raw": '{"role": "user", "content": "30 times 2 is 60."}',
        "structured_response": {"operation": "30 times 2", "result": 60},
        "current_agent_name": "CalculatorAgent",
    }
@pytest.fixture()
def example_agent_stream_structured_output() -> Tuple[dict, str]:
    """A raw stream-event payload plus the pretty-printed JSON of its output field."""
    payload = {"output": {"flavor": "strawberry", "extra_sugar": False}}
    pretty = json.dumps(payload["output"], indent=4)
    return payload, pretty
# Schema matching the fixture's structured_response: converts cleanly.
class MathResult(BaseModel):
    operation: str
    result: int
# Deliberately mismatched schema (result typed str, payload has int) used to
# trigger PydanticConversionWarning in the tests below.
class WrongMathResult(BaseModel):
    operation: str
    result: str
# Schema matching the stream-structured-output fixture payload.
class Flavor(BaseModel):
    flavor: str
    extra_sugar: bool
def test_agent_workflow_start_event():
    """ChatMessage-based history is stored verbatim on the start event."""
    history = [ChatMessage(role="user", content="Hello, world!")]
    event = AgentWorkflowStartEvent(
        user_msg="Hello, world!",
        chat_history=history,
        max_iterations=10,
    )
    first = event.chat_history[0]
    assert event.user_msg == "Hello, world!"
    assert first.role.value == "user"
    assert first.content == "Hello, world!"
    assert event.max_iterations == 10
def test_agent_workflow_start_event_with_dict():
    """Plain-dict history entries are coerced to ChatMessage on the start event."""
    event = AgentWorkflowStartEvent(
        user_msg="Hello, world!",
        chat_history=[{"role": "user", "content": "Hello, world!"}],
        max_iterations=10,
    )
    first = event.chat_history[0]
    assert event.user_msg == "Hello, world!"
    assert first.role.value == "user"
    assert first.content == "Hello, world!"
    assert event.max_iterations == 10
def test_agent_workflow_start_event_to_dict():
    """model_dump drops memory and keeps the four serializable fields."""
    event = AgentWorkflowStartEvent(
        user_msg="Hello, world!",
        chat_history=[ChatMessage(role="user", content="Hello, world!")],
        max_iterations=10,
        memory=Memory.from_defaults(),
    )
    dump = event.model_dump()
    # user_msg, chat_history, max_iterations, early_stopping_method — memory excluded
    assert len(dump) == 4
    assert dump["user_msg"] == "Hello, world!"
    first_msg = dump["chat_history"][0]
    assert first_msg["role"] == "user"
    assert first_msg["blocks"][0]["text"] == "Hello, world!"
    assert dump["max_iterations"] == 10
    # early_stopping_method was never set, so it serializes as None
    assert dump["early_stopping_method"] is None
def test_agent_output_with_structured_response(example_agent_output: dict) -> None:
    """AgentOutput validates the payload and converts structured_response to pydantic."""
    try:
        agent_output = AgentOutput.model_validate(example_agent_output)
        validated = True
    except ValidationError:
        validated = False
    assert validated
    expected = MathResult.model_validate(example_agent_output["structured_response"])
    assert agent_output.get_pydantic_model(MathResult) == expected
    # An incompatible schema warns instead of raising, and yields None.
    with pytest.warns(PydanticConversionWarning):
        converted = agent_output.get_pydantic_model(WrongMathResult)
    assert converted is None
def test_agent_stream_structured_output(
    example_agent_stream_structured_output: Tuple[dict, str],
):
    """AgentStreamStructuredOutput validates, pretty-prints, and converts its payload."""
    payload, expected_json = example_agent_stream_structured_output
    try:
        ev = AgentStreamStructuredOutput.model_validate(payload)
        validated = True
    except ValidationError:
        validated = False
    assert validated
    # __str__ pretty-prints the inner "output" dict.
    assert str(ev) == expected_json
    assert ev.get_pydantic_model(Flavor) == Flavor(
        flavor="strawberry", extra_sugar=False
    )
    # An incompatible schema warns instead of raising, and yields None.
    with pytest.warns(PydanticConversionWarning):
        converted = ev.get_pydantic_model(MathResult)
    assert converted is None
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/agent/workflow/test_events.py",
"license": "MIT License",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/indices/llama-index-indices-managed-lancedb/llama_index/indices/managed/lancedb/base.py | import lancedb
import json
import polars as pl
import pandas as pd
import pyarrow as pa
import warnings
import httpx
from lancedb.pydantic import LanceModel, Vector
from pydantic import Field
from lancedb import DBConnection, AsyncConnection
from lancedb.table import Table, AsyncTable
from lancedb.rerankers import Reranker
from typing import Optional, Dict, Any, Sequence, Union, Literal, List, cast
from llama_index.core.indices.managed.base import BaseManagedIndex
from llama_index.core.schema import Document, ImageDocument
from .utils import (
LanceDBMultiModalModel,
LanceDBTextModel,
LocalConnectionConfig,
CloudConnectionConfig,
EmbeddingConfig,
IndexingConfig,
TableConfig,
get_lancedb_multimodal_embedding_model,
get_lancedb_text_embedding_model,
)
from .retriever import LanceDBRetriever
from .query_engine import LanceDBRetrieverQueryEngine
DEFAULT_TABLE_NAME = "default_table"
class LanceDBMultiModalIndex(BaseManagedIndex):
    """
    Implementation of the MultiModal AI LakeHouse by LanceDB.

    Wraps a (sync or async) LanceDB connection and exposes index creation,
    document/data insertion, deletion, and retriever/query-engine factories
    over a single LanceDB table.
    """

    class Config:
        # Allow pydantic to hold LanceDB connection/table objects as fields.
        arbitrary_types_allowed = True

    # Connection target: local directory URI or LanceDB Cloud ("db://...").
    connection_config: Union[LocalConnectionConfig, CloudConnectionConfig]
    # Which text or multimodal embedding model to use, plus its kwargs.
    embedding_config: EmbeddingConfig
    # Index type created for new tables and its creation kwargs.
    indexing_config: IndexingConfig
    # Target table name and whether it already exists.
    table_config: TableConfig
    # The fields below are populated lazily by (a)create_index().
    _embedding_model: Optional[Union[LanceDBMultiModalModel, LanceDBTextModel]] = None
    _table_schema: Optional[Union[LanceModel, pa.Schema]] = None
    _connection: Optional[Union[DBConnection, AsyncConnection]] = None
    _table: Optional[Union[Table, AsyncTable]] = None
    _reranker: Optional[Reranker] = None
def __init__(
self,
connection: Optional[Union[DBConnection, AsyncConnection]] = None,
uri: Optional[str] = None,
region: Optional[str] = None,
api_key: Optional[str] = None,
text_embedding_model: Optional[
Literal[
"bedrock-text",
"cohere",
"gemini-text",
"instructor",
"ollama",
"openai",
"sentence-transformers",
"gte-text",
"huggingface",
"colbert",
"jina",
"watsonx",
"voyageai",
]
] = None,
multimodal_embedding_model: Optional[
Literal["open-clip", "colpali", "jina", "imagebind"]
] = None,
embedding_model_kwargs: Dict[str, Any] = {},
table_name: str = DEFAULT_TABLE_NAME,
indexing: Literal[
"IVF_PQ",
"IVF_HNSW_PQ",
"IVF_HNSW_SQ",
"FTS",
"BTREE",
"BITMAP",
"LABEL_LIST",
"NO_INDEXING",
] = "IVF_PQ",
indexing_kwargs: Dict[str, Any] = {},
reranker: Optional[Reranker] = None,
use_async: bool = False,
table_exists: bool = False,
) -> None:
self._reranker = reranker
if connection:
assert isinstance(connection, (DBConnection, AsyncConnection)), (
"You did not provide a valid LanceDB connection"
)
if use_async:
assert isinstance(connection, AsyncConnection), (
"You set use_async to True, but you provided a synchronous connection"
)
else:
assert isinstance(connection, DBConnection), (
"You set use_async to False, but you provided an asynchronous connection"
)
self._connection = connection
elif uri and uri.startswith("db://"):
self.connection_config = CloudConnectionConfig(
uri=uri,
api_key=api_key,
region=region,
use_async=use_async,
)
elif uri and not uri.startswith("db://"):
self.connection_config = LocalConnectionConfig(
uri=uri,
use_async=use_async,
)
else:
raise ValueError(
"No connection has been passed and no URI has been set for local or remote connection"
)
self.embedding_config = EmbeddingConfig(
text_embedding_model=text_embedding_model,
multi_modal_embedding_model=multimodal_embedding_model,
embedding_kwargs=embedding_model_kwargs,
)
self.indexing_config = IndexingConfig(
indexing=indexing, indexing_kwargs=indexing_kwargs
)
self.table_config = TableConfig(
table_name=table_name,
table_exists=table_exists,
)
def create_index(self) -> None:
if self._connection:
return
if self.connection_config.use_async:
raise ValueError(
"You are trying to establish a synchronous connection when use_async is set to True"
)
if isinstance(self.connection_config, LocalConnectionConfig):
self._connection = lancedb.connect(uri=self.connection_config.uri)
else:
self._connection = lancedb.connect(
uri=self.connection_config.uri,
region=self.connection_config.region,
api_key=self.connection_config.api_key,
)
self._connection = cast(DBConnection, self._connection)
if self.embedding_config.text_embedding_model:
self._embedding_model = get_lancedb_text_embedding_model(
embedding_model=self.embedding_config.text_embedding_model,
**self.embedding_config.embedding_kwargs,
)
class TextSchema(LanceModel):
id: str
metadata: str = Field(default=json.dumps({}))
text: str = self._embedding_model.embedding_modxel.SourceField()
vector: Vector(self._embedding_model.embedding_model.ndims()) = (
self._embedding_model.embedding_model.VectorField()
)
self._table_schema = TextSchema
else:
self._embedding_model = get_lancedb_multimodal_embedding_model(
embedding_model=self.embedding_config.multi_modal_embedding_model,
**self.embedding_config.embedding_kwargs,
)
class MultiModalSchema(LanceModel):
id: str
metadata: str = Field(default=json.dumps({}))
label: str = Field(
default_factory=str,
)
image_uri: str = (
self._embedding_model.embedding_model.SourceField()
) # image uri as the source
image_bytes: bytes = (
self._embedding_model.embedding_model.SourceField()
) # image bytes as the source
vector: Vector(self._embedding_model.embedding_model.ndims()) = (
self._embedding_model.embedding_model.VectorField()
) # vector column
vec_from_bytes: Vector(
self._embedding_model.embedding_model.ndims()
) = self._embedding_model.embedding_model.VectorField() # Another vector column
self._table_schema = MultiModalSchema
if not self.table_config.table_exists:
self._table = self._connection.create_table(
self.table_config.table_name, schema=self._table_schema
)
if self.indexing_config.indexing != "NO_INDEXING":
self._table.create_index(
index_type=self.indexing_config.indexing,
**self.indexing_config.indexing_kwargs,
)
else:
self._table = self._connection.open_table(self.table_config.table_name)
self._table_schema = self._table.schema
    async def acreate_index(self) -> None:
        """
        Asynchronously connect to LanceDB, build the table schema from the
        configured embedding model, and create or open the configured table
        (creating an index on the "vector" column unless indexing is
        "NO_INDEXING").

        No-op when a connection was injected at construction time.

        Raises:
            ValueError: If the index was configured with use_async=False.
        """
        if self._connection:
            return
        if not self.connection_config.use_async:
            raise ValueError(
                "You are trying to establish an asynchronous connection when use_async is set to False"
            )
        if isinstance(self.connection_config, LocalConnectionConfig):
            self._connection = await lancedb.connect_async(
                uri=self.connection_config.uri
            )
        else:
            self._connection = await lancedb.connect_async(
                uri=self.connection_config.uri,
                region=self.connection_config.region,
                api_key=self.connection_config.api_key,
            )
        self._connection = cast(AsyncConnection, self._connection)
        if self.embedding_config.text_embedding_model:
            self._embedding_model = get_lancedb_text_embedding_model(
                embedding_model=self.embedding_config.text_embedding_model,
                **self.embedding_config.embedding_kwargs,
            )

            # Schema closes over the embedding model so LanceDB can compute
            # the vector column from the text source column.
            class TextSchema(LanceModel):
                id: str
                metadata: str = Field(default=json.dumps({}))
                text: str = self._embedding_model.embedding_model.SourceField()
                vector: Vector(self._embedding_model.embedding_model.ndims()) = (
                    self._embedding_model.embedding_model.VectorField()
                )

            self._table_schema = TextSchema
        else:
            self._embedding_model = get_lancedb_multimodal_embedding_model(
                embedding_model=self.embedding_config.multi_modal_embedding_model,
                **self.embedding_config.embedding_kwargs,
            )
            # NOTE(review): only the async path validates the embedding model;
            # confirm whether create_index() should do the same.
            self._embedding_model.validate_embedding_model()

            class MultiModalSchema(LanceModel):
                id: str
                metadata: str = Field(default=json.dumps({}))
                label: str = Field(
                    default_factory=str,
                )
                image_uri: str = (
                    self._embedding_model.embedding_model.SourceField()
                )  # image uri as the source
                image_bytes: bytes = (
                    self._embedding_model.embedding_model.SourceField()
                )  # image bytes as the source
                vector: Vector(self._embedding_model.embedding_model.ndims()) = (
                    self._embedding_model.embedding_model.VectorField()
                )  # vector column
                vec_from_bytes: Vector(
                    self._embedding_model.embedding_model.ndims()
                ) = self._embedding_model.embedding_model.VectorField()  # Another vector column

            self._table_schema = MultiModalSchema
        if not self.table_config.table_exists:
            self._table = await self._connection.create_table(
                self.table_config.table_name, schema=self._table_schema
            )
            if self.indexing_config.indexing != "NO_INDEXING":
                # Async API takes an index config object rather than a type name.
                await self._table.create_index(
                    config=self.indexing_config.async_index_config,
                    column="vector",
                    **self.indexing_config.indexing_kwargs,
                )
        else:
            self._table = await self._connection.open_table(
                self.table_config.table_name
            )
            self._table_schema = await self._table.schema()
@classmethod
async def from_documents(
cls,
documents: Sequence[Union[Document, ImageDocument]],
connection: Optional[DBConnection] = None,
uri: Optional[str] = None,
region: Optional[str] = None,
api_key: Optional[str] = None,
text_embedding_model: Optional[
Literal[
"bedrock-text",
"cohere",
"gemini-text",
"instructor",
"ollama",
"openai",
"sentence-transformers",
"gte-text",
"huggingface",
"colbert",
"jina",
"watsonx",
"voyageai",
]
] = None,
multimodal_embedding_model: Optional[
Literal["open-clip", "colpali", "jina", "imagebind"]
] = None,
embedding_model_kwargs: Dict[str, Any] = {},
table_name: str = DEFAULT_TABLE_NAME,
indexing: Literal[
"IVF_PQ",
"IVF_HNSW_PQ",
"IVF_HNSW_SQ",
"FTS",
"BTREE",
"BITMAP",
"LABEL_LIST",
"NO_INDEXING",
] = "IVF_PQ",
indexing_kwargs: Dict[str, Any] = {},
reranker: Optional[Reranker] = None,
use_async: bool = False,
table_exists: bool = False,
) -> "LanceDBMultiModalIndex":
"""
Generate a LanceDBMultiModalIndex from LlamaIndex Documents.
"""
try:
index = cls(
connection,
uri,
region,
api_key,
text_embedding_model,
multimodal_embedding_model,
embedding_model_kwargs,
table_name,
indexing,
indexing_kwargs,
reranker,
use_async,
table_exists,
)
except ValueError as e:
raise ValueError(
f"Initialization of the index from documents are failed: {e}"
)
if use_async:
await index.acreate_index()
else:
index.create_index()
data: List[dict] = []
if text_embedding_model:
assert all(isinstance(document, Document) for document in documents)
for document in documents:
if document.text:
data.append(
{
"id": document.id_,
"text": document.text,
"metadata": json.dumps(document.metadata),
}
)
else:
warnings.warn(
f"Document {document.doc_id} does not contain text and has thus been skipped",
UserWarning,
)
else:
assert all(isinstance(document, ImageDocument) for document in documents)
for document in documents:
label = json.dumps(document.metadata).get("image_label", None) or ""
if document.image:
data.append(
{
"id": document.id_,
"image_bytes": document.image,
"image_uri": document.image_url or "",
"label": label,
"metadata": json.dumps(document.metadata),
}
)
elif document.image_url:
image_bytes = httpx.get(document.image_url).content
data.append(
{
"id": document.id_,
"image_bytes": image_bytes,
"image_uri": document.image_url,
"label": label,
"metadata": json.dumps(document.metadata),
}
)
elif document.image_path:
image_bytes = document.resolve_image().read()
data.append(
{
"id": document.id_,
"image_bytes": image_bytes,
"image_uri": document.image_url or "",
"label": label,
"metadata": json.dumps(document.metadata),
}
)
else:
warnings.warn(
f"Document {document.doc_id} does not contain an image and has thus been skipped",
UserWarning,
)
if use_async:
await index._table.add(data)
else:
index._table.add(data)
return index
@classmethod
async def from_data(
cls,
data: Union[List[dict], pa.Table, pl.DataFrame, pd.DataFrame],
connection: Optional[DBConnection] = None,
uri: Optional[str] = None,
region: Optional[str] = None,
api_key: Optional[str] = None,
text_embedding_model: Optional[
Literal[
"bedrock-text",
"cohere",
"gemini-text",
"instructor",
"ollama",
"openai",
"sentence-transformers",
"gte-text",
"huggingface",
"colbert",
"jina",
"watsonx",
"voyageai",
]
] = None,
multimodal_embedding_model: Optional[
Literal["open-clip", "colpali", "jina", "imagebind"]
] = None,
embedding_model_kwargs: Dict[str, Any] = {},
table_name: str = DEFAULT_TABLE_NAME,
indexing: Literal[
"IVF_PQ",
"IVF_HNSW_PQ",
"IVF_HNSW_SQ",
"FTS",
"BTREE",
"BITMAP",
"LABEL_LIST",
"NO_INDEXING",
] = "IVF_PQ",
indexing_kwargs: Dict[str, Any] = {},
reranker: Optional[Reranker] = None,
use_async: bool = False,
table_exists: bool = False,
) -> "LanceDBMultiModalIndex":
"""
Generate a LanceDBMultiModalIndex from Pandas, Polars or PyArrow data.
"""
try:
index = cls(
connection,
uri,
region,
api_key,
text_embedding_model,
multimodal_embedding_model,
embedding_model_kwargs,
table_name,
indexing,
indexing_kwargs,
reranker,
use_async,
table_exists,
)
except ValueError as e:
raise ValueError(
f"Initialization of the vector store from documents are failed: {e}"
)
if use_async:
await index.acreate_index()
await index._table.add(data)
else:
index.create_index()
index._table.add(data)
return index
def as_retriever(self, **kwargs):
if self.embedding_config.text_embedding_model:
multimodal = False
else:
multimodal = True
return LanceDBRetriever(
table=self._table,
multimodal=multimodal,
**kwargs,
)
def as_query_engine(self, **kwargs):
retriever = self.as_retriever()
return LanceDBRetrieverQueryEngine(retriever=retriever, **kwargs)
async def ainsert_nodes(
self, documents: Sequence[Union[Document, ImageDocument]], **kwargs: Any
) -> None:
data: List[dict] = []
if isinstance(self._embedding_model, LanceDBTextModel):
assert all(isinstance(document, Document) for document in documents)
for document in documents:
if document.text:
data.append(
{
"id": document.id_,
"text": document.text,
"metadata": json.dumps(document.metadata),
}
)
else:
warnings.warn(
f"Document {document.doc_id} does not contain text and has thus been skipped",
UserWarning,
)
else:
assert all(isinstance(document, ImageDocument) for document in documents)
for document in documents:
label = json.dumps(document.metadata).get("image_label", None) or ""
if document.image:
data.append(
{
"id": document.id_,
"image_bytes": document.image,
"image_uri": document.image_url or "",
"label": label,
"metadata": json.dumps(document.metadata),
}
)
elif document.image_url:
image_bytes = httpx.get(document.image_url).content
data.append(
{
"id": document.id_,
"image_bytes": image_bytes,
"image_uri": document.image_url,
"label": label,
"metadata": json.dumps(document.metadata),
}
)
elif document.image_path:
image_bytes = document.resolve_image().read()
data.append(
{
"id": document.id_,
"image_bytes": image_bytes,
"image_uri": document.image_url or "",
"label": label,
"metadata": json.dumps(document.metadata),
}
)
else:
warnings.warn(
f"Document {document.doc_id} does not contain an image and has thus been skipped",
UserWarning,
)
if self.connection_config.use_async:
self._table = cast(AsyncTable, self._table)
await self._table.add(data)
else:
raise ValueError(
"Attempting to add documents asynchronously with a synchronous connection!"
)
def insert_nodes(
self, documents: Sequence[Union[Document, ImageDocument]], **kwargs: Any
) -> None:
data: List[dict] = []
if isinstance(self._embedding_model, LanceDBTextModel):
assert all(isinstance(document, Document) for document in documents)
for document in documents:
if document.text:
data.append(
{
"id": document.id_,
"text": document.text,
"metadata": json.dumps(document.metadata),
}
)
else:
warnings.warn(
f"Document {document.doc_id} does not contain text and has thus been skipped",
UserWarning,
)
else:
assert all(isinstance(document, ImageDocument) for document in documents)
for document in documents:
label = json.dumps(document.metadata).get("image_label", None) or ""
if document.image:
data.append(
{
"id": document.id_,
"image_bytes": document.image,
"image_uri": document.image_url or "",
"label": label,
"metadata": json.dumps(document.metadata),
}
)
elif document.image_url:
image_bytes = httpx.get(document.image_url).content
data.append(
{
"id": document.id_,
"image_bytes": image_bytes,
"image_uri": document.image_url,
"label": label,
"metadata": json.dumps(document.metadata),
}
)
elif document.image_path:
image_bytes = document.resolve_image().read()
data.append(
{
"id": document.id_,
"image_bytes": image_bytes,
"image_uri": document.image_url or "",
"label": label,
"metadata": json.dumps(document.metadata),
}
)
else:
warnings.warn(
f"Document {document.doc_id} does not contain an image and has thus been skipped",
UserWarning,
)
if not self.connection_config.use_async:
self._table = cast(Table, self._table)
self._table.add(data)
else:
raise ValueError(
"Attempting to add documents synchronously with an asynchronous connection!"
)
def insert_data(
self, data: Union[List[dict], pl.DataFrame, pd.DataFrame, pa.Table]
) -> None:
if not self.connection_config.use_async:
self._table = cast(Table, self._table)
self._table.add(data)
else:
raise ValueError(
"Attempting to add data asynchronously with a synchronous connection!"
)
async def ainsert_data(
self, data: Union[List[dict], pl.DataFrame, pd.DataFrame, pa.Table]
) -> None:
if self.connection_config.use_async:
self._table = cast(AsyncTable, self._table)
await self._table.add(data)
else:
raise ValueError(
"Attempting to add data synchronously with an asynchronous connection!"
)
    def insert(self, document: Union[Document, ImageDocument], **insert_kwargs):
        # Single-document convenience wrapper over insert_nodes.
        return self.insert_nodes(documents=[document], **insert_kwargs)
    async def ainsert(self, document: Union[Document, ImageDocument], **insert_kwargs):
        # Single-document convenience wrapper over ainsert_nodes.
        return await self.ainsert_nodes(documents=[document], **insert_kwargs)
def delete_ref_doc(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
if not self.connection_config.use_async:
self._table = cast(Table, self._table)
self._table.delete(where="id = '" + ref_doc_id + "'")
else:
raise ValueError(
"Attempting to delete data synchronously with an asynchronous connection!"
)
async def adelete_ref_doc(self, ref_doc_id: str, **delete_kwargs):
if self.connection_config.use_async:
self._table = cast(AsyncTable, self._table)
await self._table.delete(where="id = '" + ref_doc_id + "'")
else:
raise ValueError(
"Attempting to delete data asynchronously with a synchronous connection!"
)
def delete_nodes(self, ref_doc_ids: List[str]) -> None:
if not self.connection_config.use_async:
self._table = cast(Table, self._table)
delete_where = "id IN ('" + "', '".join(ref_doc_ids) + "')"
self._table.delete(where=delete_where)
else:
raise ValueError(
"Attempting to delete data synchronously with an asynchronous connection!"
)
async def adelete_nodes(self, ref_doc_ids: List[str]) -> None:
if self.connection_config.use_async:
self._table = cast(AsyncTable, self._table)
delete_where = "id IN ('" + "', '".join(ref_doc_ids) + "')"
await self._table.delete(where=delete_where)
else:
raise ValueError(
"Attempting to delete data asynchronously with a synchronous connection!"
)
    # The BaseManagedIndex operations below are not supported by this
    # LanceDB-backed index; use the insert_*/delete_* methods instead.

    def _insert(self, nodes: Any, **insert_kwargs: Any) -> Any:
        raise NotImplementedError("_insert is not implemented.")

    def update(self, document: Any, **update_kwargs: Any) -> Any:
        raise NotImplementedError("update is not implemented.")

    def update_ref_doc(self, document: Any, **update_kwargs: Any) -> Any:
        raise NotImplementedError("update_ref_doc is not implemented.")

    async def aupdate_ref_doc(self, document: Any, **update_kwargs: Any) -> Any:
        raise NotImplementedError("aupdate_ref_doc is not implemented.")

    def refresh(self, documents: Any, **update_kwargs: Any) -> Any:
        raise NotImplementedError("refresh is not implemented.")

    def refresh_ref_docs(self, documents: Any, **update_kwargs: Any) -> Any:
        raise NotImplementedError("refresh_ref_docs is not implemented.")

    async def arefresh_ref_docs(self, documents: Any, **update_kwargs: Any) -> Any:
        raise NotImplementedError("arefresh_ref_docs is not implemented.")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/indices/llama-index-indices-managed-lancedb/llama_index/indices/managed/lancedb/base.py",
"license": "MIT License",
"lines": 696,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/indices/llama-index-indices-managed-lancedb/llama_index/indices/managed/lancedb/query_engine.py | import os
from PIL import Image
from llama_index.core.query_engine.retriever_query_engine import (
RetrieverQueryEngine,
)
from llama_index.core.response_synthesizers import BaseSynthesizer
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.instrumentation.events.query import (
QueryEndEvent,
QueryStartEvent,
)
from llama_index.core.callbacks import CallbackManager
from llama_index.core.schema import NodeWithScore, ImageDocument
from llama_index.core.llms import ImageBlock
from llama_index.core.base.response.schema import RESPONSE_TYPE
from typing import Optional, List, Union
from typing_extensions import override
from .retriever import LanceDBRetriever, ExtendedQueryBundle
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
class LanceDBRetrieverQueryEngine(RetrieverQueryEngine):
    """Query engine that accepts text and/or image queries via ExtendedQueryBundle."""

    def __init__(
        self,
        retriever: LanceDBRetriever,
        response_synthesizer: Optional[BaseSynthesizer] = None,
        # Annotation fixed: the default is None, so the type is Optional.
        node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
        callback_manager: Optional[CallbackManager] = None,
    ):
        super().__init__(
            retriever, response_synthesizer, node_postprocessors, callback_manager
        )
@override
def retrieve(self, query_bundle: ExtendedQueryBundle) -> List[NodeWithScore]:
nodes = self._retriever._retrieve(query_bundle)
return self._apply_node_postprocessors(nodes, query_bundle=query_bundle)
@override
async def aretrieve(self, query_bundle: ExtendedQueryBundle) -> List[NodeWithScore]:
nodes = await self._retriever._aretrieve(query_bundle)
return self._apply_node_postprocessors(nodes, query_bundle=query_bundle)
@override
@dispatcher.span
def _query(self, query_bundle: ExtendedQueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
nodes = self.retrieve(query_bundle)
response = self._response_synthesizer.synthesize(
query=query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
@override
@dispatcher.span
async def _aquery(self, query_bundle: ExtendedQueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
nodes = await self.aretrieve(query_bundle)
response = await self._response_synthesizer.asynthesize(
query=query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
@override
@dispatcher.span
def query(
self,
query_str: Optional[str] = None,
query_image: Optional[
Union[Image.Image, ImageBlock, ImageDocument, str]
] = None,
query_image_path: Optional[os.PathLike[str]] = None,
) -> RESPONSE_TYPE:
"""
Executes a query against the managed LanceDB index.
Args:
query_str (Optional[str]): The text query string to search for. Defaults to None.
query_image (Optional[Union[Image.Image, ImageBlock, ImageDocument, str]]): An image or image-like object to use as part of the query. Can be a PIL Image, ImageBlock, ImageDocument, or a file path as a string. Defaults to None.
query_image_path (Optional[os.PathLike[str]]): The file path to an image to use as part of the query. Defaults to None.
Returns:
RESPONSE_TYPE: The result of the query.
Notes:
- At least one of `query_str`, `query_image`, or `query_image_path` should be provided.
"""
qb = ExtendedQueryBundle(
query_str=query_str, image_path=query_image_path, image=query_image
)
dispatcher.event(QueryStartEvent(query=qb))
with self.callback_manager.as_trace("query"):
if not query_str:
query_str = ""
query_result = self._query(qb)
dispatcher.event(QueryEndEvent(query=qb, response=query_result))
return query_result
@override
@dispatcher.span
async def aquery(
self,
query_str: Optional[str] = None,
query_image: Optional[
Union[Image.Image, ImageBlock, ImageDocument, str]
] = None,
query_image_path: Optional[os.PathLike[str]] = None,
) -> RESPONSE_TYPE:
"""
Asynchronously executes a query against the managed LanceDB index.
Args:
query_str (Optional[str]): The text query string to search for. Defaults to None.
query_image (Optional[Union[Image.Image, ImageBlock, ImageDocument, str]]): An image or image-like object to use as part of the query. Can be a PIL Image, ImageBlock, ImageDocument, or a file path as a string. Defaults to None.
query_image_path (Optional[os.PathLike[str]]): The file path to an image to use as part of the query. Defaults to None.
Returns:
RESPONSE_TYPE: The result of the query.
Notes:
- At least one of `query_str`, `query_image`, or `query_image_path` should be provided.
"""
qb = ExtendedQueryBundle(
query_str=query_str, image_path=query_image_path, image=query_image
)
dispatcher.event(QueryStartEvent(query=qb))
with self.callback_manager.as_trace("query"):
if not query_str:
query_str = ""
query_result = await self._aquery(qb)
dispatcher.event(QueryEndEvent(query=qb, response=query_result))
return query_result
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/indices/llama-index-indices-managed-lancedb/llama_index/indices/managed/lancedb/query_engine.py",
"license": "MIT License",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/indices/llama-index-indices-managed-lancedb/llama_index/indices/managed/lancedb/retriever.py | import os
from lancedb.table import Table, AsyncTable
from PIL import Image
from dataclasses import dataclass
from .utils import query_multimodal, query_text, aquery_multimodal, aquery_text
from llama_index.core.llms import ImageBlock
from llama_index.core.schema import ImageDocument
from llama_index.core.retrievers import BaseRetriever
from llama_index.core.schema import QueryBundle, NodeWithScore
from typing import Union, Optional, List, Any
from typing_extensions import override
@dataclass
class ExtendedQueryBundle(QueryBundle):
    # Optional in-memory image payload for multimodal retrieval, in addition
    # to the text/image_path fields inherited from QueryBundle.
    image: Optional[Union[Image.Image, ImageBlock, ImageDocument, str]] = None
class LanceDBRetriever(BaseRetriever):
    """Retriever over a LanceDB table supporting text or multimodal queries."""

    def __init__(
        self, table: Union[AsyncTable, Table], multimodal: bool, **kwargs: Any
    ):
        self.table = table
        self.multimodal = multimodal
        # Forward only the BaseRetriever-relevant kwargs to the parent.
        super().__init__(
            kwargs.get("callback_manager"), kwargs.get("verbose", False)
        )
def _retrieve(self, query_bundle: ExtendedQueryBundle) -> List[NodeWithScore]:
if not self.multimodal:
return query_text(table=self.table, query=query_bundle.query_str)
else:
if not query_bundle.image and not query_bundle.image_path:
raise ValueError(
"No image or image_path has been provided, but retrieval is set to multi-modal."
)
elif query_bundle.image:
return query_multimodal(table=self.table, query=query_bundle.image)
elif query_bundle.image_path:
img = ImageBlock(path=query_bundle.image_path)
return query_multimodal(table=self.table, query=img)
else:
return []
async def _aretrieve(
self, query_bundle: ExtendedQueryBundle
) -> List[NodeWithScore]:
if not self.multimodal:
return await aquery_text(table=self.table, query=query_bundle.query_str)
else:
if not query_bundle.image and not query_bundle.image_path:
raise ValueError(
"No image or image_path has been provided, but retrieval is set to multi-modal."
)
elif query_bundle.image:
return await aquery_multimodal(
table=self.table, query=query_bundle.image
)
elif query_bundle.image_path:
img = ImageBlock(path=query_bundle.image_path)
return await aquery_multimodal(table=self.table, query=img)
else:
return []
@override
def retrieve(
self,
query_str: Optional[str] = None,
query_image: Optional[
Union[Image.Image, ImageBlock, ImageDocument, str]
] = None,
query_image_path: Optional[os.PathLike[str]] = None,
) -> List[NodeWithScore]:
"""
Retrieves nodes relevant to the given query.
Args:
query_str (Optional[str]): The text query string. Required if the retriever is not multimodal.
query_image (Optional[Union[Image.Image, ImageBlock, ImageDocument, str]]): The image query, which can be a PIL Image, ImageBlock, ImageDocument, or a string path/URL. Used if the retriever is multimodal.
query_image_path (Optional[os.PathLike[str]]): The file path to the image query. Used if the retriever is multimodal.
Returns:
List[NodeWithScore]: A list of nodes with associated relevance scores.
Raises:
ValueError: If none of the query parameters are provided.
ValueError: If a text query is not provided for a non-multimodal retriever.
ValueError: If neither an image nor image path is provided for a multimodal retriever.
"""
if not query_str and not query_image and not query_image_path:
raise ValueError(
"At least one among query_str, query_image and query_image_path needs to be set"
)
if not self.multimodal:
if query_str:
query_bundle = ExtendedQueryBundle(query_str=query_str)
else:
raise ValueError(
"No query_str provided, but the retriever is not multimodal"
)
else:
if query_image:
query_bundle = ExtendedQueryBundle(query_str="", image=query_image)
elif query_image_path:
query_bundle = ExtendedQueryBundle(
query_str="", image_path=query_image_path
)
else:
raise ValueError(
"No query_image or query_image_path provided, but the retriever is multimodal"
)
return self._retrieve(query_bundle=query_bundle)
@override
async def aretrieve(
self,
query_str: Optional[str] = None,
query_image: Optional[
Union[Image.Image, ImageBlock, ImageDocument, str]
] = None,
query_image_path: Optional[os.PathLike[str]] = None,
) -> List[NodeWithScore]:
"""
Asynchronously retrieves nodes relevant to the given query.
Args:
query_str (Optional[str]): The text query string. Required if the retriever is not multimodal.
query_image (Optional[Union[Image.Image, ImageBlock, ImageDocument, str]]): The image query, which can be a PIL Image, ImageBlock, ImageDocument, or a string path/URL. Used if the retriever is multimodal.
query_image_path (Optional[os.PathLike[str]]): The file path to the image query. Used if the retriever is multimodal.
Returns:
List[NodeWithScore]: A list of nodes with associated relevance scores.
Raises:
ValueError: If none of the query parameters are provided.
ValueError: If a text query is not provided for a non-multimodal retriever.
ValueError: If neither an image nor image path is provided for a multimodal retriever.
"""
if not query_str and not query_image and not query_image_path:
raise ValueError(
"At least one among query_str, query_image and query_image_path needs to be set"
)
if not self.multimodal:
if query_str:
query_bundle = ExtendedQueryBundle(query_str=query_str)
else:
raise ValueError(
"No query_str provided, but the retriever is not multimodal"
)
else:
if query_image:
query_bundle = ExtendedQueryBundle(query_str="", image=query_image)
elif query_image_path:
query_bundle = ExtendedQueryBundle(
query_str="", image_path=query_image_path
)
else:
raise ValueError(
"No query_image or query_image_path provided, but the retriever is multimodal"
)
return await self._aretrieve(query_bundle=query_bundle)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/indices/llama-index-indices-managed-lancedb/llama_index/indices/managed/lancedb/retriever.py",
"license": "MIT License",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/indices/llama-index-indices-managed-lancedb/llama_index/indices/managed/lancedb/utils.py | from lancedb.embeddings import get_registry, EmbeddingFunction
from lancedb.table import Table, AsyncTable
from lancedb.index import IvfPq, IvfFlat, HnswPq, HnswSq, BTree, Bitmap, LabelList, FTS
import httpx
from PIL import Image
import io
import os
from llama_index.core.bridge.pydantic import BaseModel, Field, model_validator
from llama_index.core.llms import ImageBlock
from llama_index.core.schema import ImageDocument, Document, NodeWithScore
from typing import Literal, Union, Any, Optional, Dict, List
from typing_extensions import Self
class CloudConnectionConfig(BaseModel):
    """Connection settings for a LanceDB Cloud instance."""

    uri: str
    api_key: Optional[str]
    region: Optional[str]
    use_async: bool

    @model_validator(mode="after")
    def validate_connection(self) -> Self:
        """Resolve the API key (env fallback) and default the region."""
        # Fall back to the environment, then fail loudly if no key is available.
        self.api_key = self.api_key or os.getenv("LANCEDB_API_KEY", None)
        if not self.api_key:
            raise ValueError(
                "You provided a cloud instance without setting the API key either in the code or as an environment variable."
            )
        self.region = self.region or "us-east-1"
        return self
class LocalConnectionConfig(BaseModel):
    """Connection settings for a local (on-disk) LanceDB database."""

    uri: str
    use_async: bool
class TableConfig(BaseModel):
    """Target table name plus whether it already exists in the database."""

    table_name: str
    table_exists: bool
class EmbeddingConfig(BaseModel):
    """
    Selection of the embedding model backing an index.

    Exactly one of ``text_embedding_model`` or ``multi_modal_embedding_model``
    must be provided; ``embedding_kwargs`` is forwarded to the model factory.
    """

    text_embedding_model: Optional[
        Literal[
            "bedrock-text",
            "cohere",
            "gemini-text",
            "instructor",
            "ollama",
            "openai",
            "sentence-transformers",
            "gte-text",
            "huggingface",
            "colbert",
            "jina",
            "watsonx",
            "voyageai",
        ]
    ]
    multi_modal_embedding_model: Optional[
        Literal["open-clip", "colpali", "jina", "imagebind"]
    ]
    embedding_kwargs: Dict[str, Any]

    @model_validator(mode="after")
    def validate_embeddings(self) -> Self:
        """Ensure exactly one embedding model kind is configured."""
        if (
            self.text_embedding_model is None
            and self.multi_modal_embedding_model is None
        ):
            raise ValueError(
                "You must specify either a multimodal or a text embedding model"
            )
        if (
            self.text_embedding_model is not None
            and self.multi_modal_embedding_model is not None
        ):
            raise ValueError(
                "You cannot specify both a multimodal and a text embedding model"
            )
        # Bug fix: a pydantic "after" model validator must return the validated
        # instance; the original implementation fell off the end (returned None).
        return self
class IndexingConfig(BaseModel):
    """Index type selection; materializes the matching async index config."""

    indexing: Literal[
        "IVF_PQ",
        "IVF_HNSW_PQ",
        "IVF_HNSW_SQ",
        "FTS",
        "BTREE",
        "BITMAP",
        "LABEL_LIST",
        "NO_INDEXING",
    ]
    async_index_config: Optional[
        Union[IvfPq, IvfFlat, HnswPq, HnswSq, BTree, Bitmap, LabelList, FTS]
    ] = None
    indexing_kwargs: Dict[str, Any]

    @model_validator(mode="after")
    def validate_index(self) -> Self:
        """Build the async index config object matching ``indexing``."""
        # Map each index name to its lancedb config class; "NO_INDEXING"
        # deliberately clears any pre-set config.
        index_classes = {
            "IVF_PQ": IvfPq,
            "IVF_HNSW_PQ": HnswPq,
            "IVF_HNSW_SQ": HnswSq,
            "FTS": FTS,
            "BTREE": BTree,
            "BITMAP": Bitmap,
            "LABEL_LIST": LabelList,
        }
        if self.indexing == "NO_INDEXING":
            self.async_index_config = None
        else:
            expected = index_classes.get(self.indexing)
            if expected is not None and not isinstance(
                self.async_index_config, expected
            ):
                self.async_index_config = expected(**self.indexing_kwargs)
        return self
class LanceDBTextModel(BaseModel):
    """
    Wrapper resolving a text embedding model name to a LanceDB
    ``EmbeddingFunction`` via the global registry.
    """

    embedding_model: Union[
        Literal[
            "bedrock-text",
            "cohere",
            "gemini-text",
            "instructor",
            "ollama",
            "openai",
            "sentence-transformers",
            "gte-text",
            "huggingface",
            "colbert",
            "jina",
            "watsonx",
            "voyageai",
        ],
        EmbeddingFunction,
    ]
    kwargs: dict = Field(
        default_factory=dict,
    )

    @model_validator(mode="after")
    def validate_embedding_model(self) -> Self:
        """Replace a model-name string with the instantiated embedding function."""
        if isinstance(self.embedding_model, str):
            try:
                self.embedding_model = (
                    get_registry().get(self.embedding_model).create(**self.kwargs)
                )
            except Exception as e:
                # Fix: chain the original exception so the real cause (bad
                # model name, missing credentials, ...) stays in the traceback.
                raise ValueError(
                    f"An exception occurred while creating the embeddings function: {e}"
                ) from e
        return self
class LanceDBMultiModalModel(BaseModel):
    """
    Wrapper resolving a multimodal embedding model name to a LanceDB
    ``EmbeddingFunction`` via the global registry.
    """

    embedding_model: Union[
        Literal["open-clip", "colpali", "jina", "imagebind"], EmbeddingFunction
    ]
    kwargs: dict = Field(
        default_factory=dict,
    )

    @model_validator(mode="after")
    def validate_embedding_model(self) -> Self:
        """Replace a model-name string with the instantiated embedding function."""
        if isinstance(self.embedding_model, str):
            try:
                self.embedding_model = (
                    get_registry().get(self.embedding_model).create(**self.kwargs)
                )
            except Exception as e:
                # Fix: chain the original exception so the real cause stays
                # visible in the traceback.
                raise ValueError(
                    f"An exception occurred while creating the embeddings function: {e}"
                ) from e
        return self
def get_lancedb_text_embedding_model(
    embedding_model: Literal[
        "bedrock-text",
        "cohere",
        "gemini-text",
        "instructor",
        "ollama",
        "openai",
        "sentence-transformers",
        "gte-text",
        "huggingface",
        "colbert",
        "jina",
        "watsonx",
        "voyageai",
    ],
    **kwargs: Any,
) -> "LanceDBTextModel":
    """
    Get a pre-defined LanceDB text embedding model.

    Args:
        embedding_model (str): name of the embedding model.
        **kwargs (Any): keyword arguments that are necessary for the initialization of the embedding model you want to use.

    Returns:
        LanceDBTextModel: a wrapper whose ``embedding_model`` attribute holds the created LanceDB ``EmbeddingFunction``.
    """
    return LanceDBTextModel(embedding_model=embedding_model, kwargs=kwargs)
def get_lancedb_multimodal_embedding_model(
    embedding_model: Literal["open-clip", "colpali", "jina", "imagebind"], **kwargs: Any
) -> "LanceDBMultiModalModel":
    """
    Get a pre-defined LanceDB multimodal embedding model.

    Args:
        embedding_model (str): name of the embedding model.
        **kwargs (Any): keyword arguments that are necessary for the initialization of the embedding model you want to use.

    Returns:
        LanceDBMultiModalModel: a wrapper whose ``embedding_model`` attribute holds the created LanceDB ``EmbeddingFunction``.
    """
    return LanceDBMultiModalModel(embedding_model=embedding_model, kwargs=kwargs)
def query_text(table: Table, query: str, **kwargs: Any) -> List[NodeWithScore]:
    """
    Run a text query against a table.

    Args:
        table (Table): LanceDB table to search.
        query (str): a string representing a text query.

    Returns:
        List[NodeWithScore]: matching rows wrapped as scored nodes.
    """
    results: List[NodeWithScore] = []
    for row in table.search(query).to_list():
        score = row["_distance"]
        if "text" in row:
            # Text rows become plain Documents.
            node = Document(text=row["text"], id_=row["id"])
            results.append(NodeWithScore(node=node, score=score))
        else:
            # Image rows carry both the source URI and the raw bytes.
            image_node = ImageDocument(
                image_url=row["image_uri"],
                image=row["image_bytes"],
                id_=row["id"],
                metadata={"label": row["label"]},
            )
            results.append(NodeWithScore(node=image_node, score=score))
    return results
def query_multimodal(
    table: Table,
    query: Union[ImageDocument, Image.Image, str, ImageBlock],
    **kwargs: Any,
) -> List[NodeWithScore]:
    """
    Query a multimodal table.

    Args:
        table (Table): LanceDB table to search.
        query (Union[ImageDocument, Image.Image, str, ImageBlock]): An ImageDocument or an ImageBlock, a PIL Image, or a string holding an HTTP(S) URL or a local file path.

    Returns:
        List[NodeWithScore]: matching images wrapped as scored nodes.

    Raises:
        ValueError: If the query is of an unsupported type.
    """
    if isinstance(query, (ImageBlock, ImageDocument)):
        image_query = Image.open(query.resolve_image())
    elif isinstance(query, Image.Image):
        image_query = query
    elif isinstance(query, str):
        if query.startswith(("http://", "https://")):
            image_bytes = httpx.get(query).content
            image_query = Image.open(io.BytesIO(image_bytes))
        else:
            # Generalization: non-URL strings are treated as local file paths
            # (previously every string was fetched over HTTP and local paths failed).
            image_query = Image.open(query)
    else:
        raise ValueError("Image type not supported.")
    data = table.search(image_query).to_list()
    documents: List[NodeWithScore] = []
    for d in data:
        documents.append(
            NodeWithScore(
                ImageDocument(
                    image_url=d["image_uri"],
                    image=d["image_bytes"],
                    id_=d["id"],
                    metadata={"label": d["label"]},
                ),
                score=d["_distance"],
            )
        )
    return documents
async def aquery_text(
    table: AsyncTable, query: str, **kwargs: Any
) -> List[NodeWithScore]:
    """
    Run a text query against an async table.

    Args:
        table (AsyncTable): LanceDB async table to search.
        query (str): a string representing a text query.

    Returns:
        List[NodeWithScore]: matching rows wrapped as scored nodes.
    """
    search = await table.search(query)
    rows = await search.to_list()
    results: List[NodeWithScore] = []
    for row in rows:
        score = row["_distance"]
        if "text" in row:
            # Text rows become plain Documents.
            node = Document(text=row["text"], id_=row["id"])
            results.append(NodeWithScore(node=node, score=score))
        else:
            # Image rows carry both the source URI and the raw bytes.
            image_node = ImageDocument(
                image_url=row["image_uri"],
                image=row["image_bytes"],
                id_=row["id"],
                metadata={"label": row["label"]},
            )
            results.append(NodeWithScore(node=image_node, score=score))
    return results
async def aquery_multimodal(
    table: AsyncTable,
    query: Union[ImageDocument, Image.Image, str, ImageBlock],
    **kwargs: Any,
) -> List[NodeWithScore]:
    """
    Query a multimodal table asynchronously.

    Args:
        table (AsyncTable): LanceDB async table to search.
        query (Union[ImageDocument, Image.Image, str, ImageBlock]): An ImageDocument or an ImageBlock, a PIL Image, or a string holding an HTTP(S) URL or a local file path.

    Returns:
        List[NodeWithScore]: matching images wrapped as scored nodes.

    Raises:
        ValueError: If the query is of an unsupported type.
    """
    if isinstance(query, (ImageBlock, ImageDocument)):
        image_query = Image.open(query.resolve_image())
    elif isinstance(query, Image.Image):
        image_query = query
    elif isinstance(query, str):
        if query.startswith(("http://", "https://")):
            # NOTE(review): httpx.get is a synchronous call inside an async
            # function; acceptable for small images but blocks the event loop.
            image_bytes = httpx.get(query).content
            image_query = Image.open(io.BytesIO(image_bytes))
        else:
            # Generalization: non-URL strings are treated as local file paths
            # (previously every string was fetched over HTTP and local paths failed).
            image_query = Image.open(query)
    else:
        raise ValueError("Image type not supported.")
    dt = await table.search(image_query)
    data = await dt.to_list()
    documents: List[NodeWithScore] = []
    for d in data:
        documents.append(
            NodeWithScore(
                ImageDocument(
                    image_url=d["image_uri"],
                    image=d["image_bytes"],
                    id_=d["id"],
                    metadata={"label": d["label"]},
                ),
                score=d["_distance"],
            )
        )
    return documents
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/indices/llama-index-indices-managed-lancedb/llama_index/indices/managed/lancedb/utils.py",
"license": "MIT License",
"lines": 335,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/indices/llama-index-indices-managed-lancedb/tests/test_index.py | import pytest
import shutil
import os
import requests
import uuid
from lancedb import AsyncConnection, DBConnection
from lancedb.table import AsyncTable, Table
from typing import Generator
import pandas as pd
from llama_index.indices.managed.lancedb.retriever import LanceDBRetriever
from llama_index.indices.managed.lancedb.query_engine import LanceDBRetrieverQueryEngine
from llama_index.indices.managed.lancedb import LanceDBMultiModalIndex
from llama_index.indices.managed.lancedb.utils import (
TableConfig,
EmbeddingConfig,
IndexingConfig,
LanceDBTextModel,
LanceDBMultiModalModel,
)
from llama_index.core.schema import Document, NodeWithScore
from llama_index.core import Settings
from llama_index.core.llms import MockLLM
from typing import List
@pytest.fixture()
def document_data() -> List[Document]:
    """Two minimal text documents used to build a text index."""
    return [Document(id_="1", text="Hello"), Document(id_="2", text="Test")]
@pytest.fixture()
def data() -> pd.DataFrame:
    """
    Build a small multimodal dataframe (id, label, image URI/bytes, metadata).

    NOTE(review): downloads six images from picsum.photos during setup, so
    this fixture requires network access.
    """
    labels = ["cat", "cat", "dog", "dog", "horse", "horse"]
    uris = [
        "https://picsum.photos/200/200?random=1",
        "https://picsum.photos/200/200?random=2",
        "https://picsum.photos/200/200?random=3",
        "https://picsum.photos/200/200?random=4",
        "https://picsum.photos/200/200?random=5",
        "https://picsum.photos/200/200?random=6",
    ]
    ids = [
        "1",
        "2",
        "3",
        "4",
        "5",
        "6",
    ]
    image_bytes = []
    for uri in uris:
        # Fail the fixture early if any image cannot be fetched.
        response = requests.get(uri)
        response.raise_for_status()
        image_bytes.append(response.content)
    metadata = [
        '{"mimetype": "image/jpeg"}',
        '{"mimetype": "image/jpeg"}',
        '{"mimetype": "image/jpeg"}',
        '{"mimetype": "image/jpeg"}',
        '{"mimetype": "image/jpeg"}',
        '{"mimetype": "image/jpeg"}',
    ]
    return pd.DataFrame(
        {
            "id": ids,
            "label": labels,
            "image_uri": uris,
            "image_bytes": image_bytes,
            "metadata": metadata,
        }
    )
@pytest.fixture()
def uri() -> Generator[str, None, None]:
    """Yield a unique LanceDB directory path and remove it after the test."""
    uri = f"lancedb/{uuid.uuid4()}"
    yield uri
    # Teardown: drop anything the test materialized on disk.
    if os.path.exists(uri):
        shutil.rmtree(uri)
@pytest.mark.asyncio
async def test_init(
    document_data: List[Document], data: pd.DataFrame, uri: str
) -> None:
    """Construct the index three ways and check configs / connection types."""
    # 1) Plain constructor: verify the derived config objects.
    first = LanceDBMultiModalIndex(
        uri=uri,
        text_embedding_model="sentence-transformers",
        embedding_model_kwargs={"name": "all-MiniLM-L6-v2"},
        table_name="test_table",
    )
    assert first.connection_config.uri == uri
    assert first.connection_config.use_async is False
    assert first.table_config == TableConfig(
        table_name="test_table", table_exists=False
    )
    assert first.indexing_config == IndexingConfig(
        indexing="IVF_PQ", indexing_kwargs={}
    )
    assert first.embedding_config == EmbeddingConfig(
        text_embedding_model="sentence-transformers",
        multi_modal_embedding_model=None,
        embedding_kwargs={"name": "all-MiniLM-L6-v2"},
    )
    # 2) from_documents with use_async=True: async connection/table expected.
    second = await LanceDBMultiModalIndex.from_documents(
        documents=document_data,
        uri=f"{uri}/documents",
        text_embedding_model="sentence-transformers",
        embedding_model_kwargs={"name": "all-MiniLM-L6-v2"},
        table_name="test_table",
        indexing="NO_INDEXING",
        use_async=True,
    )
    assert isinstance(second._connection, AsyncConnection)
    assert isinstance(second._table, AsyncTable)
    assert isinstance(second._embedding_model, LanceDBTextModel)
    # 3) from_data with use_async=False: sync connection/table expected.
    third = await LanceDBMultiModalIndex.from_data(
        data=data,
        uri=f"{uri}/from-data",
        multimodal_embedding_model="open-clip",
        indexing="NO_INDEXING",
        use_async=False,
    )
    assert isinstance(third._connection, DBConnection)
    assert isinstance(third._table, Table)
    assert isinstance(third._embedding_model, LanceDBMultiModalModel)
@pytest.mark.asyncio
async def test_retriever_qe(uri: str, document_data: List[Document]) -> None:
    """Retriever and query engine built from a text index return results."""
    # A mock LLM keeps the query engine from making real completion calls.
    Settings.llm = MockLLM()
    second = await LanceDBMultiModalIndex.from_documents(
        documents=document_data,
        uri=f"{uri}/documents",
        text_embedding_model="sentence-transformers",
        embedding_model_kwargs={"name": "all-MiniLM-L6-v2"},
        table_name="test_table",
        indexing="NO_INDEXING",
        use_async=True,
    )
    retr = second.as_retriever()
    assert isinstance(retr, LanceDBRetriever)
    retrieved = await retr.aretrieve(query_str="Hello")
    assert isinstance(retrieved, list)
    assert len(retrieved) > 0
    assert isinstance(retrieved[0], NodeWithScore)
    qe = second.as_query_engine()
    assert isinstance(qe, LanceDBRetrieverQueryEngine)
    response = await qe.aquery(query_str="Hello")
    assert isinstance(response.response, str)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/indices/llama-index-indices-managed-lancedb/tests/test_index.py",
"license": "MIT License",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-elevenlabs/llama_index/voice_agents/elevenlabs/events.py | from llama_index.core.voice_agents import BaseVoiceAgentEvent
from llama_index.core.bridge.pydantic import ConfigDict
from typing import Union, Any
class ConversationInitEvent(BaseVoiceAgentEvent):
    """Conversation-initiation event carrying the conversation id; extra fields allowed."""

    model_config = ConfigDict(extra="allow")
    conversation_id: str
class AudioEvent(BaseVoiceAgentEvent):
    """Audio chunk event, delivered as base64-encoded data."""

    model_config = ConfigDict(extra="allow")
    base_64_encoded_audio: str
class AgentResponseEvent(BaseVoiceAgentEvent):
    """Agent text-response event."""

    model_config = ConfigDict(extra="allow")
    agent_response: str
class AgentResponseCorrectionEvent(BaseVoiceAgentEvent):
    """Corrected version of a previously emitted agent response."""

    model_config = ConfigDict(extra="allow")
    corrected_agent_response: str
class UserTranscriptionEvent(BaseVoiceAgentEvent):
    """Transcription of the user's speech."""

    model_config = ConfigDict(extra="allow")
    user_transcript: str
class InterruptionEvent(BaseVoiceAgentEvent):
    """Interruption marker event; any provider-specific extra fields are kept."""

    model_config = ConfigDict(extra="allow")
class PingEvent(BaseVoiceAgentEvent):
    """Keep-alive ping event with a latency value in milliseconds."""

    model_config = ConfigDict(extra="allow")
    ping_ms: Union[float, int]
class ClientToolCallEvent(BaseVoiceAgentEvent):
    """Request for the client to execute a named tool with the given parameters."""

    model_config = ConfigDict(extra="allow")
    tool_call_id: str
    tool_name: str
    parameters: Any
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-elevenlabs/llama_index/voice_agents/elevenlabs/events.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-elevenlabs/llama_index/voice_agents/elevenlabs/interface.py | from llama_index.core.voice_agents.interface import BaseVoiceAgentInterface
from elevenlabs.conversational_ai.default_audio_interface import DefaultAudioInterface
class ElevenLabsVoiceAgentInterface(DefaultAudioInterface, BaseVoiceAgentInterface):
    """Audio I/O interface backed by the ElevenLabs ``DefaultAudioInterface``."""

    def __init__(self, *args, **kwargs):
        # Extra args are accepted for interface compatibility but ignored.
        super().__init__()

    # Some methods from BaseVoiceAgentInterface are not implemented in DefaultAudioInterface, so we implement toy methods here
    def _speaker_callback(self, *args, **kwargs):
        pass

    def _microphone_callback(self, *args, **kwargs):
        pass

    def receive(self, data, *args, **kwargs):
        pass
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-elevenlabs/llama_index/voice_agents/elevenlabs/interface.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-elevenlabs/tests/test_events.py | from llama_index.voice_agents.elevenlabs.events import (
PingEvent,
AudioEvent,
AgentResponseEvent,
AgentResponseCorrectionEvent,
UserTranscriptionEvent,
InterruptionEvent,
ConversationInitEvent,
ClientToolCallEvent,
)
from llama_index.core.voice_agents import BaseVoiceAgentEvent
def test_events_init() -> None:
    """Every event model constructs and derives from BaseVoiceAgentEvent."""
    events = [
        PingEvent(type_t="ping", ping_ms=100),
        AudioEvent(type_t="audio", base_64_encoded_audio="audio"),
        AgentResponseCorrectionEvent(
            type_t="agent_response_correction",
            corrected_agent_response="Corrected Response.",
        ),
        AgentResponseEvent(type_t="agent_response", agent_response="Response."),
        UserTranscriptionEvent(
            type_t="user_transcription", user_transcript="Transcript."
        ),
        InterruptionEvent(
            type_t="interruption", interrupted=True
        ),  # this tests that extra fields are allowed
        ConversationInitEvent(
            type_t="conversation_initiation_metadata",
            conversation_id="1",
            metadata={"latency": 12},
        ),
        ClientToolCallEvent(
            type_t="client_tool_call",
            tool_call_id="1",
            tool_name="greet",
            parameters={},
        ),
    ]
    for event in events:
        assert isinstance(event, BaseVoiceAgentEvent)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-elevenlabs/tests/test_events.py",
"license": "MIT License",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/llama_index/core/chat_ui/events.py | from typing import Any, List
from llama_index.core.chat_ui.models.artifact import Artifact
from llama_index.core.schema import NodeWithScore
from llama_index.core.workflow.events import Event
class UIEvent(Event):
    """Generic UI event: an arbitrary payload under a string type tag."""

    type: str
    data: Any
class SourceNodesEvent(Event):
    """Event carrying a list of retrieved nodes with scores."""

    nodes: List[NodeWithScore]
class ArtifactEvent(Event):
    """Event carrying an Artifact payload; ``type`` defaults to "artifact"."""

    type: str = "artifact"
    data: Artifact
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/llama_index/core/chat_ui/events.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-core/llama_index/core/chat_ui/models/artifact.py | from enum import Enum
from typing import List, Literal, Optional, Union
from pydantic import BaseModel
class ArtifactType(str, Enum):
    """Kinds of artifacts the chat UI can carry."""

    CODE = "code"
    DOCUMENT = "document"
class CodeArtifactData(BaseModel):
    """Payload of a code artifact: a single named source file."""

    file_name: str
    code: str
    language: str
class DocumentArtifactSource(BaseModel):
    """Reference, by id, to a source used by a document artifact."""

    id: str
class DocumentArtifactData(BaseModel):
    """Payload of a document artifact: titled markdown or HTML content."""

    title: str
    content: str
    type: Literal["markdown", "html"]
    sources: Optional[List[DocumentArtifactSource]] = None
class Artifact(BaseModel):
    """An artifact: type tag, payload, and optional creation timestamp."""

    created_at: Optional[int] = None
    type: ArtifactType
    data: Union[CodeArtifactData, DocumentArtifactData]
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/llama_index/core/chat_ui/models/artifact.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-core/llama_index/core/voice_agents/base.py | from typing import Optional, Any, Callable, List
from abc import ABC, abstractmethod
from .websocket import BaseVoiceAgentWebsocket
from .interface import BaseVoiceAgentInterface
from .events import BaseVoiceAgentEvent
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool
class BaseVoiceAgent(ABC):
    """
    Abstract class that serves as base for any Voice Agent.

    Attributes:
        ws (BaseVoiceAgentWebsocket): The websocket underlying the agent and providing the voice service.
        interface (BaseVoiceAgentInterface): The audio input/output interface.
        api_key (Optional[str]): API key (if needed). Defaults to None.
        tools (Optional[List[BaseTool]]): List of tools for the agent to use (tool use should be adapted to the specific integration). Defaults to None.
        _messages (List[ChatMessage]): Private attribute initialized as an empty list of ChatMessage, it should be populated with chat messages as the conversation goes on.
        _events (List[BaseVoiceAgentEvent]): Private attribute initialized as an empty list of BaseVoiceAgentEvent, it should be populated with events as the conversation goes on.
    """

    def __init__(
        self,
        ws: Optional[BaseVoiceAgentWebsocket] = None,
        interface: Optional[BaseVoiceAgentInterface] = None,
        ws_url: Optional[str] = None,
        api_key: Optional[str] = None,
        tools: Optional[List[BaseTool]] = None,
    ):
        self.ws = ws
        self.ws_url = ws_url
        self.interface = interface
        self.api_key = api_key
        self.tools = tools
        self._messages: List[ChatMessage] = []
        self._events: List[BaseVoiceAgentEvent] = []

    @abstractmethod
    async def start(self, *args: Any, **kwargs: Any) -> None:
        """
        Start the voice agent.

        Args:
            *args: Can take any positional argument.
            **kwargs: Can take any keyword argument.

        Returns:
            out (None): This function does not return anything.
        """
        ...

    @abstractmethod
    async def send(self, audio: Any, *args: Any, **kwargs: Any) -> None:
        """
        Send audio to the websocket underlying the voice agent.

        Args:
            audio (Any): audio data to send (generally as bytes or str, but it is kept open also to other types).
            *args: Can take any positional argument.
            **kwargs: Can take any keyword argument.

        Returns:
            out (None): This function does not return anything.
        """
        ...

    @abstractmethod
    async def handle_message(self, message: Any, *args: Any, **kwargs: Any) -> Any:
        """
        Handle incoming message.

        Args:
            message (Any): incoming message (should be dict, but it is kept open also for other types).
            *args: Can take any positional argument.
            **kwargs: Can take any keyword argument.

        Returns:
            out (Any): This function can return any output.
        """
        ...

    @abstractmethod
    async def interrupt(self) -> None:
        """
        Interrupt the input/output audio flow.

        Returns:
            out (None): This function does not return anything.
        """
        ...

    @abstractmethod
    async def stop(self) -> None:
        """
        Stop the conversation with the voice agent.

        Returns:
            out (None): This function does not return anything.
        """
        ...

    def export_messages(
        self,
        limit: Optional[int] = None,
        filter: Optional[Callable[[List[ChatMessage]], List[ChatMessage]]] = None,
    ) -> List[ChatMessage]:
        """
        Export all recorded messages during a conversation.

        Args:
            limit (Optional[int]): Maximum number of messages to return. Defaults to None (no limit).
            filter (Optional[Callable[[List[ChatMessage]], List[ChatMessage]]]): Filter function applied after the limit. Defaults to None.

        Returns:
            out (List[ChatMessage]): exported messages.
        """
        messages = self._messages
        if limit is not None:
            # Fix: honor limit == 0 (previously any falsy limit was silently
            # ignored). Slicing is safe even when limit exceeds the length,
            # which also removes the redundant `limit <= len(...)` guard.
            messages = messages[:limit]
        if filter:
            messages = filter(messages)
        return messages

    def export_events(
        self,
        limit: Optional[int] = None,
        filter: Optional[
            Callable[[List[BaseVoiceAgentEvent]], List[BaseVoiceAgentEvent]]
        ] = None,
    ) -> List[BaseVoiceAgentEvent]:
        """
        Export all recorded events during a conversation.

        Args:
            limit (Optional[int]): Maximum number of events to return. Defaults to None (no limit).
            filter (Optional[Callable[[List[BaseVoiceAgentEvent]], List[BaseVoiceAgentEvent]]]): Filter function applied after the limit. Defaults to None.

        Returns:
            out (List[BaseVoiceAgentEvent]): exported events.
        """
        events = self._events
        if limit is not None:
            # Same limit semantics as export_messages (see comment there).
            events = events[:limit]
        if filter:
            events = filter(events)
        return events
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/llama_index/core/voice_agents/base.py",
"license": "MIT License",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-core/llama_index/core/voice_agents/events.py | from llama_index.core.bridge.pydantic import BaseModel, Field
class BaseVoiceAgentEvent(BaseModel):
    """
    Base class to represent events in Voice Agents conversations.

    Attributes:
        type_t (str): Event type; serialized as 'type' (use ``model_dump(by_alias=True)``).
    """

    type_t: str = Field(serialization_alias="type")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/llama_index/core/voice_agents/events.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-core/llama_index/core/voice_agents/interface.py | from typing import Any
from abc import ABC, abstractmethod
class BaseVoiceAgentInterface(ABC):
    """
    Abstract base class for a voice agent audio input/output interface.

    Concrete subclasses wire an audio input (microphone) and output (speaker)
    to a voice agent; see the per-method docstrings for the expected contract.
    """

    @abstractmethod
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Please implement this method by initializing the class with arbitrary attributes."""
        ...

    @abstractmethod
    def _speaker_callback(self, *args: Any, **kwargs: Any) -> Any:
        """
        Callback function for the audio output device.

        Args:
            *args: Can take any positional argument.
            **kwargs: Can take any keyword argument.

        Returns:
            out (Any): This function can return any output.
        """
        ...

    @abstractmethod
    def _microphone_callback(self, *args: Any, **kwargs: Any) -> Any:
        """
        Callback function for the audio input device.

        Args:
            *args: Can take any positional argument.
            **kwargs: Can take any keyword argument.

        Returns:
            out (Any): This function can return any output.
        """
        ...

    @abstractmethod
    def start(self, *args: Any, **kwargs: Any) -> None:
        """
        Start the interface.

        Args:
            *args: Can take any positional argument.
            **kwargs: Can take any keyword argument.

        Returns:
            out (None): This function does not return anything.
        """
        ...

    @abstractmethod
    def stop(self) -> None:
        """
        Stop the interface.

        Returns:
            out (None): This function does not return anything.
        """
        ...

    @abstractmethod
    def interrupt(self) -> None:
        """
        Interrupt the interface.

        Returns:
            out (None): This function does not return anything.
        """
        ...

    @abstractmethod
    def output(self, *args: Any, **kwargs: Any) -> Any:
        """
        Process and output the audio.

        Args:
            *args: Can take any positional argument.
            **kwargs: Can take any keyword argument.

        Returns:
            out (Any): This function can return any output.
        """
        ...

    @abstractmethod
    def receive(self, data: Any, *args: Any, **kwargs: Any) -> Any:
        """
        Receive audio data.

        Args:
            data (Any): received audio data (generally as bytes or str, but it is kept open also to other types).
            *args: Can take any positional argument.
            **kwargs: Can take any keyword argument.

        Returns:
            out (Any): This function can return any output.
        """
        ...
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/llama_index/core/voice_agents/interface.py",
"license": "MIT License",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-core/llama_index/core/voice_agents/websocket.py | from abc import ABC, abstractmethod
from typing import Optional, Any, TYPE_CHECKING
if TYPE_CHECKING:
from websockets.asyncio.client import ClientConnection
class BaseVoiceAgentWebsocket(ABC):
    """
    Abstract base class for a voice agent websocket.

    Attributes:
        uri (str): URL of the websocket.
        ws (Optional[ClientConnection]): Private attribute, initialized as None, represents the websocket client.
    """

    def __init__(
        self,
        uri: str,
    ):
        self.uri = uri
        self.ws: Optional[ClientConnection] = None

    def connect(self) -> None:
        """
        Connect to the websocket.

        Intentionally a no-op hook: synchronous implementations override it,
        async-only implementations use ``aconnect`` instead.

        Returns:
            out (None): This function does not return anything.
        """

    async def aconnect(self) -> None:
        """
        Asynchronously connect to the websocket.

        Intentionally a no-op hook. The implementation should be:

        ```
        self.ws = await websockets.connect(uri=self.uri)
        ```

        Returns:
            out (None): This function does not return anything.
        """

    @abstractmethod
    async def send(self, data: Any) -> None:
        """
        Send data to the websocket.

        Args:
            data (Any): Data to send to the websocket.

        Returns:
            out (None): This function does not return anything.
        """
        ...

    @abstractmethod
    async def close(self) -> None:
        """
        Close the connection with the websocket.

        Returns:
            out (None): This function does not return anything.
        """
        ...
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/llama_index/core/voice_agents/websocket.py",
"license": "MIT License",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-core/tests/voice_agents/test_event_serialization.py | import pytest
from typing import Dict
from llama_index.core.voice_agents.events import BaseVoiceAgentEvent
@pytest.fixture()
def json_event() -> Dict[str, str]:
    """Expected serialized form of a BaseVoiceAgentEvent with type 'text'."""
    return {"type": "text"}
def test_event_serialization(json_event: Dict[str, str]) -> None:
    # by_alias=True maps the `type_t` field to its `type` serialization alias.
    assert BaseVoiceAgentEvent(type_t="text").model_dump(by_alias=True) == json_event
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/voice_agents/test_event_serialization.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/voice_agents/test_subclasses.py | import pytest
from typing import List, Any, Union, Optional, Iterable, AsyncIterable
from llama_index.core.voice_agents import (
BaseVoiceAgent,
BaseVoiceAgentInterface,
BaseVoiceAgentWebsocket,
BaseVoiceAgentEvent,
)
from llama_index.core.llms import ChatMessage
# Try to import websockets dependency
try:
from websockets import ClientConnection, ClientProtocol
from websockets.uri import WebSocketURI
websockets_available = True
except ImportError:
websockets_available = False
# Create dummy classes to prevent NameError when defining mocks
ClientConnection = object
ClientProtocol = object
WebSocketURI = object
class MockVoiceAgentInterface(BaseVoiceAgentInterface):
    """In-memory BaseVoiceAgentInterface double that records every call.

    State is exposed through ``output()`` (name, started/stopped flags,
    interrupt count) and the raw ``_received`` list, which the tests below
    assert against directly.
    """

    def __init__(self, name: str = "interface") -> None:
        self.name = name
        self._is_started = False
        self._is_stopped = False
        self._num_interrupted = 0
        self._received: List[bytes] = []

    def _speaker_callback(self) -> None:
        # A "." suffix marks one speaker-callback invocation.
        self.name += "."

    def _microphone_callback(self) -> None:
        # A "," suffix marks one microphone-callback invocation.
        self.name += ","

    def start(self) -> None:
        self._is_started = True

    def stop(self) -> None:
        self._is_stopped = True

    def interrupt(self) -> None:
        self._num_interrupted += 1

    def output(self) -> List[Any]:
        # Snapshot of all recorded state, in a fixed order the tests rely on.
        return [self.name, self._is_started, self._is_stopped, self._num_interrupted]

    def receive(self, data: bytes) -> None:
        self._received.append(data)
class MockConnection(ClientConnection):
    """Test double for a ``websockets`` client connection.

    Records outgoing messages in ``_sent``, receive calls in ``_received``,
    and closure in ``_is_closed`` instead of doing any network I/O.
    """

    def __init__(self):
        if websockets_available:
            # Build real protocol plumbing only when websockets is importable;
            # otherwise ClientConnection is a plain ``object`` stand-in.
            self.ws_uri = WebSocketURI(
                secure=True, host="localhost", port=2000, path="", query=""
            )
            self.protocol = ClientProtocol(uri=self.ws_uri)
        self._sent: List[
            Union[
                str,
                bytes,
                Iterable[Union[str, bytes]],
                AsyncIterable[Union[str, bytes]],
            ]
        ] = []
        self._received: List[str] = []
        self._is_closed: bool = False

    async def send(
        self,
        message: Union[
            str, bytes, Iterable[Union[str, bytes]], AsyncIterable[Union[str, bytes]]
        ],
        text: Optional[bool] = None,
    ) -> None:
        # Capture the message instead of transmitting it.
        self._sent.append(message)

    async def recv(self, decode: Optional[bool] = None) -> Any:
        # Only records that a receive happened; returns None (unlike a real recv).
        self._received.append("Received a message")

    async def close(self, code: int = 1000, reason: str = "") -> None:
        self._is_closed = True
class MockVoiceAgentWebsocket(BaseVoiceAgentWebsocket):
    """BaseVoiceAgentWebsocket double backed by a MockConnection."""

    def __init__(self, uri: str, api_key: str):
        super().__init__(uri=uri)
        self.api_key = api_key

    async def aconnect(self) -> None:
        # "Connecting" just installs the in-memory connection double.
        self.ws: ClientConnection = MockConnection()

    def connect(self) -> None:
        # Sync connect is intentionally a no-op in the mock.
        pass

    async def send(
        self,
        data: Union[
            str, bytes, Iterable[Union[str, bytes]], AsyncIterable[Union[str, bytes]]
        ],
    ) -> None:
        await self.ws.send(message=data)

    async def close(self) -> Any:
        await self.ws.close()
class MockVoiceAgent(BaseVoiceAgent):
    """BaseVoiceAgent double that records lifecycle calls and payloads."""

    def __init__(
        self,
        ws: BaseVoiceAgentWebsocket,
        interface: BaseVoiceAgentInterface,
        api_key: Optional[str] = None,
    ):
        super().__init__(ws=ws, interface=interface, api_key=api_key)
        self._is_started = False
        self._sent: List[Any] = []
        self._handled: List[dict] = []
        self._is_stopped = False

    async def start(self, *args, **kwargs) -> None:
        self._is_started = True

    async def send(self, audio: Any, *args, **kwargs) -> None:
        # Record instead of transmitting.
        self._sent.append(audio)

    async def interrupt(self) -> None:
        pass

    async def handle_message(self, message: dict) -> Any:
        self._handled.append(message)

    async def stop(self) -> None:
        self._is_stopped = True
@pytest.fixture()
def mock_interface() -> BaseVoiceAgentInterface:
    """Fresh interface double per test."""
    return MockVoiceAgentInterface()
@pytest.fixture()
def mock_websocket() -> BaseVoiceAgentWebsocket:
    """Websocket double with a dummy URI/API key (never used for I/O)."""
    return MockVoiceAgentWebsocket(
        uri="wss://my.mock.websocket:8000", api_key="fake-api-key"
    )
@pytest.fixture()
def mock_agent() -> BaseVoiceAgent:
    """Agent double wired to fresh websocket and interface doubles."""
    return MockVoiceAgent(
        ws=MockVoiceAgentWebsocket(
            uri="wss://my.mock.websocket:8000", api_key="fake-api-key"
        ),
        interface=MockVoiceAgentInterface(),
    )
@pytest.mark.skipif(not websockets_available, reason="websockets library not installed")
def test_interface_subclassing(mock_interface: MockVoiceAgentInterface):
    """Exercise every interface hook and check the recorded state snapshot."""
    mock_interface.start()
    mock_interface._speaker_callback()
    mock_interface._microphone_callback()
    mock_interface.receive(data=b"hello world!")
    mock_interface.interrupt()
    mock_interface.stop()
    # name gained "." then "," from the two callbacks; one interrupt recorded.
    assert mock_interface.output() == ["interface.,", True, True, 1]
    assert mock_interface._received == [b"hello world!"]
@pytest.mark.asyncio
@pytest.mark.skipif(not websockets_available, reason="websockets library not installed")
async def test_websocket_subclassing(mock_websocket: MockVoiceAgentWebsocket):
    """Connect, send (str and bytes), and close through the websocket double."""
    await mock_websocket.aconnect()
    assert isinstance(mock_websocket.ws, MockConnection)
    await mock_websocket.send(data="hello world")
    await mock_websocket.send(data=b"this is a test")
    assert mock_websocket.ws._sent == ["hello world", b"this is a test"]
    await mock_websocket.close()
    assert mock_websocket.ws._is_closed
@pytest.mark.asyncio
@pytest.mark.skipif(not websockets_available, reason="websockets library not installed")
async def test_agent_subclassing(mock_agent: MockVoiceAgent):
    """Exercise the agent lifecycle plus export_events/export_messages filtering."""
    await mock_agent.start()
    assert mock_agent._is_started
    await mock_agent.send(audio="Hello world")
    assert mock_agent._sent == ["Hello world"]
    await mock_agent.handle_message(message={"type": "text", "content": "content"})
    assert mock_agent._handled == [{"type": "text", "content": "content"}]
    # Seed internal histories directly so the export_* accessors can be tested.
    mock_agent._events = [
        BaseVoiceAgentEvent(type_t="send"),
        BaseVoiceAgentEvent(type_t="text"),
    ]
    mock_agent._messages = [
        ChatMessage(role="user", content="Hello world"),
        ChatMessage(role="assistant", content="content"),
    ]

    def filter_events(events: List[BaseVoiceAgentEvent]):
        # Keep only "send" events.
        return [event for event in events if event.type_t == "send"]

    assert mock_agent.export_events() == [
        BaseVoiceAgentEvent(type_t="send"),
        BaseVoiceAgentEvent(type_t="text"),
    ]
    assert mock_agent.export_events(filter=filter_events) == [
        BaseVoiceAgentEvent(type_t="send")
    ]
    # Per these assertions, limit=N keeps the first N recorded entries.
    assert mock_agent.export_events(limit=1) == [BaseVoiceAgentEvent(type_t="send")]

    def filter_messages(messages: List[ChatMessage]):
        # Keep only assistant messages.
        return [message for message in messages if message.role == "assistant"]

    assert mock_agent.export_messages() == [
        ChatMessage(role="user", content="Hello world"),
        ChatMessage(role="assistant", content="content"),
    ]
    assert mock_agent.export_messages(limit=1) == [
        ChatMessage(role="user", content="Hello world")
    ]
    assert mock_agent.export_messages(filter=filter_messages) == [
        ChatMessage(role="assistant", content="content")
    ]
    await mock_agent.stop()
    assert mock_agent._is_stopped
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/voice_agents/test_subclasses.py",
"license": "MIT License",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-openai/llama_index/voice_agents/openai/audio_interface.py | import pyaudio
import queue
import time
import logging
import threading
import asyncio
from typing import Callable, Optional, Any, Tuple, Union
from llama_index.core.voice_agents import BaseVoiceAgentInterface
logging.basicConfig(
level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s"
)
CHUNK_SIZE = 1024
RATE = 24000
FORMAT = pyaudio.paInt16
REENGAGE_DELAY_MS = 500
class OpenAIVoiceAgentInterface(BaseVoiceAgentInterface):
    """PyAudio-backed microphone/speaker interface for the OpenAI realtime agent.

    Microphone chunks are queued by a PyAudio callback and drained by
    ``output()``, which forwards each chunk to ``on_audio_callback``.
    Incoming audio is appended to ``audio_buffer`` via ``receive()`` and
    played by the speaker callback. While the speaker is playing, the
    microphone is suppressed for REENGAGE_DELAY_MS to avoid echo.
    """

    def __init__(
        self,
        chunk_size: int = CHUNK_SIZE,
        rate: int = RATE,
        format: int = FORMAT,
        on_audio_callback: Optional[Callable] = None,
    ):
        """
        Args:
            chunk_size (int): Frames per PyAudio buffer.
            rate (int): Sample rate in Hz.
            format (int): PyAudio sample format (16-bit PCM by default).
            on_audio_callback (Optional[Callable]): Async callable invoked
                with each captured microphone chunk.
        """
        self.chunk_size = chunk_size
        self.rate = rate
        self.format = format
        self.audio_buffer = bytearray()
        self.mic_queue: queue.Queue = queue.Queue()
        # Timestamp before which the microphone stays muted (echo suppression).
        self.mic_on_at: Union[int, float] = 0
        self.mic_active: Optional[bool] = None
        self._stop_event = threading.Event()
        self.p = pyaudio.PyAudio()
        self.on_audio_callback = on_audio_callback  # Callback for audio data

    def _microphone_callback(
        self, in_data: Any, frame_count: int, time_info: Any, status: Any
    ) -> Tuple[None, Any]:
        """Microphone callback that queues audio chunks."""
        # Only capture once the mute window set by the speaker has elapsed.
        if time.time() > self.mic_on_at:
            if not self.mic_active:
                self.mic_active = True
            self.mic_queue.put(in_data)
        else:
            if self.mic_active:
                self.mic_active = False
        return (None, pyaudio.paContinue)

    def _speaker_callback(
        self, in_data: Any, frame_count: int, time_info: Any, status: Any
    ) -> Tuple[bytes, Any]:
        """Speaker callback that plays audio."""
        # 2 bytes per frame: 16-bit PCM, mono (see start()).
        bytes_needed = frame_count * 2
        current_buffer_size = len(self.audio_buffer)

        if current_buffer_size >= bytes_needed:
            audio_chunk = bytes(self.audio_buffer[:bytes_needed])
            self.audio_buffer = self.audio_buffer[bytes_needed:]
            # Keep the microphone muted while we are actively playing audio.
            self.mic_on_at = time.time() + REENGAGE_DELAY_MS / 1000
        else:
            # Not enough buffered audio: pad the remainder with silence.
            audio_chunk = bytes(self.audio_buffer) + b"\x00" * (
                bytes_needed - current_buffer_size
            )
            self.audio_buffer.clear()

        return (audio_chunk, pyaudio.paContinue)

    def start(self) -> None:
        """Start microphone and speaker streams."""
        self.mic_stream = self.p.open(
            format=self.format,
            channels=1,
            rate=self.rate,
            input=True,
            stream_callback=self._microphone_callback,
            frames_per_buffer=self.chunk_size,
        )
        self.spkr_stream = self.p.open(
            format=self.format,
            channels=1,
            rate=self.rate,
            output=True,
            stream_callback=self._speaker_callback,
            frames_per_buffer=self.chunk_size,
        )
        self.mic_stream.start_stream()
        self.spkr_stream.start_stream()

    def stop(self) -> None:
        """Stop and close audio streams."""
        self.mic_stream.stop_stream()
        self.mic_stream.close()
        self.spkr_stream.stop_stream()
        self.spkr_stream.close()
        self.p.terminate()

    def interrupt(self) -> None:
        """Interrupts active input/output audio streaming."""
        if self.spkr_stream.is_active():
            self.spkr_stream.stop_stream()
        if self.mic_active:
            self.mic_stream.stop_stream()

    def output(self) -> None:
        """Process microphone audio and call back when new audio is ready.

        Runs until ``_stop_event`` is set; intended to be executed on a
        dedicated thread (see OpenAIVoiceAgent.start).
        """
        while not self._stop_event.is_set():
            if not self.mic_queue.empty():
                mic_chunk = self.mic_queue.get()
                if self.on_audio_callback:
                    # NOTE(review): asyncio.run spins up a fresh event loop per
                    # chunk — presumably acceptable at this chunk rate; confirm.
                    asyncio.run(self.on_audio_callback(mic_chunk))
            else:
                time.sleep(0.05)

    def receive(self, data: bytes, *args, **kwargs) -> None:
        """Appends audio data to the buffer for playback."""
        self.audio_buffer.extend(data)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-openai/llama_index/voice_agents/openai/audio_interface.py",
"license": "MIT License",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-openai/llama_index/voice_agents/openai/base.py | import base64
import logging
import os
import threading
from typing import List, Optional, Dict, Any
from .types import (
ConversationInputEvent,
ConversationDeltaEvent,
ConversationDoneEvent,
ConversationSessionUpdate,
ConversationSession,
ConversationTool,
ToolParameters,
FunctionCallDoneEvent,
FunctionResultItem,
SendFunctionItemEvent,
)
from .audio_interface import OpenAIVoiceAgentInterface
from .websocket import OpenAIVoiceAgentWebsocket
from llama_index.core.llms import ChatMessage, MessageRole, AudioBlock, TextBlock
from llama_index.core.tools import BaseTool
from llama_index.core.voice_agents import (
BaseVoiceAgent,
BaseVoiceAgentEvent,
BaseVoiceAgentInterface,
BaseVoiceAgentWebsocket,
)
logging.basicConfig(
level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s"
)
logger = logging.getLogger(__name__)
from .utils import get_tool_by_name
DEFAULT_WS_URL = "wss://api.openai.com/v1/realtime"
DEFAULT_MODEL = "gpt-4o-realtime-preview"
class OpenAIVoiceAgent(BaseVoiceAgent):
    """
    >**NOTE**: *This API is a BETA, and thus might be subject to changes*.

    Interface for the OpenAI Realtime Conversation integration with LlamaIndex.

    Attributes:
        ws (Optional[BaseVoiceAgentWebsocket]): A pre-defined websocket to use. Defaults to None. In case of doubt, it is advised to leave this argument as None and pass ws_url and model.
        interface (Optional[BaseVoiceAgentInterface]): Audio I/O interface. Defaults to None. In case of doubt, it is advised to leave this argument as None.
        api_key (Optional[str]): The OpenAI API key. Defaults to the environmental variable OPENAI_API_KEY if the value is None.
        ws_url (str): The URL for the OpenAI Realtime Conversation websocket. Defaults to: 'wss://api.openai.com/v1/realtime'.
        model (str): The conversational model. Defaults to: 'gpt-4o-realtime-preview'.
        tools (List[BaseTool]): Tools to equip the agent with.
    """

    def __init__(
        self,
        ws: Optional[BaseVoiceAgentWebsocket] = None,
        interface: Optional[BaseVoiceAgentInterface] = None,
        api_key: Optional[str] = None,
        ws_url: Optional[str] = None,
        model: Optional[str] = None,
        tools: Optional[List[BaseTool]] = None,
    ) -> None:
        super().__init__(
            ws=ws, interface=interface, ws_url=ws_url, api_key=api_key, tools=tools
        )
        if not self.ws:
            # Build the default realtime websocket when none was injected.
            if not model:
                model = DEFAULT_MODEL
            if not self.ws_url:
                self.ws_url = DEFAULT_WS_URL
            url = self.ws_url + "?model=" + model
            openai_api_key = os.getenv("OPENAI_API_KEY", None) or self.api_key
            if not openai_api_key:
                raise ValueError(
                    "The OPENAI_API_KEY is neither passed from the function arguments nor from environmental variables"
                )
            self.ws: OpenAIVoiceAgentWebsocket = OpenAIVoiceAgentWebsocket(
                uri=url, api_key=openai_api_key, on_msg=self.handle_message
            )
        if not self.interface:
            self.interface: OpenAIVoiceAgentInterface = OpenAIVoiceAgentInterface(
                on_audio_callback=self.send
            )
        self.recv_thread: Optional[threading.Thread] = None

    async def start(self, *args: Any, **kwargs: Dict[str, Any]) -> None:
        """
        Start the conversation and all related processes.

        Args:
            **kwargs (Any): You can pass all the keyword arguments related to initializing a session, except for `tools`, which is inferred from the `tools` attribute of the class. Find a reference for these arguments and their type [on OpenAI official documentation](https://platform.openai.com/docs/api-reference/realtime-client-events/session/update).
        """
        self.ws.connect()
        session = ConversationSession.model_validate(kwargs)
        logger.info(f"Session: {session}")
        if self.tools is not None:
            # Advertise the agent's tools to the realtime session.
            openai_conv_tools: List[ConversationTool] = []
            for tool in self.tools:
                params_dict = tool.metadata.get_parameters_dict()
                tool_params = ToolParameters.model_validate(params_dict)
                conv_tool = ConversationTool(
                    name=tool.metadata.get_name(),
                    description=tool.metadata.description,
                    parameters=tool_params,
                )
                openai_conv_tools.append(conv_tool)
            session.tools = openai_conv_tools
        update_session_event = ConversationSessionUpdate(
            type_t="session.update",
            session=session,
        )
        self._events.append(update_session_event)
        self._messages.append(ChatMessage(role="system", content=session.instructions))
        # Send initial request to start the conversation
        await self.ws.send(update_session_event.model_dump(by_alias=True))
        # Start processing microphone audio
        self.audio_thread = threading.Thread(target=self.interface.output)
        self.audio_thread.start()
        # Start audio streams (mic and speaker)
        self.interface.start()
        print("The agent is ready to have a conversation")

    async def send(self, audio: bytes, *args: Any, **kwargs: Any) -> None:
        """
        Callback function to send audio data to the OpenAI Conversation Websocket.

        Args:
            audio (bytes): the incoming audio stream from the user's input device.
        """
        encoded_chunk = base64.b64encode(audio).decode("utf-8")
        audio_event = ConversationInputEvent(
            type_t="input_audio_buffer.append", audio=encoded_chunk
        )
        self._events.append(audio_event)
        self._messages.append(
            ChatMessage(role=MessageRole.USER, blocks=[AudioBlock(audio=audio)])
        )
        await self.ws.send(audio_event.model_dump(by_alias=True))

    async def _send_function_result(
        self, call_id: str, output: str
    ) -> SendFunctionItemEvent:
        """Wrap a tool result (or error text) in a conversation item and send it.

        Args:
            call_id (str): Identifier of the function call being answered.
            output (str): Stringified tool output or error description.

        Returns:
            SendFunctionItemEvent: The event that was sent, for event logging.
        """
        func_res_it = FunctionResultItem(
            type_t="function_call_output",
            call_id=call_id,
            output=output,
        )
        func_res_ev = SendFunctionItemEvent(
            type_t="conversation.item.create", item=func_res_it
        )
        await self.ws.send(data=func_res_ev.model_dump(by_alias=True))
        return func_res_ev

    async def _dispatch_function_call(
        self, event: FunctionCallDoneEvent
    ) -> SendFunctionItemEvent:
        """Resolve which tool a function-call event targets, run it, and report.

        Resolution order: explicit ``event.name``; a single registered tool;
        the ``tool_name`` argument when several tools exist. Missing tools or
        an ambiguous call produce an explanatory error result instead.

        Args:
            event (FunctionCallDoneEvent): The completed function-call event.

        Returns:
            SendFunctionItemEvent: The result event sent back over the websocket.
        """
        if not event.name:
            if self.tools and len(self.tools) == 1:
                # Only one tool registered: call it unambiguously.
                tool_output = self.tools[0](**event.arguments)
                return await self._send_function_result(
                    event.call_id, str(tool_output.raw_output)
                )
            if self.tools and len(self.tools) > 1:
                if "tool_name" not in event.arguments:
                    return await self._send_function_result(
                        event.call_id,
                        "There are multiple tools and there is not tool name specified. Please pass 'tool_name' as an argument.",
                    )
                tool = get_tool_by_name(self.tools, name=event.arguments["tool_name"])
                tool_output = tool(**event.arguments)
                return await self._send_function_result(
                    event.call_id, str(tool_output.raw_output)
                )
            return await self._send_function_result(
                event.call_id,
                "Seems like there are no tools available at this time.",
            )
        if self.tools:
            tool = get_tool_by_name(self.tools, name=event.name)
            tool_output = tool(**event.arguments)
            return await self._send_function_result(
                event.call_id, str(tool_output.raw_output)
            )
        return await self._send_function_result(
            event.call_id,
            "Seems like there are no tools available at this time.",
        )

    async def handle_message(self, message: dict, *args: Any, **kwargs: Any) -> None:
        """
        Handle incoming message from OpenAI Conversation Websocket.

        Args:
            message (dict): The message from the websocket.
        """
        # Re-key "type" to the internal "type_t" field name used by our models.
        message["type_t"] = message.pop("type")
        msg_type = message["type_t"]
        func_res_ev: Optional[SendFunctionItemEvent] = None
        if msg_type == "response.audio.delta":
            event: BaseVoiceAgentEvent = ConversationDeltaEvent.model_validate(message)
            audio_content = base64.b64decode(message["delta"])
            self._messages.append(
                ChatMessage(
                    role=MessageRole.ASSISTANT, blocks=[AudioBlock(audio=audio_content)]
                )
            )
            self.interface.receive(audio_content)
        elif msg_type in ("response.text.delta", "response.audio_transcript.delta"):
            # Text and transcript deltas are recorded identically.
            event = ConversationDeltaEvent.model_validate(message)
            self._messages.append(
                ChatMessage(
                    role=MessageRole.ASSISTANT, blocks=[TextBlock(text=event.delta)]
                )
            )
        elif msg_type in (
            "response.text.done",
            "response.audio_transcript.done",
            "response.audio.done",
        ):
            event = ConversationDoneEvent.model_validate(message)
        elif msg_type == "response.function_call_arguments.done":
            event = FunctionCallDoneEvent.model_validate(message)
            func_res_ev = await self._dispatch_function_call(event)
        else:
            # Unknown event types are ignored entirely (not recorded).
            return
        self._events.append(event)
        if func_res_ev:
            self._events.append(func_res_ev)

    async def stop(self) -> None:
        """
        Stop the conversation and close all the related processes.
        """
        # Signal threads to stop
        self.interface._stop_event.set()
        await self.ws.close()
        # Stop audio streams
        self.interface.stop()
        # Join threads to ensure they exit cleanly
        if self.audio_thread:
            self.audio_thread.join()

    async def interrupt(self) -> None:
        """
        Interrupts the input/output audio streaming.
        """
        self.interface.interrupt()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-openai/llama_index/voice_agents/openai/base.py",
"license": "MIT License",
"lines": 261,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-openai/llama_index/voice_agents/openai/types.py | import base64
import binascii
import json
from typing import Union, List, Dict, Literal, Optional, Any
from typing_extensions import Self
from llama_index.core.voice_agents import BaseVoiceAgentEvent
from llama_index.core.bridge.pydantic import BaseModel, Field, model_validator
class ConversationVAD(BaseModel):
    """Server-side voice-activity-detection settings.

    ``type_t`` is serialized under the wire name ``type``.
    """

    type_t: str = Field(serialization_alias="type", default="server_vad")
    threshold: float = Field(default=0.5)
    prefix_padding_ms: int = Field(default=300)
    silence_duration_ms: int = Field(default=500)
    create_response: bool = Field(default=True)
class ParamPropertyDefinition(BaseModel):
    """Single JSON-schema property entry (type only) of a tool parameter."""

    type: str
class ToolParameters(BaseModel):
    """JSON-schema ``object`` describing a tool's parameters."""

    type: Literal["object"] = Field(default="object")
    properties: Dict[str, ParamPropertyDefinition]
    required: List[str]
class FunctionResultItem(BaseVoiceAgentEvent):
    """Conversation item carrying a tool call's output back to the model."""

    call_id: str
    output: str
class ConversationTool(BaseModel):
    """Function tool definition advertised to the realtime session."""

    type: Literal["function"] = Field(default="function")
    name: str
    description: str
    parameters: ToolParameters
class SendFunctionItemEvent(BaseVoiceAgentEvent):
    """``conversation.item.create`` event wrapping a FunctionResultItem."""

    item: FunctionResultItem
class ConversationSession(BaseModel):
    """Configuration payload of a realtime session (sent via session.update).

    Defaults mirror this integration's session setup; ``temperature`` is
    constrained to >= 0.6 and ``max_response_output_tokens`` to 1..4096
    or the literal string "inf".
    """

    modalities: List[str] = Field(default=["text", "audio"])
    instructions: str = Field(default="You are a helpful assistant.")
    voice: str = Field(default="sage")
    input_audio_format: str = Field(default="pcm16")
    output_audio_format: str = Field(default="pcm16")
    input_audio_transcription: Dict[Literal["model"], str] = Field(
        max_length=1, default={"model": "whisper-1"}
    )
    turn_detection: ConversationVAD = Field(default_factory=ConversationVAD)
    tools: List[ConversationTool] = Field(
        default_factory=list,
    )
    tool_choice: Literal["auto", "none", "required"] = Field(default="auto")
    temperature: float = Field(default=0.8, ge=0.6)
    max_response_output_tokens: Union[Literal["inf"], int] = Field(
        default="inf",
        ge=1,
        le=4096,
    )
    speed: float = Field(default=1.1)
    tracing: Union[Literal["auto"], Dict] = Field(default="auto")
class ConversationSessionUpdate(BaseVoiceAgentEvent):
    """``session.update`` event wrapping a ConversationSession."""

    session: ConversationSession
class ConversationInputEvent(BaseVoiceAgentEvent):
    """``input_audio_buffer.append`` event; audio is held base64-encoded."""

    audio: Union[bytes, str]

    @model_validator(mode="after")
    def validate_audio_input(self) -> Self:
        """Base64-encode raw ``bytes`` audio in place.

        NOTE(review): a ``str`` that is not valid base64 is left unchanged,
        and raw bytes that happen to already be valid base64 pass through
        unencoded — presumably callers always send either raw bytes or an
        already-encoded string; confirm.
        """
        try:
            base64.b64decode(self.audio, validate=True)
        except binascii.Error:
            if isinstance(self.audio, bytes):
                self.audio = base64.b64encode(self.audio).decode("utf-8")
        return self
class FunctionCallDoneEvent(BaseVoiceAgentEvent):
    """Event emitted when the model finishes streaming a function call.

    Attributes:
        call_id: Identifier correlating the eventual function result.
        name: Tool name, when the model provides one.
        arguments: JSON string on the wire; parsed into a dict on validation.
        item_id: Conversation item the call belongs to.
    """

    call_id: str
    name: Optional[str] = Field(default=None)
    arguments: Union[str, Dict[str, Any]]
    item_id: str

    @model_validator(mode="after")
    def validate_arguments(self) -> Self:
        """Parse ``arguments`` from JSON into a dict.

        Fix: an already-parsed dict (a valid value of the declared Union
        type) previously crashed with TypeError inside ``json.loads``; it
        is now accepted as-is.

        Raises:
            ValueError: If ``arguments`` is a string that is not valid JSON.
        """
        if isinstance(self.arguments, str):
            try:
                self.arguments = json.loads(self.arguments)
            except json.JSONDecodeError:
                raise ValueError("arguments are non-serializable")
        return self
class ConversationDeltaEvent(BaseVoiceAgentEvent):
    """Streaming delta (text, transcript, or audio chunk) for an item."""

    delta: Union[str, bytes]
    item_id: str
class ConversationDoneEvent(BaseVoiceAgentEvent):
    """Terminal event for an item; carries final text or transcript if any."""

    item_id: str
    text: Optional[str] = Field(
        default=None,
    )
    transcript: Optional[str] = Field(
        default=None,
    )
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-openai/llama_index/voice_agents/openai/types.py",
"license": "MIT License",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-openai/llama_index/voice_agents/openai/utils.py | from llama_index.core.tools import BaseTool
from typing import List, Union
def get_tool_by_name(tools: List[BaseTool], name: str) -> Union[BaseTool, None]:
    """Return the first tool whose metadata name equals ``name``, else None."""
    matches = (
        candidate for candidate in tools if candidate.metadata.get_name() == name
    )
    return next(matches, None)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-openai/llama_index/voice_agents/openai/utils.py",
"license": "MIT License",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-openai/llama_index/voice_agents/openai/websocket.py | import threading
import json
import logging
import asyncio
import websockets
from websockets import ConnectionClosedError
from typing import Optional, Callable, Any
from llama_index.core.voice_agents import BaseVoiceAgentWebsocket
logging.basicConfig(
level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s"
)
logger = logging.getLogger(__name__)
class OpenAIVoiceAgentWebsocket(BaseVoiceAgentWebsocket):
    """Threaded websocket client for the OpenAI realtime API.

    Runs a private asyncio event loop on a daemon thread. Outgoing messages
    are enqueued thread-safely via ``send`` and drained by ``_send_loop``;
    ``_recv_loop`` forwards each decoded incoming message to ``on_msg``.
    """

    def __init__(
        self, uri: str, api_key: str, on_msg: Optional[Callable] = None
    ) -> None:
        """
        Args:
            uri (str): Websocket URL (including the model query parameter).
            api_key (str): OpenAI API key used in the Authorization header.
            on_msg (Optional[Callable]): Async callback awaited with each
                decoded incoming message.
        """
        super().__init__(uri=uri)
        self.api_key = api_key
        self.on_msg = on_msg
        self.send_queue: asyncio.Queue = asyncio.Queue()
        self._stop_event = threading.Event()
        self.loop_thread: Optional[threading.Thread] = None
        self.loop: Optional[asyncio.AbstractEventLoop] = None

    def connect(self) -> None:
        """Start the socket loop in a new thread."""
        self.loop_thread = threading.Thread(target=self._run_socket_loop, daemon=True)
        self.loop_thread.start()

    async def aconnect(self) -> None:
        """Method not implemented."""
        # Fix: instances do not expose ``__qualname__`` (it lives on the
        # class), so the original ``self.__qualname__`` raised AttributeError
        # instead of the intended NotImplementedError.
        raise NotImplementedError(
            f"This method has not been implemented for {type(self).__qualname__}"
        )

    def _run_socket_loop(self):
        """Thread target: own a fresh event loop and run the socket loop."""
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        self.loop.run_until_complete(self._socket_loop())

    async def _socket_loop(self) -> None:
        """Establish connection and run send/recv loop."""
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "OpenAI-Beta": "realtime=v1",
        }
        try:
            async with websockets.connect(self.uri, additional_headers=headers) as ws:
                self.ws = ws  # Safe: now created inside this thread + loop
                # Create separate tasks for sending and receiving
                recv_task = asyncio.create_task(self._recv_loop(ws))
                send_task = asyncio.create_task(self._send_loop(ws))
                try:
                    # Run both tasks concurrently until one completes or fails
                    await asyncio.gather(recv_task, send_task)
                except Exception as e:
                    logging.error(f"Error in socket tasks: {e}")
                finally:
                    # Clean up any remaining tasks
                    recv_task.cancel()
                    send_task.cancel()
                    await asyncio.gather(recv_task, send_task, return_exceptions=True)
        except Exception as e:
            logging.error(f"Failed to connect to WebSocket: {e}")

    async def _recv_loop(self, ws) -> None:
        """Handle incoming messages until stopped or the connection closes."""
        try:
            while not self._stop_event.is_set():
                try:
                    message = await ws.recv()
                    logging.info(f"Received message: {message}")
                    if message and self.on_msg:
                        await self.on_msg(json.loads(message))
                except ConnectionClosedError:
                    logging.error("WebSocket connection closed.")
                    break
        except Exception as e:
            logging.error(f"Error in receive loop: {e}")

    async def _send_loop(self, ws) -> None:
        """Drain the send queue until stopped or the connection closes."""
        try:
            while not self._stop_event.is_set():
                try:
                    # Wait for a message to send with a timeout to check stop_event
                    try:
                        message = await asyncio.wait_for(
                            self.send_queue.get(), timeout=0.1
                        )
                        await ws.send(json.dumps(message))
                    except asyncio.TimeoutError:
                        # Timeout is expected - just continue to check stop_event
                        continue
                except ConnectionClosedError:
                    logging.error("WebSocket connection closed.")
                    break
        except Exception as e:
            logging.error(f"Error in send loop: {e}")

    async def send(self, data: Any) -> None:
        """Enqueue a message for sending (thread-safe)."""
        if self.loop:
            self.loop.call_soon_threadsafe(self.send_queue.put_nowait, data)

    async def close(self) -> None:
        """Stop the loop and close the WebSocket."""
        self._stop_event.set()
        if self.loop:
            self.loop.call_soon_threadsafe(self.loop.stop)
        if self.loop_thread:
            self.loop_thread.join()
        logging.info("WebSocket loop thread terminated.")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-openai/llama_index/voice_agents/openai/websocket.py",
"license": "MIT License",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-openai/tests/test_serialization.py | import pytest
from llama_index.voice_agents.openai.types import (
ConversationDeltaEvent,
ConversationDoneEvent,
ConversationSession,
ConversationSessionUpdate,
)
@pytest.fixture()
def session_json() -> dict:
    """Expected ``by_alias`` dump of a default ConversationSession."""
    vad = {
        "type": "server_vad",
        "threshold": 0.5,
        "prefix_padding_ms": 300,
        "silence_duration_ms": 500,
        "create_response": True,
    }
    return dict(
        modalities=["text", "audio"],
        instructions="You are a helpful assistant.",
        voice="sage",
        input_audio_format="pcm16",
        output_audio_format="pcm16",
        input_audio_transcription={"model": "whisper-1"},
        turn_detection=vad,
        tools=[],
        tool_choice="auto",
        temperature=0.8,
        max_response_output_tokens="inf",
        speed=1.1,
        tracing="auto",
    )
def test_serialization(session_json: dict) -> None:
    """A default session.update event must dump to the expected aliased JSON."""
    start_event = ConversationSessionUpdate(
        type_t="session.update",
        session=ConversationSession(),
    )
    assert start_event.model_dump(by_alias=True) == {
        "type": "session.update",
        "session": session_json,
    }
def test_deserialization() -> None:
    """Wire-shaped dicts must validate into the matching event models."""
    # Streaming text delta.
    message = {"type_t": "response.text.delta", "delta": "Hello", "item_id": "msg_001"}
    assert ConversationDeltaEvent.model_validate(message) == ConversationDeltaEvent(
        type_t="response.text.delta", delta="Hello", item_id="msg_001"
    )
    # Final text payload.
    message1 = {
        "type_t": "response.text.done",
        "text": "Hello world, this is a test!",
        "item_id": "msg_001",
    }
    assert ConversationDoneEvent.model_validate(message1) == ConversationDoneEvent(
        type_t="response.text.done",
        text="Hello world, this is a test!",
        item_id="msg_001",
    )
    # Final audio transcript payload.
    message2 = {
        "type_t": "response.audio_transcript.done",
        "transcript": "Hello world, this is a test!",
        "item_id": "msg_001",
    }
    assert ConversationDoneEvent.model_validate(message2) == ConversationDoneEvent(
        type_t="response.audio_transcript.done",
        transcript="Hello world, this is a test!",
        item_id="msg_001",
    )
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-openai/tests/test_serialization.py",
"license": "MIT License",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-openai/tests/test_utils.py | import pytest
from llama_index.voice_agents.openai.utils import get_tool_by_name
from llama_index.core.tools import FunctionTool
from typing import List
@pytest.fixture()
def function_tools() -> List[FunctionTool]:
    """Three named FunctionTools (sync add/greet, async hello_world)."""

    def add(i: int, j: int) -> int:
        return i + j

    def greet(name: str) -> str:
        return "Hello " + name

    async def hello_world() -> str:
        return "Hello World!"

    return [
        FunctionTool.from_defaults(fn=add, name="add_tool"),
        FunctionTool.from_defaults(fn=greet, name="greet_tool"),
        FunctionTool.from_defaults(fn=hello_world, name="hello_world_tool"),
    ]
def test_tools_from_names(function_tools: List[FunctionTool]) -> None:
    """Lookup by name returns the callable tool; unknown names return None."""
    tool = get_tool_by_name(function_tools, name="add_tool")
    assert tool.metadata.get_name() == "add_tool"
    assert tool(**{"i": 5, "j": 7}).raw_output == 12
    tool1 = get_tool_by_name(function_tools, name="greet_tool")
    assert tool1.metadata.get_name() == "greet_tool"
    assert tool1(**{"name": "Mark"}).raw_output == "Hello Mark"
    tool2 = get_tool_by_name(function_tools, name="hello_world_tool")
    assert tool2.metadata.get_name() == "hello_world_tool"
    assert tool2().raw_output == "Hello World!"
    # Unregistered name: no match.
    assert get_tool_by_name(function_tools, name="test_tool") is None
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-openai/tests/test_utils.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-db2/llama_index/vector_stores/db2/base.py | # OopCompanion:suppressRename
from __future__ import annotations
import functools
import json
import logging
import math
import os
from enum import Enum
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Type,
TypeVar,
cast,
)
from llama_index.core.schema import (
BaseNode,
MetadataMode,
NodeRelationship,
RelatedNodeInfo,
TextNode,
)
from llama_index.core.utils import iter_batch
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
VectorStoreQuery,
VectorStoreQueryResult,
)
if TYPE_CHECKING:
from ibm_db_dbi import Connection
from llama_index.core.vector_stores.utils import metadata_dict_to_node
from pydantic import PrivateAttr
# Module-level logger; verbosity is taken from the LOG_LEVEL env var (default ERROR).
logger = logging.getLogger(__name__)
log_level = os.getenv("LOG_LEVEL", "ERROR").upper()
# NOTE(review): basicConfig at import time configures the *root* logger,
# which is unusual for a library module — confirm this is intentional.
logging.basicConfig(
    level=getattr(logging, log_level),
    format="%(asctime)s - %(levelname)s - %(message)s",
)
class DistanceStrategy(Enum):
    """Distance metrics selectable for similarity search (mapped to Db2
    distance-function names by ``_get_distance_function``)."""
    COSINE = 1
    DOT_PRODUCT = 2
    EUCLIDEAN_DISTANCE = 3
    MANHATTAN_DISTANCE = 4
    HAMMING_DISTANCE = 5
    EUCLIDEAN_SQUARED = 6
# Define a type variable that can be any kind of function
T = TypeVar("T", bound=Callable[..., Any])
def _handle_exceptions(func: T) -> T:
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
try:
return func(*args, **kwargs)
except RuntimeError as db_err:
# Handle a known type of error (e.g., DB-related) specifically
logger.exception("DB-related error occurred.")
raise RuntimeError(f"Failed due to a DB issue: {db_err}") from db_err
except ValueError as val_err:
# Handle another known type of error specifically
logger.exception("Validation error.")
raise ValueError(f"Validation failed: {val_err}") from val_err
except Exception as e:
# Generic handler for all other exceptions
logger.exception(f"An unexpected error occurred: {e}")
raise RuntimeError(f"Unexpected error: {e}") from e
return cast(T, wrapper)
def _escape_str(value: str) -> str:
BS = "\\"
must_escape = (BS, "'")
return (
"".join(f"{BS}{c}" if c in must_escape else c for c in value) if value else ""
)
# Column layout for the vector table: maps each column name to its SQL type
# and a function that extracts the column's value from a BaseNode.
# NOTE(review): the "embedding" type string contains a literal
# "embedding_dim" placeholder; the actual DDL is built inside create_table,
# so the "type" entries here appear to be informational only — confirm.
column_config: Dict = {
    "id": {"type": "VARCHAR(64) PRIMARY KEY", "extract_func": lambda x: x.node_id},
    "doc_id": {"type": "VARCHAR(64)", "extract_func": lambda x: x.ref_doc_id},
    "embedding": {
        "type": "VECTOR(embedding_dim, FLOAT32)",
        "extract_func": lambda x: f"{x.get_embedding()}",
    },
    "node_info": {
        "type": "BLOB",
        "extract_func": lambda x: json.dumps(x.node_info),
    },
    "metadata": {
        "type": "BLOB",
        "extract_func": lambda x: json.dumps(x.metadata),
    },
    "text": {
        "type": "CLOB",
        # Text is escaped so it can be embedded safely in a quoted literal.
        "extract_func": lambda x: _escape_str(
            x.get_content(metadata_mode=MetadataMode.NONE) or ""
        ),
    },
}
def _stringify_list(lst: List) -> str:
return "(" + ",".join(f"'{item}'" for item in lst) + ")"
def table_exists(connection: Connection, table_name: str) -> bool:
    """Return True if *table_name* can be queried on *connection*.

    Probes with ``SELECT COUNT(*)``; Db2's SQL0204N ("name is undefined")
    means the table does not exist. Any other database error propagates.

    Args:
        connection: Open DB-API connection.
        table_name: Table to probe (interpolated into the SQL — trusted
            input only).

    Returns:
        True if the probe query succeeds, False on SQL0204N.
    """
    # BUG FIX: create the cursor *before* the try block. Previously a
    # failure inside connection.cursor() left `cursor` unbound, so the
    # finally clause raised UnboundLocalError and masked the real error.
    cursor = connection.cursor()
    try:
        cursor.execute(f"SELECT COUNT(*) FROM {table_name}")
    except Exception as ex:
        if "SQL0204N" in str(ex):
            return False
        raise
    finally:
        cursor.close()
    return True
def _get_distance_function(distance_strategy: DistanceStrategy) -> str:
    """Map a DistanceStrategy member to its Db2 distance-function name.

    Raises:
        ValueError: if the strategy has no Db2 equivalent.
    """
    strategy_to_function = {
        DistanceStrategy.EUCLIDEAN_DISTANCE: "EUCLIDEAN",
        DistanceStrategy.DOT_PRODUCT: "DOT",
        DistanceStrategy.COSINE: "COSINE",
        DistanceStrategy.MANHATTAN_DISTANCE: "MANHATTAN",
        DistanceStrategy.HAMMING_DISTANCE: "HAMMING",
        DistanceStrategy.EUCLIDEAN_SQUARED: "EUCLIDEAN_SQUARED",
    }
    function_name = strategy_to_function.get(distance_strategy)
    if function_name is None:
        raise ValueError(f"Unsupported distance strategy: {distance_strategy}")
    return function_name
@_handle_exceptions
def create_table(client: Connection, table_name: str, embedding_dim: int) -> None:
    """Create *table_name* with the vector-store schema unless it already exists.

    Args:
        client: Open DB-API connection.
        table_name: Table to create (interpolated into DDL — trusted input only).
        embedding_dim: Dimensionality of the FLOAT32 vector column.
    """
    if table_exists(client, table_name):
        logger.info(f"Table {table_name} already exists...")
        return
    column_types = {
        "id": "VARCHAR(64) PRIMARY KEY NOT NULL",
        "doc_id": "VARCHAR(64)",
        "embedding": f"vector({embedding_dim}, FLOAT32)",
        "node_info": "BLOB",
        "metadata": "BLOB",
        "text": "CLOB",
    }
    column_defs = ", ".join(
        f"{col_name} {col_type}" for col_name, col_type in column_types.items()
    )
    cursor = client.cursor()
    try:
        cursor.execute(f"CREATE TABLE {table_name} ({column_defs})")
        cursor.execute("COMMIT")
        logger.info(f"Table {table_name} created successfully...")
    finally:
        cursor.close()
@_handle_exceptions
def drop_table(connection: Connection, table_name: str) -> None:
    """Drop *table_name* if present; log the outcome either way."""
    if not table_exists(connection, table_name):
        logger.info("Table not found...")
        return
    cursor = connection.cursor()
    try:
        cursor.execute(f"DROP TABLE {table_name}")
        logger.info("Table dropped successfully...")
    finally:
        cursor.close()
class DB2LlamaVS(BasePydanticVectorStore):
    """
    `DB2LlamaVS` vector store.
    To use, you should have both:
    - the ``ibm_db`` python package installed
    - a connection to db2 database with vector store feature (v12.1.2+)
    """
    # Table column holding each node's JSON metadata.
    metadata_column: str = "metadata"
    # Node text is persisted in the table, so queries can return it directly.
    stores_text: bool = True
    # Raw ibm_db_dbi connection, kept out of the Pydantic model fields.
    _client: Connection = PrivateAttr()
    table_name: str
    distance_strategy: DistanceStrategy
    batch_size: Optional[int]
    params: Optional[dict[str, Any]]
    embed_dim: int
    def __init__(
        self,
        _client: Connection,
        table_name: str,
        distance_strategy: DistanceStrategy = DistanceStrategy.EUCLIDEAN_DISTANCE,
        batch_size: Optional[int] = 32,
        embed_dim: int = 1536,
        params: Optional[dict[str, Any]] = None,
    ):
        """Create the store and the backing table if it does not exist.

        Args:
            _client: Open ibm_db_dbi connection.
            table_name: Table used to persist nodes.
            distance_strategy: Metric used for similarity search.
            batch_size: Rows per INSERT batch in ``add``.
            embed_dim: Dimensionality of the embedding vectors.
            params: Extra configuration parameters.

        Raises:
            ImportError: if ``ibm_db`` is not installed.
            RuntimeError: if initialization or table creation fails.
        """
        try:
            import ibm_db_dbi
        except ImportError as e:
            raise ImportError(
                "Unable to import ibm_db_dbi, please install with "
                "`pip install -U ibm_db`."
            ) from e
        try:
            """Initialize with necessary components."""
            super().__init__(
                table_name=table_name,
                distance_strategy=distance_strategy,
                batch_size=batch_size,
                embed_dim=embed_dim,
                params=params,
            )
            # Assign _client to PrivateAttr after the Pydantic initialization
            object.__setattr__(self, "_client", _client)
            create_table(_client, table_name, embed_dim)
        except ibm_db_dbi.DatabaseError as db_err:
            logger.exception(f"Database error occurred while create table: {db_err}")
            raise RuntimeError(
                "Failed to create table due to a database error."
            ) from db_err
        except ValueError as val_err:
            logger.exception(f"Validation error: {val_err}")
            raise RuntimeError(
                "Failed to create table due to a validation error."
            ) from val_err
        except Exception as ex:
            logger.exception("An unexpected error occurred while creating the index.")
            raise RuntimeError(
                "Failed to create table due to an unexpected error."
            ) from ex
    @property
    def client(self) -> Any:
        """Get client."""
        return self._client
    @classmethod
    def class_name(cls) -> str:
        """Return the class name used for (de)serialization."""
        return "DB2LlamaVS"
    def _append_meta_filter_condition(
        self, where_str: Optional[str], exact_match_filter: list
    ) -> str:
        """AND exact-match metadata predicates onto an existing WHERE fragment.

        NOTE(review): filter keys and values are interpolated directly into
        SQL rather than bound — safe only for trusted filter input.
        """
        filter_str = " AND ".join(
            f"JSON_VALUE({self.metadata_column}, '$.{filter_item.key}') = '{filter_item.value}'"
            for filter_item in exact_match_filter
        )
        if where_str is None:
            where_str = filter_str
        else:
            where_str += " AND " + filter_str
        return where_str
    def _build_insert(self, values: List[BaseNode]) -> List[tuple]:
        """Extract per-column bind values for each node, in column_config order."""
        _data = []
        for item in values:
            item_values = tuple(
                column["extract_func"](item) for column in column_config.values()
            )
            _data.append(item_values)
        return _data
    def _build_query(
        self, distance_function: str, k: int, where_str: Optional[str] = None
    ) -> str:
        """Build the top-k similarity SELECT.

        The query embedding is left as a ``?`` bind parameter; rows are
        ordered by ascending distance and limited to *k*.
        """
        where_clause = f"WHERE {where_str}" if where_str else ""
        return f"""
        SELECT id,
            doc_id,
            text,
            SYSTOOLS.BSON2JSON(node_info),
            SYSTOOLS.BSON2JSON(metadata),
            vector_distance(embedding, VECTOR(?, {self.embed_dim}, FLOAT32), {distance_function}) AS distance
        FROM {self.table_name}
        {where_clause}
        ORDER BY distance
        FETCH FIRST {k} ROWS ONLY
        """
    @_handle_exceptions
    def add(self, nodes: list[BaseNode], **kwargs: Any) -> list[str]:
        """Insert *nodes* in batches of ``batch_size``; return inserted node ids."""
        if not nodes:
            return []
        for result_batch in iter_batch(nodes, self.batch_size):
            bind_values = self._build_insert(values=result_batch)
            dml = f"""
            INSERT INTO {self.table_name} ({", ".join(column_config.keys())})
            VALUES (?, ?, VECTOR(?, {self.embed_dim}, FLOAT32), SYSTOOLS.JSON2BSON(?), SYSTOOLS.JSON2BSON(?), ?)
            """
            cursor = self.client.cursor()
            try:
                # Use executemany to insert the batch
                cursor.executemany(dml, bind_values)
                cursor.execute("COMMIT")
            finally:
                cursor.close()
        return [node.node_id for node in nodes]
    @_handle_exceptions
    def delete(self, ref_doc_id: str, **kwargs: Any) -> None:
        """Delete every row whose doc_id equals *ref_doc_id*.

        NOTE(review): ref_doc_id is interpolated into the SQL string rather
        than bound as a parameter — trusted input only.
        """
        ddl = f"DELETE FROM {self.table_name} WHERE doc_id = '{ref_doc_id}'"
        cursor = self._client.cursor()
        try:
            cursor.execute(ddl)
            cursor.execute("COMMIT")
        finally:
            cursor.close()
    @_handle_exceptions
    def drop(self) -> None:
        """Drop the backing table if it exists."""
        drop_table(self._client, self.table_name)
    @_handle_exceptions
    def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
        """Run a top-k similarity search and rebuild nodes from the stored rows."""
        distance_function = _get_distance_function(self.distance_strategy)
        where_str = (
            f"doc_id in {_stringify_list(query.doc_ids)}" if query.doc_ids else None
        )
        if query.filters is not None:
            where_str = self._append_meta_filter_condition(
                where_str, query.filters.filters
            )
        # build query sql
        query_sql = self._build_query(
            distance_function, query.similarity_top_k, where_str
        )
        embedding = f"{query.query_embedding}"
        cursor = self._client.cursor()
        try:
            cursor.execute(query_sql, [embedding])
            results = cursor.fetchall()
        finally:
            cursor.close()
        similarities = []
        ids = []
        nodes = []
        for result in results:
            doc_id = result[1]
            text = result[2] if result[2] is not None else ""
            node_info = json.loads(result[3] if result[3] is not None else "{}")
            metadata = json.loads(result[4] if result[4] is not None else "{}")
            # node_ids filtering is applied client-side after the top-k
            # fetch, so fewer than similarity_top_k rows may survive.
            if query.node_ids:
                if result[0] not in query.node_ids:
                    continue
            # NOTE(review): if node_info is not a dict, start/end_char_idx
            # are unbound on the legacy TextNode fallback path below.
            if isinstance(node_info, dict):
                start_char_idx = node_info.get("start", None)
                end_char_idx = node_info.get("end", None)
            try:
                node = metadata_dict_to_node(metadata)
                node.set_content(text)
            except Exception:
                # Note: deprecated legacy logic for backward compatibility
                node = TextNode(
                    id_=result[0],
                    text=text,
                    metadata=metadata,
                    start_char_idx=start_char_idx,
                    end_char_idx=end_char_idx,
                    relationships={
                        NodeRelationship.SOURCE: RelatedNodeInfo(node_id=doc_id)
                    },
                )
            nodes.append(node)
            # Maps raw distance d to 1 - e^(-d), i.e. a (0, 1) score that
            # *grows* with distance. NOTE(review): this scores farther rows
            # higher — confirm the intended similarity semantics.
            similarities.append(1.0 - math.exp(-result[5]))
            ids.append(result[0])
        return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)
    @classmethod
    @_handle_exceptions
    def from_documents(
        cls: Type[DB2LlamaVS],
        docs: List[BaseNode],
        table_name: str = "llama_index",
        **kwargs: Any,
    ) -> DB2LlamaVS:
        """Return VectorStore initialized from texts and embeddings."""
        _client = kwargs.get("client")
        if _client is None:
            raise ValueError("client parameter is required...")
        params = kwargs.get("params")
        distance_strategy = kwargs.get("distance_strategy")
        # Any existing table with this name is dropped and rebuilt.
        drop_table(_client, table_name)
        embed_dim = kwargs.get("embed_dim")
        vss = cls(
            _client=_client,
            table_name=table_name,
            params=params,
            distance_strategy=distance_strategy,
            embed_dim=embed_dim,
        )
        vss.add(nodes=docs)
        return vss
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-db2/llama_index/vector_stores/db2/base.py",
"license": "MIT License",
"lines": 371,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-db2/tests/test_vector_stores_db2.py | from llama_index.core.vector_stores.types import BasePydanticVectorStore
from llama_index.vector_stores.db2 import (
DB2LlamaVS,
create_table,
table_exists,
drop_table,
DistanceStrategy,
)
from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.core.vector_stores.types import (
ExactMatchFilter,
MetadataFilters,
VectorStoreQuery,
)
def test_class():
    """DB2LlamaVS must inherit from BasePydanticVectorStore."""
    ancestor_names = {cls.__name__ for cls in DB2LlamaVS.__mro__}
    assert BasePydanticVectorStore.__name__ in ancestor_names
# Connection parameters for a local Db2 instance. Intentionally blank so the
# tests below become no-ops: connect() fails and each test returns early.
database = ""
username = ""
password = ""
def test_table_exists() -> None:
    """Exercise table_exists against a live Db2; skipped (early return) when
    ibm_db is missing or no connection can be opened."""
    try:
        import ibm_db_dbi  # type: ignore
    except ImportError:
        return
    try:
        connection = ibm_db_dbi.connect(database, username, password)
    except Exception:
        return
    # 1. Create a Table
    create_table(connection, "TB1", 8148)
    # 2. Existing Table
    # expectation:true
    assert table_exists(connection, "TB1")
    # 3. Non-Existing Table
    # expectation:false
    assert not table_exists(connection, "TableNonExist")
    # 4. Invalid Table Name
    # Expectation: SQL0104N
    try:
        table_exists(connection, "123")
    except Exception:
        pass
    # 5. Empty String
    # Expectation: SQL0104N
    try:
        table_exists(connection, "")
    except Exception:
        pass
    # 6. Special Character
    # Expectation: SQL0007N
    try:
        table_exists(connection, "!!4")
    except Exception:
        pass
    # 7. Table name length > 128
    # Expectation: SQL0107N The name is too long. The maximum length is "128".
    try:
        table_exists(connection, "x" * 129)
    except Exception:
        pass
    # 8. Toggle Upper/Lower Case (like TaBlE)
    # Expectation:True
    assert table_exists(connection, "Tb1")
    drop_table(connection, "TB1")
    # 9. Table_Name→ "表格"
    # Expectation:True
    create_table(connection, '"表格"', 545)
    assert table_exists(connection, '"表格"')
    drop_table(connection, '"表格"')
    connection.commit()
def test_create_table() -> None:
    """Exercise create_table/drop_table against a live Db2; skipped (early
    return) when ibm_db is missing or no connection can be opened."""
    try:
        import ibm_db_dbi
    except ImportError:
        return
    try:
        connection = ibm_db_dbi.connect(database, username, password)
    except Exception:
        return
    # 1. New table - HELLO
    # Dimension - 100
    # Expectation: table is created
    create_table(connection, "HELLO", 100)
    # 2. Existing table name - HELLO
    # Dimension - 110
    # Expectation: Log message table already exists
    create_table(connection, "HELLO", 110)
    drop_table(connection, "HELLO")
    # 3. New Table - 123
    # Dimension - 100
    # Expectation: SQL0104N invalid table name
    try:
        create_table(connection, "123", 100)
        drop_table(connection, "123")
    except Exception:
        pass
    # 4. New Table - Hello123
    # Dimension - 8148
    # Expectation: table is created
    create_table(connection, "Hello123", 8148)
    drop_table(connection, "Hello123")
    # 5. New Table - T1
    # Dimension - 65536
    # Expectation: SQL0604N VECTOR column exceed the supported
    # dimension length.
    try:
        create_table(connection, "T1", 65536)
        drop_table(connection, "T1")
    except Exception:
        pass
    # 6. New Table - T1
    # Dimension - 0
    # Expectation: SQL0604N VECTOR column unsupported dimension length 0.
    try:
        create_table(connection, "T1", 0)
        drop_table(connection, "T1")
    except Exception:
        pass
    # 7. New Table - T1
    # Dimension - -1
    # Expectation: SQL0104N An unexpected token "-" was found
    try:
        create_table(connection, "T1", -1)
        drop_table(connection, "T1")
    except Exception:
        pass
    # 8. New Table - T2
    # Dimension - '1000'
    # Expectation: table is created
    create_table(connection, "T2", int("1000"))
    drop_table(connection, "T2")
    # 9. New Table - T3
    # Dimension - 100 passed as a variable
    # Expectation: table is created
    val = 100
    create_table(connection, "T3", val)
    drop_table(connection, "T3")
    # 10.
    # Expectation: SQL0104N An unexpected token
    val2 = """H
    ello"""
    try:
        create_table(connection, val2, 545)
        drop_table(connection, val2)
    except Exception:
        pass
    # 11. New Table - 表格
    # Dimension - 545
    # Expectation: table is created
    create_table(connection, '"表格"', 545)
    drop_table(connection, '"表格"')
    # 12. <schema_name.table_name>
    # Expectation: table with schema is created
    create_table(connection, "U1.TB4", 128)
    drop_table(connection, "U1.TB4")
    # 13.
    # Expectation: table is created
    create_table(connection, '"T5"', 128)
    drop_table(connection, '"T5"')
    # 14. Toggle Case
    # Expectation: table is created
    create_table(connection, "TaBlE", 128)
    drop_table(connection, "TaBlE")
    # 15. table_name as empty_string
    # Expectation: SQL0104N An unexpected token
    try:
        create_table(connection, "", 128)
        drop_table(connection, "")
        create_table(connection, '""', 128)
        drop_table(connection, '""')
    except Exception:
        pass
    # 16. Arithmetic Operations in dimension parameter
    # Expectation: table is created
    n = 1
    create_table(connection, "T10", n + 500)
    drop_table(connection, "T10")
    # 17. String Operations in table_name parameter
    # Expectation: table is created
    create_table(connection, "YaSh".replace("aS", "ok"), 500)
    drop_table(connection, "YaSh".replace("aS", "ok"))
    connection.commit()
# Define a list of documents (these dummy examples are 4 short Db2 excerpts,
# each with a toy 2-dimensional embedding so distances are easy to reason about).
text_json_list = [
    {
        "text": "Db2 handles LOB data differently than other kinds of data. As a result, you sometimes need to take additional actions when you define LOB columns and insert the LOB data.",
        "id_": "doc_1_2_P4",
        "embedding": [1.0, 0.0],
        "relationships": "test-0",
        "metadata": {
            "weight": 1.0,
            "rank": "a",
            "url": "https://www.ibm.com/docs/en/db2-for-zos/12?topic=programs-storing-lob-data-in-tables",
        },
    },
    {
        "text": "Introduced in Db2 13, SQL Data Insights brought artificial intelligence (AI) functionality to the Db2 for z/OS engine. It provided the capability to run SQL AI query to find valuable insights hidden in your Db2 data and help you make better business decisions.",
        "id_": "doc_15.5.1_P1",
        "embedding": [0.0, 1.0],
        "relationships": "test-1",
        "metadata": {
            "weight": 2.0,
            "rank": "c",
            "url": "https://community.ibm.com/community/user/datamanagement/blogs/neena-cherian/2023/03/07/accelerating-db2-ai-queries-with-the-new-vector-pr",
        },
    },
    {
        "text": "Data structures are elements that are required to use DB2®. You can access and use these elements to organize your data. Examples of data structures include tables, table spaces, indexes, index spaces, keys, views, and databases.",
        "id_": "id_22.3.4.3.1_P2",
        "embedding": [1.0, 1.0],
        "relationships": "test-2",
        "metadata": {
            "weight": 3.0,
            "rank": "d",
            "url": "https://www.ibm.com/docs/en/zos-basic-skills?topic=concepts-db2-data-structures",
        },
    },
    {
        "text": "DB2® maintains a set of tables that contain information about the data that DB2 controls. These tables are collectively known as the catalog. The catalog tables contain information about DB2 objects such as tables, views, and indexes. When you create, alter, or drop an object, DB2 inserts, updates, or deletes rows of the catalog that describe the object.",
        "id_": "id_3.4.3.1_P3",
        "embedding": [2.0, 1.0],
        "relationships": "test-3",
        "metadata": {
            "weight": 4.0,
            "rank": "e",
            "url": "https://www.ibm.com/docs/en/zos-basic-skills?topic=objects-db2-catalog",
        },
    },
]
# Create Llama Text Nodes
# NOTE(review): this module-level loop leaks its last `relationships`,
# `metadata` and `text_node` bindings into module scope.
text_nodes = []
for text_json in text_json_list:
    # Construct the relationships using RelatedNodeInfo
    relationships = {
        NodeRelationship.SOURCE: RelatedNodeInfo(node_id=text_json["relationships"])
    }
    # Prepare the metadata dictionary; you might want to exclude certain metadata fields if necessary
    metadata = {
        "weight": text_json["metadata"]["weight"],
        "rank": text_json["metadata"]["rank"],
    }
    # Create a TextNode instance
    text_node = TextNode(
        text=text_json["text"],
        id_=text_json["id_"],
        embedding=text_json["embedding"],
        relationships=relationships,
        metadata=metadata,
    )
    text_nodes.append(text_node)
# Stores created by test_vs_creation; later tests iterate this list.
vector_store_list = []
def test_vs_creation() -> None:
    """Build one store per distance strategy and stash them in
    vector_store_list; skipped (early return) without a live Db2."""
    try:
        import ibm_db_dbi
    except ImportError:
        return
    try:
        connection = ibm_db_dbi.connect(database, username, password)
    except Exception:
        return
    # Ingest documents into Db2 Vector Store using different distance strategies
    vector_store_dot = DB2LlamaVS.from_documents(
        text_nodes,
        table_name="Documents_DOT",
        client=connection,
        distance_strategy=DistanceStrategy.DOT_PRODUCT,
        embed_dim=2,
    )
    vector_store_list.append(vector_store_dot)
    vector_store_max = DB2LlamaVS.from_documents(
        text_nodes,
        table_name="Documents_COSINE",
        client=connection,
        distance_strategy=DistanceStrategy.COSINE,
        embed_dim=2,
    )
    vector_store_list.append(vector_store_max)
    vector_store_euclidean = DB2LlamaVS.from_documents(
        text_nodes,
        table_name="Documents_EUCLIDEAN",
        client=connection,
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
        embed_dim=2,
    )
    vector_store_list.append(vector_store_euclidean)
    connection.commit()
def test_manage_texts():
    """Exercise add/delete/query on each store in vector_store_list;
    skipped (early return) without a live Db2."""
    try:
        import ibm_db_dbi
    except ImportError:
        return
    try:
        connection = ibm_db_dbi.connect(database, username, password)
    except Exception:
        return
    for i, vs in enumerate(vector_store_list, start=1):
        # Adding nodes.
        # BUG FIX: DB2LlamaVS exposes `add(nodes)`, not the LangChain-style
        # `add_texts(...)`; the old call also passed a stray module-level
        # `metadata` variable left over from the node-building loop, so it
        # always raised AttributeError and was silently swallowed here.
        # A duplicate-key error is still expected because from_documents()
        # already inserted these nodes.
        try:
            vs.add(text_nodes)
            print(f"\n\n\nAdd texts complete for vector store {i}\n\n\n")
        except Exception:
            print(f"\n\n\nExpected error on duplicate add for vector store {i}\n\n\n")
        # Deleting texts using the value of 'doc_id'
        vs.delete("test-1")
        print(f"\n\n\nDelete texts complete for vector store {i}\n\n\n")
        # Similarity search
        query = VectorStoreQuery(query_embedding=[1.0, 1.0], similarity_top_k=3)
        results = vs.query(query=query)
        print(f"\n\n\nSimilarity search results for vector store {i}: {results}\n\n\n")
    connection.commit()
def test_advanced_searches():
    """Run filtered/top-k/node-id query scenarios against every store in
    vector_store_list; skipped (early return) without a live Db2.

    Each scenario is a locally defined closure (capturing `vs` and `i`)
    that is invoked immediately after its definition.
    """
    try:
        import ibm_db_dbi
    except ImportError:
        return
    try:
        connection = ibm_db_dbi.connect(database, username, password)
    except Exception:
        return
    for i, vs in enumerate(vector_store_list, start=1):
        def query_without_filters_returns_all_rows_sorted_by_similarity():
            print(f"\n--- Vector Store {i} Advanced Searches ---")
            # Similarity search without a filter
            print("\nSimilarity search results without filter:")
            query = VectorStoreQuery(query_embedding=[1.0, 1.0], similarity_top_k=3)
            print(vs.query(query=query))
        query_without_filters_returns_all_rows_sorted_by_similarity()
        def query_with_filters_returns_multiple_matches():
            print(f"\n--- Vector Store {i} Advanced Searches ---")
            # Similarity search with filter
            print("\nSimilarity search results with filter:")
            filters = MetadataFilters(filters=[ExactMatchFilter(key="rank", value="c")])
            query = VectorStoreQuery(
                query_embedding=[1.0, 1.0], filters=filters, similarity_top_k=3
            )
            result = vs.query(query)
            print(result.ids)
        query_with_filters_returns_multiple_matches()
        def query_with_filter_applies_top_k():
            print(f"\n--- Vector Store {i} Advanced Searches ---")
            # Similarity search with a filter
            print("\nSimilarity search results with top k filter:")
            filters = MetadataFilters(filters=[ExactMatchFilter(key="rank", value="c")])
            query = VectorStoreQuery(
                query_embedding=[1.0, 1.0], filters=filters, similarity_top_k=1
            )
            result = vs.query(query)
            print(result.ids)
        query_with_filter_applies_top_k()
        def query_with_filter_applies_node_id_filter():
            print(f"\n--- Vector Store {i} Advanced Searches ---")
            # Similarity search with a filter
            print("\nSimilarity search results with node_id filter:")
            filters = MetadataFilters(filters=[ExactMatchFilter(key="rank", value="c")])
            query = VectorStoreQuery(
                query_embedding=[1.0, 1.0],
                filters=filters,
                similarity_top_k=3,
                node_ids=["452D24AB-F185-414C-A352-590B4B9EE51B"],
            )
            result = vs.query(query)
            print(result.ids)
        query_with_filter_applies_node_id_filter()
        def query_with_exact_filters_returns_single_match():
            print(f"\n--- Vector Store {i} Advanced Searches ---")
            # Similarity search with a filter
            print("\nSimilarity search results with filter:")
            filters = MetadataFilters(
                filters=[
                    ExactMatchFilter(key="rank", value="c"),
                    ExactMatchFilter(key="weight", value=2),
                ]
            )
            query = VectorStoreQuery(query_embedding=[1.0, 1.0], filters=filters)
            result = vs.query(query)
            print(result.ids)
        query_with_exact_filters_returns_single_match()
        def query_with_contradictive_filter_returns_no_matches():
            filters = MetadataFilters(
                filters=[
                    ExactMatchFilter(key="weight", value=2),
                    ExactMatchFilter(key="weight", value=3),
                ]
            )
            query = VectorStoreQuery(query_embedding=[1.0, 1.0], filters=filters)
            result = vs.query(query)
            print(result.ids)
        query_with_contradictive_filter_returns_no_matches()
        def query_with_filter_on_unknown_field_returns_no_matches():
            print(f"\n--- Vector Store {i} Advanced Searches ---")
            # Similarity search with a filter
            print("\nSimilarity search results with filter:")
            filters = MetadataFilters(
                filters=[ExactMatchFilter(key="unknown_field", value="c")]
            )
            query = VectorStoreQuery(query_embedding=[1.0, 1.0], filters=filters)
            result = vs.query(query)
            print(result.ids)
        query_with_filter_on_unknown_field_returns_no_matches()
        def delete_removes_document_from_query_results():
            vs.delete("test-1")
            query = VectorStoreQuery(query_embedding=[1.0, 1.0], similarity_top_k=2)
            result = vs.query(query)
            print(result.ids)
        delete_removes_document_from_query_results()
    connection.commit()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-db2/tests/test_vector_stores_db2.py",
"license": "MIT License",
"lines": 407,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-hive/examples/hive_demo.py | from llama_index.tools.hive import HiveToolSpec, HiveSearchMessage
# Instantiate the tool spec; replace the placeholder with a real Hive API key.
hive_tool = HiveToolSpec(api_key="add_your_hive_api_key")
# Simple prompt query
results = hive_tool.search(
    prompt="What is the current price of Ethereum?",
    include_data_sources=True
)
print("results ", results)
# Chat-style conversation: prior turns give the final "BTC" message context.
chat_msgs = [
    HiveSearchMessage(role="user", content="Price of what?"),
    HiveSearchMessage(role="assistant", content="Please specify asset."),
    HiveSearchMessage(role="user", content="BTC"),
]
results = hive_tool.search(
    messages=chat_msgs,
    include_data_sources=True
)
print("results ", results)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-hive/examples/hive_demo.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-hive/llama_index/tools/hive/base.py | from typing import Optional, List
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from hive_intelligence.client import HiveSearchClient
from hive_intelligence.types import (
HiveSearchRequest,
HiveSearchMessage,
HiveSearchResponse,
)
from hive_intelligence.errors import HiveSearchAPIError
class HiveToolSpec(BaseToolSpec):
    """Hive Search tool spec.

    Wraps HiveSearchClient so agents can issue prompt-based or chat-style
    Hive Intelligence searches.
    """
    spec_functions = ["search"]
    def __init__(
        self,
        api_key: str,
        temperature: Optional[float] = None,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
    ) -> None:
        self.client = HiveSearchClient(api_key=api_key)
        self.temperature = temperature
        self.top_k = top_k
        self.top_p = top_p
    def search(
        self,
        prompt: Optional[str] = None,
        messages: Optional[List[HiveSearchMessage]] = None,
        include_data_sources: bool = False,
    ) -> HiveSearchResponse:
        """
        Executes a Hive search request via prompt or chat-style messages.
        """
        request_kwargs = {
            "prompt": prompt,
            "messages": messages,
            "include_data_sources": include_data_sources,
        }
        # Forward sampling knobs only when configured, so API defaults apply
        # otherwise.
        sampling_options = {
            "temperature": self.temperature,
            "top_k": self.top_k,
            "top_p": self.top_p,
        }
        request_kwargs.update(
            {name: value for name, value in sampling_options.items() if value is not None}
        )
        req = HiveSearchRequest(**request_kwargs)
        try:
            response = self.client.search(req)
        except HiveSearchAPIError as e:
            raise RuntimeError(f"{e}") from e
        # Return the Hive search response
        return response
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-hive/llama_index/tools/hive/base.py",
"license": "MIT License",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-openGauss/llama_index/vector_stores/openGauss/base.py | import logging
from llama_index.vector_stores.postgres.base import PGVectorStore
from typing import (
Any,
NamedTuple,
Type,
Optional,
Union,
Set,
Dict,
List,
Tuple,
Literal,
)
from llama_index.core.vector_stores.types import (
MetadataFilters,
)
import sqlalchemy
# Module-level logger for the openGauss vector-store integration.
_logger = logging.getLogger(__name__)
class DBEmbeddingRow(NamedTuple):
    """One scored row returned from a similarity query."""
    node_id: str
    text: str
    metadata: dict
    similarity: float
# Postgres-compatible column types accepted for indexed metadata keys;
# validated against pg_type_map inside get_data_model.
PGType = Literal[
    "text",
    "int",
    "integer",
    "numeric",
    "float",
    "double precision",
    "boolean",
    "date",
    "timestamp",
    "uuid",
]
def get_data_model(
    base: Type,
    index_name: str,
    schema_name: str,
    hybrid_search: bool,
    text_search_config: str,
    cache_okay: bool,
    embed_dim: int = 1536,
    use_jsonb: bool = False,
    indexed_metadata_keys: Optional[Set[Tuple[str, PGType]]] = None,
) -> Any:
    """
    Create a dynamic SQLAlchemy model class backed by a new table.

    Args:
        base: Declarative base the generated model inherits from.
        index_name: Suffix used to derive the table/class/index names.
        schema_name: Database schema the table lives in.
        hybrid_search: If True, add a generated tsvector column plus a GIN
            index so full-text search can be combined with vector search.
        text_search_config: Text-search configuration for to_tsvector.
        cache_okay: Value for the TSVector TypeDecorator's cache_ok flag.
        embed_dim: Dimensionality of the Vector embedding column.
        use_jsonb: Store metadata as JSONB instead of JSON.
        indexed_metadata_keys: Optional (key, pg_type) pairs; each gets a
            btree index on the casted metadata value.

    Raises:
        ValueError: if an indexed metadata key declares an unknown type.
    """
    from opengauss_sqlalchemy.usertype import Vector
    from sqlalchemy import Column, Computed
    from sqlalchemy.dialects.postgresql import (
        BIGINT,
        JSON,
        JSONB,
        TSVECTOR,
        VARCHAR,
        UUID,
        DOUBLE_PRECISION,
    )
    from sqlalchemy import cast, column
    from sqlalchemy import String, Integer, Numeric, Float, Boolean, Date, DateTime
    from sqlalchemy.schema import Index
    from sqlalchemy.types import TypeDecorator
    pg_type_map = {
        "text": String,
        "int": Integer,
        "integer": Integer,
        "numeric": Numeric,
        "float": Float,
        "double precision": DOUBLE_PRECISION,  # or Float(precision=53)
        "boolean": Boolean,
        "date": Date,
        "timestamp": DateTime,
        "uuid": UUID,
    }
    indexed_metadata_keys = indexed_metadata_keys or set()
    # check that types are in pg_type_map
    for key, pg_type in indexed_metadata_keys:
        if pg_type not in pg_type_map:
            raise ValueError(
                f"Invalid type {pg_type} for key {key}. "
                f"Must be one of {list(pg_type_map.keys())}"
            )
    class TSVector(TypeDecorator):
        # Thin wrapper so cache_ok can be set from the cache_okay argument.
        impl = TSVECTOR
        cache_ok = cache_okay
    tablename = "data_og_%s" % index_name  # dynamic table name
    class_name = "Data_og_%s" % index_name  # dynamic class name
    indexname = "%s_og_idx" % index_name  # dynamic class name
    metadata_dtype = JSONB if use_jsonb else JSON
    embedding_col = Column(Vector(embed_dim))
    # One btree index per requested metadata key, built on the ->> text value
    # casted to the declared Postgres type.
    metadata_indices = [
        Index(
            f"{indexname}_{key}_{pg_type.replace(' ', '_')}",
            cast(column("metadata_").op("->>")(key), pg_type_map[pg_type]),
            postgresql_using="btree",
        )
        for key, pg_type in indexed_metadata_keys
    ]
    if hybrid_search:
        class HybridAbstractData(base):  # type: ignore
            __abstract__ = True  # this line is necessary
            id = Column(BIGINT, primary_key=True, autoincrement=True)
            text = Column(VARCHAR, nullable=False)
            metadata_ = Column(metadata_dtype)
            node_id = Column(VARCHAR)
            embedding = embedding_col
            # Generated column: tsvector of `text`, persisted for indexing.
            text_search_tsv = Column(  # type: ignore
                TSVector(),
                Computed(
                    "to_tsvector('%s', text)" % text_search_config, persisted=True
                ),
            )
        model = type(
            class_name,
            (HybridAbstractData,),
            {
                "__tablename__": tablename,
                "__table_args__": (*metadata_indices, {"schema": schema_name}),
            },
        )
        # GIN index over the tsvector column for full-text search.
        Index(
            indexname,
            model.text_search_tsv,  # type: ignore
            postgresql_using="gin",
        )
        # btree index on metadata ref_doc_id for delete-by-doc lookups.
        Index(
            f"{indexname}_1",
            model.metadata_["ref_doc_id"].astext,  # type: ignore
            postgresql_using="btree",
        )
    else:
        class AbstractData(base):  # type: ignore
            __abstract__ = True  # this line is necessary
            id = Column(BIGINT, primary_key=True, autoincrement=True)
            text = Column(VARCHAR, nullable=False)
            metadata_ = Column(metadata_dtype)
            node_id = Column(VARCHAR)
            embedding = embedding_col
        model = type(
            class_name,
            (AbstractData,),
            {
                "__tablename__": tablename,
                "__table_args__": (*metadata_indices, {"schema": schema_name}),
            },
        )
        # btree index on metadata ref_doc_id for delete-by-doc lookups.
        Index(
            f"{indexname}_1",
            model.metadata_["ref_doc_id"].astext,  # type: ignore
            postgresql_using="btree",
        )
    return model
class OpenGaussStore(PGVectorStore):
    """Vector store backed by an openGauss database.

    Thin subclass of ``PGVectorStore`` that builds openGauss SQLAlchemy URLs
    (``opengauss+psycopg2`` / ``opengauss+asyncpg``), registers the vector
    type on async connections, and supports the openGauss-specific
    ``ivfflat_probes`` session parameter at query time.
    """

    def __init__(
        self,
        connection_string: Optional[Union[str, sqlalchemy.engine.URL]] = None,
        async_connection_string: Optional[Union[str, sqlalchemy.engine.URL]] = None,
        table_name: Optional[str] = None,
        schema_name: Optional[str] = None,
        hybrid_search: bool = False,
        text_search_config: str = "english",
        embed_dim: int = 1536,
        cache_ok: bool = False,
        perform_setup: bool = True,
        debug: bool = False,
        use_jsonb: bool = False,
        hnsw_kwargs: Optional[Dict[str, Any]] = None,
        create_engine_kwargs: Optional[Dict[str, Any]] = None,
        initialization_fail_on_error: bool = False,
        engine: Optional[sqlalchemy.engine.Engine] = None,
        async_engine: Optional[sqlalchemy.ext.asyncio.AsyncEngine] = None,
        indexed_metadata_keys: Optional[Set[Tuple[str, PGType]]] = None,
    ) -> None:
        """
        Constructor.

        Args:
            connection_string (Union[str, sqlalchemy.engine.URL]): Connection string to postgres db.
            async_connection_string (Union[str, sqlalchemy.engine.URL]): Connection string to async pg db.
            table_name (str): Table name.
            schema_name (str): Schema name.
            hybrid_search (bool, optional): Enable hybrid search. Defaults to False.
            text_search_config (str, optional): Text search configuration. Defaults to "english".
            embed_dim (int, optional): Embedding dimensions. Defaults to 1536.
            cache_ok (bool, optional): Enable cache. Defaults to False.
            perform_setup (bool, optional): If db should be set up. Defaults to True.
            debug (bool, optional): Debug mode. Defaults to False.
            use_jsonb (bool, optional): Use JSONB instead of JSON. Defaults to False.
            hnsw_kwargs (Optional[Dict[str, Any]], optional): HNSW kwargs, a dict that
                contains "hnsw_ef_construction", "hnsw_ef_search", "hnsw_m", and optionally
                "hnsw_dist_method". Defaults to None, which turns off HNSW search.
            create_engine_kwargs (Optional[Dict[str, Any]], optional): Engine parameters to pass to create_engine. Defaults to None.
            initialization_fail_on_error (bool, optional): Raise (instead of warn) on setup errors. Defaults to False.
            engine (Optional[sqlalchemy.engine.Engine], optional): SQLAlchemy engine instance to use. Defaults to None.
            async_engine (Optional[sqlalchemy.ext.asyncio.AsyncEngine], optional): SQLAlchemy async engine instance to use. Defaults to None.
            indexed_metadata_keys (Optional[Set[Tuple[str, PGType]]], optional): Set of metadata keys with their type to index. Defaults to None.
        """
        # NOTE(review): `engine` and `async_engine` are accepted here but are
        # not forwarded to super().__init__, so they appear to be silently
        # ignored — _connect() only reuses self._engine/self._async_engine if
        # the parent stored them. Confirm against PGVectorStore.__init__.
        # NOTE(review): str(connection_string) turns None into the literal
        # string "None"; presumably callers always supply a connection string
        # (from_params builds one) — verify.
        super().__init__(
            connection_string=str(connection_string),
            async_connection_string=str(async_connection_string),
            table_name=table_name,
            schema_name=schema_name,
            hybrid_search=hybrid_search,
            text_search_config=text_search_config,
            embed_dim=embed_dim,
            cache_ok=cache_ok,
            perform_setup=perform_setup,
            debug=debug,
            use_jsonb=use_jsonb,
            hnsw_kwargs=hnsw_kwargs,
            create_engine_kwargs=create_engine_kwargs or {},
            initialization_fail_on_error=initialization_fail_on_error,
            use_halfvec=False,
            indexed_metadata_keys=indexed_metadata_keys,
        )
        # Rebuild the ORM table class with the openGauss-flavored data model.
        self._table_class = get_data_model(
            self._base,
            table_name,
            schema_name,
            hybrid_search,
            text_search_config,
            cache_ok,
            embed_dim=embed_dim,
            use_jsonb=use_jsonb,
            indexed_metadata_keys=indexed_metadata_keys,
        )

    @classmethod
    def class_name(cls) -> str:
        """Return the serialization class name."""
        return "OpenGaussStore"

    @classmethod
    def from_params(
        cls,
        host: Optional[str] = None,
        port: Optional[str] = None,
        database: Optional[str] = None,
        user: Optional[str] = None,
        password: Optional[str] = None,
        table_name: str = "llamaindex",
        schema_name: str = "public",
        connection_string: Optional[Union[str, sqlalchemy.engine.URL]] = None,
        async_connection_string: Optional[Union[str, sqlalchemy.engine.URL]] = None,
        hybrid_search: bool = False,
        text_search_config: str = "english",
        embed_dim: int = 1536,
        cache_ok: bool = False,
        perform_setup: bool = True,
        debug: bool = False,
        use_jsonb: bool = False,
        hnsw_kwargs: Optional[Dict[str, Any]] = None,
        create_engine_kwargs: Optional[Dict[str, Any]] = None,
        indexed_metadata_keys: Optional[Set[Tuple[str, PGType]]] = None,
    ) -> "OpenGaussStore":
        """
        Construct from params.

        Args:
            host (Optional[str], optional): Host of postgres connection. Defaults to None.
            port (Optional[str], optional): Port of postgres connection. Defaults to None.
            database (Optional[str], optional): Postgres DB name. Defaults to None.
            user (Optional[str], optional): Postgres username. Defaults to None.
            password (Optional[str], optional): Postgres password. Defaults to None.
            table_name (str): Table name. Defaults to "llamaindex".
            schema_name (str): Schema name. Defaults to "public".
            connection_string (Union[str, sqlalchemy.engine.URL]): Connection string to postgres db.
            async_connection_string (Union[str, sqlalchemy.engine.URL]): Connection string to async pg db.
            hybrid_search (bool, optional): Enable hybrid search. Defaults to False.
            text_search_config (str, optional): Text search configuration. Defaults to "english".
            embed_dim (int, optional): Embedding dimensions. Defaults to 1536.
            cache_ok (bool, optional): Enable cache. Defaults to False.
            perform_setup (bool, optional): If db should be set up. Defaults to True.
            debug (bool, optional): Debug mode. Defaults to False.
            use_jsonb (bool, optional): Use JSONB instead of JSON. Defaults to False.
            hnsw_kwargs (Optional[Dict[str, Any]], optional): HNSW kwargs, a dict that
                contains "hnsw_ef_construction", "hnsw_ef_search", "hnsw_m", and optionally
                "hnsw_dist_method". Defaults to None, which turns off HNSW search.
            create_engine_kwargs (Optional[Dict[str, Any]], optional): Engine parameters to pass to create_engine. Defaults to None.
            indexed_metadata_keys (Optional[Set[Tuple[str, str]]], optional): Set of metadata keys to index. Defaults to None.

        Returns:
            OpenGaussStore: Instance of OpenGaussStore constructed from params.
        """
        # Fall back to building openGauss dialect URLs from the discrete params.
        conn_str = (
            connection_string
            or f"opengauss+psycopg2://{user}:{password}@{host}:{port}/{database}"
        )
        async_conn_str = async_connection_string or (
            f"opengauss+asyncpg://{user}:{password}@{host}:{port}/{database}"
        )
        return cls(
            connection_string=conn_str,
            async_connection_string=async_conn_str,
            table_name=table_name,
            schema_name=schema_name,
            hybrid_search=hybrid_search,
            text_search_config=text_search_config,
            embed_dim=embed_dim,
            cache_ok=cache_ok,
            perform_setup=perform_setup,
            debug=debug,
            use_jsonb=use_jsonb,
            hnsw_kwargs=hnsw_kwargs,
            create_engine_kwargs=create_engine_kwargs,
            indexed_metadata_keys=indexed_metadata_keys,
        )

    def _initialize(self) -> None:
        """Lazily connect and run one-time schema/table/index setup.

        Each setup step is best-effort: failures are logged and only re-raised
        when ``initialization_fail_on_error`` is set.
        """
        fail_on_error = self.initialization_fail_on_error
        if not self._is_initialized:
            self._connect()
            if self.perform_setup:
                try:
                    self._create_schema_if_not_exists()
                except Exception as e:
                    _logger.warning(f"PG Setup: Error creating schema: {e}")
                    if fail_on_error:
                        raise
                try:
                    self._create_tables_if_not_exists()
                except Exception as e:
                    _logger.warning(f"PG Setup: Error creating tables: {e}")
                    if fail_on_error:
                        raise
                if self.hnsw_kwargs is not None:
                    try:
                        self._create_hnsw_index()
                    except Exception as e:
                        _logger.warning(f"PG Setup: Error creating HNSW index: {e}")
                        if fail_on_error:
                            raise
            self._is_initialized = True

    def _connect(self) -> Any:
        """Create sync/async engines and sessionmakers for openGauss.

        Registers the vector type on every new async DBAPI connection via a
        SQLAlchemy "connect" event listener.
        """
        from sqlalchemy import create_engine, event
        from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
        from sqlalchemy.orm import sessionmaker
        from opengauss_sqlalchemy.register_async import register_vector

        self._engine = self._engine or create_engine(
            self.connection_string, echo=self.debug, **self.create_engine_kwargs
        )
        self._session = sessionmaker(self._engine)
        self._async_engine = self._async_engine or create_async_engine(
            self.async_connection_string, **self.create_engine_kwargs
        )

        @event.listens_for(self._async_engine.sync_engine, "connect")
        def _connect_event(dbapi_connection, connection_record):
            # asyncpg-backed connection: register the vector codec on it.
            dbapi_connection.run_async(register_vector)

        self._async_session = sessionmaker(self._async_engine, class_=AsyncSession)  # type: ignore

    def _query_with_score(
        self,
        embedding: Optional[List[float]],
        limit: int = 10,
        metadata_filters: Optional[MetadataFilters] = None,
        **kwargs: Any,
    ) -> List[DBEmbeddingRow]:
        """Run a dense-vector query synchronously, returning scored rows.

        Supports per-query tuning via ``ivfflat_probes`` and
        ``hnsw_ef_search`` kwargs, applied as session settings.
        """
        stmt = self._build_query(embedding, limit, metadata_filters)
        with self._session() as session, session.begin():
            from sqlalchemy import text

            # Apply tuning knobs via bound parameters (never interpolation).
            if kwargs.get("ivfflat_probes"):
                session.execute(
                    text("SET ivfflat_probes = :ivfflat_probes"),
                    {"ivfflat_probes": kwargs["ivfflat_probes"]},
                )
            if self.hnsw_kwargs:
                hnsw_ef_search = (
                    kwargs.get("hnsw_ef_search") or self.hnsw_kwargs["hnsw_ef_search"]
                )
                session.execute(
                    text("SET hnsw_ef_search = :hnsw_ef_search"),
                    {"hnsw_ef_search": hnsw_ef_search},
                )
            res = session.execute(stmt)
            # Distance -> similarity; missing distance maps to 0 similarity.
            return [
                DBEmbeddingRow(
                    node_id=item.node_id,
                    text=item.text,
                    metadata=item.metadata_,
                    similarity=(1 - item.distance) if item.distance is not None else 0,
                )
                for item in res.all()
            ]

    async def _aquery_with_score(
        self,
        embedding: Optional[List[float]],
        limit: int = 10,
        metadata_filters: Optional[MetadataFilters] = None,
        **kwargs: Any,
    ) -> List[DBEmbeddingRow]:
        """Async counterpart of ``_query_with_score``."""
        stmt = self._build_query(embedding, limit, metadata_filters)
        async with self._async_session() as async_session, async_session.begin():
            from sqlalchemy import text

            # Fixed: hnsw_ef_search was interpolated into the SQL string with
            # an f-string; use a bound parameter like the sync path does.
            if self.hnsw_kwargs:
                hnsw_ef_search = (
                    kwargs.get("hnsw_ef_search") or self.hnsw_kwargs["hnsw_ef_search"]
                )
                await async_session.execute(
                    text("SET hnsw_ef_search = :hnsw_ef_search"),
                    {"hnsw_ef_search": hnsw_ef_search},
                )
            if kwargs.get("ivfflat_probes"):
                await async_session.execute(
                    text("SET ivfflat_probes = :ivfflat_probes"),
                    {"ivfflat_probes": kwargs["ivfflat_probes"]},
                )
            res = await async_session.execute(stmt)
            return [
                DBEmbeddingRow(
                    node_id=item.node_id,
                    text=item.text,
                    metadata=item.metadata_,
                    similarity=(1 - item.distance) if item.distance is not None else 0,
                )
                for item in res.all()
            ]
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-openGauss/llama_index/vector_stores/openGauss/base.py",
"license": "MIT License",
"lines": 413,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-openGauss/tests/test_opengauss.py | import pytest
from typing import Any, Dict, Generator, List, Union
import asyncio
from llama_index.core.schema import TextNode, NodeRelationship, RelatedNodeInfo
from llama_index.core.vector_stores.types import (
VectorStoreQuery,
VectorStoreQueryMode,
)
from llama_index.vector_stores.openGauss import OpenGaussStore
PARAMS: Dict[str, Union[str, int]] = {
"host": "localhost",
"user": "postgres",
"password": "mark90",
"port": 5432,
}
TEST_DB = "test_vector_db"
TEST_TABLE_NAME = "lorem_ipsum"
TEST_SCHEMA_NAME = "test"
TEST_EMBED_DIM = 2
try:
import asyncpg # noqa
import psycopg2 # noqa
postgres_not_available = False
except ImportError:
postgres_not_available = True
def _get_sample_vector(num: float) -> List[float]:
return [num] + [1.0] * (TEST_EMBED_DIM - 1)
@pytest.fixture(scope="session")
def conn() -> Any:
    """Session-scoped psycopg2 connection, skipping when DB is unavailable."""
    # Fixed: check availability BEFORE importing psycopg2 — the import used to
    # run first, so a missing driver raised ImportError instead of skipping.
    if postgres_not_available:
        pytest.skip("psycopg2 or asyncpg not installed")
    import psycopg2

    try:
        return psycopg2.connect(**PARAMS)
    except Exception as e:
        pytest.skip(f"Database connection failed: {e!s}")
@pytest.fixture()
def db(conn: Any) -> Generator:
    """Provision a fresh test database for a test and drop it afterwards."""

    def _execute(*statements: str) -> None:
        # Run the statements on a short-lived cursor, then commit once.
        with conn.cursor() as cursor:
            for statement in statements:
                cursor.execute(statement)
            conn.commit()

    conn.autocommit = True
    _execute(f"DROP DATABASE IF EXISTS {TEST_DB}", f"CREATE DATABASE {TEST_DB}")
    yield
    _execute(f"DROP DATABASE {TEST_DB}")
@pytest.fixture()
def pg_hybrid(db: None) -> Generator[OpenGaussStore, None, None]:
    """Yield a hybrid-search store bound to the throwaway test database."""
    store = OpenGaussStore.from_params(
        **PARAMS,
        database=TEST_DB,
        table_name=TEST_TABLE_NAME,
        schema_name=TEST_SCHEMA_NAME,
        hybrid_search=True,
        embed_dim=TEST_EMBED_DIM,
    )
    yield store
    # Dispose of sync/async engines after the test.
    asyncio.run(store.close())
@pytest.fixture()
def opengauss_store(db: None) -> Generator[OpenGaussStore, None, None]:
    """Yield a plain (non-hybrid) store bound to the throwaway test database."""
    store = OpenGaussStore.from_params(
        **PARAMS,
        database=TEST_DB,
        table_name=TEST_TABLE_NAME,
        schema_name=TEST_SCHEMA_NAME,
        embed_dim=TEST_EMBED_DIM,
    )
    yield store
    # Dispose of sync/async engines after the test.
    asyncio.run(store.close())
@pytest.fixture(scope="session")
def node_embeddings() -> List[TextNode]:
    """Four small nodes with distinct metadata shapes for search/filter tests."""
    return [
        # Unique embedding (1.0, ...) so top-1 dense search selects this node.
        TextNode(
            text="lorem ipsum",
            id_="aaa",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="aaa")},
            extra_info={"test_num": 1},
            embedding=_get_sample_vector(1.0),
        ),
        TextNode(
            text="dolor sit amet",
            id_="bbb",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="bbb")},
            extra_info={"test_key": "test_value"},
            embedding=_get_sample_vector(0.1),
        ),
        TextNode(
            text="consectetur adipiscing elit",
            id_="ccc",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="ccc")},
            extra_info={"test_key_list": ["test_value"]},
            embedding=_get_sample_vector(0.1),
        ),
        TextNode(
            text="sed do eiusmod tempor",
            id_="ddd",
            # NOTE(review): source points at "ccc", not "ddd" — looks like a
            # copy-paste leftover; confirm whether sharing a ref_doc_id with
            # node "ccc" is intentional (e.g. for delete-by-ref-doc tests).
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="ccc")},
            extra_info={"test_key_2": "test_val_2"},
            embedding=_get_sample_vector(0.1),
        ),
    ]
@pytest.fixture(scope="session")
def hybrid_node_embeddings() -> List[TextNode]:
    """Nodes whose text mentions "fox" so sparse (tsvector) search can rank them."""
    return [
        TextNode(
            text="lorem ipsum",
            id_="aaa",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="aaa")},
            embedding=_get_sample_vector(0.1),
        ),
        TextNode(
            text="dolor sit amet",
            id_="bbb",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="bbb")},
            extra_info={"test_key": "test_value"},
            embedding=_get_sample_vector(1.0),
        ),
        # The two "fox" nodes below are the expected sparse-query hits.
        TextNode(
            text="The quick brown fox jumped over the lazy dog.",
            id_="ccc",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="ccc")},
            embedding=_get_sample_vector(5.0),
        ),
        TextNode(
            text="The fox and the hound",
            id_="ddd",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="ddd")},
            extra_info={"test_key": "test_value"},
            embedding=_get_sample_vector(10.0),
        ),
    ]
@pytest.mark.skipif(postgres_not_available, reason="openGauss not available")
@pytest.mark.asyncio
@pytest.mark.parametrize("use_async", [True, False])
async def test_basic_search(
    opengauss_store: OpenGaussStore, node_embeddings: List[TextNode], use_async: bool
) -> None:
    """Top-1 dense search should return the node whose embedding matches exactly."""
    if use_async:
        await opengauss_store.async_add(node_embeddings)
    else:
        opengauss_store.add(node_embeddings)
    assert isinstance(opengauss_store, OpenGaussStore)
    assert hasattr(opengauss_store, "_engine")
    # Query vector equals node "aaa"'s embedding, so it must rank first.
    q = VectorStoreQuery(query_embedding=_get_sample_vector(1.0), similarity_top_k=1)
    if use_async:
        res = await opengauss_store.aquery(q)
    else:
        res = opengauss_store.query(q)
    assert res.nodes
    assert len(res.nodes) == 1
    assert res.nodes[0].node_id == "aaa"
@pytest.mark.skipif(postgres_not_available, reason="openGauss is not available")
@pytest.mark.asyncio
@pytest.mark.parametrize("use_async", [True, False])
async def test_sparse_query(
    pg_hybrid: OpenGaussStore,
    hybrid_node_embeddings: List[TextNode],
    use_async: bool,
) -> None:
    """Sparse (full-text) mode should rank nodes by text match, not embedding."""
    if use_async:
        await pg_hybrid.async_add(hybrid_node_embeddings)
    else:
        pg_hybrid.add(hybrid_node_embeddings)
    assert isinstance(pg_hybrid, OpenGaussStore)
    assert hasattr(pg_hybrid, "_engine")
    # text search should work when query is a sentence and not just a single word
    q = VectorStoreQuery(
        query_embedding=_get_sample_vector(0.1),
        query_str="who is the fox?",
        sparse_top_k=2,
        mode=VectorStoreQueryMode.SPARSE,
    )
    if use_async:
        res = await pg_hybrid.aquery(q)
    else:
        res = pg_hybrid.query(q)
    # Only the two "fox" nodes should match the text query.
    assert res.nodes
    assert len(res.nodes) == 2
    assert res.nodes[0].node_id == "ccc"
    assert res.nodes[1].node_id == "ddd"
@pytest.mark.skipif(postgres_not_available, reason="openGauss not available")
@pytest.mark.parametrize("hybrid", [True, False])
def test_opengauss_init(hybrid: bool) -> None:
    """Store construction should succeed in both plain and hybrid modes.

    Fixed: "hybird" typo in the parametrized name, missing skip guard, and a
    missing assertion (the test previously constructed the store and checked
    nothing).
    """
    store = OpenGaussStore.from_params(
        **PARAMS,
        database=TEST_DB,
        table_name=TEST_TABLE_NAME,
        schema_name=TEST_SCHEMA_NAME,
        hybrid_search=hybrid,
        embed_dim=TEST_EMBED_DIM,
    )
    assert isinstance(store, OpenGaussStore)
    assert store.class_name() == "OpenGaussStore"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-openGauss/tests/test_opengauss.py",
"license": "MIT License",
"lines": 191,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-openGauss/tests/test_vector_stores_opengauss.py | from llama_index.core.vector_stores.types import BasePydanticVectorStore
from llama_index.vector_stores.openGauss import OpenGaussStore
def test_class():
names_of_base_classes = [b.__name__ for b in OpenGaussStore.__mro__]
assert BasePydanticVectorStore.__name__ in names_of_base_classes
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-openGauss/tests/test_vector_stores_opengauss.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/agent.py | import json
import uuid
from typing import Any, Callable, Dict, List, Optional, Union
from ag_ui.core import RunAgentInput
from llama_index.core import Settings
from llama_index.core.llms import ChatMessage, ChatResponse, TextBlock
from llama_index.core.llms.function_calling import FunctionCallingLLM
from llama_index.core.tools import BaseTool, FunctionTool, ToolOutput
from llama_index.core.workflow import Context, Workflow, step
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.protocols.ag_ui.events import (
MessagesSnapshotWorkflowEvent,
StateSnapshotWorkflowEvent,
TextMessageChunkWorkflowEvent,
ToolCallChunkWorkflowEvent,
)
from llama_index.protocols.ag_ui.utils import (
llama_index_message_to_ag_ui_message,
ag_ui_message_to_llama_index_message,
timestamp,
validate_tool,
)
DEFAULT_STATE_PROMPT = """<state>
{state}
</state>
{user_input}
"""
# Internal workflow events (not streamed to the AG-UI client).
class InputEvent(StartEvent):
    # The raw AG-UI run request that kicked off this workflow run.
    input_data: RunAgentInput


class LoopEvent(Event):
    # Carries the accumulated chat history back into the chat step.
    messages: List[ChatMessage]


class ToolCallEvent(Event):
    # One tool invocation requested by the LLM.
    tool_call_id: str
    tool_name: str
    tool_kwargs: Dict[str, Any]


class ToolCallResultEvent(Event):
    # The outcome of a single tool invocation (errors are wrapped, not raised).
    tool_call_id: str
    tool_name: str
    tool_kwargs: Dict[str, Any]
    tool_output: ToolOutput
class AGUIChatWorkflow(Workflow):
    """Workflow bridging a function-calling LLM with the AG-UI protocol.

    Streams text/tool-call chunks plus state and message snapshots to the
    frontend. Backend tools are executed locally; frontend tool calls end the
    run so the client can execute them and call back with the results.
    """

    def __init__(
        self,
        llm: Optional[FunctionCallingLLM] = None,
        backend_tools: Optional[List[Union[BaseTool, Callable]]] = None,
        frontend_tools: Optional[List[Union[BaseTool, Callable]]] = None,
        system_prompt: Optional[str] = None,
        initial_state: Optional[Dict[str, Any]] = None,
        **workflow_kwargs: Any,
    ):
        super().__init__(**workflow_kwargs)
        self.llm = llm or Settings.llm
        assert (
            isinstance(self.llm, FunctionCallingLLM)
            and self.llm.metadata.is_function_calling_model
        ), "llm must be a function calling model"
        # Normalize plain callables into BaseTool instances.
        validated_frontend_tools: List[BaseTool] = [
            validate_tool(tool) for tool in frontend_tools or []
        ]
        validated_backend_tools: List[BaseTool] = [
            validate_tool(tool) for tool in backend_tools or []
        ]
        # name -> tool maps; tool names are assumed unique across both sets
        # (a duplicate name would be shadowed in handle_tool_call's merge).
        self.frontend_tools = {
            tool.metadata.name: tool for tool in validated_frontend_tools
        }
        self.backend_tools = {
            tool.metadata.name: tool for tool in validated_backend_tools
        }
        self.initial_state = initial_state or {}
        self.system_prompt = system_prompt

    def _snapshot_messages(self, ctx: Context, chat_history: List[ChatMessage]) -> None:
        """Emit a MessagesSnapshot event for the full chat history.

        Mutates assistant messages in place to attach their tool calls under
        ``additional_kwargs["ag_ui_tool_calls"]`` before conversion.
        """
        # inject tool calls into the assistant message
        for msg in chat_history:
            if msg.role == "assistant":
                tool_calls = self.llm.get_tool_calls_from_response(
                    ChatResponse(message=msg), error_on_no_tool_call=False
                )
                if tool_calls:
                    msg.additional_kwargs["ag_ui_tool_calls"] = [
                        {
                            "id": tool_call.tool_id,
                            "name": tool_call.tool_name,
                            "arguments": json.dumps(tool_call.tool_kwargs),
                        }
                        for tool_call in tool_calls
                    ]
        ag_ui_messages = [llama_index_message_to_ag_ui_message(m) for m in chat_history]
        ctx.write_event_to_stream(
            MessagesSnapshotWorkflowEvent(
                timestamp=timestamp(),
                messages=ag_ui_messages,
            )
        )

    @step
    async def chat(
        self, ctx: Context, ev: InputEvent | LoopEvent
    ) -> Optional[Union[StopEvent, ToolCallEvent]]:
        """Run one LLM turn: stream text chunks, then fan out any tool calls.

        Returns ``None`` after dispatching tool calls (the loop resumes via
        ``aggregate_tool_calls``) or ``StopEvent`` when the turn is final.
        """
        if isinstance(ev, InputEvent):
            # Fresh run: rebuild history and state from the AG-UI request.
            ag_ui_messages = ev.input_data.messages
            chat_history = [
                ag_ui_message_to_llama_index_message(m) for m in ag_ui_messages
            ]
            # State sometimes has unused messages, so we need to remove them
            state = ev.input_data.state
            if isinstance(state, dict):
                state.pop("messages", None)
            elif isinstance(state, str):
                state = json.loads(state)
                state.pop("messages", None)
            else:
                # initial state is not provided, use the default state
                state = self.initial_state.copy()
            # Save state to context for tools to use
            await ctx.store.set("state", state)
            ctx.write_event_to_stream(StateSnapshotWorkflowEvent(snapshot=state))
            if state:
                # Prepend the state to the most recent user message only.
                for msg in chat_history[::-1]:
                    if msg.role.value == "user":
                        msg.content = DEFAULT_STATE_PROMPT.format(
                            state=str(state), user_input=msg.content
                        )
                        break
            if self.system_prompt:
                # NOTE(review): chat_history[0] raises IndexError when the
                # request carries no messages — presumably AG-UI always sends
                # at least one; confirm upstream guarantees.
                if chat_history[0].role.value == "system":
                    chat_history[0].blocks.append(TextBlock(text=self.system_prompt))
                else:
                    chat_history.insert(
                        0, ChatMessage(role="system", content=self.system_prompt)
                    )
            await ctx.store.set("chat_history", chat_history)
        else:
            # Loop iteration after tool results: resume the stored history.
            chat_history = await ctx.store.get("chat_history")
        tools = list(self.frontend_tools.values())
        tools.extend(list(self.backend_tools.values()))
        resp_gen = await self.llm.astream_chat_with_tools(
            tools=tools,
            chat_history=chat_history,
            allow_parallel_tool_calls=True,
        )
        resp_id = str(uuid.uuid4())
        # Fallback value in case the stream yields nothing.
        resp = ChatResponse(message=ChatMessage(role="assistant", content=""))
        async for resp in resp_gen:
            if resp.delta:
                ctx.write_event_to_stream(
                    TextMessageChunkWorkflowEvent(
                        role="assistant",
                        delta=resp.delta,
                        timestamp=timestamp(),
                        message_id=resp_id,
                    )
                )
        chat_history.append(resp.message)
        await ctx.store.set("chat_history", chat_history)
        tool_calls = self.llm.get_tool_calls_from_response(
            resp, error_on_no_tool_call=False
        )
        if tool_calls:
            # aggregate_tool_calls collects exactly this many result events.
            await ctx.store.set("num_tool_calls", len(tool_calls))
            frontend_tool_calls = [
                tool_call
                for tool_call in tool_calls
                if tool_call.tool_name in self.frontend_tools
            ]
            backend_tool_calls = [
                tool_call
                for tool_call in tool_calls
                if tool_call.tool_name in self.backend_tools
            ]
            # Call backend tools first so that the frontend can return results for frontend tools
            for tool_call in backend_tool_calls:
                ctx.send_event(
                    ToolCallEvent(
                        tool_call_id=tool_call.tool_id,
                        tool_name=tool_call.tool_name,
                        tool_kwargs=tool_call.tool_kwargs,
                    )
                )
                ctx.write_event_to_stream(
                    ToolCallChunkWorkflowEvent(
                        tool_call_id=tool_call.tool_id,
                        tool_call_name=tool_call.tool_name,
                        delta=json.dumps(tool_call.tool_kwargs),
                    )
                )
            for tool_call in frontend_tool_calls:
                ctx.send_event(
                    ToolCallEvent(
                        tool_call_id=tool_call.tool_id,
                        tool_name=tool_call.tool_name,
                        tool_kwargs=tool_call.tool_kwargs,
                    )
                )
                ctx.write_event_to_stream(
                    ToolCallChunkWorkflowEvent(
                        tool_call_id=tool_call.tool_id,
                        tool_call_name=tool_call.tool_name,
                        delta=json.dumps(tool_call.tool_kwargs),
                    )
                )
            # Send MessagesSnapshot AFTER ToolCallChunk events, as a "wrap it up" step
            self._snapshot_messages(ctx, [*chat_history])
            return None
        # No tool calls, send snapshot immediately
        self._snapshot_messages(ctx, [*chat_history])
        return StopEvent()

    @step
    async def handle_tool_call(
        self, ctx: Context, ev: ToolCallEvent
    ) -> ToolCallResultEvent:
        """Execute one tool call; errors become an is_error ToolOutput, never a raise."""
        try:
            # Frontend tools are looked up too: if a frontend tool somehow has
            # a local implementation it runs here; otherwise KeyError is
            # caught below and reported as an error result.
            all_tools = {**self.frontend_tools, **self.backend_tools}
            tool = all_tools[ev.tool_name]
            kwargs = {**ev.tool_kwargs}
            # Inject the workflow Context if the tool declared a ctx parameter.
            if isinstance(tool, FunctionTool) and tool.ctx_param_name:
                kwargs[tool.ctx_param_name] = ctx
            tool_output = await tool.acall(**kwargs)
            # Update the state snapshot
            current_state = await ctx.store.get("state", default={})
            ctx.write_event_to_stream(
                StateSnapshotWorkflowEvent(snapshot=current_state)
            )
            return ToolCallResultEvent(
                tool_call_id=ev.tool_call_id,
                tool_name=ev.tool_name,
                tool_kwargs=ev.tool_kwargs,
                tool_output=tool_output,
            )
        except Exception as e:
            return ToolCallResultEvent(
                tool_call_id=ev.tool_call_id,
                tool_name=ev.tool_name,
                tool_kwargs=ev.tool_kwargs,
                tool_output=ToolOutput(
                    tool_name=ev.tool_name,
                    content=str(e),
                    raw_input=ev.tool_kwargs,
                    raw_output=str(e),
                    is_error=True,
                ),
            )

    @step
    async def aggregate_tool_calls(
        self, ctx: Context, ev: ToolCallResultEvent
    ) -> Optional[Union[StopEvent, LoopEvent]]:
        """Collect all tool results for the turn, then loop or stop.

        Backend results are appended to the chat history as tool messages;
        if any frontend tool ran, the run stops so the client can respond.
        """
        num_tool_calls = await ctx.store.get("num_tool_calls")
        # collect_events returns None until all expected results have arrived.
        tool_call_results: List[ToolCallResultEvent] = ctx.collect_events(
            ev, [ToolCallResultEvent] * num_tool_calls
        )
        if tool_call_results is None:
            return None
        # organize tool results so that frontend tools are last
        # for backend tools, update the messages snapshot with the tool output
        frontend_tool_calls = [
            tool_result
            for tool_result in tool_call_results
            if tool_result.tool_name in self.frontend_tools
        ]
        backend_tool_calls = [
            tool_result
            for tool_result in tool_call_results
            if tool_result.tool_name in self.backend_tools
        ]
        new_tool_messages = []
        for tool_result in backend_tool_calls:
            new_tool_messages.append(
                ChatMessage(
                    role="tool",
                    content=tool_result.tool_output.content,
                    additional_kwargs={
                        "tool_call_id": tool_result.tool_call_id,
                    },
                )
            )
        # emit a messages snapshot event if there are new messages
        chat_history = await ctx.store.get("chat_history")
        if new_tool_messages:
            chat_history.extend(new_tool_messages)
            self._snapshot_messages(ctx, [*chat_history])
            await ctx.store.set("chat_history", chat_history)
        if len(frontend_tool_calls) > 0:
            # Expect frontend tool calls to call back to the agent
            return StopEvent()
        return LoopEvent(messages=chat_history)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/agent.py",
"license": "MIT License",
"lines": 283,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/events.py | # Converts ag_ui events to llama_index workflow events
from ag_ui.core.events import (
EventType,
TextMessageStartEvent,
TextMessageContentEvent,
TextMessageChunkEvent,
TextMessageEndEvent,
ToolCallStartEvent,
ToolCallArgsEvent,
ToolCallChunkEvent,
ToolCallEndEvent,
StateSnapshotEvent,
StateDeltaEvent,
MessagesSnapshotEvent,
RawEvent,
CustomEvent,
RunStartedEvent,
RunFinishedEvent,
RunErrorEvent,
StepStartedEvent,
StepFinishedEvent,
)
from llama_index.core.workflow import Event
# Each class below pairs an AG-UI protocol event with the llama_index
# workflow Event base so it can be written to the workflow stream, pinning
# the protocol `type` discriminator as a default.
class TextMessageStartWorkflowEvent(TextMessageStartEvent, Event):
    type: EventType = EventType.TEXT_MESSAGE_START


class TextMessageContentWorkflowEvent(TextMessageContentEvent, Event):
    type: EventType = EventType.TEXT_MESSAGE_CONTENT


class TextMessageChunkWorkflowEvent(TextMessageChunkEvent, Event):
    type: EventType = EventType.TEXT_MESSAGE_CHUNK


class TextMessageEndWorkflowEvent(TextMessageEndEvent, Event):
    type: EventType = EventType.TEXT_MESSAGE_END


class ToolCallStartWorkflowEvent(ToolCallStartEvent, Event):
    type: EventType = EventType.TOOL_CALL_START


class ToolCallArgsWorkflowEvent(ToolCallArgsEvent, Event):
    type: EventType = EventType.TOOL_CALL_ARGS


class ToolCallChunkWorkflowEvent(ToolCallChunkEvent, Event):
    type: EventType = EventType.TOOL_CALL_CHUNK


class ToolCallEndWorkflowEvent(ToolCallEndEvent, Event):
    type: EventType = EventType.TOOL_CALL_END


class StateSnapshotWorkflowEvent(StateSnapshotEvent, Event):
    type: EventType = EventType.STATE_SNAPSHOT


class StateDeltaWorkflowEvent(StateDeltaEvent, Event):
    type: EventType = EventType.STATE_DELTA


class MessagesSnapshotWorkflowEvent(MessagesSnapshotEvent, Event):
    type: EventType = EventType.MESSAGES_SNAPSHOT


class RawWorkflowEvent(RawEvent, Event):
    type: EventType = EventType.RAW


class CustomWorkflowEvent(CustomEvent, Event):
    type: EventType = EventType.CUSTOM


class RunStartedWorkflowEvent(RunStartedEvent, Event):
    type: EventType = EventType.RUN_STARTED


class RunFinishedWorkflowEvent(RunFinishedEvent, Event):
    type: EventType = EventType.RUN_FINISHED


class RunErrorWorkflowEvent(RunErrorEvent, Event):
    type: EventType = EventType.RUN_ERROR


class StepStartedWorkflowEvent(StepStartedEvent, Event):
    type: EventType = EventType.STEP_STARTED


class StepFinishedWorkflowEvent(StepFinishedEvent, Event):
    type: EventType = EventType.STEP_FINISHED
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/events.py",
"license": "MIT License",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/utils.py | import datetime
import re
import uuid
from ag_ui.core import (
Message,
SystemMessage,
UserMessage,
AssistantMessage,
ToolMessage,
DeveloperMessage,
ToolCall,
FunctionCall,
)
from ag_ui.encoder import EventEncoder
from typing import Union, Callable
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool, FunctionTool
from llama_index.core.workflow import Event
def llama_index_message_to_ag_ui_message(
    message: ChatMessage,
) -> Message:
    """Convert a llama_index ChatMessage into the matching AG-UI Message.

    Branch order matters: a user-role message carrying a ``tool_call_id``
    falls through to the ToolMessage branch (tool results are stored as user
    messages by the reverse converter).

    Raises:
        ValueError: If the message role is not recognized.
    """
    # Reuse an id stamped by the reverse conversion, else mint a fresh one.
    msg_id = message.additional_kwargs.get("id", str(uuid.uuid4()))
    if message.role.value == "system":
        return SystemMessage(id=msg_id, content=message.content, role="system")
    elif (
        message.role.value == "user" and "tool_call_id" not in message.additional_kwargs
    ):
        # Strip any injected <state> block before sending back to the client.
        content = message.content or ""
        content = re.sub(r"<state>[\s\S]*?</state>", "", content).strip()
        return UserMessage(id=msg_id, content=content, role="user")
    elif message.role.value == "assistant":
        # Remove tool calls from the message
        content = message.content
        if content:
            content = re.sub(r"<tool_call>[\s\S]*?</tool_call>", "", content).strip()
        # Fetch tool calls from the message
        if message.additional_kwargs.get("ag_ui_tool_calls", None):
            tool_calls = [
                ToolCall(
                    type="function",
                    id=tool_call["id"],
                    function=FunctionCall(
                        name=tool_call["name"],
                        arguments=tool_call["arguments"],
                    ),
                )
                for tool_call in message.additional_kwargs["ag_ui_tool_calls"]
            ]
        else:
            tool_calls = None
        return AssistantMessage(
            id=msg_id,
            content=content or None,
            role="assistant",
            tool_calls=tool_calls,
        )
    elif message.role.value == "tool" or "tool_call_id" in message.additional_kwargs:
        return ToolMessage(
            id=msg_id,
            content=message.content or "",
            role="tool",
            tool_call_id=message.additional_kwargs.get(
                "tool_call_id", str(uuid.uuid4())
            ),
        )
    else:
        raise ValueError(f"Unknown message role: {message.role}")
def ag_ui_message_to_llama_index_message(message: Message) -> ChatMessage:
    """Convert an AG-UI Message into a llama_index ChatMessage.

    The AG-UI message id is preserved in ``additional_kwargs["id"]`` so the
    round trip back to AG-UI keeps stable ids.

    Raises:
        ValueError: If the message type is not recognized.
    """
    if isinstance(message, SystemMessage):
        return ChatMessage(
            role="system", content=message.content, additional_kwargs={"id": message.id}
        )
    elif isinstance(message, UserMessage):
        return ChatMessage(
            role="user", content=message.content, additional_kwargs={"id": message.id}
        )
    elif isinstance(message, AssistantMessage):
        # TODO: llama-index-core needs to support tool calls on messages in a more official way
        # For now, we'll just convert the tool call into an assistant message
        # This is a bit of an opinionated hack for now to support the ag-ui tool calls
        tool_calls = message.tool_calls
        content = message.content or ""
        if tool_calls:
            # Serialize tool calls into <tool_call> markers inside the content
            # (the reverse converter strips them back out).
            tool_calls_str = "\n".join(
                [
                    f"<tool_call><name>{tool_call.function.name}</name><arguments>{tool_call.function.arguments}</arguments></tool_call>"
                    for tool_call in tool_calls
                ]
            )
            content = f"{content}\n\n{tool_calls_str}".strip()
            ag_ui_tool_calls = [
                {
                    "id": tool_call.id,
                    "name": tool_call.function.name,
                    "arguments": tool_call.function.arguments,
                }
                for tool_call in tool_calls
            ]
        else:
            ag_ui_tool_calls = None
        return ChatMessage(
            role="assistant",
            content=content,
            additional_kwargs={
                "id": message.id,
                "ag_ui_tool_calls": ag_ui_tool_calls,
            },
        )
    elif isinstance(message, ToolMessage):
        # TODO: llama-index-core needs to support tool calls on messages in a more official way
        # tool call results into a user message
        # This is a bit of an opinionated hack for now to support the ag-ui tool calls
        return ChatMessage(
            role="user",
            content=message.content,
            additional_kwargs={"id": message.id, "tool_call_id": message.tool_call_id},
        )
    elif isinstance(message, DeveloperMessage):
        # Developer messages have no llama_index role; map them to system.
        return ChatMessage(
            role="system", content=message.content, additional_kwargs={"id": message.id}
        )
    else:
        raise ValueError(f"Unknown message type: {type(message)}")
def workflow_event_to_sse(event: Event) -> str:
    """Encode a workflow event as an AG-UI server-sent-events payload string."""
    encoder = EventEncoder()
    return encoder.encode(event)
def timestamp() -> int:
    """Return the current Unix timestamp in whole seconds."""
    # Use an explicitly UTC-aware datetime: the epoch value is identical to
    # the previous naive form, but this avoids mixing naive and aware
    # datetimes elsewhere in the codebase.
    return int(datetime.datetime.now(datetime.timezone.utc).timestamp())
def validate_tool(tool: Union[BaseTool, Callable]) -> BaseTool:
    """Coerce *tool* into a ``BaseTool``, wrapping plain callables.

    Raises:
        ValueError: if *tool* is neither a BaseTool nor callable.
    """
    if isinstance(tool, BaseTool):
        return tool
    if callable(tool):
        return FunctionTool.from_defaults(tool)
    raise ValueError(f"Invalid tool type: {type(tool)}")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/utils.py",
"license": "MIT License",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-core/llama_index/core/agent/workflow/prompts.py | DEFAULT_HANDOFF_PROMPT = """Useful for handing off to another agent.
If you are currently not equipped to handle the user's request, or another agent is better suited to handle the request, please hand off to the appropriate agent.
Currently available agents:
{agent_info}
"""
# Template combining shared workflow state with the incoming message;
# expects `state` and `msg` format keys.
DEFAULT_STATE_PROMPT = """Current state:
{state}
Current message:
{msg}
"""
# Message injected after a successful handoff; expects `to_agent` and `reason`.
DEFAULT_HANDOFF_OUTPUT_PROMPT = "Agent {to_agent} is now handling the request due to the following reason: {reason}.\nPlease continue with the current request."
# Final-iteration nudge telling the LLM to stop tool use and answer;
# expects `max_iterations`.
DEFAULT_EARLY_STOPPING_PROMPT = """You have reached the maximum number of iterations ({max_iterations}).
Based on the information gathered so far, please provide a helpful final response to the user's original query.
Do not attempt to use any more tools. Simply summarize what you have learned and provide the best possible answer."""
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/llama_index/core/agent/workflow/prompts.py",
"license": "MIT License",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-instrumentation/src/llama_index_instrumentation/base/event.py | from datetime import datetime
from typing import Any, Dict, Optional
from uuid import uuid4
from pydantic import BaseModel, ConfigDict, Field
from llama_index_instrumentation.span import active_span_id
class BaseEvent(BaseModel):
    """Base class for all instrumentation events.

    Every event records when it was created, a unique id, the id of the
    span that was active at creation time (if any), and free-form tags.
    """

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        # copy_on_model_validation = "deep" # not supported in Pydantic V2...
    )
    # Wall-clock creation time; plain reference instead of a lambda wrapper.
    timestamp: datetime = Field(default_factory=datetime.now)
    # Unique identifier for this event instance.
    id_: str = Field(default_factory=lambda: str(uuid4()))
    # Id of the enclosing span at creation time, if any.
    span_id: Optional[str] = Field(default_factory=active_span_id.get)  # type: ignore
    # Arbitrary tags; default_factory avoids declaring a shared mutable default.
    tags: Dict[str, Any] = Field(default_factory=dict)

    @classmethod
    def class_name(cls) -> str:
        """Return class name."""
        return "BaseEvent"

    def dict(self, **kwargs: Any) -> Dict[str, Any]:
        """Keep for backwards compatibility."""
        return self.model_dump(**kwargs)

    def model_dump(self, **kwargs: Any) -> Dict[str, Any]:
        """Serialize the event, injecting the concrete class name."""
        data = super().model_dump(**kwargs)
        data["class_name"] = self.class_name()
        return data
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-instrumentation/src/llama_index_instrumentation/base/event.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-instrumentation/src/llama_index_instrumentation/base/handler.py | from abc import ABC, abstractmethod
class BaseInstrumentationHandler(ABC):
    """Interface for objects that set up instrumentation at startup."""

    @classmethod
    @abstractmethod
    def init(cls) -> None:
        """Initialize the instrumentation handler."""
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-instrumentation/src/llama_index_instrumentation/base/handler.py",
"license": "MIT License",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-instrumentation/src/llama_index_instrumentation/dispatcher.py | import asyncio
import inspect
import logging
import uuid
from contextlib import contextmanager
from contextvars import Context, ContextVar, Token, copy_context
from functools import partial
from typing import Any, Callable, Dict, Generator, List, Optional, Protocol, TypeVar
import wrapt
from deprecated import deprecated
from pydantic import BaseModel, ConfigDict, Field
from .base import BaseEvent
from .event_handlers import BaseEventHandler
from .events.span import SpanDropEvent
from .span import active_span_id
from .span_handlers import (
BaseSpanHandler,
NullSpanHandler,
)
DISPATCHER_SPAN_DECORATED_ATTR = "__dispatcher_span_decorated__"
_logger = logging.getLogger(__name__)
# ContextVar for managing active instrument tags
active_instrument_tags: ContextVar[Dict[str, Any]] = ContextVar(
"instrument_tags", default={}
)
_R = TypeVar("_R")
@contextmanager
def instrument_tags(new_tags: Dict[str, Any]) -> Generator[None, None, None]:
    """Temporarily make *new_tags* the active instrument tags.

    Restores the previous tags when the block exits, even on error.
    """
    reset_token = active_instrument_tags.set(new_tags)
    try:
        yield
    finally:
        active_instrument_tags.reset(reset_token)
# Keep for backwards compatibility
class EventDispatcher(Protocol):
    """Structural type for callables that dispatch a ``BaseEvent``."""

    def __call__(self, event: BaseEvent, **kwargs: Any) -> None: ...
class Dispatcher(BaseModel):
    """
    Dispatcher class.

    Responsible for dispatching BaseEvent (and its subclasses) as well as
    sending signals to enter/exit/drop a BaseSpan. It does so by sending
    event and span signals to its attached BaseEventHandler as well as
    BaseSpanHandler.

    Concurrency:
        - Dispatcher is async-task and thread safe in the sense that
        spans of async coros will maintain its hierarchy or trace-trees and
        spans which emanate from various threads will also maintain its
        hierarchy.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)
    name: str = Field(default_factory=str, description="Name of dispatcher")
    event_handlers: List[BaseEventHandler] = Field(
        default=[], description="List of attached handlers"
    )
    span_handlers: List[BaseSpanHandler] = Field(
        default=[NullSpanHandler()], description="Span handler."
    )
    parent_name: str = Field(
        default_factory=str, description="Name of parent Dispatcher."
    )
    manager: Optional["Manager"] = Field(
        default=None, description="Dispatcher manager."
    )
    root_name: str = Field(default="root", description="Root of the Dispatcher tree.")
    propagate: bool = Field(
        default=True,
        description="Whether to propagate the event to parent dispatchers and their handlers",
    )
    current_span_ids: Optional[Dict[Any, str]] = Field(
        default_factory=dict,  # type: ignore
        description="Id of current enclosing span. Used for creating `dispatch_event` partials.",
    )

    def __init__(
        self,
        name: str = "",
        event_handlers: List[BaseEventHandler] = [],
        span_handlers: List[BaseSpanHandler] = [],
        parent_name: str = "",
        manager: Optional["Manager"] = None,
        root_name: str = "root",
        propagate: bool = True,
    ):
        super().__init__(
            name=name,
            event_handlers=event_handlers,
            span_handlers=span_handlers,
            parent_name=parent_name,
            manager=manager,
            root_name=root_name,
            propagate=propagate,
        )

    @property
    def parent(self) -> "Dispatcher":
        """Parent dispatcher, looked up by name via the shared Manager."""
        assert self.manager is not None
        return self.manager.dispatchers[self.parent_name]

    @property
    def root(self) -> "Dispatcher":
        """Root dispatcher of the tree, looked up via the shared Manager."""
        assert self.manager is not None
        return self.manager.dispatchers[self.root_name]

    def add_event_handler(self, handler: BaseEventHandler) -> None:
        """Add handler to set of handlers."""
        self.event_handlers += [handler]

    def add_span_handler(self, handler: BaseSpanHandler) -> None:
        """Add handler to set of handlers."""
        self.span_handlers += [handler]

    def event(self, event: BaseEvent, **kwargs: Any) -> None:
        """Dispatch event to all registered handlers."""
        c: Optional[Dispatcher] = self
        # Attach tags from the active context
        event.tags.update(active_instrument_tags.get())
        # Walk up the ancestor chain, notifying each dispatcher's handlers,
        # until propagation is disabled or the root is passed.
        while c:
            for h in c.event_handlers:
                try:
                    h.handle(event, **kwargs)
                except BaseException:
                    # Deliberately best-effort: a failing handler must never
                    # break the instrumented application.
                    pass
            if not c.propagate:
                c = None
            else:
                c = c.parent

    async def aevent(self, event: BaseEvent, **kwargs: Any) -> None:
        """Asynchronously dispatch event to all registered handlers."""
        c: Optional[Dispatcher] = self
        event.tags.update(active_instrument_tags.get())
        tasks: List[asyncio.Task] = []
        while c:
            for h in c.event_handlers:
                try:
                    tasks.append(asyncio.create_task(h.ahandle(event, **kwargs)))
                except BaseException:
                    pass
            if not c.propagate:
                c = None
            else:
                c = c.parent
        # Run all handler coroutines concurrently; exceptions are collected
        # (return_exceptions=True) rather than raised into the caller.
        if tasks:
            await asyncio.gather(*tasks, return_exceptions=True)

    @deprecated(
        version="0.10.41",
        reason=(
            "`get_dispatch_event()` has been deprecated in favor of using `event()` directly."
            " If running into this warning through an integration package, then please "
            "update your integration to the latest version."
        ),
    )
    def get_dispatch_event(self) -> EventDispatcher:
        """
        Keep for backwards compatibility.

        In llama-index-core v0.10.41, we removed this method and made changes to
        integrations or packs that relied on this method. Adding back this method
        in case any integrations or apps have not been upgraded. That is, they
        still rely on this method.
        """
        return self.event

    def span_enter(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        parent_id: Optional[str] = None,
        tags: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> None:
        """Send notice to handlers that a span with id_ has started."""
        c: Optional[Dispatcher] = self
        while c:
            for h in c.span_handlers:
                try:
                    h.span_enter(
                        id_=id_,
                        bound_args=bound_args,
                        instance=instance,
                        parent_id=parent_id,
                        tags=tags,
                        **kwargs,
                    )
                except BaseException:
                    # Handler failures are swallowed; see `event()` for rationale.
                    pass
            if not c.propagate:
                c = None
            else:
                c = c.parent

    def span_drop(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        err: Optional[BaseException] = None,
        **kwargs: Any,
    ) -> None:
        """Send notice to handlers that a span with id_ is being dropped."""
        c: Optional[Dispatcher] = self
        while c:
            for h in c.span_handlers:
                try:
                    h.span_drop(
                        id_=id_,
                        bound_args=bound_args,
                        instance=instance,
                        err=err,
                        **kwargs,
                    )
                except BaseException:
                    pass
            if not c.propagate:
                c = None
            else:
                c = c.parent

    def span_exit(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        result: Optional[Any] = None,
        **kwargs: Any,
    ) -> None:
        """Send notice to handlers that a span with id_ is exiting."""
        c: Optional[Dispatcher] = self
        while c:
            for h in c.span_handlers:
                try:
                    h.span_exit(
                        id_=id_,
                        bound_args=bound_args,
                        instance=instance,
                        result=result,
                        **kwargs,
                    )
                except BaseException:
                    pass
            if not c.propagate:
                c = None
            else:
                c = c.parent

    def span(self, func: Callable[..., _R]) -> Callable[..., _R]:
        """Decorator that wraps *func* in span enter/exit/drop notifications.

        Handles sync functions, coroutine functions, and sync functions that
        return an ``asyncio.Future``.
        """
        # The `span` decorator should be idempotent.
        try:
            if hasattr(func, DISPATCHER_SPAN_DECORATED_ATTR):
                return func
            setattr(func, DISPATCHER_SPAN_DECORATED_ATTR, True)
        except AttributeError:
            # instance methods can fail with:
            # AttributeError: 'method' object has no attribute '__dispatcher_span_decorated__'
            pass

        @wrapt.decorator
        def wrapper(func: Callable, instance: Any, args: list, kwargs: dict) -> Any:
            bound_args = inspect.signature(func).bind(*args, **kwargs)
            # Span ids embed the class/function name plus a fresh uuid.
            if instance is not None:
                actual_class = type(instance).__name__
                method_name = func.__name__
                id_ = f"{actual_class}.{method_name}-{uuid.uuid4()}"
            else:
                id_ = f"{func.__qualname__}-{uuid.uuid4()}"
            tags = active_instrument_tags.get()

            result = None

            # Copy the current context
            context = copy_context()
            token = active_span_id.set(id_)
            # Token.MISSING means there was no enclosing span.
            parent_id = None if token.old_value is Token.MISSING else token.old_value
            self.span_enter(
                id_=id_,
                bound_args=bound_args,
                instance=instance,
                parent_id=parent_id,
                tags=tags,
            )

            def handle_future_result(
                future: asyncio.Future,
                span_id: str,
                bound_args: inspect.BoundArguments,
                instance: Any,
                context: Context,
            ) -> None:
                # Done-callback for Futures returned by sync functions:
                # closes the span once the future resolves.
                try:
                    result = None if future.exception() else future.result()
                    self.span_exit(
                        id_=span_id,
                        bound_args=bound_args,
                        instance=instance,
                        result=result,
                    )
                    # NOTE(review): return value of a done-callback is ignored
                    # by asyncio; kept as-is for byte-identical behavior.
                    return result
                except BaseException as e:
                    self.event(SpanDropEvent(span_id=span_id, err_str=str(e)))
                    self.span_drop(
                        id_=span_id, bound_args=bound_args, instance=instance, err=e
                    )
                    raise
                finally:
                    try:
                        context.run(active_span_id.reset, token)
                    except ValueError as e:
                        # TODO: Since the context is created in a sync context no in async task,
                        # detaching the token raises an ValueError saying "token was created
                        # in a different Context. We should figure out how to handle active spans
                        # correctly, but for now just suppressing the error so it won't be
                        # surfaced to the user.
                        _logger.debug(f"Failed to reset active_span_id: {e}")

            try:
                result = func(*args, **kwargs)
                if isinstance(result, asyncio.Future):
                    # If the result is a Future, wrap it
                    new_future = asyncio.ensure_future(result)
                    new_future.add_done_callback(
                        partial(
                            handle_future_result,
                            span_id=id_,
                            bound_args=bound_args,
                            instance=instance,
                            context=context,
                        )
                    )
                    return new_future
                else:
                    # For non-Future results, proceed as before
                    self.span_exit(
                        id_=id_, bound_args=bound_args, instance=instance, result=result
                    )
                    return result
            except BaseException as e:
                self.event(SpanDropEvent(span_id=id_, err_str=str(e)))
                self.span_drop(id_=id_, bound_args=bound_args, instance=instance, err=e)
                raise
            finally:
                # Futures reset the span id in their done-callback instead.
                if not isinstance(result, asyncio.Future):
                    active_span_id.reset(token)

        @wrapt.decorator
        async def async_wrapper(
            func: Callable, instance: Any, args: list, kwargs: dict
        ) -> Any:
            bound_args = inspect.signature(func).bind(*args, **kwargs)
            if instance is not None:
                actual_class = type(instance).__name__
                method_name = func.__name__
                id_ = f"{actual_class}.{method_name}-{uuid.uuid4()}"
            else:
                id_ = f"{func.__qualname__}-{uuid.uuid4()}"
            tags = active_instrument_tags.get()

            token = active_span_id.set(id_)
            parent_id = None if token.old_value is Token.MISSING else token.old_value
            self.span_enter(
                id_=id_,
                bound_args=bound_args,
                instance=instance,
                parent_id=parent_id,
                tags=tags,
            )
            try:
                result = await func(*args, **kwargs)
            except BaseException as e:
                self.event(SpanDropEvent(span_id=id_, err_str=str(e)))
                self.span_drop(id_=id_, bound_args=bound_args, instance=instance, err=e)
                raise
            else:
                self.span_exit(
                    id_=id_, bound_args=bound_args, instance=instance, result=result
                )
                return result
            finally:
                # clean up
                active_span_id.reset(token)

        if inspect.iscoroutinefunction(func):
            return async_wrapper(func)  # type: ignore
        else:
            return wrapper(func)  # type: ignore

    @property
    def log_name(self) -> str:
        """Name to be used in logging."""
        if self.parent:
            return f"{self.parent.name}.{self.name}"
        else:
            return self.name
class Manager:
    """Registry owning every Dispatcher in the tree, keyed by name."""

    def __init__(self, root: Dispatcher) -> None:
        self.dispatchers: Dict[str, Dispatcher] = {root.name: root}

    def add_dispatcher(self, d: Dispatcher) -> None:
        """Register *d*; an existing dispatcher with the same name wins."""
        if d.name not in self.dispatchers:
            self.dispatchers[d.name] = d
Dispatcher.model_rebuild()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-instrumentation/src/llama_index_instrumentation/dispatcher.py",
"license": "MIT License",
"lines": 381,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-instrumentation/src/llama_index_instrumentation/event_handlers/base.py | from abc import abstractmethod
from typing import Any
from pydantic import BaseModel, ConfigDict
from llama_index_instrumentation.base import BaseEvent
class BaseEventHandler(BaseModel):
    """Base callback handler that can be used to track event starts and ends."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    @classmethod
    def class_name(cls) -> str:
        """Class name."""
        return "BaseEventHandler"

    @abstractmethod
    def handle(self, event: BaseEvent, **kwargs: Any) -> Any:
        """Logic for handling event."""

    async def ahandle(self, event: BaseEvent, **kwargs: Any) -> Any:
        """Async handling; by default delegates to the sync ``handle``."""
        return self.handle(event, **kwargs)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-instrumentation/src/llama_index_instrumentation/event_handlers/base.py",
"license": "MIT License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-instrumentation/src/llama_index_instrumentation/event_handlers/null.py | from typing import Any
from llama_index_instrumentation.base import BaseEvent
from .base import BaseEventHandler
class NullEventHandler(BaseEventHandler):
    """Event handler that deliberately discards every event."""

    @classmethod
    def class_name(cls) -> str:
        """Class name."""
        return "NullEventHandler"

    def handle(self, event: BaseEvent, **kwargs: Any) -> Any:
        """Drop the event without doing anything."""
        return None
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-instrumentation/src/llama_index_instrumentation/event_handlers/null.py",
"license": "MIT License",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-instrumentation/src/llama_index_instrumentation/events/span.py | from llama_index_instrumentation.base import BaseEvent
class SpanDropEvent(BaseEvent):
    """
    SpanDropEvent.

    Fired when a span exits early, typically because an exception escaped
    the instrumented call.

    Args:
        err_str (str): Error string.

    """

    # String form of the error that caused the span to be dropped.
    err_str: str

    @classmethod
    def class_name(cls) -> str:
        """Class name."""
        return "SpanDropEvent"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-instrumentation/src/llama_index_instrumentation/events/span.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-instrumentation/src/llama_index_instrumentation/span/base.py | from typing import Any, Dict, Optional
from uuid import uuid4
from pydantic import BaseModel, ConfigDict, Field
class BaseSpan(BaseModel):
    """Base data class representing a span."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Unique identifier for this span.
    id_: str = Field(default_factory=lambda: str(uuid4()), description="Id of span.")
    # Id of the parent span, or None for a root span.
    parent_id: Optional[str] = Field(default=None, description="Id of parent span.")
    # Free-form tags; default_factory avoids declaring a shared mutable default.
    tags: Dict[str, Any] = Field(default_factory=dict)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-instrumentation/src/llama_index_instrumentation/span/base.py",
"license": "MIT License",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-instrumentation/src/llama_index_instrumentation/span/simple.py | from datetime import datetime
from typing import Dict, Optional
from pydantic import Field
from .base import BaseSpan
class SimpleSpan(BaseSpan):
    """Simple span class recording start/end times and duration."""

    # Wall-clock time the span was opened; plain reference instead of a lambda.
    start_time: datetime = Field(default_factory=datetime.now)
    # Set when the span exits; None while the span is still open.
    end_time: Optional[datetime] = Field(default=None)
    duration: float = Field(default=0.0, description="Duration of span in seconds.")
    # Extra data, e.g. the error message when a span is dropped.
    metadata: Optional[Dict] = Field(default=None)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-instrumentation/src/llama_index_instrumentation/span/simple.py",
"license": "MIT License",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-instrumentation/src/llama_index_instrumentation/span_handlers/base.py | import inspect
import threading
from abc import abstractmethod
from contextvars import copy_context
from functools import partial
from typing import Any, Callable, Dict, Generic, List, Optional, Tuple, TypeVar
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr
from llama_index_instrumentation.span.base import BaseSpan
T = TypeVar("T", bound=BaseSpan)
class Thread(threading.Thread):
    """
    A wrapper for threading.Thread that copies the current context and uses the copy to run the target.
    """

    def __init__(
        self,
        group: Optional[Any] = None,
        target: Optional[Callable[..., Any]] = None,
        name: Optional[str] = None,
        args: Tuple[Any, ...] = (),
        kwargs: Optional[Dict[str, Any]] = None,
        *,
        daemon: Optional[bool] = None,
    ) -> None:
        # Freeze target + arguments into a single callable and hand it to the
        # parent thread via copy_context().run, so the child thread sees a
        # snapshot of the caller's context variables.
        if target is None:
            run_args: Tuple[Any, ...] = ()
        else:
            call_kwargs = kwargs if isinstance(kwargs, dict) else {}
            run_args = (partial(target, *args, **call_kwargs),)
        super().__init__(
            group=group,
            target=copy_context().run,
            name=name,
            args=run_args,
            daemon=daemon,
        )
class BaseSpanHandler(BaseModel, Generic[T]):
    """Base class for span handlers: tracks open, completed, and dropped spans."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    open_spans: Dict[str, T] = Field(
        default_factory=dict, description="Dictionary of open spans."
    )
    completed_spans: List[T] = Field(
        default_factory=list, description="List of completed spans."
    )
    dropped_spans: List[T] = Field(
        default_factory=list, description="List of dropped spans."
    )
    current_span_ids: Dict[Any, Optional[str]] = Field(
        default_factory=dict, description="Id of current spans in a given thread."
    )
    # Lazily created in the `lock` property so each instance gets its own lock.
    _lock: Optional[threading.Lock] = PrivateAttr()

    def __init__(
        self,
        open_spans: Optional[Dict[str, T]] = None,
        completed_spans: Optional[List[T]] = None,
        dropped_spans: Optional[List[T]] = None,
        current_span_ids: Optional[Dict[Any, str]] = None,
    ):
        """Initialize; None sentinels replace mutable default arguments so
        separate instances never share container state."""
        super().__init__(
            open_spans=open_spans if open_spans is not None else {},
            completed_spans=completed_spans if completed_spans is not None else [],
            dropped_spans=dropped_spans if dropped_spans is not None else [],
            current_span_ids=current_span_ids if current_span_ids is not None else {},
        )
        self._lock = None

    @classmethod
    def class_name(cls) -> str:
        """Class name."""
        return "BaseSpanHandler"

    @property
    def lock(self) -> threading.Lock:
        # Created on first use rather than in __init__.
        if self._lock is None:
            self._lock = threading.Lock()
        return self._lock

    def span_enter(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        parent_id: Optional[str] = None,
        tags: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> None:
        """Logic for entering a span."""
        if id_ in self.open_spans:
            pass  # should probably raise an error here
        else:
            span = self.new_span(
                id_=id_,
                bound_args=bound_args,
                instance=instance,
                parent_span_id=parent_id,
                tags=tags,
            )
            if span:
                with self.lock:
                    self.open_spans[id_] = span

    def span_exit(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        result: Optional[Any] = None,
        **kwargs: Any,
    ) -> None:
        """Logic for exiting a span."""
        span = self.prepare_to_exit_span(
            id_=id_, bound_args=bound_args, instance=instance, result=result
        )
        if span:
            with self.lock:
                del self.open_spans[id_]

    def span_drop(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        err: Optional[BaseException] = None,
        **kwargs: Any,
    ) -> None:
        """Logic for dropping a span i.e. early exit."""
        span = self.prepare_to_drop_span(
            id_=id_, bound_args=bound_args, instance=instance, err=err
        )
        if span:
            with self.lock:
                del self.open_spans[id_]

    @abstractmethod
    def new_span(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        parent_span_id: Optional[str] = None,
        tags: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> Optional[T]:
        """
        Create a span.

        Subclasses of BaseSpanHandler should create the respective span type T
        and return it. Only NullSpanHandler should return a None here.
        """
        ...

    @abstractmethod
    def prepare_to_exit_span(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        result: Optional[Any] = None,
        **kwargs: Any,
    ) -> Optional[T]:
        """
        Logic for preparing to exit a span.

        Subclasses of BaseSpanHandler should return back the specific span T
        that is to be exited. If None is returned, then the span won't actually
        be exited.
        """
        ...

    @abstractmethod
    def prepare_to_drop_span(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        err: Optional[BaseException] = None,
        **kwargs: Any,
    ) -> Optional[T]:
        """
        Logic for preparing to drop a span.

        Subclasses of BaseSpanHandler should return back the specific span T
        that is to be dropped. If None is returned, then the span won't actually
        be dropped.
        """
        ...
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-instrumentation/src/llama_index_instrumentation/span_handlers/base.py",
"license": "MIT License",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-instrumentation/src/llama_index_instrumentation/span_handlers/null.py | import inspect
from typing import Any, Dict, Optional
from llama_index_instrumentation.span.base import BaseSpan
from .base import BaseSpanHandler
class NullSpanHandler(BaseSpanHandler[BaseSpan]):
    """Span handler that deliberately records nothing."""

    @classmethod
    def class_name(cls) -> str:
        """Class name."""
        return "NullSpanHandler"

    def span_enter(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        parent_id: Optional[str] = None,
        tags: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> None:
        """No-op on span entry."""
        return None

    def span_exit(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        result: Optional[Any] = None,
        **kwargs: Any,
    ) -> None:
        """No-op on span exit."""
        return None

    def new_span(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        parent_span_id: Optional[str] = None,
        tags: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> None:
        """Create no span; returning None keeps nothing open."""
        return None

    def prepare_to_exit_span(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        result: Optional[Any] = None,
        **kwargs: Any,
    ) -> None:
        """No-op when preparing to exit a span."""
        return None

    def prepare_to_drop_span(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        err: Optional[BaseException] = None,
        **kwargs: Any,
    ) -> None:
        """No-op when preparing to drop a span."""
        return None
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-instrumentation/src/llama_index_instrumentation/span_handlers/null.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-instrumentation/src/llama_index_instrumentation/span_handlers/simple.py | import inspect
import warnings
from datetime import datetime
from functools import reduce
from typing import TYPE_CHECKING, Any, Dict, List, Optional, cast
from llama_index_instrumentation.span.simple import SimpleSpan
from .base import BaseSpanHandler
if TYPE_CHECKING:
from treelib import Tree
class SimpleSpanHandler(BaseSpanHandler[SimpleSpan]):
    """Span Handler that manages SimpleSpan's."""

    @classmethod  # was missing: every sibling handler declares class_name as a classmethod
    def class_name(cls) -> str:
        """Class name."""
        return "SimpleSpanHandler"

    def new_span(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        parent_span_id: Optional[str] = None,
        tags: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> SimpleSpan:
        """Create a span."""
        return SimpleSpan(id_=id_, parent_id=parent_span_id, tags=tags or {})

    def prepare_to_exit_span(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        result: Optional[Any] = None,
        **kwargs: Any,
    ) -> SimpleSpan:
        """Logic for preparing to exit a span: stamp end time and duration."""
        span = self.open_spans[id_]
        span = cast(SimpleSpan, span)
        span.end_time = datetime.now()
        span.duration = (span.end_time - span.start_time).total_seconds()
        with self.lock:
            self.completed_spans += [span]
        return span

    def prepare_to_drop_span(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        err: Optional[BaseException] = None,
        **kwargs: Any,
    ) -> Optional[SimpleSpan]:
        """Logic for dropping a span: record the error and archive it."""
        if id_ in self.open_spans:
            with self.lock:
                span = self.open_spans[id_]
                span.metadata = {"error": str(err)}
                self.dropped_spans += [span]
            return span
        return None

    def _get_parents(self) -> List[SimpleSpan]:
        """Helper method to get all parent/root spans."""
        all_spans = self.completed_spans + self.dropped_spans
        return [s for s in all_spans if s.parent_id is None]

    def _build_tree_by_parent(
        self, parent: SimpleSpan, acc: List[SimpleSpan], spans: List[SimpleSpan]
    ) -> List[SimpleSpan]:
        """Builds the tree by parent root."""
        if not spans:
            return acc
        children = [s for s in spans if s.parent_id == parent.id_]
        if not children:
            return acc
        updated_spans = [s for s in spans if s not in children]
        children_trees = [
            self._build_tree_by_parent(
                parent=c, acc=[c], spans=[s for s in updated_spans if c != s]
            )
            for c in children
        ]
        return acc + reduce(lambda x, y: x + y, children_trees)

    def _get_trace_trees(self) -> List["Tree"]:
        """Method for getting trace trees."""
        try:
            from treelib import Tree
        except ImportError as e:
            # Chain the original error so the real cause stays visible.
            raise ImportError(
                "`treelib` package is missing. Please install it by using "
                "`pip install treelib`."
            ) from e
        all_spans = self.completed_spans + self.dropped_spans
        # NOTE: appends during iteration are intentional; synthesized
        # "-MISSING" roots have parent_id None and are skipped when reached.
        for s in all_spans:
            if s.parent_id is None:
                continue
            if not any(ns.id_ == s.parent_id for ns in all_spans):
                warnings.warn(f"Parent with id {s.parent_id} missing from spans")
                s.parent_id += "-MISSING"
                all_spans.append(SimpleSpan(id_=s.parent_id, parent_id=None))

        parents = self._get_parents()
        span_groups = []
        for p in parents:
            this_span_group = self._build_tree_by_parent(
                parent=p, acc=[p], spans=[s for s in all_spans if s != p]
            )
            sorted_span_group = sorted(this_span_group, key=lambda x: x.start_time)
            span_groups.append(sorted_span_group)

        trees = []
        tree = Tree()
        for grp in span_groups:
            for span in grp:
                if span.parent_id is None:
                    # complete old tree unless its empty (i.e., start of loop)
                    if tree.all_nodes():
                        trees.append(tree)
                        # start new tree
                        tree = Tree()
                tree.create_node(
                    tag=f"{span.id_} ({span.duration})",
                    identifier=span.id_,
                    parent=span.parent_id,
                    data=span.start_time,
                )
        trees.append(tree)
        return trees

    def print_trace_trees(self) -> None:
        """Method for viewing trace trees."""
        trees = self._get_trace_trees()
        for tree in trees:
            print(tree.show(stdout=False, sorting=True, key=lambda node: node.data))
            print("")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-instrumentation/src/llama_index_instrumentation/span_handlers/simple.py",
"license": "MIT License",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-instrumentation/tests/test_dispatcher.py | import asyncio
import inspect
import threading
import time
from abc import abstractmethod
from asyncio import (
AbstractEventLoop,
CancelledError,
Queue,
gather,
get_event_loop,
sleep,
)
from collections import Counter
from concurrent.futures import ThreadPoolExecutor
from random import random
from threading import Lock, Thread
from typing import Any, Callable, Dict, List, Optional
from unittest.mock import MagicMock, patch
import llama_index_instrumentation as instrument
import pytest
import wrapt
from llama_index_instrumentation import DispatcherSpanMixin
from llama_index_instrumentation.base import BaseEvent
from llama_index_instrumentation.dispatcher import Dispatcher, Manager, instrument_tags
from llama_index_instrumentation.event_handlers import BaseEventHandler
from llama_index_instrumentation.span import BaseSpan
from llama_index_instrumentation.span_handlers import BaseSpanHandler
from llama_index_instrumentation.span_handlers.base import Thread
# Shared dispatcher under test, plus fixed exception instances so tests can
# assert identity on re-raised errors.
dispatcher = instrument.get_dispatcher("test")
value_error = ValueError("value error")
cancelled_error = CancelledError("cancelled error")
class _TestStartEvent(BaseEvent):
    """Marker event emitted at the start of instrumented calls in tests."""

    @classmethod
    def class_name(cls):
        return "_TestStartEvent"
class _TestEndEvent(BaseEvent):
    """Marker event emitted at the end of instrumented calls in tests."""

    @classmethod
    def class_name(cls):
        return "_TestEndEvent"
class _TestEventHandler(BaseEventHandler):
    """Collects every dispatched event so tests can inspect them."""

    # NOTE(review): mutable class-level default on a pydantic field —
    # presumably copied per instance by pydantic; verify if instances are
    # expected to have independent lists.
    events: List[BaseEvent] = []

    @classmethod
    def class_name(cls):
        return "_TestEventHandler"

    def handle(self, e: BaseEvent):  # type:ignore
        self.events.append(e)
class _TestAsyncEventHandler(BaseEventHandler):
    """Handler with a real async path; counts ahandle calls separately."""

    events: List[BaseEvent] = []
    # Number of times the async path was used.
    async_calls: int = 0

    @classmethod
    def class_name(cls):
        return "_TestAsyncEventHandler"

    def handle(self, e: BaseEvent):  # type:ignore
        self.events.append(e)

    async def ahandle(self, e: BaseEvent, **kwargs: Any) -> Any:
        self.async_calls += 1
        await asyncio.sleep(0.01)  # Simulate async work
        self.events.append(e)
        return None
@dispatcher.span
def func(a, b=3, **kwargs):
    """Instrumented sync function returning a + b."""
    return a + b
@dispatcher.span
async def async_func(a, b=3, **kwargs):
    """Instrumented async function returning a + b."""
    return a + b
@dispatcher.span
def func_exc(a, b=3, c=4, **kwargs):
    """Instrumented sync function that raises the module-level ValueError."""
    raise value_error
@dispatcher.span
async def async_func_exc(a, b=3, c=4, **kwargs):
    """Instrumented async function that raises the module-level CancelledError."""
    raise cancelled_error
@dispatcher.span
def func_with_event(a, b=3, **kwargs):
    """Instrumented sync function that emits a start event inside its span."""
    dispatcher.event(_TestStartEvent())
@dispatcher.span
async def async_func_with_event(a, b=3, **kwargs):
    """Emits a start event, yields to the loop, then emits an end event
    within the same span."""
    dispatcher.event(_TestStartEvent())
    await asyncio.sleep(0.1)
    dispatcher.event(_TestEndEvent())
# Can remove this test once dispatcher.get_dispatch_event is safely dopped.
@dispatcher.span
def func_with_event_backwards_compat(a, b=3, **kwargs):
dispatch_event = dispatcher.get_dispatch_event()
dispatch_event(_TestStartEvent())
class _TestObject:
    """Method variants of the module-level instrumented functions, used to
    verify that spans capture the bound instance."""

    @dispatcher.span
    def func(self, a, b=3, **kwargs):
        return a + b

    @dispatcher.span
    async def async_func(self, a, b=3, **kwargs):
        return a + b

    @dispatcher.span
    def func_exc(self, a, b=3, c=4, **kwargs):
        raise value_error

    @dispatcher.span
    async def async_func_exc(self, a, b=3, c=4, **kwargs):
        raise cancelled_error

    @dispatcher.span
    def func_with_event(self, a, b=3, **kwargs):
        dispatcher.event(_TestStartEvent())

    @dispatcher.span
    async def async_func_with_event(self, a, b=3, **kwargs):
        dispatcher.event(_TestStartEvent())
        await asyncio.sleep(0.1)
        await self.async_func(1)  # this should create a new span_id
        # that is fine because we have dispatch_event
        dispatcher.event(_TestEndEvent())

    # Can remove this test once dispatcher.get_dispatch_event is safely dropped.
    @dispatcher.span
    def func_with_event_backwards_compat(self, a, b=3, **kwargs):
        dispatch_event = dispatcher.get_dispatch_event()
        dispatch_event(_TestStartEvent())
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_dispatcher_span_args(mock_uuid, mock_span_enter, mock_span_exit):
    """span_enter/span_exit receive the expected kwargs for a free function."""
    # arrange
    # NOTE: mock parameters map bottom-up to the decorator stack.
    mock_uuid.uuid4.return_value = "mock"
    # act
    result = func(3, c=5)
    # assert
    # span_enter
    span_id = f"{func.__qualname__}-mock"
    bound_args = inspect.signature(func).bind(3, c=5)
    mock_span_enter.assert_called_once()
    args, kwargs = mock_span_enter.call_args
    assert args == ()
    assert kwargs == {
        "id_": span_id,
        "bound_args": bound_args,
        "instance": None,
        "parent_id": None,
        "tags": {},
    }
    # span_exit
    args, kwargs = mock_span_exit.call_args
    assert args == ()
    assert kwargs == {
        "id_": span_id,
        "bound_args": bound_args,
        "instance": None,
        "result": result,
    }


@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_dispatcher_span_args_with_instance(mock_uuid, mock_span_enter, mock_span_exit):
    """span_enter/span_exit receive the bound instance for a method call."""
    # arrange
    mock_uuid.uuid4.return_value = "mock"
    # act
    instance = _TestObject()
    result = instance.func(3, c=5)
    # assert
    # span_enter
    span_id = f"{instance.func.__qualname__}-mock"
    bound_args = inspect.signature(instance.func).bind(3, c=5)
    mock_span_enter.assert_called_once()
    args, kwargs = mock_span_enter.call_args
    assert args == ()
    assert kwargs == {
        "id_": span_id,
        "bound_args": bound_args,
        "instance": instance,
        "parent_id": None,
        "tags": {},
    }
    # span_exit
    args, kwargs = mock_span_exit.call_args
    assert args == ()
    assert kwargs == {
        "id_": span_id,
        "bound_args": bound_args,
        "instance": instance,
        "result": result,
    }


@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_drop")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_dispatcher_span_drop_args(
    mock_uuid: MagicMock,
    mock_span_enter: MagicMock,
    mock_span_drop: MagicMock,
    mock_span_exit: MagicMock,
):
    """A raising method triggers span_drop (not span_exit) with the error."""
    # arrange
    mock_uuid.uuid4.return_value = "mock"
    instance = _TestObject()
    with pytest.raises(ValueError):
        _ = instance.func_exc(a=3, b=5, c=2, d=5)
    # assert
    # span_enter
    mock_span_enter.assert_called_once()
    # span_drop
    mock_span_drop.assert_called_once()
    span_id = f"{instance.func_exc.__qualname__}-mock"
    bound_args = inspect.signature(instance.func_exc).bind(a=3, b=5, c=2, d=5)
    args, kwargs = mock_span_drop.call_args
    assert args == ()
    assert kwargs == {
        "id_": span_id,
        "bound_args": bound_args,
        "instance": instance,
        "err": value_error,
    }
    # span_exit
    mock_span_exit.assert_not_called()


@pytest.mark.asyncio
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
async def test_dispatcher_async_span_args(mock_uuid, mock_span_enter, mock_span_exit):
    """Async free functions produce the same span_enter/span_exit kwargs."""
    # arrange
    mock_uuid.uuid4.return_value = "mock"
    # act
    result = await async_func(a=3, c=5)
    # assert
    # span_enter
    span_id = f"{async_func.__qualname__}-mock"
    bound_args = inspect.signature(async_func).bind(a=3, c=5)
    mock_span_enter.assert_called_once()
    args, kwargs = mock_span_enter.call_args
    assert args == ()
    assert kwargs == {
        "id_": span_id,
        "bound_args": bound_args,
        "instance": None,
        "parent_id": None,
        "tags": {},
    }
    # span_exit
    args, kwargs = mock_span_exit.call_args
    assert args == ()
    assert kwargs == {
        "id_": span_id,
        "bound_args": bound_args,
        "instance": None,
        "result": result,
    }


@pytest.mark.asyncio
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
async def test_dispatcher_async_span_args_with_instance(
    mock_uuid, mock_span_enter, mock_span_exit
):
    """Async methods report the bound instance in span_enter/span_exit."""
    # arrange
    mock_uuid.uuid4.return_value = "mock"
    # act
    instance = _TestObject()
    result = await instance.async_func(a=3, c=5)
    # assert
    # span_enter
    span_id = f"{instance.async_func.__qualname__}-mock"
    bound_args = inspect.signature(instance.async_func).bind(a=3, c=5)
    mock_span_enter.assert_called_once()
    args, kwargs = mock_span_enter.call_args
    assert args == ()
    assert kwargs == {
        "id_": span_id,
        "bound_args": bound_args,
        "instance": instance,
        "parent_id": None,
        "tags": {},
    }
    # span_exit
    args, kwargs = mock_span_exit.call_args
    assert args == ()
    assert kwargs == {
        "id_": span_id,
        "bound_args": bound_args,
        "instance": instance,
        "result": result,
    }


@pytest.mark.asyncio
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_drop")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
async def test_dispatcher_async_span_drop_args(
    mock_uuid: MagicMock,
    mock_span_enter: MagicMock,
    mock_span_drop: MagicMock,
    mock_span_exit: MagicMock,
):
    """A raising coroutine triggers span_drop with the raised error."""
    # arrange
    mock_uuid.uuid4.return_value = "mock"
    with pytest.raises(CancelledError):
        # act
        _ = await async_func_exc(a=3, b=5, c=2, d=5)
    # assert
    # span_enter
    mock_span_enter.assert_called_once()
    # span_drop
    mock_span_drop.assert_called_once()
    span_id = f"{async_func_exc.__qualname__}-mock"
    bound_args = inspect.signature(async_func_exc).bind(a=3, b=5, c=2, d=5)
    args, kwargs = mock_span_drop.call_args
    assert args == ()
    assert kwargs == {
        "id_": span_id,
        "bound_args": bound_args,
        "instance": None,
        "err": cancelled_error,
    }
    # span_exit
    mock_span_exit.assert_not_called()


@pytest.mark.asyncio
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_drop")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
async def test_dispatcher_async_span_drop_args_with_instance(
    mock_uuid: MagicMock,
    mock_span_enter: MagicMock,
    mock_span_drop: MagicMock,
    mock_span_exit: MagicMock,
):
    """A raising async method triggers span_drop with the bound instance."""
    # arrange
    mock_uuid.uuid4.return_value = "mock"
    instance = _TestObject()
    with pytest.raises(CancelledError):
        _ = await instance.async_func_exc(a=3, b=5, c=2, d=5)
    # assert
    # span_enter
    mock_span_enter.assert_called_once()
    # span_drop
    mock_span_drop.assert_called_once()
    span_id = f"{instance.async_func_exc.__qualname__}-mock"
    bound_args = inspect.signature(instance.async_func_exc).bind(a=3, b=5, c=2, d=5)
    args, kwargs = mock_span_drop.call_args
    assert args == ()
    assert kwargs == {
        "id_": span_id,
        "bound_args": bound_args,
        "instance": instance,
        "err": cancelled_error,
    }
    # span_exit
    mock_span_exit.assert_not_called()
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_drop")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_dispatcher_fire_event(
    mock_uuid: MagicMock,
    mock_span_enter: MagicMock,
    mock_span_drop: MagicMock,
    mock_span_exit: MagicMock,
):
    """Events fired inside a spanned function carry that span's id."""
    # arrange
    mock_uuid.uuid4.return_value = "mock"
    event_handler = _TestEventHandler()
    dispatcher.add_event_handler(event_handler)
    # act
    _ = func_with_event(3, c=5)
    # assert
    span_id = f"{func_with_event.__qualname__}-mock"
    assert all(e.span_id == span_id for e in event_handler.events)
    # span_enter
    mock_span_enter.assert_called_once()
    # span
    mock_span_drop.assert_not_called()
    # span_exit
    mock_span_exit.assert_called_once()


@pytest.mark.asyncio
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_drop")
@patch.object(Dispatcher, "span_enter")
async def test_dispatcher_async_fire_event(
    mock_span_enter: MagicMock,
    mock_span_drop: MagicMock,
    mock_span_exit: MagicMock,
):
    """Concurrent spanned coroutines keep their events on distinct span ids."""
    # arrange
    event_handler = _TestEventHandler()
    dispatcher.add_event_handler(event_handler)
    # act
    tasks = [
        async_func_with_event(a=3, c=5),
        async_func_with_event(5),
        async_func_with_event(4),
    ]
    _ = await asyncio.gather(*tasks)
    # assert
    # Each task fires a start and an end event, so every span id appears twice.
    span_ids = [e.span_id for e in event_handler.events]
    id_counts = Counter(span_ids)
    assert set(id_counts.values()) == {2}
    # span_enter
    assert mock_span_enter.call_count == 3
    # span
    mock_span_drop.assert_not_called()
    # span_exit
    assert mock_span_exit.call_count == 3


@pytest.mark.asyncio
@pytest.mark.parametrize("use_async", [True, False])
@patch.object(Dispatcher, "span_enter")
async def test_dispatcher_attaches_tags_to_events_and_spans(
    mock_span_enter: MagicMock,
    use_async: bool,
):
    """instrument_tags attaches the active tags to both spans and events."""
    event_handler = _TestEventHandler()
    dispatcher.add_event_handler(event_handler)
    test_tags = {"test_tag_key": "test_tag_value"}
    # Check that tags are set when using context manager
    with instrument_tags(test_tags):
        if use_async:
            await async_func_with_event(a=3, c=5)
        else:
            func_with_event(a=3, c=5)
    mock_span_enter.assert_called_once()
    assert mock_span_enter.call_args[1]["tags"] == test_tags
    assert all(e.tags == test_tags for e in event_handler.events)


@patch.object(Dispatcher, "span_enter")
def test_dispatcher_attaches_tags_to_concurrent_events(
    mock_span_enter: MagicMock,
):
    """Tags are context-local: concurrent threads see only their own tags."""
    event_handler = _TestEventHandler()
    dispatcher.add_event_handler(event_handler)
    num_functions = 5
    test_tags = [{"test_tag_key": num} for num in range(num_functions)]
    test_tags_set = {str(tag) for tag in test_tags}

    def run_func_with_tags(tag):
        with instrument_tags(tag):
            func_with_event(3, c=5)

    # Run functions concurrently
    futures = []
    with ThreadPoolExecutor(max_workers=2) as executor:
        for tag in test_tags:
            futures.append(executor.submit(run_func_with_tags, tag))
        for future in futures:
            future.result()
    # Ensure that each function recorded a span and event with the tags
    assert len(mock_span_enter.call_args_list) == num_functions
    assert len(event_handler.events) == num_functions
    actual_span_tags = {
        str(call_kwargs["tags"]) for _, call_kwargs in mock_span_enter.call_args_list
    }
    actual_event_tags = {str(e.tags) for e in event_handler.events}
    assert actual_span_tags == test_tags_set
    assert actual_event_tags == test_tags_set


@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_drop")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_dispatcher_fire_event_with_instance(
    mock_uuid, mock_span_enter, mock_span_drop, mock_span_exit
):
    """Events fired inside a spanned method carry the method's span id."""
    # arrange
    mock_uuid.uuid4.return_value = "mock"
    event_handler = _TestEventHandler()
    dispatcher.add_event_handler(event_handler)
    # act
    instance = _TestObject()
    _ = instance.func_with_event(a=3, c=5)
    # assert
    span_id = f"{instance.func_with_event.__qualname__}-mock"
    assert all(e.span_id == span_id for e in event_handler.events)
    # span_enter
    mock_span_enter.assert_called_once()
    # span
    mock_span_drop.assert_not_called()
    # span_exit
    mock_span_exit.assert_called_once()


@pytest.mark.asyncio
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_drop")
@patch.object(Dispatcher, "span_enter")
async def test_dispatcher_async_fire_event_with_instance(
    mock_span_enter: MagicMock,
    mock_span_drop: MagicMock,
    mock_span_exit: MagicMock,
):
    """Nested spanned calls open extra spans but events stay paired per span."""
    # arrange
    event_handler = _TestEventHandler()
    dispatcher.add_event_handler(event_handler)
    # act
    instance = _TestObject()
    tasks = [
        instance.async_func_with_event(a=3, c=5),
        instance.async_func_with_event(5),
    ]
    _ = await asyncio.gather(*tasks)
    # assert
    span_ids = [e.span_id for e in event_handler.events]
    id_counts = Counter(span_ids)
    assert set(id_counts.values()) == {2}
    # span_enter
    # 2 outer spans + 2 nested self.async_func spans = 4 enters/exits.
    assert mock_span_enter.call_count == 4
    # span
    mock_span_drop.assert_not_called()
    # span_exit
    assert mock_span_exit.call_count == 4
def test_context_nesting():
    """Parent-child span links stay correct across mixed threads and tasks.

    Builds ``runs`` binary trees of spans where nodes recurse alternately via
    asyncio tasks and raw threads, then verifies every child span points at
    its true parent.
    """
    # arrange
    # A binary tree of parent-child spans
    h = 5  # height of binary tree
    s = 2 ** (h + 1) - 1  # number of spans per tree
    runs = 2  # number of trees (in parallel)
    # Below is a tree (r=1) with h=3 (s=15).
    # Tn: n-th span run in thread
    # An: n-th span run in async
    #                 A1
    #         ┌───────┴───────┐
    #         A2              A3
    #     ┌───┴───┐       ┌───┴───┐
    #     T4      T5      A6      A7
    #   ┌─┴─┐   ┌─┴─┐   ┌─┴─┐   ┌─┴─┐
    #   T8  T9 A10 A11 T12 T13 A14 A15
    # Note that child.n // 2 == parent.n, e.g. 11 // 2 == 5.
    # We'll check that the parent-child associations are correct.

    class Span(BaseSpan):
        r: int  # tree id
        n: int  # span id (per tree)

    class Event(BaseEvent):
        r: int  # tree id
        n: int  # span id (per tree)

    lock = Lock()
    spans: Dict[str, Span] = {}
    events: List[Event] = []

    class SpanHandler(BaseSpanHandler):
        def new_span(
            self,
            id_: str,
            bound_args: inspect.BoundArguments,
            instance: Optional[Any] = None,
            parent_span_id: Optional[str] = None,
            tags: Optional[Dict[str, Any]] = None,
            **kwargs: Any,
        ) -> None:
            # (r, n) are the first two positional args of bar()/foo().
            r, n = bound_args.args[:2]
            span = Span(r=r, n=n, id_=id_, parent_id=parent_span_id)
            with lock:
                spans[id_] = span

        def prepare_to_drop_span(self, *args: Any, **kwargs: Any) -> None: ...

        def prepare_to_exit_span(self, *args: Any, **kwargs: Any) -> None: ...

    class EventHandler(BaseEventHandler):
        def handle(self, event: Event, **kwargs) -> None:  # type: ignore
            with lock:
                events.append(event)

    dispatcher = Dispatcher(
        event_handlers=[EventHandler()],
        span_handlers=[SpanHandler()],
        propagate=False,
    )

    @dispatcher.span
    def bar(r: int, n: int, callback: Callable[[], None] = lambda: None) -> None:
        dispatcher.event(Event(r=r, n=n))
        if n > 2**h - 1:
            callback()
            return
        if n % 2:
            asyncio.run(_foo(r, n))
        else:
            # NOTE: this is the instrumentation Thread subclass, which
            # propagates the active span context into the child thread.
            t0 = Thread(target=bar, args=(r, n * 2))
            t1 = Thread(target=bar, args=(r, n * 2 + 1))
            t0.start()
            t1.start()
            time.sleep(0.01)
            t0.join()
            t1.join()
        callback()

    @dispatcher.span
    async def foo(r: int, n: int) -> None:
        dispatcher.event(Event(r=r, n=n))
        if n > 2**h - 1:
            return
        if n % 2:
            await _foo(r, n)
        else:
            q, loop = Queue(), get_event_loop()
            Thread(target=bar, args=(r, n * 2, _callback(q, loop))).start()
            Thread(target=bar, args=(r, n * 2 + 1, _callback(q, loop))).start()
            await gather(q.get(), q.get())

    async def _foo(r: int, n: int) -> None:
        await gather(foo(r, n * 2), foo(r, n * 2 + 1), sleep(0.01))

    def _callback(q: Queue, loop: AbstractEventLoop) -> Callable[[], None]:
        # BUGFIX: pass the function and its argument separately so that
        # q.put_nowait(1) runs on the event-loop thread.  The previous form
        # `loop.call_soon_threadsafe(q.put_nowait(1))` called put_nowait
        # eagerly on the worker thread (asyncio.Queue is not thread-safe)
        # and then scheduled its None return value as the "callback".
        return lambda: loop.call_soon_threadsafe(q.put_nowait, 1)

    # act
    # Use regular thread to ensure that `Token.MISSING` is being handled.
    regular_threads = [
        (
            threading.Thread(target=asyncio.run, args=(foo(r, 1),))
            if r % 2
            else threading.Thread(target=bar, args=(r, 1))
        )
        for r in range(runs)
    ]
    [t.start() for t in regular_threads]
    [t.join() for t in regular_threads]
    # assert
    # parent-child associations should be correct
    assert sorted(span.n for span in spans.values()) == sorted(
        list(range(1, s + 1)) * runs
    )
    for span in spans.values():
        if span.n > 1:
            if not span.parent_id:
                print(span)
            assert span.r == spans[span.parent_id].r  # same tree  # type:ignore
            assert span.n // 2 == spans[span.parent_id].n  # type:ignore
    # # event-span associations should be correct
    # assert sorted(event.n for event in events) == sorted(list(range(1, s + 1)) * runs)
    # for event in events:
    #     assert event.r == spans[event.span_id].r  # same tree
    #     assert event.n == spans[event.span_id].n  # same span
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_drop")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_dispatcher_fire_event_backwards_compat(
    mock_uuid: MagicMock,
    mock_span_enter: MagicMock,
    mock_span_drop: MagicMock,
    mock_span_exit: MagicMock,
):
    """Legacy get_dispatch_event() still attaches the correct span id."""
    # arrange
    mock_uuid.uuid4.return_value = "mock"
    event_handler = _TestEventHandler()
    dispatcher.add_event_handler(event_handler)
    # act
    _ = func_with_event_backwards_compat(3, c=5)
    # assert
    span_id = f"{func_with_event_backwards_compat.__qualname__}-mock"
    assert all(e.span_id == span_id for e in event_handler.events)
    # span_enter
    mock_span_enter.assert_called_once()
    # span
    mock_span_drop.assert_not_called()
    # span_exit
    mock_span_exit.assert_called_once()


@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_drop")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_dispatcher_fire_event_with_instance_backwards_compat(
    mock_uuid, mock_span_enter, mock_span_drop, mock_span_exit
):
    """Legacy get_dispatch_event() works for methods as well."""
    # arrange
    mock_uuid.uuid4.return_value = "mock"
    event_handler = _TestEventHandler()
    dispatcher.add_event_handler(event_handler)
    # act
    instance = _TestObject()
    _ = instance.func_with_event_backwards_compat(a=3, c=5)
    # assert
    span_id = f"{instance.func_with_event_backwards_compat.__qualname__}-mock"
    assert all(e.span_id == span_id for e in event_handler.events)
    # span_enter
    mock_span_enter.assert_called_once()
    # span
    mock_span_drop.assert_not_called()
    # span_exit
    mock_span_exit.assert_called_once()


@patch.object(Dispatcher, "span_enter")
def test_span_decorator_is_idempotent(mock_span_enter):
    """Stacking @dispatcher.span multiple times creates only one span."""
    x, z = random(), dispatcher.span
    assert z(z(z(lambda: x)))() == x
    mock_span_enter.assert_called_once()


@patch.object(Dispatcher, "span_enter")
def test_span_decorator_is_idempotent_with_pass_through(mock_span_enter):
    """Idempotence holds even with unrelated wrapt decorators interleaved."""
    x, z = random(), dispatcher.span
    a, b, c, d = (wrapt.decorator(lambda f, *_: f()) for _ in range(4))
    assert z(a(b(z(c(d(z(lambda: x)))))))() == x
    mock_span_enter.assert_called_once()


@patch.object(Dispatcher, "span_enter")
def test_mixin_decorates_abstract_method(mock_span_enter):
    """DispatcherSpanMixin span-wraps concrete overrides of an abstract method once."""
    x, z = random(), abstractmethod
    A = type("A", (DispatcherSpanMixin,), {"f": z(lambda _: ...)})
    B = type("B", (A,), {"f": lambda _: x + 0})
    C = type("C", (B,), {"f": lambda _: x + 1})
    D = type("D", (C, B), {"f": lambda _: x + 2})
    for i, T in enumerate((B, C, D)):
        assert T().f() - i == pytest.approx(x)  # type:ignore
        assert mock_span_enter.call_count - i == 1


@patch.object(Dispatcher, "span_enter")
def test_mixin_decorates_overridden_method(mock_span_enter):
    """DispatcherSpanMixin span-wraps overrides of an already-spanned method once."""
    x, z = random(), dispatcher.span
    A = type("A", (DispatcherSpanMixin,), {"f": z(lambda _: x)})
    B = type("B", (A,), {"f": lambda _: x + 1})
    C = type("C", (B,), {"f": lambda _: x + 2})
    D = type("D", (C, B), {"f": lambda _: x + 3})
    for i, T in enumerate((A, B, C, D)):
        assert T().f() - i == pytest.approx(x)  # type:ignore
        assert mock_span_enter.call_count - i == 1
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_span_naming_with_inheritance(mock_uuid, mock_span_enter, mock_span_exit):
    """Test that span IDs use the runtime class name, not the definition class name."""
    # arrange
    mock_uuid.uuid4.return_value = "mock"

    class BaseClass:
        @dispatcher.span
        def base_method(self, x):
            return x * 2

        @dispatcher.span
        async def async_base_method(self, x):
            return x * 3

    class DerivedClass(BaseClass):
        pass

    class AnotherDerivedClass(BaseClass):
        @dispatcher.span
        def derived_method(self, x):
            return x * 4

    # act
    base_instance = BaseClass()
    derived_instance = DerivedClass()
    another_derived_instance = AnotherDerivedClass()
    base_result = base_instance.base_method(5)
    derived_result = derived_instance.base_method(5)
    another_derived_result = another_derived_instance.derived_method(5)
    # assert
    assert mock_span_enter.call_count == 3
    # Check that span IDs use the actual runtime class names
    calls = mock_span_enter.call_args_list
    # BaseClass.base_method called on BaseClass instance
    assert calls[0][1]["id_"] == "BaseClass.base_method-mock"
    # BaseClass.base_method called on DerivedClass instance (should use DerivedClass)
    assert calls[1][1]["id_"] == "DerivedClass.base_method-mock"
    # AnotherDerivedClass.derived_method called on AnotherDerivedClass instance
    assert calls[2][1]["id_"] == "AnotherDerivedClass.derived_method-mock"


@pytest.mark.asyncio
@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
async def test_async_span_naming_with_inheritance(
    mock_uuid, mock_span_enter, mock_span_exit
):
    """Test that async span IDs use the runtime class name, not the definition class name."""
    # arrange
    mock_uuid.uuid4.return_value = "mock"

    class BaseClass:
        @dispatcher.span
        async def async_base_method(self, x):
            return x * 3

    class DerivedClass(BaseClass):
        pass

    # act
    base_instance = BaseClass()
    derived_instance = DerivedClass()
    base_result = await base_instance.async_base_method(5)
    derived_result = await derived_instance.async_base_method(5)
    # assert
    assert mock_span_enter.call_count == 2
    calls = mock_span_enter.call_args_list
    # BaseClass.async_base_method called on BaseClass instance
    assert calls[0][1]["id_"] == "BaseClass.async_base_method-mock"
    # BaseClass.async_base_method called on DerivedClass instance (should use DerivedClass)
    assert calls[1][1]["id_"] == "DerivedClass.async_base_method-mock"


@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_span_naming_regular_functions_unchanged(
    mock_uuid, mock_span_enter, mock_span_exit
):
    """Test that regular functions (non-methods) still use __qualname__."""
    # arrange
    mock_uuid.uuid4.return_value = "mock"

    @dispatcher.span
    def regular_function(x):
        return x * 5

    # act
    result = regular_function(10)
    # assert
    mock_span_enter.assert_called_once()
    call_kwargs = mock_span_enter.call_args[1]
    # Regular functions should still use __qualname__
    assert call_kwargs["id_"] == f"{regular_function.__qualname__}-mock"


@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_span_naming_complex_inheritance(mock_uuid, mock_span_enter, mock_span_exit):
    """Test span naming with multiple levels of inheritance."""
    # arrange
    mock_uuid.uuid4.return_value = "mock"

    class GrandParent:
        @dispatcher.span
        def shared_method(self, x):
            return x

    class Parent(GrandParent):
        pass

    class Child(Parent):
        @dispatcher.span
        def child_method(self, x):
            return x * 2

    class GrandChild(Child):
        pass

    # act
    instances = [GrandParent(), Parent(), Child(), GrandChild()]
    # Call shared_method on all instances
    for instance in instances:
        instance.shared_method(1)
    # Call child_method on child and grandchild
    instances[2].child_method(1)  # Child
    instances[3].child_method(1)  # GrandChild
    # assert
    assert mock_span_enter.call_count == 6
    calls = mock_span_enter.call_args_list
    # shared_method calls should use the runtime class names
    assert calls[0][1]["id_"] == "GrandParent.shared_method-mock"
    assert calls[1][1]["id_"] == "Parent.shared_method-mock"
    assert calls[2][1]["id_"] == "Child.shared_method-mock"
    assert calls[3][1]["id_"] == "GrandChild.shared_method-mock"
    # child_method calls should use the runtime class names
    assert calls[4][1]["id_"] == "Child.child_method-mock"
    assert calls[5][1]["id_"] == "GrandChild.child_method-mock"


@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_span_naming_with_method_override(mock_uuid, mock_span_enter, mock_span_exit):
    """Test span naming when methods are overridden in derived classes."""
    # arrange
    mock_uuid.uuid4.return_value = "mock"

    class Base:
        @dispatcher.span
        def method(self, x):
            return x

    class Derived(Base):
        @dispatcher.span
        def method(self, x):
            return x * 2

    # act
    base_instance = Base()
    derived_instance = Derived()
    base_instance.method(1)
    derived_instance.method(1)
    # assert
    assert mock_span_enter.call_count == 2
    calls = mock_span_enter.call_args_list
    # Each should use their respective class names
    assert calls[0][1]["id_"] == "Base.method-mock"
    assert calls[1][1]["id_"] == "Derived.method-mock"


@patch.object(Dispatcher, "span_exit")
@patch.object(Dispatcher, "span_enter")
@patch("llama_index_instrumentation.dispatcher.uuid")
def test_span_naming_with_nested_classes(mock_uuid, mock_span_enter, mock_span_exit):
    """Test span naming with nested classes."""
    # arrange
    mock_uuid.uuid4.return_value = "mock"

    class Outer:
        class Inner:
            @dispatcher.span
            def inner_method(self, x):
                return x

        @dispatcher.span
        def outer_method(self, x):
            return x * 2

    # act
    outer_instance = Outer()
    inner_instance = Outer.Inner()
    outer_instance.outer_method(1)
    inner_instance.inner_method(1)
    # assert
    assert mock_span_enter.call_count == 2
    calls = mock_span_enter.call_args_list
    # Should use the simple class names (not qualified names)
    assert calls[0][1]["id_"] == "Outer.outer_method-mock"
    assert calls[1][1]["id_"] == "Inner.inner_method-mock"
def test_aevent_with_sync_handlers():
    """aevent falls back to the default ahandle for purely-sync handlers."""
    handler = _TestEventHandler()
    disp = Dispatcher(event_handlers=[handler], propagate=False)
    fired = _TestStartEvent()

    asyncio.run(disp.aevent(fired))

    # Exactly the one dispatched event was recorded.
    assert handler.events == [fired]
@pytest.mark.asyncio
async def test_aevent_with_async_handlers():
    """aevent invokes a handler's ahandle override exactly once."""
    handler = _TestAsyncEventHandler()
    disp = Dispatcher(event_handlers=[handler], propagate=False)
    fired = _TestStartEvent()

    await disp.aevent(fired)

    # The async path was used and recorded exactly one event.
    assert handler.events == [fired]
    assert handler.async_calls == 1
@pytest.mark.asyncio
async def test_aevent_concurrent_handlers():
    """Test that aevent runs handlers concurrently.

    Two handlers that each sleep ~0.01s should finish in roughly one sleep's
    worth of wall time when dispatched concurrently.
    """
    # arrange
    handler1 = _TestAsyncEventHandler()
    handler2 = _TestAsyncEventHandler()
    test_dispatcher = Dispatcher(event_handlers=[handler1, handler2], propagate=False)
    event = _TestStartEvent()
    # act
    # Use a monotonic high-resolution clock: time.time() is wall-clock and can
    # jump (e.g. NTP adjustments), which would make this timing check flaky.
    start_time = time.perf_counter()
    await test_dispatcher.aevent(event)
    end_time = time.perf_counter()
    # assert
    # Should take ~0.01s (concurrent) not ~0.02s (sequential)
    assert end_time - start_time < 0.015
    assert len(handler1.events) == 1
    assert len(handler2.events) == 1
    assert handler1.async_calls == 1
    assert handler2.async_calls == 1
@pytest.mark.asyncio
async def test_aevent_error_isolation():
    """A raising handler must not prevent sibling handlers from running."""

    class FailingHandler(BaseEventHandler):
        def handle(self, e: BaseEvent, **kwargs: Any) -> Any:
            raise ValueError("Handler failed")

    ok_before = _TestAsyncEventHandler()
    ok_after = _TestAsyncEventHandler()
    disp = Dispatcher(
        event_handlers=[ok_before, FailingHandler(), ok_after], propagate=False
    )

    await disp.aevent(_TestStartEvent())

    # The middle handler's failure is isolated; both healthy handlers ran.
    for handler in (ok_before, ok_after):
        assert len(handler.events) == 1
        assert handler.async_calls == 1
@pytest.mark.asyncio
async def test_aevent_propagation():
    """With propagate=True a child dispatcher forwards events to its parent."""
    parent_recorder = _TestAsyncEventHandler()
    child_recorder = _TestAsyncEventHandler()
    parent = Dispatcher(
        name="parent", event_handlers=[parent_recorder], propagate=False
    )
    child = Dispatcher(name="child", event_handlers=[child_recorder], propagate=True)

    manager = Manager(parent)
    manager.add_dispatcher(child)
    child.manager = manager
    child.parent_name = "parent"

    await child.aevent(_TestStartEvent())

    # Propagation delivers the event to both dispatchers' handlers.
    for recorder in (child_recorder, parent_recorder):
        assert len(recorder.events) == 1
        assert recorder.async_calls == 1
@pytest.mark.asyncio
async def test_aevent_no_propagation():
    """With propagate=False a child dispatcher keeps events to itself."""
    parent_recorder = _TestAsyncEventHandler()
    child_recorder = _TestAsyncEventHandler()
    parent = Dispatcher(
        name="parent", event_handlers=[parent_recorder], propagate=False
    )
    child = Dispatcher(name="child", event_handlers=[child_recorder], propagate=False)

    manager = Manager(parent)
    manager.add_dispatcher(child)
    child.manager = manager
    child.parent_name = "parent"

    await child.aevent(_TestStartEvent())

    # Only the child's own handler saw the event; the parent saw nothing.
    assert len(child_recorder.events) == 1
    assert child_recorder.async_calls == 1
    assert len(parent_recorder.events) == 0
    assert parent_recorder.async_calls == 0
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-instrumentation/tests/test_dispatcher.py",
"license": "MIT License",
"lines": 976,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-instrumentation/tests/test_manager.py | import llama_index_instrumentation as instrument
def test_root_manager_add_dispatcher():
    """get_dispatcher registers the new dispatcher with the root manager."""
    manager = instrument.root_manager

    instrument.get_dispatcher("test")

    # The root dispatcher is always present; "test" was just registered.
    for name in ("root", "test"):
        assert name in manager.dispatchers
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-instrumentation/tests/test_manager.py",
"license": "MIT License",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-artifact-editor/llama_index/tools/artifact_editor/base.py | from typing import Any, Dict, List, Optional, Type, TypeVar, Union
from llama_index.core.bridge.pydantic import BaseModel, Field, ValidationError
from llama_index.core.tools import BaseTool, FunctionTool
from llama_index.core.tools.tool_spec.base import BaseToolSpec
# Generic artifact type: any pydantic model the editor can operate on.
T = TypeVar("T", bound=BaseModel)
class PatchOperation(BaseModel):
    """Represents a single patch operation.

    ``value`` is only meaningful for value-carrying ops ('replace', 'add'),
    and ``from_path`` only for 'move'/'copy'.
    """

    op: str = Field(
        ..., description="Operation type: 'replace', 'add', 'remove', 'move', 'copy'"
    )
    path: str = Field(..., description="JSON pointer path to the target location")
    value: Any = Field(
        None, description="Value for the operation (not used for 'remove' and 'test')"
    )
    # Optional[...] matches the None default; the previous bare `str`
    # annotation rejected an explicitly-passed from_path=None on validation.
    from_path: Optional[str] = Field(
        None, description="Source path for 'move' and 'copy' operations"
    )
class JsonPatch(BaseModel):
    """Collection of patch operations to apply to any Pydantic model."""

    # Operations are applied sequentially, in list order.
    operations: List[PatchOperation] = Field(
        ..., description="List of patch operations to apply"
    )
class ArtifactEditorToolSpec(BaseToolSpec):
    """
    A tool spec that allows you to edit an artifact in-memory.

    Using JSON patch operations, an LLM/Agent can be prompted to create,
    modify, and iterate on an artifact like a report, code, or anything that
    can be represented as a Pydantic model.

    Attributes:
        pydantic_cls: The Pydantic model class to edit
        current_artifact: The current artifact instance

    Methods:
        to_tool_list: Returns a list of tools that can be used to edit the artifact
        create_artifact: Creates an initial artifact instance
        get_current_artifact: Gets the current artifact instance
        apply_patch: Applies a JSON patch to the current artifact instance
    """

    # The `create_artifact` function is excluded as it is manually injected
    # into the tool spec (see to_tool_list), so it can use the artifact's own
    # model as its schema.
    spec_functions = [
        "apply_patch",
        "get_current_artifact",
    ]
    def __init__(
        self,
        pydantic_cls: Type[T],
        current_artifact: Optional[T] = None,
    ) -> None:
        """
        Initialize the artifact editor tool spec.

        Args:
            pydantic_cls (BaseModel): The Pydantic model class to edit
            current_artifact (Optional[BaseModel]): The initial artifact instance to use
        """
        self.pydantic_cls = pydantic_cls
        # None until create_artifact (or a caller) supplies an instance.
        self.current_artifact: Optional[T] = current_artifact
def to_tool_list(self) -> List[BaseTool]:
tools = super().to_tool_list()
tools.append(
FunctionTool.from_defaults(
self.create_artifact,
description=self.pydantic_cls.__doc__
or "Create an initial artifact instance.",
fn_schema=self.pydantic_cls,
)
)
return tools
def create_artifact(self, **kwargs: Any) -> dict:
"""Create an initial artifact instance."""
self.current_artifact = self.pydantic_cls.model_validate(kwargs)
return self.current_artifact.model_dump()
def get_current_artifact(self) -> Optional[dict]:
"""Get the current artifact instance."""
return self.current_artifact.model_dump() if self.current_artifact else None
def apply_patch(self, patch: JsonPatch) -> dict:
"""
Apply a JSON patch to the current Pydantic model instance.
Args:
patch: JsonPatch containing operations to apply
Returns:
New instance of the same model type with patches applied.
Also overwrites and saves the new instance as the current artifact.
Raises:
ValueError: If patch operation is invalid
IndexError: If array index is out of range
ValidationError: If patch results in invalid model
"""
# Validate patch object
if isinstance(patch, dict):
patch = JsonPatch.model_validate(patch)
elif isinstance(patch, str):
patch = JsonPatch.model_validate_json(patch)
# Convert to dict for easier manipulation
model_dict = self.current_artifact.model_dump()
model_class = self.pydantic_cls
for operation in patch.operations:
try:
self._apply_single_operation(model_dict, operation)
except Exception as e:
raise ValueError(
f"Failed to apply operation {operation.op} at {operation.path}: {e!s}"
)
# Convert back to original model type and validate
try:
self.current_artifact = model_class.model_validate(model_dict)
return self.current_artifact.model_dump()
except ValidationError as e:
raise ValueError(
f"Patch resulted in invalid {model_class.__name__} structure: {e!s}"
)
def _apply_single_operation(
self, data: Dict[str, Any], operation: PatchOperation
) -> None:
"""Apply a single patch operation to the data dictionary."""
path_parts = self._parse_path(operation.path)
# Validate path before applying operation
if operation.op in ["add", "replace"]:
self._validate_path_against_schema(path_parts, self.pydantic_cls)
if operation.op == "replace":
self._set_value_at_path(data, path_parts, operation.value)
elif operation.op == "add":
self._add_value_at_path(data, path_parts, operation.value)
elif operation.op == "remove":
self._remove_value_at_path(data, path_parts)
elif operation.op == "move":
if not operation.from_path:
raise ValueError("'move' operation requires 'from_path'")
from_parts = self._parse_path(operation.from_path)
to_parts = path_parts
# Validate both paths
self._validate_path_against_schema(to_parts, self.pydantic_cls)
value = self._get_value_at_path(data, from_parts)
self._remove_value_at_path(data, from_parts)
self._set_value_at_path(data, to_parts, value)
elif operation.op == "copy":
if not operation.from_path:
raise ValueError("'copy' operation requires 'from_path'")
from_parts = self._parse_path(operation.from_path)
to_parts = path_parts
# Validate target path
self._validate_path_against_schema(to_parts, self.pydantic_cls)
value = self._get_value_at_path(data, from_parts)
self._set_value_at_path(data, to_parts, value)
else:
raise ValueError(f"Unknown operation: {operation.op}")
def _validate_path_against_schema(
self, path_parts: List[Union[str, int]], model_class: Type[BaseModel]
) -> None:
"""
Validate that a path corresponds to valid fields in the Pydantic model schema.
Args:
path_parts: Parsed path components
model_class: The Pydantic model class to validate against
Raises:
ValueError: If the path contains invalid fields
"""
if not path_parts:
return
current_model = model_class
current_path = ""
for i, part in enumerate(path_parts):
current_path += f"/{part}" if current_path else f"{part}"
# If part is an integer or '-' (array append), we're dealing with an array index
if isinstance(part, int) or part == "-":
continue
# Check if this field exists in the current model
if hasattr(current_model, "model_fields"):
fields = current_model.model_fields
else:
# Fallback for older Pydantic versions
fields = getattr(current_model, "__fields__", {})
if part not in fields:
raise ValueError(
f"Invalid field '{part}' at path '/{current_path}'. Valid fields are: {list(fields.keys())}"
)
# Get the field type for nested validation
field_info = fields[part]
# Handle nested models
if hasattr(field_info, "annotation"):
field_type = field_info.annotation
else:
# Fallback for older Pydantic versions
field_type = getattr(field_info, "type_", None)
if field_type:
# Handle Optional types
if hasattr(field_type, "__origin__") and field_type.__origin__ is Union:
# Extract non-None type from Optional
args = getattr(field_type, "__args__", ())
field_type = next(
(arg for arg in args if arg is not type(None)), field_type
)
# Handle List types
if hasattr(field_type, "__origin__") and field_type.__origin__ in (
list,
List,
):
# For list types, the next part should be an index or '-'
if i + 1 < len(path_parts) and (
isinstance(path_parts[i + 1], int) or path_parts[i + 1] == "-"
):
continue
# If we're at the end of the path and it's a list, that's valid too
elif i + 1 == len(path_parts):
continue
# If it's a BaseModel subclass, use it for next iteration
if isinstance(field_type, type) and issubclass(field_type, BaseModel):
current_model = field_type
else:
# If we have more path parts but current field is not a model or list, check validity
if (
i + 1 < len(path_parts)
and not isinstance(path_parts[i + 1], int)
and path_parts[i + 1] != "-"
):
raise ValueError(
f"Cannot access nested field '{path_parts[i + 1]}' on non-object field '{part}' of type {field_type}"
)
def _parse_path(self, path: str) -> List[Union[str, int]]:
"""Parse a JSON pointer path into components."""
if not path.startswith("/"):
raise ValueError("Path must start with '/'")
if path == "/":
return []
parts = []
for part in path[1:].split("/"):
# Unescape JSON pointer characters
part = part.replace("~1", "/").replace("~0", "~")
# Try to convert to int if it looks like an array index
if part.isdigit():
parts.append(int(part))
else:
parts.append(part)
return parts
def _get_value_at_path(
self, data: Dict[str, Any], path_parts: List[Union[str, int]]
) -> Any:
"""Get value at the specified path."""
current = data
for part in path_parts:
if isinstance(current, dict):
if part not in current:
raise KeyError(f"Key '{part}' not found")
current = current[part]
elif isinstance(current, list):
if not isinstance(part, int):
raise ValueError(f"Array index must be integer, got {part}")
if part >= len(current) or part < -len(current):
raise IndexError(f"Array index {part} out of range")
current = current[part]
else:
raise ValueError(
f"Cannot index into {type(current).__name__} with {part}"
)
return current
def _set_value_at_path(
self, data: Dict[str, Any], path_parts: List[Union[str, int]], value: Any
) -> None:
"""Set value at the specified path."""
if not path_parts:
raise ValueError("Cannot replace root")
current = data
for part in path_parts[:-1]:
if isinstance(current, dict):
if part not in current:
raise KeyError(f"Key '{part}' not found")
current = current[part]
elif isinstance(current, list):
if not isinstance(part, int):
raise ValueError(f"Array index must be integer, got {part}")
if part >= len(current) or part < -len(current):
raise IndexError(f"Array index {part} out of range")
current = current[part]
else:
raise ValueError(
f"Cannot index into {type(current).__name__} with {part}"
)
last_part = path_parts[-1]
if isinstance(current, dict):
current[last_part] = value
elif isinstance(current, list):
if not isinstance(last_part, int):
raise ValueError(f"Array index must be integer, got {last_part}")
if last_part >= len(current) or last_part < -len(current):
raise IndexError(f"Array index {last_part} out of range")
current[last_part] = value
else:
raise ValueError(f"Cannot set value in {type(current).__name__}")
def _add_value_at_path(
self, data: Dict[str, Any], path_parts: List[Union[str, int]], value: Any
) -> None:
"""Add value at the specified path."""
if not path_parts:
raise ValueError("Cannot add to root")
current = data
for part in path_parts[:-1]:
if isinstance(current, dict):
if part not in current:
raise KeyError(f"Key '{part}' not found")
current = current[part]
elif isinstance(current, list):
if not isinstance(part, int):
raise ValueError(f"Array index must be integer, got {part}")
if part >= len(current) or part < -len(current):
raise IndexError(f"Array index {part} out of range")
current = current[part]
else:
raise ValueError(
f"Cannot index into {type(current).__name__} with {part}"
)
last_part = path_parts[-1]
if isinstance(current, dict):
current[last_part] = value
elif isinstance(current, list):
if isinstance(last_part, int):
if last_part > len(current) or last_part < -len(current) - 1:
raise IndexError(
f"Array index {last_part} out of range for insertion"
)
current.insert(last_part, value)
elif last_part == "-": # Special case for appending to array
current.append(value)
else:
raise ValueError(f"Invalid array index for add operation: {last_part}")
else:
raise ValueError(f"Cannot add value to {type(current).__name__}")
def _remove_value_at_path(
self, data: Dict[str, Any], path_parts: List[Union[str, int]]
) -> None:
"""Remove value at the specified path."""
if not path_parts:
raise ValueError("Cannot remove root")
current = data
for part in path_parts[:-1]:
if isinstance(current, dict):
if part not in current:
raise KeyError(f"Key '{part}' not found")
current = current[part]
elif isinstance(current, list):
if not isinstance(part, int):
raise ValueError(f"Array index must be integer, got {part}")
if part >= len(current) or part < -len(current):
raise IndexError(f"Array index {part} out of range")
current = current[part]
else:
raise ValueError(
f"Cannot index into {type(current).__name__} with {part}"
)
last_part = path_parts[-1]
if isinstance(current, dict):
if last_part not in current:
raise KeyError(f"Key '{last_part}' not found")
del current[last_part]
elif isinstance(current, list):
if not isinstance(last_part, int):
raise ValueError(f"Array index must be integer, got {last_part}")
if last_part >= len(current) or last_part < -len(current):
raise IndexError(f"Array index {last_part} out of range")
del current[last_part]
else:
raise ValueError(f"Cannot remove value from {type(current).__name__}")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-artifact-editor/llama_index/tools/artifact_editor/base.py",
"license": "MIT License",
"lines": 357,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-artifact-editor/llama_index/tools/artifact_editor/memory_block.py | from typing import List, Optional, Any
from llama_index.core.bridge.pydantic import Field
from llama_index.core.llms import ChatMessage
from llama_index.core.memory import BaseMemoryBlock
from llama_index.tools.artifact_editor.base import ArtifactEditorToolSpec
class ArtifactMemoryBlock(BaseMemoryBlock[str]):
    """Custom memory block to maintain the artifact in-memory.

    Exposes the artifact managed by an ``ArtifactEditorToolSpec`` as memory
    content so an agent always sees the latest artifact state. The block is
    read-only: it never stores chat messages.
    """
    name: str = Field(
        default="current_artifact", description="The name of the artifact block"
    )
    artifact_spec: Optional[ArtifactEditorToolSpec] = Field(
        default=None, description="The artifact spec for the artifact block"
    )
    async def _aget(
        self, messages: Optional[List[ChatMessage]] = None, **kwargs: Any
    ) -> str:
        """Return the current artifact as a string, or a placeholder if absent."""
        # Fetch once instead of twice: get_current_artifact() runs a full
        # model_dump() per call, and calling it twice risked returning a
        # different value than the one None-checked.
        artifact = self.artifact_spec.get_current_artifact()
        if artifact is None:
            return "No artifact created yet"
        return str(artifact)
    async def _aput(self, messages: List[ChatMessage]) -> None:
        """No-op: this block does not persist chat messages."""
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-artifact-editor/llama_index/tools/artifact_editor/memory_block.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-artifact-editor/tests/test_artifact_editor.py | import pytest
from typing import List, Optional
from pydantic import BaseModel
from llama_index.tools.artifact_editor.base import (
ArtifactEditorToolSpec,
JsonPatch,
PatchOperation,
)
# Test models for testing purposes
class Address(BaseModel):
    """Address model for testing nested objects.

    ``country`` is optional so tests can add a field that is declared on the
    model but absent from the instance.
    """
    street: str
    city: str
    zipcode: str
    country: Optional[str] = None
class Person(BaseModel):
    """Person model for testing the artifact editor."""
    # name/age are required; the remaining fields default, so removing them
    # via a patch resets them rather than failing validation.
    name: str
    age: int
    email: Optional[str] = None
    tags: List[str] = []
    address: Optional[Address] = None
class SimpleModel(BaseModel):
    """Simple model for basic testing.

    ``number`` and ``optional_number`` exist to exercise the 'move' op.
    """
    value: str
    number: Optional[int] = None
    optional_number: Optional[int] = None
@pytest.fixture
def editor():
    """Fresh editor bound to the Person model."""
    return ArtifactEditorToolSpec(pydantic_cls=Person)
@pytest.fixture
def simple_editor():
    """Fresh editor bound to the SimpleModel model."""
    return ArtifactEditorToolSpec(pydantic_cls=SimpleModel)
def test_create_artifact(editor: ArtifactEditorToolSpec):
    """Test creating an initial artifact."""
    created = editor.create_artifact(
        name="John Doe", age=30, email="john@example.com", tags=["developer", "python"]
    )
    want = {
        "name": "John Doe",
        "age": 30,
        "email": "john@example.com",
        "tags": ["developer", "python"],
        "address": None,
    }
    assert created == want
    # The editor must also persist the new artifact as current state.
    assert editor.get_current_artifact() == want
def test_create_artifact_with_nested_object(editor: ArtifactEditorToolSpec):
    """Test creating artifact with nested objects."""
    nested_address = {
        "street": "123 Main St",
        "city": "Springfield",
        "zipcode": "12345",
        "country": None,
    }
    created = editor.create_artifact(name="Jane Doe", age=25, address=nested_address)
    assert created["address"] == nested_address
    # The nested dict must be coerced into a real Address instance.
    assert isinstance(editor.current_artifact.address, Address)
def test_get_current_artifact(editor: ArtifactEditorToolSpec):
    """Test getting the current artifact."""
    # Before anything is created, there is nothing to return.
    assert editor.get_current_artifact() is None
    editor.create_artifact(name="Test User", age=20)
    snapshot = {
        "name": "Test User",
        "age": 20,
        "email": None,
        "tags": [],
        "address": None,
    }
    assert editor.get_current_artifact() == snapshot
def test_apply_patch_replace_operation(editor: ArtifactEditorToolSpec):
    """Test applying replace operations."""
    editor.create_artifact(name="John", age=30)
    ops = [
        PatchOperation(op="replace", path="/name", value="Jane"),
        PatchOperation(op="replace", path="/age", value=25),
    ]
    patched = editor.apply_patch(JsonPatch(operations=ops))
    assert patched["name"] == "Jane"
    assert patched["age"] == 25
    # The patched state must also be persisted.
    assert editor.get_current_artifact() == patched
def test_apply_patch_add_operation(editor: ArtifactEditorToolSpec):
    """Test applying add operations."""
    editor.create_artifact(name="John", age=30, tags=["python"])
    patched = editor.apply_patch(
        JsonPatch(
            operations=[
                PatchOperation(op="add", path="/email", value="john@example.com"),
                PatchOperation(op="add", path="/tags/1", value="developer"),
                # "-" is the JSON-pointer append marker
                PatchOperation(op="add", path="/tags/-", value="expert"),
            ]
        )
    )
    assert patched["email"] == "john@example.com"
    assert patched["tags"] == ["python", "developer", "expert"]
    assert editor.get_current_artifact() == patched
def test_apply_patch_remove_operation(editor: ArtifactEditorToolSpec):
    """Test applying remove operations."""
    editor.create_artifact(
        name="John",
        age=30,
        email="john@example.com",
        tags=["python", "developer", "expert"],
    )
    ops = [
        PatchOperation(op="remove", path="/email"),
        # Index 1 currently holds "developer"
        PatchOperation(op="remove", path="/tags/1"),
    ]
    patched = editor.apply_patch(JsonPatch(operations=ops))
    # Removed optional fields fall back to their model defaults.
    assert patched["email"] is None
    assert patched["tags"] == ["python", "expert"]
def test_apply_patch_move_operation(simple_editor: ArtifactEditorToolSpec):
    """Test applying move operations."""
    simple_editor.create_artifact(value="test", number=42)
    patched = simple_editor.apply_patch(
        JsonPatch(
            operations=[
                PatchOperation(op="move", path="/optional_number", from_path="/number")
            ]
        )
    )
    # The value relocates; the vacated field falls back to its default.
    assert patched["number"] is None
    assert patched["optional_number"] == 42
def test_apply_patch_copy_operation(editor: ArtifactEditorToolSpec):
    """Test applying copy operations."""
    editor.create_artifact(name="John", age=30)
    ops = [PatchOperation(op="copy", path="/email", from_path="/name")]
    patched = editor.apply_patch(JsonPatch(operations=ops))
    assert patched["email"] == "John"
    # Copy must leave the source field untouched.
    assert patched["name"] == "John"
def test_apply_patch_nested_paths(editor: ArtifactEditorToolSpec):
    """Test operations on nested object paths."""
    editor.create_artifact(
        name="John",
        age=30,
        address={"street": "123 Main St", "city": "Springfield", "zipcode": "12345"},
    )
    patched = editor.apply_patch(
        JsonPatch(
            operations=[
                PatchOperation(op="replace", path="/address/city", value="New York"),
                PatchOperation(op="add", path="/address/country", value="USA"),
            ]
        )
    )
    assert patched["address"]["city"] == "New York"
    assert patched["address"]["country"] == "USA"
def test_apply_patch_array_operations(editor: ArtifactEditorToolSpec):
    """Test various array operations.

    Operations apply in order: each one sees the list as left by the
    previous one, so indices shift as items are inserted.
    """
    editor.create_artifact(name="John", age=30, tags=["python", "java", "go"])
    patch = JsonPatch(
        operations=[
            # -> ["python", "javascript", "go"]
            PatchOperation(op="replace", path="/tags/1", value="javascript"),
            # -> ["rust", "python", "javascript", "go"]
            PatchOperation(op="add", path="/tags/0", value="rust"),
            # Removes "go" (shifted to index 3 by the insertion above); the
            # old comment wrongly claimed "java", which was already replaced.
            PatchOperation(op="remove", path="/tags/3"),
        ]
    )
    result = editor.apply_patch(patch)
    # Assert the exact final list instead of weak membership checks.
    assert result["tags"] == ["rust", "python", "javascript"]
def test_path_parsing():
    """Test path parsing functionality."""
    spec = ArtifactEditorToolSpec(Person)
    # Plain paths: root, field, array index, nested field.
    assert spec._parse_path("/") == []
    assert spec._parse_path("/name") == ["name"]
    assert spec._parse_path("/tags/0") == ["tags", 0]
    assert spec._parse_path("/address/street") == ["address", "street"]
    # JSON-pointer escapes: ~0 -> "~", ~1 -> "/".
    assert spec._parse_path("/field~0name") == ["field~name"]
    assert spec._parse_path("/field~1name") == ["field/name"]
def test_invalid_path_format(editor: ArtifactEditorToolSpec):
    """Test error handling for invalid path formats."""
    editor.create_artifact(name="John", age=30)
    # Paths must be JSON pointers, i.e. start with "/".
    bad_patch = JsonPatch(
        operations=[PatchOperation(op="replace", path="invalid_path", value="test")]
    )
    with pytest.raises(ValueError, match="Path must start with"):
        editor.apply_patch(bad_patch)
def test_nonexistent_path(editor: ArtifactEditorToolSpec):
    """Test error handling for nonexistent paths."""
    editor.create_artifact(name="John", age=30)
    missing = JsonPatch(
        operations=[PatchOperation(op="replace", path="/nonexistent", value="test")]
    )
    with pytest.raises(ValueError):
        editor.apply_patch(missing)
def test_array_index_out_of_range(editor: ArtifactEditorToolSpec):
    """Test error handling for array index out of range."""
    editor.create_artifact(name="John", age=30, tags=["python"])
    # Index 5 on a one-element list must be rejected.
    oob = JsonPatch(
        operations=[PatchOperation(op="replace", path="/tags/5", value="test")]
    )
    with pytest.raises(ValueError, match="Failed to apply operation"):
        editor.apply_patch(oob)
def test_invalid_operation_type(editor: ArtifactEditorToolSpec):
    """Test error handling for invalid operation types."""
    editor.create_artifact(name="John", age=30)
    bogus = JsonPatch(
        operations=[PatchOperation(op="invalid_op", path="/name", value="test")]
    )
    with pytest.raises(ValueError, match="Unknown operation"):
        editor.apply_patch(bogus)
def test_move_without_from_path(editor: ArtifactEditorToolSpec):
    """Test error handling for move operation without from_path."""
    editor.create_artifact(name="John", age=30)
    # from_path deliberately omitted
    incomplete = JsonPatch(
        operations=[PatchOperation(op="move", path="/name", value="test")]
    )
    with pytest.raises(ValueError, match="'move' operation requires 'from_path'"):
        editor.apply_patch(incomplete)
def test_copy_without_from_path(editor: ArtifactEditorToolSpec):
    """Test error handling for copy operation without from_path."""
    editor.create_artifact(name="John", age=30)
    # from_path deliberately omitted
    incomplete = JsonPatch(
        operations=[PatchOperation(op="copy", path="/email", value="test")]
    )
    with pytest.raises(ValueError, match="'copy' operation requires 'from_path'"):
        editor.apply_patch(incomplete)
def test_patch_validation_error(editor: ArtifactEditorToolSpec):
    """Test error handling when patch results in invalid model."""
    editor.create_artifact(name="John", age=30)
    # Null out the required "name" field; re-validation of the patched dict
    # must then fail. (The old comment claimed this set age to a string.)
    patch = JsonPatch(
        operations=[
            PatchOperation(op="replace", path="/name", value=None)  # Required field
        ]
    )
    with pytest.raises(ValueError, match="Patch resulted in invalid"):
        editor.apply_patch(patch)
def test_patch_from_dict(editor: ArtifactEditorToolSpec):
    """Test applying patch from dictionary format."""
    editor.create_artifact(name="John", age=30)
    raw = {"operations": [{"op": "replace", "path": "/name", "value": "Jane"}]}
    assert editor.apply_patch(raw)["name"] == "Jane"
def test_patch_from_json_string(editor: ArtifactEditorToolSpec):
    """Test applying patch from JSON string format."""
    editor.create_artifact(name="John", age=30)
    raw = '{"operations": [{"op": "replace", "path": "/name", "value": "Jane"}]}'
    assert editor.apply_patch(raw)["name"] == "Jane"
def test_to_tool_list(editor: ArtifactEditorToolSpec):
    """Test converting to tool list includes all expected tools."""
    names = {tool.metadata.name for tool in editor.to_tool_list()}
    # apply_patch + get_current_artifact come from spec_functions; the
    # manually injected create_artifact makes three distinct tools.
    assert names == {"apply_patch", "get_current_artifact", "create_artifact"}
def test_no_current_artifact_apply_patch(editor: ArtifactEditorToolSpec):
    """Test error when trying to apply patch without current artifact."""
    ops = [PatchOperation(op="replace", path="/name", value="Jane")]
    # current_artifact is still None, so model_dump() raises AttributeError.
    with pytest.raises(AttributeError):
        editor.apply_patch(JsonPatch(operations=ops))
def test_complex_nested_operations(editor: ArtifactEditorToolSpec):
    """Test complex operations on deeply nested structures."""
    editor.create_artifact(
        name="John",
        age=30,
        address={"street": "123 Main St", "city": "Springfield", "zipcode": "12345"},
        tags=["python", "developer"],
    )
    patched = editor.apply_patch(
        JsonPatch(
            operations=[
                PatchOperation(op="replace", path="/address/street", value="456 Oak Ave"),
                PatchOperation(op="add", path="/tags/-", value="senior"),
                PatchOperation(op="remove", path="/tags/0"),  # Remove "python"
            ]
        )
    )
    assert patched["address"]["street"] == "456 Oak Ave"
    assert "senior" in patched["tags"]
    assert "python" not in patched["tags"]
    # Untouched fields must survive the patch.
    assert patched["age"] == 30
def test_set_invalid_field_path(editor: ArtifactEditorToolSpec):
    """Test setting a field that doesn't exist in the Pydantic model schema."""
    editor.create_artifact(name="John", age=30)
    # "invalid_field" is not declared on Person, so schema validation rejects it.
    rogue = JsonPatch(
        operations=[PatchOperation(op="add", path="/invalid_field", value="test")]
    )
    with pytest.raises(ValueError, match="Invalid field 'invalid_field'"):
        editor.apply_patch(rogue)
def test_set_invalid_nested_field_path(editor: ArtifactEditorToolSpec):
    """Test setting a nested field that doesn't exist in the Pydantic model schema."""
    editor.create_artifact(
        name="John",
        age=30,
        address={"street": "123 Main St", "city": "Springfield", "zipcode": "12345"},
    )
    # Address declares no such field, so validation must reject the patch.
    rogue = JsonPatch(
        operations=[
            PatchOperation(op="add", path="/address/invalid_nested_field", value="test")
        ]
    )
    with pytest.raises(ValueError, match="Invalid field 'invalid_nested_field'"):
        editor.apply_patch(rogue)
def test_valid_nested_field_addition(editor: ArtifactEditorToolSpec):
    """Test adding a valid nested field that exists in the model schema."""
    editor.create_artifact(
        name="John",
        age=30,
        address={"street": "123 Main St", "city": "Springfield", "zipcode": "12345"},
    )
    # "country" is a declared (optional) Address field, so this must succeed.
    patched = editor.apply_patch(
        JsonPatch(
            operations=[PatchOperation(op="add", path="/address/country", value="USA")]
        )
    )
    assert patched["address"]["country"] == "USA"
def test_validation_with_array_access(editor: ArtifactEditorToolSpec):
    """Test validation works correctly with array access patterns."""
    editor.create_artifact(name="John", age=30, tags=["python", "developer"])
    # Indexing and the "-" append marker must both pass schema validation.
    patched = editor.apply_patch(
        JsonPatch(
            operations=[
                PatchOperation(op="replace", path="/tags/0", value="rust"),
                PatchOperation(op="add", path="/tags/-", value="expert"),
            ]
        )
    )
    assert patched["tags"] == ["rust", "developer", "expert"]
def test_validation_does_not_affect_existing_operations(editor: ArtifactEditorToolSpec):
    """Test that validation doesn't break existing valid operations."""
    editor.create_artifact(name="John", age=30, email="john@example.com")
    patched = editor.apply_patch(
        JsonPatch(
            operations=[
                PatchOperation(op="replace", path="/name", value="Jane"),
                PatchOperation(op="replace", path="/age", value=25),
                PatchOperation(op="remove", path="/email"),
            ]
        )
    )
    assert patched["name"] == "Jane"
    assert patched["age"] == 25
    assert patched["email"] is None
def test_move_operation_validates_target_path(editor: ArtifactEditorToolSpec):
    """Test that move operations validate the target path."""
    editor.create_artifact(name="John", age=30, email="john@example.com")
    # The move target is not a declared Person field.
    bad_target = JsonPatch(
        operations=[
            PatchOperation(op="move", path="/invalid_field", from_path="/email")
        ]
    )
    with pytest.raises(ValueError, match="Invalid field 'invalid_field'"):
        editor.apply_patch(bad_target)
def test_copy_operation_validates_target_path(editor: ArtifactEditorToolSpec):
    """Test that copy operations validate the target path."""
    editor.create_artifact(name="John", age=30, email="john@example.com")
    # The copy target is not a declared Person field.
    bad_target = JsonPatch(
        operations=[
            PatchOperation(op="copy", path="/invalid_field", from_path="/email")
        ]
    )
    with pytest.raises(ValueError, match="Invalid field 'invalid_field'"):
        editor.apply_patch(bad_target)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-artifact-editor/tests/test_artifact_editor.py",
"license": "MIT License",
"lines": 374,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-elevenlabs/llama_index/voice_agents/elevenlabs/base.py | import base64
import json
import threading
import warnings
from llama_index.core.voice_agents import (
BaseVoiceAgent,
BaseVoiceAgentInterface,
BaseVoiceAgentEvent,
)
from statistics import mean
from websockets.sync.client import Connection
from typing import Optional, Callable, Dict, List, Any, Union
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool
from elevenlabs.conversational_ai.conversation import (
Conversation,
ClientTools,
ConversationInitiationData,
)
from websockets import connect, ConnectionClosedOK
from elevenlabs.base_client import BaseElevenLabs
from .interface import ElevenLabsVoiceAgentInterface
from .utils import (
callback_agent_message,
callback_agent_message_correction,
callback_latency_measurement,
callback_user_message,
make_function_from_tool_model,
get_messages_from_chat,
)
from .events import (
PingEvent,
AudioEvent,
AgentResponseEvent,
AgentResponseCorrectionEvent,
UserTranscriptionEvent,
InterruptionEvent,
ConversationInitEvent,
ClientToolCallEvent,
)
class ElevenLabsVoiceAgent(Conversation, BaseVoiceAgent):
"""
Conversational AI session.
BETA: This API is subject to change without regard to backwards compatibility.
Attributes:
client (BaseElevenLabs): The ElevenLabs client to use for the conversation.
agent_id (str): The ID of the agent to converse with.
requires_auth (bool): Whether the agent requires authentication.
audio_interface (AudioInterface): The audio interface to use for input and output.
config (Optional[ConversationInitiationData]): The configuration for the conversation
client_tools (Optional[ClientTools]): The client tools to use for the conversation.
"""
interface: Optional[BaseVoiceAgentInterface]
client: BaseElevenLabs
requires_auth: bool
agent_id: str
tools: Optional[List[BaseTool]]
_last_message_id: int
_callback_agent_response: Callable
_callback_agent_response_correction: Callable
_callback_user_transcript: Callable
_callback_latency_measurement: Callable
_all_chat: Dict[int, List[ChatMessage]]
_messages: List[ChatMessage]
_events: List[BaseVoiceAgentEvent]
_thread: Optional[threading.Thread]
_should_stop: threading.Event
_conversation_id: Optional[str]
_last_interrupt_id: int
_ws: Optional[Connection]
def __init__(
self,
client: BaseElevenLabs,
agent_id: str,
requires_auth: bool,
interface: Optional[BaseVoiceAgentInterface] = None,
config: Optional[ConversationInitiationData] = None,
tools: Optional[List[BaseTool]] = None,
) -> None:
self.client = client
self.agent_id = agent_id
self.requires_auth = requires_auth
self.interface = interface
if not interface:
self.interface = ElevenLabsVoiceAgentInterface()
self.config = config or ConversationInitiationData()
client_tools = ClientTools()
if tools:
for tool in tools:
if tool.metadata.fn_schema is not None:
fn = make_function_from_tool_model(
model_cls=tool.metadata.fn_schema, tool=tool
)
client_tools.register(
tool_name=tool.metadata.get_name(), handler=fn
)
else:
warnings.warn(
f"Tool {tool.metadata.get_name()} could not added, since its function schema seems to be unavailable"
)
self.client_tools = client_tools or ClientTools()
self.client_tools.start()
self._callback_agent_response = callback_agent_message
self._callback_agent_response_correction = callback_agent_message_correction
self._callback_user_transcript = callback_user_message
self._callback_latency_measurement = callback_latency_measurement
self._latencies: List[int] = []
self._all_chat: Dict[int, List[ChatMessage]] = {}
self._messages: List[ChatMessage] = []
self._events: List[BaseVoiceAgentEvent] = []
self._current_message_id: int = 0
self._thread = None
self._ws: Optional[Connection] = None
self._should_stop = threading.Event()
self._conversation_id = None
self._last_interrupt_id = 0
def start(self, *args: Any, **kwargs: Any) -> None:
self.start_session()
def stop(self) -> None:
self.end_session()
self.wait_for_session_end()
def interrupt(self) -> None:
self.interface.interrupt()
def _run(self, ws_url: str):
with connect(ws_url, max_size=16 * 1024 * 1024) as ws:
self._ws = ws
ws.send(
json.dumps(
{
"type": "conversation_initiation_client_data",
"custom_llm_extra_body": self.config.extra_body,
"conversation_config_override": self.config.conversation_config_override,
"dynamic_variables": self.config.dynamic_variables,
}
)
)
self._ws = ws
def input_callback(audio):
try:
ws.send(
json.dumps(
{
"user_audio_chunk": base64.b64encode(audio).decode(),
}
)
)
except ConnectionClosedOK:
self.end_session()
except Exception as e:
print(f"Error sending user audio chunk: {e}")
self.end_session()
self.audio_interface.start(input_callback)
while not self._should_stop.is_set():
try:
message = json.loads(ws.recv(timeout=0.5))
if self._should_stop.is_set():
return
self.handle_message(message, ws)
except ConnectionClosedOK as e:
self.end_session()
except TimeoutError:
pass
except Exception as e:
print(f"Error receiving message: {e}")
self.end_session()
self._ws = None
def handle_message(self, message: Dict, ws: Any) -> None:
if message["type"] == "conversation_initiation_metadata":
event = message["conversation_initiation_metadata_event"]
self._events.append(
ConversationInitEvent(
type_t="conversation_initiation_metadata", **event
)
)
assert self._conversation_id is None
self._conversation_id = event["conversation_id"]
elif message["type"] == "audio":
event = message["audio_event"]
self._events.append(AudioEvent(type_t="audio", **event))
if int(event["event_id"]) <= self._last_interrupt_id:
return
audio = base64.b64decode(event["audio_base_64"])
self._callback_agent_response(
messages=self._all_chat,
message_id=self._current_message_id,
audio=event["audio_base_64"],
)
self.audio_interface.output(audio)
elif message["type"] == "agent_response":
event = message["agent_response_event"]
self._events.append(AgentResponseEvent(type_t="agent_response", **event))
self._callback_agent_response(
messages=self._all_chat,
message_id=self._current_message_id,
text=event["agent_response"].strip(),
)
elif message["type"] == "agent_response_correction":
event = message["agent_response_correction_event"]
self._events.append(
AgentResponseCorrectionEvent(
type_t="agent_response_correction", **event
)
)
self._callback_agent_response_correction(
messages=self._all_chat,
message_id=self._current_message_id,
text=event["corrected_agent_response"].strip(),
)
elif message["type"] == "user_transcript":
self._current_message_id += 1
event = message["user_transcription_event"]
self._events.append(
UserTranscriptionEvent(type_t="user_transcript", **event)
)
self._callback_user_transcript(
messages=self._all_chat,
message_id=self._current_message_id,
text=event["user_transcript"].strip(),
)
elif message["type"] == "interruption":
event = message["interruption_event"]
self._events.append(InterruptionEvent(type_t="interruption", **event))
self._last_interrupt_id = int(event["event_id"])
self.audio_interface.interrupt()
elif message["type"] == "ping":
event = message["ping_event"]
self._events.append(PingEvent(type_t="ping", **event))
ws.send(
json.dumps(
{
"type": "pong",
"event_id": event["event_id"],
}
)
)
if event["ping_ms"] is None:
event["ping_ms"] = 0
self._callback_latency_measurement(self._latencies, int(event["ping_ms"]))
elif message["type"] == "client_tool_call":
self._events.append(ClientToolCallEvent(type_t="client_tool_call", **event))
tool_call = message.get("client_tool_call", {})
tool_name = tool_call.get("tool_name")
parameters = {
"tool_call_id": tool_call["tool_call_id"],
**tool_call.get("parameters", {}),
}
def send_response(response):
if not self._should_stop.is_set():
ws.send(json.dumps(response))
self.client_tools.execute_tool(tool_name, parameters, send_response)
message = f"Calling tool: {tool_name} with parameters: {parameters}"
self._callback_agent_response(
messages=self._all_chat,
message_id=self._current_message_id,
text=message,
)
else:
pass # Ignore all other message types.
self._messages = get_messages_from_chat(self._all_chat)
@property
def average_latency(self) -> Union[int, float]:
"""
Get the average latency of your conversational agent.
Returns:
The average latency if latencies are recorded, otherwise 0.
"""
if not self._latencies:
return 0
return mean(self._latencies)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-elevenlabs/llama_index/voice_agents/elevenlabs/base.py",
"license": "MIT License",
"lines": 269,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-elevenlabs/llama_index/voice_agents/elevenlabs/utils.py | from pydantic import BaseModel
from inspect import Signature, Parameter
from typing import Any, Dict, Optional, List, Callable
from llama_index.core.llms import ChatMessage, AudioBlock, TextBlock, MessageRole
from llama_index.core.tools import BaseTool
def make_function_from_tool_model(
    model_cls: type[BaseModel], tool: BaseTool
) -> Callable:
    """
    Build a plain callable that mirrors the fields of ``model_cls``.

    The returned function accepts the model's fields as positional or
    keyword arguments, binds them against a generated signature, invokes
    ``tool`` with them, and returns the tool output's ``raw_output``.
    """
    signature = Signature(
        [
            Parameter(
                field_name,
                Parameter.POSITIONAL_OR_KEYWORD,
                annotation=field_info.annotation,
            )
            for field_name, field_info in model_cls.model_fields.items()
        ]
    )

    def func_template(*args, **kwargs):
        bound_args = signature.bind(*args, **kwargs)
        bound_args.apply_defaults()
        return tool(**bound_args.arguments).raw_output

    # Advertise the generated signature for introspection by callers.
    func_template.__signature__ = signature
    return func_template
def callback_user_message(
    messages: Dict[int, List[ChatMessage]],
    message_id: int,
    text: Optional[str] = None,
    audio: Optional[Any] = None,
) -> None:
    """
    Record a user turn under ``message_id``.

    A text payload becomes a ``TextBlock``; otherwise the payload is treated
    as audio and wrapped in an ``AudioBlock``.  The block is appended to the
    latest USER message for that id, creating one if necessary.
    """
    new_block = TextBlock(text=text) if text else AudioBlock(audio=audio)
    existing = messages.get(message_id)
    if existing is None:
        messages[message_id] = [
            ChatMessage(role=MessageRole.USER, blocks=[new_block])
        ]
        return
    user_turns = [m for m in existing if m.role == MessageRole.USER]
    if user_turns:
        target = user_turns[-1]
    else:
        # No user message yet for this id: start an empty one.
        target = ChatMessage(role=MessageRole.USER, blocks=[])
        existing.append(target)
    target.blocks.append(new_block)
def callback_agent_message(
    messages: Dict[int, List[ChatMessage]],
    message_id: int,
    text: Optional[str] = None,
    audio: Optional[Any] = None,
) -> None:
    """
    Record an agent turn under ``message_id``.

    A text payload becomes a ``TextBlock``; otherwise the payload is treated
    as audio and wrapped in an ``AudioBlock``.  The block is appended to the
    latest ASSISTANT message for that id, creating one if necessary.
    """
    new_block = TextBlock(text=text) if text else AudioBlock(audio=audio)
    existing = messages.get(message_id)
    if existing is None:
        messages[message_id] = [
            ChatMessage(role=MessageRole.ASSISTANT, blocks=[new_block])
        ]
        return
    agent_turns = [m for m in existing if m.role == MessageRole.ASSISTANT]
    if agent_turns:
        target = agent_turns[-1]
    else:
        # No assistant message yet for this id: start an empty one.
        target = ChatMessage(role=MessageRole.ASSISTANT, blocks=[])
        existing.append(target)
    target.blocks.append(new_block)
def callback_agent_message_correction(
    messages: Dict[int, List[ChatMessage]], message_id: int, text: str
) -> None:
    """Overwrite the latest text block of the latest assistant message in place."""
    assistant_turns = [
        m for m in messages[message_id] if m.role == MessageRole.ASSISTANT
    ]
    text_blocks = [
        b for b in assistant_turns[-1].blocks if b.block_type == "text"
    ]
    text_blocks[-1].text = text
def callback_latency_measurement(latencies: List[int], latency: int) -> None:
    """Append a single latency sample (milliseconds) to the running list."""
    latencies += [latency]
def get_messages_from_chat(chat: Dict[int, List[ChatMessage]]) -> List[ChatMessage]:
    """Flatten the per-message-id chat map into a single ordered message list."""
    return [message for turn in chat.values() for message in turn]
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-elevenlabs/llama_index/voice_agents/elevenlabs/utils.py",
"license": "MIT License",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-elevenlabs/tests/test_utils.py | import pytest
from typing import Dict, List
from llama_index.core.llms import ChatMessage, MessageRole, TextBlock, AudioBlock
from llama_index.voice_agents.elevenlabs.utils import (
callback_agent_message,
callback_agent_message_correction,
callback_latency_measurement,
callback_user_message,
get_messages_from_chat,
)
# Shared fake audio payload used by the fixtures and tests below.
data = b"fake_audio_data"
@pytest.fixture()
def messages() -> Dict[int, List[ChatMessage]]:
    """Seed chat map: one assistant audio turn and one user text turn under id 1."""
    return {
        1: [
            ChatMessage(role=MessageRole.ASSISTANT, blocks=[AudioBlock(audio=data)]),
            ChatMessage(role=MessageRole.USER, blocks=[TextBlock(text="Hello")]),
        ]
    }
@pytest.fixture()
def latencies() -> List[int]:
    """Seed latency samples (milliseconds)."""
    return [1, 3]
def test_agent_message(messages: Dict[int, List[ChatMessage]]):
    """callback_agent_message appends blocks to the latest assistant turn."""
    local_messages = messages.copy()
    # Text for an existing id extends the existing assistant message.
    callback_agent_message(messages=local_messages, message_id=1, text="Hello")
    assert {
        1: [
            ChatMessage(
                role=MessageRole.ASSISTANT,
                blocks=[AudioBlock(audio=data), TextBlock(text="Hello")],
            ),
            ChatMessage(role=MessageRole.USER, blocks=[TextBlock(text="Hello")]),
        ]
    } == local_messages
    # Text for a new id creates a fresh assistant message.
    callback_agent_message(messages=local_messages, message_id=2, text="Hello")
    assert {
        1: [
            ChatMessage(
                role=MessageRole.ASSISTANT,
                blocks=[AudioBlock(audio=data), TextBlock(text="Hello")],
            ),
            ChatMessage(role=MessageRole.USER, blocks=[TextBlock(text="Hello")]),
        ],
        2: [ChatMessage(role=MessageRole.ASSISTANT, blocks=[TextBlock(text="Hello")])],
    } == local_messages
    # Audio for an existing id is appended after the text block.
    callback_agent_message(messages=local_messages, message_id=2, audio=data)
    assert {
        1: [
            ChatMessage(
                role=MessageRole.ASSISTANT,
                blocks=[AudioBlock(audio=data), TextBlock(text="Hello")],
            ),
            ChatMessage(role=MessageRole.USER, blocks=[TextBlock(text="Hello")]),
        ],
        2: [
            ChatMessage(
                role=MessageRole.ASSISTANT,
                blocks=[TextBlock(text="Hello"), AudioBlock(audio=data)],
            )
        ],
    } == local_messages
def test_user_message(messages: Dict[int, List[ChatMessage]]):
    """callback_user_message appends blocks to the latest user turn."""
    local_messages = messages.copy()
    # Audio for an existing id extends the existing user message.
    callback_user_message(messages=local_messages, message_id=1, audio=data)
    assert {
        1: [
            ChatMessage(role=MessageRole.ASSISTANT, blocks=[AudioBlock(audio=data)]),
            ChatMessage(
                role=MessageRole.USER,
                blocks=[TextBlock(text="Hello"), AudioBlock(audio=data)],
            ),
        ]
    } == local_messages
    # Text for a new id creates a fresh user message.
    callback_user_message(messages=local_messages, message_id=2, text="Hello")
    assert {
        1: [
            ChatMessage(role=MessageRole.ASSISTANT, blocks=[AudioBlock(audio=data)]),
            ChatMessage(
                role=MessageRole.USER,
                blocks=[TextBlock(text="Hello"), AudioBlock(audio=data)],
            ),
        ],
        2: [ChatMessage(role=MessageRole.USER, blocks=[TextBlock(text="Hello")])],
    } == local_messages
    # Audio for that id is appended to the same user message.
    callback_user_message(messages=local_messages, message_id=2, audio=data)
    assert {
        1: [
            ChatMessage(role=MessageRole.ASSISTANT, blocks=[AudioBlock(audio=data)]),
            ChatMessage(
                role=MessageRole.USER,
                blocks=[TextBlock(text="Hello"), AudioBlock(audio=data)],
            ),
        ],
        2: [
            ChatMessage(
                role=MessageRole.USER,
                blocks=[TextBlock(text="Hello"), AudioBlock(audio=data)],
            )
        ],
    } == local_messages
def test_agent_message_correction(messages: Dict[int, List[ChatMessage]]):
    """Correction rewrites the assistant's last text block in place."""
    local_messages = messages.copy()
    # Give the assistant turn a (wrong) text block to be corrected.
    local_messages[1][0].blocks.append(TextBlock(text="Hell"))
    callback_agent_message_correction(
        messages=local_messages, message_id=1, text="Hello"
    )
    assert local_messages[1][0].blocks[1].text == "Hello"
def test_latencies(latencies: List[int]):
    """Latency callback appends samples without touching existing ones."""
    local_lats = latencies.copy()
    callback_latency_measurement(local_lats, 3)
    callback_latency_measurement(local_lats, 9)
    assert local_lats == [*latencies, 3, 9]
def test_get_messages_from_chat(messages: Dict[int, List[ChatMessage]]) -> None:
    """Flattening a single-id chat map yields exactly that id's message list."""
    assert get_messages_from_chat(messages) == messages[1]
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-elevenlabs/tests/test_utils.py",
"license": "MIT License",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/graph_stores/llama-index-graph-stores-ApertureDB/llama_index/graph_stores/ApertureDB/property_graph.py | import logging
from typing import Any, Dict, List, Optional
from llama_index.core.graph_stores.types import (
LabelledNode,
PropertyGraphStore,
EntityNode,
Relation,
ChunkNode,
Triplet,
)
# Populated lazily in ApertureDBGraphStore.__init__ from aperturedb's
# CommonLibrary/Query helpers, so this module imports cleanly even when
# the optional aperturedb dependency is absent.
query_executor = None
query_builder = None
# Prefix for properties that are in the client metadata
PROPERTY_PREFIX = "lm_"
TEXT_PROPERTY = "text"  # Property name for the text
UNIQUEID_PROPERTY = "uniqueid"  # Property name for the unique id
BATCHSIZE = 1000  # Max entities fetched by an unfiltered FindEntity query
logger = logging.getLogger(__name__)
def get_entity(client, label, id):
    """
    Fetch a single entity by its unique id, optionally constrained to a class.

    Returns:
        The entity's property dict, or None when no match exists.
    """
    find_params = {
        "_ref": 1,
        "constraints": {UNIQUEID_PROPERTY: ["==", id]},
        "results": {"all_properties": True},
    }
    if label is not None:
        find_params["with_class"] = label
    result, response, _ = query_executor(
        client,
        [{"FindEntity": find_params}],
    )
    assert result == 0, response
    found = response[0]["FindEntity"]
    if "entities" in found and len(found["entities"]) > 0:
        return found["entities"][0]
    return None
def changed(entity, properties):
    """
    Diff desired ``properties`` against an entity's stored properties.

    Args:
        entity: The stored property dict, or None when the entity is new.
        properties: The desired property values.

    Returns:
        A tuple ``(to_update, to_delete)``: ``to_update`` maps names to new
        values (keys that are missing or differ), ``to_delete`` lists stored
        keys absent from ``properties``.  Internal ``_``-prefixed keys and
        the reserved ``id``/``name`` keys are never scheduled for deletion.
    """
    if entity is None:
        # Nothing stored yet: everything must be written, nothing removed.
        return properties, []
    # Merged the original's duplicate branches (missing key / changed value).
    to_update = {
        key: value
        for key, value in properties.items()
        if key not in entity or entity[key] != value
    }
    to_delete = [
        key
        for key in entity  # iterate keys only; values were unused before
        if key not in properties
        and not key.startswith("_")
        and key not in ("id", "name")
    ]
    return to_update, to_delete
def query_for_ids(command: str, ids: List[str]) -> List[dict]:
    """
    Build a single-command query constrained to the given unique ids.

    ``FindEntity`` queries request all properties; mutation commands
    (e.g. ``DeleteEntity``) carry only the id constraint.

    Args:
        command: The ApertureDB command name.
        ids: Unique ids to match (combined with an "any"/"in" constraint).
    """
    # Simplified from setdefault-on-a-fresh-dict plus an always-true length
    # check: the constraint is unconditionally present.
    body: dict = {"results": {"all_properties": True}} if command == "FindEntity" else {}
    body["constraints"] = {"any": {UNIQUEID_PROPERTY: ["in", ids]}}
    return [{command: body}]
def query_for_properties(command: str, properties: dict) -> List[dict]:
    """
    Build a single-command query matching ALL of the given property values.

    Args:
        command: The ApertureDB command name.
        properties: Property name -> required value pairs.

    Note:
        BUG FIX: the previous implementation called
        ``constraints.setdefault("all", {k: ["==", v]})`` inside the loop,
        which silently dropped every property after the first.  Each
        property is now accumulated into the shared "all" constraint.
    """
    constraints: dict = {}
    for key, value in properties.items():
        constraints.setdefault("all", {})[key] = ["==", value]
    query = [{command: {"results": {"all_properties": True}}}]
    if len(constraints) > 0:
        query[0][command]["constraints"] = constraints
    return query
class ApertureDBGraphStore(PropertyGraphStore):
    """
    ApertureDB property graph store.

    Stores LlamaIndex nodes as ApertureDB entities and relations as
    connections between them.  Requires the ``aperturedb`` package.

    Args:
        config (dict): Configuration for the graph store.
        **kwargs: Additional keyword arguments.
    """

    flat_metadata: bool = True

    @property
    def client(self) -> Any:
        """Get the underlying ApertureDB connector."""
        return self._client

    def __init__(self, *args, **kwargs) -> None:
        """Connect to ApertureDB and publish the query helpers at module scope."""
        try:
            from aperturedb.CommonLibrary import create_connector, execute_query
            from aperturedb.Query import QueryBuilder
        except ImportError:
            raise ImportError(
                "ApertureDB is not installed. Please install it using "
                "'pip install --upgrade aperturedb'"
            )
        self._client = create_connector()
        # The module-level free functions (get_entity, ...) use these globals
        # so the module can be imported without aperturedb installed.
        global query_executor
        query_executor = execute_query
        global query_builder
        query_builder = QueryBuilder

    def get_rel_map(
        self,
        subjs: List[LabelledNode],
        depth: int = 2,
        limit: int = 30,
        ignore_rels: Optional[List[str]] = None,
    ) -> List[Triplet]:
        """
        Get a depth-aware relation map by recursively expanding outgoing
        connections from ``subjs``.

        Args:
            subjs: Seed nodes to expand from.
            depth: Maximum traversal depth; recursion stops at 0.
            limit: Per-query result limit.
            ignore_rels: Relation ids to skip.

        Returns:
            A list of [source, relation, target] triplets.
        """
        if subjs is None or len(subjs) == 0:
            return []
        if depth <= 0:
            return []
        rel_map = []
        ignore_rels = ignore_rels or []
        for s in subjs:
            # Find the subject, then its outgoing neighbours and the
            # connections between them, in one multi-command query.
            query = [
                query_builder.find_command(
                    oclass=s.label,
                    params={
                        "_ref": 1,
                        "constraints": {UNIQUEID_PROPERTY: ["==", s.id]},
                        "results": {"all_properties": True, "limit": limit},
                    },
                )
            ]
            for i in range(1, 2):
                query.extend(
                    [
                        {
                            "FindEntity": {
                                "_ref": i + 1,
                                "is_connected_to": {"ref": i, "direction": "out"},
                                "results": {"all_properties": True, "limit": limit},
                            }
                        },
                        {
                            "FindConnection": {
                                "src": i,
                                "results": {"all_properties": True, "limit": limit},
                            }
                        },
                    ]
                )
            result, response, _ = query_executor(
                self._client,
                query,
            )
            assert result == 0, response
            adjacent_nodes = []
            if "entities" in response[0]["FindEntity"]:
                for entity in response[0]["FindEntity"]["entities"]:
                    for c, ce in zip(
                        response[1]["FindEntity"]["entities"],
                        response[2]["FindConnection"]["connections"],
                    ):
                        if ce[UNIQUEID_PROPERTY] in ignore_rels:
                            continue
                        source = EntityNode(
                            name=entity[UNIQUEID_PROPERTY],
                            label=entity["label"],
                            properties=entity,
                        )
                        target = EntityNode(
                            name=c[UNIQUEID_PROPERTY],
                            label=c["label"],
                            properties=c,
                        )
                        # BUG FIX: the relation previously used the target's
                        # id for BOTH endpoints; source_id now refers to the
                        # actual source entity.
                        relation = Relation(
                            source_id=entity[UNIQUEID_PROPERTY],
                            target_id=c[UNIQUEID_PROPERTY],
                            label=ce[UNIQUEID_PROPERTY],
                        )
                        adjacent_nodes.append(target)
                        rel_map.append([source, relation, target])
            # NOTE(review): recursion resets limit/ignore_rels to defaults —
            # confirm this is intended.
            rel_map.extend(self.get_rel_map(adjacent_nodes, depth - 1))
        return rel_map

    def delete(
        self,
        entity_names: Optional[List[str]] = None,
        relation_names: Optional[List[str]] = None,
        properties: Optional[dict] = None,
        ids: Optional[List[str]] = None,
    ) -> None:
        """
        Delete nodes and/or connections matching any of the given filters.

        Args:
            entity_names: Entity classes to delete wholesale.
            relation_names: Connection classes to delete wholesale.
            properties: Exact property values to match for deletion.
            ids: Unique ids (capitalized to match upsert) to delete.
        """
        if ids and len(ids) > 0:
            # Ids are capitalized on insert (see upsert_nodes), so match that.
            query = query_for_ids("DeleteEntity", [id.capitalize() for id in ids])
            result, response, _ = query_executor(
                self._client,
                query,
            )
            assert result == 0, response
        if properties and len(properties) > 0:
            query = query_for_properties("DeleteEntity", properties)
            result, response, _ = query_executor(
                self._client,
                query,
            )
            assert result == 0, response
        if entity_names and len(entity_names) > 0:
            for name in entity_names:
                # The always-true constraint deletes every instance of the class.
                query = [
                    {
                        "DeleteEntity": {
                            "with_class": name,
                            "constraints": {"_uniqueid": ["!=", "0.0.0"]},
                        }
                    }
                ]
                result, response, _ = query_executor(
                    self._client,
                    query,
                )
                assert result == 0, response
        if relation_names and len(relation_names) > 0:
            for relation_name in set(relation_names):
                query = [
                    {
                        "DeleteConnection": {
                            "with_class": relation_name,
                            "constraints": {"_uniqueid": ["!=", "0.0.0"]},
                        }
                    }
                ]
                result, response, _ = query_executor(
                    self._client,
                    query,
                )
                assert result == 0, response

    def get(
        self, properties: Optional[dict] = None, ids: Optional[List[str]] = None
    ) -> List[LabelledNode]:
        """
        Fetch nodes by ids, by exact property match, or — when neither is
        given — the first BATCHSIZE entities.

        Entities labelled ``text_chunk`` are rebuilt as ChunkNode; all other
        entities become EntityNode.
        """
        entities = []
        if ids and len(ids) > 0:
            query = query_for_ids("FindEntity", ids)
            result, response, _ = query_executor(
                self._client,
                query,
            )
            assert result == 0, response
            entities.extend(response[0]["FindEntity"].get("entities", []))
        elif properties and len(properties) > 0:
            query = query_for_properties("FindEntity", properties)
            result, response, _ = query_executor(
                self._client,
                query,
            )
            assert result == 0, response
            entities.extend(response[0]["FindEntity"].get("entities", []))
        else:
            query = [
                {
                    "FindEntity": {
                        "results": {"all_properties": True, "limit": BATCHSIZE}
                    }
                }
            ]
            result, response, _ = query_executor(
                self._client,
                query,
            )
            assert result == 0, response
            entities.extend(response[0]["FindEntity"].get("entities", []))
        response = []
        if len(entities) > 0:
            for e in entities:
                if e["label"] == "text_chunk":
                    node = ChunkNode(
                        properties={
                            "_node_content": e["node_content"],
                            "_node_type": e["node_type"],
                        },
                        text=e["text"],
                        id=e[UNIQUEID_PROPERTY],
                    )
                else:
                    node = EntityNode(
                        label=e["label"], properties=e, name=e[UNIQUEID_PROPERTY]
                    )
                response.append(node)
        return response

    def get_triplets(
        self, entity_names=None, relation_names=None, properties=None, ids=None
    ):
        """Not supported by this store."""
        raise NotImplementedError("get_triplets is not implemented")

    def structured_query(
        self, query: str, param_map: Optional[Dict[str, Any]] = None
    ) -> Any:
        """
        Run a single raw ApertureDB command.

        Args:
            query: The command name (e.g. "FindEntity").
            param_map: The command's parameter dict.

        Returns:
            The raw response list from the server.
        """
        query = [{query: param_map}]
        blobs = []
        result, response, _ = query_executor(self._client, query, blobs)
        assert result == 0, response
        return response

    def upsert_nodes(self, nodes: List[EntityNode]) -> List[str]:
        """
        Insert or update nodes, diffing desired properties against stored
        entities so unchanged nodes issue no mutation.

        Returns:
            The (capitalized) unique ids of all processed nodes.
        """
        ids = []
        for node in nodes:
            # Nodes can be of type EntityNode or ChunkNode.
            properties = node.properties
            id = node.id.capitalize()
            if isinstance(node, ChunkNode):
                # Strip the leading underscore from internal keys and keep
                # the chunk text as a plain property.
                sane_props = {
                    "text": node.text,
                }
                for k, v in node.properties.items():
                    if k.startswith("_"):
                        sane_props[k[1:]] = v
                properties = sane_props
            entity = get_entity(self._client, node.label, id)
            combined_properties = properties | {
                UNIQUEID_PROPERTY: id,
                "label": node.label,
            }
            command = None
            if entity is None:
                command = {
                    "AddEntity": {
                        "class": node.label,
                        "if_not_found": {UNIQUEID_PROPERTY: ["==", id]},
                        "properties": combined_properties,
                    }
                }
            else:
                # Only issue an update when something actually changed.
                to_update, to_delete = changed(entity, combined_properties)
                if len(to_update) > 0 or len(to_delete) > 0:
                    command = {
                        "UpdateEntity": {
                            "constraints": {UNIQUEID_PROPERTY: ["==", id]},
                            "properties": to_update,
                            "remove_props": to_delete,
                        }
                    }
            if command is not None:
                query = [command]
                blobs = []
                result, response, _ = query_executor(self._client, query, blobs)
                assert result == 0, response
            ids.append(id)
        return ids

    def upsert_relations(self, relations: List[Relation]) -> List[str]:
        """
        Upsert relations as connections between already-stored entities.

        Each relation issues one query: find source, find target, then add
        the connection if an identical one does not already exist (status 2
        from the server is treated as success for that case).

        Returns:
            The ids of the processed relations.  (Annotation fixed: the
            method has always returned the id list, not None.)
        """
        ids = []
        for r in relations:
            query = [
                {
                    "FindEntity": {
                        "constraints": {
                            UNIQUEID_PROPERTY: ["==", r.source_id.capitalize()]
                        },
                        "_ref": 1,
                    }
                },
                {
                    "FindEntity": {
                        "constraints": {
                            UNIQUEID_PROPERTY: ["==", r.target_id.capitalize()]
                        },
                        "_ref": 2,
                    }
                },
                {
                    "AddConnection": {
                        "class": r.label,
                        "src": 1,
                        "dst": 2,
                        "properties": r.properties
                        | {
                            UNIQUEID_PROPERTY: f"{r.id}",
                            "src_id": r.source_id.capitalize(),
                            "dst_id": r.target_id.capitalize(),
                        },
                        "if_not_found": {
                            UNIQUEID_PROPERTY: ["==", f"{r.id}"],
                            "src_id": ["==", r.source_id.capitalize()],
                            "dst_id": ["==", r.target_id.capitalize()],
                        },
                    }
                },
            ]
            result, response, _ = query_executor(
                self._client, query, success_statuses=[0, 2]
            )
            assert result == 0, response
            ids.append(r.id)
        return ids

    def vector_query(self, query, **kwargs):
        """Not supported by this store."""
        raise NotImplementedError("vector_query is not implemented")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/graph_stores/llama-index-graph-stores-ApertureDB/llama_index/graph_stores/ApertureDB/property_graph.py",
"license": "MIT License",
"lines": 390,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/graph_stores/llama-index-graph-stores-ApertureDB/tests/test_pg_stores_ApertureDB.py | import aperturedb.CommonLibrary
from llama_index.core.graph_stores.types import Relation, EntityNode
from llama_index.graph_stores.ApertureDB import ApertureDBGraphStore
import aperturedb
import json
import pytest
@pytest.fixture
def create_store(monkeypatch):
    """Factory fixture yielding a store backed by a mocked ApertureDB connector.

    The returned factory takes ``data_exists``: when True, the mock answers
    FindEntity with one existing entity; when False, with no matches.  Every
    issued query is recorded on ``connector.queries`` for later inspection.
    """
    class MockConnector:
        # Minimal stand-in for aperturedb's connector object.
        def __init__(self, exists) -> None:
            self.exists = exists
            self.queries = []
            print(f"self.exists: {self.exists}")
        def clone(self):
            return self
        def query(self, *args, **kwargs):
            # Record the query, then answer FindEntity per `exists`.
            print("query called with args:", args, "and kwargs:", kwargs)
            self.queries.append(args[0])
            response, blobs = [], []
            if self.exists:
                response = [
                    {
                        "FindEntity": {
                            "returned": 1,
                            "status": 0,
                            "entities": [
                                {"id": "James", "name": "James", "label": "PERSON"}
                            ],
                        }
                    }
                ]
            else:
                response = [{"FindEntity": {"returned": 0, "status": 0}}]
            print("query response:", response)
            return response, blobs
        def last_query_ok(self):
            return True
        def get_last_response_str(self):
            return "response"
    def store_creator(data_exists):
        # Patch create_connector so the store connects to the mock instead
        # of a live ApertureDB instance.
        monkeypatch.setattr(
            aperturedb.CommonLibrary,
            "create_connector",
            lambda *args, **kwargs: MockConnector(data_exists),
        )
        return ApertureDBGraphStore()
    return store_creator
@pytest.fixture
def synthetic_data():
    """A small food graph: one person, two dishes, four ingredients, six relations."""
    entities = [
        EntityNode(label="PERSON", name="James"),
        EntityNode(label="DISH", name="Butter Chicken"),
        EntityNode(label="DISH", name="Scrambled Eggs"),
        EntityNode(label="INGREDIENT", name="Butter"),
        EntityNode(label="INGREDIENT", name="Chicken"),
        EntityNode(label="INGREDIENT", name="Eggs"),
        EntityNode(label="INGREDIENT", name="Salt"),
    ]
    # Person -> dishes, dishes -> their ingredients.
    relations = [
        Relation(
            label="EATS",
            source_id=entities[0].id,
            target_id=entities[1].id,
        ),
        Relation(
            label="EATS",
            source_id=entities[0].id,
            target_id=entities[2].id,
        ),
        Relation(
            label="CONTAINS",
            source_id=entities[1].id,
            target_id=entities[3].id,
        ),
        Relation(
            label="HAS",
            source_id=entities[1].id,
            target_id=entities[4].id,
        ),
        Relation(
            label="COMPRISED_OF",
            source_id=entities[2].id,
            target_id=entities[5].id,
        ),
        Relation(
            label="GOT",
            source_id=entities[2].id,
            target_id=entities[6].id,
        ),
    ]
    return entities, relations
def test_ApertureDB_pg_store_data_update(create_store, synthetic_data) -> None:
    """When entities already exist, upserts issue FindEntity + UpdateEntity pairs."""
    pg_store = create_store(True)
    entities, relations = synthetic_data
    pg_store.upsert_nodes(entities)
    pg_store.upsert_relations(relations)
    # 20 = 2 (PERSON) + 4 (DISH) + 8 (INGREDIENT) + 6 (relations)
    assert len(pg_store.client.queries) == 20, json.dumps(
        pg_store.client.queries, indent=2
    )
    # Check if the queries are correct, FindEntity followed by AddEntity.
    for i in range(14):
        q = pg_store.client.queries[i]
        assert len(q) == 1, json.dumps(q, indent=2)
        if i % 2 == 0:
            assert "FindEntity" in q[0]
        else:
            assert "UpdateEntity" in q[0]
    # Check if the queries are correct, FindEntity x 2 followed by AddConnection.
    for i in range(14, 20):
        q = pg_store.client.queries[i]
        assert len(q) == 3, json.dumps(q, indent=2)
        assert "FindEntity" in q[0]
        assert "FindEntity" in q[1]
        assert "AddConnection" in q[2]
def test_ApertureDB_pg_store_data_add(create_store, synthetic_data) -> None:
    """When entities are absent, upserts issue FindEntity + AddEntity pairs."""
    pg_store = create_store(False)
    entities, relations = synthetic_data
    pg_store.upsert_nodes(entities)
    pg_store.upsert_relations(relations)
    # 20 = 2 (PERSON) + 4 (DISH) + 8 (INGREDIENT) + 6 (relations)
    assert len(pg_store.client.queries) == 20, json.dumps(
        pg_store.client.queries, indent=2
    )
    # Check if the queries are correct, FindEntity followed by AddEntity.
    for i in range(14):
        q = pg_store.client.queries[i]
        assert len(q) == 1, json.dumps(q, indent=2)
        if i % 2 == 0:
            assert "FindEntity" in q[0]
        else:
            assert "AddEntity" in q[0]
    # Check if the queries are correct, FindEntity x 2 followed by AddConnection.
    for i in range(14, 20):
        q = pg_store.client.queries[i]
        assert len(q) == 3, json.dumps(q, indent=2)
        assert "FindEntity" in q[0]
        assert "FindEntity" in q[1]
        assert "AddConnection" in q[2]
def test_ApertureDB_pg_store_delete(create_store, synthetic_data) -> None:
    """Deleting by ids issues a single constrained DeleteEntity command."""
    entities, relations = synthetic_data
    pg_store = create_store(True)
    pg_store.upsert_nodes(entities)
    pg_store.upsert_relations(relations)
    # 20 = 2 (PERSON) + 4 (DISH) + 8 (INGREDIENT) + 6 (relations)
    assert len(pg_store.client.queries) == 20, json.dumps(
        pg_store.client.queries, indent=2
    )
    # Reset the recorder so only the delete traffic is inspected below.
    pg_store.client.queries = []
    pg_store.delete(ids=[e.id for e in entities])
    assert len(pg_store.client.queries) == 1, json.dumps(
        pg_store.client.queries, indent=2
    )
    assert "DeleteEntity" in pg_store.client.queries[0][0]
    assert "results" not in pg_store.client.queries[0][0]["DeleteEntity"]
    delete_query_constraints = pg_store.client.queries[0][0]["DeleteEntity"][
        "constraints"
    ]
    assert len(delete_query_constraints) == 1, json.dumps(
        delete_query_constraints, indent=2
    )
def test_ApertureDB_pg_store_structured_query(create_store, synthetic_data) -> None:
    """structured_query forwards exactly one raw command to the connector."""
    # NOTE(review): entities/relations are unpacked but unused in this test.
    entities, relations = synthetic_data
    pg_store = create_store(True)
    pg_store.structured_query(
        "FindEntity", {"constraints": [{"name": ["==", "James"]}]}
    )
    assert len(pg_store.client.queries) == 1, json.dumps(
        pg_store.client.queries, indent=2
    )
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/graph_stores/llama-index-graph-stores-ApertureDB/tests/test_pg_stores_ApertureDB.py",
"license": "MIT License",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/llama_index/core/program/streaming_utils.py | """
Simplified streaming utilities for processing structured outputs from message content.
This module provides utilities for processing streaming responses that contain
structured data directly in the message content (not in function calls).
"""
from typing import Optional, Type, Union
from pydantic import ValidationError
from llama_index.core.base.llms.types import ChatResponse
from llama_index.core.program.utils import (
FlexibleModel,
_repair_incomplete_json,
create_flexible_model,
)
from llama_index.core.types import Model
def process_streaming_content_incremental(
    chat_response: ChatResponse,
    output_cls: Type[Model],
    cur_object: Optional[Union[Model, FlexibleModel]] = None,
) -> Union[Model, FlexibleModel]:
    """
    Process streaming response content with true incremental list handling.

    This version can extract partial progress from incomplete JSON and build
    lists incrementally (e.g., 1 joke → 2 jokes → 3 jokes) rather than
    jumping from empty to complete lists.

    Args:
        chat_response (ChatResponse): The chat response to process
        output_cls (Type[BaseModel]): The target output class
        cur_object (Optional[BaseModel]): Current best object (for comparison)

    Returns:
        Union[BaseModel, FlexibleModel]: Processed object with incremental updates
    """
    partial_output_cls = create_flexible_model(output_cls)
    # Get content from message
    content = chat_response.message.content
    if not content:
        # Nothing streamed yet: keep whatever we had.
        return cur_object if cur_object is not None else partial_output_cls()
    # Parsing strategies, strongest first: as-is, JSON-repaired, regex salvage.
    # Every path below assigns a non-None object, so the original's trailing
    # `if parsed_obj is None` branch was unreachable and has been removed.
    try:
        parsed_obj = partial_output_cls.model_validate_json(content)
    except (ValidationError, ValueError):
        try:
            repaired_json = _repair_incomplete_json(content)
            parsed_obj = partial_output_cls.model_validate_json(repaired_json)
        except (ValidationError, ValueError):
            extracted_obj = _extract_partial_list_progress(
                content, output_cls, cur_object, partial_output_cls
            )
            parsed_obj = (
                extracted_obj if extracted_obj is not None else partial_output_cls()
            )
    # Promote to the strict output class when the partial data validates.
    try:
        return output_cls.model_validate(parsed_obj.model_dump(exclude_unset=True))
    except ValidationError:
        return parsed_obj
def _extract_partial_list_progress(
    content: str,
    output_cls: Type[Model],
    cur_object: Optional[Union[Model, FlexibleModel]],
    partial_output_cls: Type[FlexibleModel],
) -> Optional[FlexibleModel]:
    """
    Attempt to salvage partial list progress from malformed JSON content.

    Builds on ``cur_object`` by scanning for ``"field": [...`` fragments and
    merging any parseable list items into the current object's data.
    Returns None when nothing useful could be extracted.
    """
    if not isinstance(content, str) or cur_object is None:
        return None
    try:
        import re

        # Matches fragments like: "jokes": [{"setup": "...", "punchline": "..."}
        field_matches = re.findall(r'"(\w+)":\s*\[([^\]]*)', content)
        if not field_matches:
            return None
        # Start from the current object's data and layer list progress on top.
        merged = (
            cur_object.model_dump() if hasattr(cur_object, "model_dump") else {}
        )
        for field_name, raw_items in field_matches:
            if not (
                hasattr(output_cls, "model_fields")
                and field_name in output_cls.model_fields
            ):
                continue
            parsed_items = _parse_partial_list_items(
                raw_items, field_name, output_cls
            )
            if parsed_items:
                merged[field_name] = parsed_items
        return partial_output_cls.model_validate(merged)
    except Exception:
        return None
def _parse_partial_list_items(
list_content: str, field_name: str, output_cls: Type[Model]
) -> list:
"""
Parse individual items from partial list content.
"""
try:
import json
import re
items = []
# Look for complete object patterns within the list
# Pattern: {"key": "value", "key2": "value2"}
object_pattern = r"\{[^{}]*\}"
object_matches = re.findall(object_pattern, list_content)
for obj_str in object_matches:
try:
# Try to parse as complete JSON object
obj_data = json.loads(obj_str)
items.append(obj_data)
except (json.JSONDecodeError, SyntaxError):
# Try to repair and parse
try:
repaired = _repair_incomplete_json(obj_str)
obj_data = json.loads(repaired)
items.append(obj_data)
except (json.JSONDecodeError, SyntaxError):
continue
return items
except Exception:
return []
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/llama_index/core/program/streaming_utils.py",
"license": "MIT License",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-core/tests/program/test_streaming_utils.py | """Test streaming utilities."""
from typing import List, Optional
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.base.llms.types import ChatMessage, ChatResponse, MessageRole
from llama_index.core.program.streaming_utils import (
process_streaming_content_incremental,
_extract_partial_list_progress,
_parse_partial_list_items,
)
class Joke(BaseModel):
    """Test joke model."""
    setup: str
    punchline: Optional[str] = None  # may arrive after the setup mid-stream
class Show(BaseModel):
    """Test show model with jokes list."""
    title: str = ""
    jokes: List[Joke] = Field(default_factory=list)  # grows incrementally
class Person(BaseModel):
    """Test person model."""

    # Name is the only required field.
    name: str
    age: Optional[int] = None
    # default_factory avoids a shared mutable default across instances.
    hobbies: List[str] = Field(default_factory=list)
def test_process_streaming_content_incremental_complete_json():
    """A fully-formed JSON payload validates straight into Person."""
    resp = ChatResponse(
        message=ChatMessage(
            role=MessageRole.ASSISTANT,
            content='{"name": "John", "age": 30, "hobbies": ["reading", "coding"]}',
        )
    )
    parsed = process_streaming_content_incremental(resp, Person)
    assert isinstance(parsed, Person)
    assert parsed.name == "John"
    assert parsed.age == 30
    assert parsed.hobbies == ["reading", "coding"]
def test_process_streaming_content_incremental_incomplete_json():
    """Truncated JSON must not raise; a partial object comes back."""
    resp = ChatResponse(
        message=ChatMessage(
            role=MessageRole.ASSISTANT,
            content='{"name": "John", "age": 30',
        )
    )
    parsed = process_streaming_content_incremental(resp, Person)
    # Not raising above is the main point; the partial still carries a name.
    assert hasattr(parsed, "name")
def test_process_streaming_content_incremental_with_current_object():
    """Newer streamed content overrides fields of the existing object."""
    existing = Person(name="Jane", age=25)
    resp = ChatResponse(
        message=ChatMessage(
            role=MessageRole.ASSISTANT,
            content='{"name": "John", "age": 30}',
        )
    )
    updated = process_streaming_content_incremental(resp, Person, existing)
    assert isinstance(updated, Person)
    assert updated.name == "John"
    assert updated.age == 30
def test_process_streaming_content_incremental_empty_content():
    """Empty content yields a FlexibleModel placeholder instead of failing."""
    resp = ChatResponse(
        message=ChatMessage(role=MessageRole.ASSISTANT, content="")
    )
    parsed = process_streaming_content_incremental(resp, Person)
    # A FlexibleModel instance is expected when there is nothing to parse.
    assert hasattr(parsed, "__dict__")
def test_process_streaming_content_incremental_with_list():
    """Nested list-of-model fields parse into their element type."""
    payload = (
        '{"title": "Comedy Show", "jokes": '
        '[{"setup": "Why did the chicken cross the road?", '
        '"punchline": "To get to the other side!"}]}'
    )
    resp = ChatResponse(
        message=ChatMessage(role=MessageRole.ASSISTANT, content=payload)
    )
    show = process_streaming_content_incremental(resp, Show)
    assert isinstance(show, Show)
    assert show.title == "Comedy Show"
    assert len(show.jokes) == 1
    first_joke = show.jokes[0]
    assert first_joke.setup == "Why did the chicken cross the road?"
    assert first_joke.punchline == "To get to the other side!"
def test_process_streaming_content_incremental_malformed_json():
    """Malformed JSON plus an existing object should still expose jokes."""
    existing = Show(
        title="Comedy Show",
        jokes=[Joke(setup="First joke", punchline="First punchline")],
    )
    resp = ChatResponse(
        message=ChatMessage(
            role=MessageRole.ASSISTANT,
            content='{"jokes": [{"setup": "Second joke", "punchline": "Second punchline"}',
        )
    )
    merged = process_streaming_content_incremental(resp, Show, existing)
    # Partial-progress extraction should keep a jokes attribute around.
    assert hasattr(merged, "jokes")
def test_extract_partial_list_progress_valid():
    """When extraction succeeds on valid-ish content, jokes are present."""
    from llama_index.core.program.utils import create_flexible_model

    partial = '{"jokes": [{"setup": "Why did the chicken cross the road?", "punchline": "To get to the other side!"}]'
    show = Show(title="Comedy Show")
    flexible_cls = create_flexible_model(Show)
    extracted = _extract_partial_list_progress(partial, Show, show, flexible_cls)
    # Extraction is best-effort; only assert on success.
    if extracted is not None:
        assert hasattr(extracted, "jokes")
def test_extract_partial_list_progress_no_current():
    """Without a current object there is nothing to merge into: None."""
    from llama_index.core.program.utils import create_flexible_model

    partial = '{"jokes": [{"setup": "Why did the chicken cross the road?"}]'
    flexible_cls = create_flexible_model(Show)
    assert _extract_partial_list_progress(partial, Show, None, flexible_cls) is None
def test_extract_partial_list_progress_invalid_content():
    """Unparseable content short-circuits to None."""
    from llama_index.core.program.utils import create_flexible_model

    show = Show(title="Comedy Show")
    flexible_cls = create_flexible_model(Show)
    extracted = _extract_partial_list_progress(
        "invalid json content", Show, show, flexible_cls
    )
    assert extracted is None
def test_parse_partial_list_items_complete_objects():
    """Two complete objects in the stream parse into two dicts, in order."""
    stream = '{"setup": "Why did the chicken cross the road?", "punchline": "To get to the other side!"}, {"setup": "Second joke", "punchline": "Second punchline"}'
    parsed = _parse_partial_list_items(stream, "jokes", Show)
    assert isinstance(parsed, list)
    assert len(parsed) == 2
    first_item, second_item = parsed
    assert first_item["setup"] == "Why did the chicken cross the road?"
    assert first_item["punchline"] == "To get to the other side!"
    assert second_item["setup"] == "Second joke"
    assert second_item["punchline"] == "Second punchline"
def test_parse_partial_list_items_incomplete_objects():
    """A trailing truncated object is tolerated; complete ones survive."""
    stream = '{"setup": "Why did the chicken cross the road?", "punchline": "To get to the other side!"}, {"setup": "Second joke"'
    parsed = _parse_partial_list_items(stream, "jokes", Show)
    assert isinstance(parsed, list)
    # At minimum the fully-closed first object must be recovered.
    assert len(parsed) >= 1
    assert parsed[0]["setup"] == "Why did the chicken cross the road?"
def test_parse_partial_list_items_invalid_content():
    """Content with no JSON objects at all yields an empty list."""
    parsed = _parse_partial_list_items("completely invalid content", "jokes", Show)
    assert isinstance(parsed, list)
    assert not parsed
def test_parse_partial_list_items_empty_content():
    """The empty string yields an empty list."""
    parsed = _parse_partial_list_items("", "jokes", Show)
    assert isinstance(parsed, list)
    assert not parsed
def test_parse_partial_list_items_malformed_json():
    """Malformed trailing junk is ignored; the valid object is kept."""
    stream = '{"setup": "Why did the chicken cross the road?", "punchline": "To get to the other side!"}, {"setup": "Second joke", invalid'
    parsed = _parse_partial_list_items(stream, "jokes", Show)
    assert isinstance(parsed, list)
    # The well-formed object must survive the malformed neighbour.
    assert len(parsed) >= 1
    assert parsed[0]["setup"] == "Why did the chicken cross the road?"
def test_process_streaming_content_incremental_none_message():
    """content=None behaves like empty content: a flexible placeholder."""
    resp = ChatResponse(
        message=ChatMessage(role=MessageRole.ASSISTANT, content=None)
    )
    parsed = process_streaming_content_incremental(resp, Person)
    assert hasattr(parsed, "__dict__")
def test_process_streaming_content_incremental_progressive_list_building():
    """Each streamed chunk may grow the jokes list without losing items."""
    show = Show(title="Comedy Show", jokes=[])

    # Chunk one carries a single joke.
    chunk_one = '{"title": "Comedy Show", "jokes": [{"setup": "First joke", "punchline": "First punchline"}]}'
    step_one = process_streaming_content_incremental(
        ChatResponse(
            message=ChatMessage(role=MessageRole.ASSISTANT, content=chunk_one)
        ),
        Show,
        show,
    )
    assert len(step_one.jokes) == 1
    assert step_one.jokes[0].setup == "First joke"

    # Chunk two repeats the first joke and appends a second one.
    chunk_two = '{"title": "Comedy Show", "jokes": [{"setup": "First joke", "punchline": "First punchline"}, {"setup": "Second joke", "punchline": "Second punchline"}]}'
    step_two = process_streaming_content_incremental(
        ChatResponse(
            message=ChatMessage(role=MessageRole.ASSISTANT, content=chunk_two)
        ),
        Show,
        step_one,
    )
    assert len(step_two.jokes) == 2
    assert step_two.jokes[1].setup == "Second joke"
def test_process_streaming_content_incremental_validation_error_fallback():
    """Unknown fields break strict validation; the flexible model is kept."""
    resp = ChatResponse(
        message=ChatMessage(
            role=MessageRole.ASSISTANT,
            content='{"name": "John", "unknown_field": "value"}',
        )
    )
    parsed = process_streaming_content_incremental(resp, Person)
    assert hasattr(parsed, "name")
    # When the name survived the fallback it must match the payload.
    if hasattr(parsed, "name"):
        assert parsed.name == "John"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/program/test_streaming_utils.py",
"license": "MIT License",
"lines": 211,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/storage/kvstore/test_mutable_mapping_kvstore.py | import pytest
from llama_index.core.storage.kvstore.types import MutableMappingKVStore
from llama_index.core.storage.kvstore.simple_kvstore import SimpleKVStore
def test_simple_kvstore():
    """SimpleKVStore is a MutableMappingKVStore and round-trips a value."""
    store = SimpleKVStore()
    assert isinstance(store, MutableMappingKVStore)
    store.put(key="foo", val={"foo": "bar"})
    assert store.get_all() == {"foo": {"foo": "bar"}}
def test_sync_methods():
    """put/get/delete/get_all behave like a plain mapping."""
    store = MutableMappingKVStore(dict)
    store.put(key="foo", val={"foo": "bar"})
    assert store.get("foo") == {"foo": "bar"}
    # Missing keys come back as None, not KeyError.
    assert store.get("bar") is None
    store.put(key="bar", val={"bar": "foo"})
    assert store.get_all() == {"foo": {"foo": "bar"}, "bar": {"bar": "foo"}}
    store.delete(key="bar")
    assert store.get_all() == {"foo": {"foo": "bar"}}
@pytest.mark.asyncio
async def test_async_methods():
    """The async mirror methods match the sync behaviour."""
    store = MutableMappingKVStore(dict)
    await store.aput(key="foo", val={"foo": "bar"})
    assert await store.aget("foo") == {"foo": "bar"}
    # Missing keys come back as None, not KeyError.
    assert await store.aget("bar") is None
    await store.aput(key="bar", val={"bar": "foo"})
    assert await store.aget_all() == {
        "foo": {"foo": "bar"},
        "bar": {"bar": "foo"},
    }
    await store.adelete(key="bar")
    assert await store.aget_all() == {"foo": {"foo": "bar"}}
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/storage/kvstore/test_mutable_mapping_kvstore.py",
"license": "MIT License",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/schema/test_image_document.py | import httpx
import pytest
from pathlib import Path
from llama_index.core.schema import ImageDocument
@pytest.fixture()
def image_url() -> str:
    """Return the URL of a known-good PNG used by the tests below."""
    # NOTE(review): these tests hit the network — flaky offline; confirm CI allows it.
    return "https://astrabert.github.io/hophop-science/images/whale_doing_science.png"
def test_real_image_path(tmp_path: Path, image_url: str) -> None:
    """A real PNG written to disk constructs an ImageDocument."""
    content = httpx.get(image_url).content
    fl_path = tmp_path / "test_image.png"
    fl_path.write_bytes(content)
    # str(path) is the idiomatic spelling of path.__str__().
    doc = ImageDocument(image_path=str(fl_path))
    assert isinstance(doc, ImageDocument)
def test_real_image_url(image_url: str) -> None:
    """Constructing from a reachable image URL succeeds."""
    document = ImageDocument(image_url=image_url)
    assert isinstance(document, ImageDocument)
def test_non_image_path(tmp_path: Path) -> None:
    """A path to a non-image file is rejected with ValueError."""
    fl_path = tmp_path / "test_file.txt"
    fl_path.write_text("Hello world!")
    with pytest.raises(expected_exception=ValueError):
        # Construction itself raises; binding the result was dead code (F841).
        ImageDocument(image_path=str(fl_path))
def test_non_image_url(image_url: str) -> None:
    """A URL that does not point at an image is rejected with ValueError."""
    bad_url = image_url.replace("png", "txt")
    with pytest.raises(expected_exception=ValueError):
        ImageDocument(image_url=bad_url)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/schema/test_image_document.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-measurespace/examples/example.py | from llama_index.tools.measurespace import MeasureSpaceToolSpec
import asyncio

from llama_index.core.agent.workflow import FunctionAgent
from llama_index.llms.openai import OpenAI
from dotenv import load_dotenv
import os

load_dotenv()

# One API key per MeasureSpace service; see measurespace.io/pricing.
api_keys = {
    'hourly_weather': os.getenv('HOURLY_WEATHER_API_KEY'),
    'daily_weather': os.getenv('DAILY_WEATHER_API_KEY'),
    'daily_climate': os.getenv('DAILY_CLIMATE_API_KEY'),
    'air_quality': os.getenv('AIR_QUALITY_API_KEY'),
    'geocoding': os.getenv('GEOCODING_API_KEY'),
}

tool_spec = MeasureSpaceToolSpec(api_keys)
agent = FunctionAgent(
    tools=tool_spec.to_tool_list(),
    llm=OpenAI(model="gpt-4.1"),
)


async def main() -> None:
    """Run two example agent queries; `await` is only legal in a coroutine."""
    print(
        await agent.run("How's the temperature for New York in next 3 days?")
    )
    print(
        await agent.run("What's the latitude and longitude of New York?")
    )


if __name__ == "__main__":
    # BUG FIX: the original used `await` at module top level, which is a
    # SyntaxError in a plain script. asyncio.run drives the coroutine.
    asyncio.run(main())
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-measurespace/examples/example.py",
"license": "MIT License",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-measurespace/llama_index/tools/measurespace/base.py | """Measure Space tool spec."""
from typing import List, Dict
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class MeasureSpaceToolSpec(BaseToolSpec):
    """Measure Space tool spec.

    Exposes weather, climate, air-quality and geocoding lookups from the
    measure-space-api client as LlamaIndex tools.
    """

    spec_functions = [
        "get_hourly_weather_forecast",
        "get_daily_weather_forecast",
        "get_daily_climate_forecast",
        "get_daily_air_quality_forecast",
        "get_latitude_longitude_from_location",
        "get_location_from_latitude_longitude",
    ]

    def __init__(self, api_keys: Dict[str, str], unit: str = "metric") -> None:
        """
        Initialize with parameters.

        Args:
            api_keys (Dict[str, str]): one API key per service, keyed by
                'hourly_weather', 'daily_weather', 'daily_climate',
                'air_quality' and 'geocoding'.
            unit (str): unit system used when formatting variables
                (default 'metric').

        Raises:
            ImportError: if the measure-space-api package is not installed.
        """
        try:
            import measure_space_api as msa
        except ImportError:
            raise ImportError(
                "The Measure Space tool requires the measure-space-api package to be installed. "
                "Please install it using `pip install measure-space-api`."
            )
        self.api_keys = api_keys
        self.unit = unit
        self.msa = msa

    def _get_api_key(self, api_name: str):
        """
        Get API keys.

        Args:
            api_name (str): API service name

        Raises:
            ValueError: if no key was configured for the service.
        """
        api_key = self.api_keys.get(api_name)
        if not api_key:
            raise ValueError(
                f"API key is required for {api_name} service. Please get your API key from measurespace.io/pricing."
            )
        return api_key

    def _format_output(self, wx: Dict) -> List[str]:
        """
        Format output to a list of string with the following format.

        ['total precipitation: 1 mm, wind speed: 10 m/s', ...]
        One entry per timestamp in ``wx['time']``.

        Args:
            wx (Dict): API output in json format
        """
        wx_list = []
        for i in range(len(wx["time"])):
            parts = []
            for key, value in wx.items():
                if key != "time":
                    a_name, a_unit = self.msa.get_metadata(key, self.unit)
                    parts.append(f"{a_name}: {value[i]} {a_unit}")
            if parts:
                wx_list.append(",".join(parts))
        return wx_list

    def _forecast_documents(
        self,
        fetcher,
        api_name: str,
        location: str,
        variables: str,
        location_label: str,
        time_label: str,
    ) -> List[Document]:
        """
        Shared fetch/format pipeline for the four forecast endpoints.

        Args:
            fetcher: measure_space_api function with signature
                (api_key, geocoding_api_key, location, params).
            api_name (str): key into ``self.api_keys`` for the service.
            location (str): location name.
            variables (str): comma-separated variable codes to request.
            location_label (str): metadata key describing the location.
            time_label (str): metadata key describing the timestamp.
        """
        api_key = self._get_api_key(api_name)
        geocoding_api_key = self._get_api_key("geocoding")
        wx = fetcher(
            api_key,
            geocoding_api_key,
            location,
            {"variables": variables},
        )
        # Coordinates describe the request, not the forecast variables.
        for coord in ("latitude", "longitude"):
            wx.pop(coord, None)
        lines = self._format_output(wx)
        return [
            Document(
                text=lines[i],
                metadata={location_label: location, time_label: wx["time"][i]},
            )
            for i in range(len(wx["time"]))
        ]

    def get_hourly_weather_forecast(self, location: str) -> List[Document]:
        """
        Get hourly weather forecast for given location.

        Args:
            location (str): location name
        """
        return self._forecast_documents(
            self.msa.get_hourly_weather,
            "hourly_weather",
            location,
            "tp, t2m, windSpeed, windDegree, r2",
            "Hourly weather for location",
            "Date and time",
        )

    def get_daily_weather_forecast(self, location: str) -> List[Document]:
        """
        Get daily weather forecast for given location.

        Args:
            location (str): location name
        """
        return self._forecast_documents(
            self.msa.get_daily_weather,
            "daily_weather",
            location,
            "tp, minT, maxT, meanwindSpeed, meanwindDegree, meanRH",
            "Daily weather for location",
            "Date",
        )

    def get_daily_climate_forecast(self, location: str) -> List[Document]:
        """
        Get daily climate forecast for given location.

        Args:
            location (str): location name
        """
        return self._forecast_documents(
            self.msa.get_daily_climate,
            "daily_climate",
            location,
            "t2m, tmin, tmax, sh2",
            "Daily climate for location",
            "Date",
        )

    def get_daily_air_quality_forecast(self, location: str) -> List[Document]:
        """
        Get daily air quality forecast for given location.

        Args:
            location (str): location name
        """
        # BUG FIX: the key was previously looked up as "daily_air_quality",
        # but the documented api_keys mapping (see package example) uses
        # "air_quality", so this method always raised ValueError.
        return self._forecast_documents(
            self.msa.get_daily_air_quality,
            "air_quality",
            location,
            "AQI, maxPM10, maxPM25",
            "Daily air quality for location",
            "Date",
        )

    def get_latitude_longitude_from_location(self, location: str) -> List[Document]:
        """
        Get latitude and longitude from given location.

        Args:
            location (str): location name
        """
        api_key = self._get_api_key("geocoding")
        latitude, longitude = self.msa.get_lat_lon_from_city(
            api_key=api_key, location_name=location
        )
        return [
            Document(
                text=f"latitude: {latitude}, longitude: {longitude}",
                metadata={"Latitude and longitude for location": location},
            )
        ]

    def get_location_from_latitude_longitude(
        self, latitude: float, longitude: float
    ) -> List[Document]:
        """
        Get nearest location name from given latitude and longitude.

        Args:
            latitude (float): latitude
            longitude (float): longitude
        """
        api_key = self._get_api_key("geocoding")
        res = self.msa.get_city_from_lat_lon(api_key, latitude, longitude)
        return [
            Document(
                text=f"Location name: {res}",
                # BUG FIX: Document.metadata must be a mapping; a bare string
                # was passed here before, matching no other method in this class.
                metadata={
                    "Description": "Nearest location for given longitude and latitude"
                },
            )
        ]
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-measurespace/llama_index/tools/measurespace/base.py",
"license": "MIT License",
"lines": 216,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-ibm/tests/test_tool_required.py | from unittest.mock import MagicMock, patch
from llama_index.core.tools import FunctionTool
from llama_index.llms.ibm import WatsonxLLM
def search(query: str) -> str:
    """Search for information about a query."""
    return "Results for {}".format(query)
# Sum tool built from an inline lambda; exercises multi-tool selection.
calculator_tool = FunctionTool.from_defaults(
    fn=lambda a, b: a + b,
    name="calculator",
    description="A tool for calculating the sum of two numbers",
)
# Search tool wrapping the module-level `search` helper above.
search_tool = FunctionTool.from_defaults(
    fn=search, name="search_tool", description="A tool for searching information"
)
@patch("llama_index.llms.ibm.base.ModelInference")
@patch("llama_index.llms.ibm.base.resolve_watsonx_credentials")
def test_prepare_chat_with_tools_tool_required_single_tool(
    mock_resolve_credentials, MockModelInference
):
    """Test that tool_required selects the first tool when there's only one tool."""
    mock_resolve_credentials.return_value = {}
    # NOTE: the unused `mock_instance = MockModelInference.return_value`
    # binding was removed; the patch alone is what matters here.
    llm = WatsonxLLM(
        model_id="test_model",
        project_id="test_project",
        apikey="test_apikey",
        url="https://test-url.com",
        api_client=MagicMock(),  # Use mock client to bypass credential checks
    )
    # Test with tool_required=True and a single tool
    result = llm._prepare_chat_with_tools(tools=[search_tool], tool_required=True)
    assert "tool_choice" in result
    assert result["tool_choice"]["type"] == "function"
    assert result["tool_choice"]["function"]["name"] == "search_tool"
    assert len(result["tools"]) == 1
    assert result["tools"][0]["function"]["name"] == "search_tool"
@patch("llama_index.llms.ibm.base.ModelInference")
@patch("llama_index.llms.ibm.base.resolve_watsonx_credentials")
def test_prepare_chat_with_tools_tool_required_multiple_tools(
    mock_resolve_credentials, MockModelInference
):
    """Test that tool_required selects the first tool when there are multiple tools."""
    mock_resolve_credentials.return_value = {}
    # NOTE: unused `mock_instance` binding removed (dead local).
    llm = WatsonxLLM(
        model_id="test_model",
        project_id="test_project",
        apikey="test_apikey",
        url="https://test-url.com",
        api_client=MagicMock(),  # Use mock client to bypass credential checks
    )
    # Test with tool_required=True and multiple tools
    result = llm._prepare_chat_with_tools(
        tools=[search_tool, calculator_tool], tool_required=True
    )
    assert "tool_choice" in result
    assert result["tool_choice"]["type"] == "function"
    # It should select the first tool when tool_required=True
    assert result["tool_choice"]["function"]["name"] == "search_tool"
    assert len(result["tools"]) == 2
@patch("llama_index.llms.ibm.base.ModelInference")
@patch("llama_index.llms.ibm.base.resolve_watsonx_credentials")
def test_prepare_chat_with_tools_tool_not_required(
    mock_resolve_credentials, MockModelInference
):
    """Test that tool_required=False doesn't specify a tool choice."""
    mock_resolve_credentials.return_value = {}
    # NOTE: unused `mock_instance` binding removed (dead local).
    llm = WatsonxLLM(
        model_id="test_model",
        project_id="test_project",
        apikey="test_apikey",
        url="https://test-url.com",
        api_client=MagicMock(),  # Use mock client to bypass credential checks
    )
    # Test with tool_required=False (default)
    result = llm._prepare_chat_with_tools(
        tools=[search_tool, calculator_tool],
    )
    # When tool_required=False, there should be no tool_choice specified
    assert "tool_choice" not in result
    assert len(result["tools"]) == 2
@patch("llama_index.llms.ibm.base.ModelInference")
@patch("llama_index.llms.ibm.base.resolve_watsonx_credentials")
def test_prepare_chat_with_tools_explicit_tool_choice(
    mock_resolve_credentials, MockModelInference
):
    """Test that an explicit tool_choice overrides tool_required."""
    mock_resolve_credentials.return_value = {}
    # NOTE: unused `mock_instance` binding removed (dead local).
    llm = WatsonxLLM(
        model_id="test_model",
        project_id="test_project",
        apikey="test_apikey",
        url="https://test-url.com",
        api_client=MagicMock(),  # Use mock client to bypass credential checks
    )
    # Test with explicit tool_choice parameter, which should override tool_required
    result = llm._prepare_chat_with_tools(
        tools=[search_tool, calculator_tool],
        tool_required=True,
        tool_choice="calculator",
    )
    assert "tool_choice" in result
    assert result["tool_choice"]["type"] == "function"
    assert result["tool_choice"]["function"]["name"] == "calculator"
    assert len(result["tools"]) == 2
@patch("llama_index.llms.ibm.base.ModelInference")
@patch("llama_index.llms.ibm.base.resolve_watsonx_credentials")
def test_prepare_chat_with_tools_no_tools(mock_resolve_credentials, MockModelInference):
    """Test that tool_required=True with no tools doesn't add a tool_choice."""
    mock_resolve_credentials.return_value = {}
    # NOTE: unused `mock_instance` binding removed (dead local).
    llm = WatsonxLLM(
        model_id="test_model",
        project_id="test_project",
        apikey="test_apikey",
        url="https://test-url.com",
        api_client=MagicMock(),  # Use mock client to bypass credential checks
    )
    # Test with tool_required=True but no tools
    result = llm._prepare_chat_with_tools(tools=[], tool_required=True)
    # When there are no tools, tool_choice should not be specified even if tool_required=True
    assert "tool_choice" not in result
    assert result["tools"] is None
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-ibm/tests/test_tool_required.py",
"license": "MIT License",
"lines": 125,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-vertex/tests/test_tool_required.py | from unittest.mock import Mock, patch
from llama_index.core.tools import FunctionTool
from llama_index.llms.vertex import Vertex
from vertexai.generative_models import ToolConfig
def search(query: str) -> str:
    """Search for information about a query."""
    return "Results for " + query
def calculate(a: int, b: int) -> int:
    """Calculate the sum of two numbers."""
    total = a + b
    return total
# Tool wrapping the module-level `search` helper above.
search_tool = FunctionTool.from_defaults(
    fn=search, name="search_tool", description="A tool for searching information"
)
# Tool wrapping the module-level `calculate` helper above.
calculator_tool = FunctionTool.from_defaults(
    fn=calculate,
    name="calculator",
    description="A tool for calculating the sum of two numbers",
)
class TestVertexToolRequired:
    """Test suite for Vertex AI tool_required functionality."""

    # NOTE(review): assertions below inspect str(ToolConfig) for "mode: ANY"/
    # "mode: AUTO" because direct attribute access on the proto wrapper is
    # unreliable — keep the string checks in sync with the proto repr.

    @patch("llama_index.llms.vertex.gemini_utils.create_gemini_client")
    def test_to_function_calling_config_tool_required_true(self, mock_create_client):
        """Test that _to_function_calling_config correctly sets mode to ANY when tool_required=True."""
        mock_client = Mock()
        mock_create_client.return_value = mock_client
        llm = Vertex(model="gemini-pro", project="test-project")
        config = llm._to_function_calling_config(tool_required=True)
        # Check config mode through string representation since direct attribute access is problematic
        config_str = str(config)
        assert isinstance(config, ToolConfig)
        assert "mode: ANY" in config_str

    @patch("llama_index.llms.vertex.gemini_utils.create_gemini_client")
    def test_to_function_calling_config_tool_required_false(self, mock_create_client):
        """Test that _to_function_calling_config correctly sets mode to AUTO when tool_required=False."""
        mock_client = Mock()
        mock_create_client.return_value = mock_client
        llm = Vertex(model="gemini-pro", project="test-project")
        config = llm._to_function_calling_config(tool_required=False)
        # Check config mode through string representation
        config_str = str(config)
        assert isinstance(config, ToolConfig)
        assert "mode: AUTO" in config_str

    @patch("llama_index.llms.vertex.gemini_utils.create_gemini_client")
    def test_prepare_chat_with_tools_tool_required_gemini(self, mock_create_client):
        """Test that tool_required is correctly passed to tool_config for Gemini models."""
        mock_client = Mock()
        mock_create_client.return_value = mock_client
        llm = Vertex(model="gemini-pro", project="test-project")
        # Test with tool_required=True
        result = llm._prepare_chat_with_tools(tools=[search_tool], tool_required=True)
        # Verify tool_config mode using string representation
        tool_config_str = str(result["tool_config"])
        assert "tool_config" in result
        assert isinstance(result["tool_config"], ToolConfig)
        assert "mode: ANY" in tool_config_str
        assert len(result["tools"]) == 1
        assert result["tools"][0]["name"] == "search_tool"

    @patch("llama_index.llms.vertex.gemini_utils.create_gemini_client")
    def test_prepare_chat_with_tools_tool_not_required_gemini(self, mock_create_client):
        """Test that tool_required=False correctly sets mode to AUTO for Gemini models."""
        mock_client = Mock()
        mock_create_client.return_value = mock_client
        llm = Vertex(model="gemini-pro", project="test-project")
        # Test with tool_required=False
        result = llm._prepare_chat_with_tools(tools=[search_tool], tool_required=False)
        # Verify tool_config mode using string representation
        tool_config_str = str(result["tool_config"])
        assert "tool_config" in result
        assert isinstance(result["tool_config"], ToolConfig)
        assert "mode: AUTO" in tool_config_str
        assert len(result["tools"]) == 1
        assert result["tools"][0]["name"] == "search_tool"

    @patch("llama_index.llms.vertex.gemini_utils.create_gemini_client")
    def test_prepare_chat_with_tools_default_behavior_gemini(self, mock_create_client):
        """Test default behavior when tool_required is not specified for Gemini models."""
        mock_client = Mock()
        mock_create_client.return_value = mock_client
        llm = Vertex(model="gemini-pro", project="test-project")
        # Test without specifying tool_required (should default to False)
        result = llm._prepare_chat_with_tools(tools=[search_tool])
        # Verify tool_config mode using string representation
        tool_config_str = str(result["tool_config"])
        assert "tool_config" in result
        assert isinstance(result["tool_config"], ToolConfig)
        # Should default to AUTO when tool_required=False (default)
        assert "mode: AUTO" in tool_config_str
        assert len(result["tools"]) == 1
        assert result["tools"][0]["name"] == "search_tool"

    @patch("llama_index.llms.vertex.gemini_utils.create_gemini_client")
    def test_prepare_chat_with_tools_multiple_tools_gemini(self, mock_create_client):
        """Test tool_required with multiple tools for Gemini models."""
        mock_client = Mock()
        mock_create_client.return_value = mock_client
        llm = Vertex(model="gemini-pro", project="test-project")
        # Test with tool_required=True and multiple tools
        result = llm._prepare_chat_with_tools(
            tools=[search_tool, calculator_tool], tool_required=True
        )
        # Verify tool_config mode using string representation
        tool_config_str = str(result["tool_config"])
        assert "tool_config" in result
        assert isinstance(result["tool_config"], ToolConfig)
        assert "mode: ANY" in tool_config_str
        assert len(result["tools"]) == 2
        tool_names = [tool["name"] for tool in result["tools"]]
        assert "search_tool" in tool_names
        assert "calculator" in tool_names

    @patch("vertexai.language_models.TextGenerationModel.from_pretrained")
    @patch("vertexai.language_models.ChatModel.from_pretrained")
    def test_prepare_chat_with_tools_non_gemini_no_tool_config(
        self, mock_chat_from_pretrained, mock_text_from_pretrained
    ):
        """Test that non-Gemini models don't include tool_config regardless of tool_required."""
        mock_chat_client = Mock()
        mock_text_client = Mock()
        mock_chat_from_pretrained.return_value = mock_chat_client
        mock_text_from_pretrained.return_value = mock_text_client
        # Use a non-Gemini model name
        llm = Vertex(model="text-bison", project="test-project")
        # Test with tool_required=True for non-Gemini model
        result = llm._prepare_chat_with_tools(tools=[search_tool], tool_required=True)
        # Non-Gemini models should not have tool_config
        assert "tool_config" not in result
        assert len(result["tools"]) == 1
        assert result["tools"][0]["name"] == "search_tool"
        # Test with tool_required=False for non-Gemini model
        result = llm._prepare_chat_with_tools(tools=[search_tool], tool_required=False)
        # Non-Gemini models should not have tool_config
        assert "tool_config" not in result
        assert len(result["tools"]) == 1
        assert result["tools"][0]["name"] == "search_tool"

    @patch("llama_index.llms.vertex.gemini_utils.create_gemini_client")
    def test_prepare_chat_with_tools_no_tools_gemini(self, mock_create_client):
        """Test tool behavior when no tools are provided for Gemini models."""
        mock_client = Mock()
        mock_create_client.return_value = mock_client
        llm = Vertex(model="gemini-pro", project="test-project")
        # Test with tool_required=True but no tools
        result = llm._prepare_chat_with_tools(tools=[], tool_required=True)
        # Verify tool_config mode using string representation
        tool_config_str = str(result["tool_config"])
        # The current implementation still includes tool_config even with no tools if tool_required=True
        assert "tool_config" in result
        assert isinstance(result["tool_config"], ToolConfig)
        assert "mode: ANY" in tool_config_str
        assert result["tools"] is None

    @patch("llama_index.llms.vertex.gemini_utils.create_gemini_client")
    def test_prepare_chat_with_tools_with_kwargs_gemini(self, mock_create_client):
        """Test that additional kwargs are preserved when using tool_required for Gemini models."""
        mock_client = Mock()
        mock_create_client.return_value = mock_client
        llm = Vertex(model="gemini-pro", project="test-project")
        # Test with tool_required=True and additional kwargs
        result = llm._prepare_chat_with_tools(
            tools=[search_tool], tool_required=True, temperature=0.7, max_tokens=1000
        )
        # Verify tool_config mode using string representation
        tool_config_str = str(result["tool_config"])
        assert "tool_config" in result
        assert isinstance(result["tool_config"], ToolConfig)
        assert "mode: ANY" in tool_config_str
        assert len(result["tools"]) == 1
        assert result["tools"][0]["name"] == "search_tool"
        # kwargs must pass through untouched
        assert result["temperature"] == 0.7
        assert result["max_tokens"] == 1000
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-vertex/tests/test_tool_required.py",
"license": "MIT License",
"lines": 165,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/embeddings/test_with_cache.py | import pytest
from llama_index.core import MockEmbedding
from llama_index.core.storage.kvstore import SimpleKVStore
from unittest.mock import patch
# MockEmbedding with embed_dim=4 always yields this vector.
expected_embedding = [0.5, 0.5, 0.5, 0.5]


def custom_embeddings(texts):
    """Return a deterministic, per-text embedding for order verification.

    Each text maps to the ordinals (as floats) of its last four characters,
    so distinct inputs produce distinct, predictable vectors.
    """
    vectors = []
    for item in texts:
        tail = item[-4:]
        vectors.append([float(ord(ch)) for ch in tail])
    return vectors
def test_sync_get_with_cache():
    """A single synchronous embedding call should populate the cache."""
    embeddings_cache = SimpleKVStore()
    model = MockEmbedding(embed_dim=4, embeddings_cache=embeddings_cache)

    embedding = model.get_text_embedding("Hello")

    assert embedding == expected_embedding
    # The text itself is the cache key; the stored value maps an opaque
    # id (uuid) to the embedding vector.
    cached = embeddings_cache.get(key="Hello", collection="embeddings")
    assert cached is not None
    assert cached[next(iter(cached.keys()))] == expected_embedding
def test_sync_get_batch_with_cache():
    """Test mixed scenario with some cached and some new inputs."""
    embeddings_cache = SimpleKVStore()
    embed_model = MockEmbedding(embed_dim=4, embeddings_cache=embeddings_cache)
    texts = ["Cached1", "Miss1", "Cached2", "Miss2"]
    # Pre-cache two of the four texts; values mimic the cache layout of
    # {uuid: vector} so the lookup path is exercised realistically.
    embed_model.embeddings_cache.put(
        key="Cached1",
        val={"uuid1": [104.0, 101.0, 100.0, 49.0]},
        collection="embeddings",
    )
    embed_model.embeddings_cache.put(
        key="Cached2",
        val={"uuid3": [104.0, 101.0, 100.0, 50.0]},
        collection="embeddings",
    )
    # Patch the model's embedding backend with custom_embeddings so the two
    # misses get distinguishable vectors (last-4-char ordinals).
    with patch.object(
        embed_model,
        "_get_text_embeddings",
        side_effect=custom_embeddings,
    ) as mock_get_embeddings:
        text_embeddings = embed_model.get_text_embedding_batch(texts)
    # Results must come back in the original input order, interleaving
    # cached hits with freshly computed misses.
    expected_embeddings = [
        [104.0, 101.0, 100.0, 49.0],  # Cached1
        [105.0, 115.0, 115.0, 49.0],  # Miss1 (first in batch)
        [104.0, 101.0, 100.0, 50.0],  # Cached2
        [105.0, 115.0, 115.0, 50.0],  # Miss2 (second in batch)
    ]
    assert text_embeddings == expected_embeddings
    # Both misses should have been computed in a single backend batch call.
    assert mock_get_embeddings.call_count == 1
    # Check cache: every text (hit or miss) must now be cached.
    for i, text in enumerate(texts):
        embd_dict = embeddings_cache.get(key=text, collection="embeddings")
        first_key = next(iter(embd_dict.keys()))
        assert embd_dict[first_key] == expected_embeddings[i]
@pytest.mark.asyncio
async def test_async_get_with_cache():
    """A single async embedding call should populate the cache."""
    embeddings_cache = SimpleKVStore()
    model = MockEmbedding(embed_dim=4, embeddings_cache=embeddings_cache)

    embedding = await model.aget_text_embedding("Hello")

    assert embedding == expected_embedding
    # The text itself is the cache key; the stored value maps an opaque
    # id (uuid) to the embedding vector.
    cached = embeddings_cache.get(key="Hello", collection="embeddings")
    assert cached is not None
    assert cached[next(iter(cached.keys()))] == expected_embedding
@pytest.mark.asyncio
async def test_async_get_batch_with_cache():
    """Test mixed scenario with some cached and some new inputs."""
    embeddings_cache = SimpleKVStore()
    embed_model = MockEmbedding(embed_dim=4, embeddings_cache=embeddings_cache)
    texts = ["Cached1", "Miss1", "Cached2", "Miss2"]
    # Pre-cache two of the four texts (same layout as the sync variant).
    embed_model.embeddings_cache.put(
        key="Cached1",
        val={"uuid1": [104.0, 101.0, 100.0, 49.0]},
        collection="embeddings",
    )
    embed_model.embeddings_cache.put(
        key="Cached2",
        val={"uuid3": [104.0, 101.0, 100.0, 50.0]},
        collection="embeddings",
    )
    # Patch the async backend; side_effect returning a plain list is fine
    # since Mock wraps it for the awaited call.
    with patch.object(
        embed_model,
        "_aget_text_embeddings",
        side_effect=custom_embeddings,
    ) as mock_get_embeddings:
        text_embeddings = await embed_model.aget_text_embedding_batch(texts)
    # Results must preserve input order, interleaving hits and misses.
    expected_embeddings = [
        [104.0, 101.0, 100.0, 49.0],  # Cached1
        [105.0, 115.0, 115.0, 49.0],  # Miss1 (first in batch)
        [104.0, 101.0, 100.0, 50.0],  # Cached2
        [105.0, 115.0, 115.0, 50.0],  # Miss2 (second in batch)
    ]
    assert text_embeddings == expected_embeddings
    # Both misses computed in one backend batch call.
    assert mock_get_embeddings.call_count == 1
    # Check cache: every text (hit or miss) must now be cached.
    for i, text in enumerate(texts):
        embd_dict = embeddings_cache.get(key=text, collection="embeddings")
        first_key = next(iter(embd_dict.keys()))
        assert embd_dict[first_key] == expected_embeddings[i]
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/embeddings/test_with_cache.py",
"license": "MIT License",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-sarvam/tests/test_llms_servam.py | from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.sarvam import Sarvam
def test_llm_class():
    """Sarvam must be a BaseLLM subclass somewhere in its MRO."""
    ancestor_names = {base.__name__ for base in Sarvam.__mro__}
    assert BaseLLM.__name__ in ancestor_names
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-sarvam/tests/test_llms_servam.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-mcp/tests/server.py | import asyncio
import random
import time
from contextlib import asynccontextmanager
from collections.abc import AsyncIterator
from datetime import datetime
from typing import Dict, Optional
from mcp.server.fastmcp import FastMCP, Context, Image
from mcp.server.fastmcp.prompts import base
from PIL import Image as PILImage
import numpy as np
import io
from tests.schemas import TestName, TestMethod, TestList
@asynccontextmanager
async def app_lifespan(server: FastMCP) -> AsyncIterator[None]:
    """
    Context manager for the MCP server lifetime.

    Starts the background resource updater when the server comes up and
    cancels it (waiting for the cancellation to land) on shutdown.
    """
    task = asyncio.create_task(periodic_updates())
    try:
        yield
    finally:
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            # Expected: the task was cancelled deliberately on shutdown.
            pass
# Create the MCP server
mcp = FastMCP(
    "TestAllFeatures",
    instructions="A test server that demonstrates all MCP features",
    dependencies=["pillow", "numpy"],
    lifespan=app_lifespan,
)

# --- In-memory data store for testing ---
# Keyed by user id; mutated by the update_user tool and read by the
# users://{user_id}/profile resource.
users = {
    "123": {
        "name": "Test User",
        "email": "test@example.com",
        "last_updated": time.time(),
    },
    "456": {
        "name": "Another User",
        "email": "another@example.com",
        "last_updated": time.time(),
    },
}

# Resource that changes periodically for subscription testing
counter = 0  # incremented on every read of counter://value
last_weather = {"temperature": 22, "condition": "sunny"}  # mutated by update_weather
# --- Tools ---
@mcp.tool(description="Echo back the input message")
def echo(message: str) -> str:
    """Simple echo tool to test basic tool functionality."""
    # Prefix the payload so callers can distinguish the echo from the input.
    return "Echo: " + message
@mcp.tool(description="Add two numbers together")
def add(a: float, b: float) -> float:
    """Add two numbers to test numeric tool arguments."""
    total = a + b
    return total
@mcp.tool(description="Get current server time")
def get_time() -> str:
    """Get the current server time to test tools without arguments."""
    now = datetime.now()
    return now.isoformat()
@mcp.tool(description="Generate a random image")
def generate_image(width: int = 100, height: int = 100, color: str = "random") -> Image:
    """Generate an image to test image return values.

    Args:
        width: Image width in pixels.
        height: Image height in pixels.
        color: "random" for noise, or "red"/"green"/"blue" for a solid
            color; any other value yields a black image.

    Returns:
        A PNG-encoded MCP Image.
    """
    if color == "random":
        # randint's upper bound is exclusive: use 256 so the full 8-bit
        # range 0..255 is reachable (with 255 the max channel value was 254).
        img_array = np.random.randint(0, 256, (height, width, 3), dtype=np.uint8)
    else:
        # Solid color: start from black, then saturate the requested channel.
        img_array = np.zeros((height, width, 3), dtype=np.uint8)
        channel = {"red": 0, "green": 1, "blue": 2}.get(color)
        if channel is not None:
            img_array[:, :, channel] = 255
    # Round-trip through PIL to produce a real PNG byte stream.
    pil_img = PILImage.fromarray(img_array)
    img_byte_arr = io.BytesIO()
    pil_img.save(img_byte_arr, format="PNG")
    return Image(data=img_byte_arr.getvalue(), format="png")
@mcp.tool(description="Update user information")
def update_user(
    user_id: str, name: Optional[str] = None, email: Optional[str] = None
) -> Dict:
    """Update a user to test triggering resource changes."""
    global users
    if user_id not in users:
        raise ValueError(f"User {user_id} not found")
    record = users[user_id]
    # Apply only the fields that were actually supplied.
    for field_name, value in (("name", name), ("email", email)):
        if value is not None:
            record[field_name] = value
    # Always bump the timestamp so resource watchers see a change.
    record["last_updated"] = time.time()
    return record
@mcp.tool(description="Long running task with progress updates")
async def long_task(steps: int, ctx: Context) -> str:
    """Long-running task to test progress reporting.

    Emits one progress notification per step (1-based), then returns a
    completion summary.
    """
    for i in range(1, steps + 1):
        await ctx.report_progress(i, steps, f"Processing step {i}/{steps}")
    return f"Completed {steps} steps"
@mcp.tool(description="Update weather data")
def update_weather(temperature: float, condition: str) -> Dict:
    """Update weather data to test resource change notifications."""
    global last_weather
    # Replace the module-level snapshot wholesale and echo it back.
    new_state = {"temperature": temperature, "condition": condition}
    last_weather = new_state
    return new_state
@mcp.tool(description="Test tool with Pydantic models")
def test_pydantic(name: TestName, method: TestMethod, lst: TestList) -> str:
    """Test tool with Pydantic models.

    Verifies that structured (model-typed) tool arguments are deserialized
    correctly by echoing their fields back as a string.
    """
    return f"Name: {name.name}, Method: {method.method}, List: {lst.lst}"
# --- Static Resources ---
@mcp.resource("config://app")
def get_app_config() -> str:
    """Static configuration resource.

    Returns a fixed JSON-formatted string; clients assert on its contents.
    """
    return """
    {
        "app_name": "MCP Test Server",
        "version": "1.0.0",
        "environment": "testing"
    }
    """
@mcp.resource("help://usage")
def get_help() -> str:
    """Static help text resource."""
    return """
    This server lets you test all MCP client features:
    - Call tools with various argument types
    - Read static and dynamic resources
    - Subscribe to changing resources
    - Use prompts with templates
    """
# --- Dynamic Resources ---
@mcp.resource("users://{user_id}/profile")
def get_user_profile(user_id: str) -> str:
    """Dynamic user profile resource.

    Renders the in-memory user record; returns a not-found message (rather
    than raising) for unknown ids.
    """
    if user_id not in users:
        return f"User {user_id} not found"
    user = users[user_id]
    return f"""
    Name: {user["name"]}
    Email: {user["email"]}
    Last Updated: {datetime.fromtimestamp(user["last_updated"]).isoformat()}
    """
@mcp.resource("counter://value")
def get_counter() -> str:
    """Resource that changes on every access for testing subscriptions."""
    global counter
    # Every read bumps the module-level counter, so consecutive reads differ.
    counter = counter + 1
    return f"Current counter value: {counter}"
@mcp.resource("weather://current")
def get_weather() -> str:
    """Weather resource that can be updated via a tool.

    Reflects the module-level state last written by update_weather (or by
    the periodic_updates background task).
    """
    global last_weather
    return f"""
    Current Weather:
    Temperature: {last_weather["temperature"]}°C
    Condition: {last_weather["condition"]}
    Last Updated: {datetime.now().isoformat()}
    """
# --- Prompts ---
@mcp.prompt()
def simple_greeting() -> str:
    """Simple prompt without arguments."""
    return "Hello! How can I help you today?"
@mcp.prompt()
def personalized_greeting(name: str) -> str:
    """Prompt with arguments (templated by the caller-supplied name)."""
    return f"Hello, {name}! How can I assist you today?"
@mcp.prompt()
def analyze_data(data: str) -> list[base.Message]:
    """Multi-message prompt template.

    Returns a short user/assistant exchange embedding the caller's data,
    to exercise multi-message prompt rendering.
    """
    return [
        base.UserMessage("Please analyze this data:"),
        base.UserMessage(data),
        base.AssistantMessage("I'll analyze this data for you. Let me break it down:"),
    ]
# --- Start periodic resource updater ---
# This simulates resources changing on their own for subscription testing
async def periodic_updates():
    """Task that periodically updates resources to test subscriptions.

    Runs until cancelled by app_lifespan; every 10 seconds it nudges the
    temperature by up to ±2 degrees and picks a random condition.
    """
    while True:
        await asyncio.sleep(10)
        # Update weather randomly
        new_temp = last_weather["temperature"] + random.uniform(-2, 2)
        conditions = ["sunny", "cloudy", "rainy", "windy", "snowy"]
        new_condition = random.choice(conditions)
        update_weather(new_temp, new_condition)
# --- Run the server ---
# Entry point when launched as a subprocess by the client tests.
if __name__ == "__main__":
    mcp.run()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-mcp/tests/server.py",
"license": "MIT License",
"lines": 197,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-mcp/tests/test_client.py | import os
from httpx import AsyncClient
import pytest
from llama_index.tools.mcp import BasicMCPClient
from llama_index.tools.mcp.client import enable_sse
from mcp import types
# Path to the test server script - adjust as needed
SERVER_SCRIPT = os.path.join(os.path.dirname(__file__), "server.py")


@pytest.fixture(scope="session")
def client() -> BasicMCPClient:
    """Create a basic MCP client connected to the test server.

    Session-scoped so one server subprocess is shared across all tests;
    the 5s timeout keeps hung-server failures fast.
    """
    return BasicMCPClient("python", args=[SERVER_SCRIPT], timeout=5)
@pytest.mark.asyncio
async def test_list_tools(client: BasicMCPClient):
    """Test listing tools from the server."""
    tools = await client.list_tools()
    # Check that we got a list of tools
    assert isinstance(tools, types.ListToolsResult)
    assert len(tools.tools) > 0
    # Verify some expected tools are present (registered in server.py)
    tool_names = [tool.name for tool in tools.tools]
    assert "echo" in tool_names
    assert "add" in tool_names
    assert "get_time" in tool_names
@pytest.mark.asyncio
async def test_call_tools(client: BasicMCPClient):
    """Test calling various tools."""
    # Test echo tool
    result = await client.call_tool("echo", {"message": "Hello, World!"})
    assert result.content[0].text == "Echo: Hello, World!"
    # Test add tool: numeric results come back as text content ("12.0")
    result = await client.call_tool("add", {"a": 5, "b": 7})
    assert result.content[0].text == "12.0"
    # Test get_time tool (just verify it returns something non-empty —
    # the exact timestamp is not predictable)
    result = await client.call_tool("get_time", {})
    assert isinstance(result.content[0].text, str)
    assert len(result.content[0].text) > 0
@pytest.mark.asyncio
async def test_list_resources(client: BasicMCPClient):
    """Test listing resources from the server."""
    resources = await client.list_resources()
    # Check that we got a list of resources
    assert isinstance(resources, types.ListResourcesResult)
    assert len(resources.resources) > 0
    # Verify some expected resources are present; uri is an AnyUrl, so
    # compare its string form.
    resource_names = [str(resource.uri) for resource in resources.resources]
    assert "config://app" in resource_names
    assert "help://usage" in resource_names
    assert "counter://value" in resource_names
    assert "weather://current" in resource_names
@pytest.mark.asyncio
async def test_read_resources(client: BasicMCPClient):
    """Test reading various resources."""
    # Test static resource
    resource = await client.read_resource("config://app")
    assert isinstance(resource, types.ReadResourceResult)
    assert resource.contents[0].mimeType == "text/plain"
    config_text = resource.contents[0].text
    assert "app_name" in config_text
    assert "MCP Test Server" in config_text
    # Test parametrized resource (user_id templated into the URI)
    resource = await client.read_resource("users://123/profile")
    profile_text = resource.contents[0].text
    assert "Test User" in profile_text
    assert "test@example.com" in profile_text
@pytest.mark.asyncio
async def test_list_prompts(client: BasicMCPClient):
    """Test listing prompts from the server."""
    prompts = await client.list_prompts()
    # Check that we got a list of prompts
    assert isinstance(prompts, types.ListPromptsResult)
    assert len(prompts.prompts) > 0
    # Verify some expected prompts are present (registered in server.py)
    prompt_names = [prompt.name for prompt in prompts.prompts]
    assert "simple_greeting" in prompt_names
    assert "personalized_greeting" in prompt_names
    assert "analyze_data" in prompt_names
@pytest.mark.asyncio
async def test_get_prompts(client: BasicMCPClient):
    """Test getting various prompts."""
    # Test simple prompt (no arguments)
    result = await client.get_prompt("simple_greeting")
    assert len(result) > 0
    # Test prompt with arguments: the name must be substituted into the text
    result = await client.get_prompt("personalized_greeting", {"name": "Tester"})
    assert len(result) > 0
    assert "Tester" in result[0].content
    # Test multi-message prompt: expect more than one message, with the
    # data argument embedded in one of them
    result = await client.get_prompt("analyze_data", {"data": "1,2,3,4,5"})
    assert len(result) > 1
    assert any("1,2,3,4,5" in msg.content for msg in result)
@pytest.mark.asyncio
async def test_resource_updates_via_tool(client: BasicMCPClient):
    """Test updating a resource via a tool and reading the changes."""
    # First read the initial user profile
    resource = await client.read_resource("users://123/profile")
    profile1 = resource.contents[0].text
    assert "Test User" in profile1
    # Update the user via the tool; the tool's return value (the updated
    # record) should already reflect the change.
    result = await client.call_tool(
        "update_user", {"user_id": "123", "name": "Updated User"}
    )
    profile2 = result.content[0].text
    assert "Updated User" in profile2
    assert "Test User" not in profile2
@pytest.mark.asyncio
async def test_default_in_memory_storage():
    """Test the default in-memory token storage.

    Only verifies construction wires up an auth object; no OAuth flow is
    actually performed (the handlers are no-op stubs).
    """
    # Create client with OAuth using default storage
    client = BasicMCPClient.with_oauth(
        "python",
        args=[SERVER_SCRIPT],
        client_name="Test Client",
        redirect_uris=["http://localhost:3000/callback"],
        redirect_handler=lambda url: None,  # Do nothing in test
        callback_handler=lambda: ("fake_code", None),  # Return fake code
    )
    # Just verify initialization works
    assert client.auth is not None
@pytest.mark.asyncio
async def test_use_provided_http_client():
    """Test the use of a provided HTTP client.

    The client must flag that an external httpx client was supplied and
    hold the exact same instance (not a copy).
    """
    # Create client
    provided_http_client = AsyncClient()
    client = BasicMCPClient(
        "python", args=[SERVER_SCRIPT], timeout=5, http_client=provided_http_client
    )
    # Just verify initialization works
    assert client.client_provided is True
    assert client.http_client is provided_http_client
@pytest.mark.asyncio
async def test_use_provided_http_client_with_oauth():
    """Test the use of a provided HTTP client together with OAuth.

    The supplied httpx client should be adopted as-is, and the OAuth setup
    should install an auth handler on it.
    """
    # Create client with OAuth
    provided_http_client = AsyncClient()
    client = BasicMCPClient.with_oauth(
        "python",
        args=[SERVER_SCRIPT],
        client_name="Test Client",
        redirect_uris=["http://localhost:3000/callback"],
        redirect_handler=lambda url: None,  # Do nothing in test
        callback_handler=lambda: ("fake_code", None),  # Return fake code
        http_client=provided_http_client,
    )
    # Just verify initialization works
    assert client.client_provided is True
    assert client.http_client is provided_http_client
    assert provided_http_client.auth is not None
@pytest.mark.asyncio
async def test_long_running_task(client: BasicMCPClient):
    """Test a long-running task with progress updates."""
    # This will run a task that takes a few steps and reports progress;
    # the closure records the most recent notification for post-checks.
    current_progress = 0
    current_message = ""
    expected_total = None

    async def progress_callback(progress: float, total: float, message: str):
        nonlocal current_progress
        nonlocal current_message
        nonlocal expected_total
        current_progress = progress
        current_message = message
        expected_total = total

    result = await client.call_tool(
        "long_task", {"steps": 3}, progress_callback=progress_callback
    )
    assert "Completed 3 steps" in result.content[0].text
    # The last notification must be step 3 of 3.
    assert current_progress == 3.0
    assert current_progress == expected_total
    assert current_message == "Processing step 3/3"
@pytest.mark.asyncio
async def test_image_return_value(client: BasicMCPClient):
    """Test tools that return images."""
    result = await client.call_tool(
        "generate_image", {"width": 50, "height": 50, "color": "blue"}
    )
    # Check that we got image data back (non-empty base64 payload); pixel
    # contents are not inspected.
    assert isinstance(result, types.CallToolResult)
    assert len(result.content[0].data) > 0
def test_enable_sse():
    """Test the enable_sse function with various URL formats."""
    # Test query parameter detection (composio style)
    assert enable_sse("https://example.com/api?transport=sse") is True
    assert enable_sse("http://localhost:8080?transport=sse&other=param") is True
    assert enable_sse("https://example.com/api?other=param&transport=sse") is True
    # Test path suffix detection (trailing /sse, with or without slash)
    assert enable_sse("https://example.com/sse") is True
    assert enable_sse("http://localhost:8080/api/sse") is True
    assert enable_sse("https://example.com/sse/") is True
    # Test path containing /sse/ as an interior segment
    assert enable_sse("https://example.com/api/sse/v1") is True
    assert enable_sse("http://localhost:8080/sse/events") is True
    assert enable_sse("https://example.com/v1/sse/stream") is True
    # Test non-SSE URLs
    assert enable_sse("https://example.com/api") is False
    assert enable_sse("http://localhost:8080") is False
    assert enable_sse("https://example.com/api?transport=http") is False
    assert enable_sse("https://example.com/assets") is False
    # Test edge cases: "sse" must be a whole path segment, and only the
    # transport query key counts
    assert enable_sse("https://example.com/sse-like") is False
    assert enable_sse("https://example.com/my-sse") is False
    assert enable_sse("https://example.com/api?sse=true") is False
    # Test with multiple transport values (should use first one)
    assert enable_sse("https://example.com?transport=sse&transport=http") is True
    # Test command-style inputs (non-URL): stdio commands never use SSE
    assert enable_sse("python") is False
    assert enable_sse("/usr/bin/python") is False
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-mcp/tests/test_client.py",
"license": "MIT License",
"lines": 206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/observability/llama-index-observability-otel/llama_index/observability/otel/base.py | import inspect
from datetime import datetime
from typing import Any, Dict, List, Literal, Mapping, Optional, Sequence, Union, cast
import llama_index_instrumentation as instrument
from llama_index.observability.otel.utils import flatten_dict
from llama_index_instrumentation.base.event import BaseEvent
from llama_index_instrumentation.event_handlers import BaseEventHandler
from llama_index_instrumentation.span import SimpleSpan, active_span_id
from llama_index_instrumentation.span_handlers.simple import SimpleSpanHandler
from opentelemetry import context, trace
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.sdk.trace import SpanProcessor, TracerProvider, _Span
from opentelemetry.sdk.trace.export import (
BatchSpanProcessor,
ConsoleSpanExporter,
SimpleSpanProcessor,
SpanExporter,
)
from opentelemetry.trace import set_span_in_context
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr
from termcolor.termcolor import cprint
class OTelEventAttributes(BaseModel):
    """Buffered representation of an OTel span event.

    Holds a name plus an attribute mapping restricted to the value types
    OpenTelemetry accepts for span-event attributes (scalars and
    homogeneous sequences of scalars).
    """

    # Event name, as passed to Span.add_event.
    name: str
    # OTel-compatible attribute values only; None means "no attributes".
    attributes: Optional[
        Mapping[
            str,
            Union[
                str,
                bool,
                int,
                float,
                Sequence[str],
                Sequence[bool],
                Sequence[int],
                Sequence[float],
            ],
        ]
    ]
class OTelCompatibleSpanHandler(SimpleSpanHandler):
    """OpenTelemetry-compatible span handler.

    Mirrors LlamaIndex instrumentation spans into OpenTelemetry: every new
    LlamaIndex span starts a matching OTel span (parented when the parent
    is known); exiting or dropping the span attaches any events buffered
    for it by OTelCompatibleEventHandler, sets the status, and ends it.
    """

    # Tracer used to start the mirrored OTel spans.
    _tracer: trace.Tracer = PrivateAttr()
    # Events buffered per LlamaIndex span id, attached when the span ends.
    _events_by_span: Dict[str, List[OTelEventAttributes]] = PrivateAttr(
        default_factory=dict,
    )
    all_spans: Dict[str, Union[trace.Span, _Span]] = Field(
        default_factory=dict, description="All the registered OpenTelemetry spans."
    )
    debug: bool = Field(
        default=False,
        description="Debug the start and end of span and the recording of events",
    )

    def __init__(
        self,
        tracer: trace.Tracer,
        debug: bool = False,
        open_spans: Optional[Dict[str, SimpleSpan]] = None,
        completed_spans: Optional[List[SimpleSpan]] = None,
        dropped_spans: Optional[List[SimpleSpan]] = None,
        current_span_ids: Optional[Dict[Any, str]] = None,
    ):
        """Initialize with a tracer; remaining args mirror SimpleSpanHandler."""
        super().__init__(
            open_spans=open_spans or {},
            completed_spans=completed_spans or [],
            dropped_spans=dropped_spans or [],
            current_span_ids=cast(Dict[str, Any], current_span_ids or {}),
        )
        self._tracer = tracer
        self._events_by_span = {}
        self.debug = debug

    @classmethod
    def class_name(cls) -> str:  # type: ignore
        """Class name."""
        return "OTelCompatibleSpanHandler"

    def new_span(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        parent_span_id: Optional[str] = None,
        tags: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> SimpleSpan:
        """Open a SimpleSpan and a mirrored OTel span, parented if known."""
        span = super().new_span(
            id_, bound_args, instance, parent_span_id, tags, **kwargs
        )
        if parent_span_id is not None:
            # Parent the OTel span under the already-open parent span.
            ctx = set_span_in_context(span=self.all_spans[parent_span_id])
        else:
            ctx = context.get_current()
        # NOTE(review): merges the call's bound arguments directly into the
        # OTel Context mapping; Context is normally treated as immutable and
        # argument values may not be OTel-safe types — confirm intentional.
        ctx.update(bound_args.arguments)
        otel_span = self._tracer.start_span(name=id_, context=ctx)
        self.all_spans.update({id_: otel_span})
        if self.debug:
            cprint(
                f"Emitting span {id_} at time: {datetime.now()}",
                color="yellow",
                attrs=["bold"],
            )
        return span

    def prepare_to_exit_span(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        result: Optional[Any] = None,
        **kwargs: Any,
    ) -> SimpleSpan:
        """Attach buffered events, mark the OTel span OK, and end it."""
        if self.debug:
            cprint(
                f"Preparing to end span {id_} at time: {datetime.now()}",
                color="blue",
                attrs=["bold"],
            )
        sp = super().prepare_to_exit_span(id_, bound_args, instance, result, **kwargs)
        span = self.all_spans.pop(id_)
        # Get and process events specific to this span
        events = self._events_by_span.pop(id_, [])
        for event in events:
            span.add_event(name=event.name, attributes=event.attributes)
        span.set_status(status=trace.StatusCode.OK)
        span.end()
        return sp

    def prepare_to_drop_span(
        self,
        id_: str,
        bound_args: inspect.BoundArguments,
        instance: Optional[Any] = None,
        err: Optional[BaseException] = None,
        **kwargs: Any,
    ) -> Optional[SimpleSpan]:
        """Attach buffered events, mark the OTel span as ERROR, and end it."""
        if self.debug:
            cprint(
                f"Preparing to exit span {id_} with an error at time: {datetime.now()}",
                color="red",
                attrs=["bold"],
            )
        sp = super().prepare_to_drop_span(id_, bound_args, instance, err, **kwargs)
        span = self.all_spans.pop(id_)
        # Get and process events specific to this span
        events = self._events_by_span.pop(id_, [])
        for event in events:
            span.add_event(name=event.name, attributes=event.attributes)
        # err is Optional: a span can be dropped without an exception, and
        # the previous err.__str__() would raise AttributeError on None.
        description = str(err) if err is not None else None
        span.set_status(status=trace.StatusCode.ERROR, description=description)
        span.end()
        return sp
class OTelCompatibleEventHandler(BaseEventHandler):
    """OpenTelemetry-compatible event handler.

    Buffers each LlamaIndex instrumentation event under the currently
    active span id on the companion span handler, so the span handler can
    attach them as OTel span events when that span ends.
    """

    span_handler: OTelCompatibleSpanHandler = Field(
        description="Span Handler associated with the event handler"
    )
    debug: bool = Field(
        default=False,
        description="Debug the start and end of span and the recording of events",
    )

    @classmethod
    def class_name(cls) -> str:
        """Class name."""
        return "OtelCompatibleEventHandler"

    def handle(self, event: BaseEvent, **kwargs: Any) -> None:
        """Handle events by pushing them to the correct span's bucket for later attachment to OpenTelemetry."""
        if self.debug:
            cprint(
                f"Registering a {event.class_name()} event at time: {datetime.now()}",
                color="green",
                attrs=["bold"],
            )
        # Get the current span id from the contextvars context
        current_span_id = active_span_id.get()
        if current_span_id is None:
            # The event is happening outside of any span - nothing to do
            return
        try:
            event_data = event.model_dump()
        except TypeError:
            # Some events can be unserializable,
            # so we just convert to a string as a fallback
            event_data = {"event_data": str(event)}
        # Flatten nested payloads into OTel-compatible scalar attributes and
        # append to this span's buffer (created on first event).
        otel_event = OTelEventAttributes(
            name=event.class_name(), attributes=flatten_dict(event_data)
        )
        self.span_handler._events_by_span.setdefault(current_span_id, []).append(
            otel_event
        )
class LlamaIndexOpenTelemetry(BaseModel):
    """
    LlamaIndexOpenTelemetry is a configuration and integration class for OpenTelemetry tracing within LlamaIndex.

    This class manages the setup and registration of OpenTelemetry span and event handlers, configures the tracer provider,
    and exports trace data using the specified span exporter and processor. It supports both simple and batch span processors,
    and allows customization of the service name or resource, as well as the dispatcher name.

    Attributes:
        span_exporter (Optional[SpanExporter]): The OpenTelemetry span exporter. Defaults to ConsoleSpanExporter.
        span_processor (Literal["simple", "batch"]): The span processor type, either 'simple' or 'batch'. Defaults to 'batch'.
        service_name_or_resource (Union[str, Resource]): The service name or OpenTelemetry Resource. Defaults to a Resource with service name 'llamaindex.opentelemetry'.

    """

    model_config = ConfigDict(arbitrary_types_allowed=True)
    # default_factory (not default=ConsoleSpanExporter()) so each instance
    # gets its own exporter instead of one shared class-level instance.
    span_exporter: Optional[SpanExporter] = Field(
        default_factory=ConsoleSpanExporter,
        description="OpenTelemetry span exporter. Supports all SpanExporter-compatible interfaces, defaults to ConsoleSpanExporter.",
    )
    span_processor: Literal["simple", "batch"] = Field(
        default="batch",
        description="OpenTelemetry span processor. Can be either 'batch' (-> BatchSpanProcessor), 'simple' (-> SimpleSpanProcessor). Defaults to 'batch'",
    )
    extra_span_processors: List[SpanProcessor] = Field(
        default_factory=list,
        description="List of OpenTelemetry Span Processors to add to the tracer provider.",
    )
    tracer_provider: Optional[TracerProvider] = Field(
        default=None,
        description="Tracer Provider to inherit from the existing observability context. Defaults to None.",
    )
    service_name_or_resource: Union[str, Resource] = Field(
        default=Resource(attributes={SERVICE_NAME: "llamaindex.opentelemetry"}),
        description="Service name or resource for OpenTelemetry. Defaults to a Resource with 'llamaindex.opentelemetry' as service name.",
    )
    debug: bool = Field(
        default=False,
        description="Debug the start and end of span and the recording of events",
    )
    # Created lazily by _start_otel; None until start_registering runs.
    _tracer: Optional[trace.Tracer] = PrivateAttr(default=None)

    def _start_otel(
        self,
    ) -> None:
        """Configure the global tracer provider and create this instance's tracer."""
        # Normalize a plain service-name string into a Resource.
        if isinstance(self.service_name_or_resource, str):
            self.service_name_or_resource = Resource(
                attributes={SERVICE_NAME: self.service_name_or_resource}
            )
        if self.tracer_provider is None:
            tracer_provider = TracerProvider(resource=self.service_name_or_resource)
        else:
            tracer_provider = self.tracer_provider
        assert self.span_exporter is not None, (
            "span_exporter has to be non-null to be used within simple or batch span processors"
        )
        if self.span_processor == "simple":
            span_processor = SimpleSpanProcessor(self.span_exporter)
        else:
            span_processor = BatchSpanProcessor(self.span_exporter)
        # Extra processors are attached before the exporter-backed one.
        for extra_span_processor in self.extra_span_processors:
            tracer_provider.add_span_processor(extra_span_processor)
        tracer_provider.add_span_processor(span_processor)
        trace.set_tracer_provider(tracer_provider)
        self._tracer = trace.get_tracer("llamaindex.opentelemetry.tracer")

    def start_registering(
        self,
    ) -> None:
        """Starts LlamaIndex instrumentation."""
        self._start_otel()
        dispatcher = instrument.get_dispatcher()
        assert self._tracer is not None, (
            "The tracer has to be non-null to start observability"
        )
        # Register paired handlers: the event handler buffers events onto
        # the span handler, which flushes them into OTel spans.
        span_handler = OTelCompatibleSpanHandler(
            tracer=self._tracer,
            debug=self.debug,
        )
        dispatcher.add_span_handler(span_handler)
        dispatcher.add_event_handler(
            OTelCompatibleEventHandler(span_handler=span_handler, debug=self.debug)
        )
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/observability/llama-index-observability-otel/llama_index/observability/otel/base.py",
"license": "MIT License",
"lines": 260,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.