| sample_id | text | metadata | category |
|---|---|---|---|
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/query_utils.py | """
Utilities for use with the Apache Solr Vector Store integration.
This module provides utility functions for working with Solr in the context of
LlamaIndex vector stores. It includes functions for:
- Query escaping and preprocessing
- Node relationship serialization/deserialization
- Metadata filter conversion to Solr query syntax
- Sparse vector encoding for Solr's delimited term frequency filters
The utilities handle the transformation between LlamaIndex data structures and
Solr-compatible formats, ensuring proper query syntax and data encoding.
"""
import logging
from types import MappingProxyType
from typing import Union
from llama_index.core.vector_stores.types import (
ExactMatchFilter,
FilterCondition,
FilterOperator,
MetadataFilter,
MetadataFilters,
)
from llama_index.vector_stores.solr.constants import (
ESCAPE_RULES_GENERIC,
)
logger = logging.getLogger(__name__)
def escape_query_characters(
value: str, translation_table: MappingProxyType[int, str] = ESCAPE_RULES_GENERIC
) -> str:
"""
Escape special query characters to prevent user query injection.
Reference: `Standard Query Parser <https://solr.apache.org/guide/solr/latest/query-guide/standard-query-parser.html#escaping-special-characters>`_
Args:
value: The input text to be escaped.
translation_table: The translation table to use for escaping.
Returns:
The input text escaped appropriately for use with Solr.
"""
return value.translate(translation_table)
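# A minimal usage sketch (illustrative, not part of the module): escaping a
# raw user query before embedding it in a Solr request. The exact output
# depends on the entries in ESCAPE_RULES_GENERIC, which is assumed to cover
# the standard query parser's special characters.
#
#   raw_query = 'title:"solr" AND (foo OR bar)'
#   safe_query = escape_query_characters(raw_query)
#   # each special character in raw_query is now escaped per the translation
#   # table, so the string can be used as a literal term in a Solr query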
def _handle_metadata_filter(subfilter: Union[MetadataFilter, ExactMatchFilter]) -> str:
"""
Convert a single metadata filter to Solr query string format.
Handles various filter operators (EQ, NE, GT, LT, IN, NIN, etc.) and converts them
to appropriate Solr query syntax. Special handling is provided for list values
with IN/NIN/ALL/ANY operators.
Args:
subfilter: A metadata filter or exact match filter to convert.
Returns:
A Solr-compatible query string for the filter.
Raises:
ValueError: If an unsupported operator is used or if list values are used
with incompatible operators.
"""
key = subfilter.key
value = subfilter.value
op = subfilter.operator
# 1. List-handling branches (limited operator support on list values)
if isinstance(value, list):
if op == FilterOperator.ALL:
return f"({key}:({' AND '.join(value)}))"
if op in (FilterOperator.ANY, FilterOperator.IN):
# ANY (multi-valued field contains any of) and IN (value is a member of set)
# both reduce to an OR disjunction in Solr.
return f"({key}:({' OR '.join([str(x) for x in value])}))"
if op == FilterOperator.NIN:
return f"(-{key}:({' OR '.join([str(x) for x in value])}))"
# Any other operator combined with a list value is invalid
raise ValueError(
"Query filter uses a list value for an incompatible operator, only 'IN', 'NIN', 'ANY' and 'ALL' are supported with lists: "
f"{subfilter}"
)
# 2. Fallbacks for list-only operators when the user supplied a scalar value
if op in (
FilterOperator.ALL,
FilterOperator.ANY,
FilterOperator.IN,
) and not isinstance(value, list):
logger.warning(
"Query filter contains '%s' operator for non-list value (type=%s), treating as 'EQ' operator: %s",
op.value,
type(value),
subfilter,
)
return f"({key}:{value})"
if op == FilterOperator.NIN and not isinstance(value, list):
logger.warning(
"Query filter contains 'NIN' operator for non-list value (type=%s), treating as 'NE' operator: %s",
type(value),
subfilter,
)
return f"(-{key}:{value})"
# 3. Scalar operator handling (value is str/int/float/etc.)
if op == FilterOperator.GT:
return f"({key}:{{{value} TO *])"
if op == FilterOperator.GTE:
return f"({key}:[{value} TO *])"
if op == FilterOperator.LT:
return f"({key}:[* TO {value}}})"
if op == FilterOperator.LTE:
return f"({key}:[* TO {value}])"
if op == FilterOperator.EQ:
return f"({key}:{value})"
if op == FilterOperator.NE:
return f"(-({key}:{value}))"
if op in (FilterOperator.TEXT_MATCH, FilterOperator.TEXT_MATCH_INSENSITIVE):
if isinstance(value, str):
# NOTE: Ensure that the field is properly configured for text_match_insensitive in the Solr schema
return f'({key}:"{value}")'
if op == FilterOperator.TEXT_MATCH:
raise ValueError(
f"Query filter uses a non-string with the 'TEXT_MATCH' operator: {subfilter}"
)
# For TEXT_MATCH_INSENSITIVE with non-string, fall through to unknown operator error below.
# 4. Explicitly disallowed operators
if op in (FilterOperator.CONTAINS, FilterOperator.IS_EMPTY):
raise ValueError(f"Disallowed operator used in filter: {subfilter}")
# 5. Unknown / future operator (parity with original pragma: no cover branch)
raise ValueError(
f"Unknown operator used in filter: {subfilter}"
) # pragma: no cover
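# Illustrative sketch of the operator-to-syntax mapping above (keys and
# values are hypothetical; the outputs follow directly from the branches):
#
#   _handle_metadata_filter(
#       MetadataFilter(key="topic", value="history", operator=FilterOperator.EQ)
#   )  # -> '(topic:history)'
#   _handle_metadata_filter(
#       MetadataFilter(key="year", value=2020, operator=FilterOperator.GT)
#   )  # -> '(year:{2020 TO *])'
#   _handle_metadata_filter(
#       MetadataFilter(key="topic", value=["a", "b"], operator=FilterOperator.IN)
#   )  # -> '(topic:(a OR b))'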
def recursively_unpack_filters(filters: MetadataFilters) -> list[str]:
"""
Recursively unpack metadata filters to Solr filter query.
Notes: Solr has issues with complex filters; we have observed that queries with
nested filters do not always return correct results. As far as we can tell this is
not a problem with this function, but rather an issue with the Solr parser itself.
Not all ``llama-index`` filter operations are supported in the optional ``filters``
attribute of :py:class:`~llama_index.core.vector_stores.VectorStoreQuery`. If any of
the following filters are passed, an error will be raised:
* ``contains``
* ``is_empty``
See :py:class:`~llama_index.core.vector_stores.types.FilterOperator` for the
complete list of operators.
Args:
filters: The set of filters to be converted into Solr-specific query parameters
Returns:
A set of filters converted into the Solr-specific query language, linked using the
relevant query condition (e.g., AND, OR).
If the input filters do not contain a value for ``condition`` in
:py:class:`~llama_index.core.vector_stores.types.MetadataFilters`, then the
sub-filters will be returned without being linked.
Raises:
ValueError: If an unsupported or unknown filter operator is passed.
"""
if not filters.filters:
logger.info("Input MetadataFilters contains no subfilters: %s", filters)
return []
# convert all the individual filter statements
filter_queries: list[str] = []
for subfilter in filters.filters:
if isinstance(subfilter, MetadataFilters):
filter_queries.extend(recursively_unpack_filters(subfilter))
elif isinstance(subfilter, MetadataFilter):
filter_queries.append(_handle_metadata_filter(subfilter))
else: # pragma: no cover
raise ValueError(
f"Unknown subfilter type, type={type(subfilter)}: {subfilter}"
)
# combine the filter statements using the appropriate condition
condition = filters.condition
if condition == FilterCondition.AND:
filter_output = [f"({' AND '.join(filter_queries)})"]
elif condition == FilterCondition.OR:
filter_output = [f"({' OR '.join(filter_queries)})"]
elif condition == FilterCondition.NOT:
filter_output = [f"(NOT ({' AND '.join(filter_queries)}))"]
elif condition is None:
logger.warning(
"No filter condition specified, sub-filters will be returned unlinked"
)
filter_output = filter_queries
else: # pragma: no cover
raise ValueError(f"Unknown filter condition: {filters.condition}")
logger.debug(
"Converted query filters to Solr filters, input=%s: %s", filters, filter_output
)
return filter_output
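# Illustrative sketch (hypothetical keys and values): a nested filter set and
# the Solr filter query it unpacks to, following the branches above.
#
#   filters = MetadataFilters(
#       filters=[
#           MetadataFilter(key="topic", value="history"),
#           MetadataFilters(
#               filters=[
#                   MetadataFilter(key="year", value=2020, operator=FilterOperator.GTE),
#                   MetadataFilter(key="year", value=2025, operator=FilterOperator.LTE),
#               ],
#               condition=FilterCondition.AND,
#           ),
#       ],
#       condition=FilterCondition.AND,
#   )
#   recursively_unpack_filters(filters)
#   # -> ['((topic:history) AND ((year:[2020 TO *]) AND (year:[* TO 2025])))']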
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/query_utils.py",
"license": "MIT License",
"lines": 176,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/types.py | """Shared type declarations for the Apache Solr vector store integration."""
from typing import TypedDict
from pydantic import BaseModel
from typing_extensions import NotRequired
class BoostedTextField(BaseModel):
"""
A text field with an optional boost value for Solr queries.
This model represents a Solr field that can have a multiplicative boost
factor applied to increase or decrease its relevance in search results.
Boost factors greater than 1.0 increase relevance, while factors between
0.0 and 1.0 decrease it.
Attributes:
field: The Solr field name to include in the search.
boost_factor: The boost multiplier to apply. Defaults
to 1.0 (no boost). Values > 1.0 increase relevance, 0.0 < values < 1.0
decrease it.
"""
field: str
boost_factor: float = 1.0
def get_query_str(self) -> str: # pragma: no cover
"""
Return Solr query syntax representation for this field.
If the boost factor is 1.0 (default) the field term is returned as-is;
otherwise the canonical Solr boost syntax ``field^boost_factor`` is produced.
"""
if self.boost_factor != 1.0:
return f"{self.field}^{self.boost_factor}"
return self.field
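# Illustrative usage sketch (field names are hypothetical):
#
#   BoostedTextField(field="title_txt", boost_factor=2.0).get_query_str()
#   # -> 'title_txt^2.0'
#   BoostedTextField(field="body_txt").get_query_str()
#   # -> 'body_txt' (the default boost of 1.0 emits no boost suffix)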
class SolrQueryDict(TypedDict):
"""
Dictionary representing a Solr query with parameters.
This is not an exhaustive list of Solr parameters, only those currently
used by the vector store implementation.
"""
q: str
fq: list[str]
fl: NotRequired[str]
rows: NotRequired[str]
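# Illustrative sketch of a SolrQueryDict for a dense KNN search with a
# metadata filter; the query string shape mirrors the one exercised in this
# package's tests, and the field names are hypothetical.
#
#   query: SolrQueryDict = {
#       "q": "{!knn f=embedding topK=10}[0.1, 0.2, 0.3]",
#       "fq": ["topic_s:history"],
#       "fl": "id,text_txt_en,score",
#       "rows": "10",
#   }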
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/types.py",
"license": "MIT License",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-solr/tests/integration/test_solr_vector_store.py | """
End-to-end integration tests for ApacheSolrVectorStore.
Functionality Covered:
1. Index (add / async_add) a handful of nodes with metadata + embeddings.
2. Retrieve via:
- Match all (*:*) raw query (implicit through vector store query without embedding)
- Dense KNN query (VectorStoreQueryMode.DEFAULT)
- Lexical BM25 query (VectorStoreQueryMode.TEXT_SEARCH)
- Same dense & lexical queries but with metadata filters applied.
3. Delete one node and validate it is gone using *:*
"""
from __future__ import annotations
import time
from datetime import datetime, timezone
try:
from datetime import UTC
except ImportError:
UTC = timezone.utc
from typing import Callable
import pytest
from llama_index.core.schema import TextNode
from llama_index.core.vector_stores.types import (
FilterOperator,
MetadataFilter,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
)
from llama_index.vector_stores.solr.base import ApacheSolrVectorStore
from llama_index.vector_stores.solr.client.async_ import AsyncSolrClient
from llama_index.vector_stores.solr.client.sync import SyncSolrClient
from llama_index.vector_stores.solr.types import BoostedTextField
# ---------------------------------------------------------------------------
# Fixtures (local to this file to keep file self-contained)
# ---------------------------------------------------------------------------
@pytest.fixture()
def sample_nodes() -> list[TextNode]:
"""
Return nodes with simple hardcoded embeddings (64-dim) + metadata.
Embedding design:
n1: all 0.125 -> close to history query [0.15]*64
n2: all 0.25 -> also close to history query
n3: all 0.90 -> close to politics query [0.85]*64
This yields deterministic nearest-neighbor expectations.
"""
base_dt = datetime(2025, 1, 1, 12, 0, 0, tzinfo=UTC)
return [
TextNode(
id_="n1",
text="Abraham Lincoln was President of the United States.",
embedding=[0.125] * 64,
metadata={"topic": "history", "published": base_dt},
),
TextNode(
id_="n2",
text="Benjamin Franklin was a Founding Father, not a President.",
embedding=[0.25] * 64,
metadata={"topic": "history", "published": base_dt.replace(day=2)},
),
TextNode(
id_="n3",
text="John Major served as Prime Minister of the United Kingdom.",
embedding=[0.9] * 64,
metadata={"topic": "politics", "published": base_dt.replace(day=3)},
),
]
@pytest.fixture()
def vector_store(
function_unique_solr_with_knn_collection_url: str,
) -> ApacheSolrVectorStore:
"""
Create a real vector store hitting the per-test Solr collection.
We configure both dense (embedding) and lexical (BM25) search fields.
"""
sync_client = SyncSolrClient(base_url=function_unique_solr_with_knn_collection_url)
async_client = AsyncSolrClient(
base_url=function_unique_solr_with_knn_collection_url
)
return ApacheSolrVectorStore(
sync_client=sync_client,
async_client=async_client,
nodeid_field="id",
docid_field="docid",
content_field="text_txt_en",
embedding_field="vector_field", # dense vector field configured in test Solr schema see conftest.py
metadata_to_solr_field_mapping=[
("topic", "topic_s"),
("published", "published_dt"),
],
text_search_fields=[BoostedTextField(field="text_txt_en", boost_factor=2.0)],
solr_field_preprocessor_kwargs={},
)
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _poll_count(
store: ApacheSolrVectorStore,
target_pred: Callable[[int], bool],
*,
max_attempts: int = 30,
sleep_s: float = 0.2,
) -> int:
"""
Poll Solr until predicate satisfied or timeout.
Returns final count or raises AssertionError.
"""
last_count = -1
for _ in range(max_attempts):
resp = store.sync_client.search({"q": "*:*"})
last_count = len(resp.response.docs)
if target_pred(last_count):
return last_count
time.sleep(sleep_s)
raise AssertionError(f"Timeout waiting for condition; last_count={last_count}")
def _build_dense_query(embedding: list[float], top_k: int = 3) -> VectorStoreQuery:
return VectorStoreQuery(
mode=VectorStoreQueryMode.DEFAULT,
query_embedding=embedding,
similarity_top_k=top_k,
)
def _build_text_query(q: str, top_k: int = 3) -> VectorStoreQuery:
return VectorStoreQuery(
mode=VectorStoreQueryMode.TEXT_SEARCH,
query_str=q,
sparse_top_k=top_k,
)
def _topic_filter(value: str) -> MetadataFilters:
return MetadataFilters(
filters=[MetadataFilter(key="topic_s", value=value, operator=FilterOperator.EQ)]
)
# Predefined query embeddings for deterministic dense search
DEFAULT_EMBEDDINGS = {
"history_query": [0.15] * 64, # closer to n1/n2
"politics_query": [0.85] * 64, # closer to n3
"founding_father_query": [0.18] * 64, # still closer to n1/n2
}
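# For reference: a dense query built from one of these embeddings is expected
# to reach Solr as a KNN query of roughly this shape (a sketch; the exact
# string is produced by ApacheSolrVectorStore._to_solr_query):
#
#   {!knn f=vector_field topK=3}[0.15, 0.15, ..., 0.15]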
# ---------------------------------------------------------------------------
# Synchronous end-to-end test
# ---------------------------------------------------------------------------
@pytest.mark.uses_docker
def test_solr_vector_store_sync_minimal_flow(
vector_store: ApacheSolrVectorStore,
sample_nodes: list[TextNode],
) -> None:
"""
End-to-end sync flow covering add, * query, dense & lexical, filters, delete.
Steps:
1. add() nodes
2. verify they are visible via raw *:*
3. dense KNN query
4. lexical BM25 query
5. filtered dense + lexical queries
6. delete one node (using delete_nodes, which is simpler since node ids are mapped)
7. verify count decreased
"""
# 1. Index
added_ids = vector_store.add(sample_nodes)
assert added_ids == [n.id_ for n in sample_nodes]
# 2. Ensure visibility
_poll_count(vector_store, lambda c: c == len(sample_nodes))
# 3. Dense KNN query (predefined query embedding)
dense_q = _build_dense_query(DEFAULT_EMBEDDINGS["history_query"], top_k=3)
dense_res = vector_store.query(dense_q)
assert len(dense_res.ids) <= 3
# expect at least one history doc (n1/n2) retrieved
assert any(doc_id in dense_res.ids for doc_id in ["n1", "n2"])
# 4. Lexical BM25 query
text_q = _build_text_query("President", top_k=3)
text_res = vector_store.query(text_q)
assert len(text_res.ids) >= 1
# 5. Filtered queries
hist_filter = _topic_filter("history")
dense_hist_q = _build_dense_query(DEFAULT_EMBEDDINGS["history_query"], top_k=5)
dense_hist_q.filters = hist_filter
dense_hist_res = vector_store.query(dense_hist_q)
assert set(dense_hist_res.ids).issubset({"n1", "n2"})
text_hist_q = _build_text_query("President", top_k=5)
text_hist_q.filters = hist_filter
text_hist_res = vector_store.query(text_hist_q)
assert set(text_hist_res.ids).issubset({"n1", "n2"})
# 6. Delete one node by node id
vector_store.delete_nodes(node_ids=["n1"])
_poll_count(vector_store, lambda c: c == len(sample_nodes) - 1)
# 7. Final assertion: n1 removed
remaining = vector_store.sync_client.search({"q": "*:*"}).response.docs
remaining_ids = {d["id"] for d in remaining}
assert "n1" not in remaining_ids
# ---------------------------------------------------------------------------
# Asynchronous end-to-end test
# ---------------------------------------------------------------------------
@pytest.mark.asyncio
@pytest.mark.uses_docker
async def test_solr_vector_store_async_minimal_flow(
vector_store: ApacheSolrVectorStore,
sample_nodes: list[TextNode],
) -> None:
"""
Async variant of the minimal flow using async_add, aquery, adelete_nodes.
"""
# 1. Index (async)
added_ids = await vector_store.async_add(sample_nodes)
assert added_ids == [n.id_ for n in sample_nodes]
_poll_count(vector_store, lambda c: c == len(sample_nodes))
# Dense query via predefined query embedding
dense_q = _build_dense_query(DEFAULT_EMBEDDINGS["politics_query"], top_k=3)
dense_res = await vector_store.aquery(dense_q)
# Expect politics doc n3 likely present
assert "n3" in dense_res.ids
# Lexical query (BM25)
text_q = _build_text_query("Minister", top_k=3)
text_res = await vector_store.aquery(text_q)
assert len(text_res.ids) >= 1
# Filtered dense query for history topic
hist_filter = _topic_filter("history")
dense_hist_q = _build_dense_query(
DEFAULT_EMBEDDINGS["founding_father_query"], top_k=5
)
dense_hist_q.filters = hist_filter
dense_hist_res = await vector_store.aquery(dense_hist_q)
assert set(dense_hist_res.ids).issubset({"n1", "n2"})
# Delete one node
await vector_store.adelete_nodes(node_ids=["n2"]) # remove a history doc
_poll_count(vector_store, lambda c: c == len(sample_nodes) - 1)
remaining = vector_store.sync_client.search({"q": "*:*"}).response.docs
remaining_ids = {d["id"] for d in remaining}
assert "n2" not in remaining_ids
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-solr/tests/integration/test_solr_vector_store.py",
"license": "MIT License",
"lines": 227,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-solr/tests/test_async_client.py | import asyncio
import sys
from typing import Any, Optional
from unittest import mock
from unittest.mock import MagicMock, patch
import aiohttp
import aiosolr
import pytest
from pydantic import ValidationError
from llama_index.vector_stores.solr.client import (
AsyncSolrClient,
SolrSelectResponse,
SolrUpdateResponse,
)
from tests.conftest import compare_documents, params_delete_by_id, params_search_queries
_MODULE_PATH = "llama_index.vector_stores.solr.client.async_"
@patch(f"{_MODULE_PATH}.aiosolr.Client.update")
async def test_async_solr_client_add_valid(
mock_aiosolr_add: MagicMock,
mock_aiosolr_update_response: aiosolr.Response,
mock_solr_update_response: SolrUpdateResponse,
mock_solr_raw_input_documents: list[dict[str, Any]],
mock_solr_updated_input_documents: list[dict[str, Any]],
mock_base_solr_url: str,
) -> None:
# GIVEN
async_client = AsyncSolrClient(base_url=mock_base_solr_url)
mock_aiosolr_add.return_value = mock_aiosolr_update_response
# WHEN
actual_response = await async_client.add(mock_solr_raw_input_documents)
# THEN
mock_aiosolr_add.assert_called_once_with(data=mock_solr_updated_input_documents)
assert actual_response == mock_solr_update_response
@patch(f"{_MODULE_PATH}.aiosolr.Client.update")
async def test_async_solr_client_add_aiosolr_error(
mock_aiosolr_update: MagicMock,
mock_solr_raw_input_documents: list[dict[str, Any]],
mock_base_solr_url: str,
) -> None:
# GIVEN
async_client = AsyncSolrClient(base_url=mock_base_solr_url)
mock_aiosolr_update.side_effect = aiosolr.SolrError("some error")
# WHEN / THEN
with pytest.raises(
ValueError,
match=f"Error during Aiosolr call, type={aiosolr.SolrError}",
):
await async_client.add(mock_solr_raw_input_documents)
@pytest.mark.uses_docker
async def test_async_solr_client_add_docker_solr(
function_unique_solr_collection_url: str,
mock_solr_raw_input_documents: list[dict[str, Any]],
mock_solr_expected_retrieved_documents: list[dict[str, Any]],
) -> None:
# GIVEN
async_client = AsyncSolrClient(base_url=function_unique_solr_collection_url)
# WHEN
await async_client.add(mock_solr_raw_input_documents)
await asyncio.sleep(5)
# THEN
results = await async_client.search({"q": "*:*"})
compare_documents(mock_solr_expected_retrieved_documents, results.response.docs)
@patch(f"{_MODULE_PATH}.aiosolr.Client.update")
async def test_async_solr_client_delete_by_query_valid(
mock_aiosolr_update: MagicMock,
mock_aiosolr_update_response: aiosolr.Response,
mock_solr_update_response: SolrUpdateResponse,
mock_base_solr_url: str,
) -> None:
# GIVEN
async_client = AsyncSolrClient(base_url=mock_base_solr_url)
input_query_string = "id:doc1"
expected_query = {"delete": {"query": "id:doc1"}}
mock_aiosolr_update.return_value = mock_aiosolr_update_response
# WHEN
actual_response = await async_client.delete_by_query(input_query_string)
# THEN
mock_aiosolr_update.assert_called_once_with(data=expected_query)
assert actual_response == mock_solr_update_response
@patch(f"{_MODULE_PATH}.aiosolr.Client.update")
async def test_async_solr_client_delete_by_query_aiosolr_error(
mock_aiosolr_update: MagicMock,
mock_base_solr_url: str,
) -> None:
# GIVEN
async_client = AsyncSolrClient(base_url=mock_base_solr_url)
mock_aiosolr_update.side_effect = aiosolr.SolrError("some error")
# WHEN / THEN
with pytest.raises(
ValueError,
match=f"Error during Aiosolr call, type={aiosolr.SolrError}",
):
await async_client.delete_by_query("id:doc1")
@pytest.mark.uses_docker
async def test_async_solr_client_delete_by_query_docker_solr(
function_unique_solr_collection_url: str,
mock_solr_raw_input_documents: list[dict[str, Any]],
) -> None:
# GIVEN
async_client = AsyncSolrClient(base_url=function_unique_solr_collection_url)
delete_query = "int_i:1"
search_query = {"q": delete_query, "fl": "id,text_txt_en,score"}
# WHEN
# add, and ensure the docs are present
await async_client.add(mock_solr_raw_input_documents)
await asyncio.sleep(5)
res_after_add = await async_client.search(search_query)
assert len(res_after_add.response.docs) == 1
# delete once we're sure they're there
await async_client.delete_by_query(delete_query)
await asyncio.sleep(5)
# THEN
res_after_del = await async_client.search(search_query)
assert len(res_after_del.response.docs) == 0
@pytest.mark.parametrize(
"input_ids", [["doc1"], ["doc1", "doc2"]], ids=["len(ids)==1", "len(ids)>1"]
)
@patch(f"{_MODULE_PATH}.aiosolr.Client.update")
async def test_async_solr_client_delete_by_id_valid(
mock_aiosolr_update: MagicMock,
mock_aiosolr_update_response: aiosolr.Response,
mock_solr_update_response: SolrUpdateResponse,
mock_base_solr_url: str,
input_ids: list[str],
) -> None:
# GIVEN
async_client = AsyncSolrClient(base_url=mock_base_solr_url)
expected_query = {"delete": input_ids}
mock_aiosolr_update.return_value = mock_aiosolr_update_response
# WHEN
actual_response = await async_client.delete_by_id(input_ids)
# THEN
mock_aiosolr_update.assert_called_once_with(data=expected_query)
assert actual_response == mock_solr_update_response
@patch(f"{_MODULE_PATH}.aiosolr.Client.update")
async def test_async_solr_client_delete_by_id_empty_id_list(
mock_aiosolr_update: MagicMock,
mock_base_solr_url: str,
) -> None:
# GIVEN
async_client = AsyncSolrClient(base_url=mock_base_solr_url)
# WHEN / THEN
with pytest.raises(ValueError, match="The list of IDs to delete cannot be empty"):
await async_client.delete_by_id([])
@patch(f"{_MODULE_PATH}.aiosolr.Client.update")
async def test_async_solr_client_delete_by_id_aiosolr_error(
mock_aiosolr_update: MagicMock,
mock_base_solr_url: str,
) -> None:
# GIVEN
async_client = AsyncSolrClient(base_url=mock_base_solr_url)
mock_aiosolr_update.side_effect = aiosolr.SolrError("some error")
# WHEN / THEN
with pytest.raises(
ValueError,
match=f"Error during Aiosolr call, type={aiosolr.SolrError}",
):
await async_client.delete_by_id(["doc1", "doc2"])
@params_delete_by_id
@pytest.mark.uses_docker
async def test_async_solr_client_delete_by_id_docker_solr(
function_unique_solr_collection_url: str,
mock_solr_raw_input_documents: list[dict[str, Any]],
ids_to_delete: list[str],
expected_remaining_ids: list[str],
) -> None:
# GIVEN
async_client = AsyncSolrClient(base_url=function_unique_solr_collection_url)
search_query = {"q": "*:*", "fl": "id"}
expected_status = 200
# WHEN
# add, and ensure the docs are present
await async_client.add(mock_solr_raw_input_documents)
await asyncio.sleep(5)
res_after_add = await async_client.search(search_query)
assert len(res_after_add.response.docs) == len(mock_solr_raw_input_documents)
actual_response = await async_client.delete_by_id(ids_to_delete)
await asyncio.sleep(5)
# THEN
assert actual_response.response_header.status == expected_status
res_after_del = await async_client.search(search_query)
retrieved_ids = sorted([doc["id"] for doc in res_after_del.response.docs])
assert retrieved_ids == expected_remaining_ids
@patch(f"{_MODULE_PATH}.aiosolr.Client.update")
async def test_async_solr_client_clear_collection_valid(
mock_aiosolr_update: MagicMock,
mock_aiosolr_update_response: aiosolr.Response,
mock_solr_update_response: SolrUpdateResponse,
mock_base_solr_url: str,
) -> None:
# GIVEN
async_client = AsyncSolrClient(base_url=mock_base_solr_url)
expected_query = {"delete": {"query": "*:*"}}
mock_aiosolr_update.return_value = mock_aiosolr_update_response
# WHEN
actual_response = await async_client.clear_collection()
# THEN
mock_aiosolr_update.assert_called_once_with(data=expected_query)
assert actual_response == mock_solr_update_response
@patch(f"{_MODULE_PATH}.aiosolr.Client.update")
async def test_async_solr_client_clear_collection_aiosolr_error(
mock_aiosolr_update: MagicMock,
mock_base_solr_url: str,
) -> None:
# GIVEN
async_client = AsyncSolrClient(base_url=mock_base_solr_url)
mock_aiosolr_update.side_effect = aiosolr.SolrError("some error")
# WHEN / THEN
with pytest.raises(
ValueError,
match=f"Error during Aiosolr call, type={aiosolr.SolrError}",
):
await async_client.clear_collection()
@pytest.mark.uses_docker
async def test_async_solr_client_clear_collection_docker_solr(
function_unique_solr_collection_url: str,
mock_solr_raw_input_documents: list[dict[str, Any]],
) -> None:
# GIVEN
async_client = AsyncSolrClient(base_url=function_unique_solr_collection_url)
search_query = {"q": "*:*", "fl": "id,text_txt_en,score"}
# WHEN
# add, and ensure the docs are present
await async_client.add(mock_solr_raw_input_documents)
await asyncio.sleep(5)
res_after_add = await async_client.search(search_query)
assert len(res_after_add.response.docs) == len(mock_solr_raw_input_documents)
# delete once we're sure they're there
await async_client.clear_collection()
await asyncio.sleep(5)
# THEN
res_after_del = await async_client.search(search_query)
assert len(res_after_del.response.docs) == 0
@patch(f"{_MODULE_PATH}.aiosolr.Client.query")
async def test_async_solr_client_search_valid(
mock_aiosolr_search: MagicMock,
mock_aiosolr_search_results: aiosolr.Response,
mock_solr_select_response: SolrSelectResponse,
mock_base_solr_url: str,
) -> None:
# GIVEN
async_client = AsyncSolrClient(base_url=mock_base_solr_url)
mock_aiosolr_search.return_value = mock_aiosolr_search_results
# WHEN
actual_response = await async_client.search({"q": "president", "fl": "*,score"})
# THEN
mock_aiosolr_search.assert_called_once_with(q="president", fl="*,score")
assert actual_response == mock_solr_select_response
@patch(f"{_MODULE_PATH}.aiosolr.Client.query")
async def test_async_solr_client_search_aiosolr_error(
mock_aiosolr_search: MagicMock,
mock_base_solr_url: str,
) -> None:
# GIVEN
async_client = AsyncSolrClient(base_url=mock_base_solr_url)
mock_aiosolr_search.side_effect = aiosolr.SolrError("some error")
# WHEN / THEN
with pytest.raises(
ValueError,
match=f"Error during Aiosolr call, type={aiosolr.SolrError}",
):
await async_client.search({"q": "president", "fl": "*,score"})
@patch(f"{_MODULE_PATH}.aiosolr.Client.query")
async def test_async_solr_client_search_validation_error(
mock_aiosolr_search: MagicMock,
mock_base_solr_url: str,
) -> None:
# GIVEN
async_client = AsyncSolrClient(base_url=mock_base_solr_url)
mock_aiosolr_search.side_effect = ValidationError("fake", [])
# WHEN / THEN
with pytest.raises(ValueError, match="Unexpected response format from Solr"):
await async_client.search({"q": "president", "fl": "*,score"})
@params_search_queries
@pytest.mark.uses_docker
async def test_async_solr_client_search_docker_solr(
function_unique_solr_collection_url: str,
mock_solr_raw_input_documents: list[dict[str, Any]],
mock_solr_expected_retrieved_documents: list[dict[str, Any]],
input_query: dict[str, Any],
expected_doc_indexes: list[int],
requires_score: bool,
) -> None:
# GIVEN
async_client = AsyncSolrClient(base_url=function_unique_solr_collection_url)
expected_docs = [
doc
for i, doc in enumerate(mock_solr_expected_retrieved_documents)
if i in expected_doc_indexes
]
if requires_score:
for doc in expected_docs:
doc["score"] = mock.ANY
# WHEN
await async_client.add(mock_solr_raw_input_documents)
await asyncio.sleep(5)
results = await async_client.search(input_query)
# THEN
compare_documents(expected_docs, results.response.docs)
def test_async_solr_client_str_output(
mock_base_solr_url: str,
) -> None:
# GIVEN
async_client = AsyncSolrClient(base_url=mock_base_solr_url)
# WHEN / THEN
assert str(async_client) == f"AsyncSolrClient(base_url='{mock_base_solr_url}')"
@pytest.mark.parametrize(
("input_url", "expected_args"),
[
(
"http://localhost:80/solr/my-collection",
{
"host": "localhost",
"port": 80,
"scheme": "http",
"collection": "my-collection",
"read_timeout": 10,
"write_timeout": 10,
},
),
(
"http://localhost:80/solr/my-collection/",
{
"host": "localhost",
"port": 80,
"scheme": "http",
"collection": "my-collection",
"read_timeout": 10,
"write_timeout": 10,
},
),
(
"http://0.0.0.0:80/solr/my-collection",
{
"host": "0.0.0.0",
"port": 80,
"scheme": "http",
"collection": "my-collection",
"read_timeout": 10,
"write_timeout": 10,
},
),
(
"http://0.0.0.0:80/solr/my-collection/",
{
"host": "0.0.0.0",
"port": 80,
"scheme": "http",
"collection": "my-collection",
"read_timeout": 10,
"write_timeout": 10,
},
),
(
"https://some.solr.host.com/api/solr/my-collection",
{
"connection_url": "https://some.solr.host.com/api/solr/my-collection",
"read_timeout": 10,
"write_timeout": 10,
},
),
(
"https://some.solr.host.com/api/solr/my-collection/",
{
"connection_url": "https://some.solr.host.com/api/solr/my-collection",
"read_timeout": 10,
"write_timeout": 10,
},
),
],
ids=[
"localhost URL",
"localhost URL with trailing slash",
"0.0.0.0 URL",
"0.0.0.0 URL with trailing slash",
"External URL",
"External URL with trailing slash",
],
)
@pytest.mark.parametrize(
("input_headers", "expected_headers"),
[
(None, {}),
({}, {}),
(
{"Content-Type": "application/json"},
{"Content-Type": "application/json"},
),
],
ids=["null value", "empty dict", "valid header dict"],
)
@pytest.mark.parametrize(
"client_kwargs",
[{}, {"ttl_dns_cache": 4800}],
ids=["empty dict", "valid extra kwargs"],
)
@patch(f"{_MODULE_PATH}.aiosolr.Client", autospec=True)
async def test_async_solr_client_build_client(
mock_aiosolr_client_init: MagicMock,
input_url: str,
expected_args: dict[str, Any],
input_headers: Optional[dict[str, str]],
expected_headers: dict[str, str],
client_kwargs: dict[str, Any],
) -> None:
# GIVEN
mock_aiosolr_client_instance = mock_aiosolr_client_init.return_value
mock_session = MagicMock(spec=aiohttp.ClientSession, headers={})
mock_aiosolr_client_instance.session = mock_session
expected_args = {**expected_args, **client_kwargs}
# WHEN
client = AsyncSolrClient(
base_url=input_url,
request_timeout_sec=10,
headers=input_headers,
**client_kwargs,
)
# ensure the inner client gets built
_ = await client._build_client()
# THEN
# handle py3.9
if sys.version_info < (3, 10):
expected_args["timeout"] = expected_args["read_timeout"]
del expected_args["read_timeout"]
del expected_args["write_timeout"]
mock_aiosolr_client_init.assert_called_once_with(**expected_args)
assert mock_aiosolr_client_instance.session.headers == expected_headers
@pytest.mark.parametrize(
("input_url", "input_timeout"),
[("https://some.solr.host", -1), (" ", 10), ("", 10), ("", -1)],
ids=[
"Negative timeout value",
"Non-empty whitespace URL",
"Empty URL",
"Empty URL + negative timeout",
],
)
def test_async_solr_client_build_client_invalid_params(
input_url: str, input_timeout: int
) -> None:
# WHEN / THEN
with pytest.raises(ValueError):
_ = AsyncSolrClient(base_url=input_url, request_timeout_sec=input_timeout)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-solr/tests/test_async_client.py",
"license": "MIT License",
"lines": 443,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-solr/tests/test_client_utils.py | from datetime import date, datetime, timezone
try:
from datetime import UTC
except ImportError:
UTC = timezone.utc
from typing import Any, Union
from zoneinfo import ZoneInfo
import pytest
from llama_index.vector_stores.solr.client.utils import (
format_datetime_for_solr,
prepare_document_for_solr,
)
@pytest.mark.parametrize(
("input_dt", "expected_output"),
[
(datetime(2025, 2, 18, 1, 2, 3, tzinfo=UTC), "2025-02-18T01:02:03Z"),
(datetime(2025, 2, 18, 1, 2, 3), "2025-02-18T01:02:03Z"),
(
datetime(2025, 2, 18, 1, 2, 3, tzinfo=ZoneInfo("America/New_York")),
"2025-02-18T06:02:03Z",
),
(date(2025, 2, 18), "2025-02-18T00:00:00Z"),
],
ids=["UTC datetime", "Naive datetime", "Local datetime", "Date (no timezone)"],
)
def test_format_datetime_for_solr(
input_dt: Union[datetime, date], expected_output: str
) -> None:
# WHEN
actual_output = format_datetime_for_solr(input_dt)
# THEN
assert actual_output == expected_output
def test_prepare_document_for_solr(
mock_solr_raw_input_documents: list[dict[str, Any]],
mock_solr_updated_input_documents: list[dict[str, Any]],
) -> None:
# WHEN
actual_updated_docs = [
prepare_document_for_solr(doc) for doc in mock_solr_raw_input_documents
]
# THEN
assert actual_updated_docs == mock_solr_updated_input_documents
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-solr/tests/test_client_utils.py",
"license": "MIT License",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-solr/tests/test_solr_vector_store.py | """Test OSS Apache Solr vector store."""
from typing import Any, Optional
from unittest.mock import AsyncMock, MagicMock
import pytest
from llama_index.core.schema import BaseNode, TextNode
from llama_index.core.vector_stores.types import (
FilterCondition,
FilterOperator,
MetadataFilter,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.vector_stores.solr.base import (
ApacheSolrVectorStore,
)
from llama_index.vector_stores.solr.client.async_ import AsyncSolrClient
from llama_index.vector_stores.solr.client.responses import (
SolrResponseHeader,
SolrSelectResponse,
SolrSelectResponseBody,
)
from llama_index.vector_stores.solr.client.sync import SyncSolrClient
from llama_index.vector_stores.solr.types import BoostedTextField, SolrQueryDict
# Test parameter decorators for reuse
params_add_kwargs = pytest.mark.parametrize(
"add_kwargs",
[{"some": "arg"}, {}],
ids=["Has add_kwargs", "No add_kwargs"],
)
params_delete_kwargs = pytest.mark.parametrize(
"delete_kwargs",
[{"some": "arg"}, {}],
ids=["Has delete_kwargs", "No delete_kwargs"],
)
@pytest.fixture
def mock_sync_client() -> MagicMock:
"""Mock synchronous Solr client."""
return MagicMock(spec=SyncSolrClient)
@pytest.fixture
def mock_async_client() -> AsyncMock:
"""Mock asynchronous Solr client."""
return AsyncMock(spec=AsyncSolrClient)
@pytest.fixture
def mock_solr_response_docs() -> list[dict[str, Any]]:
"""Mock Solr response documents."""
return [
{
"id": "node0",
"contents": "some text",
"embedding": [0.1, 0.2, 0.3],
"extra_field": "extra field",
"other_extra_field": "other extra field",
"score": 0.95,
},
{
"id": "node1",
"contents": "some text",
"embedding": [0.1, 0.2, 0.3],
"extra_field": "extra field",
"other_extra_field": "other extra field",
"score": 0.85,
},
]
@pytest.fixture
def mock_solr_response(
mock_solr_response_docs: list[dict[str, Any]],
) -> SolrSelectResponse:
"""Mock Solr select response."""
return SolrSelectResponse(
response=SolrSelectResponseBody(
docs=mock_solr_response_docs,
num_found=len(mock_solr_response_docs),
num_found_exact=True,
start=0,
),
response_header=SolrResponseHeader(status=200),
)
@pytest.fixture
def mock_vector_store_query_result(
mock_solr_response_docs: list[dict[str, Any]],
) -> VectorStoreQueryResult:
"""Mock vector store query result."""
return VectorStoreQueryResult(
ids=[doc["id"] for doc in mock_solr_response_docs],
nodes=[
TextNode(
id_=doc["id"],
text=doc["contents"],
embedding=doc["embedding"],
metadata={
"extra_field": doc["extra_field"],
"other_extra_field": doc["other_extra_field"],
},
)
for doc in mock_solr_response_docs
],
similarities=[doc["score"] for doc in mock_solr_response_docs],
)
@pytest.fixture
def mock_solr_vector_store(
mock_sync_client: MagicMock,
mock_async_client: AsyncMock,
) -> ApacheSolrVectorStore:
"""Mock Solr vector store with basic configuration."""
return ApacheSolrVectorStore(
sync_client=mock_sync_client,
async_client=mock_async_client,
nodeid_field="id",
docid_field="docid",
content_field="contents",
embedding_field="embedding",
metadata_to_solr_field_mapping=[("author", "author_field")],
solr_field_preprocessor_kwargs={},
)
def create_sample_input_nodes(
num_nodes: int = 3,
) -> tuple[list[BaseNode], list[dict[str, Any]]]:
"""Create sample input nodes and expected Solr data for testing."""
nodes = []
expected_data = []
for i in range(num_nodes):
node = TextNode(
id_=f"node{i}",
text=f"content {i}",
embedding=[0.1 * i, 0.2 * i, 0.3 * i],
metadata={"author": f"author{i}", "topic": f"topic{i}"},
)
nodes.append(node)
expected_data.append(
{
"id": f"node{i}",
"contents": f"content {i}",
"embedding": [0.1 * i, 0.2 * i, 0.3 * i],
"docid": None,
"author_field": f"author{i}", # mapped via metadata_to_solr_field_mapping
}
)
return nodes, expected_data
@pytest.mark.parametrize(
("query_dict", "expected_fields"),
[
# Minimal required fields only
(
{"q": "*:*", "fq": []},
{"q": "*:*", "fq": []},
),
# All fields present
(
{
"q": "{!knn f=embedding topK=10}[0.1, 0.2, 0.3]",
"fq": ["field1:value1", "field2:value2"],
"fl": "id,content,score",
"rows": "20",
},
{
"q": "{!knn f=embedding topK=10}[0.1, 0.2, 0.3]",
"fq": ["field1:value1", "field2:value2"],
"fl": "id,content,score",
"rows": "20",
},
),
# With optional fl only
(
{"q": "title:test", "fq": ["status:active"], "fl": "id,title"},
{"q": "title:test", "fq": ["status:active"], "fl": "id,title"},
),
# With optional rows only
(
{"q": "content:search", "fq": [], "rows": "50"},
{"q": "content:search", "fq": [], "rows": "50"},
),
],
ids=[
"Minimal required fields",
"All fields present",
"With optional fl",
"With optional rows",
],
)
def test_solr_query_dict_successful_creation(
query_dict: dict[str, Any], expected_fields: dict[str, Any]
) -> None:
"""Test successful creation of SolrQueryDict with various field combinations."""
solr_query: SolrQueryDict = query_dict
for key, expected_value in expected_fields.items():
assert solr_query[key] == expected_value
"""Store Creation Tests"""
@pytest.mark.parametrize(
("additional_config", "expected_attributes"),
[
# Minimal configuration
({}, {"docid_field": "docid", "content_field": "contents"}),
# Override some fields
(
{"content_field": "text_content", "embedding_field": "vectors"},
{"content_field": "text_content", "embedding_field": "vectors"},
),
# Add text search fields
(
{"text_search_fields": [BoostedTextField(field="title", boost_factor=2.0)]},
{"text_search_fields": [BoostedTextField(field="title", boost_factor=2.0)]},
),
# Custom output fields
(
{"output_fields": ["id", "title"]},
{"output_fields": ["id", "title", "score"]}, # score auto-added
),
],
ids=[
"Default fixture config",
"Override fields",
"With text search",
"Custom output fields",
],
)
def test_vector_store_successful_creation(
mock_sync_client: MagicMock,
mock_async_client: AsyncMock,
additional_config: dict[str, Any],
expected_attributes: dict[str, Any],
) -> None:
"""Test successful creation of ApacheSolrVectorStore using existing fixtures."""
base_config = {
"sync_client": mock_sync_client,
"async_client": mock_async_client,
"nodeid_field": "id",
"docid_field": "docid",
"content_field": "contents",
"embedding_field": "embedding",
"metadata_to_solr_field_mapping": [("author", "author_field")],
"solr_field_preprocessor_kwargs": {},
}
store = ApacheSolrVectorStore(**{**base_config, **additional_config})
assert store.sync_client is mock_sync_client
assert store.async_client is mock_async_client
assert store.nodeid_field == "id"
for attr_name, expected_value in expected_attributes.items():
assert getattr(store, attr_name) == expected_value
@pytest.mark.parametrize(
("invalid_config", "error_match"),
[
# Missing required sync_client
(
{
"async_client": AsyncMock(),
"nodeid_field": "id",
"solr_field_preprocessor_kwargs": {},
},
"Field required",
),
# Missing required async_client
(
{
"sync_client": MagicMock(),
"nodeid_field": "id",
"solr_field_preprocessor_kwargs": {},
},
"Field required",
),
# Missing required nodeid_field
(
{
"sync_client": MagicMock(),
"async_client": AsyncMock(),
"solr_field_preprocessor_kwargs": {},
},
"Field required",
),
# Empty text_search_fields (violates MinLen(1))
(
{
"sync_client": MagicMock(),
"async_client": AsyncMock(),
"nodeid_field": "id",
"text_search_fields": [],
"solr_field_preprocessor_kwargs": {},
},
"at least 1",
),
# Empty output_fields (violates MinLen(1))
(
{
"sync_client": MagicMock(),
"async_client": AsyncMock(),
"nodeid_field": "id",
"output_fields": [],
"solr_field_preprocessor_kwargs": {},
},
"at least 1",
),
],
ids=[
"Missing sync_client",
"Missing async_client",
"Missing nodeid_field",
"Empty text_search_fields",
"Empty output_fields",
],
)
def test_apache_solr_vector_store_creation_failures(
invalid_config: dict[str, Any],
error_match: str,
) -> None:
"""Test ApacheSolrVectorStore creation validation failures."""
with pytest.raises(ValueError, match=error_match):
ApacheSolrVectorStore(**invalid_config)
def test_vector_store_output_fields_validation(
mock_sync_client: MagicMock,
mock_async_client: AsyncMock,
) -> None:
"""Test that output_fields validator ensures 'score' is always included."""
store = ApacheSolrVectorStore(
sync_client=mock_sync_client,
async_client=mock_async_client,
nodeid_field="id",
output_fields=["field1", "field2"],
solr_field_preprocessor_kwargs={},
)
# 'score' should be automatically added
assert "score" in store.output_fields
assert set(store.output_fields) == {"field1", "field2", "score"}
# output_fields already with 'score'
store2 = ApacheSolrVectorStore(
sync_client=mock_sync_client,
async_client=mock_async_client,
nodeid_field="id",
output_fields=["field1", "score", "field2"],
solr_field_preprocessor_kwargs={},
)
# 'score' should not be duplicated
assert store2.output_fields.count("score") == 1
def test_vector_store_client_property(
mock_solr_vector_store: ApacheSolrVectorStore,
mock_sync_client: MagicMock,
) -> None:
"""Test client property returns sync client."""
actual_client = mock_solr_vector_store.client
assert actual_client is mock_sync_client
def test_vector_store_aclient_property(
mock_solr_vector_store: ApacheSolrVectorStore,
mock_async_client: AsyncMock,
) -> None:
"""Test aclient property returns async client."""
actual_client = mock_solr_vector_store.aclient
assert actual_client is mock_async_client
"""Query Construction Tests"""
@pytest.mark.parametrize(
(
"store_embedding_field",
"query_embedding_field",
"query_embedding",
"similarity_top_k",
"should_succeed",
),
[
# Success cases
("store_embedding", None, [1, 2, 3], None, True),
(None, "query_embedding", [1, 2, 3], None, True),
("store_embedding", "query_embedding", [1, 2, 3], 10, True),
# Failure cases
(None, None, [1, 2, 3], None, False), # No embedding field specified
],
ids=[
"store_field_only_success",
"query_field_only_success",
"both_fields_success",
"no_field_fail",
],
)
def test_to_solr_query_dense(
mock_sync_client: MagicMock,
mock_async_client: AsyncMock,
store_embedding_field: Optional[str],
query_embedding_field: Optional[str],
query_embedding: Optional[list[float]],
similarity_top_k: Optional[int],
should_succeed: bool,
) -> None:
"""Test _to_solr_query for dense vector queries - both success and failure cases."""
store = ApacheSolrVectorStore(
sync_client=mock_sync_client,
async_client=mock_async_client,
nodeid_field="id",
embedding_field=store_embedding_field,
solr_field_preprocessor_kwargs={},
)
query_args = {
"mode": VectorStoreQueryMode.DEFAULT,
"query_embedding": query_embedding,
}
if query_embedding_field:
query_args["embedding_field"] = query_embedding_field
if similarity_top_k:
query_args["similarity_top_k"] = similarity_top_k
query = VectorStoreQuery(**query_args)
if should_succeed:
# success case
result = store._to_solr_query(query)
# Verify KNN query structure
assert "{!knn f=" in result["q"]
assert result["q"].endswith(f"{query_embedding}")
# Check field precedence: query field > store field
expected_field = query_embedding_field or store_embedding_field
assert f"f={expected_field}" in result["q"]
# Check topK value
expected_topk = similarity_top_k or 1
assert f"topK={expected_topk}" in result["q"]
else:
# failure case
with pytest.raises(ValueError):
store._to_solr_query(query)
@pytest.mark.parametrize(
("text_search_fields", "query_str", "sparse_top_k", "should_succeed"),
[
# Success cases
(
[BoostedTextField(field="title", boost_factor=2.0)],
"test query",
None,
True,
),
(
[BoostedTextField(field="title"), BoostedTextField(field="content")],
"test",
20,
True,
),
# Failure cases
(None, "test query", None, False), # No text search fields
([BoostedTextField(field="title")], None, None, False), # No query string
],
ids=[
"single_field_success",
"multiple_fields_success",
"no_fields_fail",
"no_query_str_fail",
],
)
def test_to_solr_query_bm25(
mock_sync_client: MagicMock,
mock_async_client: AsyncMock,
text_search_fields: Optional[list[BoostedTextField]],
query_str: Optional[str],
sparse_top_k: Optional[int],
should_succeed: bool,
) -> None:
"""Test _to_solr_query for BM25 text search queries - both success and failure cases."""
store = ApacheSolrVectorStore(
sync_client=mock_sync_client,
async_client=mock_async_client,
nodeid_field="id",
text_search_fields=text_search_fields,
solr_field_preprocessor_kwargs={},
)
query_args = {
"mode": VectorStoreQueryMode.TEXT_SEARCH,
"query_str": query_str,
}
if sparse_top_k:
query_args["sparse_top_k"] = sparse_top_k
query = VectorStoreQuery(**query_args)
if should_succeed:
# success case
result = store._to_solr_query(query)
# verify dismax query structure
assert "{!dismax" in result["q"]
assert "qf=" in result["q"]
# Check for escaped query string (spaces become \\ )
escaped_query_str = query_str.replace(" ", "\\ ")
assert escaped_query_str in result["q"]
# Check field boosting is preserved
for field in text_search_fields:
field_str = field.get_query_str()
assert field_str in result["q"]
# Check rows parameter - only if sparse_top_k was provided
if sparse_top_k is not None:
assert result["rows"] == str(sparse_top_k)
else:
# When sparse_top_k is None, rows is the stringified None value
assert result["rows"] == "None"
else:
# failure case
with pytest.raises(ValueError):
store._to_solr_query(query)
@pytest.mark.parametrize(
("doc_ids", "node_ids", "filters", "output_fields"),
[
# Test various combinations of optional parameters
(["doc1", "doc2"], None, None, None),
(None, ["node1", "node2"], None, ["field1", "field2"]),
(
None,
None,
MetadataFilters(filters=[MetadataFilter(key="status", value="active")]),
None,
),
(
["doc1"],
["node1"],
MetadataFilters(filters=[MetadataFilter(key="type", value="article")]),
["id", "title"],
),
],
ids=[
"doc_ids_only",
"node_ids_and_output_fields",
"filters_only",
"all_optional_params",
],
)
def test_to_solr_query_optional_params(
mock_solr_vector_store: ApacheSolrVectorStore,
doc_ids: Optional[list[str]],
node_ids: Optional[list[str]],
filters: Optional[MetadataFilters],
output_fields: Optional[list[str]],
) -> None:
"""Test _to_solr_query handles optional parameters correctly."""
query = VectorStoreQuery(
mode=VectorStoreQueryMode.DEFAULT,
query_embedding=[0.1, 0.2, 0.3],
doc_ids=doc_ids,
node_ids=node_ids,
filters=filters,
output_fields=output_fields,
)
result = mock_solr_vector_store._to_solr_query(query)
# Check filter queries (fq) are built correctly
if doc_ids:
doc_fq = f"docid:({' OR '.join(doc_ids)})"
assert doc_fq in result["fq"]
if node_ids:
node_fq = f"id:({' OR '.join(node_ids)})"
assert node_fq in result["fq"]
if filters:
# At least one filter should be present in fq
assert len(result["fq"]) > 0
# Check field list (fl) parameter
if output_fields:
expected_fl = ",".join([*output_fields, "score"])
assert result["fl"] == expected_fl
else:
assert result["fl"] == "*,score"
def test_to_solr_query_docid_field_missing_error(
mock_sync_client: MagicMock,
mock_async_client: AsyncMock,
) -> None:
"""Test _to_solr_query raises error when doc_ids provided but docid_field is None."""
store = ApacheSolrVectorStore(
sync_client=mock_sync_client,
async_client=mock_async_client,
nodeid_field="id",
docid_field=None, # No docid field configured
embedding_field="embedding",
solr_field_preprocessor_kwargs={},
)
query = VectorStoreQuery(
mode=VectorStoreQueryMode.DEFAULT,
query_embedding=[0.1, 0.2, 0.3],
doc_ids=["doc1", "doc2"], # Trying to filter by doc_ids
)
with pytest.raises(ValueError, match="`docid_field` must be passed"):
store._to_solr_query(query)
"""Query Execution Tests"""
@pytest.mark.parametrize(
"content_field", ["contents", None], ids=["Has contents", "No contents"]
)
@pytest.mark.parametrize(
"embedding_field", ["embedding", None], ids=["Has embedding", "No embedding"]
)
def test_vector_store_process_query_results(
mock_sync_client: MagicMock,
mock_async_client: AsyncMock,
mock_solr_response_docs: list[dict[str, Any]],
content_field: Optional[str],
embedding_field: Optional[str],
) -> None:
"""Test _process_query_results method."""
store = ApacheSolrVectorStore(
sync_client=mock_sync_client,
async_client=mock_async_client,
nodeid_field="id",
docid_field="docid",
content_field=content_field,
embedding_field=embedding_field,
metadata_to_solr_field_mapping=None,
solr_field_preprocessor_kwargs={},
)
# Prepare test data
results = mock_solr_response_docs.copy()
if not embedding_field:
for doc in results:
del doc["embedding"]
# Prepare expected metadata
metadata: dict[str, Any] = {
"extra_field": "extra field",
"other_extra_field": "other extra field",
}
if not content_field:
metadata["contents"] = "some text"
expected_results = VectorStoreQueryResult(
ids=["node0", "node1"],
nodes=[
TextNode(
id_="node0",
text="some text" if content_field else "",
embedding=[0.1, 0.2, 0.3] if embedding_field else None,
metadata=metadata,
),
TextNode(
id_="node1",
text="some text" if content_field else "",
embedding=[0.1, 0.2, 0.3] if embedding_field else None,
metadata=metadata,
),
],
similarities=[0.95, 0.85],
)
actual_results = store._process_query_results(results)
assert actual_results == expected_results
@pytest.mark.parametrize(
"mode",
[VectorStoreQueryMode.HYBRID, VectorStoreQueryMode.SEMANTIC_HYBRID],
ids=["Hybrid Mode", "Semantic Hybrid Mode"],
)
def test_vector_store_validate_query_mode_unsupported(
mock_solr_vector_store: ApacheSolrVectorStore,
mode: VectorStoreQueryMode,
) -> None:
"""Test that unsupported query modes raise ValueError."""
query = VectorStoreQuery(mode=mode)
with pytest.raises(ValueError, match="ApacheSolrVectorStore does not support"):
mock_solr_vector_store._validate_query_mode(query)
def test_vector_store_query(
mock_solr_vector_store: ApacheSolrVectorStore,
mock_solr_response: SolrSelectResponse,
mock_vector_store_query_result: VectorStoreQueryResult,
) -> None:
"""Test synchronous query method."""
input_query = VectorStoreQuery(
embedding_field="embedding",
query_embedding=[0.1, 0.2, 0.3],
output_fields=None,
)
mock_solr_vector_store.sync_client.search.return_value = mock_solr_response
actual_results = mock_solr_vector_store.query(input_query)
assert actual_results == mock_vector_store_query_result
mock_solr_vector_store.sync_client.search.assert_called_once()
@pytest.mark.asyncio
async def test_vector_store_aquery(
mock_solr_vector_store: ApacheSolrVectorStore,
mock_solr_response: SolrSelectResponse,
mock_vector_store_query_result: VectorStoreQueryResult,
) -> None:
"""Test asynchronous query method."""
input_query = VectorStoreQuery(
embedding_field="embedding",
query_embedding=[0.1, 0.2, 0.3],
output_fields=None,
)
mock_solr_vector_store.async_client.search.return_value = mock_solr_response
actual_results = await mock_solr_vector_store.aquery(input_query)
assert actual_results == mock_vector_store_query_result
mock_solr_vector_store.async_client.search.assert_called_once()
@pytest.mark.parametrize(
("nodeid_field", "nodeid_data"),
[("id", {"id": "node1"})],
ids=["Has nodeid"],
)
@pytest.mark.parametrize(
("content_field", "content_data"),
[("contents", {"contents": "some text"}), (None, {})],
ids=["Has contents", "No contents"],
)
@pytest.mark.parametrize(
("embedding_field", "embedding_data"),
[("embedding", {"embedding": [1, 2, 3]}), (None, {})],
ids=["Has embedding", "No embedding"],
)
@pytest.mark.parametrize(
("docid_field", "docid_data"),
[("docid", {"docid": "doc1"}), (None, {})],
ids=["Has docid", "No docid"],
)
@pytest.mark.parametrize(
("metadata_to_solr_field_mapping", "solr_field_data"),
[
(
[
("doc_field1", "solr_field1"),
("doc_field2", "solr_field2"),
("doc_missing_field", "solr_missing_field"),
],
{"solr_field1": "v1", "solr_field2": "v2"},
),
(None, {}),
],
ids=["Has metadata_to_solr_field_mapping", "No metadata_to_solr_field_mapping"],
)
def test_apache_solr_vector_store_get_data_from_node(
mock_sync_client: MagicMock,
mock_async_client: AsyncMock,
nodeid_field: Optional[str],
nodeid_data: dict[str, Any],
content_field: Optional[str],
content_data: dict[str, Any],
embedding_field: Optional[str],
embedding_data: dict[str, Any],
docid_field: Optional[str],
docid_data: dict[str, Any],
metadata_to_solr_field_mapping: Optional[list[tuple[str, str]]],
solr_field_data: dict[str, Any],
) -> None:
"""Test _get_data_from_node method with various field configurations."""
store = ApacheSolrVectorStore(
sync_client=mock_sync_client,
async_client=mock_async_client,
nodeid_field=nodeid_field,
docid_field=docid_field,
content_field=content_field,
embedding_field=embedding_field,
metadata_to_solr_field_mapping=metadata_to_solr_field_mapping,
solr_field_preprocessor_kwargs={},
)
expected_data = {
**nodeid_data,
**content_data,
**embedding_data,
**docid_data,
**solr_field_data,
}
# Mock node
input_node = MagicMock(
node_id="node1",
get_content=MagicMock(return_value="some text"),
get_embedding=MagicMock(return_value=[1, 2, 3]),
ref_doc_id="doc1",
metadata={"doc_field1": "v1", "doc_field2": "v2"},
)
actual_data = store._get_data_from_node(input_node)
assert actual_data == expected_data
def test_apache_solr_vector_store_get_data_from_nodes_valid(
mock_solr_vector_store: ApacheSolrVectorStore,
) -> None:
"""Test _get_data_from_nodes method."""
input_nodes, expected_data = create_sample_input_nodes()
expected_ids = [node.id_ for node in input_nodes]
actual_ids, actual_data = mock_solr_vector_store._get_data_from_nodes(input_nodes)
assert actual_ids == expected_ids
assert actual_data == expected_data
"""Add and Delete Tests"""
@params_add_kwargs
def test_vector_store_add(
mock_solr_vector_store: ApacheSolrVectorStore,
add_kwargs: dict[str, Any],
) -> None:
"""Test synchronous add method."""
input_nodes, expected_data = create_sample_input_nodes()
expected_ids = [node.id_ for node in input_nodes]
actual_ids = mock_solr_vector_store.add(input_nodes, **add_kwargs)
assert actual_ids == expected_ids
mock_solr_vector_store.sync_client.add.assert_called_once_with(expected_data)
@params_add_kwargs
@pytest.mark.asyncio
async def test_vector_store_async_add(
mock_solr_vector_store: ApacheSolrVectorStore,
add_kwargs: dict[str, Any],
) -> None:
"""Test asynchronous add method."""
input_nodes, expected_data = create_sample_input_nodes()
expected_ids = [node.id_ for node in input_nodes]
actual_ids = await mock_solr_vector_store.async_add(input_nodes, **add_kwargs)
assert actual_ids == expected_ids
mock_solr_vector_store.async_client.add.assert_called_once_with(expected_data)
@pytest.mark.asyncio
async def test_vector_store_async_add_raises_for_empty_node_list(
mock_solr_vector_store: ApacheSolrVectorStore,
) -> None:
"""Test async_add raises error for empty node list."""
with pytest.raises(ValueError, match="Call to 'async_add' with no contents"):
await mock_solr_vector_store.async_add([])
@params_delete_kwargs
def test_vector_store_delete(
mock_solr_vector_store: ApacheSolrVectorStore,
delete_kwargs: dict[str, Any],
) -> None:
"""Test synchronous delete method."""
input_ref_doc_id = "doc1"
mock_solr_vector_store.delete(input_ref_doc_id, **delete_kwargs)
mock_solr_vector_store.sync_client.delete_by_id.assert_called_once_with(
[input_ref_doc_id]
)
def test_vector_store_delete_no_docid_field(
mock_sync_client: MagicMock,
mock_async_client: AsyncMock,
) -> None:
"""Test delete works even when docid_field is None."""
store = ApacheSolrVectorStore(
sync_client=mock_sync_client,
async_client=mock_async_client,
nodeid_field="id",
docid_field=None,
solr_field_preprocessor_kwargs={},
)
store.delete("doc1")
# delete_by_id should still be called with the ref_doc_id
mock_sync_client.delete_by_id.assert_called_once_with(["doc1"])
@params_delete_kwargs
@pytest.mark.asyncio
async def test_vector_store_adelete(
mock_solr_vector_store: ApacheSolrVectorStore,
delete_kwargs: dict[str, Any],
) -> None:
"""Test asynchronous delete method."""
input_ref_doc_id = "doc1"
await mock_solr_vector_store.adelete(input_ref_doc_id, **delete_kwargs)
mock_solr_vector_store.async_client.delete_by_id.assert_called_once_with(
[input_ref_doc_id]
)
# Parameters for delete_nodes / adelete_nodes that use delete_by_id (no filters)
@pytest.mark.parametrize(
(
"input_nodes",
"input_filters",
),
[
(["node1", "node2"], None),
(["node1", "node2"], MetadataFilters(filters=[])),
],
ids=[
"node_ids=non-empty, filters=None",
"node_ids=non-empty, filters=empty set",
],
)
@params_delete_kwargs
def test_vector_store_delete_nodes_by_id(
mock_solr_vector_store: ApacheSolrVectorStore,
input_nodes: Optional[list[str]],
input_filters: Optional[MetadataFilters],
delete_kwargs: dict[str, Any],
) -> None:
"""Test synchronous delete_nodes method using delete_by_id."""
mock_solr_vector_store.delete_nodes(input_nodes, input_filters, **delete_kwargs)
mock_solr_vector_store.sync_client.delete_by_id.assert_called_once_with(input_nodes)
# Parameters for delete_nodes / adelete_nodes that use delete_by_query (with filters)
@pytest.mark.parametrize(
(
"input_nodes",
"input_filters",
"expected_query",
),
[
(
None,
MetadataFilters(
filters=[
MetadataFilter(
key="docid", value="doc1", operator=FilterOperator.EQ
)
]
),
"((docid:doc1))",
),
(
[],
MetadataFilters(filters=[MetadataFilter(key="docid", value="doc1")]),
"((docid:doc1))",
),
(
None,
MetadataFilters(
filters=[
MetadataFilter(key="docid", value="doc1"),
MetadataFilter(key="docid", value="doc2"),
],
condition=FilterCondition.OR,
),
"((docid:doc1) OR (docid:doc2))",
),
(
["node1", "node2"],
MetadataFilters(filters=[MetadataFilter(key="docid", value="doc1")]),
"(id:(node1 OR node2) AND ((docid:doc1)))",
),
],
ids=[
"node_ids=None, filters=1 filter",
"node_ids=[], filters=1 filter",
"node_ids=None, filters=2 filters",
"node_ids=non-empty, filters=1 filter",
],
)
@params_delete_kwargs
def test_vector_store_delete_nodes(
mock_solr_vector_store: ApacheSolrVectorStore,
input_nodes: Optional[list[str]],
input_filters: Optional[MetadataFilters],
expected_query: str,
delete_kwargs: dict[str, Any],
) -> None:
"""Test synchronous delete_nodes method using delete_by_query."""
mock_solr_vector_store.delete_nodes(input_nodes, input_filters, **delete_kwargs)
mock_solr_vector_store.sync_client.delete_by_query.assert_called_once_with(
expected_query
)
# Parameters for adelete_nodes that use delete_by_id (no filters)
@pytest.mark.parametrize(
(
"input_nodes",
"input_filters",
),
[
(["node1", "node2"], None),
],
ids=[
"node_ids=non-empty, filters=None",
],
)
@params_delete_kwargs
@pytest.mark.asyncio
async def test_vector_store_adelete_nodes_by_id(
mock_solr_vector_store: ApacheSolrVectorStore,
input_nodes: Optional[list[str]],
input_filters: Optional[MetadataFilters],
delete_kwargs: dict[str, Any],
) -> None:
"""Test asynchronous delete_nodes method using delete_by_id."""
await mock_solr_vector_store.adelete_nodes(
input_nodes, input_filters, **delete_kwargs
)
mock_solr_vector_store.async_client.delete_by_id.assert_called_once_with(
input_nodes
)
# Parameters for adelete_nodes that use delete_by_query (with filters)
@pytest.mark.parametrize(
(
"input_nodes",
"input_filters",
"expected_query",
),
[
(
None,
MetadataFilters(
filters=[
MetadataFilter(
key="docid", value="doc1", operator=FilterOperator.EQ
)
]
),
"((docid:doc1))",
),
],
ids=[
"node_ids=None, filters=1 filter",
],
)
@params_delete_kwargs
@pytest.mark.asyncio
async def test_vector_store_adelete_nodes(
mock_solr_vector_store: ApacheSolrVectorStore,
input_nodes: Optional[list[str]],
input_filters: Optional[MetadataFilters],
expected_query: str,
delete_kwargs: dict[str, Any],
) -> None:
"""Test asynchronous delete_nodes method using delete_by_query."""
await mock_solr_vector_store.adelete_nodes(
input_nodes, input_filters, **delete_kwargs
)
mock_solr_vector_store.async_client.delete_by_query.assert_called_once_with(
expected_query
)
@pytest.mark.parametrize(
("input_nodes", "input_filters", "error_match"),
[
(None, None, "At least one of `node_ids` or `filters` must be passed"),
([], None, "At least one of `node_ids` or `filters` must be passed"),
(
None,
MetadataFilters(filters=[]),
"Neither `node_ids` nor non-empty `filters` were passed",
),
(
[],
MetadataFilters(filters=[]),
"Neither `node_ids` nor non-empty `filters` were passed",
),
],
ids=[
"node_ids=None, filters=None",
"node_ids=[], filters=None",
"node_ids=None, filters=empty filter",
"node_ids=[], filters=empty filter",
],
)
@params_delete_kwargs
def test_vector_store_delete_nodes_invalid_input(
mock_solr_vector_store: ApacheSolrVectorStore,
input_nodes: Optional[list[str]],
input_filters: Optional[MetadataFilters],
delete_kwargs: dict[str, Any],
error_match: str,
) -> None:
"""Test delete_nodes raises error for invalid input."""
with pytest.raises(ValueError, match=error_match):
mock_solr_vector_store.delete_nodes(input_nodes, input_filters, **delete_kwargs)
"""Cleanup Tests"""
def test_vector_store_clear(
mock_solr_vector_store: ApacheSolrVectorStore,
) -> None:
"""Test synchronous clear method."""
mock_solr_vector_store.clear()
mock_solr_vector_store.sync_client.clear_collection.assert_called_once()
@pytest.mark.asyncio
async def test_vector_store_aclear(
mock_solr_vector_store: ApacheSolrVectorStore,
) -> None:
"""Test asynchronous clear method."""
await mock_solr_vector_store.aclear()
mock_solr_vector_store.async_client.clear_collection.assert_called_once()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-solr/tests/test_solr_vector_store.py",
"license": "MIT License",
"lines": 1023,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-solr/tests/test_solr_vector_store_query_utils.py | """Testing Solr vector store query utils."""
from typing import Union
import pytest
from llama_index.core.vector_stores.types import (
FilterCondition,
FilterOperator,
MetadataFilter,
MetadataFilters,
)
from llama_index.vector_stores.solr.query_utils import (
recursively_unpack_filters,
)
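# A note on the expected strings below: Solr's standard query parser uses
# inclusive square brackets and exclusive curly braces for range bounds, with
# "*" as an open-ended bound. Hence GT compiles to "{v TO *]", GTE to
# "[v TO *]", LT to "[* TO v}", and LTE to "[* TO v]".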
@pytest.mark.parametrize(
("input_filters", "expected_output"),
[
(MetadataFilters(filters=[]), []),
(
MetadataFilters(
filters=[MetadataFilter(key="f1", value=1, operator=FilterOperator.GT)],
),
["((f1:{1 TO *]))"],
),
(
MetadataFilters(
filters=[
MetadataFilter(key="f1", value=1, operator=FilterOperator.GT),
MetadataFilter(key="f2", value=2, operator=FilterOperator.GTE),
MetadataFilter(key="f3", value=3, operator=FilterOperator.LT),
MetadataFilter(key="f4", value=4, operator=FilterOperator.LTE),
],
),
["((f1:{1 TO *]) AND (f2:[2 TO *]) AND (f3:[* TO 3}) AND (f4:[* TO 4]))"],
),
(
MetadataFilters(
filters=[MetadataFilter(key="f1", value=1, operator=FilterOperator.GT)],
condition=FilterCondition.AND,
),
["((f1:{1 TO *]))"],
),
(
MetadataFilters(
filters=[
MetadataFilter(key="f5", value=5, operator=FilterOperator.EQ),
MetadataFilter(key="f6", value=6, operator=FilterOperator.NE),
MetadataFilter(key="f7", value="v1", operator=FilterOperator.IN),
MetadataFilter(
key="f8", value=["v1", "v2"], operator=FilterOperator.IN
),
],
condition=FilterCondition.AND,
),
["((f5:5) AND (-(f6:6)) AND (f7:v1) AND (f8:(v1 OR v2)))"],
),
(
MetadataFilters(
filters=[MetadataFilter(key="f1", value=1, operator=FilterOperator.GT)],
condition=FilterCondition.OR,
),
["((f1:{1 TO *]))"],
),
(
MetadataFilters(
filters=[
MetadataFilter(key="f9", value="v1", operator=FilterOperator.NIN),
MetadataFilter(
key="f10", value=["v1", "v2"], operator=FilterOperator.NIN
),
MetadataFilter(
key="f11", value="v3", operator=FilterOperator.TEXT_MATCH
),
],
condition=FilterCondition.OR,
),
['((-f9:v1) OR (-f10:(v1 OR v2)) OR (f11:"v3"))'],
),
(
MetadataFilters(
filters=[MetadataFilter(key="f1", value=1, operator=FilterOperator.GT)],
condition=FilterCondition.NOT,
),
["(NOT ((f1:{1 TO *])))"],
),
(
MetadataFilters(
filters=[
MetadataFilter(key="f1", value=1, operator=FilterOperator.GT),
MetadataFilter(key="f2", value=2, operator=FilterOperator.GTE),
MetadataFilter(key="f3", value=3, operator=FilterOperator.LT),
MetadataFilter(key="f4", value=4, operator=FilterOperator.LTE),
],
condition=FilterCondition.NOT,
),
[
"(NOT ((f1:{1 TO *]) AND (f2:[2 TO *]) AND (f3:[* TO 3}) AND (f4:[* TO 4])))"
],
),
(
MetadataFilters(
filters=[
MetadataFilters(
filters=[
MetadataFilter(
key="field1", operator=FilterOperator.GT, value=10
),
MetadataFilter(
key="field2", operator=FilterOperator.LT, value=20
),
MetadataFilter(
key="field2", operator=FilterOperator.NE, value=15
),
],
condition=FilterCondition.AND,
),
MetadataFilter(
key="field3", operator=FilterOperator.EQ, value="value3"
),
],
condition=FilterCondition.OR,
),
[
"(((field1:{10 TO *]) AND (field2:[* TO 20}) AND (-(field2:15))) OR (field3:value3))"
],
),
(
MetadataFilters(
filters=[MetadataFilter(key="f1", value=1, operator=FilterOperator.GT)],
condition=None,
),
["(f1:{1 TO *])"],
),
(
MetadataFilters(
filters=[
MetadataFilter(key="f1", value=1, operator=FilterOperator.GT),
MetadataFilter(key="f2", value=2, operator=FilterOperator.GTE),
MetadataFilter(key="f3", value=3, operator=FilterOperator.LT),
MetadataFilter(key="f4", value=4, operator=FilterOperator.LTE),
],
condition=None,
),
["(f1:{1 TO *])", "(f2:[2 TO *])", "(f3:[* TO 3})", "(f4:[* TO 4])"],
),
],
ids=[
"Empty subfilters list",
"Implicit AND of one filter",
"Implicit AND of multiple filters",
"Explicit AND of one filter",
"Explicit AND of multiple filters",
"OR of one filter",
"OR of multiple filters",
"NOT of one filter",
"NOT of multiple filters (implicit AND)",
"Nested MetadataFilters",
"Condition=None for one filter",
"Condition=None for multiple filters (multiple strings returned)",
],
)
def test_recursively_unpack_filters_valid_inputs(
input_filters: MetadataFilters,
expected_output: list[str],
) -> None:
actual_output = recursively_unpack_filters(input_filters)
assert actual_output == expected_output
@pytest.mark.parametrize(
("input_operator", "input_value", "error_match"),
[
# value type does not matter
(FilterOperator.CONTAINS, "some_string", "Disallowed operator used in filter"),
# value type matters
(
FilterOperator.TEXT_MATCH,
10,
"Query filter uses a non-string with the 'TEXT_MATCH'",
),
(
FilterOperator.TEXT_MATCH,
2.0,
"Query filter uses a non-string with the 'TEXT_MATCH'",
),
],
ids=[
"Unsupported operator: contains",
"text_match operator with int",
"text_match operator with float",
],
)
def test_recursively_unpack_filters_invalid_operators(
input_operator: FilterOperator,
input_value: str,
error_match: str,
) -> None:
input_filters = MetadataFilters(
filters=[
MetadataFilter(key="f1", value=1, operator=FilterOperator.GT),
MetadataFilter(key="f2", value=input_value, operator=input_operator),
],
condition=FilterCondition.AND,
)
with pytest.raises(ValueError, match=error_match):
_ = recursively_unpack_filters(input_filters)
@pytest.mark.parametrize(
"input_operator",
[
FilterOperator.GT,
FilterOperator.GTE,
FilterOperator.LT,
FilterOperator.LTE,
FilterOperator.EQ,
FilterOperator.NE,
FilterOperator.TEXT_MATCH,
],
)
@pytest.mark.parametrize(
"input_value",
[["string", "list"], [1, 2], [1.0, 2.0]],
ids=["string list", "int list", "float list"],
)
def test_recursively_unpack_filters_invalid_list_value_with_non_list_operator(
input_operator: FilterOperator,
input_value: Union[list[str], list[int], list[float]],
) -> None:
input_filters = MetadataFilters(
filters=[
MetadataFilter(key="f1", value=1, operator=FilterOperator.GT),
MetadataFilter(key="f2", value=input_value, operator=input_operator),
],
condition=FilterCondition.AND,
)
with pytest.raises(
ValueError, match="Query filter uses a list value for an incompatible operator"
):
_ = recursively_unpack_filters(input_filters)
@pytest.mark.parametrize(
("operator", "value", "expected_warning"),
[
(FilterOperator.ANY, "single_value", "treating as 'EQ' operator"),
(FilterOperator.ALL, "single_value", "treating as 'EQ' operator"),
(FilterOperator.IN, "single_value", "treating as 'EQ' operator"),
(FilterOperator.NIN, "single_value", "treating as 'NE' operator"),
],
ids=[
"ANY with non-list",
"ALL with non-list",
"IN with non-list",
"NIN with non-list",
],
)
def test_recursively_unpack_filters_warnings(
operator: FilterOperator, value: str, expected_warning: str, caplog
) -> None:
input_filters = MetadataFilters(
filters=[
MetadataFilter(key="f1", value=value, operator=operator),
],
condition=FilterCondition.AND,
)
with caplog.at_level("WARNING"):
result = recursively_unpack_filters(input_filters)
assert len(result) == 1
assert expected_warning in caplog.text
def test_recursively_unpack_filters_no_condition_warning(caplog) -> None:
input_filters = MetadataFilters(
filters=[
MetadataFilter(key="f1", value=1, operator=FilterOperator.GT),
MetadataFilter(key="f2", value=2, operator=FilterOperator.GT),
],
condition=None,
)
with caplog.at_level("WARNING"):
result = recursively_unpack_filters(input_filters)
assert len(result) == 2
assert (
"No filter condition specified, sub-filters will be returned unlinked"
in caplog.text
)
def test_any_and_in_list_equivalence() -> None:
"""ANY and IN with list values should compile to equivalent Solr queries."""
any_filters = MetadataFilters(
filters=[
MetadataFilter(key="tags", value=["v1", "v2"], operator=FilterOperator.ANY)
]
)
in_filters = MetadataFilters(
filters=[
MetadataFilter(key="tags", value=["v1", "v2"], operator=FilterOperator.IN)
]
)
any_output = recursively_unpack_filters(any_filters)
in_output = recursively_unpack_filters(in_filters)
assert any_output == in_output == ["((tags:(v1 OR v2)))"]
def test_all_list_and_semantics() -> None:
"""ALL with a list should AND the values."""
all_filters = MetadataFilters(
filters=[
MetadataFilter(key="tags", value=["v1", "v2"], operator=FilterOperator.ALL)
]
)
output = recursively_unpack_filters(all_filters)
assert output == ["((tags:(v1 AND v2)))"]
def test_all_any_in_fallbacks_warnings(caplog) -> None:
"""Non-list value for ALL/ANY/IN should fallback to EQ with warning."""
filters = MetadataFilters(
filters=[
MetadataFilter(key="f_all", value="x", operator=FilterOperator.ALL),
MetadataFilter(key="f_any", value="y", operator=FilterOperator.ANY),
MetadataFilter(key="f_in", value="z", operator=FilterOperator.IN),
],
condition=FilterCondition.AND,
)
with caplog.at_level("WARNING"):
out = recursively_unpack_filters(filters)
# Expect one combined AND group
assert len(out) == 1
assert "treating as 'EQ' operator" in caplog.text
# Basic shape check
assert "(f_all:x)" in out[0]
assert "(f_any:y)" in out[0]
assert "(f_in:z)" in out[0]
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-solr/tests/test_solr_vector_store_query_utils.py",
"license": "MIT License",
"lines": 320,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-solr/tests/test_sync_client.py | import time
from typing import Any, Optional
from unittest import mock
from unittest.mock import MagicMock, patch
import pysolr
import pytest
import requests
from pydantic import ValidationError
from llama_index.vector_stores.solr.client import (
SolrSelectResponse,
SolrUpdateResponse,
SyncSolrClient,
)
from tests.conftest import compare_documents, params_delete_by_id, params_search_queries
_MODULE_PATH = "llama_index.vector_stores.solr.client.sync"
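# The patches below target pysolr as referenced through the client module
# (llama_index.vector_stores.solr.client.sync) rather than the bare "pysolr"
# name, following the usual "patch where it is looked up" mock guidance so
# the code under test sees the patched attribute.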
@patch(f"{_MODULE_PATH}.pysolr.Solr.add")
def test_sync_solr_client_add_valid(
mock_pysolr_add: MagicMock,
mock_solr_raw_input_documents: list[dict[str, Any]],
mock_solr_updated_input_documents: list[dict[str, Any]],
mock_base_solr_url: str,
mock_pysolr_update_response: str,
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=mock_base_solr_url)
mock_pysolr_add.return_value = mock_pysolr_update_response
# WHEN
sync_client.add(mock_solr_raw_input_documents)
# THEN
mock_pysolr_add.assert_called_once_with(mock_solr_updated_input_documents)
@patch(f"{_MODULE_PATH}.pysolr.Solr.add")
def test_sync_solr_client_add_pysolr_error(
mock_pysolr_add: MagicMock,
mock_solr_raw_input_documents: list[dict[str, Any]],
mock_base_solr_url: str,
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=mock_base_solr_url)
mock_pysolr_add.side_effect = pysolr.SolrError
# WHEN / THEN
with pytest.raises(
ValueError, match=f"Error during Pysolr call, type={pysolr.SolrError}"
):
sync_client.add(mock_solr_raw_input_documents)
@patch(f"{_MODULE_PATH}.pysolr.Solr.add")
def test_sync_solr_client_add_validation_error(
mock_pysolr_add: MagicMock,
mock_solr_raw_input_documents: list[dict[str, Any]],
mock_base_solr_url: str,
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=mock_base_solr_url)
mock_pysolr_add.return_value = '{"bad_response": "dict"}'
# WHEN / THEN
with pytest.raises(ValueError, match="Unexpected response format from Solr"):
sync_client.add(mock_solr_raw_input_documents)
@pytest.mark.uses_docker
def test_sync_solr_client_add_docker_solr(
function_unique_solr_collection_url: str,
mock_solr_raw_input_documents: list[dict[str, Any]],
mock_solr_expected_retrieved_documents: list[dict[str, Any]],
) -> None:
# GIVEN
client = SyncSolrClient(base_url=function_unique_solr_collection_url)
query = {"q": "*:*", "fl": ",".join(mock_solr_raw_input_documents[0])}
# WHEN
client.add(mock_solr_raw_input_documents)
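    # Give the dockerized Solr instance time to commit and make the documents
    # searchable; the fixed 5-second wait is a pragmatic choice for these
    # integration tests (the same pattern recurs below).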
time.sleep(5)
# THEN
results = client.search(query)
compare_documents(mock_solr_expected_retrieved_documents, results.response.docs)
@patch(f"{_MODULE_PATH}.pysolr.Solr.delete")
def test_sync_solr_client_delete_by_query_valid(
mock_pysolr_delete: MagicMock,
mock_base_solr_url: str,
mock_solr_delete_response_xml: str,
mock_solr_delete_response: SolrUpdateResponse,
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=mock_base_solr_url)
input_query_string = "id:doc1"
mock_pysolr_delete.return_value = mock_solr_delete_response_xml
# WHEN
actual_response = sync_client.delete_by_query(input_query_string)
# THEN
mock_pysolr_delete.assert_called_once_with(q=input_query_string, id=None)
assert actual_response == mock_solr_delete_response
@patch(f"{_MODULE_PATH}.pysolr.Solr.delete")
def test_sync_solr_client_delete_by_query_pysolr_error(
mock_pysolr_delete: MagicMock,
mock_base_solr_url: str,
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=mock_base_solr_url)
mock_pysolr_delete.side_effect = pysolr.SolrError
# WHEN / THEN
with pytest.raises(
ValueError, match=f"Error during Pysolr call, type={pysolr.SolrError}"
):
sync_client.delete_by_query("id:doc1")
@patch(f"{_MODULE_PATH}.pysolr.Solr.delete")
def test_sync_solr_client_delete_by_query_validation_error(
mock_pysolr_delete: MagicMock,
mock_base_solr_url: str,
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=mock_base_solr_url)
mock_pysolr_delete.return_value = '{"bad_response": "dict"}'
# WHEN / THEN
with pytest.raises(ValueError):
sync_client.delete_by_query("id:doc1")
@pytest.mark.uses_docker
def test_sync_solr_client_delete_by_query_docker_solr(
function_unique_solr_collection_url: str,
mock_solr_raw_input_documents: list[dict[str, Any]],
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=function_unique_solr_collection_url)
delete_query = "int_i:1"
search_query = {"q": delete_query, "fl": "id,text_txt_en,score"}
# WHEN
# add, and ensure the docs are present
sync_client.add(mock_solr_raw_input_documents)
time.sleep(5)
res_after_add = sync_client.search(search_query)
assert len(res_after_add.response.docs) == 1
# delete once we're sure they're there
sync_client.delete_by_query(delete_query)
time.sleep(5)
# THEN
res_after_del = sync_client.search(search_query)
assert len(res_after_del.response.docs) == 0
@pytest.mark.parametrize(
"input_ids", [["doc1"], ["doc1", "doc2"]], ids=["len(ids)==1", "len(ids)>1"]
)
@patch(f"{_MODULE_PATH}.pysolr.Solr.delete")
def test_sync_solr_client_delete_by_id_valid(
mock_pysolr_delete: MagicMock,
mock_base_solr_url: str,
input_ids: list[str],
mock_solr_delete_response_xml: str,
mock_solr_delete_response: SolrUpdateResponse,
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=mock_base_solr_url)
mock_pysolr_delete.return_value = mock_solr_delete_response_xml
# WHEN
actual_response = sync_client.delete_by_id(input_ids)
# THEN
mock_pysolr_delete.assert_called_once_with(id=input_ids, q=None)
assert actual_response == mock_solr_delete_response
@patch(f"{_MODULE_PATH}.pysolr.Solr.delete")
def test_sync_solr_client_delete_by_id_pysolr_error(
mock_pysolr_delete: MagicMock,
mock_base_solr_url: str,
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=mock_base_solr_url)
mock_pysolr_delete.side_effect = pysolr.SolrError
# WHEN / THEN
with pytest.raises(
ValueError, match=f"Error during Pysolr call, type={pysolr.SolrError}"
):
sync_client.delete_by_id(["doc1", "doc2"])
@patch(f"{_MODULE_PATH}.pysolr.Solr.delete")
def test_sync_solr_client_delete_by_id_validation_error(
mock_pysolr_delete: MagicMock,
mock_base_solr_url: str,
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=mock_base_solr_url)
mock_pysolr_delete.return_value = '{"bad_response": "dict"}'
# WHEN / THEN
with pytest.raises(ValueError):
sync_client.delete_by_id(["doc1", "doc2"])
@params_delete_by_id
@pytest.mark.uses_docker
def test_sync_solr_client_delete_by_id_docker_solr(
function_unique_solr_collection_url: str,
mock_solr_raw_input_documents: list[dict[str, Any]],
ids_to_delete: list[str],
expected_remaining_ids: list[str],
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=function_unique_solr_collection_url)
search_query = {"q": "*:*", "fl": "id"}
# WHEN
# add, and ensure the docs are present
sync_client.add(mock_solr_raw_input_documents)
time.sleep(5)
res_after_add = sync_client.search(search_query)
assert len(res_after_add.response.docs) == len(mock_solr_raw_input_documents)
# delete once we're sure they're there
actual_response = sync_client.delete_by_id(ids_to_delete)
time.sleep(5)
# THEN
assert actual_response.response_header.status == 0
res_after_del = sync_client.search(search_query)
retrieved_ids = sorted([doc["id"] for doc in res_after_del.response.docs])
assert retrieved_ids == expected_remaining_ids
@patch(f"{_MODULE_PATH}.pysolr.Solr.delete")
def test_sync_solr_client_clear_collection_valid(
mock_pysolr_delete: MagicMock,
mock_base_solr_url: str,
mock_solr_delete_response_xml: str,
mock_solr_delete_response: SolrUpdateResponse,
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=mock_base_solr_url)
mock_pysolr_delete.return_value = mock_solr_delete_response_xml
# WHEN
actual_response = sync_client.clear_collection()
# THEN
mock_pysolr_delete.assert_called_once_with(id=None, q="*:*")
assert actual_response == mock_solr_delete_response
@patch(f"{_MODULE_PATH}.pysolr.Solr.delete")
def test_sync_solr_client_clear_collection_pysolr_error(
mock_pysolr_delete: MagicMock,
mock_base_solr_url: str,
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=mock_base_solr_url)
mock_pysolr_delete.side_effect = pysolr.SolrError
# WHEN / THEN
with pytest.raises(
ValueError, match=f"Error during Pysolr call, type={pysolr.SolrError}"
):
sync_client.clear_collection()
@patch(f"{_MODULE_PATH}.pysolr.Solr.delete")
def test_sync_solr_client_clear_collection_validation_error(
mock_pysolr_delete: MagicMock,
mock_base_solr_url: str,
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=mock_base_solr_url)
mock_pysolr_delete.return_value = '{"bad_response": "dict"}'
# WHEN / THEN
with pytest.raises(ValueError):
sync_client.clear_collection()
@pytest.mark.uses_docker
def test_sync_solr_client_clear_collection_docker_solr(
function_unique_solr_collection_url: str,
mock_solr_raw_input_documents: list[dict[str, Any]],
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=function_unique_solr_collection_url)
search_query = {"q": "*:*", "fl": "id,text_txt_en,score"}
# WHEN
# add, and ensure the docs are present
sync_client.add(mock_solr_raw_input_documents)
time.sleep(5)
res_after_add = sync_client.search(search_query)
assert len(res_after_add.response.docs) == len(mock_solr_raw_input_documents)
# delete once we're sure they're there
sync_client.clear_collection()
time.sleep(5)
# THEN
res_after_del = sync_client.search(search_query)
assert len(res_after_del.response.docs) == 0
@patch(f"{_MODULE_PATH}.pysolr.Solr.search")
def test_sync_solr_client_search_valid(
mock_pysolr_search: MagicMock,
mock_pysolr_search_results: pysolr.Results,
mock_solr_select_response: SolrSelectResponse,
mock_base_solr_url: str,
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=mock_base_solr_url)
mock_pysolr_search.return_value = mock_pysolr_search_results
# WHEN
actual_response = sync_client.search({"q": "president", "fl": "*,score"})
# THEN
mock_pysolr_search.assert_called_once_with(q="president", fl="*,score")
assert actual_response == mock_solr_select_response
@patch(f"{_MODULE_PATH}.pysolr.Solr.search")
def test_sync_solr_client_search_pysolr_error(
mock_pysolr_search: MagicMock,
mock_base_solr_url: str,
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=mock_base_solr_url)
mock_pysolr_search.side_effect = pysolr.SolrError
# WHEN / THEN
with pytest.raises(
ValueError, match=f"Error during Pysolr call, type={pysolr.SolrError}"
):
sync_client.search({"q": "president", "fl": "*,score"})
@patch(f"{_MODULE_PATH}.pysolr.Solr.search")
def test_sync_solr_client_search_validation_error(
mock_pysolr_search: MagicMock,
mock_base_solr_url: str,
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=mock_base_solr_url)
mock_pysolr_search.side_effect = ValidationError("fake", [])
# WHEN / THEN
with pytest.raises(ValueError, match="Unexpected response format from Solr"):
sync_client.search({"q": "president", "fl": "*,score"})
@params_search_queries
@pytest.mark.uses_docker
def test_sync_solr_client_search_docker_solr(
function_unique_solr_collection_url: str,
mock_solr_raw_input_documents: list[dict[str, Any]],
mock_solr_expected_retrieved_documents: list[dict[str, Any]],
input_query: dict[str, Any],
expected_doc_indexes: list[int],
requires_score: bool,
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=function_unique_solr_collection_url)
expected_docs = [
doc
for i, doc in enumerate(mock_solr_expected_retrieved_documents)
if i in expected_doc_indexes
]
if requires_score:
for doc in expected_docs:
doc["score"] = mock.ANY
# WHEN
sync_client.add(mock_solr_raw_input_documents)
time.sleep(5)
actual_results = sync_client.search(input_query)
# THEN
compare_documents(expected_docs, actual_results.response.docs)
def test_sync_solr_client_str_output(
mock_base_solr_url: str,
) -> None:
# GIVEN
sync_client = SyncSolrClient(base_url=mock_base_solr_url)
# WHEN / THEN
assert str(sync_client) == f"SyncSolrClient(base_url='{mock_base_solr_url}')"
@pytest.mark.parametrize(
"input_url",
[
"http://localhost:80/solr/my-collection",
"http://0.0.0.0:80/solr/my-collection",
"https://some.solr.host.com/api/solr/my-collection",
],
ids=["localhost URL", "0.0.0.0 URL", "External URL"],
)
@pytest.mark.parametrize(
("input_headers", "expected_headers"),
[
(None, {}),
({}, {}),
(
{"Content-Type": "application/json"},
{"Content-Type": "application/json"},
),
],
ids=["null value", "empty dict", "valid header dict"],
)
@pytest.mark.parametrize(
"client_kwargs",
[{}, {"search_handler": "search2"}],
ids=["empty dict", "valid extra kwargs"],
)
@patch(f"{_MODULE_PATH}.pysolr.Solr", autospec=True)
def test_sync_solr_client_build_client(
mock_pysolr_solr_init: MagicMock,
input_url: str,
input_headers: Optional[dict[str, str]],
expected_headers: dict[str, str],
client_kwargs: dict[str, str],
) -> None:
# GIVEN
mock_pysolr_solr_instance = mock_pysolr_solr_init.return_value
mock_session = MagicMock(spec=requests.Session, headers={})
mock_pysolr_solr_instance.get_session.return_value = mock_session
expected_args = {"url": input_url, "timeout": 10, **client_kwargs}
# WHEN
client = SyncSolrClient(
base_url=input_url,
request_timeout_sec=10,
headers=input_headers,
**client_kwargs,
)
# ensure the inner client gets built
_ = client._build_client()
# THEN
mock_pysolr_solr_init.assert_called_once_with(**expected_args)
assert mock_session.headers == expected_headers
@pytest.mark.parametrize(
("input_url", "input_timeout"),
[("https://some.solr.host", -1), (" ", 10), ("", 10), ("", -1)],
ids=[
"Negative timeout value",
"Non-empty whitespace URL",
"Empty URL",
"Empty URL + negative timeout",
],
)
def test_sync_solr_client_build_client_invalid_params(
input_url: str, input_timeout: int
) -> None:
# WHEN / THEN
with pytest.raises(ValueError):
_ = SyncSolrClient(base_url=input_url, request_timeout_sec=input_timeout)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-solr/tests/test_sync_client.py",
"license": "MIT License",
"lines": 398,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-solr/tests/test_types.py | import pytest
from llama_index.vector_stores.solr.types import BoostedTextField, SolrQueryDict
@pytest.mark.parametrize(
("field", "boost", "expected"),
[
("title", 1.0, "title"),
("body", 2.5, "body^2.5"),
("summary", 0.8, "summary^0.8"),
("content", 0.0, "content^0.0"),
("abstract", 10.0, "abstract^10.0"),
],
ids=["No boost", "Boost > 1", "Boost < 1", "Zero boost", "Large boost"],
)
def test_boosted_text_field_get_query_str(
field: str, boost: float, expected: str
) -> None:
    boosted_field = BoostedTextField(field=field, boost_factor=boost)
    result = boosted_field.get_query_str()
assert result == expected
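# "field^N" is Solr's query-time boost syntax; a boost_factor of 1.0 is the
# neutral default, which is why it renders with no "^" suffix in the first
# case above.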
def test_solr_query_dict_typing() -> None:
# ensure we can construct a dict conforming to the TypedDict
query: SolrQueryDict = { # type: ignore[assignment]
"q": "*:*",
"fq": [],
}
# optional fields
query["fl"] = "*"
query["rows"] = "10"
assert query["q"] == "*:*"
assert query["fq"] == []
assert query["fl"] == "*"
assert query["rows"] == "10"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-solr/tests/test_types.py",
"license": "MIT License",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-dev/tests/release/test_changelog.py | import json
from datetime import date
from pathlib import Path
from unittest import mock
import pytest
from click.testing import CliRunner
from llama_dev.cli import cli
from llama_dev.release.changelog import (
CHANGELOG_PLACEHOLDER,
_extract_pr_data,
_get_changelog_text,
_get_latest_tag,
_get_pr_numbers,
_run_command,
_update_changelog_file,
)
@mock.patch("llama_dev.release.changelog._run_command")
@mock.patch("llama_dev.release.changelog._get_latest_tag")
@mock.patch("llama_dev.release.changelog._get_pr_numbers")
@mock.patch("llama_dev.release.changelog._extract_pr_data")
@mock.patch("llama_dev.release.changelog._get_changelog_text")
@mock.patch("llama_dev.release.changelog._update_changelog_file")
def test_command_nothing_changed(
mock_update_changelog_file,
mock_get_changelog_text,
mock_extract_pr_data,
mock_get_pr_numbers,
mock_get_latest_tag,
mock_run_command,
data_path,
):
mock_get_pr_numbers.return_value = []
runner = CliRunner()
result = runner.invoke(
cli,
["--repo-root", data_path, "release", "changelog"],
)
assert result.exit_code == 1
assert "No pull requests found since the last tag" in result.stdout
@mock.patch("llama_dev.release.changelog._run_command")
@mock.patch("llama_dev.release.changelog._get_latest_tag")
@mock.patch("llama_dev.release.changelog._get_pr_numbers")
@mock.patch("llama_dev.release.changelog._extract_pr_data")
@mock.patch("llama_dev.release.changelog._get_changelog_text")
@mock.patch("llama_dev.release.changelog._update_changelog_file")
def test_command_pr_fetch_failed(
mock_update_changelog_file,
mock_get_changelog_text,
mock_extract_pr_data,
mock_get_pr_numbers,
mock_get_latest_tag,
mock_run_command,
data_path,
):
mock_get_pr_numbers.return_value = ["42"]
mock_extract_pr_data.side_effect = FileNotFoundError()
runner = CliRunner()
result = runner.invoke(
cli,
["--repo-root", data_path, "release", "changelog"],
)
assert result.exit_code == 0
assert "Could not fetch details for PR #42" in result.stdout
@mock.patch("llama_dev.release.changelog._run_command")
@mock.patch("llama_dev.release.changelog._get_latest_tag")
@mock.patch("llama_dev.release.changelog._get_pr_numbers")
@mock.patch("llama_dev.release.changelog._extract_pr_data")
@mock.patch("llama_dev.release.changelog._get_changelog_text")
@mock.patch("llama_dev.release.changelog._update_changelog_file")
def test_command_dry_run(
mock_update_changelog_file,
mock_get_changelog_text,
mock_extract_pr_data,
mock_get_pr_numbers,
mock_get_latest_tag,
mock_run_command,
data_path,
):
mock_get_changelog_text.return_value = "Fake Changelog Here"
runner = CliRunner()
result = runner.invoke(
cli,
["--repo-root", data_path, "release", "changelog", "--dry-run"],
)
assert result.exit_code == 0
mock_update_changelog_file.assert_not_called()
assert "Fake Changelog Here" in result.stdout
@mock.patch("llama_dev.release.changelog._run_command")
@mock.patch("llama_dev.release.changelog._get_latest_tag")
@mock.patch("llama_dev.release.changelog._get_pr_numbers")
@mock.patch("llama_dev.release.changelog._extract_pr_data")
@mock.patch("llama_dev.release.changelog._get_changelog_text")
@mock.patch("llama_dev.release.changelog._update_changelog_file")
def test_command_success(
mock_update_changelog_file,
mock_get_changelog_text,
mock_extract_pr_data,
mock_get_pr_numbers,
mock_get_latest_tag,
mock_run_command,
data_path,
):
mock_get_pr_numbers.return_value = ["42"]
mock_extract_pr_data.return_value = ({"foo": {}}, {"foo": "v1.2.3"})
runner = CliRunner()
result = runner.invoke(
cli,
["--repo-root", data_path, "release", "changelog"],
)
assert result.exit_code == 0
mock_update_changelog_file.assert_called_once()
def test_run_command_success():
with mock.patch("subprocess.run") as mock_run:
mock_result = mock.MagicMock()
mock_result.returncode = 0
mock_result.stdout = "Success"
mock_run.return_value = mock_result
assert _run_command("echo 'Success'") == "Success"
def test_run_command_failure():
with mock.patch("subprocess.run") as mock_run:
mock_result = mock.MagicMock()
mock_result.returncode = 1
mock_result.stderr = "Error"
mock_run.return_value = mock_result
with pytest.raises(RuntimeError):
_run_command("false")
@mock.patch("llama_dev.release.changelog._run_command")
def test_get_latest_tag(mock_run_command):
mock_run_command.return_value = "v1.2.3"
assert _get_latest_tag() == "v1.2.3"
mock_run_command.assert_called_once_with(
'git describe --tags --match "v[0-9]*" --abbrev=0'
)
@mock.patch("llama_dev.release.changelog._run_command")
def test_get_pr_numbers(mock_run_command):
log_output = """
commit 123 (HEAD -> main)
feat: new feature (#123)
commit 456
fix: a bug (#456)
commit 789
docs: update readme
"""
mock_run_command.return_value = log_output
pr_numbers = _get_pr_numbers("v1.2.3")
assert pr_numbers == {"123", "456"}
mock_run_command.assert_called_once_with(
'git log v1.2.3..HEAD --pretty="format:%H %s"'
)
@mock.patch("llama_dev.release.changelog._run_command")
@mock.patch("llama_dev.release.changelog.load_pyproject")
@mock.patch("llama_dev.release.changelog.get_changed_packages")
def test_extract_pr_data(
mock_get_changed_packages, mock_load_pyproject, mock_run_command
):
repo_root = Path("/path/to/repo")
all_packages = [repo_root / "pkg1", repo_root / "pkg2"]
pr_number = "123"
pr_json = {
"number": 123,
"title": "Test PR",
"url": "https://github.com/test/repo/pull/123",
"files": [{"path": "pkg1/file.py"}],
}
mock_run_command.return_value = json.dumps(pr_json)
mock_get_changed_packages.return_value = [repo_root / "pkg1"]
mock_load_pyproject.return_value = {"project": {"version": "0.1.0"}}
package_prs, package_versions = _extract_pr_data(repo_root, all_packages, pr_number)
assert "pkg1" in package_prs
assert package_prs["pkg1"][0]["number"] == 123
assert "pkg1" in package_versions
assert package_versions["pkg1"] == "0.1.0"
mock_run_command.assert_called_once_with(
f"gh pr view {pr_number} --json number,title,url,files"
)
mock_get_changed_packages.assert_called_once()
mock_load_pyproject.assert_called_once_with(repo_root / "pkg1")
def test_get_changelog_text():
package_prs = {
"pkg1": [
{
"number": 123,
"title": "Feat: New feature",
"url": "https://github.com/test/repo/pull/123",
}
],
"pkg2": [
{
"number": 456,
"title": "Fix: A bug",
"url": "https://github.com/test/repo/pull/456",
}
],
}
package_versions = {"pkg1": "0.1.0", "pkg2": "0.2.0"}
today = date.today().strftime("%Y-%m-%d")
expected_text = f"""{CHANGELOG_PLACEHOLDER}
## [{today}]
### pkg1 [0.1.0]
- Feat: New feature ([#123](https://github.com/test/repo/pull/123))
### pkg2 [0.2.0]
- Fix: A bug ([#456](https://github.com/test/repo/pull/456))"""
changelog_text = _get_changelog_text(package_prs, package_versions)
assert changelog_text == expected_text
def test_update_changelog_file():
repo_root = Path("/path/to/repo")
changelog_text = "New changelog content"
initial_content = (
f"Some initial content\n{CHANGELOG_PLACEHOLDER}\nSome other content"
)
expected_content = f"Some initial content\n{changelog_text}\nSome other content"
m = mock.mock_open(read_data=initial_content)
with mock.patch("builtins.open", m):
_update_changelog_file(repo_root, changelog_text)
m.assert_called_once_with(repo_root / "CHANGELOG.md", "r+")
handle = m()
handle.read.assert_called_once()
handle.seek.assert_called_once_with(0)
handle.truncate.assert_called_once()
handle.write.assert_called_once_with(expected_content)
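# Note on the assertions above: mock.mock_open routes read/seek/truncate/write
# onto a single shared handle mock, so m() returns the same object the code
# under test used inside the "with" block, letting us assert on the exact
# write payload.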
def test_run_command_no_shell_and_args_list():
with mock.patch("subprocess.run") as mock_run:
mock_result = mock.MagicMock()
mock_result.returncode = 0
mock_result.stdout = "ok"
mock_run.return_value = mock_result
_run_command("echo ok")
args, kwargs = mock_run.call_args
assert isinstance(args[0], list)
assert not kwargs.get("shell", False)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-dev/tests/release/test_changelog.py",
"license": "MIT License",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-dev/llama_dev/release/changelog.py | import json
import re
import shlex
import subprocess
from datetime import date
from pathlib import Path
import click
from llama_dev.utils import find_all_packages, get_changed_packages, load_pyproject
CHANGELOG_PLACEHOLDER = "<!--- generated changelog --->"
def _run_command(command: str) -> str:
"""Helper to run a shell command and return the output."""
args = shlex.split(command)
result = subprocess.run(args, capture_output=True, text=True)
if result.returncode != 0:
raise RuntimeError(f"Command failed: {command}\n{result.stderr}")
return result.stdout.strip()
def _get_latest_tag() -> str:
"""Get the most recent tag with the form v1.2.3"""
return _run_command('git describe --tags --match "v[0-9]*" --abbrev=0')
def _get_pr_numbers(latest_tag: str) -> set[str]:
"""Get the list of PR numbers merged between `latest_tag` and HEAD"""
log_output = _run_command(f'git log {latest_tag}..HEAD --pretty="format:%H %s"')
pr_numbers = set()
pr_pattern = re.compile(r"\(#(\d+)\)")
for line in log_output.splitlines():
match = pr_pattern.search(line)
if match:
pr_numbers.add(match.group(1))
return pr_numbers
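# Illustrative example (values are made up): log output containing
#   "3f2a9c feat: add retriever (#123)" and "9d01ef docs: fix typo"
# yields {"123"}; commit subjects without a trailing "(#N)" are ignored.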
def _extract_pr_data(
repo_root: Path, all_packages: list[Path], pr_number: str
) -> tuple[dict, dict]:
"""For each PR, extract the package name, version and PR data"""
package_prs = {}
package_versions = {}
pr_json_str = _run_command(f"gh pr view {pr_number} --json number,title,url,files")
pr_data = json.loads(pr_json_str)
files = [repo_root / f["path"] for f in pr_data.get("files", [])]
changed_packages = get_changed_packages(files, all_packages)
for pkg in changed_packages:
pkg_name = pkg.name
if pkg_name not in package_prs:
package_prs[pkg_name] = []
package_data = load_pyproject(pkg)
ver = package_data["project"]["version"]
package_versions[pkg_name] = ver
package_prs[pkg_name].append(pr_data)
return package_prs, package_versions
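# For reference, a PR touching only pkg1 yields
#   ({"pkg1": [pr_data]}, {"pkg1": "<version from pkg1's pyproject.toml>"}).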
def _get_changelog_text(package_prs: dict, package_versions: dict) -> str:
"""Return the changelog text, sorted by package name."""
changelog_text = (
f"{CHANGELOG_PLACEHOLDER}\n\n## [{date.today().strftime('%Y-%m-%d')}]"
)
sorted_pkgs = sorted(package_prs.keys())
for pkg in sorted_pkgs:
changelog_text += f"\n\n### {pkg} [{package_versions[pkg]}]"
prs = sorted(package_prs[pkg], key=lambda p: p["number"])
for pr in prs:
changelog_text += f"\n- {pr['title']} ([#{pr['number']}]({pr['url']}))"
return changelog_text
def _update_changelog_file(repo_root: Path, changelog_text: str) -> None:
"""Update the content of the monorepo changelog file."""
with open(repo_root / "CHANGELOG.md", "r+") as f:
content = f.read()
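        # Rewind and truncate before writing: without this, writing content
        # shorter than the original would leave stale trailing bytes in the
        # file.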
f.seek(0)
f.truncate()
f.write(content.replace(CHANGELOG_PLACEHOLDER, changelog_text))
@click.command(short_help="Generate the changelog from the previous release tag")
@click.option(
"--dry-run",
is_flag=True,
help="Show the changelog text without altering the CHANGELOG.md file",
)
@click.pass_obj
def changelog(obj: dict, dry_run: bool) -> None:
"""
Generate the changelog in markdown syntax.
\b
This command will:
- get the list of GitHub PRs that happened since the last release tag
- create a bullet list in Markdown syntax using the PR titles
- group the changes per package, depending on the path that changed
""" # noqa
console = obj["console"]
repo_root = obj["repo_root"]
all_packages = find_all_packages(repo_root)
latest_tag = _get_latest_tag()
console.print(f"Generating changelog since tag '{latest_tag}'...")
# Get commits since the last tag and extract PR numbers
pr_numbers = _get_pr_numbers(latest_tag)
if not pr_numbers:
raise click.ClickException("No pull requests found since the last tag.")
package_prs = {}
package_versions = {}
with click.progressbar(sorted(pr_numbers), label="Fetching PR details") as bar:
for pr_number in bar:
try:
prs, versions = _extract_pr_data(repo_root, all_packages, pr_number)
# Merge PR lists for each package instead of overwriting
for pkg_name, pr_list in prs.items():
if pkg_name not in package_prs:
package_prs[pkg_name] = []
package_prs[pkg_name].extend(pr_list)
package_versions |= versions
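                # dict "|=" (Python 3.9+) merges versions in place; seeing a
                # package again across PRs rewrites the same version, so the
                # merge is idempotent.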
except Exception as e:
console.print(
f"Warning: Could not fetch details for PR #{pr_number}. {e}",
style="error",
)
# Generate the markdown output
changelog_text = _get_changelog_text(package_prs, package_versions)
if dry_run:
console.print(changelog_text)
else:
_update_changelog_file(repo_root, changelog_text)
console.print("CHANGELOG.md file updated.")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-dev/llama_dev/release/changelog.py",
"license": "MIT License",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-web/tests/test_firecrawl_requests.py | import os
import pytest
from typing import Optional
from llama_index.readers.web.firecrawl_web.base import FireCrawlWebReader
# Set PRINT_RESULTS = True to print documents, otherwise tests will use asserts.
PRINT_RESULTS = False
@pytest.fixture(scope="session", autouse=True)
def _print_firecrawl_version() -> None:
try:
import firecrawl # type: ignore
version = getattr(firecrawl, "__version__", None)
print(f"firecrawl version: {version}")
except Exception as exc:
print(f"firecrawl import failed: {exc}")
def _require_api_key() -> str:
api_key = os.getenv("FIRECRAWL_API_KEY", "").strip()
if not api_key:
pytest.skip("FIRECRAWL_API_KEY not set")
return api_key
def _api_url() -> Optional[str]:
return os.getenv("FIRECRAWL_API_URL") or os.getenv("FIRECRAWL_BASE_URL")
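# These are live-request smoke tests: they skip (rather than fail) when
# FIRECRAWL_API_KEY is unset, and the target URL, query, and prompt can be
# overridden via the FIRECRAWL_TEST_* environment variables below.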
TEST_URL = os.getenv("FIRECRAWL_TEST_URL", "https://example.pt")
TEST_QUERY = os.getenv("FIRECRAWL_TEST_QUERY", "LlamaIndex")
TEST_PROMPT = os.getenv("FIRECRAWL_TEST_PROMPT", "Extract the title as 'title'")
def test_scrape_prints_documents() -> None:
reader = FireCrawlWebReader(
api_key=_require_api_key(),
api_url=_api_url(),
mode="scrape",
params={"formats": ["markdown"]},
)
for doc in reader.load_data(url=TEST_URL):
if PRINT_RESULTS:
print(f"[SCRAPE] document: {doc}")
else:
assert doc.text is not None
assert doc.metadata is not None
def test_crawl_prints_documents() -> None:
reader = FireCrawlWebReader(
api_key=_require_api_key(),
api_url=_api_url(),
mode="crawl",
params={"limit": 3},
)
for doc in reader.load_data(url=TEST_URL):
if PRINT_RESULTS:
print(f"[CRAWL] document: {doc}")
else:
assert doc.text is not None
assert doc.metadata is not None
def test_map_prints_documents() -> None:
reader = FireCrawlWebReader(
api_key=_require_api_key(),
api_url=_api_url(),
mode="map",
params={"limit": 10},
)
for doc in reader.load_data(url=TEST_URL):
if PRINT_RESULTS:
print(f"[MAP] document: {doc}")
else:
assert doc.text is not None
assert doc.metadata is not None
def test_search_prints_documents() -> None:
reader = FireCrawlWebReader(
api_key=_require_api_key(),
api_url=_api_url(),
mode="search",
)
for doc in reader.load_data(query=TEST_QUERY):
if PRINT_RESULTS:
print(f"[SEARCH] document: {doc}")
else:
assert doc.text is not None
assert doc.metadata is not None
def test_extract_prints_documents() -> None:
reader = FireCrawlWebReader(
api_key=_require_api_key(),
api_url=_api_url(),
mode="extract",
params={"prompt": TEST_PROMPT},
)
for doc in reader.load_data(urls=[TEST_URL]):
if PRINT_RESULTS:
print(f"[EXTRACT] document: {doc}")
else:
assert doc.text is not None
assert doc.metadata is not None
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-web/tests/test_firecrawl_requests.py",
"license": "MIT License",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-web/tests/test_firecrawl_web_reader.py | import sys
import types
import pytest # type: ignore
from llama_index.readers.web.firecrawl_web.base import FireCrawlWebReader
def _install_fake_firecrawl(FirecrawlClass) -> None:
mod = types.ModuleType("firecrawl")
mod.Firecrawl = FirecrawlClass
mod.__version__ = "test"
sys.modules["firecrawl"] = mod
class _Link:
def __init__(self, url: str, title: str = "", description: str = "") -> None:
self.url = url
self.title = title
self.description = description
class _MapResponse:
def __init__(self, links):
self.links = links
def test_class_name_returns_expected():
class Firecrawl:
def __init__(self, *args, **kwargs) -> None:
pass
_install_fake_firecrawl(Firecrawl)
assert FireCrawlWebReader.class_name() == "Firecrawl_reader"
def test_init_uses_api_key_and_url():
class Firecrawl:
def __init__(self, api_key: str, api_url: str = None) -> None: # type: ignore[assignment]
self.api_key = api_key
self.api_url = api_url
_install_fake_firecrawl(Firecrawl)
reader = FireCrawlWebReader(
api_key="KEY123", api_url="https://api.example", mode="scrape"
)
assert reader.firecrawl.api_key == "KEY123"
assert reader.firecrawl.api_url == "https://api.example"
def test_scrape_mode_with_dict_response_includes_text_and_metadata():
scrape_called = {}
class Firecrawl:
def __init__(self, *_, **__):
pass
def scrape(self, url: str, **kwargs):
scrape_called["url"] = url
scrape_called["kwargs"] = kwargs
return {
"success": True,
"warning": None,
"data": {
"markdown": "Hello MD",
"metadata": {"a": 1},
"links": ["x"],
},
}
_install_fake_firecrawl(Firecrawl)
reader = FireCrawlWebReader(
api_key="k", mode="scrape", params={"formats": ["markdown"]}
)
docs = reader.load_data(url="https://site")
assert len(docs) == 1
assert docs[0].text == "Hello MD"
assert docs[0].metadata.get("a") == 1
assert docs[0].metadata.get("success") is True
assert scrape_called["url"] == "https://site"
# Allow additional kwargs (e.g., integration flag) but ensure formats are passed through
assert scrape_called["kwargs"].get("formats") == ["markdown"]
assert scrape_called["kwargs"].get("integration") == "llamaindex"
def test_scrape_mode_with_object_response_includes_text_and_metadata():
class Meta:
def __init__(self):
self.lang = "en"
def model_dump(self):
return {"lang": self.lang}
class ScrapeObj:
def __init__(self):
self.markdown = "Obj MD"
self.metadata = Meta()
self.links = ["y"]
self.warning = None
class Firecrawl:
def __init__(self, *_, **__):
pass
def scrape(self, url: str, **kwargs):
return ScrapeObj()
_install_fake_firecrawl(Firecrawl)
reader = FireCrawlWebReader(api_key="k", mode="scrape")
docs = reader.load_data(url="https://site")
assert len(docs) == 1
assert docs[0].text == "Obj MD"
assert docs[0].metadata.get("lang") == "en"
def test_crawl_mode_strips_maxDepth_and_maps_docs():
last_kwargs = {}
class Firecrawl:
def __init__(self, *_, **__):
pass
def crawl(self, url: str, **kwargs):
last_kwargs.update(kwargs)
return {
"data": [
{"markdown": "A", "metadata": {"u": url}},
{"content": "B", "metadata": {"n": 2}},
]
}
_install_fake_firecrawl(Firecrawl)
reader = FireCrawlWebReader(
api_key="k", mode="crawl", params={"maxDepth": 2, "limit": 1}
)
docs = reader.load_data(url="https://site/x")
assert [d.text for d in docs] == ["A", "B"]
assert "maxDepth" not in last_kwargs
assert last_kwargs.get("limit") == 1
def test_map_mode_success_yields_link_documents():
class Firecrawl:
def __init__(self, *_, **__):
pass
def map(self, url: str, **kwargs): # type: ignore[override]
return _MapResponse(
[
_Link(url="https://a", title="T1", description="D1"),
_Link(url="https://b", title="", description="D2"),
]
)
_install_fake_firecrawl(Firecrawl)
reader = FireCrawlWebReader(api_key="k", mode="map", params={"limit": 2})
docs = reader.load_data(url="https://root")
assert len(docs) == 2
assert docs[0].metadata["source"] == "map"
assert docs[0].metadata["url"] == "https://a"
# text falls back to title/description/url
assert docs[1].text == "D2"
def test_map_mode_error_returns_single_error_document():
class Firecrawl:
def __init__(self, *_, **__):
pass
def map(self, url: str, **kwargs): # type: ignore[override]
return {"success": False, "error": "rate limit"}
_install_fake_firecrawl(Firecrawl)
reader = FireCrawlWebReader(api_key="k", mode="map")
docs = reader.load_data(url="https://root")
assert len(docs) == 1
assert "rate limit" in docs[0].text
assert docs[0].metadata["error"] == "rate limit"
def test_search_mode_with_dict_success_and_markdown_fallbacks():
passed_kwargs = {}
class Firecrawl:
def __init__(self, *_, **__):
pass
def search(self, query: str, **kwargs):
passed_kwargs.update(kwargs)
return {
"success": True,
"data": [
{"title": "A", "url": "u1", "markdown": "M1", "metadata": {"x": 1}},
{"title": "B", "url": "u2", "description": "D2"},
],
}
_install_fake_firecrawl(Firecrawl)
reader = FireCrawlWebReader(
api_key="k", mode="search", params={"query": "dup", "region": "us"}
)
docs = reader.load_data(query="q")
assert [d.text for d in docs] == ["M1", "D2"]
# ensure reader removed duplicate 'query' from params before call
assert "query" not in passed_kwargs
assert passed_kwargs.get("region") == "us"
def test_search_mode_with_dict_failure_returns_error_document():
class Firecrawl:
def __init__(self, *_, **__):
pass
def search(self, query: str, **kwargs):
return {"success": False, "warning": "bad query"}
_install_fake_firecrawl(Firecrawl)
reader = FireCrawlWebReader(api_key="k", mode="search")
docs = reader.load_data(query="q")
assert len(docs) == 1
assert "unsuccessful" in docs[0].text
assert docs[0].metadata["error"] == "bad query"
def test_search_mode_with_sdk_object_lists():
class Item:
def __init__(self, url: str, title: str, description: str) -> None:
self.url = url
self.title = title
self.description = description
self.rank = 7
class SearchResp:
def __init__(self):
self.web = [Item("https://a", "T1", "D1")]
self.news = []
self.images = [Item("https://img", "", "image desc")]
class Firecrawl:
def __init__(self, *_, **__):
pass
def search(self, query: str, **kwargs):
return SearchResp()
_install_fake_firecrawl(Firecrawl)
reader = FireCrawlWebReader(api_key="k", mode="search")
docs = reader.load_data(query="q")
assert len(docs) == 2
types = {d.metadata.get("search_type") for d in docs}
assert types == {"web", "images"}
assert any(d.metadata.get("rank") == 7 for d in docs)
def test_extract_mode_success_with_sources_and_status():
class Firecrawl:
def __init__(self, *_, **__):
pass
def extract(self, *, urls, **payload):
# Accept additional fields such as integration while verifying prompt
assert payload.get("prompt") == "Do it"
assert urls == ["https://a", "https://b"]
return {
"success": True,
"status": "ok",
"expiresAt": "2030-01-01",
"data": {"k1": "v1", "k2": 2},
"sources": {"https://a": {"score": 1.0}},
}
_install_fake_firecrawl(Firecrawl)
reader = FireCrawlWebReader(api_key="k", mode="extract", params={"prompt": "Do it"})
docs = reader.load_data(urls=["https://a", "https://b"])
assert len(docs) == 1
assert "k1: v1" in docs[0].text
assert docs[0].metadata["status"] == "ok"
assert "sources" in docs[0].metadata
def test_extract_mode_success_no_data_yields_notice():
class Firecrawl:
def __init__(self, *_, **__):
pass
def extract(self, *, urls, **payload):
return {"success": True, "data": {}}
_install_fake_firecrawl(Firecrawl)
reader = FireCrawlWebReader(api_key="k", mode="extract", params={"prompt": "x"})
docs = reader.load_data(urls=["https://x"])
assert len(docs) == 1
assert "no data" in docs[0].text.lower()
def test_extract_mode_failure_returns_error_document():
class Firecrawl:
def __init__(self, *_, **__):
pass
def extract(self, *, urls, **payload):
return {"success": False, "warning": "no quota"}
_install_fake_firecrawl(Firecrawl)
reader = FireCrawlWebReader(api_key="k", mode="extract", params={"prompt": "x"})
docs = reader.load_data(urls=["https://x"])
assert len(docs) == 1
assert docs[0].metadata["error"] == "no quota"
def test_invalid_mode_raises_value_error():
class Firecrawl:
def __init__(self, *_, **__):
pass
_install_fake_firecrawl(Firecrawl)
reader = FireCrawlWebReader(api_key="k", mode="invalid")
with pytest.raises(ValueError):
reader.load_data(url="https://x")
def test_argument_validation_requires_exactly_one_of_url_query_urls():
class Firecrawl:
def __init__(self, *_, **__):
pass
_install_fake_firecrawl(Firecrawl)
reader = FireCrawlWebReader(api_key="k", mode="scrape")
with pytest.raises(ValueError):
reader.load_data() # none
with pytest.raises(ValueError):
reader.load_data(url="u", query="q") # two
with pytest.raises(ValueError):
reader.load_data(url="u", urls=["u"]) # two
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-web/tests/test_firecrawl_web_reader.py",
"license": "MIT License",
"lines": 262,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-valyu/examples/retriever_example.py | """
Example demonstrating the ValyuRetriever for URL content extraction.
This example shows how to use the ValyuRetriever to extract content from URLs
and integrate it with LlamaIndex retrieval pipelines.
"""
import os
from llama_index.tools.valyu import ValyuRetriever
from llama_index.core import QueryBundle
def main():
"""Demonstrate ValyuRetriever usage."""
# Initialize the Valyu retriever
valyu_retriever = ValyuRetriever(
api_key=os.environ.get("VALYU_API_KEY", "your-api-key-here"),
verbose=True,
# Configure contents extraction (user-controlled settings)
contents_summary=True, # Enable AI summarization
contents_extract_effort="normal", # Extraction thoroughness
contents_response_length="medium", # Content length per URL
# Note: contents_max_price is set by developer/user, not exposed to models
)
# Example 1: Single URL retrieval
print("=== Single URL Retrieval ===")
query_bundle = QueryBundle(
query_str="https://en.wikipedia.org/wiki/Transformer_(machine_learning_model)"
)
try:
nodes = valyu_retriever.retrieve(query_bundle)
print(f"Retrieved {len(nodes)} documents:")
for i, node in enumerate(nodes):
print(f"\nDocument {i+1}:")
print(f"URL: {node.node.metadata.get('url', 'N/A')}")
print(f"Title: {node.node.metadata.get('title', 'N/A')}")
print(f"Content length: {len(node.node.text)} characters")
print(f"Score: {node.score}")
# Show content preview
preview = (
node.node.text[:200] + "..."
if len(node.node.text) > 200
else node.node.text
)
print(f"Content preview: {preview}")
except Exception as e:
print(f"Error: {e}")
print("Note: This example requires a valid VALYU_API_KEY environment variable")
# Example 2: Multiple URLs
print("\n=== Multiple URLs Retrieval ===")
multi_url_query = QueryBundle(
query_str="https://arxiv.org/abs/1706.03762 https://en.wikipedia.org/wiki/Attention_(machine_learning)"
)
try:
nodes = valyu_retriever.retrieve(multi_url_query)
print(f"Retrieved {len(nodes)} documents from multiple URLs")
for i, node in enumerate(nodes):
print(
f"Document {i+1}: {node.node.metadata.get('title', 'Unknown')} - {len(node.node.text)} chars"
)
except Exception as e:
print(f"Error: {e}")
# Example 3: Natural language query with URLs
print("\n=== Natural Language Query with URLs ===")
natural_query = QueryBundle(
query_str="Please extract content from these research papers: https://arxiv.org/abs/1706.03762 and also from https://en.wikipedia.org/wiki/Large_language_model"
)
try:
nodes = valyu_retriever.retrieve(natural_query)
print(
f"Extracted content from {len(nodes)} URLs found in natural language query"
)
except Exception as e:
print(f"Error: {e}")
def demonstrate_url_parsing():
"""Demonstrate URL parsing capabilities."""
print("\n=== URL Parsing Examples ===")
retriever = ValyuRetriever(
api_key="test-key"
) # API key not needed for parsing demo
test_cases = [
"https://example.com",
"https://site1.com, https://site2.com",
"Please extract content from https://news.com and https://blog.com",
"Check out these links: https://paper1.org https://paper2.org",
"No URLs in this text",
]
for i, test_case in enumerate(test_cases, 1):
urls = retriever._parse_urls_from_query(test_case)
print(f"Test {i}: '{test_case}'")
print(f" Extracted URLs: {urls}")
if __name__ == "__main__":
main()
demonstrate_url_parsing()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-valyu/examples/retriever_example.py",
"license": "MIT License",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-valyu/llama_index/tools/valyu/retriever.py | """Valyu retriever implementation."""
from typing import List, Optional, Union, Dict, Any
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.schema import NodeWithScore, TextNode
from llama_index.core.callbacks import CallbackManager
class ValyuRetriever(BaseRetriever):
"""Valyu retriever for extracting content from URLs."""
def __init__(
self,
api_key: str,
verbose: bool = False,
# Contents API parameters
contents_summary: Optional[Union[bool, str, Dict[str, Any]]] = None,
contents_extract_effort: Optional[str] = "normal",
contents_response_length: Optional[Union[str, int]] = "short",
callback_manager: Optional[CallbackManager] = None,
) -> None:
"""
Initialize Valyu retriever.
Args:
api_key (str): Valyu API key
verbose (bool): Enable verbose logging. Defaults to False
contents_summary (Optional[Union[bool, str, Dict[str, Any]]]): AI summary configuration:
- False/None: No AI processing (raw content)
- True: Basic automatic summarization
- str: Custom instructions (max 500 chars)
- dict: JSON schema for structured extraction
contents_extract_effort (Optional[str]): Extraction thoroughness:
- "normal": Fast extraction (default)
- "high": More thorough but slower
- "auto": Automatically determine extraction effort but slowest
contents_response_length (Optional[Union[str, int]]): Content length per URL:
- "short": 25,000 characters (default)
- "medium": 50,000 characters
- "large": 100,000 characters
- "max": No limit
- int: Custom character limit
callback_manager (Optional[CallbackManager]): Callback manager for tracking operations
"""
from valyu import Valyu
# Validate parameters
if not api_key or not isinstance(api_key, str) or not api_key.strip():
raise ValueError("api_key must be a non-empty string")
if not isinstance(verbose, bool):
raise ValueError("verbose must be a boolean")
# Validate contents_summary
if contents_summary is not None:
if isinstance(contents_summary, str):
if len(contents_summary) > 500:
raise ValueError(
f"contents_summary string must be 500 characters or less. "
f"Current length: {len(contents_summary)} characters."
)
elif not isinstance(contents_summary, (bool, dict)):
raise ValueError(
"contents_summary must be a boolean, string, dict, or None"
)
# Validate contents_extract_effort
valid_extract_efforts = ["normal", "high", "auto"]
if (
contents_extract_effort is not None
and contents_extract_effort not in valid_extract_efforts
):
raise ValueError(
f"contents_extract_effort must be one of {valid_extract_efforts}"
)
# Validate contents_response_length
if contents_response_length is not None:
valid_preset_lengths = ["short", "medium", "large", "max"]
if isinstance(contents_response_length, str):
if contents_response_length not in valid_preset_lengths:
raise ValueError(
f"contents_response_length string must be one of {valid_preset_lengths}"
)
elif isinstance(contents_response_length, int):
if contents_response_length < 1:
raise ValueError(
"contents_response_length must be a positive integer when using custom length"
)
else:
raise ValueError(
"contents_response_length must be a string preset, positive integer, or None"
)
self.client = Valyu(api_key=api_key)
self._verbose = verbose
self._contents_summary = contents_summary
self._contents_extract_effort = contents_extract_effort
self._contents_response_length = contents_response_length
super().__init__(callback_manager=callback_manager)
def _retrieve(self, query_bundle) -> List[NodeWithScore]:
"""
Retrieve content from URLs.
The query_bundle.query_str should contain URLs (space or comma separated).
This method extracts content from those URLs and returns them as scored nodes.
Args:
query_bundle: Query bundle containing URLs to extract content from
Returns:
List[NodeWithScore]: List of nodes with extracted content and relevance scores
"""
# Parse URLs from query string
urls = self._parse_urls_from_query(query_bundle.query_str)
if not urls:
return []
# Get content using Valyu API
response = self.client.contents(
urls=urls,
summary=self._contents_summary,
extract_effort=self._contents_extract_effort,
response_length=self._contents_response_length,
)
if self._verbose:
print(f"[Valyu Retriever] Contents Response: {response}")
nodes = []
if response and response.results:
for result in response.results:
metadata = {
"url": result.url,
"title": result.title,
"source": result.source,
"length": result.length,
"data_type": result.data_type,
"citation": result.citation,
}
# Add summary info if available
if hasattr(result, "summary") and result.summary:
metadata["summary"] = result.summary
if (
hasattr(result, "summary_success")
and result.summary_success is not None
):
metadata["summary_success"] = result.summary_success
# Add image URL if available
if hasattr(result, "image_url") and result.image_url:
metadata["image_url"] = result.image_url
# Create text node
node = TextNode(
text=str(result.content),
metadata=metadata,
)
# Add as scored node (all retrieved content gets score of 1.0)
nodes.append(NodeWithScore(node=node, score=1.0))
return nodes
def _parse_urls_from_query(self, query_str: str) -> List[str]:
"""
Parse URLs from query string.
Args:
query_str: String containing URLs (space or comma separated)
Returns:
List[str]: List of valid URLs
"""
        import re
        # Split on whitespace or commas
        potential_urls = re.split(r"[,\s]+", query_str.strip())
# Filter for valid URLs
urls = []
for url in potential_urls:
url = url.strip()
if url and url.startswith(("http://", "https://")):
urls.append(url)
return urls[:10] # Limit to 10 URLs as per API constraint
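# Illustrative usage sketch (not part of the original module). Requires a
# real VALYU_API_KEY in the environment; the URLs below are placeholders.
if __name__ == "__main__":
    import os
    retriever = ValyuRetriever(api_key=os.environ["VALYU_API_KEY"], verbose=True)
    for scored in retriever.retrieve("https://example.com https://example.org"):
        print(scored.node.metadata.get("url"), scored.score)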
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-valyu/llama_index/tools/valyu/retriever.py",
"license": "MIT License",
"lines": 161,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/base.py | """VectorStore integration for Azure Database for PostgreSQL using LlamaIndex."""
import sys
from typing import Any
import numpy as np
from pgvector.psycopg import register_vector # type: ignore[import-untyped]
from psycopg import sql
from psycopg.rows import dict_row
from psycopg.types.json import Jsonb
from llama_index.core.schema import BaseNode, MetadataMode
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
FilterOperator,
MetadataFilter,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.core.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
from .common import (
Algorithm,
AzurePGConnectionPool,
BaseAzurePGVectorStore,
)
if sys.version_info < (3, 12):
from typing_extensions import override
else:
from typing import override
def metadata_filters_to_sql(filters: MetadataFilters | None) -> sql.SQL:
"""Convert LlamaIndex MetadataFilters to a SQL WHERE clause.
Args:
filters: Optional MetadataFilters object.
Returns:
sql.SQL: SQL WHERE clause representing the filters.
"""
if not filters or not filters.filters:
return sql.SQL("TRUE")
def _filter_to_sql(filter_item: MetadataFilter | MetadataFilters) -> sql.SQL:
"""Recursively convert MetadataFilter or MetadataFilters to SQL."""
if isinstance(filter_item, MetadataFilters):
# Handle nested MetadataFilters
if not filter_item.filters:
return sql.SQL("TRUE")
filter_sqls = [_filter_to_sql(f) for f in filter_item.filters]
if filter_item.condition.lower() == "and":
return sql.SQL("({})").format(sql.SQL(" AND ").join(filter_sqls))
elif filter_item.condition.lower() == "or":
return sql.SQL("({})").format(sql.SQL(" OR ").join(filter_sqls))
else: # NOT
if len(filter_sqls) == 1:
return sql.SQL("NOT ({})").format(filter_sqls[0])
else:
# For multiple filters with NOT, apply NOT to the AND of all filters
return sql.SQL("NOT ({})").format(
sql.SQL(" AND ").join(filter_sqls)
)
elif isinstance(filter_item, MetadataFilter):
# Handle individual MetadataFilter
key = filter_item.key
value = filter_item.value
operator = filter_item.operator
# Use JSONB path for metadata column
column_ref = sql.SQL("metadata ->> {}").format(sql.Literal(key))
if operator == FilterOperator.EQ:
return sql.SQL("{} = {}").format(column_ref, sql.Literal(str(value)))
elif operator == FilterOperator.NE:
return sql.SQL("{} != {}").format(column_ref, sql.Literal(str(value)))
elif operator == FilterOperator.GT:
return sql.SQL("({}) > {}").format(column_ref, sql.Literal(value))
elif operator == FilterOperator.LT:
return sql.SQL("({}) < {}").format(column_ref, sql.Literal(value))
elif operator == FilterOperator.GTE:
return sql.SQL("({}) >= {}").format(column_ref, sql.Literal(value))
elif operator == FilterOperator.LTE:
return sql.SQL("({}) <= {}").format(column_ref, sql.Literal(value))
elif operator == FilterOperator.IN:
if isinstance(value, list):
values = sql.SQL(", ").join([sql.Literal(str(v)) for v in value])
return sql.SQL("{} IN ({})").format(column_ref, values)
else:
return sql.SQL("{} = {}").format(
column_ref, sql.Literal(str(value))
)
elif operator == FilterOperator.NIN:
if isinstance(value, list):
values = sql.SQL(", ").join([sql.Literal(str(v)) for v in value])
return sql.SQL("{} NOT IN ({})").format(column_ref, values)
else:
return sql.SQL("{} != {}").format(
column_ref, sql.Literal(str(value))
)
elif operator == FilterOperator.CONTAINS:
# For JSONB array contains
return sql.SQL("metadata -> {} ? {}").format(
sql.Literal(key), sql.Literal(str(value))
)
elif operator == FilterOperator.TEXT_MATCH:
return sql.SQL("{} LIKE {}").format(
column_ref, sql.Literal(f"%{value}%")
)
elif operator == FilterOperator.TEXT_MATCH_INSENSITIVE:
return sql.SQL("{} ILIKE {}").format(
column_ref, sql.Literal(f"%{value}%")
)
elif operator == FilterOperator.IS_EMPTY:
return sql.SQL("({} IS NULL OR {} = '')").format(column_ref, column_ref)
else:
# Default to equality for unsupported operators
return sql.SQL("{} = {}").format(column_ref, sql.Literal(str(value)))
return sql.SQL("TRUE")
filter_sqls = [_filter_to_sql(f) for f in filters.filters]
if filters.condition.lower() == "and":
return sql.SQL(" AND ").join(filter_sqls)
elif filters.condition.lower() == "or":
return sql.SQL(" OR ").join(filter_sqls)
else: # NOT
if len(filter_sqls) == 1:
return sql.SQL("NOT ({})").format(filter_sqls[0])
else:
return sql.SQL("NOT ({})").format(sql.SQL(" AND ").join(filter_sqls))
class AzurePGVectorStore(BasePydanticVectorStore, BaseAzurePGVectorStore):
"""Azure PostgreSQL vector store for LlamaIndex."""
stores_text: bool = True
metadata_columns: str | None = "metadata"
@classmethod
def class_name(cls) -> str:
"""Return the class name for this vector store."""
return "AzurePGVectorStore"
@property
def client(self) -> None:
"""Return the client property (not used for AzurePGVectorStore)."""
return
@override
@classmethod
def from_params(
cls,
connection_pool: AzurePGConnectionPool,
schema_name: str = "public",
table_name: str = "llamaindex_vectors",
embed_dim: int = 1536,
embedding_index: Algorithm | None = None,
) -> "AzurePGVectorStore":
"""Create an AzurePGVectorStore from connection and configuration parameters."""
return cls(
connection_pool=connection_pool,
schema_name=schema_name,
table_name=table_name,
embed_dim=embed_dim,
embedding_index=embedding_index,
)
def _table_row_to_node(self, row: dict[str, Any]) -> BaseNode:
"""Convert a table row dictionary to a BaseNode object."""
metadata = row.get("metadata")
if metadata is None:
raise ValueError("Metadata not found in row data.")
node = metadata_dict_to_node(metadata, text=row.get("content"))
# Convert UUID to string if needed
node_id = row.get("id")
if node_id is not None:
node.node_id = str(node_id)
embedding = row.get("embedding")
if isinstance(embedding, str):
embedding = row.get("embedding").strip("[]").split(",")
node.embedding = list(map(float, embedding))
elif embedding is not None:
node.embedding = embedding
else:
raise ValueError("Missing embedding value")
return node
def _get_insert_sql_dict(
self, node: BaseNode, on_conflict_update: bool
) -> tuple[sql.SQL, dict[str, Any]]:
"""Get the SQL command and dictionary for inserting a node."""
if on_conflict_update:
update = sql.SQL(
"""
UPDATE SET
{content_col} = EXCLUDED.{content_col},
{embedding_col} = EXCLUDED.{embedding_col},
{metadata_col} = EXCLUDED.{metadata_col}
"""
).format(
id_col=sql.Identifier(self.id_column),
content_col=sql.Identifier(self.content_column),
embedding_col=sql.Identifier(self.embedding_column),
metadata_col=sql.Identifier(self.metadata_columns),
)
else:
update = sql.SQL("nothing")
insert_sql = sql.SQL(
"""
INSERT INTO {schema}.{table} ({id_col}, {content_col}, {embedding_col}, {metadata_col})
VALUES (%(id)s, %(content)s, %(embedding)s, %(metadata)s)
ON CONFLICT ({id_col}) DO {update}
"""
).format(
schema=sql.Identifier(self.schema_name),
table=sql.Identifier(self.table_name),
id_col=sql.Identifier(self.id_column),
content_col=sql.Identifier(self.content_column),
embedding_col=sql.Identifier(self.embedding_column),
metadata_col=sql.Identifier(self.metadata_columns),
update=update,
)
return (
insert_sql,
{
"id": node.node_id,
"content": node.get_content(metadata_mode=MetadataMode.NONE),
"embedding": np.array(node.get_embedding(), dtype=np.float32),
"metadata": Jsonb(node_to_metadata_dict(node)),
},
)
@override
def add(self, nodes: list[BaseNode], **add_kwargs: Any) -> list[str]:
"""Add a list of BaseNode objects to the vector store.
Args:
nodes: List of BaseNode objects to add.
**add_kwargs: Additional keyword arguments.
Returns:
List of node IDs added.
"""
ids = []
on_conflict_update = bool(add_kwargs.pop("on_conflict_update", None))
with self.connection_pool.connection() as conn:
register_vector(conn)
with conn.cursor(row_factory=dict_row) as cursor:
for node in nodes:
ids.append(node.node_id)
insert_sql, insert_dict = self._get_insert_sql_dict(
node, on_conflict_update
)
cursor.execute(insert_sql, insert_dict)
return ids
@override
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Perform a similarity search using the provided query.
Args:
query: VectorStoreQuery object containing the query embedding and parameters.
**kwargs: Additional keyword arguments.
Returns:
VectorStoreQueryResult containing the search results.
"""
results = self._similarity_search_by_vector_with_distance(
embedding=query.query_embedding,
k=query.similarity_top_k,
filter_expression=metadata_filters_to_sql(query.filters),
**kwargs,
)
if query.mode == VectorStoreQueryMode.HYBRID:
text_results = self._full_text_search(
query_str=query.query_str,
**kwargs,
)
results = self._dedup_results(results + text_results)
nodes = []
similarities = []
ids = []
for row in results:
node = metadata_dict_to_node(row[0]["metadata"], text=row[0]["content"])
nodes.append(node)
similarities.append(row[1])
ids.append(row[0]["id"])
return VectorStoreQueryResult(
nodes=nodes,
similarities=similarities,
ids=ids,
)
@override
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete a node from the vector store by reference document ID.
Args:
ref_doc_id: The reference document ID to delete.
**delete_kwargs: Additional keyword arguments.
"""
with self.connection_pool.connection() as conn:
register_vector(conn)
with conn.cursor(row_factory=dict_row) as cursor:
delete_sql = sql.SQL(
"DELETE FROM {table} WHERE metadata ->> 'doc_id' = %s"
).format(table=sql.Identifier(self.schema_name, self.table_name))
cursor.execute(delete_sql, (ref_doc_id,))
@override
def delete_nodes(
self,
node_ids: list[str] | None = None,
filters: MetadataFilters | None = None,
**delete_kwargs: Any,
) -> None:
"""Delete nodes from the vector store by node IDs or filters.
Args:
node_ids: Optional list of node IDs to delete.
filters: Optional MetadataFilters to filter nodes for deletion.
**delete_kwargs: Additional keyword arguments.
"""
if not node_ids:
return
self._delete_rows_from_table(
ids=node_ids, filters=metadata_filters_to_sql(filters), **delete_kwargs
)
@override
def clear(self) -> None:
"""Clear all data from the vector store table."""
with self.connection_pool.connection() as conn:
register_vector(conn)
with conn.cursor(row_factory=dict_row) as cursor:
stmt = sql.SQL("TRUNCATE TABLE {schema}.{table}").format(
schema=sql.Identifier(self.schema_name),
table=sql.Identifier(self.table_name),
)
cursor.execute(stmt)
conn.commit()
@override
def get_nodes(
self,
node_ids: list[str] | None = None,
filters: MetadataFilters | None = None,
**kwargs: Any,
) -> list[BaseNode]:
"""Retrieve nodes by IDs or filters.
Args:
node_ids: Optional list of node IDs to retrieve.
filters: Optional MetadataFilters to filter nodes.
**kwargs: Additional keyword arguments.
Returns:
List of BaseNode objects matching the criteria.
"""
# TODO: Implement filter handling
documents = self._get_by_ids(node_ids)
nodes = []
for doc in documents:
node = self._table_row_to_node(doc)
nodes.append(node)
return nodes
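# Illustrative usage sketch (not part of the original module). Pool creation
# is elided; the table name, dimension, and query embedding are placeholders.
def _demo_query(pool: AzurePGConnectionPool) -> None:
    store = AzurePGVectorStore.from_params(pool, table_name="demo_vectors", embed_dim=3)
    result = store.query(
        VectorStoreQuery(query_embedding=[0.1, 0.2, 0.3], similarity_top_k=2)
    )
    for node_id, score in zip(result.ids or [], result.similarities or []):
        print(node_id, score)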
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/base.py",
"license": "MIT License",
"lines": 337,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/_base.py | """Base VectorStore integration for Azure Database for PostgreSQL."""
import logging
import re
import sys
from collections.abc import Sequence
from typing import Any
import numpy as np
from pgvector.psycopg import register_vector # type: ignore[import-untyped]
from psycopg import sql
from psycopg.rows import dict_row
from pydantic import BaseModel, ConfigDict, PositiveInt, model_validator
from ._connection import AzurePGConnectionPool
from ._shared import (
HNSW,
Algorithm,
DiskANN,
IVFFlat,
VectorOpClass,
VectorType,
)
if sys.version_info < (3, 11):
from typing_extensions import Self
else:
from typing import Self
_logger = logging.getLogger(__name__)
class BaseAzurePGVectorStore(BaseModel):
"""Base Pydantic model for an Azure PostgreSQL-backed vector store.
This class encapsulates configuration (connection pool, table/column
names, embedding type/dimension, index configuration and metadata
column) and performs runtime verification that the target table
exists with expected columns and index configuration. If the table
does not exist, ``verify_and_init_store`` will create it.
"""
connection_pool: AzurePGConnectionPool
schema_name: str = "public"
table_name: str = "vector_store"
id_column: str = "id"
content_column: str = "content"
embedding_column: str = "embedding"
embedding_type: VectorType | None = None
embedding_dimension: PositiveInt | None = None
embedding_index: Algorithm | None = None
metadata_column: str | None = "metadata"
model_config = ConfigDict(
arbitrary_types_allowed=True, # Allow arbitrary types like Embeddings and AzurePGConnectionPool
)
@model_validator(mode="after")
def verify_and_init_store(self) -> Self:
"""Validate the store configuration and initialize DB schema and index.
This validator runs after Pydantic model initialization. It queries
the database to detect an existing table and its columns/indexes,
performs type and dimension checks for the embedding column, and
sets inferred properties (like embedding_type and embedding_dimension)
when they are not explicitly provided. If the table does not exist,
it will create the table with sensible defaults.
Returns:
Self: The same model instance, possibly updated with inferred values.
"""
# verify that metadata_column is not empty if provided
if self.metadata_column is not None and len(self.metadata_column) == 0:
raise ValueError("'metadata_column' cannot be empty if provided.")
_logger.debug(
"checking if table '%s.%s' exists with the required columns",
self.schema_name,
self.table_name,
)
with (
self.connection_pool.connection() as conn,
conn.cursor(row_factory=dict_row) as cursor,
):
cursor.execute(
sql.SQL(
"""
select a.attname as column_name,
format_type(a.atttypid, a.atttypmod) as column_type
from pg_attribute a
join pg_class c on a.attrelid = c.oid
join pg_namespace n on c.relnamespace = n.oid
where a.attnum > 0
and not a.attisdropped
and n.nspname = %(schema_name)s
and c.relname = %(table_name)s
order by a.attnum asc
"""
),
{"schema_name": self.schema_name, "table_name": self.table_name},
)
resultset = cursor.fetchall()
existing_columns: dict[str, str] = {
row["column_name"]: row["column_type"] for row in resultset
}
# if table exists, verify that required columns exist and have correct types
if len(existing_columns) > 0:
_logger.debug(
"table '%s.%s' exists with the following column mapping: %s",
self.schema_name,
self.table_name,
existing_columns,
)
id_column_type = existing_columns.get(self.id_column)
if id_column_type != "uuid":
raise ValueError(
f"Table '{self.schema_name}.{self.table_name}' must have a column '{self.id_column}' of type 'uuid'."
)
content_column_type = existing_columns.get(self.content_column)
if content_column_type is None or (
content_column_type != "text"
and not content_column_type.startswith("varchar")
):
raise ValueError(
f"Table '{self.schema_name}.{self.table_name}' must have a column '{self.content_column}' of type 'text' or 'varchar'."
)
embedding_column_type = existing_columns.get(self.embedding_column)
pattern = re.compile(r"(?P<type>\w+)(?:\((?P<dim>\d+)\))?")
m = pattern.match(embedding_column_type if embedding_column_type else "")
parsed_type: str | None = m.group("type") if m else None
parsed_dim: PositiveInt | None = (
PositiveInt(m.group("dim")) if m and m.group("dim") else None
)
vector_types = [t.value for t in VectorType.__members__.values()]
if parsed_type not in vector_types:
raise ValueError(
f"Column '{self.embedding_column}' in table '{self.schema_name}.{self.table_name}' must be one of the following types: {vector_types}."
)
elif (
self.embedding_type is not None
and parsed_type != self.embedding_type.value
):
raise ValueError(
f"Column '{self.embedding_column}' in table '{self.schema_name}.{self.table_name}' has type '{parsed_type}', but the specified embedding_type is '{self.embedding_type.value}'. They must match."
)
elif self.embedding_type is None:
_logger.info(
"embedding_type is not specified, but the column '%s' in table '%s.%s' has type '%s'. Overriding embedding_type accordingly.",
self.embedding_column,
self.schema_name,
self.table_name,
parsed_type,
)
self.embedding_type = VectorType(parsed_type)
if parsed_dim is not None and self.embedding_dimension is None:
_logger.info(
"embedding_dimension is not specified, but the column '%s' in table '%s.%s' has a dimension of %d. Overriding embedding_dimension accordingly.",
self.embedding_column,
self.schema_name,
self.table_name,
parsed_dim,
)
self.embedding_dimension = parsed_dim
elif (
parsed_dim is not None
and self.embedding_dimension is not None
and parsed_dim != self.embedding_dimension
):
raise ValueError(
f"Column '{self.embedding_column}' in table '{self.schema_name}.{self.table_name}' has a dimension of {parsed_dim}, but the specified embedding_dimension is {self.embedding_dimension}. They must match."
)
if self.metadata_column is not None:
existing_type = existing_columns.get(self.metadata_column)
if existing_type is None:
raise ValueError(
f"Column '{self.metadata_column}' does not exist in table '{self.schema_name}.{self.table_name}'."
)
with (
self.connection_pool.connection() as conn,
conn.cursor(row_factory=dict_row) as cursor,
):
_logger.debug(
"checking if table '%s.%s' has a vector index on column '%s'",
self.schema_name,
self.table_name,
self.embedding_column,
)
cursor.execute(
sql.SQL(
"""
with cte as (
select n.nspname as schema_name,
ct.relname as table_name,
ci.relname as index_name,
a.amname as index_type,
pg_get_indexdef(
ci.oid, -- index OID
generate_series(1, array_length(ii.indkey, 1)), -- column no
true -- pretty print
) as index_column,
o.opcname as index_opclass,
ci.reloptions as index_opts
from pg_class ci
join pg_index ii on ii.indexrelid = ci.oid
join pg_am a on a.oid = ci.relam
join pg_class ct on ct.oid = ii.indrelid
join pg_namespace n on n.oid = ci.relnamespace
join pg_opclass o on o.oid = any(ii.indclass)
where ci.relkind = 'i'
and ct.relkind = 'r'
and ii.indisvalid
and ii.indisready
) select schema_name, table_name, index_name, index_type,
index_column, index_opclass, index_opts
from cte
where schema_name = %(schema_name)s
and table_name = %(table_name)s
and index_column like %(embedding_column)s
and (
index_opclass like '%%vector%%'
or index_opclass like '%%halfvec%%'
or index_opclass like '%%sparsevec%%'
or index_opclass like '%%bit%%'
)
order by schema_name, table_name, index_name
"""
),
{
"schema_name": self.schema_name,
"table_name": self.table_name,
"embedding_column": f"%{self.embedding_column}%",
},
)
resultset = cursor.fetchall()
if len(resultset) > 0:
_logger.debug(
"table '%s.%s' has %d vector index(es): %s",
self.schema_name,
self.table_name,
len(resultset),
resultset,
)
if self.embedding_index is None:
_logger.info(
"embedding_index is not specified, using the first found index: %s",
resultset[0],
)
index_type = resultset[0]["index_type"]
index_opclass = VectorOpClass(resultset[0]["index_opclass"])
index_opts = {
opts.split("=")[0]: opts.split("=")[1]
for opts in resultset[0]["index_opts"]
}
index = (
DiskANN(op_class=index_opclass, **index_opts)
if index_type == "diskann"
else (
HNSW(op_class=index_opclass, **index_opts)
if index_type == "hnsw"
else IVFFlat(op_class=index_opclass, **index_opts)
)
)
self.embedding_index = index
else:
_logger.info(
"embedding_index is specified as '%s'; will try to find a matching index.",
self.embedding_index,
)
                    _logger.debug("candidate indexes: %s", resultset)
index_opclass = self.embedding_index.op_class.value # type: ignore[assignment]
if isinstance(self.embedding_index, DiskANN):
index_type = "diskann"
elif isinstance(self.embedding_index, HNSW):
index_type = "hnsw"
else:
index_type = "ivfflat"
for row in resultset:
if (
row["index_type"] == index_type
and row["index_opclass"] == index_opclass
):
_logger.info(
"found a matching index: %s. overriding embedding_index.",
row,
)
index_opts = {
opts.split("=")[0]: opts.split("=")[1]
for opts in row["index_opts"]
}
index = (
DiskANN(op_class=index_opclass, **index_opts)
if index_type == "diskann"
else (
HNSW(op_class=index_opclass, **index_opts)
if index_type == "hnsw"
else IVFFlat(op_class=index_opclass, **index_opts)
)
)
self.embedding_index = index
break
elif self.embedding_index is None:
_logger.info(
"embedding_index is not specified, and no vector index found in table '%s.%s'. defaulting to 'DiskANN' with 'vector_cosine_ops' opclass.",
self.schema_name,
self.table_name,
)
self.embedding_index = DiskANN(op_class=VectorOpClass.vector_cosine_ops)
# if table does not exist, create it
else:
_logger.debug(
"table '%s.%s' does not exist, creating it with the required columns",
self.schema_name,
self.table_name,
)
if self.embedding_type is None:
_logger.warning(
"Embedding type is not specified, defaulting to 'vector'."
)
self.embedding_type = VectorType.vector
if self.embedding_dimension is None:
_logger.warning(
"Embedding dimension is not specified, defaulting to 1536."
)
self.embedding_dimension = PositiveInt(1_536)
if self.embedding_index is None:
_logger.warning(
"Embedding index is not specified, defaulting to 'DiskANN' with 'vector_cosine_ops' opclass."
)
self.embedding_index = DiskANN(op_class=VectorOpClass.vector_cosine_ops)
with self.connection_pool.connection() as conn, conn.cursor() as cursor:
cursor.execute(
sql.SQL(
"""
create table {table_name} (
{id_column} uuid primary key,
{content_column} text,
{embedding_column} {embedding_type}({embedding_dimension}),
{metadata_column} jsonb
)
"""
).format(
table_name=sql.Identifier(self.schema_name, self.table_name),
id_column=sql.Identifier(self.id_column),
content_column=sql.Identifier(self.content_column),
embedding_column=sql.Identifier(self.embedding_column),
embedding_type=sql.Identifier(self.embedding_type.value),
embedding_dimension=sql.Literal(self.embedding_dimension),
metadata_column=sql.Identifier(self.metadata_column),
)
)
return self
def _delete_rows_from_table(
self, ids: list[str] | None = None, **kwargs: Any
    ) -> bool:
        """Delete rows from the table by their IDs or truncate the table.
        Args:
            ids (list[str] | None): List of IDs to delete. If None, truncates the table.
            **kwargs: Additional options, such as 'restart' and 'cascade' for truncation.
        Returns:
            bool: True if the delete/truncate succeeded, False if an exception occurred.
        """
"""
with self.connection_pool.connection() as conn:
conn.autocommit = False
try:
with conn.transaction() as _tx, conn.cursor() as cursor:
if ids is None:
restart = bool(kwargs.pop("restart", None))
cascade = bool(kwargs.pop("cascade", None))
cursor.execute(
sql.SQL(
"""
truncate table {table_name} {restart} {cascade}
"""
).format(
table_name=sql.Identifier(
self.schema_name, self.table_name
),
restart=sql.SQL(
"restart identity"
if restart
else "continue identity"
),
cascade=sql.SQL("cascade" if cascade else "restrict"),
)
)
else:
cursor.execute(
sql.SQL(
"""
delete from {table_name}
where {id_column} = any(%(id)s)
"""
).format(
table_name=sql.Identifier(
self.schema_name, self.table_name
),
id_column=sql.Identifier(self.id_column),
),
{"id": ids},
)
except Exception:
return False
else:
return True
def _similarity_search_by_vector_with_distance(
self, embedding: list[float], k: int = 4, **kwargs: Any
) -> list[tuple[dict, float, np.ndarray | None]]:
"""Perform a similarity search using a vector embedding and return results with distances.
Args:
embedding (list[float]): The query embedding vector.
k (int): Number of top results to return.
**kwargs: Additional options such as 'return_embeddings', 'top_m', and 'filter_expression'.
Returns:
list[tuple[dict, float, np.ndarray | None]]: List of tuples containing document dict, distance, and optionally the embedding.
"""
assert self.embedding_index is not None, (
"embedding_index should have already been set"
)
return_embeddings = bool(kwargs.pop("return_embeddings", None))
top_m = int(kwargs.pop("top_m", 5 * k))
filter_expression: sql.SQL = kwargs.pop("filter_expression", sql.SQL("true"))
with self.connection_pool.connection() as conn:
register_vector(conn)
with conn.cursor(row_factory=dict_row) as cursor:
metadata_column: list[str]
if isinstance(self.metadata_column, list):
metadata_column = [
col if isinstance(col, str) else col[0]
for col in self.metadata_column
]
elif isinstance(self.metadata_column, str):
metadata_column = [self.metadata_column]
else:
metadata_column = []
# do reranking for the following cases:
# - binary or scalar quantizations (for HNSW and IVFFlat), or
# - product quantization (for DiskANN)
if (
self.embedding_index.op_class == VectorOpClass.bit_hamming_ops
or self.embedding_index.op_class == VectorOpClass.bit_jaccard_ops
or self.embedding_index.op_class == VectorOpClass.halfvec_cosine_ops
or self.embedding_index.op_class == VectorOpClass.halfvec_ip_ops
or self.embedding_index.op_class == VectorOpClass.halfvec_l1_ops
or self.embedding_index.op_class == VectorOpClass.halfvec_l2_ops
or (
isinstance(self.embedding_index, DiskANN)
and self.embedding_index.product_quantized
)
):
sql_query = sql.SQL(
"""
select {outer_columns},
{embedding_column} {op} %(query)s as distance,
{maybe_embedding_column}
from (
select {inner_columns}
from {table_name}
where {filter_expression}
order by {expression} asc
limit %(top_m)s
) i
order by {embedding_column} {op} %(query)s asc
limit %(top_k)s
"""
).format(
outer_columns=sql.SQL(", ").join(
map(
sql.Identifier,
[
self.id_column,
self.content_column,
*metadata_column,
],
)
),
embedding_column=sql.Identifier(self.embedding_column),
op=(
sql.SQL(
VectorOpClass.vector_cosine_ops.to_operator()
) # TODO(arda): Think of getting this from outside
if (
self.embedding_index.op_class
in (
VectorOpClass.bit_hamming_ops,
VectorOpClass.bit_jaccard_ops,
)
)
else sql.SQL(self.embedding_index.op_class.to_operator())
),
maybe_embedding_column=(
sql.Identifier(self.embedding_column)
if return_embeddings
else sql.SQL(" as ").join(
(sql.NULL, sql.Identifier(self.embedding_column))
)
),
inner_columns=sql.SQL(", ").join(
map(
sql.Identifier,
[
self.id_column,
self.content_column,
self.embedding_column,
*metadata_column,
],
)
),
table_name=sql.Identifier(self.schema_name, self.table_name),
filter_expression=filter_expression,
expression=(
sql.SQL(
"binary_quantize({embedding_column})::bit({embedding_dim}) {op} binary_quantize({query})"
).format(
embedding_column=sql.Identifier(self.embedding_column),
embedding_dim=sql.Literal(self.embedding_dimension),
op=sql.SQL(self.embedding_index.op_class.to_operator()),
query=sql.Placeholder("query"),
)
if self.embedding_index.op_class
in (
VectorOpClass.bit_hamming_ops,
VectorOpClass.bit_jaccard_ops,
)
else (
sql.SQL(
"{embedding_column}::halfvec({embedding_dim}) {op} {query}::halfvec({embedding_dim})"
).format(
embedding_column=sql.Identifier(
self.embedding_column
),
embedding_dim=sql.Literal(self.embedding_dimension),
op=sql.SQL(
self.embedding_index.op_class.to_operator()
),
query=sql.Placeholder("query"),
)
if self.embedding_index.op_class
in (
VectorOpClass.halfvec_cosine_ops,
VectorOpClass.halfvec_ip_ops,
VectorOpClass.halfvec_l1_ops,
VectorOpClass.halfvec_l2_ops,
)
else sql.SQL("{embedding_column} {op} {query}").format(
embedding_column=sql.Identifier(
self.embedding_column
),
op=sql.SQL(
self.embedding_index.op_class.to_operator()
),
query=sql.Placeholder("query"),
)
)
),
)
# otherwise (i.e., no quantization), do not do reranking
else:
sql_query = sql.SQL(
"""
select {outer_columns},
{embedding_column} {op} %(query)s as distance,
{maybe_embedding_column}
from {table_name}
where {filter_expression}
order by {embedding_column} {op} %(query)s asc
limit %(top_k)s
"""
).format(
outer_columns=sql.SQL(", ").join(
map(
sql.Identifier,
[
self.id_column,
self.content_column,
*metadata_column,
],
)
),
embedding_column=sql.Identifier(self.embedding_column),
op=sql.SQL(self.embedding_index.op_class.to_operator()),
maybe_embedding_column=(
sql.Identifier(self.embedding_column)
if return_embeddings
else sql.SQL(" as ").join(
(sql.NULL, sql.Identifier(self.embedding_column))
)
),
table_name=sql.Identifier(self.schema_name, self.table_name),
filter_expression=filter_expression,
)
cursor.execute(
sql_query,
{
"query": np.array(embedding, dtype=np.float32),
"top_m": top_m,
"top_k": k,
},
)
resultset = cursor.fetchall()
return [
(
{
"id": result[self.id_column],
"content": result[self.content_column],
"metadata": (
result[metadata_column[0]]
if isinstance(self.metadata_column, str)
else {col: result[col] for col in metadata_column}
),
},
result["distance"],
result.get(self.embedding_column), # type: ignore[return-value]
)
for result in resultset
]
def _get_by_ids(self, ids: Sequence[str], /) -> list[dict[str, Any]]:
"""Retrieve documents from the table by their IDs.
Args:
ids (Sequence[str]): List of IDs to retrieve.
Returns:
list[dict[str, Any]]: List of document dictionaries with id, content, embedding, and metadata.
"""
with (
self.connection_pool.connection() as conn,
conn.cursor(row_factory=dict_row) as cursor,
):
metadata_column: list[str]
if isinstance(self.metadata_column, list):
metadata_column = [
col if isinstance(col, str) else col[0]
for col in self.metadata_column
]
elif isinstance(self.metadata_column, str):
metadata_column = [self.metadata_column]
else:
metadata_column = []
if ids is not None:
where_clause = sql.SQL(" where {id_column} = any(%(id)s)").format(
id_column=sql.Identifier(self.id_column)
)
else:
where_clause = sql.SQL("")
get_sql = sql.SQL(
"""
select {columns}
from {table_name}
{where_clause}
"""
).format(
columns=sql.SQL(", ").join(
map(
sql.Identifier,
[
self.id_column,
self.content_column,
self.embedding_column,
*metadata_column,
],
)
),
table_name=sql.Identifier(self.schema_name, self.table_name),
where_clause=where_clause,
)
if ids is not None:
cursor.execute(get_sql, {"id": ids})
else:
cursor.execute(get_sql)
resultset = cursor.fetchall()
documents = [
{
"id": result[self.id_column],
"content": result[self.content_column],
"embedding": result[self.embedding_column],
"metadata": (
result[metadata_column[0]]
if isinstance(self.metadata_column, str)
else {col: result[col] for col in metadata_column}
),
}
for result in resultset
]
return documents
def _full_text_search(
self,
query_str: str,
k: int = 4,
language: str = "english",
**kwargs: Any,
) -> list[tuple[dict, float, None]]:
"""Run a Postgres full-text search using plainto_tsquery and return ranked results.
Args:
query_str: The free-text query string to search for.
k: Maximum number of results to return.
language: The text search configuration/language to use (e.g. 'english').
**kwargs: Reserved for future options; currently ignored.
Returns:
List of tuples (document_dict, rank, None). Document dict contains id, content, and metadata.
"""
with (
self.connection_pool.connection() as conn,
conn.cursor(row_factory=dict_row) as cursor,
):
# normalize metadata column(s)
metadata_columns: list[str]
if isinstance(self.metadata_column, list):
metadata_columns = [
col if isinstance(col, str) else col[0]
for col in self.metadata_column
]
elif isinstance(self.metadata_column, str):
metadata_columns = [self.metadata_column]
else:
metadata_columns = []
sql_query = sql.SQL(
"""
SELECT {id_col}, {content_col},
rank() OVER (
ORDER BY ts_rank_cd(
to_tsvector({lang}, {content_col}),
plainto_tsquery({lang}, %(q)s)
) DESC
) AS rank
FROM {table}
WHERE plainto_tsquery({lang}, %(q)s) @@ to_tsvector({lang}, {content_col})
ORDER BY rank
LIMIT %(top_k)s
"""
).format(
id_col=sql.Identifier(self.id_column),
content_col=sql.Identifier(self.content_column),
lang=sql.Literal(language),
table=sql.Identifier(self.schema_name, self.table_name),
)
cursor.execute(sql_query, {"q": query_str, "top_k": k})
rows = cursor.fetchall()
results: list[tuple[dict, float, None]] = []
for row in rows:
doc = {
"id": row[self.id_column],
"content": row[self.content_column],
"metadata": (
row[metadata_columns[0]]
if isinstance(self.metadata_column, str)
else {col: row[col] for col in metadata_columns}
),
}
rank_val = float(row["rank"]) if row.get("rank") is not None else 0.0
results.append((doc, rank_val, None))
return results
def _dedup_results(
self, results: list[tuple[dict, float, Any]]
) -> list[tuple[dict, float, Any]]:
"""Deduplicate search results by document id, preserving order.
Accepts a list of tuples (document_dict, score, optional_embedding) where
document_dict contains at least the id column (self.id_column) or 'id'.
Returns a filtered list keeping the first occurrence of each id.
"""
seen_ids: set = set()
deduped: list[tuple[dict, float, Any]] = []
for doc, score, emb in results:
# robustly get id value using configured id_column or fallback to 'id'
doc_id = doc.get(self.id_column) if isinstance(doc, dict) else None
if doc_id is None:
doc_id = doc.get("id") if isinstance(doc, dict) else None
# If there's no id, treat the row as unique and keep it
if doc_id is None:
deduped.append((doc, score, emb))
continue
if doc_id not in seen_ids:
deduped.append((doc, score, emb))
seen_ids.add(doc_id)
return deduped
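# Illustrative sketch (not part of the original module): the deduplication
# contract used when merging vector and full-text results; "id" matches the
# default id_column.
def _demo_dedup(store: BaseAzurePGVectorStore) -> None:
    rows = [
        ({"id": "a", "content": "x", "metadata": {}}, 0.10, None),
        ({"id": "a", "content": "x", "metadata": {}}, 0.90, None),  # duplicate, dropped
        ({"id": "b", "content": "y", "metadata": {}}, 0.20, None),
    ]
    deduped = store._dedup_results(rows)
    assert [doc["id"] for doc, _, _ in deduped] == ["a", "b"]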
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/_base.py",
"license": "MIT License",
"lines": 758,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/_connection.py | """Synchronous connection handling for Azure Database for PostgreSQL."""
import logging
import time
from collections.abc import Callable
from azure.core.credentials import TokenCredential
from azure.identity import DefaultAzureCredential
from psycopg import Connection, sql
from psycopg.rows import dict_row
from psycopg_pool import ConnectionPool
from pydantic import ConfigDict
from ._shared import (
TOKEN_CREDENTIAL_SCOPE,
BaseConnectionInfo,
BasicAuth,
Extension,
get_username_password,
)
_logger = logging.getLogger(__name__)
class ConnectionInfo(BaseConnectionInfo):
"""Base connection information for Azure Database for PostgreSQL connections.
:param host: Hostname of the Azure Database for PostgreSQL server.
:type host: str | None
:param dbname: Name of the database to connect to.
:type dbname: str
:param port: Port number for the connection.
:type port: int
:param sslmode: SSL mode for the connection.
:type sslmode: SSLMode
:param credentials: Credentials for the connection.
:type credentials: BasicAuth | TokenCredential
"""
model_config = ConfigDict(
arbitrary_types_allowed=True # True to allow TokenCredential
)
credentials: BasicAuth | TokenCredential = DefaultAzureCredential()
def check_connection(conn: Connection, /, required_extensions: list[Extension] = []):
"""Check if the connection to Azure Database for PostgreSQL is valid and required extensions are installed.
:param conn: Connection to the Azure Database for PostgreSQL.
:type conn: Connection
:param required_extensions: List of required extensions to check if they are installed.
:type required_extensions: list[Extension]
:raises RuntimeError: If the connection check fails or required extensions are not installed.
"""
with conn.cursor(row_factory=dict_row) as cursor:
_logger.debug("checking connection")
t_start = time.perf_counter()
cursor.execute("select 1")
result = cursor.fetchone()
t_elapsed = time.perf_counter() - t_start
assert result is not None, "Connection check failed: no result returned."
_logger.debug(
"connection check successful. elapsed time: %.3f ms", t_elapsed * 1000
)
for ext in required_extensions:
ext_name = ext.ext_name
ext_version = ext.ext_version
schema_name = ext.schema_name
cursor.execute(
sql.SQL(
"""
select extname as ext_name, extversion as ext_version,
n.nspname as schema_name
from pg_extension e
left join pg_namespace n on e.extnamespace = n.oid
where extname = %(ext_name)s
"""
),
{"ext_name": ext_name},
)
resultset = cursor.fetchone()
if resultset is None:
raise RuntimeError(f"Required extension '{ext_name}' is not installed.")
if ext_version is not None and resultset["ext_version"] != ext_version:
raise RuntimeError(
f"Required extension '{ext_name}' version mismatch: "
f"expected {ext_version}, got {resultset['ext_version']}."
)
if schema_name is not None and resultset["schema_name"] != schema_name:
raise RuntimeError(
f"Required extension '{ext_name}' is not installed in the expected schema: "
f"expected {schema_name}, got {resultset['schema_name']}."
)
_logger.debug(
"required extension '%s' is installed (version: %s, schema: %s)",
resultset["ext_name"],
resultset["ext_version"],
resultset["schema_name"],
)
def create_extensions(conn: Connection, /, required_extensions: list[Extension] = []):
"""Create required extensions in the Azure Database for PostgreSQL connection.
:param conn: Connection to the Azure Database for PostgreSQL.
:type conn: Connection
:param required_extensions: List of required extensions to create.
:type required_extensions: list[Extension]
:raises Exception: If the connection is not valid or if an error occurs during extension creation.
"""
with conn.cursor() as cursor:
for ext in required_extensions:
ext_name = ext.ext_name
ext_version = ext.ext_version
schema_name = ext.schema_name
cascade = ext.cascade
_logger.debug(
"creating extension (if not exists): %s (version: %s, schema: %s, cascade: %s)",
ext_name,
ext_version,
schema_name,
cascade,
)
cursor.execute(
sql.SQL(
"""
create extension if not exists {ext_name}
with {schema_expr}
{version_expr}
{cascade_expr}
"""
).format(
ext_name=sql.Identifier(ext_name),
schema_expr=sql.SQL("schema {schema_name}").format(
schema_name=sql.Identifier(schema_name)
)
if schema_name is not None
else sql.SQL(""),
version_expr=sql.SQL("version {version}").format(
version=sql.Literal(ext_version)
)
if ext_version is not None
else sql.SQL(""),
cascade_expr=sql.SQL("cascade") if cascade else sql.SQL(""),
)
)
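# Illustrative sketch (not part of the original module): ensuring pgvector is
# installed, then re-checking it, on an open connection. The Extension
# constructor fields mirror the attribute access above and are an assumption.
def _demo_ensure_pgvector(conn: Connection) -> None:
    wanted = [Extension(ext_name="vector")]
    create_extensions(conn, required_extensions=wanted)
    check_connection(conn, required_extensions=wanted)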
class AzurePGConnectionPool(ConnectionPool):
"""Connection pool for Azure Database for PostgreSQL connections."""
def __init__(
self,
conninfo: str = "",
*,
azure_conn_info: ConnectionInfo = ConnectionInfo(),
**kwargs,
):
if isinstance(azure_conn_info.credentials, TokenCredential):
_logger.debug(
"getting token from TokenCredential for the scope: %s",
TOKEN_CREDENTIAL_SCOPE,
)
credential_provider = azure_conn_info.credentials
token = credential_provider.get_token(TOKEN_CREDENTIAL_SCOPE)
_logger.info("getting username and password from token")
username, password = get_username_password(token)
_logger.debug("wrapping reconnect_failed function")
reconnect_failed: Callable[[ConnectionPool], None] | None = kwargs.get(
"reconnect_failed"
)
def reconnect_failed_wrapper(pool: ConnectionPool) -> None:
if reconnect_failed:
reconnect_failed(pool)
_logger.debug(
"getting token from TokenCredential for the scope: %s",
TOKEN_CREDENTIAL_SCOPE,
)
token = credential_provider.get_token(TOKEN_CREDENTIAL_SCOPE)
_logger.info("getting username and password from token")
username, password = get_username_password(token)
pool.kwargs.update(
user=username,
password=password,
)
kwargs["reconnect_failed"] = reconnect_failed_wrapper
else:
username, password = get_username_password(azure_conn_info.credentials)
azure_conn_info_kwargs = azure_conn_info.model_dump(
mode="json", exclude_none=True, exclude=set(["credentials"])
)
_logger.debug(
"updating ConnectionPool kwargs with those from: %s",
azure_conn_info_kwargs,
)
kwargs_ = kwargs.get("kwargs", {})
kwargs_.update(user=username, password=password, **azure_conn_info_kwargs)
kwargs["kwargs"] = kwargs_
super().__init__(conninfo, **kwargs)
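# Illustrative usage sketch (not part of the original module). The host is a
# placeholder; with the default ConnectionInfo, DefaultAzureCredential
# supplies an Entra ID access token as the password.
if __name__ == "__main__":
    info = ConnectionInfo(
        host="example.postgres.database.azure.com",
        dbname="postgres",
    )
    with AzurePGConnectionPool(azure_conn_info=info) as pool:
        with pool.connection() as conn:
            check_connection(conn)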
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/_connection.py",
"license": "MIT License",
"lines": 182,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/_shared.py | """Shared utilities and models for asynchronous and synchronous operations."""
import asyncio
import base64
import json
import sys
import threading
from abc import abstractmethod
from collections.abc import Coroutine
from concurrent.futures import ThreadPoolExecutor
from enum import Enum
from typing import Annotated, Any, Generic, TypeVar
# typing.Self is introduced in Python 3.11
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
# typing.override is introduced in Python 3.12
if sys.version_info >= (3, 12):
from typing import override
else:
from typing_extensions import override
from azure.core.credentials import AccessToken
from pydantic import (
BaseModel,
Field,
NonNegativeInt,
PositiveFloat,
PositiveInt,
model_validator,
)
R = TypeVar("R")
SP = TypeVar("SP", bound="SearchParams")
TOKEN_CREDENTIAL_SCOPE = "https://ossrdbms-aad.database.windows.net/.default"
class SSLMode(str, Enum):
"""SSL mode for Azure Database for PostgreSQL connections."""
disable = "disable"
allow = "allow"
prefer = "prefer"
require = "require"
verify_ca = "verify-ca"
verify_full = "verify-full"
class BasicAuth(BaseModel):
"""Basic username/password authentication for Azure Database for PostgreSQL connections.
:param username: Username for the connection.
:type username: str
:param password: Password for the connection.
:type password: str
"""
username: str = "postgres"
password: str = ""
class BaseConnectionInfo(BaseModel):
"""Base connection information for Azure Database for PostgreSQL connections.
:param application_name: Name of the application connecting to the database.
:type application_name: str
:param host: Hostname of the Azure Database for PostgreSQL server.
:type host: str | None
:param dbname: Name of the database to connect to.
:type dbname: str
:param port: Port number for the connection.
:type port: int
:param sslmode: SSL mode for the connection.
:type sslmode: SSLMode
"""
application_name: str = "azure-postgresql"
host: str | None = None
dbname: str = "postgres"
port: Annotated[NonNegativeInt, Field(le=65535)] = 5432
sslmode: SSLMode = SSLMode.require
def run_coroutine_in_sync(coroutine: Coroutine[Any, Any, R]) -> R:
    """Run a coroutine to completion from synchronous code and return its result.
    If no event loop is running, ``asyncio.run`` is used. If a loop is running
    on the main thread, the coroutine executes in a fresh loop on a worker
    thread; if a loop is running on another thread, the coroutine is scheduled
    onto it via ``asyncio.run_coroutine_threadsafe``.
    """
def run_in_new_loop() -> R:
new_loop = asyncio.new_event_loop()
asyncio.set_event_loop(new_loop)
try:
return new_loop.run_until_complete(coroutine)
finally:
new_loop.close()
try:
loop = asyncio.get_running_loop()
except RuntimeError:
result = asyncio.run(coroutine)
else:
if threading.current_thread() is threading.main_thread():
if not loop.is_running():
result = loop.run_until_complete(coroutine)
else:
with ThreadPoolExecutor() as pool:
future = pool.submit(run_in_new_loop)
result = future.result()
else:
future = asyncio.run_coroutine_threadsafe(coroutine, loop)
result = future.result()
return result
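# Illustrative sketch (not part of the original module): driving a coroutine
# from synchronous code, with or without a loop already running.
async def _demo_coroutine() -> str:
    await asyncio.sleep(0)
    return "done"
# run_coroutine_in_sync(_demo_coroutine()) -> "done"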
def get_username_password(
credentials: BasicAuth | AccessToken,
) -> tuple[str, str]:
"""Get username and password from credentials.
:param credentials: BasicAuth for username/password or AccessToken for JWT token.
:type credentials: BasicAuth | AccessToken
    :raises ValueError: User name not found in the JWT token payload
:raises TypeError: Invalid credentials type
:return: Tuple of username and password strings (plaintext).
:rtype: tuple[str, str]
"""
if isinstance(credentials, BasicAuth):
return credentials.username, credentials.password
elif isinstance(credentials, AccessToken):
token = credentials.token
_header, body_, _signature = token.split(".")
        # JWT segments are base64url-encoded without padding; restore the
        # 0-3 "=" characters before decoding.
        body = json.loads(
            base64.urlsafe_b64decode(body_ + "=" * (-len(body_) % 4)).decode("utf-8")
        )
        username: str | None = body.get("upn", body.get("unique_name"))
        if username is None:
            raise ValueError("User name not found in JWT token payload")
return username, token
else:
raise TypeError(
f"Invalid credentials type: {type(credentials)}. "
"Expected BasicAuth or TokenCredential."
)
class VectorOpClass(str, Enum):
"""Enumeration for operator classes used in vector indexes."""
# Full-precision dense vector operator classes
vector_cosine_ops = "vector_cosine_ops"
vector_ip_ops = "vector_ip_ops"
vector_l1_ops = "vector_l1_ops"
vector_l2_ops = "vector_l2_ops"
# Half-precision dense vector operator classes
halfvec_cosine_ops = "halfvec_cosine_ops"
halfvec_ip_ops = "halfvec_ip_ops"
halfvec_l1_ops = "halfvec_l1_ops"
halfvec_l2_ops = "halfvec_l2_ops"
# Sparse vector operator classes
sparsevec_cosine_ops = "sparsevec_cosine_ops"
sparsevec_ip_ops = "sparsevec_ip_ops"
sparsevec_l1_ops = "sparsevec_l1_ops"
sparsevec_l2_ops = "sparsevec_l2_ops"
# Bit vector operator classes
bit_hamming_ops = "bit_hamming_ops"
bit_jaccard_ops = "bit_jaccard_ops"
def to_operator(self) -> str:
"""Return the distance operator as a string.
:return: The distance operator string.
:rtype: str
:raises ValueError: If the vector operator class is unsupported.
"""
match self:
case (
VectorOpClass.vector_cosine_ops
| VectorOpClass.halfvec_cosine_ops
| VectorOpClass.sparsevec_cosine_ops
):
return "<=>"
case (
VectorOpClass.vector_ip_ops
| VectorOpClass.halfvec_ip_ops
| VectorOpClass.sparsevec_ip_ops
):
return "<#>"
case (
VectorOpClass.vector_l1_ops
| VectorOpClass.halfvec_l1_ops
| VectorOpClass.sparsevec_l1_ops
):
return "<+>"
case (
VectorOpClass.vector_l2_ops
| VectorOpClass.halfvec_l2_ops
| VectorOpClass.sparsevec_l2_ops
):
return "<->"
case VectorOpClass.bit_hamming_ops:
return "<~>"
case VectorOpClass.bit_jaccard_ops:
return "<%>"
case _:
raise ValueError(f"Unsupported vector operator class: {self}")
class VectorType(str, Enum):
"""Enumeration for vector types used in vector similarity search."""
bit = "bit"
halfvec = "halfvec"
sparsevec = "sparsevec"
vector = "vector"
class Algorithm(BaseModel, Generic[SP]):
    """Base model for a vector index algorithm: operator class plus generic build resources."""
op_class: VectorOpClass = VectorOpClass.vector_cosine_ops
maintenance_work_mem: str | None = None
max_parallel_maintenance_workers: Annotated[
NonNegativeInt | None, Field(le=1_024)
] = None
max_parallel_workers: Annotated[NonNegativeInt | None, Field(le=1_024)] = None
@abstractmethod
def default_search_params(self) -> SP:
"""Return the default search parameters for the algorithm.
:return: An instance of the search parameters model.
:rtype: SP
"""
...
@abstractmethod
def build_settings(self, exclude_none: bool = True) -> dict[str, Any]:
"""Return the specific index build settings for the algorithm.
:param exclude_none: Whether to exclude keys with None values in the dictionary.
:type exclude_none: bool
:return: A dictionary containing the settings.
:rtype: dict[str, Any]
"""
...
def index_settings(self, exclude_none: bool = True) -> dict[str, Any]:
"""Return the general index settings for the algorithm.
:param exclude_none: Whether to exclude keys with None values in the dictionary.
:type exclude_none: bool
:return: A dictionary containing the index settings.
:rtype: dict[str, Any]
"""
return {
key: value
for key, value in self.model_dump(
mode="json", exclude_none=exclude_none
).items()
if key
in [
"maintenance_work_mem",
"max_parallel_maintenance_workers",
"max_parallel_workers",
]
}
class SearchParams(BaseModel):
    """Base model for algorithm-specific vector index search parameters."""
@abstractmethod
def search_settings(self, exclude_none: bool = True) -> dict[str, Any]:
"""Return the specific index search settings for the algorithm.
:param exclude_none: Whether to exclude keys with None values in the dictionary.
:type exclude_none: bool
:return: A dictionary containing the search settings.
:rtype: dict[str, Any]
"""
...
class DiskANNIterativeScanMode(str, Enum):
"""Enumeration for DiskANN iterative scan modes."""
off = "off"
relaxed = "relaxed_order"
strict = "strict_order"
class DiskANNSearchParams(SearchParams):
    """Search parameters for DiskANN indexes, emitted with the 'diskann.' key prefix."""
l_value_is: Annotated[PositiveInt | None, Field(ge=10, le=10_000)] = None
iterative_search: DiskANNIterativeScanMode | None = None
@override
    def search_settings(self, exclude_none: bool = True) -> dict[str, Any]:
return {
f"diskann.{key}": value
for key, value in self.model_dump(
mode="json", exclude_none=exclude_none
).items()
}
class DiskANN(Algorithm[DiskANNSearchParams]):
    """Build settings for DiskANN vector indexes."""
max_neighbors: Annotated[PositiveInt | None, Field(ge=20, le=1_538)] = None
l_value_ib: Annotated[PositiveInt | None, Field(ge=10, le=500)] = None
product_quantized: bool | None = None
pq_param_num_chunks: Annotated[NonNegativeInt | None, Field(le=8_000)] = None
pq_param_training_samples: Annotated[NonNegativeInt | None, Field(le=1_000_000)] = (
None
)
@model_validator(mode="after")
def sanity_check(self) -> Self:
if not self.product_quantized and self.pq_param_num_chunks is not None:
raise ValueError(
"Parameter 'product_quantized' must be True when 'pq_param_num_chunks' is set."
)
if not self.product_quantized and self.pq_param_training_samples is not None:
raise ValueError(
"Parameter 'product_quantized' must be True when 'pq_param_training_samples' is set."
)
return self
@override
def build_settings(self, exclude_none: bool = True) -> dict[str, Any]:
return {
key: value
for key, value in self.model_dump(
mode="json", exclude_none=exclude_none
).items()
if key
in [
"max_neighbors",
"l_value_ib",
"product_quantized",
"pq_param_num_chunks",
"pq_param_training_samples",
]
}
@override
def default_search_params(self) -> DiskANNSearchParams:
return DiskANNSearchParams()
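# Illustrative sketch (editor-added): build_settings() returns only the
# DiskANN-specific keys, and the validator above requires product_quantized
# to be True before any pq_param_* field may be set.
def _example_diskann_settings() -> None:
    algo = DiskANN(max_neighbors=32, l_value_ib=100)
    assert algo.build_settings() == {"max_neighbors": 32, "l_value_ib": 100}
    assert isinstance(algo.default_search_params(), DiskANNSearchParams)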
class HNSWIterativeScanMode(str, Enum):
"""Enumeration for HNSW iterative scan modes."""
off = "off"
relaxed = "relaxed_order"
strict = "strict_order"
class HNSWSearchParams(SearchParams):
    """Search parameters for HNSW indexes, emitted with the 'hnsw.' key prefix."""
ef_search: Annotated[PositiveInt | None, Field(le=1_000)] = None
iterative_scan: HNSWIterativeScanMode | None = None
max_scan_tuples: PositiveInt | None = None
scan_mem_multiplier: Annotated[PositiveFloat | None, Field(le=1_000)] = None
@override
    def search_settings(self, exclude_none: bool = True) -> dict[str, Any]:
return {
f"hnsw.{key}": value
for key, value in self.model_dump(
mode="json", exclude_none=exclude_none
).items()
}
class HNSW(Algorithm[HNSWSearchParams]):
    """Build settings for HNSW vector indexes."""
m: Annotated[PositiveInt | None, Field(ge=2, le=100)] = None
ef_construction: Annotated[PositiveInt | None, Field(ge=4, le=1_000)] = None
@model_validator(mode="after")
def sanity_check(self) -> Self:
if (
self.m is not None
and self.ef_construction is not None
and self.ef_construction < 2 * self.m
):
raise ValueError(
"Parameter 'ef_construction' must be at least twice the value of 'm'."
)
return self
@override
    def build_settings(self, exclude_none: bool = True) -> dict[str, Any]:
return {
key: value
for key, value in self.model_dump(
mode="json", exclude_none=exclude_none
).items()
if key in ["m", "ef_construction"]
}
@override
def default_search_params(self) -> HNSWSearchParams:
return HNSWSearchParams()
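# Illustrative sketch (editor-added): the validator above enforces
# ef_construction >= 2 * m, so HNSW(m=16, ef_construction=16) would raise a
# pydantic ValidationError while the values below are accepted.
def _example_hnsw_settings() -> None:
    algo = HNSW(m=16, ef_construction=64)
    assert algo.build_settings() == {"m": 16, "ef_construction": 64}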
class IVFFlatIterativeScanMode(str, Enum):
"""Enumeration for IVFFlat iterative scan modes."""
off = "off"
relaxed = "relaxed_order"
class IVFFlatSearchParams(SearchParams):
    """Search parameters for IVFFlat indexes, emitted with the 'ivfflat.' key prefix."""
probes: Annotated[PositiveInt | None, Field(le=32_768)] = None
iterative_scan: IVFFlatIterativeScanMode | None = None
max_probes: Annotated[PositiveInt | None, Field(le=32_768)] = None
@override
    def search_settings(self, exclude_none: bool = True) -> dict[str, Any]:
return {
f"ivfflat.{key}": value
for key, value in self.model_dump(
mode="json", exclude_none=exclude_none
).items()
}
class IVFFlat(Algorithm[IVFFlatSearchParams]):
    """Build settings for IVFFlat vector indexes."""
lists: Annotated[PositiveInt | None, Field(le=32_768)] = None
@override
def build_settings(self, exclude_none: bool = True) -> dict[str, Any]:
return {
key: value
for key, value in self.model_dump(
mode="json", exclude_none=exclude_none
).items()
if key in ["lists"]
}
@override
def default_search_params(self) -> IVFFlatSearchParams:
return IVFFlatSearchParams()
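# Illustrative sketch (editor-added): search settings are emitted with the
# 'ivfflat.' key prefix, mirroring DiskANN ('diskann.') and HNSW ('hnsw.').
def _example_ivfflat_search_params() -> None:
    params = IVFFlatSearchParams(probes=10)
    assert params.search_settings() == {"ivfflat.probes": 10}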
class Extension(BaseModel):
"""Model representing a PostgreSQL extension.
:param ext_name: Name of the extension to be created, checked or dropped.
:type ext_name: str
:param ext_version: Optional version of the extension to be created or checked.
:type ext_version: str | None
:param schema_name: Optional schema name where the extension should be created
or checked.
:type schema_name: str | None
:param cascade: Whether to automatically install the extension dependencies or
drop the objects that depend on the extension.
:type cascade: bool
"""
ext_name: str
ext_version: str | None = None
schema_name: str | None = None
cascade: bool = False
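# Illustrative sketch (editor-added): declaring the pgvector extension as a
# connection requirement; the field values are hypothetical examples.
def _example_extension() -> None:
    ext = Extension(ext_name="vector", schema_name="public", cascade=True)
    assert ext.ext_version is None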
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/_shared.py",
"license": "MIT License",
"lines": 380,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/aio/_connection.py | """Asynchronous connection handling for Azure Database for PostgreSQL."""
import logging
import time
from collections.abc import Awaitable, Callable
from azure.core.credentials_async import AsyncTokenCredential
from azure.identity.aio import DefaultAzureCredential
from psycopg import AsyncConnection, sql
from psycopg.rows import dict_row
from psycopg_pool import AsyncConnectionPool
from pydantic import ConfigDict
from .._shared import (
TOKEN_CREDENTIAL_SCOPE,
BaseConnectionInfo,
BasicAuth,
Extension,
get_username_password,
run_coroutine_in_sync,
)
_logger = logging.getLogger(__name__)
async def async_check_connection(
conn: AsyncConnection, /, required_extensions: list[Extension] = []
):
"""Check if the connection to Azure Database for PostgreSQL is valid and required extensions are installed.
:param conn: Async connection to the Azure Database for PostgreSQL.
:type conn: AsyncConnection
:param required_extensions: List of required extensions to check if they are installed.
:type required_extensions: list[Extension]
    :raises AssertionError: If the connectivity check returns no result.
    :raises RuntimeError: If a required extension is missing or does not match the expected version/schema.
"""
async with conn.cursor(row_factory=dict_row) as cursor:
_logger.debug("checking connection")
t_start = time.perf_counter()
await cursor.execute("select 1")
result = await cursor.fetchone()
t_elapsed = time.perf_counter() - t_start
assert result is not None, "Connection check failed: no result returned."
_logger.debug(
"connection check successful. elapsed time: %.3f ms", t_elapsed * 1000
)
for ext in required_extensions:
ext_name = ext.ext_name
ext_version = ext.ext_version
schema_name = ext.schema_name
await cursor.execute(
sql.SQL(
"""
select extname as ext_name, extversion as ext_version,
n.nspname as schema_name
from pg_extension e
left join pg_namespace n on e.extnamespace = n.oid
where extname = %(ext_name)s
"""
),
{"ext_name": ext_name},
)
resultset = await cursor.fetchone()
if resultset is None:
raise RuntimeError(f"Required extension '{ext_name}' is not installed.")
if ext_version is not None and resultset["ext_version"] != ext_version:
raise RuntimeError(
f"Required extension '{ext_name}' version mismatch: "
f"expected {ext_version}, got {resultset['ext_version']}."
)
if schema_name is not None and resultset["schema_name"] != schema_name:
raise RuntimeError(
f"Required extension '{ext_name}' is not installed in the expected schema: "
f"expected {schema_name}, got {resultset['schema_name']}."
)
_logger.debug(
"required extension '%s' is installed (version: %s, schema: %s)",
resultset["ext_name"],
resultset["ext_version"],
resultset["schema_name"],
)
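# Illustrative usage sketch (editor-added): validating a pooled connection and
# the pgvector extension; the pool argument is an assumption for the example.
async def _example_async_check_connection(pool: AsyncConnectionPool) -> None:
    async with pool.connection() as conn:
        await async_check_connection(
            conn, required_extensions=[Extension(ext_name="vector")]
        )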
async def async_create_extensions(
conn: AsyncConnection, /, required_extensions: list[Extension] = []
):
"""Create required extensions in the Azure Database for PostgreSQL connection.
:param conn: Async connection to the Azure Database for PostgreSQL.
:type conn: AsyncConnection
:param required_extensions: List of required extensions to create.
:type required_extensions: list[Extension]
:raises Exception: If the connection is not valid or if an error occurs during extension creation.
"""
async with conn.cursor() as cursor:
for ext in required_extensions:
ext_name = ext.ext_name
ext_version = ext.ext_version
schema_name = ext.schema_name
cascade = ext.cascade
_logger.debug(
"creating extension (if not exists): %s (version: %s, schema: %s, cascade: %s)",
ext_name,
ext_version,
schema_name,
cascade,
)
await cursor.execute(
sql.SQL(
"""
create extension if not exists {ext_name}
with {schema_expr}
{version_expr}
{cascade_expr}
"""
).format(
ext_name=sql.Identifier(ext_name),
schema_expr=sql.SQL("schema {schema_name}").format(
schema_name=sql.Identifier(schema_name)
)
if schema_name is not None
else sql.SQL(""),
version_expr=sql.SQL("version {version}").format(
version=sql.Literal(ext_version)
)
if ext_version is not None
else sql.SQL(""),
cascade_expr=sql.SQL("cascade") if cascade else sql.SQL(""),
)
)
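# Illustrative sketch (editor-added): creating required extensions before
# re-validating them on the same pooled connection.
async def _example_async_create_extensions(pool: AsyncConnectionPool) -> None:
    required = [Extension(ext_name="vector", cascade=True)]
    async with pool.connection() as conn:
        await async_create_extensions(conn, required_extensions=required)
        await async_check_connection(conn, required_extensions=required)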
class AsyncConnectionInfo(BaseConnectionInfo):
"""Base connection information for Azure Database for PostgreSQL connections.
:param host: Hostname of the Azure Database for PostgreSQL server.
:type host: str | None
:param dbname: Name of the database to connect to.
:type dbname: str
:param port: Port number for the connection.
:type port: int
:param credentials: Credentials for authentication.
:type credentials: BasicAuth | AsyncTokenCredential
:param sslmode: SSL mode for the connection.
:type sslmode: SSLMode
"""
model_config = ConfigDict(
arbitrary_types_allowed=True, # True to allow AsyncTokenCredential
)
credentials: BasicAuth | AsyncTokenCredential = DefaultAzureCredential()
class AsyncAzurePGConnectionPool(AsyncConnectionPool):
"""Async connection pool for Azure Database for PostgreSQL connections."""
def __init__(
self,
conninfo: str = "",
*,
azure_conn_info: AsyncConnectionInfo = AsyncConnectionInfo(),
**kwargs,
):
if isinstance(azure_conn_info.credentials, AsyncTokenCredential):
credential_provider = azure_conn_info.credentials
coroutine = credential_provider.get_token(TOKEN_CREDENTIAL_SCOPE)
_logger.debug(
"getting token from TokenCredential for the scope: %s",
TOKEN_CREDENTIAL_SCOPE,
)
token = run_coroutine_in_sync(coroutine)
_logger.info("getting username and password from token")
username, password = get_username_password(token)
_logger.debug("wrapping reconnect_failed function")
reconnect_failed: (
Callable[[AsyncConnectionPool], Awaitable[None]] | None
) = kwargs.get("reconnect_failed")
async def reconnect_failed_wrapper(pool: AsyncConnectionPool) -> None:
if reconnect_failed:
await reconnect_failed(pool)
_logger.debug(
"getting token from TokenCredential for the scope: %s",
TOKEN_CREDENTIAL_SCOPE,
)
token = await credential_provider.get_token(TOKEN_CREDENTIAL_SCOPE)
_logger.info("getting username and password from token")
username, password = get_username_password(token)
pool.kwargs.update(
user=username,
password=password,
)
kwargs["reconnect_failed"] = reconnect_failed_wrapper
else:
username, password = get_username_password(azure_conn_info.credentials)
azure_conn_info_kwargs = azure_conn_info.model_dump(
mode="json", exclude_none=True, exclude=set(["credentials"])
)
_logger.debug(
"updating AsyncConnectionPool kwargs with those from: %s",
azure_conn_info_kwargs,
)
kwargs_ = kwargs.get("kwargs", {})
kwargs_.update(user=username, password=password, **azure_conn_info_kwargs)
kwargs["kwargs"] = kwargs_
super().__init__(conninfo, **kwargs)
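# Illustrative construction sketch (editor-added): Entra ID token auth with an
# explicit host and database; the host value and pool sizes are hypothetical.
def _example_async_pool() -> AsyncAzurePGConnectionPool:
    info = AsyncConnectionInfo(
        host="example.postgres.database.azure.com",
        dbname="postgres",
        credentials=DefaultAzureCredential(),
    )
    return AsyncAzurePGConnectionPool(azure_conn_info=info, min_size=1, max_size=4)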
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/aio/_connection.py",
"license": "MIT License",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/tests/common/test_connection.py | """Synchronous connection handling tests for Azure Database for PostgreSQL."""
from collections.abc import Generator
from contextlib import contextmanager, nullcontext
from typing import Any
import pytest
from psycopg import Connection, sql
from pydantic import BaseModel, ConfigDict
from llama_index.vector_stores.azure_postgres.common import (
Extension,
check_connection,
create_extensions,
)
class MockCursorBase(BaseModel):
"""A minimal mock cursor base model used for testing DB interactions.
Attributes:
broken (bool): If True, simulates a broken cursor that fails queries.
last_query (str | sql.SQL | None): Stores the last executed query for inspection.
response (dict | None): Value to return from fetchone() when appropriate.
"""
broken: bool = False
last_query: str | sql.SQL | None = None
response: dict | None = None
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
class MockCursor(MockCursorBase):
"""A mock cursor implementing execute and fetchone for tests.
The mock cursor records the last executed query and returns canned
responses from the ``response`` attribute. When ``broken`` is True,
``fetchone`` returns None to simulate failures.
"""
def execute(self, query: str | sql.SQL, _params=None) -> None:
"""Execute a SQL query and record it for later inspection."""
self.last_query = query
def fetchone(self) -> None | dict:
"""Return a single-row result dict."""
assert self.last_query is not None, "No query executed."
# We either give `"select 1"` or `sql.SQL(...)` as the last query.
if isinstance(self.last_query, str):
return None if self.broken else {"?column?": 1}
return self.response
@pytest.fixture
def mock_cursor(
connection: Connection,
monkeypatch: pytest.MonkeyPatch,
request: pytest.FixtureRequest,
):
"""Pytest fixture that replaces a real DB cursor with a MockCursor.
Expects the parameterization to pass an instance of ``MockCursor``
via ``request.param``. The fixture monkeypatches the connection's
``cursor`` method to return the supplied mock cursor as a context
manager.
"""
assert isinstance(request.param, MockCursor), "Expected a MockCursor instance."
@contextmanager
def mock_cursor(**_kwargs):
yield request.param
monkeypatch.setattr(connection, "cursor", mock_cursor)
class TestCheckConnection:
"""Tests for verifying the database connection and required extensions.
These tests exercise ``check_connection`` with various mocked cursor
responses to validate behavior for installed extensions, missing
extensions, version mismatches, and broken cursors.
"""
def test_it_works(self, connection: Connection) -> None:
"""Ensure ``check_connection`` returns None on a healthy connection."""
assert check_connection(connection) is None
@pytest.mark.parametrize(
["extension", "mock_cursor", "expected_result"],
[
(
Extension(ext_name="test_ext", ext_version="1.0", schema_name="public"),
MockCursor(
broken=False,
response={
"ext_name": "test_ext",
"ext_version": "1.0",
"schema_name": "public",
},
),
nullcontext(None),
),
(
Extension(ext_name="test_ext", ext_version="1.0", schema_name="public"),
MockCursor(broken=True, response=None),
pytest.raises(AssertionError, match="Connection check failed"),
),
(
Extension(ext_name="test_ext", ext_version="1.0", schema_name="public"),
MockCursor(broken=False, response=None),
pytest.raises(
RuntimeError,
match="Required extension 'test_ext' is not installed.",
),
),
(
Extension(ext_name="test_ext", ext_version="1.0", schema_name="public"),
MockCursor(broken=False, response={"ext_version": "wrong_version"}),
pytest.raises(
RuntimeError,
match="Required extension 'test_ext' version mismatch: expected 1.0, got wrong_version.",
),
),
(
Extension(ext_name="test_ext", ext_version="1.0", schema_name="public"),
MockCursor(
broken=False,
response={"ext_version": "1.0", "schema_name": "wrong_schema"},
),
pytest.raises(
RuntimeError,
match="Required extension 'test_ext' is not installed in the expected schema: expected public, got wrong_schema.",
),
),
],
ids=[
"extension-installed",
"broken-cursor",
"extension-not-installed",
"version-mismatch",
"schema-mismatch",
],
indirect=["mock_cursor"],
)
def test_mock_it_works(
self,
connection: Connection,
extension: Extension,
mock_cursor,
expected_result: nullcontext | pytest.RaisesExc,
) -> None:
"""Run parameterized checks of ``check_connection`` using mocked cursors.
Parameterization covers installed extension, broken cursor,
missing extension, version mismatch, and schema mismatch cases.
"""
with expected_result as e:
assert check_connection(connection, required_extensions=[extension]) == e
@pytest.fixture
def extension_creatable(
connection: Connection, request: pytest.FixtureRequest
) -> Generator[Extension, Any, None]:
"""Fixture that attempts to create (and later drop) a DB extension.
Uses the provided ``Extension`` instance via ``request.param`` and
will skip the test if creation fails. After the test, the extension
is dropped if it was not previously installed.
"""
assert isinstance(request.param, Extension), "Expected an Extension instance."
ext_already_installed = False
with connection.cursor() as cursor:
cursor.execute(
sql.SQL(
"""
select extname, extversion
from pg_extension
where extname = %(ext_name)s
"""
),
{"ext_name": request.param.ext_name},
)
result = cursor.fetchone()
ext_already_installed = result is not None
try:
cursor.execute(
sql.SQL(
"""
create extension if not exists {ext_name}
with {schema_expr}
{version_expr}
{cascade_expr}
"""
).format(
ext_name=sql.Identifier(request.param.ext_name),
schema_expr=sql.SQL("schema {schema_name}").format(
schema_name=sql.Identifier(request.param.schema_name)
)
if request.param.schema_name
else sql.SQL(""),
version_expr=sql.SQL("version {ext_version}").format(
ext_version=sql.Literal(request.param.ext_version)
)
if request.param.ext_version
else sql.SQL(""),
cascade_expr=sql.SQL("cascade")
if request.param.cascade
else sql.SQL(""),
)
)
except Exception as e:
pytest.skip(
reason=f"Extension {request.param.ext_name} could not be created: {e}"
)
yield request.param
if not ext_already_installed:
with connection.cursor() as cursor:
cursor.execute(
sql.SQL(
"""
drop extension if exists {ext_name}
"""
).format(
ext_name=sql.Identifier(request.param.ext_name),
)
)
class TestCreateExtensions:
"""Tests that validate creating and handling of Postgres extensions.
- ``test_it_works`` verifies that a valid extension can be created.
- ``test_it_fails`` ensures that attempting to create a non-existent
extension raises an informative exception.
"""
@pytest.mark.parametrize(
"extension_creatable",
[Extension(ext_name="vector")],
ids=["vector"],
indirect=True,
)
def test_it_works(self, connection: Connection, extension_creatable: Extension):
"""Assert that creating a valid extension returns None (no error)."""
assert (
create_extensions(
connection,
required_extensions=[extension_creatable],
)
is None
)
def test_it_fails(self, connection: Connection):
"""Verify that creating a missing extension raises an exception."""
extension = Extension(
ext_name="non_existent_ext",
ext_version="1.0",
schema_name="public",
)
with pytest.raises(
Exception, match='extension "non_existent_ext" is not available'
):
create_extensions(
connection,
required_extensions=[extension],
)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/tests/common/test_connection.py",
"license": "MIT License",
"lines": 238,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/tests/common/test_shared.py | """Unit tests for shared utilities related to credential parsing (get_username_password)."""
import base64
import hashlib
import hmac
import json
from contextlib import nullcontext
import pytest
from azure.core.credentials import AccessToken, TokenCredential
from llama_index.vector_stores.azure_postgres.common import BasicAuth
from llama_index.vector_stores.azure_postgres.common._shared import (
TOKEN_CREDENTIAL_SCOPE,
get_username_password,
)
class TestGetUsernamePassword:
"""Test suite for get_username_password covering BasicAuth, TokenCredential, invalid inputs, and JWT-like token payload extraction."""
def test_it_works(self, credentials: BasicAuth | TokenCredential) -> None:
"""Ensure username/password extraction works for both credential types."""
if isinstance(credentials, BasicAuth):
username, password = get_username_password(credentials)
assert username == credentials.username, (
"Username should match BasicAuth username"
)
assert password == credentials.password, (
"Password should match BasicAuth password"
)
elif isinstance(credentials, TokenCredential):
token = credentials.get_token(TOKEN_CREDENTIAL_SCOPE)
username, password = get_username_password(token)
assert len(username) > 0, "Username should not be empty for TokenCredential"
assert password == token.token, (
"Password should match TokenCredential token"
)
def test_invalid_credentials_type(self) -> None:
"""Assert passing an invalid type raises a TypeError."""
with pytest.raises(TypeError, match="Invalid credentials type"):
get_username_password("invalid_credentials_type") # type: ignore[arg-type]
@pytest.mark.parametrize(
["payload", "username"],
[
({"upn": "test_user_1"}, nullcontext("test_user_1")),
({"unique_name": "test_user_2"}, nullcontext("test_user_2")),
(
{"upn": "test_user_3", "unique_name": "test_user_4"},
nullcontext("test_user_3"),
),
(
{"no-upn-or-unique_name": "test_user_5"},
pytest.raises(
ValueError, match="User name not found in JWT token header"
),
),
],
ids=[
"only-upn",
"only-unique_name",
"upn-over-unique_name",
"no-upn-or-unique_name",
],
)
def test_mock_it_works(
self, payload: dict, username: nullcontext | pytest.RaisesExc
) -> None:
"""Validate extraction from JWT-like access token payloads."""
_header = {"alg": "HS256", "typ": "JWT"}
_header_encoded = base64.urlsafe_b64encode(
json.dumps(_header).encode()
).decode()
_payload_encoded = base64.urlsafe_b64encode(
json.dumps(payload).encode()
).decode()
h = hmac.new(
b"secret",
".".join([_header_encoded, _payload_encoded]).encode(),
hashlib.sha256,
)
_signature = base64.urlsafe_b64encode(h.digest()).decode()
token = AccessToken(
".".join([_header_encoded, _payload_encoded, _signature]), -1
)
with username as expected_username:
username_, password = get_username_password(token)
assert username_ == expected_username, (
"Username should match expected username from JWT token"
)
assert password == token.token, (
"Password should match TokenCredential token"
)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/tests/common/test_shared.py",
"license": "MIT License",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/tests/llama_index/test_vectorstore.py | """VectorStore integration tests for Azure Database for PostgreSQL using LlamaIndex."""
import re
from contextlib import nullcontext
from typing import Any
import pytest
from psycopg import sql
from psycopg.rows import dict_row
from psycopg_pool import ConnectionPool
from pydantic import PositiveInt
from llama_index.core.schema import (
TextNode,
)
from llama_index.core.vector_stores.types import (
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
)
from llama_index.vector_stores.azure_postgres import AzurePGVectorStore
from llama_index.vector_stores.azure_postgres.common import DiskANN
from .conftest import Table
# SQL constants to be used in tests
_GET_TABLE_COLUMNS_AND_TYPES = sql.SQL(
"""
select a.attname as column_name,
format_type(a.atttypid, a.atttypmod) as column_type
from pg_attribute a
join pg_class c on a.attrelid = c.oid
join pg_namespace n on c.relnamespace = n.oid
where a.attnum > 0
and not a.attisdropped
and n.nspname = %(schema_name)s
and c.relname = %(table_name)s
order by a.attnum asc
"""
)
# Utility/assertion functions to be used in tests
def verify_table_created(table: Table, resultset: list[dict[str, Any]]) -> None:
"""Verify that the table has been created with the correct columns and types.
:param table: Expected table to be created
:type table: Table
:param resultset: Actual result set from the database
:type resultset: list[dict[str, Any]]
"""
# Verify that the ID column has been created correctly
result = next((r for r in resultset if r["column_name"] == table.id_column), None)
assert result is not None, "ID column was not created in the table."
assert result["column_type"] == "uuid", "ID column type is incorrect."
# Verify that the content column has been created correctly
result = next(
(r for r in resultset if r["column_name"] == table.content_column), None
)
assert result is not None, "Content column was not created in the table."
assert result["column_type"] == "text", "Content column type is incorrect."
# Verify that the embedding column has been created correctly
result = next(
(r for r in resultset if r["column_name"] == table.embedding_column), None
)
assert result is not None, "Embedding column was not created in the table."
embedding_column_type = result["column_type"]
pattern = re.compile(r"(?P<type>\w+)(?:\((?P<dim>\d+)\))?")
m = pattern.match(embedding_column_type if embedding_column_type else "")
parsed_type: str | None = m.group("type") if m else None
parsed_dim: PositiveInt | None = (
PositiveInt(m.group("dim")) if m and m.group("dim") else None
)
assert parsed_type == table.embedding_type.value, (
"Embedding column type is incorrect."
)
assert parsed_dim == table.embedding_dimension, (
"Embedding column dimension is incorrect."
)
# Verify that metadata column have been created correctly
result = next(
(r for r in resultset if r["column_name"] == table.metadata_column), None
)
assert result is not None, (
f"Metadata column '{table.metadata_column}' was not created in the table."
)
class TestAzurePGVectorStore:
"""Integration tests for the AzurePGVectorStore implementation.
Covers table creation, initialization via parameters, CRUD operations,
and similarity queries against seeded data in the test database.
"""
def test_table_creation_success(
self, vectorstore: AzurePGVectorStore, table: Table
):
"""Verify the database table is created with the expected columns."""
with (
vectorstore.connection_pool.connection() as conn,
conn.cursor(row_factory=dict_row) as cursor,
):
cursor.execute(
_GET_TABLE_COLUMNS_AND_TYPES,
{
"schema_name": table.schema_name,
"table_name": table.table_name,
},
)
resultset = cursor.fetchall()
verify_table_created(table, resultset)
def test_vectorstore_initialization_from_params(
self,
connection_pool: ConnectionPool,
schema: str,
):
"""Create a store using class factory `from_params` and assert type."""
table_name = "vs_init_from_params"
embedding_dimension = 3
diskann = DiskANN(
op_class="vector_cosine_ops",
max_neighbors=32,
l_value_ib=100,
l_value_is=100,
)
vectorstore = AzurePGVectorStore.from_params(
connection_pool=connection_pool,
schema_name=schema,
table_name=table_name,
embed_dim=embedding_dimension,
embedding_index=diskann,
)
assert isinstance(vectorstore, AzurePGVectorStore)
def test_get_nodes(
self,
vectorstore: AzurePGVectorStore,
):
"""Retrieve all nodes and assert expected seeded node count."""
in_nodes = vectorstore.get_nodes()
assert len(in_nodes) == 4, "Retrieved node count does not match expected"
@pytest.mark.parametrize(
["node_tuple", "expected"],
[
("node-success", nullcontext(AzurePGVectorStore)),
("node-not-found", pytest.raises(IndexError)),
],
indirect=["node_tuple"],
ids=[
"success",
"not-found",
],
)
def test_get_nodes_with_ids(
self,
vectorstore: AzurePGVectorStore,
node_tuple: tuple[TextNode, str | None],
expected: nullcontext[AzurePGVectorStore] | pytest.RaisesExc,
):
"""Retrieve nodes by ID and validate returned node matches expected."""
node, expected_node_id = node_tuple
in_nodes = vectorstore.get_nodes([node.node_id])
with expected:
assert expected_node_id == in_nodes[0].node_id, (
"Retrieved node ID does not match expected"
)
@pytest.mark.parametrize(
["node_tuple", "expected"],
[
("node-success", nullcontext(AzurePGVectorStore)),
# ("node-failure", pytest.raises(AssertionError)),
],
indirect=["node_tuple"],
ids=[
"success",
# "failure",
],
)
def test_add(
self,
vectorstore: AzurePGVectorStore,
node_tuple: tuple[TextNode, str | None],
expected: nullcontext[AzurePGVectorStore] | pytest.RaisesExc,
):
"""Add a node to the store and assert the returned ID matches."""
node, expected_node_id = node_tuple
with expected:
assert node.node_id is not None, "Node ID must be provided for this test"
returned_ids = vectorstore.add([node])
assert returned_ids[0] == expected_node_id, "Inserted text IDs do not match"
@pytest.mark.parametrize(
["doc_id"],
[
("1",),
("10",),
],
ids=["existing", "non-existing"],
)
def test_delete(
self,
vectorstore: AzurePGVectorStore,
doc_id: str,
):
"""Delete a node by reference doc id and assert it was removed."""
vectorstore.delete(doc_id)
with (
vectorstore.connection_pool.connection() as conn,
conn.cursor(row_factory=dict_row) as cursor,
):
cursor.execute(
sql.SQL(
"""
select {metadata} ->> 'doc_id' as doc_id
from {table_name}
"""
).format(
metadata=sql.Identifier(vectorstore.metadata_columns),
table_name=sql.Identifier(
vectorstore.schema_name, vectorstore.table_name
),
)
)
resultset = cursor.fetchall()
remaining_set = set(str(r["doc_id"]) for r in resultset)
assert doc_id not in remaining_set, (
"Deleted document IDs should not exist in the remaining set"
)
@pytest.mark.parametrize(
["node_tuple"],
[
("node-success",),
("node-not-found",),
],
indirect=["node_tuple"],
ids=[
"success",
"not-found",
],
)
def test_delete_nodes(
self,
vectorstore: AzurePGVectorStore,
node_tuple: tuple[TextNode, str | None],
):
"""Delete a list of node IDs and assert they are removed from the table."""
node, expected_node_id = node_tuple
vectorstore.delete_nodes([node.node_id])
with (
vectorstore.connection_pool.connection() as conn,
conn.cursor(row_factory=dict_row) as cursor,
):
cursor.execute(
sql.SQL(
"""
select {id_column} as node_id
from {table_name}
"""
).format(
id_column=sql.Identifier(vectorstore.id_column),
table_name=sql.Identifier(
vectorstore.schema_name, vectorstore.table_name
),
)
)
resultset = cursor.fetchall()
remaining_set = set(str(r["node_id"]) for r in resultset)
assert expected_node_id not in remaining_set, (
"Deleted document IDs should not exist in the remaining set"
)
def test_clear(
self,
vectorstore: AzurePGVectorStore,
):
"""Clear all nodes from the underlying table and verify none remain."""
vectorstore.clear()
with (
vectorstore.connection_pool.connection() as conn,
conn.cursor(row_factory=dict_row) as cursor,
):
cursor.execute(
sql.SQL(
"""
select {id_column} as node_id
from {table_name}
"""
).format(
id_column=sql.Identifier(vectorstore.id_column),
table_name=sql.Identifier(
vectorstore.schema_name, vectorstore.table_name
),
)
)
resultset = cursor.fetchall()
remaining_set = set(str(r["node_id"]) for r in resultset)
assert not remaining_set, "All document IDs should have been deleted"
@pytest.mark.parametrize(
["query", "embedding", "k", "filters", "mode"],
[
("query about cats", [0.99] * 1536, 2, None, None),
("query about cats", [0.99] * 1536, 2, None, "hybrid"),
("query about animals", [0.5] * 1536, 3, None, None),
("query about cats", [0.99] * 1536, 2, "filter1", None),
("query about cats", [0.99] * 1536, 2, "filter2", None),
],
indirect=["filters"],
ids=[
"search-cats",
"search-cats-hybrid",
"search-animals",
"search-cats-filtered",
"search-cats-multifiltered",
],
)
def test_query(
self,
vectorstore: AzurePGVectorStore,
query: str,
embedding: list[float],
k: int,
filters: MetadataFilters | None,
mode: str | None,
):
"""Run a similarity query and assert returned documents match expectations.
Tests multiple query types (cats/animals) and optional metadata
filters to ensure the vector search returns relevant documents and
that filtering works as intended.
"""
vsquery = VectorStoreQuery(
query_str=query,
query_embedding=embedding,
similarity_top_k=k,
filters=filters,
mode=(
VectorStoreQueryMode.HYBRID
if mode == "hybrid"
else VectorStoreQueryMode.DEFAULT
),
)
results = vectorstore.query(query=vsquery)
results = results.nodes
contents = [row.get_content() for row in results]
if ("cats" in query) or ("animals" in query):
assert len(results) == k, f"Expected {k} results"
assert any("cats" in c for c in contents) or any(
"tigers" in c for c in contents
), (
f"Expected 'cats' or 'tigers' in retrieved documents' contents for query: {query}"
)
if "cats" in query:
assert all("dogs" not in c for c in contents), (
f"Expected 'dogs' not to be in retrieved documents' contents for query: {query}"
)
elif "animals" in query:
assert any("dogs" in c for c in contents), (
f"Expected 'dogs' to be in retrieved documents' contents for query: {query}"
)
assert all("plants" not in c for c in contents), (
f"Expected 'plants' not to be in retrieved documents' contents for query: {query}"
)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/tests/llama_index/test_vectorstore.py",
"license": "MIT License",
"lines": 347,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-dev/llama_dev/release/check.py | import json
import subprocess
import urllib.request
from pathlib import Path
import click
import tomli
from packaging.version import parse as parse_version
def _get_current_branch_name() -> str:
return (
subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
.decode("utf-8")
.strip()
)
def _get_version_from_pyproject(repo_root: Path) -> str:
with open(repo_root / "llama-index-core" / "pyproject.toml", "rb") as f:
pyproject_data = tomli.load(f)
return pyproject_data["project"]["version"]
def _get_version_from_pypi() -> str:
try:
url = "https://pypi.org/pypi/llama-index-core/json"
with urllib.request.urlopen(url, timeout=10) as response:
data = json.load(response)
return data["info"]["version"]
except Exception as e:
raise click.ClickException(
f"Failed to fetch llama-index-core version from PyPI: {e}"
)
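# Illustrative note (editor-added): parse_version implements PEP 440 ordering,
# so pre-releases compare below their corresponding final release.
def _example_version_ordering() -> None:
    assert parse_version("0.11.0") > parse_version("0.10.9")
    assert parse_version("0.11.0rc1") < parse_version("0.11.0")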
@click.command(short_help="Check if requisites for the release are satisfied")
@click.option(
"--before-core",
is_flag=True,
help="Run the check during pre-release (before releasing llama-index-core)",
default=False,
)
@click.pass_obj
def check(obj: dict, before_core: bool):
"""
Check if all the requisites for the release are satisfied.
\b
Requisites before releasing llama-index-core (passing --before-core):
- llama-index-core/pyproject.toml is newer than the latest on PyPI
Requisite after llama-index-core was published (without passing --before-core):
- current branch is `main`
- version from llama-index-core/pyproject.toml is the latest on PyPI
""" # noqa
console = obj["console"]
repo_root = obj["repo_root"]
current_branch = _get_current_branch_name()
# Check current branch IS main
if current_branch != "main":
console.print(
"❌ To release 'llama-index' you have to checkout the `main` branch.",
style="error",
)
exit(1)
console.print("✅ You are on the `main` branch.")
if before_core:
# Check llama-index-core version is NEWER than PyPI
pyproject_version = _get_version_from_pyproject(repo_root)
pypi_version = _get_version_from_pypi()
if not parse_version(pyproject_version) > parse_version(pypi_version):
console.print(
f"❌ Version {pyproject_version} is not newer than the latest on PyPI ({pypi_version}).",
style="error",
)
exit(1)
console.print(
f"✅ Version {pyproject_version} is newer than the latest on PyPI ({pypi_version})."
)
else:
# Check llama-index-core version is SAME as PyPI
pyproject_version = _get_version_from_pyproject(repo_root)
pypi_version = _get_version_from_pypi()
        if parse_version(pyproject_version) != parse_version(pypi_version):
            console.print(
                f"❌ Version {pyproject_version} is not the latest on PyPI ({pypi_version}).",
                style="error",
            )
            exit(1)
        console.print(f"✅ Version {pyproject_version} is the latest on PyPI.")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-dev/llama_dev/release/check.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-dev/tests/release/test_check.py | import json
from unittest import mock
import click
import pytest
from llama_dev.cli import cli
from llama_dev.release.check import (
_get_current_branch_name,
_get_version_from_pypi,
_get_version_from_pyproject,
check,
)
def test_get_current_branch_name():
with mock.patch("subprocess.check_output", return_value=b"my-branch\n"):
assert _get_current_branch_name() == "my-branch"
def test_get_version_from_pyproject(tmp_path):
core_path = tmp_path / "llama-index-core"
core_path.mkdir()
pyproject_content = """
[project]
version = \"1.2.3\"
"""
(core_path / "pyproject.toml").write_text(pyproject_content)
assert _get_version_from_pyproject(tmp_path) == "1.2.3"
def test_get_version_from_pypi():
with mock.patch("urllib.request.urlopen") as mock_urlopen:
mock_response = mock.MagicMock()
mock_response.read.return_value = json.dumps(
{"info": {"version": "1.2.3"}}
).encode("utf-8")
mock_urlopen.return_value.__enter__.return_value = mock_response
assert _get_version_from_pypi() == "1.2.3"
def test_get_version_from_pypi_error():
with mock.patch("urllib.request.urlopen", side_effect=Exception("test error")):
with pytest.raises(click.ClickException):
_get_version_from_pypi()
@pytest.mark.parametrize(
(
"test_id",
"branch_name",
"pyproject_version",
"init_version",
"pypi_version",
"should_pass",
"expected_message",
),
[
(
"fail",
"my-release-branch",
"0.1.1",
"0.1.1",
"0.1.0",
False,
[
"❌ To release 'llama-index' you have to checkout the `main` branch.",
],
),
(
"on_main",
"main",
"0.1.1",
"0.1.1",
"0.1.0",
True,
[
"✅ You are on the `main` branch.",
"✅ Version 0.1.1 is newer than the latest on PyPI (0.1.0).",
],
),
(
"not_newer",
"main",
"0.1.0",
"0.1.0",
"0.1.0",
False,
[
"❌ Version 0.1.0 is not newer than the latest on PyPI (0.1.0).",
],
),
],
)
def test_check_command(
mock_rich_console,
test_id,
branch_name,
pyproject_version,
init_version,
pypi_version,
should_pass,
expected_message,
):
with (
mock.patch(
"llama_dev.release.check._get_current_branch_name", return_value=branch_name
),
mock.patch(
"llama_dev.release.check._get_version_from_pyproject",
return_value=pyproject_version,
),
mock.patch(
"llama_dev.release.check._get_version_from_pypi", return_value=pypi_version
),
):
ctx = click.Context(cli)
ctx.obj = {"console": mock_rich_console, "repo_root": ""}
if should_pass:
ctx.invoke(check, before_core=True)
# print messages from console
for msg in expected_message:
mock_rich_console.print.assert_any_call(msg)
else:
with pytest.raises(SystemExit) as e:
ctx.invoke(check, before_core=True)
assert e.type is SystemExit
assert e.value.code == 1
for msg in expected_message:
mock_rich_console.print.assert_any_call(msg, style="error")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-dev/tests/release/test_check.py",
"license": "MIT License",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-baseten/tests/test_baseten_dynamic.py | #!/usr/bin/env python3
"""
Test script for Baseten LLM dynamic validation implementation.
This demonstrates the new dynamic model validation pattern adapted from NVIDIA.
"""
import os
import sys
# Add the Baseten LLM integration to the path
sys.path.insert(
0,
"/Users/alexker/code/llama_index/llama-index-integrations/llms/llama-index-llms-baseten",
)
def test_baseten_dynamic_validation():
"""Test the dynamic validation features."""
print("🧪 Testing Baseten Dynamic Validation Implementation")
print("=" * 60)
try:
from llama_index.llms.baseten import Baseten
from llama_index.llms.baseten.utils import Model, get_supported_models
print("✅ Successfully imported Baseten with dynamic validation")
print()
# Test 1: Static model list (existing functionality)
print("📋 Test 1: Static Model List")
static_models = get_supported_models()
print(f" Found {len(static_models)} static models:")
for i, model in enumerate(static_models[:5]): # Show first 5
print(f" {i + 1}. {model}")
if len(static_models) > 5:
print(f" ... and {len(static_models) - 5} more")
print()
# Test 2: Model class functionality
print("🔧 Test 2: Model Class")
test_model = Model(id="test-model")
print(
f" Created model: {test_model.id} (type: {test_model.model_type}, client: {test_model.client})"
)
print()
# Test 3: Dynamic validation with API key
print("🔑 Test 3: Dynamic Validation")
if os.getenv("BASETEN_API_KEY"):
print(" API key found - testing live dynamic validation")
try:
# Use a known valid model from the static list
valid_model = static_models[0]
print(f" Creating Baseten LLM with model: {valid_model}")
llm = Baseten(model_id=valid_model, model_apis=True)
print(" ✅ Successfully created Baseten LLM with dynamic validation")
# Test available_models property
print(" 📡 Testing available_models property...")
try:
available = llm.available_models
print(f" ✅ Fetched {len(available)} models dynamically")
print(f" First few available models:")
for i, model in enumerate(available[:3]):
print(f" {i + 1}. {model.id}")
# Compare static vs dynamic
dynamic_ids = {model.id for model in available}
static_ids = set(static_models)
if dynamic_ids != static_ids:
print(" 📊 Differences between static and dynamic lists:")
only_dynamic = dynamic_ids - static_ids
only_static = static_ids - dynamic_ids
if only_dynamic:
print(
f" New models (dynamic only): {list(only_dynamic)[:3]}"
)
if only_static:
print(
f" Removed models (static only): {list(only_static)[:3]}"
)
else:
print(" 📊 Static and dynamic lists match perfectly")
except Exception as e:
print(f" ⚠️ Dynamic model fetching failed (using fallback): {e}")
except Exception as e:
print(f" ❌ Failed to create Baseten LLM: {e}")
else:
print(" ⚠️ No BASETEN_API_KEY found - skipping live API tests")
print(
" Set BASETEN_API_KEY environment variable to test dynamic validation"
)
print()
# Test 4: Error handling with invalid model
print("🚫 Test 4: Error Handling")
print(" Testing with invalid model name...")
try:
llm = Baseten(model_id="invalid-model-name-12345", model_apis=True)
print(" ❌ Should have failed with invalid model")
except ValueError as e:
error_msg = str(e)
print(f" ✅ Correctly caught validation error")
print(f" Error message: {error_msg[:80]}...")
# Check if error message includes suggestions
if "Did you mean" in error_msg or "Available models" in error_msg:
print(" ✅ Error message includes helpful suggestions")
else:
print(" ⚠️ Error message could be more helpful")
except Exception as e:
print(f" ⚠️ Unexpected error type: {type(e).__name__}: {e}")
print()
# Test 5: Dedicated deployment mode (no dynamic validation)
print("🏗️ Test 5: Dedicated Deployment Mode")
print(" Testing with model_apis=False (dedicated deployment)...")
try:
dedicated_llm = Baseten(model_id="12345678", model_apis=False)
print(" ✅ Successfully created dedicated deployment LLM (no validation)")
available_dedicated = dedicated_llm.available_models
print(
f" Available models for dedicated: {len(available_dedicated)} models"
)
if available_dedicated:
print(f" Models: {[m.id for m in available_dedicated]}")
except Exception as e:
print(f" ❌ Failed to create dedicated LLM: {e}")
print()
print("🎉 All tests completed!")
print("=" * 60)
except ImportError as e:
print(f"❌ Import error: {e}")
print("Make sure you're running this from the llama_index directory")
print("And that you have the necessary dependencies installed")
except Exception as e:
print(f"❌ Unexpected error: {e}")
import traceback
traceback.print_exc()
def print_usage():
"""Print usage instructions."""
print("📖 How to run this test:")
print()
print("1. Set up your environment:")
print(" export BASETEN_API_KEY='your-api-key-here'")
print()
print("2. Run the test:")
print(" cd /Users/alexker/code/llama_index")
print(" python test_baseten_dynamic.py")
print()
print("3. Optional: Run without API key (limited testing):")
print(" python test_baseten_dynamic.py --no-api")
print()
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] in ["--help", "-h"]:
print_usage()
else:
test_baseten_dynamic_validation()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-baseten/tests/test_baseten_dynamic.py",
"license": "MIT License",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-baseten/tests/test_coverage_comprehensive.py | #!/usr/bin/env python3
"""
Comprehensive test coverage for Baseten dynamic validation functions.
This file ensures all lines in utils.py and base.py are covered.
"""
import sys
import warnings
from unittest.mock import Mock, patch
# Add the Baseten LLM integration to the path
sys.path.insert(
0,
"/Users/alexker/code/llama_index/llama-index-integrations/llms/llama-index-llms-baseten",
)
from llama_index.llms.baseten.utils import (
Model,
validate_model_dynamic,
get_available_models_dynamic,
validate_model_slug,
SUPPORTED_MODEL_SLUGS,
)
from llama_index.llms.baseten.base import Baseten
def test_model_class():
"""Test the Model class comprehensively."""
print("Testing Model class...")
# Test basic creation
model = Model(id="test-model")
assert model.id == "test-model"
assert model.model_type == "chat"
assert model.client == "Baseten"
# Test with custom values
model2 = Model(id="custom-model", model_type="completion", client="Custom")
assert model2.id == "custom-model"
assert model2.model_type == "completion"
assert model2.client == "Custom"
# Test hash functionality
model3 = Model(id="test-model")
assert hash(model) == hash(model3)
# Test that models can be used in sets
model_set = {model, model2, model3}
assert len(model_set) == 2 # model and model3 are the same
print("✅ Model class tests passed")
def test_get_available_models_dynamic():
"""Test the get_available_models_dynamic function comprehensively."""
print("Testing get_available_models_dynamic...")
# Test successful API call
mock_client = Mock()
mock_model1 = Mock()
mock_model1.id = "model-1"
mock_model2 = Mock()
mock_model2.id = "model-2"
mock_response = Mock()
mock_response.data = [mock_model1, mock_model2]
mock_client.models.list.return_value = mock_response
result = get_available_models_dynamic(mock_client)
assert len(result) == 2
assert result[0].id == "model-1"
assert result[1].id == "model-2"
assert all(isinstance(model, Model) for model in result)
# Test API call failure fallback
mock_client.models.list.side_effect = Exception("API Error")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
result = get_available_models_dynamic(mock_client)
assert len(w) == 1
assert "Failed to fetch models dynamically" in str(w[0].message)
# Should return static models
assert len(result) == len(SUPPORTED_MODEL_SLUGS)
assert all(isinstance(model, Model) for model in result)
assert result[0].id == SUPPORTED_MODEL_SLUGS[0]
# Test empty response
mock_client.models.list.side_effect = None
mock_response.data = []
mock_client.models.list.return_value = mock_response
result = get_available_models_dynamic(mock_client)
assert len(result) == 0
print("✅ get_available_models_dynamic tests passed")
def test_validate_model_dynamic():
"""Test the validate_model_dynamic function comprehensively."""
print("Testing validate_model_dynamic...")
# Test valid model success
mock_client = Mock()
mock_model = Mock()
mock_model.id = "valid-model"
mock_response = Mock()
mock_response.data = [mock_model]
mock_client.models.list.return_value = mock_response
# Should not raise any exception
validate_model_dynamic(mock_client, "valid-model")
# Test invalid model with suggestions
mock_model1 = Mock()
mock_model1.id = "deepseek-model"
mock_model2 = Mock()
mock_model2.id = "llama-model"
mock_response.data = [mock_model1, mock_model2]
mock_client.models.list.return_value = mock_response
try:
validate_model_dynamic(mock_client, "deepseek")
raise AssertionError("Should have raised ValueError")
except ValueError as e:
error_msg = str(e)
assert "not found in available models" in error_msg
assert "Did you mean" in error_msg
# Test invalid model without suggestions
mock_model3 = Mock()
mock_model3.id = "completely-different-model"
mock_response.data = [mock_model3]
mock_client.models.list.return_value = mock_response
try:
validate_model_dynamic(mock_client, "totally-unrelated-model")
raise AssertionError("Should have raised ValueError")
except ValueError as e:
error_msg = str(e)
assert "not found in available models" in error_msg
assert "Available models" in error_msg
# Test API failure fallback to static validation
mock_client.models.list.side_effect = Exception("Network error")
# Use a valid static model
valid_static_model = SUPPORTED_MODEL_SLUGS[0]
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
validate_model_dynamic(mock_client, valid_static_model)
assert len(w) == 1
warning_msg = str(w[0].message)
assert "Failed to fetch models dynamically" in warning_msg
# Test API failure with invalid static model
try:
validate_model_dynamic(mock_client, "invalid-static-model")
raise AssertionError("Should have raised ValueError")
except ValueError as e:
error_msg = str(e)
# The error message comes from dynamic validation, not static
assert "not found in available models" in error_msg
# Test validation error re-raise
mock_client.models.list.side_effect = ValueError(
"Model not found in available models"
)
try:
validate_model_dynamic(mock_client, "some-model")
raise AssertionError("Should have raised ValueError")
except ValueError as e:
# The error gets re-raised with a different message format
assert "not found in available models" in str(e)
print("✅ validate_model_dynamic tests passed")
def test_baseten_class():
"""Test the Baseten class dynamic functionality."""
print("Testing Baseten class...")
# Test available_models property with model_apis=True
with patch(
"llama_index.llms.baseten.base.get_available_models_dynamic"
) as mock_get_models:
with patch(
"llama_index.llms.baseten.base.validate_model_dynamic"
) as mock_validate:
with patch(
"llama_index.llms.baseten.base.get_from_param_or_env"
) as mock_get_key:
mock_get_key.return_value = "fake-api-key"
mock_models = [Model(id="model-1"), Model(id="model-2")]
mock_get_models.return_value = mock_models
llm = Baseten(model_id="test-model", model_apis=True)
llm._get_client = Mock()
result = llm.available_models
assert result == mock_models
mock_get_models.assert_called_once()
# Test available_models property with model_apis=False
with patch("llama_index.llms.baseten.base.get_from_param_or_env") as mock_get_key:
mock_get_key.return_value = "fake-api-key"
llm = Baseten(model_id="test-model", model_apis=False)
result = llm.available_models
assert len(result) == 1
assert result[0].id == "test-model"
# Test available_models property with dedicated deployment but no model attribute
with patch("llama_index.llms.baseten.base.get_from_param_or_env") as mock_get_key:
mock_get_key.return_value = "fake-api-key"
llm = Baseten(model_id="test-model", model_apis=False)
delattr(llm, "model")
result = llm.available_models
assert result == []
# Test dynamic validation in constructor
with patch("llama_index.llms.baseten.base.validate_model_dynamic") as mock_validate:
with patch("openai.OpenAI") as mock_client_class:
with patch(
"llama_index.llms.baseten.base.get_from_param_or_env"
) as mock_get_key:
mock_get_key.return_value = "fake-api-key"
mock_client = Mock()
mock_client_class.return_value = mock_client
llm = Baseten(model_id="test-model", model_apis=True)
mock_validate.assert_called_once_with(mock_client, "test-model")
# Test no validation for dedicated deployment
with patch("llama_index.llms.baseten.base.validate_model_dynamic") as mock_validate:
with patch(
"llama_index.llms.baseten.base.get_from_param_or_env"
) as mock_get_key:
mock_get_key.return_value = "fake-api-key"
llm = Baseten(model_id="test-model", model_apis=False)
mock_validate.assert_not_called()
print("✅ Baseten class tests passed")
def test_static_functions():
"""Test static utility functions."""
print("Testing static functions...")
# Test validate_model_slug with valid model
valid_model = SUPPORTED_MODEL_SLUGS[0]
validate_model_slug(valid_model) # Should not raise exception
# Test validate_model_slug with invalid model
try:
validate_model_slug("invalid-model")
raise AssertionError("Should have raised ValueError")
except ValueError as e:
error_msg = str(e)
assert "not supported by Baseten Model APIs" in error_msg
assert "Supported models are:" in error_msg
print("✅ Static functions tests passed")
def main():
"""Run all tests."""
print("🧪 Running Comprehensive Coverage Tests")
print("=" * 50)
try:
test_model_class()
test_get_available_models_dynamic()
test_validate_model_dynamic()
test_baseten_class()
test_static_functions()
print("=" * 50)
print("🎉 All coverage tests passed!")
except Exception as e:
print(f"❌ Test failed: {e}")
import traceback
traceback.print_exc()
return False
return True
if __name__ == "__main__":
success = main()
sys.exit(0 if success else 1)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-baseten/tests/test_coverage_comprehensive.py",
"license": "MIT License",
"lines": 234,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-paddle-ocr/llama_index/readers/paddle_ocr/base.py | import logging
from pathlib import Path
from typing import Dict, List, Optional
import tempfile
import io
from paddleocr import PaddleOCR
import pdfplumber
import fitz # PyMuPDF
from PIL import Image
import re
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PDFPaddleOCRReader(BaseReader):
def __init__(self, use_angle_cls: bool = True, lang: str = "en"):
"""Initialize PaddleOCR with given parameters"""
self.ocr = PaddleOCR(use_angle_cls=use_angle_cls, lang=lang)
def extract_text_from_image(self, image_data):
"""
Extract text from image data using PaddleOCR
"""
try:
# Convert image data to PIL Image
image = Image.open(io.BytesIO(image_data))
# Save temporary image file for PaddleOCR
with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as temp_file:
image.save(temp_file.name)
temp_file_path = temp_file.name
# Use PaddleOCR to recognize text in the image
result = self.ocr.predict(temp_file_path)
# Clean up temporary file
Path(temp_file_path).unlink()
# Extract text from recognition results
extracted_text = ""
for line in result:
for text in line["rec_texts"]:
extracted_text += text + " "
return extracted_text.strip()
except Exception as e:
logging.error(f"Error in image OCR recognition: {e!s}")
return ""
def is_text_meaningful(self, text):
"""
Check if the extracted text is meaningful
"""
if not text or len(text.strip()) < 5:
return False
# Filter out cases that are likely just page numbers
if re.match(r"^\d{1,3}$", text.strip()):
return False
# Filter out cases that are likely just headers or footers
common_footers = ["page", "of", "total", "copyright", "all rights reserved"]
if any(footer in text.lower() for footer in common_footers):
return len(text.strip()) > 10
return True
def extract_page_elements(self, pdf_path, page_num):
"""
Extract all elements (text and images) from a PDF page, maintaining original order
"""
elements = []
try:
# Use pdfplumber to extract text and position information
with pdfplumber.open(pdf_path) as pdf:
if page_num < len(pdf.pages):
page = pdf.pages[page_num]
# Extract text and their positions
words = page.extract_words(keep_blank_chars=True)
for word in words:
elements.append(("text", word["text"], word["top"]))
# Use PyMuPDF to extract images and their positions
doc = fitz.open(pdf_path)
pdf_page = doc.load_page(page_num)
# Get all images in the page
image_list = pdf_page.get_images(full=True)
for img_index, img in enumerate(image_list):
# Extract image
xref = img[0]
base_image = doc.extract_image(xref)
image_bytes = base_image["image"]
# Get image position
image_rects = pdf_page.get_image_rects(xref)
if image_rects:
position = image_rects[0].y0
else:
position = 0
elements.append(("image", image_bytes, position))
doc.close()
# Sort elements by position (top to bottom)
elements.sort(key=lambda x: x[2])
except Exception as e:
logging.error(f"Error occurred while extracting page elements: {e!s}")
return elements
def load_data(
self, file_path: Path, extra_info: Optional[Dict] = None
) -> List[Document]:
"""Load data from PDF using PaddleOCR for image content"""
documents = []
file_path = Path(file_path)
try:
# Use PyMuPDF to get the total number of pages
doc = fitz.open(file_path)
total_pages = len(doc)
doc.close()
# Process each page
for page_num in range(total_pages):
logging.info(f"Processing page {page_num + 1}/{total_pages}...")
# Extract all elements from the page (sorted by position)
elements = self.extract_page_elements(file_path, page_num)
page_text = ""
for element_type, content, position in elements:
if element_type == "text":
# Directly add text
if self.is_text_meaningful(content):
page_text += f"[Text Content]: {content} "
elif element_type == "image":
# Perform OCR on the image
ocr_text = self.extract_text_from_image(content)
if ocr_text and self.is_text_meaningful(ocr_text):
page_text += f"[Image Content]: {ocr_text} "
# Create Document object and add page number as metadata
if page_text.strip():
metadata = {"page": page_num + 1, "source": str(file_path)}
if extra_info:
metadata.update(extra_info)
document = Document(text=page_text.strip(), metadata=metadata)
documents.append(document)
except Exception as e:
logging.error(f"Error occurred while reading PDF: {e!s}")
# Return a Document containing error information
error_doc = Document(
text=f"Error occurred while reading PDF: {e!s}",
metadata={"source": str(file_path), "error": True},
)
return [error_doc]
return documents
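# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the reader): builds the reader
# and prints a snippet of each page document. "example.pdf" is a placeholder
# path, and a working local PaddleOCR model setup is assumed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    reader = PDFPaddleOCRReader(use_angle_cls=True, lang="en")
    docs = reader.load_data(Path("example.pdf"), extra_info={"tag": "demo"})
    for doc in docs:
        print(doc.metadata.get("page"), doc.text[:80])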
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-paddle-ocr/llama_index/readers/paddle_ocr/base.py",
"license": "MIT License",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-paddle-ocr/tests/test_readers_paddle_ocr.py | import unittest
from unittest.mock import Mock, patch, ANY
import io
from pathlib import Path
from llama_index.readers.paddle_ocr import PDFPaddleOCRReader
from llama_index.core.schema import Document
from llama_index.core.readers.base import BaseReader
class TestPDFPaddleOcrReader(unittest.TestCase):
"""Test suite for PDFPaddleOCRReader class"""
def setUp(self):
"""Set up test fixtures"""
self.reader = PDFPaddleOCRReader(lang="en")
def test_class(self):
names_of_base_classes = [b.__name__ for b in PDFPaddleOCRReader.__mro__]
assert BaseReader.__name__ in names_of_base_classes
@patch("PIL.Image.open")
@patch("tempfile.NamedTemporaryFile")
@patch("pathlib.Path.unlink")
def test_extract_text_from_image_success(
self, mock_unlink, mock_tempfile, mock_image_open
):
"""Test successful text extraction from image"""
# Mock image data
mock_image = Mock()
mock_image_open.return_value = mock_image
# Mock temporary file
mock_temp = Mock()
mock_temp.name = "/tmp/temp.png"
mock_temp.__enter__ = Mock(return_value=mock_temp)
mock_temp.__exit__ = Mock(return_value=None)
mock_tempfile.return_value = mock_temp
# Mock PaddleOCR result
mock_ocr_result = [
{"rec_texts": ["Hello", "World"]},
{"rec_texts": ["Test", "Text"]},
]
self.reader.ocr.predict = Mock(return_value=mock_ocr_result)
# Call method
image_data = b"fake_image_data"
result = self.reader.extract_text_from_image(image_data)
# Assertions
print(f"Result: '{result}'")
print(f"Expected: 'Hello World Test Text'")
self.assertEqual(result, "Hello World Test Text")
# Check if mock_image_open was called once with a BytesIO type argument
mock_image_open.assert_called_once_with(ANY)
args, _ = mock_image_open.call_args
self.assertIsInstance(args[0], io.BytesIO)
# Check if the BytesIO object contains the correct data
args[0].seek(0)
self.assertEqual(args[0].read(), image_data)
mock_image.save.assert_called_once_with("/tmp/temp.png")
self.reader.ocr.predict.assert_called_once_with("/tmp/temp.png")
mock_unlink.assert_called_once()
@patch("PIL.Image.open")
def test_extract_text_from_image_failure(self, mock_image_open):
"""Test text extraction from image when an exception occurs"""
# Mock an exception
mock_image_open.side_effect = Exception("Image open failed")
# Call method
image_data = b"fake_image_data"
result = self.reader.extract_text_from_image(image_data)
# Assertions
assert result == ""
def test_is_text_meaningful_empty_text(self):
"""Test is_text_meaningful with empty text"""
assert not self.reader.is_text_meaningful("")
assert not self.reader.is_text_meaningful(" ")
assert not self.reader.is_text_meaningful(None)
def test_is_text_meaningful_short_text(self):
"""Test is_text_meaningful with short text"""
assert not self.reader.is_text_meaningful("a")
assert not self.reader.is_text_meaningful("ab")
assert not self.reader.is_text_meaningful("abc")
assert not self.reader.is_text_meaningful("abcd")
def test_is_text_meaningful_page_number(self):
"""Test is_text_meaningful with page numbers"""
assert not self.reader.is_text_meaningful("1")
assert not self.reader.is_text_meaningful("10")
assert not self.reader.is_text_meaningful("100")
assert not self.reader.is_text_meaningful(" 100 ")
def test_is_text_meaningful_footer_text(self):
"""Test is_text_meaningful with footer text"""
# Short footer text should be filtered
assert not self.reader.is_text_meaningful("page 1")
assert not self.reader.is_text_meaningful("copyright")
# Longer footer text should be kept
assert self.reader.is_text_meaningful("copyright 2023 by some company")
def test_is_text_meaningful_meaningful_text(self):
"""Test is_text_meaningful with meaningful text"""
assert self.reader.is_text_meaningful("This is a meaningful sentence.")
assert self.reader.is_text_meaningful("Deep learning models")
assert self.reader.is_text_meaningful("Variational autoencoder")
@patch("pdfplumber.open")
@patch("fitz.open")
def test_extract_page_elements_success(self, mock_fitz_open, mock_pdfplumber_open):
"""Test successful extraction of page elements"""
# Mock pdfplumber
mock_pdf = Mock()
mock_page = Mock()
mock_page.extract_words.return_value = [
{"text": "Hello", "top": 100},
{"text": "World", "top": 120},
]
mock_pdf.pages = [mock_page]
mock_pdfplumber_open.return_value.__enter__ = Mock(return_value=mock_pdf)
mock_pdfplumber_open.return_value.__exit__ = Mock(return_value=None)
# Mock PyMuPDF
mock_doc = Mock()
mock_pdf_page = Mock()
mock_pdf_page.get_images.return_value = [(1,)]
mock_pdf_page.get_image_rects.return_value = [Mock(y0=150)]
mock_doc.load_page.return_value = mock_pdf_page
mock_doc.extract_image.return_value = {"image": b"fake_image_data"}
mock_fitz_open.return_value = mock_doc
# Call method
pdf_path = "/fake/path.pdf"
result = self.reader.extract_page_elements(pdf_path, 0)
# Assertions
assert len(result) == 3 # 2 text elements + 1 image element
assert result[0] == ("text", "Hello", 100)
assert result[1] == ("text", "World", 120)
assert result[2][0] == "image"
assert result[2][1] == b"fake_image_data"
assert result[2][2] == 150
# Verify mocks were called correctly
mock_pdfplumber_open.assert_called_once_with(pdf_path)
mock_fitz_open.assert_called_once_with(pdf_path)
mock_page.extract_words.assert_called_once_with(keep_blank_chars=True)
mock_pdf_page.get_images.assert_called_once_with(full=True)
@patch("pdfplumber.open")
@patch("fitz.open")
def test_extract_page_elements_exception(
self, mock_fitz_open, mock_pdfplumber_open
):
"""Test extract_page_elements when an exception occurs"""
# Mock an exception
mock_pdfplumber_open.side_effect = Exception("PDF open failed")
# Call method
pdf_path = "/fake/path.pdf"
result = self.reader.extract_page_elements(pdf_path, 0)
# Assertions
assert result == []
@patch("pdfplumber.open")
@patch("fitz.open")
def test_extract_page_elements_no_images(
self, mock_fitz_open, mock_pdfplumber_open
):
"""Test extract_page_elements when there are no images"""
# Mock pdfplumber
mock_pdf = Mock()
mock_page = Mock()
mock_page.extract_words.return_value = [
{"text": "Hello", "top": 100},
{"text": "World", "top": 120},
]
mock_pdf.pages = [mock_page]
mock_pdfplumber_open.return_value.__enter__ = Mock(return_value=mock_pdf)
mock_pdfplumber_open.return_value.__exit__ = Mock(return_value=None)
# Mock PyMuPDF with no images
mock_doc = Mock()
mock_pdf_page = Mock()
mock_pdf_page.get_images.return_value = [] # No images
mock_doc.load_page.return_value = mock_pdf_page
mock_fitz_open.return_value = mock_doc
# Call method
pdf_path = "/fake/path.pdf"
result = self.reader.extract_page_elements(pdf_path, 0)
# Assertions
assert len(result) == 2 # Only text elements
assert result[0] == ("text", "Hello", 100)
assert result[1] == ("text", "World", 120)
@patch.object(PDFPaddleOCRReader, "extract_page_elements")
@patch("fitz.open")
def test_load_data_success(self, mock_fitz_open, mock_extract_page_elements):
"""Test successful loading of data from PDF"""
# Mock PyMuPDF
mock_doc = Mock()
mock_doc.__len__ = Mock(return_value=1) # 1 page
mock_fitz_open.return_value = mock_doc
# Mock extract_page_elements to return meaningful elements
mock_extract_page_elements.return_value = [
("text", "Hello", 100),
("image", b"fake_image_data", 150),
]
# Mock extract_text_from_image and is_text_meaningful
self.reader.extract_text_from_image = Mock(return_value="Extracted text")
self.reader.is_text_meaningful = Mock(side_effect=lambda x: len(x) > 3)
# Call method
pdf_path = "/fake/path.pdf"
result = self.reader.load_data(pdf_path)
# Assertions
self.assertEqual(len(result), 1) # One document
self.assertIsInstance(result[0], Document)
self.assertIn("Hello", result[0].text)
self.assertIn("Extracted text", result[0].text)
self.assertEqual(result[0].metadata["page"], 1)
# Use Path object for path comparison to avoid OS differences
self.assertEqual(Path(result[0].metadata["source"]), Path(pdf_path))
# Verify mocks were called correctly - accept Path object
mock_fitz_open.assert_called_once_with(ANY)
args, _ = mock_fitz_open.call_args
self.assertIsInstance(args[0], (str, Path)) # Allow string or Path object
# Convert to string for comparison
path_str = str(args[0])
self.assertTrue(path_str.replace("\\", "/").endswith("/fake/path.pdf"))
mock_extract_page_elements.assert_called_once_with(ANY, 0)
args, _ = mock_extract_page_elements.call_args
self.assertIsInstance(args[0], (str, Path)) # Allow string or Path object
# Convert to string for comparison
path_str = str(args[0])
self.assertTrue(path_str.replace("\\", "/").endswith("/fake/path.pdf"))
self.reader.extract_text_from_image.assert_called_once_with(b"fake_image_data")
@patch.object(PDFPaddleOCRReader, "extract_page_elements")
@patch("fitz.open")
def test_load_data_no_meaningful_text(
self, mock_fitz_open, mock_extract_page_elements
):
"""Test load_data when no meaningful text is found"""
# Mock PyMuPDF
mock_doc = Mock()
mock_doc.__len__ = Mock(return_value=1) # 1 page
mock_fitz_open.return_value = mock_doc
# Mock extract_page_elements to return only non-meaningful elements
mock_extract_page_elements.return_value = [
("text", "1", 100), # Page number (not meaningful)
("text", "copyright", 150), # Footer (not meaningful)
]
# Mock is_text_meaningful to return False for all text
self.reader.is_text_meaningful = Mock(return_value=False)
# Call method
pdf_path = "/fake/path.pdf"
result = self.reader.load_data(pdf_path)
# Assertions
assert len(result) == 0 # No documents created
@patch("fitz.open")
def test_load_data_exception(self, mock_fitz_open):
"""Test load_data when an exception occurs"""
# Mock an exception
mock_fitz_open.side_effect = Exception("PDF open failed")
# Call method
pdf_path = "/fake/path.pdf"
result = self.reader.load_data(pdf_path)
# Assertions
assert len(result) == 1
assert isinstance(result[0], Document)
assert "Error occurred while reading PDF" in result[0].text
assert result[0].metadata["error"] is True
@patch("fitz.open")
def test_load_data_with_extra_info(self, mock_fitz_open):
"""Test load_data with extra_info parameter"""
# Mock PyMuPDF
mock_doc = Mock()
mock_doc.__len__ = Mock(return_value=1) # 1 page
mock_fitz_open.return_value = mock_doc
# Mock extract_page_elements to return meaningful text
self.reader.extract_page_elements = Mock(return_value=[("text", "Hello", 100)])
self.reader.is_text_meaningful = Mock(return_value=True)
# Call method with extra_info
pdf_path = "/fake/path.pdf"
extra_info = {"author": "Test Author", "title": "Test Title"}
result = self.reader.load_data(pdf_path, extra_info=extra_info)
# Assertions
self.assertEqual(len(result), 1)
self.assertEqual(result[0].metadata["author"], "Test Author")
self.assertEqual(result[0].metadata["title"], "Test Title")
self.assertEqual(result[0].metadata["page"], 1)
# Use Path object for path comparison to avoid OS differences
self.assertEqual(Path(result[0].metadata["source"]), Path(pdf_path))
def test_load_data_invalid_path(self):
"""Test load_data with invalid file path"""
# Call method with non-existent path
pdf_path = "/non/existent/path.pdf"
result = self.reader.load_data(pdf_path)
# Assertions
assert len(result) == 1
assert "Error occurred while reading PDF" in result[0].text
assert result[0].metadata["error"] is True
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-paddle-ocr/tests/test_readers_paddle_ocr.py",
"license": "MIT License",
"lines": 278,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/retrievers/llama-index-retrievers-alletra-x10000/llama_index/retrievers/alletra_x10000_retriever/base.py | # Copyright Hewlett Packard Enterprise Development LP.
from pydi_client import DIClient
from typing import Any, Dict, Union
from llama_index.core.retrievers import BaseRetriever
from llama_index.core.schema import QueryBundle, NodeWithScore, TextNode
class AlletraX10000Retriever(BaseRetriever):
def __init__(
self,
uri: str,
s3_access_key: str,
s3_secret_key: str,
collection_name: str,
search_config: Union[Any, Dict[str, Any]] = None,
top_k: int = 5,
):
        super().__init__()
        self.uri = uri
self.top_k = top_k
self.collection_name = collection_name
self.access_key = s3_access_key
self.secret_key = s3_secret_key
self.search_config = search_config
def _retrieve(self, query_bundle: QueryBundle) -> list[NodeWithScore]:
query = query_bundle.query_str
client = DIClient(uri=self.uri)
data = client.similarity_search(
collection_name=self.collection_name,
query=query,
top_k=self.top_k,
access_key=self.access_key,
secret_key=self.secret_key,
search_parameters=self.search_config,
)
nodes = []
for item in data:
nodes.append(
NodeWithScore(
node=TextNode(
text=item["dataChunk"], metadata=item["chunkMetadata"]
),
score=item["score"],
)
)
return nodes
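# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): the endpoint and credential values below
# are placeholders, and a reachable Alletra X10000 data-intelligence endpoint
# is assumed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    retriever = AlletraX10000Retriever(
        uri="http://di-endpoint.example.com",
        s3_access_key="<access-key>",
        s3_secret_key="<secret-key>",
        collection_name="my_collection",
        top_k=3,
    )
    for node_with_score in retriever.retrieve("What is vector search?"):
        print(node_with_score.score, node_with_score.node.text[:80])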
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/retrievers/llama-index-retrievers-alletra-x10000/llama_index/retrievers/alletra_x10000_retriever/base.py",
"license": "MIT License",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/retrievers/llama-index-retrievers-alletra-x10000/tests/test_base.py | # Copyright Hewlett Packard Enterprise Development LP.
import pytest
from unittest.mock import MagicMock
from llama_index.core.schema import QueryBundle, NodeWithScore
from llama_index.retrievers.alletra_x10000_retriever import AlletraX10000Retriever
def test_alletra_x10000_retriever_initialization():
retriever = AlletraX10000Retriever(
uri="http://example.com",
s3_access_key="test_access_key",
s3_secret_key="test_secret_key",
collection_name="test_collection",
search_config={"param1": "value1"},
top_k=5,
)
assert retriever.uri == "http://example.com"
assert retriever.access_key == "test_access_key"
assert retriever.secret_key == "test_secret_key"
assert retriever.collection_name == "test_collection"
assert retriever.search_config == {"param1": "value1"}
assert retriever.top_k == 5
def test_alletra_x10000_retriever_retrieve(mocker):
mock_client = mocker.patch(
"llama_index.retrievers.alletra_x10000_retriever.base.DIClient"
)
    mock_response = [
{
"dataChunk": "chunk1",
"score": 0.9,
"chunkMetadata": {
"objectKey": "value",
"startCharIndex": 1,
"endCharIndex": 2,
"bucketName": "string",
"pageLabel": "string",
"versionId": "string",
},
},
{
"dataChunk": "chunk2",
"score": 0.8,
"chunkMetadata": {
"objectKey": "value",
"startCharIndex": 1,
"endCharIndex": 2,
"bucketName": "string",
"pageLabel": "string",
"versionId": "string",
},
},
]
mock_client.return_value.similarity_search.return_value = mock_response
retriever = AlletraX10000Retriever(
uri="http://example.com",
s3_access_key="test_access_key",
s3_secret_key="test_secret_key",
collection_name="test_collection",
search_config={"param1": "value1"},
top_k=2,
)
query_bundle = QueryBundle(query_str="test query")
result = retriever._retrieve(query_bundle)
assert len(result) == 2
assert isinstance(result[0], NodeWithScore)
assert result[0].node.text == "chunk1"
assert result[0].score == 0.9
assert result[0].node.metadata["objectKey"] == "value"
assert result[1].node.text == "chunk2"
assert result[1].score == 0.8
assert result[1].node.metadata["objectKey"] == "value"
def test_alletra_x10000_retriever_retrieve_empty_response(mocker):
retriever = AlletraX10000Retriever(
uri="http://example.com",
s3_access_key="test_access_key",
s3_secret_key="test_secret_key",
collection_name="test_collection",
search_config={"param1": "value1"},
top_k=2,
)
# Mock the DIClient to return an empty response
mock_client = mocker.patch(
"llama_index.retrievers.alletra_x10000_retriever.base.DIClient"
)
    mock_client.return_value.similarity_search.return_value = []
# Call the method
query_bundle = QueryBundle(query_str="test query")
result = retriever._retrieve(query_bundle)
# Assertions
assert result == []
def test_alletra_x10000_retriever_retrieve_error_handling(mocker):
mock_client = mocker.patch(
"llama_index.retrievers.alletra_x10000_retriever.base.DIClient"
)
mock_client.return_value.similarity_search.side_effect = Exception("Test exception")
retriever = AlletraX10000Retriever(
uri="http://example.com",
s3_access_key="test_access_key",
s3_secret_key="test_secret_key",
collection_name="test_collection",
search_config={"param1": "value1"},
top_k=2,
)
query_bundle = QueryBundle(query_str="test query")
with pytest.raises(Exception, match="Test exception"):
retriever._retrieve(query_bundle)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/retrievers/llama-index-retrievers-alletra-x10000/tests/test_base.py",
"license": "MIT License",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-solr/llama_index/readers/solr/base.py | """
Solr reader over REST API.
"""
from typing import Any, Optional
import pysolr
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class SolrReader(BasePydanticReader):
"""
Read documents from a Solr index.
These documents can then be used in a downstream Llama Index data structure.
"""
endpoint: str = Field(description="Full endpoint, including collection info.")
_client: Any = PrivateAttr()
def __init__(
self,
endpoint: str,
):
"""Initialize with parameters."""
super().__init__(endpoint=endpoint)
self._client = pysolr.Solr(endpoint)
def load_data(
self,
query: dict[str, Any],
field: str,
id_field: str = "id",
metadata_fields: Optional[list[str]] = None,
embedding: Optional[str] = None,
) -> list[Document]:
r"""
Read data from the Solr index. At least one field argument must be specified.
Args:
query (dict): The Solr query parameters.
- "q" is required.
- "rows" should be specified or will default to 10 by Solr.
- If "fl" is provided, it is respected exactly as given.
If "fl" is NOT provided, a default `fl` is constructed from
{id_field, field, embedding?, metadata_fields?}.
field (str): Field in Solr to retrieve as document text.
id_field (str): Field in Solr to retrieve as the document identifier. Defaults to "id".
metadata_fields (list[str], optional): Fields to include as metadata. Defaults to None.
embedding (str, optional): Field to use for embeddings. Defaults to None.
Raises:
ValueError: If the HTTP call to Solr fails.
Returns:
list[Document]: A list of retrieved documents where field is populated.
"""
if "q" not in query:
raise ValueError("Query parameters must include a 'q' field for the query.")
fl_default = {}
if "fl" not in query:
fields = [id_field, field]
if embedding:
fields.append(embedding)
if metadata_fields:
fields.extend(metadata_fields)
fl_default = {"fl": ",".join(fields)}
try:
query_params = {
**query,
**fl_default,
}
results = self._client.search(**query_params)
except Exception as e: # pragma: no cover
raise ValueError(f"Failed to query Solr endpoint: {e!s}") from e
documents: list[Document] = []
for doc in results.docs:
if field not in doc:
continue
doc_kwargs: dict[str, Any] = {
"id_": str(doc[id_field]),
"text": doc[field],
**({"embedding": doc.get(embedding)} if embedding else {}),
"metadata": {
metadata_field: doc[metadata_field]
for metadata_field in (metadata_fields or [])
if metadata_field in doc
},
}
documents.append(Document(**doc_kwargs))
return documents
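# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): the endpoint, field names, and query are
# placeholders and assume a reachable Solr collection with those fields.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    reader = SolrReader(endpoint="http://localhost:8983/solr/collection1")
    docs = reader.load_data(
        query={"q": "*:*", "rows": 5},
        field="content_t",
        metadata_fields=["title_t"],
    )
    for doc in docs:
        print(doc.id_, doc.metadata, doc.get_content()[:80])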
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-solr/llama_index/readers/solr/base.py",
"license": "MIT License",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-solr/tests/test_readers_solr.py | import types
import pytest
import pysolr
from llama_index.core.readers.base import BaseReader
from llama_index.readers.solr import SolrReader
@pytest.fixture(scope="module")
def dummy_endpoint() -> str:
return "http://localhost:8983/solr/collection1"
@pytest.fixture
def mock_solr(mocker) -> pysolr.Solr:
ctor = mocker.patch("llama_index.readers.solr.base.pysolr.Solr", autospec=True)
return ctor.return_value
def test_class() -> None:
names_of_base_classes = [b.__name__ for b in SolrReader.__mro__]
assert BaseReader.__name__ in names_of_base_classes
def test_initialization(mock_solr, dummy_endpoint) -> None:
reader = SolrReader(endpoint=dummy_endpoint)
assert reader._client is mock_solr
def test_load_data_builds_default_fl_and_returns_docs(
mock_solr, dummy_endpoint
) -> None:
mock_solr.search.return_value = types.SimpleNamespace(
docs=[
{
"id": "1",
"content_t": "hello world",
"title_t": "Title",
"vec": [0.1, 0.2],
}
]
)
reader = SolrReader(endpoint=dummy_endpoint)
docs = reader.load_data(
query={"q": "*:*", "rows": 10, "fl": "respected"},
field="content_t",
metadata_fields=["title_t"],
embedding="vec",
)
mock_solr.search.assert_called_once()
assert mock_solr.search.call_args.kwargs["fl"] == "respected"
assert len(docs) == 1
doc = docs[0]
assert doc.id_ == "1"
assert doc.get_content() == "hello world"
assert doc.embedding == [0.1, 0.2]
assert doc.metadata == {"title_t": "Title"}
def test_load_data_constructs_fl_when_missing_and_skips_bad_docs(
mock_solr, dummy_endpoint
) -> None:
mock_solr.search.return_value = types.SimpleNamespace(
docs=[
{
"id": "1",
"title_t": "has title only",
}, # missing content_t, expected to be skipped
{"id": "2", "content_t": "kept"},
]
)
reader = SolrReader(endpoint=dummy_endpoint)
docs = reader.load_data(query={"q": "*:*"}, field="content_t")
called = mock_solr.search.call_args.kwargs
assert called["fl"] == "id,content_t"
assert [d.id_ for d in docs] == ["2"]
assert docs[0].get_content() == "kept"
# Defaults
assert docs[0].embedding is None
assert docs[0].metadata == {}
def test_load_data_custom_id_field_and_numeric_coercion(
mock_solr, dummy_endpoint
) -> None:
mock_solr.search.return_value = types.SimpleNamespace(
docs=[
{
"my_id": 1234567890123, # long-ish numeric id
"body_s": "num id keeps working",
"x": "meta",
}
]
)
reader = SolrReader(endpoint=dummy_endpoint)
docs = reader.load_data(
query={"q": "*:*"},
field="body_s",
id_field="my_id",
metadata_fields=["x"],
)
called = mock_solr.search.call_args.kwargs
assert called["fl"] == "my_id,body_s,x" # custom my_id field
assert len(docs) == 1
d = docs[0]
assert d.id_ == "1234567890123" # coerced to str
assert d.get_content() == "num id keeps working"
assert d.metadata == {"x": "meta"}
assert d.embedding is None
def test_load_data_raises_when_q_missing(mock_solr, dummy_endpoint) -> None:
reader = SolrReader(endpoint=dummy_endpoint)
with pytest.raises(ValueError):
_ = reader.load_data(query={}, field="content_t")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-solr/tests/test_readers_solr.py",
"license": "MIT License",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/olostep_web/base.py | """Olostep Web Reader."""
import json
import requests
from typing import List, Optional, Dict, Callable
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class OlostepWebReader(BasePydanticReader):
"""
A web reader that uses Olostep API to scrape web pages.
Args:
api_key (str): The Olostep API key.
mode (str): The mode to run the loader in. One of "scrape" or "search".
Default is "scrape".
params (Optional[dict]): Additional parameters for the API call.
"""
api_key: str
mode: str
params: Optional[dict]
_metadata_fn: Optional[Callable[[str], Dict]] = PrivateAttr()
def __init__(
self,
api_key: str,
mode: str = "scrape",
params: Optional[dict] = None,
) -> None:
"""Initialize with parameters."""
super().__init__(
api_key=api_key,
mode=mode,
params=params or {},
)
@classmethod
def class_name(cls) -> str:
return "OlostepWebReader"
def load_data(
self,
url: Optional[str] = None,
query: Optional[str] = None,
params: Optional[dict] = None,
) -> List[Document]:
"""
Load data from the input URL or query.
Args:
url (Optional[str]): URL to scrape or for sitemap.
query (Optional[str]): Query for search.
params (Optional[dict]): Additional parameters for the API call.
Returns:
List[Document]: List of documents.
"""
if self.mode == "scrape":
if not url:
raise ValueError("URL must be provided for scrape mode.")
return self._scrape(url, params=params)
elif self.mode == "search":
if not query:
raise ValueError("Query must be provided for search mode.")
return self._search(query, params=params)
else:
raise ValueError("Invalid mode. Choose from 'scrape' or 'search'.")
def _search(self, query: str, params: Optional[dict] = None) -> List[Document]:
"""
Perform a search using Olostep's Google Search parser.
Args:
query (str): The search query.
params (Optional[dict]): Additional parameters for the API call.
Returns:
List[Document]: A list containing a single document with the search results.
"""
combined_params = {**(self.params or {}), **(params or {})}
search_url = f"https://www.google.com/search?q={query}"
api_url = "https://api.olostep.com/v1/scrapes"
headers = {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json",
}
data = {
"url_to_scrape": search_url,
"formats": ["json"],
"parser": {"id": "@olostep/google-search"},
"wait_before_scraping": 0,
}
data.update(combined_params)
response = requests.post(api_url, headers=headers, json=data)
response.raise_for_status()
result = response.json().get("result", {})
json_content = result.get("json_content")
metadata = {
"source": search_url,
"query": query,
"page_metadata": result.get("page_metadata", {}),
}
return [Document(text=json.dumps(json_content, indent=4), metadata=metadata)]
def _scrape(self, url: str, params: Optional[dict] = None) -> List[Document]:
"""
Scrape a single URL.
Args:
url (str): The URL to scrape.
params (Optional[dict]): Additional parameters for the API call.
Returns:
List[Document]: A list containing a single document with the scraped content.
"""
api_url = "https://api.olostep.com/v1/scrapes"
headers = {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json",
}
data = {"url_to_scrape": url, "formats": ["markdown"]}
# Combine and add parameters
combined_params = {**(self.params or {}), **(params or {})}
data.update(combined_params)
response = requests.post(api_url, headers=headers, json=data)
response.raise_for_status()
result = response.json().get("result", {})
content_parts = []
requested_formats = data.get("formats", [])
        for fmt in requested_formats:
            content_key = f"{fmt}_content"
            if content_key in result:
                content_value = result[content_key]
                if fmt == "json" and isinstance(content_value, (dict, list)):
                    content_parts.append(json.dumps(content_value, indent=4))
                else:
                    content_parts.append(str(content_value))
content = "\n\n".join(content_parts)
metadata = {"source": url, "page_metadata": result.get("page_metadata", {})}
return [Document(text=content, metadata=metadata)]
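# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): the API key is a placeholder and network
# access to api.olostep.com is assumed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    reader = OlostepWebReader(api_key="<olostep-api-key>", mode="scrape")
    docs = reader.load_data(url="https://example.com")
    print(docs[0].metadata["source"], docs[0].text[:200])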
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/olostep_web/base.py",
"license": "MIT License",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-vectorx/llama_index/vector_stores/vectorx/base.py | import logging
from collections import Counter
import json
from typing import Any, Callable, Dict, List, Optional, cast
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.schema import BaseNode, TextNode
from llama_index.core.vector_stores.types import (
    BasePydanticVectorStore,
    FilterOperator,
    VectorStoreQuery,
    VectorStoreQueryMode,
    VectorStoreQueryResult,
)
from llama_index.core.vector_stores.utils import (
DEFAULT_TEXT_KEY,
legacy_metadata_dict_to_node,
metadata_dict_to_node,
node_to_metadata_dict,
)
ID_KEY = "id"
VECTOR_KEY = "values"
SPARSE_VECTOR_KEY = "sparse_values"
METADATA_KEY = "metadata"
DEFAULT_BATCH_SIZE = 100
_logger = logging.getLogger(__name__)
reverse_operator_map = {
FilterOperator.EQ: "$eq",
FilterOperator.NE: "$ne",
FilterOperator.GT: "$gt",
FilterOperator.GTE: "$gte",
FilterOperator.LT: "$lt",
FilterOperator.LTE: "$lte",
FilterOperator.IN: "$in",
FilterOperator.NIN: "$nin",
}
def build_dict(input_batch: List[List[int]]) -> List[Dict[str, Any]]:
"""
Build a list of sparse dictionaries from a batch of input_ids.
NOTE: taken from https://www.pinecone.io/learn/hybrid-search-intro/.
"""
# store a batch of sparse embeddings
sparse_emb = []
# iterate through input batch
for token_ids in input_batch:
indices = []
values = []
# convert the input_ids list to a dictionary of key to frequency values
d = dict(Counter(token_ids))
for idx in d:
indices.append(idx)
values.append(float(d[idx]))
sparse_emb.append({"indices": indices, "values": values})
# return sparse_emb list
return sparse_emb
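# Worked example of the frequency encoding above (illustrative):
#   build_dict([[1, 1, 3]]) -> [{"indices": [1, 3], "values": [2.0, 1.0]}]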
def generate_sparse_vectors(
context_batch: List[str], tokenizer: Callable
) -> List[Dict[str, Any]]:
"""
Generate sparse vectors from a batch of contexts.
NOTE: taken from https://www.pinecone.io/learn/hybrid-search-intro/.
"""
# create batch of input_ids
outputs = tokenizer(context_batch)
if not isinstance(outputs, dict) or "input_ids" not in outputs:
raise ValueError("Tokenizer must return a dict with 'input_ids'.")
input_ids = outputs["input_ids"]
# create sparse dictionaries
return build_dict(input_ids)
class VectorXVectorStore(BasePydanticVectorStore):
stores_text: bool = True
flat_metadata: bool = False
api_token: Optional[str]
encryption_key: Optional[str]
index_name: Optional[str]
space_type: Optional[str]
dimension: Optional[int]
insert_kwargs: Optional[Dict]
add_sparse_vector: bool
text_key: str
batch_size: int
remove_text_from_metadata: bool
_vectorx_index: Any = PrivateAttr()
def __init__(
self,
vectorx_index: Optional[Any] = None,
api_token: Optional[str] = None,
encryption_key: Optional[str] = None,
index_name: Optional[str] = None,
space_type: Optional[str] = "cosine",
dimension: Optional[int] = None,
insert_kwargs: Optional[Dict] = None,
add_sparse_vector: bool = False,
text_key: str = DEFAULT_TEXT_KEY,
batch_size: int = DEFAULT_BATCH_SIZE,
remove_text_from_metadata: bool = False,
**kwargs: Any,
) -> None:
insert_kwargs = insert_kwargs or {}
super().__init__(
index_name=index_name,
api_token=api_token,
encryption_key=encryption_key,
space_type=space_type,
dimension=dimension,
insert_kwargs=insert_kwargs,
add_sparse_vector=add_sparse_vector,
text_key=text_key,
batch_size=batch_size,
remove_text_from_metadata=remove_text_from_metadata,
)
# Use existing vectorx_index or initialize a new one
self._vectorx_index = vectorx_index or self._initialize_vectorx_index(
api_token, encryption_key, index_name, dimension, space_type
)
@classmethod
def _initialize_vectorx_index(
cls,
api_token: Optional[str],
encryption_key: Optional[str],
index_name: Optional[str],
dimension: Optional[int] = None,
space_type: Optional[str] = "cosine",
) -> Any:
"""Initialize VectorX index using the current API."""
try:
from vecx.vectorx import VectorX
except ImportError as e:
raise ImportError(
"Could not import `vecx` package. "
"Please install it with `pip install vecx`."
) from e
# Initialize VectorX client
vx = VectorX(token=api_token)
try:
# Try to get existing index
index = vx.get_index(name=index_name, key=encryption_key)
_logger.info(f"Retrieved existing index: {index_name}")
return index
except Exception as e:
if dimension is None:
raise ValueError(
"Must provide dimension when creating a new index"
) from e
# Create a new index if it doesn't exist
_logger.info(f"Creating new index: {index_name}")
vx.create_index(
name=index_name,
dimension=dimension,
key=encryption_key,
space_type=space_type,
)
return vx.get_index(name=index_name, key=encryption_key)
@classmethod
def from_params(
cls,
api_token: Optional[str] = None,
encryption_key: Optional[str] = None,
index_name: Optional[str] = None,
dimension: Optional[int] = None,
space_type: str = "cosine",
batch_size: int = DEFAULT_BATCH_SIZE,
) -> "VectorXVectorStore":
"""Create VectorXVectorStore from parameters."""
vectorx_index = cls._initialize_vectorx_index(
api_token, encryption_key, index_name, dimension, space_type
)
return cls(
vectorx_index=vectorx_index,
api_token=api_token,
encryption_key=encryption_key,
index_name=index_name,
dimension=dimension,
space_type=space_type,
batch_size=batch_size,
)
@classmethod
def class_name(cls) -> str:
return "VectorXVectorStore"
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""
Add nodes to index.
Args:
nodes: List[BaseNode]: list of nodes with embeddings
"""
ids = []
entries = []
for node in nodes:
node_id = node.node_id
metadata = node_to_metadata_dict(node)
            # Filter values must be simple key-value pairs; copy over the
            # known filterable metadata keys when present
            filter_keys = (
                "file_name",
                "doc_id",
                "category",
                "difficulty",
                "language",
                "field",
                "type",
                "feature",
            )
            filter_data = {key: metadata[key] for key in filter_keys if key in metadata}
entry = {
"id": node_id,
"vector": node.get_embedding(),
"meta": metadata,
"filter": filter_data,
}
ids.append(node_id)
entries.append(entry)
# Batch insert to avoid hitting API limits
batch_size = self.batch_size
for i in range(0, len(entries), batch_size):
batch = entries[i : i + batch_size]
self._vectorx_index.upsert(batch)
return ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The id of the document to delete.
"""
try:
self._vectorx_index.delete_with_filter({"doc_id": ref_doc_id})
except Exception as e:
_logger.error(f"Error deleting vectors for doc_id {ref_doc_id}: {e}")
@property
def client(self) -> Any:
"""Return vectorX index client."""
return self._vectorx_index
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""
Query index for top k most similar nodes.
Args:
query: VectorStoreQuery object containing query parameters
"""
if not hasattr(self._vectorx_index, "dimension"):
# Get dimension from index if available, otherwise try to infer from query
try:
dimension = self._vectorx_index.describe()["dimension"]
except Exception:
if query.query_embedding is not None:
dimension = len(query.query_embedding)
else:
raise ValueError("Could not determine vector dimension")
else:
dimension = self._vectorx_index.dimension
        query_embedding = [0.0] * dimension  # default zero vector if no query embedding is given
filters = {}
# Apply any metadata filters if provided
if query.filters is not None:
for filter_item in query.filters.filters:
# Case 1: MetadataFilter object
if (
hasattr(filter_item, "key")
and hasattr(filter_item, "value")
and hasattr(filter_item, "operator")
):
op_symbol = reverse_operator_map.get(filter_item.operator)
if not op_symbol:
raise ValueError(
f"Unsupported filter operator: {filter_item.operator}"
)
if filter_item.key not in filters:
filters[filter_item.key] = {}
filters[filter_item.key][op_symbol] = filter_item.value
# Case 2: Raw dict, e.g. {"category": {"$eq": "programming"}}
elif isinstance(filter_item, dict):
for key, op_dict in filter_item.items():
if isinstance(op_dict, dict):
for op, val in op_dict.items():
if key not in filters:
filters[key] = {}
filters[key][op] = val
else:
raise ValueError(f"Unsupported filter format: {filter_item}")
_logger.info(f"Final structured filters: {filters}")
# Use the query embedding if provided
if query.query_embedding is not None:
query_embedding = cast(List[float], query.query_embedding)
if query.alpha is not None and query.mode == VectorStoreQueryMode.HYBRID:
# Apply alpha scaling in hybrid mode
query_embedding = [v * query.alpha for v in query_embedding]
# Execute query
try:
results = self._vectorx_index.query(
vector=query_embedding,
top_k=query.similarity_top_k,
filter=filters if filters else None,
include_vectors=True,
)
except Exception as e:
_logger.error(f"Error querying VectorX: {e}")
raise
# Process results
nodes = []
similarities = []
ids = []
for result in results:
node_id = result["id"]
score = result["similarity"]
# Get metadata from result
metadata = result.get("meta", {})
# Create node from metadata
if self.flat_metadata:
node = metadata_dict_to_node(
metadata=metadata,
text=metadata.pop(self.text_key, None),
id_=node_id,
)
else:
metadata_dict, node_info, relationships = legacy_metadata_dict_to_node(
metadata=metadata,
text_key=self.text_key,
)
# Create TextNode with the extracted metadata
# Step 1: Get the JSON string from "_node_content"
_node_content_str = metadata.get("_node_content", "{}")
# Step 2: Convert JSON string to Python dict
try:
node_content = json.loads(_node_content_str)
except json.JSONDecodeError:
node_content = {}
# Step 3: Get the text
text = node_content.get(self.text_key, "")
node = TextNode(
text=text,
metadata=metadata_dict,
relationships=relationships,
node_id=node_id,
)
# Add any node_info properties to the node
for key, val in node_info.items():
if hasattr(node, key):
setattr(node, key, val)
# If embedding was returned in the results, add it to the node
if "vector" in result:
node.embedding = result["vector"]
nodes.append(node)
similarities.append(score)
ids.append(node_id)
return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)
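# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): the token, key, and index name below are
# placeholders, and a reachable VectorX service plus pre-computed embeddings
# are assumed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    store = VectorXVectorStore.from_params(
        api_token="<vectorx-api-token>",
        encryption_key="<encryption-key>",
        index_name="demo_index",
        dimension=384,
    )
    result = store.query(
        VectorStoreQuery(query_embedding=[0.0] * 384, similarity_top_k=3)
    )
    for node, score in zip(result.nodes, result.similarities):
        print(score, node.get_content()[:80])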
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-vectorx/llama_index/vector_stores/vectorx/base.py",
"license": "MIT License",
"lines": 351,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-vectorx/tests/test_vector_stores_vectorx.py | import unittest
import sys
import pytest
import time
import os
from pathlib import Path
from unittest.mock import MagicMock
# Add parent directory to Python path
sys.path.append(str(Path(__file__).parent.parent))
from vecx.vectorx import VectorX
from llama_index.vector_stores.vectorx import VectorXVectorStore
from llama_index.core import Document, StorageContext, VectorStoreIndex, Settings
from llama_index.embeddings.huggingface.base import HuggingFaceEmbedding
from llama_index.core.vector_stores.types import (
MetadataFilters,
MetadataFilter,
FilterOperator,
VectorStoreQuery,
)
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
# ---- Check if credentials exist ----
HAS_VECX = os.getenv("VECTORX_API_TOKEN") is not None
# ------------------ Base Test Setup ------------------
@pytest.mark.skipif(not HAS_VECX, reason="VECTORX_API_TOKEN not set in environment")
class VectorXTestSetup(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.vecx_api_token = os.getenv("VECTORX_API_TOKEN")
if not cls.vecx_api_token:
raise ValueError(
"Missing VECTORX_API_TOKEN. Please set it in your environment."
)
cls.vx = VectorX(token=cls.vecx_api_token)
cls.encryption_key = cls.vx.generate_key()
timestamp = int(time.time())
cls.test_index_name = f"test_index_{timestamp}"
cls.dimension = 384
cls.space_type = "cosine"
cls.test_indexes = {cls.test_index_name}
cls.test_documents = [
Document(
text="Python is a high-level, interpreted programming language known for its readability and simplicity.",
metadata={
"category": "programming",
"language": "python",
"difficulty": "beginner",
},
),
Document(
text="Machine learning algorithms learn patterns from data to make predictions.",
metadata={
"category": "ai",
"field": "machine_learning",
"difficulty": "intermediate",
},
),
Document(
text="Deep learning uses neural networks with multiple layers for complex pattern recognition.",
metadata={
"category": "ai",
"field": "deep_learning",
"difficulty": "advanced",
},
),
]
@classmethod
def tearDownClass(cls):
for index_name in cls.test_indexes:
try:
cls.vx.delete_index(name=index_name)
except Exception as e:
if "not found" not in str(e).lower():
print(f"Error deleting test index {index_name}: {e}")
def tearDown(self):
try:
indexes = self.vx.list_indexes()
if isinstance(indexes, list):
for index in indexes:
if isinstance(index, dict) and "name" in index:
index_name = index["name"]
if index_name.startswith("test_index_"):
try:
self.vx.delete_index(name=index_name)
except Exception as e:
print(f"Error cleaning up test index {index_name}: {e}")
except Exception as e:
print(f"Error listing indexes for cleanup: {e}")
# ------------------ VectorX VectorStore Tests ------------------
class TestVectorXVectorStore(VectorXTestSetup):
def setUp(self):
self.embed_model = HuggingFaceEmbedding(
model_name="sentence-transformers/all-MiniLM-L6-v2", device="cpu"
)
def test_create_vector_store_from_params(self):
vector_store = VectorXVectorStore.from_params(
api_token=self.vecx_api_token,
index_name=self.test_index_name,
encryption_key=self.encryption_key,
dimension=self.dimension,
space_type=self.space_type,
)
self.assertIsNotNone(vector_store)
self.assertEqual(vector_store.index_name, self.test_index_name)
def test_create_vector_store_with_documents(self):
vector_store = VectorXVectorStore.from_params(
api_token=self.vecx_api_token,
index_name=self.test_index_name,
encryption_key=self.encryption_key,
dimension=self.dimension,
space_type=self.space_type,
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
self.test_documents,
storage_context=storage_context,
embed_model=self.embed_model,
)
self.assertIsNotNone(index)
def test_invalid_params(self):
with pytest.raises(Exception):
VectorXVectorStore.from_params(
api_token="invalid:invalid:region",
index_name=self.test_index_name,
encryption_key=self.encryption_key,
dimension=self.dimension,
space_type=self.space_type,
)
# ------------------ Custom Retrieval Tests ------------------
class TestCustomRetrieval(VectorXTestSetup):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.embed_model = HuggingFaceEmbedding(
"sentence-transformers/all-MiniLM-L6-v2", device="cpu"
)
cls.vector_store = VectorXVectorStore.from_params(
api_token=cls.vecx_api_token,
index_name=cls.test_index_name,
encryption_key=cls.encryption_key,
dimension=cls.dimension,
space_type=cls.space_type,
)
cls.storage_context = StorageContext.from_defaults(
vector_store=cls.vector_store
)
Settings.llm = None
cls.index = VectorStoreIndex.from_documents(
cls.test_documents,
storage_context=cls.storage_context,
embed_model=cls.embed_model,
)
def test_custom_retriever(self):
ai_filter = MetadataFilter(
key="category", value="ai", operator=FilterOperator.EQ
)
retriever = VectorIndexRetriever(
index=self.index,
similarity_top_k=3,
filters=MetadataFilters(filters=[ai_filter]),
)
nodes = retriever.retrieve("What is deep learning?")
self.assertGreater(len(nodes), 0)
def test_query_engine(self):
retriever = VectorIndexRetriever(index=self.index, similarity_top_k=3)
query_engine = RetrieverQueryEngine.from_args(retriever=retriever)
response = query_engine.query("Explain machine learning vs deep learning")
self.assertTrue(len(str(response)) > 0)
# ------------------ Query & Filter Tests ------------------
class TestQueryAndFilter(VectorXTestSetup):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.embed_model = HuggingFaceEmbedding(
"sentence-transformers/all-MiniLM-L6-v2", device="cpu"
)
cls.vector_store = VectorXVectorStore.from_params(
api_token=cls.vecx_api_token,
index_name=cls.test_index_name,
encryption_key=cls.encryption_key,
dimension=cls.dimension,
space_type=cls.space_type,
)
cls.storage_context = StorageContext.from_defaults(
vector_store=cls.vector_store
)
Settings.llm = None
cls.index = VectorStoreIndex.from_documents(
cls.test_documents,
storage_context=cls.storage_context,
embed_model=cls.embed_model,
)
def test_basic_query(self):
query_text = "What is Python?"
query_embedding = self.embed_model.get_text_embedding(query_text)
query = VectorStoreQuery(query_embedding=query_embedding, similarity_top_k=2)
results = self.vector_store.query(query)
self.assertGreater(len(results.nodes), 0)
def test_filtered_query(self):
query_text = "Explain machine learning"
query_embedding = self.embed_model.get_text_embedding(query_text)
ai_filter = MetadataFilter(
key="category", value="ai", operator=FilterOperator.EQ
)
query = VectorStoreQuery(
query_embedding=query_embedding,
similarity_top_k=2,
filters=MetadataFilters(filters=[ai_filter]),
)
results = self.vector_store.query(query)
self.assertGreater(len(results.nodes), 0)
# ------------------ Mocked VectorX Tests ------------------
class TestVectorXMock(unittest.TestCase):
def setUp(self):
self.mock_index = MagicMock()
self.mock_index.dimension = 2
self.mock_index.query.return_value = [
{
"id": "1",
"similarity": 0.9,
"meta": {"text": "mock text"},
"vector": [0.1, 0.2],
}
]
self.store = VectorXVectorStore(
vectorx_index=self.mock_index, dimension=2, index_name="mock_index"
)
def test_add_and_query_mock(self):
query = VectorStoreQuery(query_embedding=[0.1, 0.2], similarity_top_k=1)
result = self.store.query(query)
self.assertEqual(result.similarities[0], 0.9)
self.assertEqual(len(result.nodes), 1)
def test_delete_non_existent_node(self):
self.store.delete("nonexistent")
def test_query_with_empty_filters(self):
query = VectorStoreQuery(
query_embedding=[0.1, 0.2],
similarity_top_k=1,
filters=MetadataFilters(filters=[]),
)
result = self.store.query(query)
self.assertEqual(len(result.nodes), 1)
# ------------------ Advanced Tests with Mocking ------------------
class TestVectorXAdvanced(unittest.TestCase):
def setUp(self):
self.mock_index = MagicMock()
self.mock_index.dimension = 2
self.mock_index.query.return_value = [
{
"id": "1",
"similarity": 0.9,
"meta": {"text": "mock text"},
"vector": [0.1, 0.2],
}
]
self.mock_index.delete_with_filter = MagicMock()
self.store = VectorXVectorStore(
vectorx_index=self.mock_index, dimension=2, index_name="mock_index"
)
def test_initialize_vectorx_index_import_error(self):
import builtins
original_import = builtins.__import__
def mock_import(name, *args, **kwargs):
if name == "vecx.vectorx":
raise ImportError("No module named vecx.vectorx")
return original_import(name, *args, **kwargs)
        builtins.__import__ = mock_import
        try:
            with pytest.raises(ImportError):
                VectorXVectorStore._initialize_vectorx_index(
                    api_token="token",
                    encryption_key="key",
                    index_name="idx",
                    dimension=2,
                )
        finally:
            # Restore the real import even if the assertion fails
            builtins.__import__ = original_import
def test_query_hybrid_with_alpha(self):
query = VectorStoreQuery(
query_embedding=[0.1, 0.2], similarity_top_k=1, mode="HYBRID", alpha=2.0
)
result = self.store.query(query)
self.assertEqual(result.similarities[0], 0.9)
def test_delete_with_error_logging(self):
self.store._vectorx_index.delete_with_filter.side_effect = Exception(
"Delete failed"
)
self.store.delete("ref_doc")
def test_query_missing_dimension_and_no_embedding(self):
self.store._vectorx_index = MagicMock()
del self.store._vectorx_index.dimension
self.store._vectorx_index.describe.side_effect = Exception("No dimension")
with pytest.raises(ValueError):
self.store.query(VectorStoreQuery(query_embedding=None, similarity_top_k=1))
def test_mocked_vectorx_index_usage(self):
query = VectorStoreQuery(query_embedding=[0.1, 0.2], similarity_top_k=1)
result = self.store.query(query)
self.assertEqual(result.similarities[0], 0.9)
self.assertEqual(len(result.nodes), 1)
# ------------------ Run Tests ------------------
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-vectorx/tests/test_vector_stores_vectorx.py",
"license": "MIT License",
"lines": 297,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-cometapi/llama_index/llms/cometapi/base.py | from typing import Any, Dict, Optional
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.core.bridge.pydantic import Field
from llama_index.core.constants import (
DEFAULT_CONTEXT_WINDOW,
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.llms.openai_like import OpenAILike
DEFAULT_API_BASE = "https://api.cometapi.com/v1"
DEFAULT_MODEL = "gpt-4o-mini"
class CometAPI(OpenAILike):
"""
CometAPI LLM.
CometAPI provides access to various state-of-the-art LLM models including GPT series,
Claude series, Gemini series, and more. To use CometAPI, you need to obtain an API key
from https://api.cometapi.com/console/token.
Examples:
`pip install llama-index-llms-cometapi`
```python
from llama_index.llms.cometapi import CometAPI
llm = CometAPI(
api_key="<your-api-key>",
max_tokens=256,
context_window=4096,
model="gpt-4o-mini",
)
response = llm.complete("Hello World!")
print(str(response))
```
"""
model: str = Field(
description="The CometAPI model to use. See https://api.cometapi.com/pricing for available models."
)
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model.",
gt=0,
)
is_chat_model: bool = Field(
default=True,
description=LLMMetadata.model_fields["is_chat_model"].description,
)
def __init__(
self,
model: str = DEFAULT_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 5,
api_base: Optional[str] = DEFAULT_API_BASE,
api_key: Optional[str] = None,
**kwargs: Any,
) -> None:
additional_kwargs = additional_kwargs or {}
api_base = get_from_param_or_env("api_base", api_base, "COMETAPI_API_BASE")
api_key = get_from_param_or_env("api_key", api_key, "COMETAPI_API_KEY")
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=api_base,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "CometAPI_LLM"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-cometapi/llama_index/llms/cometapi/base.py",
"license": "MIT License",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-cometapi/tests/test_llms_cometapi.py | from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.cometapi import CometAPI
def test_llm_class():
names_of_base_classes = [b.__name__ for b in CometAPI.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
def test_cometapi_initialization():
llm = CometAPI(model="gpt-4o-mini", api_key="test_key")
assert llm.model == "gpt-4o-mini"
assert llm.api_key == "test_key"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-cometapi/tests/test_llms_cometapi.py",
"license": "MIT License",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/storage/chat_store/llama-index-storage-chat-store-yugabytedb/llama_index/storage/chat_store/yugabytedb/base.py | from typing import Any, Optional
from urllib.parse import parse_qs, urlencode, urlparse
from sqlalchemy import (
    Index,
    Column,
    Integer,
    UniqueConstraint,
    bindparam,
    cast,
    create_engine,
    delete,
    select,
    text,
)
from sqlalchemy.orm import sessionmaker, declarative_base
from llama_index.core.llms import ChatMessage
from sqlalchemy.dialects.postgresql import JSON, ARRAY, JSONB, VARCHAR, insert
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.storage.chat_store.base import BaseChatStore
def get_data_model(
base: type,
index_name: str,
schema_name: str,
use_jsonb: bool = False,
) -> Any:
"""
Create a dynamic SQLAlchemy model class for storing chat data in YugabyteDB.
This function generates a SQLAlchemy model class with a table structure optimized for
storing chat messages. The table includes columns for a unique key and an array of
message values stored in either JSON or JSONB format.
Args:
base (type): The declarative base class from SQLAlchemy's `declarative_base()`.
index_name (str): The base name to use for the table and class. Will be normalized
to create valid SQL identifiers (e.g., 'chat_store' becomes 'data_chat_store').
schema_name (str): The database schema where the table will be created.
use_jsonb (bool, optional): If True, uses JSONB column type for better query performance
and indexing capabilities. If False, uses standard JSON.
Defaults to False.
"""
tablename = f"data_{index_name}" # dynamic table name
class_name = f"Data{index_name}" # dynamic class name
chat_dtype = JSONB if use_jsonb else JSON
class AbstractData(base): # type: ignore
        __abstract__ = True  # prevents SQLAlchemy from mapping this intermediate class
id = Column(Integer, primary_key=True, autoincrement=True) # Add primary key
key = Column(VARCHAR, nullable=False)
value = Column(ARRAY(chat_dtype))
return type(
class_name,
(AbstractData,),
{
"__tablename__": tablename,
"__table_args__": (
UniqueConstraint("key", name=f"{tablename}:unique_key"),
Index(f"{tablename}:idx_key", "key"),
{"schema": schema_name},
),
},
)
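# Illustrative sketch (hedged): exercising the dynamic model factory directly.
# The index and schema names below are arbitrary examples.
def _example_get_data_model() -> None:
    example_base = declarative_base()
    model = get_data_model(example_base, "chatstore", "public", use_jsonb=True)
    # Yields a class named "Datachatstore" mapped to table "data_chatstore".
    print(model.__name__, model.__tablename__)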
class YugabyteDBChatStore(BaseChatStore):
table_name: Optional[str] = Field(
default="chatstore", description="YugabyteDB table name."
)
schema_name: Optional[str] = Field(
default="public", description="YugabyteDB schema name."
)
_table_class: Optional[Any] = PrivateAttr()
_session: Optional[sessionmaker] = PrivateAttr()
def __init__(
self,
session: sessionmaker,
table_name: str,
schema_name: str = "public",
use_jsonb: bool = False,
):
super().__init__(
table_name=table_name.lower(),
schema_name=schema_name.lower(),
)
# sqlalchemy model
base = declarative_base()
self._table_class = get_data_model(
base,
table_name,
schema_name,
use_jsonb=use_jsonb,
)
self._session = session
self._initialize(base)
@classmethod
def from_params(
cls,
host: Optional[str] = None,
port: Optional[str] = None,
database: Optional[str] = None,
user: Optional[str] = None,
password: Optional[str] = None,
load_balance: Optional[bool] = False,
topology_keys: Optional[str] = None,
yb_servers_refresh_interval: Optional[int] = 300,
fallback_to_topology_keys_only: Optional[bool] = False,
failed_host_ttl_seconds: Optional[int] = 5,
table_name: str = "chatstore",
schema_name: str = "public",
connection_string: Optional[str] = None,
debug: bool = False,
use_jsonb: bool = False,
) -> "YugabyteDBChatStore":
"""
        Create a YugabyteDBChatStore instance from individual database parameters.
Args:
host (str): YugabyteDB host.
port (str): YugabyteDB port.
database (str): YugabyteDB database name.
user (str): YugabyteDB user.
password (str): YugabyteDB password.
load_balance (bool, optional): Enables uniform load balancing. Defaults to False.
topology_keys (str, optional): Enables topology-aware load balancing.
Specify comma-separated geo-locations in the form of cloud.region.zone:priority.
Ignored if load_balance is false. Defaults to None.
yb_servers_refresh_interval (int, optional): The interval in seconds to refresh the servers list;
ignored if load_balance is false. Defaults to 300.
fallback_to_topology_keys_only (bool, optional): If set to true and topology_keys are specified,
the driver only tries to connect to nodes specified in topology_keys
Defaults to False.
failed_host_ttl_seconds (int, optional): Time, in seconds, to wait before trying to connect to failed nodes.
Defaults to 5.
connection_string (Union[str, sqlalchemy.engine.URL]): Connection string to yugabytedb db.
table_name (str): Table name.
schema_name (str): Schema name.
debug (bool, optional): Debug mode. Defaults to False.
use_jsonb (bool, optional): Use JSONB instead of JSON. Defaults to False.
"""
from urllib.parse import urlencode
query_params = {"load_balance": str(load_balance)}
if topology_keys is not None:
query_params["topology_keys"] = topology_keys
if yb_servers_refresh_interval is not None:
query_params["yb_servers_refresh_interval"] = yb_servers_refresh_interval
if fallback_to_topology_keys_only:
query_params["fallback_to_topology_keys_only"] = (
fallback_to_topology_keys_only
)
if failed_host_ttl_seconds is not None:
query_params["failed_host_ttl_seconds"] = failed_host_ttl_seconds
query_str = urlencode(query_params)
conn_str = (
connection_string
or f"yugabytedb+psycopg2://{user}:{password}@{host}:{port}/{database}?{query_str}"
)
session = cls._connect(conn_str, debug)
return cls(
session=session,
table_name=table_name,
schema_name=schema_name,
use_jsonb=use_jsonb,
)
@classmethod
def from_uri(
cls,
uri: str,
table_name: str = "chatstore",
schema_name: str = "public",
debug: bool = False,
use_jsonb: bool = False,
) -> "YugabyteDBChatStore":
"""Return connection string from database parameters."""
params = params_from_uri(uri)
return cls.from_params(
**params,
table_name=table_name,
schema_name=schema_name,
debug=debug,
use_jsonb=use_jsonb,
)
@classmethod
def _connect(
cls, connection_string: str, debug: bool
    ) -> sessionmaker:
_engine = create_engine(connection_string, echo=debug)
return sessionmaker(_engine)
def _create_schema_if_not_exists(self) -> None:
with self._session() as session, session.begin():
# Check if the specified schema exists with "CREATE" statement
check_schema_statement = text(
f"SELECT schema_name FROM information_schema.schemata WHERE schema_name = '{self.schema_name}'"
)
result = session.execute(check_schema_statement).fetchone()
# If the schema does not exist, then create it
if not result:
create_schema_statement = text(
f"CREATE SCHEMA IF NOT EXISTS {self.schema_name}"
)
                session.execute(create_schema_statement)
                # No explicit commit here: session.begin() commits on exit.
def _create_tables_if_not_exists(self, base) -> None:
with self._session() as session, session.begin():
base.metadata.create_all(session.connection())
def _initialize(self, base) -> None:
self._create_schema_if_not_exists()
self._create_tables_if_not_exists(base)
def set_messages(self, key: str, messages: list[ChatMessage]) -> None:
"""Set messages for a key."""
with self._session() as session:
stmt = (
insert(self._table_class)
.values(
key=bindparam("key"), value=cast(bindparam("value"), ARRAY(JSONB))
)
.on_conflict_do_update(
index_elements=["key"],
set_={"value": cast(bindparam("value"), ARRAY(JSONB))},
)
)
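            # The upsert targets the UNIQUE constraint on "key" created in
            # get_data_model; each ChatMessage is serialized to a JSON string
            # before being stored in the JSON/JSONB array column.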
params = {
"key": key,
"value": [message.model_dump_json() for message in messages],
}
# Execute the bulk upsert
session.execute(stmt, params)
session.commit()
def get_messages(self, key: str) -> list[ChatMessage]:
"""Get messages for a key."""
with self._session() as session:
result = session.execute(select(self._table_class).filter_by(key=key))
result = result.scalars().first()
if result:
                return [
                    ChatMessage.model_validate(message)
                    for message in result.value
                ]
return []
def add_message(self, key: str, message: ChatMessage) -> None:
"""Add a message for a key."""
with self._session() as session:
stmt = (
insert(self._table_class)
.values(
key=bindparam("key"), value=cast(bindparam("value"), ARRAY(JSONB))
)
.on_conflict_do_update(
index_elements=["key"],
set_={"value": cast(bindparam("value"), ARRAY(JSONB))},
)
)
params = {"key": key, "value": [message.model_dump_json()]}
session.execute(stmt, params)
session.commit()
def delete_messages(self, key: str) -> Optional[list[ChatMessage]]:
"""Delete messages for a key."""
with self._session() as session:
session.execute(delete(self._table_class).filter_by(key=key))
session.commit()
return None
def delete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
"""Delete specific message for a key."""
with self._session() as session:
# First, retrieve the current list of messages
stmt = select(self._table_class.value).where(self._table_class.key == key)
result = session.execute(stmt).scalar_one_or_none()
if result is None or idx < 0 or idx >= len(result):
# If the key doesn't exist or the index is out of bounds
return None
# Remove the message at the given index
removed_message = result[idx]
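            # PostgreSQL arrays are 1-based: value[: :idx] keeps elements
            # 1..idx and value[:idx+2:] keeps elements idx+2..end, dropping
            # the 0-based element at position idx. The space in "[: :idx]"
            # prevents SQLAlchemy from treating "::" as a cast.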
            stmt = text(
                f"""
                UPDATE {self.schema_name}.{self._table_class.__tablename__}
                SET value = array_cat(
                    value[: :idx],
                    value[:idx+2:]
                )
                WHERE key = :key;
                """
            )
params = {"key": key, "idx": idx}
session.execute(stmt, params)
session.commit()
return ChatMessage.model_validate(removed_message)
def delete_last_message(self, key: str) -> Optional[ChatMessage]:
"""Delete last message for a key."""
with self._session() as session:
# First, retrieve the current list of messages
stmt = select(self._table_class.value).where(self._table_class.key == key)
result = session.execute(stmt).scalar_one_or_none()
if result is None or len(result) == 0:
# If the key doesn't exist or the array is empty
return None
            # Remove the last message
removed_message = result[-1]
            stmt = text(
                f"""
                UPDATE {self.schema_name}.{self._table_class.__tablename__}
                SET value = value[1:array_length(value, 1) - 1]
                WHERE key = :key;
                """
            )
params = {"key": key}
session.execute(stmt, params)
session.commit()
return ChatMessage.model_validate(removed_message)
def get_keys(self) -> list[str]:
"""Get all keys."""
with self._session() as session:
stmt = select(self._table_class.key)
return session.execute(stmt).scalars().all()
def params_from_uri(uri: str) -> dict:
result = urlparse(uri)
database = result.path[1:]
query_params = parse_qs(result.query)
port = result.port if result.port else 5433
return {
"database": database,
"user": result.username,
"password": result.password,
"host": result.hostname,
"port": port,
"load_balance": query_params.get("load_balance", ["false"])[0].lower()
== "true",
"topology_keys": query_params.get("topology_keys", [None])[0],
"yb_servers_refresh_interval": int(
query_params.get("yb_servers_refresh_interval", [300])[0]
),
"fallback_to_topology_keys_only": query_params.get(
"fallback_to_topology_keys_only", ["false"]
)[0].lower()
== "true",
"failed_host_ttl_seconds": int(
query_params.get("failed_host_ttl_seconds", [5])[0]
),
}
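# --- Usage sketch (illustrative only) ---
# Hedged example of wiring up the chat store; the host, credentials, and
# database below are placeholders rather than a real deployment.
def _example_yugabytedb_chat_store() -> None:
    store = YugabyteDBChatStore.from_uri(
        uri="postgresql://yugabyte:yugabyte@localhost:5433/yugabyte?load_balance=true",
        table_name="chatstore",
        use_jsonb=True,
    )
    store.add_message("session-1", ChatMessage(role="user", content="hello"))
    print(store.get_messages("session-1"))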
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/chat_store/llama-index-storage-chat-store-yugabytedb/llama_index/storage/chat_store/yugabytedb/base.py",
"license": "MIT License",
"lines": 330,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/storage/chat_store/llama-index-storage-chat-store-yugabytedb/tests/test_chat_store_yugabytedb_chat_store.py | import docker
import time
from typing import Dict, Generator, Union
import pytest
from docker.models.containers import Container
from llama_index.core.llms import ChatMessage
from llama_index.core.storage.chat_store.base import BaseChatStore
from llama_index.storage.chat_store.yugabytedb import YugabyteDBChatStore
def test_class():
names_of_base_classes = [b.__name__ for b in YugabyteDBChatStore.__mro__]
assert BaseChatStore.__name__ in names_of_base_classes
@pytest.fixture()
def yugabytedb_container() -> Generator[Dict[str, Union[str, Container]], None, None]:
# Define YugabyteDB settings
YUGABYTEDB_HOST = "0.0.0.0"
YUGABYTEDB_DBNAME = "yugabyte"
YUGABYTEDB_USER = "yugabyte"
YUGABYTEDB_PASSWORD = "yugabyte"
YUGABYTEDB_PORT = 5433
YUGABYTEDB_LOAD_BALANCE = False
yugabytedb_image = "yugabytedb/yugabyte:2.25.2.0-b359"
yugabytedb_command = ["bin/yugabyted", "start", "--background=false"]
# Port mapping (host:container)
yugabytedb_ports = {"5433/tcp": 5433}
container = None
try:
# Initialize Docker client
client = docker.from_env()
# Run YugabyteDB container
container = client.containers.run(
yugabytedb_image,
command=yugabytedb_command,
ports=yugabytedb_ports,
name="yugabyte",
detach=True,
)
# Reload to fetch latest state
container.reload()
print(f"Container started with ID: {container.id}")
        # Wait for YugabyteDB to start
time.sleep(10) # Adjust the sleep time if necessary
connection_string = f"yugabytedb+psycopg2://{YUGABYTEDB_USER}:{YUGABYTEDB_PASSWORD}@{YUGABYTEDB_HOST}:{YUGABYTEDB_PORT}/{YUGABYTEDB_DBNAME}?load_balance={YUGABYTEDB_LOAD_BALANCE}"
yield {
"container": container,
"connection_string": connection_string,
}
    except Exception as e:
        print(f"Error starting YugabyteDB container: {e!s}")
        raise
finally:
# Stop and remove the container
if container:
container.stop()
container.remove()
client.close()
@pytest.fixture()
def yugabytedb_chat_store(
yugabytedb_container: Dict[str, Union[str, Container]],
) -> Generator[YugabyteDBChatStore, None, None]:
chat_store = None
try:
chat_store = YugabyteDBChatStore.from_uri(
uri=yugabytedb_container["connection_string"],
use_jsonb=True,
)
yield chat_store
finally:
if chat_store:
keys = chat_store.get_keys()
for key in keys:
chat_store.delete_messages(key)
def test_yugabytedb_add_message(yugabytedb_chat_store: YugabyteDBChatStore):
key = "test_add_key"
message = ChatMessage(content="add_message_test", role="user")
yugabytedb_chat_store.add_message(key, message=message)
result = yugabytedb_chat_store.get_messages(key)
assert result[0].content == "add_message_test" and result[0].role == "user"
def test_set_and_retrieve_messages(yugabytedb_chat_store: YugabyteDBChatStore):
messages = [
ChatMessage(content="First message", role="user"),
ChatMessage(content="Second message", role="user"),
]
key = "test_set_key"
yugabytedb_chat_store.set_messages(key, messages)
retrieved_messages = yugabytedb_chat_store.get_messages(key)
assert len(retrieved_messages) == 2
assert retrieved_messages[0].content == "First message"
assert retrieved_messages[1].content == "Second message"
def test_delete_messages(yugabytedb_chat_store: YugabyteDBChatStore):
messages = [ChatMessage(content="Message to delete", role="user")]
key = "test_delete_key"
yugabytedb_chat_store.set_messages(key, messages)
yugabytedb_chat_store.delete_messages(key)
retrieved_messages = yugabytedb_chat_store.get_messages(key)
assert retrieved_messages == []
def test_delete_specific_message(yugabytedb_chat_store: YugabyteDBChatStore):
messages = [
ChatMessage(content="Keep me", role="user"),
ChatMessage(content="Delete me", role="user"),
]
key = "test_delete_message_key"
yugabytedb_chat_store.set_messages(key, messages)
yugabytedb_chat_store.delete_message(key, 1)
retrieved_messages = yugabytedb_chat_store.get_messages(key)
assert len(retrieved_messages) == 1
assert retrieved_messages[0].content == "Keep me"
def test_get_keys(yugabytedb_chat_store: YugabyteDBChatStore):
# Add some test data
yugabytedb_chat_store.set_messages(
"key1", [ChatMessage(content="Test1", role="user")]
)
yugabytedb_chat_store.set_messages(
"key2", [ChatMessage(content="Test2", role="user")]
)
keys = yugabytedb_chat_store.get_keys()
assert "key1" in keys
assert "key2" in keys
def test_delete_last_message(yugabytedb_chat_store: YugabyteDBChatStore):
key = "test_delete_last_message"
messages = [
ChatMessage(content="First message", role="user"),
ChatMessage(content="Last message", role="user"),
]
yugabytedb_chat_store.set_messages(key, messages)
deleted_message = yugabytedb_chat_store.delete_last_message(key)
assert deleted_message.content == "Last message"
remaining_messages = yugabytedb_chat_store.get_messages(key)
assert len(remaining_messages) == 1
assert remaining_messages[0].content == "First message"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/chat_store/llama-index-storage-chat-store-yugabytedb/tests/test_chat_store_yugabytedb_chat_store.py",
"license": "MIT License",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/agent/workflow/test_thinking_delta.py | """Tests for thinking_delta functionality in agents."""
import pytest
from typing import Any, AsyncGenerator, List, Optional
from unittest.mock import AsyncMock
from llama_index.core.base.llms.types import ChatMessage, ChatResponse, LLMMetadata
from llama_index.core.llms import MockLLM
from llama_index.core.agent.workflow import FunctionAgent
from llama_index.core.agent.workflow.codeact_agent import CodeActAgent
from llama_index.core.agent.workflow.react_agent import ReActAgent
from llama_index.core.agent.workflow.workflow_events import AgentStream
from llama_index.core.workflow import Context
class MockThinkingLLM(MockLLM):
"""Mock LLM that supports thinking_delta in responses."""
    def __init__(
        self,
        thinking_deltas: Optional[List[str]] = None,
        response_deltas: Optional[List[str]] = None,
    ):
super().__init__()
self._thinking_deltas = thinking_deltas or [
"",
"I need to think about this...",
" Let me consider the options.",
]
self._response_deltas = response_deltas or ["Hello", " there", "!"]
self._response_index = 0
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(is_function_calling_model=True)
async def astream_chat_with_tools(
self, tools: List[Any], chat_history: List[ChatMessage], **kwargs: Any
) -> AsyncGenerator[ChatResponse, None]:
"""Stream chat responses with thinking_delta."""
async def _gen():
for i in range(len(self._response_deltas)):
response_delta = self._response_deltas[i]
thinking_delta = self._thinking_deltas[i]
yield ChatResponse(
message=ChatMessage(
role="assistant",
content=response_delta,
),
delta=response_delta,
additional_kwargs={"thinking_delta": thinking_delta},
raw={
"message": {
"content": response_delta,
"thinking": thinking_delta,
}
},
)
return _gen()
async def astream_chat(
self, messages: List[ChatMessage], **kwargs: Any
) -> AsyncGenerator[ChatResponse, None]:
"""Stream chat responses for CodeActAgent and ReActAgent."""
async def _gen():
for i in range(len(self._response_deltas)):
response_delta = self._response_deltas[i]
thinking_delta = self._thinking_deltas[i]
yield ChatResponse(
message=ChatMessage(
role="assistant",
content=response_delta,
),
delta=response_delta,
additional_kwargs={"thinking_delta": thinking_delta},
raw={
"message": {
"content": response_delta,
"thinking": thinking_delta,
}
},
)
return _gen()
def get_tool_calls_from_response(
self, response: ChatResponse, error_on_no_tool_call: bool = False
):
"""Mock method for getting tool calls from response."""
return []
def test_agent_stream_with_thinking_delta():
"""Test AgentStream creation and serialization with thinking_delta."""
stream = AgentStream(
delta="Hello",
response="Hello there",
current_agent_name="test_agent",
thinking_delta="I'm thinking about this response...",
)
assert stream.delta == "Hello"
assert stream.response == "Hello there"
assert stream.thinking_delta == "I'm thinking about this response..."
assert stream.current_agent_name == "test_agent"
def test_agent_stream_default_thinking_delta_none():
"""
    Test that an AgentStream with thinking_delta=None does not raise a Pydantic validation error.
For Ollama, thinking_delta comes from the message's thinking field, which can be None.
"""
stream = AgentStream(
delta="Hello",
response="Hello there",
current_agent_name="test_agent",
thinking_delta=None,
)
assert stream.thinking_delta is None
def test_agent_stream_default_thinking_delta():
"""Test AgentStream defaults thinking_delta to None."""
stream = AgentStream(
delta="Hello", response="Hello there", current_agent_name="test_agent"
)
assert stream.thinking_delta is None
def test_thinking_delta_extraction():
"""Test that thinking_delta is correctly extracted from ChatResponse additional_kwargs."""
from llama_index.core.base.llms.types import ChatResponse, ChatMessage
# should have thinking_delta present
response_with_thinking = ChatResponse(
message=ChatMessage(role="assistant", content="Hello"),
delta="Hello",
additional_kwargs={"thinking_delta": "I'm thinking..."},
)
thinking_delta = response_with_thinking.additional_kwargs.get("thinking_delta", "")
assert thinking_delta == "I'm thinking..."
# should default to None
response_without_thinking = ChatResponse(
message=ChatMessage(role="assistant", content="Hello"),
delta="Hello",
additional_kwargs={},
)
thinking_delta = response_without_thinking.additional_kwargs.get(
"thinking_delta", None
)
assert thinking_delta is None
@pytest.mark.asyncio
async def test_streaming_an_agent_with_thinking_delta_none():
"""Test an agent runs properly with thinking_delta value of None"""
mock_llm = MockThinkingLLM(thinking_deltas=[None], response_deltas=[None])
agent = FunctionAgent(llm=mock_llm, streaming=True)
# Mock context to capture stream events
mock_context = AsyncMock(spec=Context)
stream_events = []
def capture_event(event):
stream_events.append(event)
mock_context.write_event_to_stream.side_effect = capture_event
# Call the streaming method
await agent._get_streaming_response(
mock_context, [ChatMessage(role="user", content="test")], []
)
# Verify AgentStream events contain thinking_delta
agent_streams = [event for event in stream_events if isinstance(event, AgentStream)]
    assert len(agent_streams) == 1  # one delta from the mock
# Check that thinking deltas are passed through correctly
assert agent_streams[0].thinking_delta is None
@pytest.mark.asyncio
async def test_function_agent_comprehensive_thinking_streaming():
"""Comprehensive test: FunctionAgent streams thinking_delta correctly."""
mock_llm = MockThinkingLLM()
agent = FunctionAgent(llm=mock_llm, streaming=True)
# Mock context to capture stream events
mock_context = AsyncMock(spec=Context)
stream_events = []
def capture_event(event):
stream_events.append(event)
mock_context.write_event_to_stream.side_effect = capture_event
# Call the streaming method
await agent._get_streaming_response(
mock_context, [ChatMessage(role="user", content="test")], []
)
# Verify AgentStream events contain thinking_delta
agent_streams = [event for event in stream_events if isinstance(event, AgentStream)]
assert len(agent_streams) == 3 # 3 deltas from mock
# Check that thinking deltas are passed through correctly
assert agent_streams[0].thinking_delta == ""
assert agent_streams[1].thinking_delta == "I need to think about this..."
assert agent_streams[2].thinking_delta == " Let me consider the options."
# Verify other fields are still correct
assert agent_streams[0].delta == "Hello"
assert agent_streams[1].delta == " there"
assert agent_streams[2].delta == "!"
@pytest.mark.asyncio
async def test_codeact_agent_comprehensive_thinking_streaming():
"""Comprehensive test: CodeActAgent streams thinking_delta correctly."""
def mock_code_execute(code: str):
return {"output": "executed"}
mock_llm = MockThinkingLLM()
agent = CodeActAgent(
llm=mock_llm, code_execute_fn=mock_code_execute, streaming=True
)
# Mock context to capture stream events
mock_context = AsyncMock(spec=Context)
stream_events = []
def capture_event(event):
stream_events.append(event)
mock_context.write_event_to_stream.side_effect = capture_event
# Call the streaming method
await agent._get_streaming_response(
mock_context, [ChatMessage(role="user", content="test")], []
)
# Verify AgentStream events contain thinking_delta
agent_streams = [event for event in stream_events if isinstance(event, AgentStream)]
assert len(agent_streams) == 3 # 3 deltas from mock
# Check that thinking deltas are passed through correctly
assert agent_streams[0].thinking_delta == ""
assert agent_streams[1].thinking_delta == "I need to think about this..."
assert agent_streams[2].thinking_delta == " Let me consider the options."
@pytest.mark.asyncio
async def test_react_agent_comprehensive_thinking_streaming():
"""Comprehensive test: ReActAgent streams thinking_delta correctly."""
mock_llm = MockThinkingLLM()
agent = ReActAgent(llm=mock_llm, streaming=True)
# Mock context to capture stream events
mock_context = AsyncMock(spec=Context)
mock_context.is_running = True # Required for event writing
stream_events = []
def capture_event(event):
stream_events.append(event)
mock_context.write_event_to_stream.side_effect = capture_event
# Call the streaming method
await agent._get_streaming_response(
mock_context, [ChatMessage(role="user", content="test")]
)
# Verify AgentStream events contain thinking_delta
agent_streams = [event for event in stream_events if isinstance(event, AgentStream)]
assert len(agent_streams) == 3 # 3 deltas from mock
# Check that thinking deltas are passed through correctly
assert agent_streams[0].thinking_delta == ""
assert agent_streams[1].thinking_delta == "I need to think about this..."
assert agent_streams[2].thinking_delta == " Let me consider the options."
@pytest.mark.asyncio
async def test_agents_handle_missing_thinking_delta():
"""Test all agents handle LLMs without thinking_delta gracefully."""
class MockNonThinkingLLM(MockLLM):
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(is_function_calling_model=True)
async def astream_chat_with_tools(
self, tools: List[Any], chat_history: List[ChatMessage], **kwargs: Any
) -> AsyncGenerator[ChatResponse, None]:
async def _gen():
yield ChatResponse(
message=ChatMessage(role="assistant", content="Hello!"),
delta="Hello!",
additional_kwargs={}, # No thinking_delta
raw={"message": {"content": "Hello!"}},
)
return _gen()
def get_tool_calls_from_response(
self, response: ChatResponse, error_on_no_tool_call: bool = False
):
"""Mock method for getting tool calls from response."""
return []
# Test FunctionAgent
mock_llm = MockNonThinkingLLM()
agent = FunctionAgent(llm=mock_llm, streaming=True)
mock_context = AsyncMock(spec=Context)
stream_events = []
mock_context.write_event_to_stream.side_effect = lambda event: stream_events.append(
event
)
await agent._get_streaming_response(
mock_context, [ChatMessage(role="user", content="test")], []
)
agent_streams = [event for event in stream_events if isinstance(event, AgentStream)]
assert len(agent_streams) == 1
assert agent_streams[0].thinking_delta is None # Should default to None
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/agent/workflow/test_thinking_delta.py",
"license": "MIT License",
"lines": 261,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/zenrows_web/base.py | """ZenRows Web Reader."""
import json
import time
from typing import Any, Dict, List, Literal, Optional, Union
import requests
from llama_index.core.bridge.pydantic import Field, PrivateAttr, field_validator
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class ZenRowsWebReader(BasePydanticReader):
"""
ZenRows Web Reader.
Read web pages using ZenRows Universal Scraper API with advanced features like:
- JavaScript rendering for dynamic content
- Anti-bot bypass
- Premium residential proxies with geo-location
- Custom headers and session management
- Advanced data extraction with CSS selectors
- Multiple output formats (HTML, Markdown, Text, PDF)
- Screenshot capabilities
Args:
api_key (str): ZenRows API key. Get one at https://app.zenrows.com/register
js_render (Optional[bool]): Enable JavaScript rendering with a headless browser. Default False.
js_instructions (Optional[str]): Execute custom JavaScript on the page to interact with elements.
premium_proxy (Optional[bool]): Use residential IPs to bypass anti-bot protection. Default False.
proxy_country (Optional[str]): Set the country of the IP used for the request (requires Premium Proxies).
session_id (Optional[int]): Maintain the same IP for multiple requests for up to 10 minutes.
custom_headers (Optional[Dict[str, str]]): Include custom headers in your request to mimic browser behavior.
wait_for (Optional[str]): Wait for a specific CSS Selector to appear in the DOM before returning content.
        wait (Optional[int]): Wait a fixed number of milliseconds after page load.
block_resources (Optional[str]): Block specific resources (images, fonts, etc.) from loading.
response_type (Optional[Literal["markdown", "plaintext", "pdf"]]): Convert HTML to other formats.
css_extractor (Optional[str]): Extract specific elements using CSS selectors (JSON format).
autoparse (Optional[bool]): Automatically extract structured data from HTML. Default False.
screenshot (Optional[str]): Capture an above-the-fold screenshot of the page.
screenshot_fullpage (Optional[str]): Capture a full-page screenshot.
screenshot_selector (Optional[str]): Capture a screenshot of a specific element using CSS Selector.
original_status (Optional[bool]): Return the original HTTP status code from the target page. Default False.
allowed_status_codes (Optional[str]): Returns content even if target page fails with specified status codes.
json_response (Optional[bool]): Capture network requests in JSON format. Default False.
screenshot_format (Optional[Literal["png", "jpeg"]]): Choose between png and jpeg formats for screenshots.
screenshot_quality (Optional[int]): For JPEG format, set quality from 1 to 100.
outputs (Optional[str]): Specify which data types to extract from the scraped HTML.
"""
is_remote: bool = True
api_key: str = Field(description="ZenRows API key")
js_render: Optional[bool] = Field(
default=False,
description="Enable JavaScript rendering with a headless browser. Essential for modern web apps, SPAs, and sites with dynamic content.",
)
js_instructions: Optional[str] = Field(
default=None,
description="Execute custom JavaScript on the page to interact with elements, scroll, click buttons, or manipulate content.",
)
premium_proxy: Optional[bool] = Field(
default=False,
description="Use residential IPs to bypass anti-bot protection. Essential for accessing protected sites.",
)
proxy_country: Optional[str] = Field(
default=None,
description="Set the country of the IP used for the request (requires Premium Proxies). Use for accessing geo-restricted content.",
)
session_id: Optional[int] = Field(
default=None,
description="Maintain the same IP for multiple requests for up to 10 minutes. Essential for multi-step processes.",
)
custom_headers: Optional[Dict[str, str]] = Field(
default=None,
description="Include custom headers in your request to mimic browser behavior.",
)
wait_for: Optional[str] = Field(
default=None,
description="Wait for a specific CSS Selector to appear in the DOM before returning content.",
)
wait: Optional[int] = Field(
default=None, description="Wait a fixed amount of milliseconds after page load."
)
block_resources: Optional[str] = Field(
default=None,
description="Block specific resources (images, fonts, etc.) from loading to speed up scraping.",
)
response_type: Optional[Literal["markdown", "plaintext", "pdf"]] = Field(
default=None,
description="Convert HTML to other formats. Options: markdown, plaintext, pdf.",
)
css_extractor: Optional[str] = Field(
default=None,
description="Extract specific elements using CSS selectors (JSON format).",
)
autoparse: Optional[bool] = Field(
default=False, description="Automatically extract structured data from HTML."
)
screenshot: Optional[str] = Field(
default=None, description="Capture an above-the-fold screenshot of the page."
)
screenshot_fullpage: Optional[str] = Field(
default=None, description="Capture a full-page screenshot."
)
screenshot_selector: Optional[str] = Field(
default=None,
description="Capture a screenshot of a specific element using CSS Selector.",
)
original_status: Optional[bool] = Field(
default=False,
description="Return the original HTTP status code from the target page.",
)
allowed_status_codes: Optional[str] = Field(
default=None,
description="Returns the content even if the target page fails with specified status codes.",
)
json_response: Optional[bool] = Field(
default=False,
description="Capture network requests in JSON format, including XHR or Fetch data.",
)
screenshot_format: Optional[Literal["png", "jpeg"]] = Field(
default=None,
description="Choose between png (default) and jpeg formats for screenshots.",
)
screenshot_quality: Optional[int] = Field(
default=None,
description="For JPEG format, set quality from 1 to 100.",
)
outputs: Optional[str] = Field(
default=None,
description="Specify which data types to extract from the scraped HTML.",
)
_base_url: str = PrivateAttr(default="https://api.zenrows.com/v1/")
@field_validator("css_extractor")
@classmethod
def validate_css_extractor(cls, v):
"""Validate that css_extractor is valid JSON if provided."""
if v is not None:
try:
json.loads(v)
except json.JSONDecodeError:
raise ValueError("css_extractor must be valid JSON")
return v
@field_validator("proxy_country")
@classmethod
def validate_proxy_country(cls, v):
"""Validate that proxy_country is a two-letter country code."""
if v is not None and len(v) != 2:
raise ValueError("proxy_country must be a two-letter country code")
return v
def __init__(self, **kwargs):
"""Initialize ZenRows Web Reader."""
super().__init__(**kwargs)
if not self.api_key:
raise ValueError(
"ZenRows API key is required. Get one at https://app.zenrows.com/register"
)
@classmethod
def class_name(cls) -> str:
"""Get the name identifier of the class."""
return "ZenRowsWebReader"
def _prepare_request_params(
self, url: str, extra_params: Optional[Dict] = None
) -> tuple[Dict[str, Any], Optional[Dict[str, str]]]:
"""Prepare request parameters for ZenRows API."""
params = {"url": url, "apikey": self.api_key}
# Add all configured parameters
if self.js_render:
params["js_render"] = self.js_render
if self.js_instructions:
params["js_instructions"] = self.js_instructions
if self.premium_proxy:
params["premium_proxy"] = self.premium_proxy
if self.proxy_country:
params["proxy_country"] = self.proxy_country
if self.session_id:
params["session_id"] = self.session_id
if self.wait_for:
params["wait_for"] = self.wait_for
if self.wait:
params["wait"] = self.wait
if self.block_resources:
params["block_resources"] = self.block_resources
if self.response_type:
params["response_type"] = self.response_type
if self.css_extractor:
params["css_extractor"] = self.css_extractor
if self.autoparse:
params["autoparse"] = self.autoparse
if self.screenshot:
params["screenshot"] = self.screenshot
if self.screenshot_fullpage:
params["screenshot_fullpage"] = self.screenshot_fullpage
if self.screenshot_selector:
params["screenshot_selector"] = self.screenshot_selector
if self.original_status:
params["original_status"] = self.original_status
if self.allowed_status_codes:
params["allowed_status_codes"] = self.allowed_status_codes
if self.json_response:
params["json_response"] = self.json_response
if self.screenshot_format:
params["screenshot_format"] = self.screenshot_format
if self.screenshot_quality:
params["screenshot_quality"] = self.screenshot_quality
if self.outputs:
params["outputs"] = self.outputs
# Add any extra parameters for this specific request
if extra_params:
params.update(extra_params)
# Auto-enable js_render for parameters that require JavaScript rendering
js_required_params = [
"screenshot",
"screenshot_fullpage",
"screenshot_selector",
"js_instructions",
"json_response",
"wait",
"wait_for",
]
js_required = any(params.get(param) for param in js_required_params)
if js_required:
params["js_render"] = True
# Special handling for screenshot variants
screenshot_variants = ["screenshot_fullpage", "screenshot_selector"]
if any(params.get(param) for param in screenshot_variants):
params["screenshot"] = "true"
# Auto-enable premium_proxy when proxy_country is specified
if params.get("proxy_country"):
params["premium_proxy"] = True
# Handle custom headers
request_headers = None
if "custom_headers" in params and params["custom_headers"]:
# Store the headers dictionary for the request
request_headers = params["custom_headers"]
# Set custom_headers to "true" to enable custom header support in the API
params["custom_headers"] = "true"
elif self.custom_headers:
request_headers = self.custom_headers
params["custom_headers"] = "true"
else:
# Remove custom_headers if not provided or empty
params.pop("custom_headers", None)
# Remove None values to avoid sending unnecessary parameters
params = {k: v for k, v in params.items() if v is not None}
return params, request_headers
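    # Note (illustrative): parameters like screenshot_fullpage, js_instructions,
    # wait, or json_response need a headless browser, so _prepare_request_params
    # auto-enables js_render (and screenshot for the screenshot variants);
    # callers do not have to toggle those flags themselves.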
def _make_request(
self, url: str, extra_params: Optional[Dict] = None
) -> requests.Response:
"""Make request to ZenRows API."""
params, request_headers = self._prepare_request_params(url, extra_params)
response = requests.get(
self._base_url,
params=params,
headers=request_headers,
)
response.raise_for_status()
return response
def _extract_metadata(
self, response: requests.Response, url: str
) -> Dict[str, Any]:
"""Extract metadata from ZenRows response."""
metadata = {
"source_url": url,
"scraped_at": time.time(),
}
# Extract ZenRows specific headers
if "X-Request-Cost" in response.headers:
metadata["request_cost"] = float(response.headers["X-Request-Cost"])
if "X-Request-Id" in response.headers:
metadata["request_id"] = response.headers["X-Request-Id"]
if "Zr-Final-Url" in response.headers:
metadata["final_url"] = response.headers["Zr-Final-Url"]
if "Concurrency-Remaining" in response.headers:
metadata["concurrency_remaining"] = int(
response.headers["Concurrency-Remaining"]
)
if "Concurrency-Limit" in response.headers:
metadata["concurrency_limit"] = int(response.headers["Concurrency-Limit"])
# Add response info
metadata["status_code"] = response.status_code
metadata["content_type"] = response.headers.get("Content-Type", "")
metadata["content_length"] = len(response.content)
# Add scraping configuration used
metadata["zenrows_config"] = {
"js_render": self.js_render,
"premium_proxy": self.premium_proxy,
"proxy_country": self.proxy_country,
"session_id": self.session_id,
"response_type": self.response_type,
}
return metadata
    def _process_response_content(
        self, response: requests.Response
    ) -> Union[str, bytes]:
        """Return raw screenshot bytes when applicable, otherwise response text."""
# Handle screenshot responses
screenshot_params = ["screenshot", "screenshot_fullpage", "screenshot_selector"]
if any(getattr(self, param, None) for param in screenshot_params):
return response.content
# For all other responses, return text
return response.text
def load_data(
self, urls: Union[str, List[str]], extra_params: Optional[Dict] = None, **kwargs
) -> List[Document]:
"""
Load data from URLs using ZenRows API.
Args:
urls: Single URL string or list of URLs to scrape
extra_params: Additional parameters for this specific request
**kwargs: Additional keyword arguments (for compatibility)
Returns:
List of Document objects containing scraped content and metadata
"""
if isinstance(urls, str):
urls = [urls]
documents = []
for url in urls:
try:
response = self._make_request(url, extra_params)
content = self._process_response_content(response)
metadata = self._extract_metadata(response, url)
# Create document
document = Document(
text=content,
metadata=metadata,
)
documents.append(document)
except Exception as e:
# Create error document for failed URLs
error_metadata = {
"source_url": url,
"error": str(e),
"scraped_at": time.time(),
"status": "failed",
}
error_document = Document(
text=f"Error scraping {url}: {e!s}",
metadata=error_metadata,
)
documents.append(error_document)
return documents
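# --- Usage sketch (illustrative only) ---
# Hedged example; the API key is a placeholder and a real request needs
# network access plus a valid ZenRows account.
def _example_zenrows_reader() -> None:
    reader = ZenRowsWebReader(
        api_key="zr_placeholder_key",
        js_render=True,
        response_type="markdown",
    )
    for doc in reader.load_data("https://example.com"):
        print(doc.metadata.get("source_url"), len(doc.text))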
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/zenrows_web/base.py",
"license": "MIT License",
"lines": 333,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-web/tests/test_zenrows_web.py | """Tests for ZenRowsWebReader."""
from unittest.mock import Mock, patch
import pytest
import requests
from llama_index.readers.web import ZenRowsWebReader
from llama_index.core.schema import Document
@pytest.fixture
def api_key() -> str:
"""Test API key fixture."""
return "test_api_key_123"
@pytest.fixture
def test_url() -> str:
"""Test URL fixture."""
return "https://example.com"
@pytest.fixture
def mock_html_response() -> str:
"""Mock HTML response content."""
return """
<html>
<head><title>Test Page</title></head>
<body>
<h1>Welcome to Test Page</h1>
<p>This is a test paragraph.</p>
</body>
</html>
"""
@pytest.fixture
def mock_json_response() -> dict:
"""Mock JSON response content."""
return {
"title": "Test Page",
"content": "This is test content",
"links": ["https://example.com/link1", "https://example.com/link2"],
}
@pytest.fixture
def mock_screenshot_response() -> bytes:
"""Mock screenshot response content."""
return b"fake_screenshot_data"
class TestZenRowsWebReader:
"""Test cases for ZenRowsWebReader."""
def test_init_with_api_key(self, api_key):
"""Test initialization with valid API key."""
reader = ZenRowsWebReader(api_key=api_key)
assert reader.api_key == api_key
assert reader.js_render is False
assert reader.premium_proxy is False
def test_init_without_api_key(self):
"""Test initialization without API key raises error."""
with pytest.raises(ValueError, match="ZenRows API key is required"):
ZenRowsWebReader(api_key="")
def test_init_with_custom_params(self, api_key):
"""Test initialization with custom parameters."""
custom_headers = {"User-Agent": "TestAgent"}
reader = ZenRowsWebReader(
api_key=api_key,
js_render=True,
premium_proxy=True,
proxy_country="US",
custom_headers=custom_headers,
wait=5000,
response_type="markdown",
)
assert reader.js_render is True
assert reader.premium_proxy is True
assert reader.proxy_country == "US"
assert reader.custom_headers == custom_headers
assert reader.wait == 5000
assert reader.response_type == "markdown"
@patch("requests.get")
def test_load_data_basic(self, mock_get, api_key, test_url, mock_html_response):
"""Test basic load_data functionality."""
# Mock response
mock_response = Mock()
mock_response.status_code = 200
mock_response.text = mock_html_response
mock_response.content = mock_html_response.encode()
mock_response.headers = {
"Content-Type": "text/html",
"X-Request-Cost": "1.0",
"X-Request-Id": "test_request_123",
}
mock_response.raise_for_status.return_value = None
mock_get.return_value = mock_response
reader = ZenRowsWebReader(api_key=api_key)
documents = reader.load_data(test_url)
assert len(documents) == 1
assert isinstance(documents[0], Document)
assert documents[0].text == mock_html_response
assert documents[0].metadata["source_url"] == test_url
assert documents[0].metadata["request_cost"] == 1.0
assert documents[0].metadata["request_id"] == "test_request_123"
@patch("requests.get")
def test_load_data_multiple_urls(self, mock_get, api_key, mock_html_response):
"""Test load_data with multiple URLs."""
urls = ["https://example1.com", "https://example2.com"]
mock_response = Mock()
mock_response.status_code = 200
mock_response.text = mock_html_response
mock_response.content = mock_html_response.encode()
mock_response.headers = {"Content-Type": "text/html"}
mock_response.raise_for_status.return_value = None
mock_get.return_value = mock_response
reader = ZenRowsWebReader(api_key=api_key)
documents = reader.load_data(urls)
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].metadata["source_url"] == urls[0]
assert documents[1].metadata["source_url"] == urls[1]
@patch("requests.get")
def test_load_data_with_custom_headers(
self, mock_get, api_key, test_url, mock_html_response
):
"""Test load_data with custom headers."""
custom_headers = {"User-Agent": "TestAgent", "Authorization": "Bearer token"}
mock_response = Mock()
mock_response.status_code = 200
mock_response.text = mock_html_response
mock_response.content = mock_html_response.encode()
mock_response.headers = {"Content-Type": "text/html"}
mock_response.raise_for_status.return_value = None
mock_get.return_value = mock_response
# Test instance-level custom headers
reader = ZenRowsWebReader(api_key=api_key, custom_headers=custom_headers)
documents = reader.load_data(test_url)
# Verify request was made with custom headers
mock_get.assert_called_once()
call_args = mock_get.call_args
assert call_args[1]["headers"] == custom_headers
# Test per-request custom headers via extra_params
mock_get.reset_mock()
reader2 = ZenRowsWebReader(api_key=api_key)
per_request_headers = {"User-Agent": "PerRequestAgent"}
documents = reader2.load_data(
test_url, extra_params={"custom_headers": per_request_headers}
)
call_args = mock_get.call_args
assert call_args[1]["headers"] == per_request_headers
@patch("requests.get")
def test_load_data_with_js_render(
self, mock_get, api_key, test_url, mock_html_response
):
"""Test load_data with JavaScript rendering enabled."""
mock_response = Mock()
mock_response.status_code = 200
mock_response.text = mock_html_response
mock_response.content = mock_html_response.encode()
mock_response.headers = {"Content-Type": "text/html"}
mock_response.raise_for_status.return_value = None
mock_get.return_value = mock_response
reader = ZenRowsWebReader(
api_key=api_key, js_render=True, wait=3000, wait_for=".content"
)
documents = reader.load_data(test_url)
# Verify the request parameters include JS rendering options
call_args = mock_get.call_args
params = call_args[1]["params"]
assert params["js_render"] is True
assert params["wait"] == 3000
assert params["wait_for"] == ".content"
@patch("requests.get")
def test_load_data_with_premium_proxy(
self, mock_get, api_key, test_url, mock_html_response
):
"""Test load_data with premium proxy and geo-location."""
mock_response = Mock()
mock_response.status_code = 200
mock_response.text = mock_html_response
mock_response.content = mock_html_response.encode()
mock_response.headers = {"Content-Type": "text/html"}
mock_response.raise_for_status.return_value = None
mock_get.return_value = mock_response
reader = ZenRowsWebReader(
api_key=api_key, premium_proxy=True, proxy_country="GB"
)
documents = reader.load_data(test_url)
# Verify the request parameters include proxy options
call_args = mock_get.call_args
params = call_args[1]["params"]
assert params["premium_proxy"] is True
assert params["proxy_country"] == "GB"
@patch("requests.get")
def test_load_data_error_handling(self, mock_get, api_key, test_url):
"""Test error handling in load_data."""
# Mock a failed request
mock_get.side_effect = requests.exceptions.RequestException("Connection failed")
reader = ZenRowsWebReader(api_key=api_key)
documents = reader.load_data(test_url)
# Should return error document instead of raising exception
assert len(documents) == 1
assert "Error scraping" in documents[0].text
assert documents[0].metadata["status"] == "failed"
assert documents[0].metadata["source_url"] == test_url
@patch("requests.get")
def test_load_data_with_extra_params(
self, mock_get, api_key, test_url, mock_html_response
):
"""Test load_data with extra parameters."""
mock_response = Mock()
mock_response.status_code = 200
mock_response.text = mock_html_response
mock_response.content = mock_html_response.encode()
mock_response.headers = {"Content-Type": "text/html"}
mock_response.raise_for_status.return_value = None
mock_get.return_value = mock_response
reader = ZenRowsWebReader(api_key=api_key)
extra_params = {
"css_extractor": '{"title": "h1", "content": "p"}',
"autoparse": True,
"block_resources": "images,fonts",
}
documents = reader.load_data(test_url, extra_params=extra_params)
# Verify extra parameters were included in the request
call_args = mock_get.call_args
params = call_args[1]["params"]
assert params["css_extractor"] == '{"title": "h1", "content": "p"}'
assert params["autoparse"] is True
assert params["block_resources"] == "images,fonts"
def test_css_extractor_validation(self, api_key):
"""Test CSS extractor validation."""
# Valid JSON should work
reader = ZenRowsWebReader(
api_key=api_key, css_extractor='{"title": "h1", "content": "p"}'
)
assert reader.css_extractor == '{"title": "h1", "content": "p"}'
# Invalid JSON should raise error
with pytest.raises(ValueError, match="css_extractor must be valid JSON"):
ZenRowsWebReader(api_key=api_key, css_extractor="invalid json")
def test_proxy_country_validation(self, api_key):
"""Test proxy country validation."""
# Valid two-letter country code should work
reader = ZenRowsWebReader(api_key=api_key, proxy_country="US")
assert reader.proxy_country == "US"
# Invalid country code should raise error
with pytest.raises(
ValueError, match="proxy_country must be a two-letter country code"
):
ZenRowsWebReader(api_key=api_key, proxy_country="USA")
def test_class_name(self, api_key):
"""Test class name method."""
reader = ZenRowsWebReader(api_key=api_key)
assert reader.class_name() == "ZenRowsWebReader"
@patch("requests.get")
def test_metadata_extraction(self, mock_get, api_key, test_url, mock_html_response):
"""Test metadata extraction from response headers."""
mock_response = Mock()
mock_response.status_code = 200
mock_response.text = mock_html_response
mock_response.content = mock_html_response.encode()
mock_response.headers = {
"Content-Type": "text/html",
"X-Request-Cost": "2.5",
"X-Request-Id": "req_123456",
"Zr-Final-Url": "https://example.com/final",
"Concurrency-Remaining": "10",
"Concurrency-Limit": "100",
}
mock_response.raise_for_status.return_value = None
mock_get.return_value = mock_response
reader = ZenRowsWebReader(api_key=api_key, js_render=True)
documents = reader.load_data(test_url)
metadata = documents[0].metadata
assert metadata["request_cost"] == 2.5
assert metadata["request_id"] == "req_123456"
assert metadata["final_url"] == "https://example.com/final"
assert metadata["concurrency_remaining"] == 10
assert metadata["concurrency_limit"] == 100
assert metadata["status_code"] == 200
assert metadata["content_type"] == "text/html"
assert metadata["zenrows_config"]["js_render"] is True
@patch("requests.get")
def test_auto_js_render_enablement(
self, mock_get, api_key, test_url, mock_html_response
):
"""Test automatic JS render enablement for certain parameters."""
mock_response = Mock()
mock_response.status_code = 200
mock_response.text = mock_html_response
mock_response.content = mock_html_response.encode()
mock_response.headers = {"Content-Type": "text/html"}
mock_response.raise_for_status.return_value = None
mock_get.return_value = mock_response
# Test with screenshot parameter (should auto-enable js_render)
reader = ZenRowsWebReader(api_key=api_key, screenshot="true")
documents = reader.load_data(test_url)
call_args = mock_get.call_args
params = call_args[1]["params"]
assert params["js_render"] is True # Should be auto-enabled
assert params["screenshot"] == "true"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-web/tests/test_zenrows_web.py",
"license": "MIT License",
"lines": 288,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/memory/test_memory_schema.py | """Tests for Memory class schema functionality."""
import pytest
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.memory.memory import Memory
from llama_index.core.storage.chat_store.sql import SQLAlchemyChatStore
class TestMemorySchema:
"""Test schema functionality in Memory class."""
def test_from_defaults_schema_parameter(self):
"""Test Memory.from_defaults with and without schema parameter."""
# Without schema
memory_no_schema = Memory.from_defaults(
token_limit=1000,
table_name="test_memory",
)
assert memory_no_schema.sql_store.db_schema is None
assert memory_no_schema.sql_store.table_name == "test_memory"
# With schema
memory_with_schema = Memory.from_defaults(
token_limit=1000,
table_name="test_memory",
db_schema="test_schema",
)
assert memory_with_schema.sql_store.db_schema == "test_schema"
assert memory_with_schema.sql_store.table_name == "test_memory"
def test_schema_parameter_passing(self):
"""Test that schema parameter is correctly passed to SQLAlchemyChatStore."""
memory = Memory.from_defaults(
table_name="param_test",
async_database_uri="postgresql+asyncpg://user:pass@host/db",
db_schema="param_schema",
)
# Verify the SQL store is correctly configured
assert isinstance(memory.sql_store, SQLAlchemyChatStore)
assert memory.sql_store.db_schema == "param_schema"
assert memory.sql_store.table_name == "param_test"
assert (
memory.sql_store.async_database_uri
== "postgresql+asyncpg://user:pass@host/db"
)
@pytest.mark.asyncio
async def test_memory_operations_with_schema(self):
"""Test that Memory operations work with schema."""
memory = Memory.from_defaults(
token_limit=1000,
table_name="integration_test",
db_schema="integration_schema",
)
# Add a message
message = ChatMessage(role="user", content="Hello from Memory with schema!")
await memory.aput(message)
# Retrieve messages
messages = await memory.aget()
assert len(messages) >= 1
# Find our message (there might be system messages)
user_messages = [m for m in messages if m.role == "user"]
assert len(user_messages) == 1
assert user_messages[0].content == "Hello from Memory with schema!"
# Verify schema is preserved
assert memory.sql_store.db_schema == "integration_schema"
@pytest.mark.asyncio
async def test_memory_reset_preserves_schema(self):
"""Test that memory reset preserves schema configuration."""
memory = Memory.from_defaults(
token_limit=1000,
table_name="reset_test",
db_schema="reset_schema",
)
# Add a message
await memory.aput(ChatMessage(role="user", content="Before reset"))
# Reset memory
await memory.areset()
# Verify schema is still set
assert memory.sql_store.db_schema == "reset_schema"
# Verify messages are cleared
messages = await memory.aget_all()
user_messages = [m for m in messages if m.role == "user"]
assert len(user_messages) == 0
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/memory/test_memory_schema.py",
"license": "MIT License",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/storage/chat_store/test_sql_schema.py | """Tests for SQLAlchemyChatStore schema functionality."""
import pytest
from unittest.mock import AsyncMock, MagicMock
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.storage.chat_store.sql import SQLAlchemyChatStore
class TestSQLAlchemyChatStoreSchema:
"""Test schema functionality in SQLAlchemyChatStore."""
def test_schema_parameter_initialization(self):
"""Test schema parameter initialization."""
# Without schema
store_no_schema = SQLAlchemyChatStore(
table_name="test_messages",
async_database_uri="sqlite+aiosqlite:///:memory:",
)
assert store_no_schema.db_schema is None
# With schema
store_with_schema = SQLAlchemyChatStore(
table_name="test_messages",
async_database_uri="sqlite+aiosqlite:///:memory:",
db_schema="test_schema",
)
assert store_with_schema.db_schema == "test_schema"
def test_schema_serialization(self):
"""Test that schema is included in serialization."""
store = SQLAlchemyChatStore(
table_name="test_table",
async_database_uri="postgresql+asyncpg://user:pass@host/db",
db_schema="test_schema",
)
# Test dump_store
dumped = store.dump_store()
assert "db_schema" in dumped
assert dumped["db_schema"] == "test_schema"
# Test model serialization
store_dict = store.model_dump()
assert "db_schema" in store_dict
assert store_dict["db_schema"] == "test_schema"
@pytest.mark.asyncio
async def test_postgresql_schema_creation(self):
"""Test that CREATE SCHEMA SQL is called for PostgreSQL."""
store = SQLAlchemyChatStore(
table_name="test_messages",
async_database_uri="postgresql+asyncpg://user:pass@host/db",
db_schema="test_schema",
)
# Mock the engine and connection
async_engine = MagicMock()
async_engine.begin.return_value.__aenter__ = AsyncMock()
async_engine.begin.return_value.__aexit__ = AsyncMock()
mock_conn = MagicMock()
mock_conn.execute = AsyncMock()
mock_conn.run_sync = AsyncMock()
async_engine.begin.return_value.__aenter__.return_value = mock_conn
store._async_engine = async_engine
# Call _setup_tables
await store._setup_tables(async_engine)
# Verify schema creation was called
mock_conn.execute.assert_called()
call_args = mock_conn.execute.call_args_list[0][0][0]
assert 'CREATE SCHEMA IF NOT EXISTS "test_schema"' in str(call_args)
# Verify MetaData has schema
assert store._metadata.schema == "test_schema"
@pytest.mark.asyncio
async def test_sqlite_schema_behavior(self):
"""Test that SQLite preserves schema parameter but doesn't use it in MetaData."""
store = SQLAlchemyChatStore(
table_name="test_messages",
async_database_uri="sqlite+aiosqlite:///:memory:",
db_schema="test_schema",
)
# Add a message to trigger initialization
await store.add_message("test_user", ChatMessage(role="user", content="Hello!"))
# Schema parameter is preserved
assert store.db_schema == "test_schema"
# But MetaData doesn't have schema (SQLite limitation)
assert store._metadata.schema is None
# Operations still work
messages = await store.get_messages("test_user")
assert len(messages) == 1
assert messages[0].content == "Hello!"
def test_is_sqlite_database_with_custom_engine(self):
"""
Test that _is_sqlite_database checks the engine URL when a custom engine is provided.
Regression test for https://github.com/run-llama/llama_index/issues/20746
When a custom async_engine is passed without an explicit async_database_uri,
the URI defaults to SQLite. _is_sqlite_database() should check the actual
engine URL instead of the default URI.
"""
# Simulate a PostgreSQL engine passed without explicit URI
mock_pg_engine = MagicMock()
mock_pg_engine.url = "postgresql+asyncpg://user:pass@host/db"
store = SQLAlchemyChatStore(
table_name="test_messages",
async_engine=mock_pg_engine,
db_schema="test_schema",
)
# The default URI is SQLite, but the engine is PostgreSQL
assert store.async_database_uri.startswith("sqlite")
# _is_sqlite_database should check the engine, not the default URI
assert not store._is_sqlite_database()
def test_is_sqlite_database_with_sqlite_engine(self):
"""Test that _is_sqlite_database returns True for an actual SQLite engine."""
mock_sqlite_engine = MagicMock()
mock_sqlite_engine.url = "sqlite+aiosqlite:///:memory:"
store = SQLAlchemyChatStore(
table_name="test_messages",
async_engine=mock_sqlite_engine,
db_schema="test_schema",
)
assert store._is_sqlite_database()
def test_is_sqlite_database_without_engine(self):
"""Test that _is_sqlite_database falls back to URI when no engine is provided."""
store = SQLAlchemyChatStore(
table_name="test_messages",
async_database_uri="postgresql+asyncpg://user:pass@host/db",
)
assert not store._is_sqlite_database()
store_sqlite = SQLAlchemyChatStore(
table_name="test_messages",
async_database_uri="sqlite+aiosqlite:///:memory:",
)
assert store_sqlite._is_sqlite_database()
@pytest.mark.asyncio
async def test_custom_engine_with_schema_creates_schema(self):
"""
Test that db_schema is respected when a custom non-SQLite engine is provided.
Regression test for https://github.com/run-llama/llama_index/issues/20746
"""
mock_pg_engine = MagicMock()
mock_pg_engine.url = "postgresql+asyncpg://user:pass@host/db"
mock_pg_engine.begin.return_value.__aenter__ = AsyncMock()
mock_pg_engine.begin.return_value.__aexit__ = AsyncMock()
mock_conn = MagicMock()
mock_conn.execute = AsyncMock()
mock_conn.run_sync = AsyncMock()
mock_pg_engine.begin.return_value.__aenter__.return_value = mock_conn
store = SQLAlchemyChatStore(
table_name="test_messages",
async_engine=mock_pg_engine,
db_schema="my_schema",
)
await store._setup_tables(mock_pg_engine)
# Verify schema creation SQL was called
mock_conn.execute.assert_called()
call_args = mock_conn.execute.call_args_list[0][0][0]
assert 'CREATE SCHEMA IF NOT EXISTS "my_schema"' in str(call_args)
assert store._metadata.schema == "my_schema"
@pytest.mark.asyncio
async def test_basic_operations_with_schema(self):
"""Test that basic operations work with schema."""
store = SQLAlchemyChatStore(
table_name="test_messages",
async_database_uri="sqlite+aiosqlite:///:memory:",
db_schema="test_schema",
)
# Add and retrieve message
await store.add_message(
"schema_user", ChatMessage(role="user", content="Hello with schema!")
)
messages = await store.get_messages("schema_user")
assert len(messages) == 1
assert messages[0].content == "Hello with schema!"
# Verify schema is preserved
assert store.db_schema == "test_schema"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/storage/chat_store/test_sql_schema.py",
"license": "MIT License",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
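The schema contract these tests pin down maps to a small amount of user code. A minimal usage sketch, assuming a reachable PostgreSQL instance (the DSN, table name, and schema name are placeholders):

```python
import asyncio

from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.storage.chat_store.sql import SQLAlchemyChatStore


async def main() -> None:
    # On first use the store issues CREATE SCHEMA IF NOT EXISTS "chat"
    # and creates its messages table inside that schema.
    store = SQLAlchemyChatStore(
        table_name="chat_messages",
        async_database_uri="postgresql+asyncpg://user:pass@localhost/db",
        db_schema="chat",
    )
    await store.add_message("user-1", ChatMessage(role="user", content="Hi!"))
    messages = await store.get_messages("user-1")
    print([m.content for m in messages])


asyncio.run(main())
```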
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-chroma/tests/test_mmr.py | import pytest
import chromadb
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core.vector_stores.types import VectorStoreQuery, VectorStoreQueryMode
import uuid
def _build_simple_collection():
client = chromadb.EphemeralClient()
name = f"chroma_mmr_test_{uuid.uuid4().hex[:8]}"
col = client.get_or_create_collection(name)
embeddings = [
[1.0, 0.0, 0.0],
[0.9, 0.1, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.9, 0.1],
[0.0, 0.0, 1.0],
]
ids = [f"id_{i}" for i in range(len(embeddings))]
documents = [f"doc_{i}" for i in range(len(embeddings))]
metadatas = [{"label": f"m{i}"} for i in range(len(embeddings))] # non-empty
col.add(ids=ids, embeddings=embeddings, documents=documents, metadatas=metadatas)
return col
def test_chroma_mmr_happy_path_returns_k_results():
col = _build_simple_collection()
vs = ChromaVectorStore.from_collection(col)
query = VectorStoreQuery(
query_embedding=[1.0, 0.0, 0.0],
similarity_top_k=2,
mode=VectorStoreQueryMode.MMR,
)
# Should not raise and should return exactly top_k results
res = vs.query(query, mmr_threshold=0.5)
# Either nodes or ids must be populated
if res.ids is not None:
assert len(res.ids) == 2
elif res.nodes is not None:
assert len(res.nodes) == 2
else:
pytest.fail("VectorStoreQueryResult must contain ids or nodes")
def test_chroma_mmr_conflicting_prefetch_params_raises():
col = _build_simple_collection()
vs = ChromaVectorStore.from_collection(col)
query = VectorStoreQuery(
query_embedding=[1.0, 0.0, 0.0],
similarity_top_k=2,
mode=VectorStoreQueryMode.MMR,
)
with pytest.raises(ValueError):
vs.query(query, mmr_threshold=0.5, mmr_prefetch_k=16, mmr_prefetch_factor=2.0)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-chroma/tests/test_mmr.py",
"license": "MIT License",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
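As a usage sketch mirroring the happy path above (collection name and embeddings are illustrative), an MMR query can tune its candidate pool with either `mmr_prefetch_k` or `mmr_prefetch_factor`, but not both at once:

```python
import chromadb
from llama_index.core.vector_stores.types import VectorStoreQuery, VectorStoreQueryMode
from llama_index.vector_stores.chroma import ChromaVectorStore

client = chromadb.EphemeralClient()
col = client.get_or_create_collection("mmr_demo")
col.add(
    ids=["a", "b", "c"],
    embeddings=[[1.0, 0.0], [0.9, 0.1], [0.0, 1.0]],
    documents=["doc_a", "doc_b", "doc_c"],
)

vs = ChromaVectorStore.from_collection(col)
query = VectorStoreQuery(
    query_embedding=[1.0, 0.0],
    similarity_top_k=2,
    mode=VectorStoreQueryMode.MMR,
)
# Over-fetch 2x the requested top_k before the diversity re-ranking step.
res = vs.query(query, mmr_threshold=0.5, mmr_prefetch_factor=2.0)
print(res.ids or [n.node_id for n in res.nodes])
```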
run-llama/llama_index:llama-index-integrations/embeddings/llama-index-embeddings-baseten/llama_index/embeddings/baseten/base.py | from typing import Any, Dict, Optional
import httpx
from llama_index.core.constants import DEFAULT_EMBED_BATCH_SIZE
from llama_index.core.bridge.pydantic import Field
from llama_index.core.callbacks import CallbackManager
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.embeddings.openai import OpenAIEmbedding
DEFAULT_API_BASE = (
"https://model-{model_id}.api.baseten.co/environments/production/sync/v1"
)
class BasetenEmbedding(OpenAIEmbedding):
"""
Baseten class for embeddings.
Args:
model_id (str): The Baseten model ID (e.g., "03y7n6e3").
api_key (Optional[str]): The Baseten API key.
embed_batch_size (int): The batch size for embedding calls.
additional_kwargs (Optional[Dict[str, Any]]): Additional kwargs for the API.
max_retries (int): The maximum number of retries to make.
timeout (float): Timeout for each request.
callback_manager (Optional[CallbackManager]): Callback manager for logging.
default_headers (Optional[Dict[str, str]]): Default headers for API requests.
Examples:
```python
from llama_index.embeddings.baseten import BasetenEmbedding
# Using dedicated endpoint
# You can find the model_id in the Baseten dashboard here: https://app.baseten.co/overview
embed_model = BasetenEmbedding(
model_id="MODEL_ID",
api_key="YOUR_API_KEY",
)
# Single embedding
embedding = embed_model.get_text_embedding("Hello, world!")
# Batch embeddings
embeddings = embed_model.get_text_embedding_batch([
"Hello, world!",
"Goodbye, world!"
])
```
"""
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the OpenAI API."
)
api_key: str = Field(description="The Baseten API key.")
api_base: str = Field(default="", description="The base URL for Baseten API.")
api_version: str = Field(default="", description="The version for OpenAI API.")
def __init__(
self,
model_id: str,
dimensions: Optional[int] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
additional_kwargs: Optional[Dict[str, Any]] = None,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
api_version: Optional[str] = None,
max_retries: int = 10,
timeout: float = 60.0,
reuse_client: bool = True,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None,
**kwargs: Any,
) -> None:
# Always use the dedicated endpoint URL format; any api_base argument is overridden
api_base = DEFAULT_API_BASE.format(model_id=model_id)
api_key = get_from_param_or_env("api_key", api_key, "BASETEN_API_KEY")
super().__init__(
model_name=model_id,
dimensions=dimensions,
embed_batch_size=embed_batch_size,
additional_kwargs=additional_kwargs,
api_key=api_key,
api_base=api_base,
api_version=api_version,
max_retries=max_retries,
timeout=timeout,
reuse_client=reuse_client,
callback_manager=callback_manager,
default_headers=default_headers,
http_client=http_client,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "BasetenEmbedding"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/embeddings/llama-index-embeddings-baseten/llama_index/embeddings/baseten/base.py",
"license": "MIT License",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
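A short sketch of the resolution logic in `__init__` above (the model ID and key are placeholders): the base URL is always derived from the model ID via the dedicated-endpoint template, and the key falls back to the `BASETEN_API_KEY` environment variable:

```python
import os

from llama_index.embeddings.baseten import BasetenEmbedding

os.environ["BASETEN_API_KEY"] = "YOUR_API_KEY"  # placeholder

embed_model = BasetenEmbedding(model_id="03y7n6e3")
# -> https://model-03y7n6e3.api.baseten.co/environments/production/sync/v1
print(embed_model.api_base)

# Network call against the deployed model (placeholder credentials above).
vector = embed_model.get_text_embedding("Hello, world!")
print(len(vector))
```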
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-baseten/llama_index/llms/baseten/base.py | from typing import Any, Callable, Dict, List, Optional, Sequence
import aiohttp
from llama_index.core.base.llms.types import (
ChatMessage,
CompletionResponse,
LLMMetadata,
)
from llama_index.core.callbacks import CallbackManager
from llama_index.core.constants import DEFAULT_NUM_OUTPUTS, DEFAULT_TEMPERATURE
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.core.types import BaseOutputParser, PydanticProgramMode
from llama_index.llms.openai import OpenAI
from llama_index.core.bridge.pydantic import Field
from .utils import validate_model_dynamic, get_available_models_dynamic, Model
DEFAULT_SYNC_API_BASE = (
"https://model-{model_id}.api.baseten.co/environments/production/sync/v1"
)
DEFAULT_ASYNC_API_BASE = (
"https://model-{model_id}.api.baseten.co/production/async_predict"
)
MODEL_APIS_BASE = "https://inference.baseten.co/v1/"
class Baseten(OpenAI):
"""
Baseten LLM with support for both dedicated and Model APIs endpoints.
Args:
model_id (str): The Baseten model ID (e.g., "12a3b4c5") or model name (e.g., "deepseek-ai/DeepSeek-V3-0324").
When using model_apis=True, model availability is validated dynamically against the API
with fallback to static validation if the API call fails.
model_apis (bool): If True (default), uses the Model APIs endpoint. If False, uses the dedicated endpoint.
webhook_endpoint (Optional[str]): Webhook endpoint for async operations. If provided, uses async API.
temperature (float): The temperature to use for generation
max_tokens (int): The maximum number of tokens to generate
additional_kwargs (Optional[Dict[str, Any]]): Additional kwargs for the API
max_retries (int): The maximum number of retries to make
api_key (Optional[str]): The Baseten API key
callback_manager (Optional[CallbackManager]): Callback manager for logging
default_headers (Optional[Dict[str, str]]): Default headers for API requests
system_prompt (Optional[str]): System prompt for chat
messages_to_prompt (Optional[Callable]): Function to format messages to prompt
completion_to_prompt (Optional[Callable]): Function to format completion prompt
pydantic_program_mode (PydanticProgramMode): Mode for Pydantic handling
output_parser (Optional[BaseOutputParser]): Parser for model outputs
Examples:
`pip install llama-index-llms-baseten`
```python
from llama_index.llms.baseten import Baseten
# Using the Model APIs endpoint (default behavior)
llm = Baseten(
model_id="deepseek-ai/DeepSeek-V3-0324",
api_key="YOUR_API_KEY",
model_apis=True, # Default
)
response = llm.complete("Hello, world!")
# Using dedicated endpoint (for custom deployed models)
llm = Baseten(
model_id="YOUR_MODEL_ID",
api_key="YOUR_API_KEY",
model_apis=False,
)
response = llm.complete("Hello, world!")
# Asynchronous usage with webhook (dedicated endpoint only)
async_llm = Baseten(
model_id="YOUR_MODEL_ID",
api_key="YOUR_API_KEY",
model_apis=False, # Required for async operations
webhook_endpoint="https://your-webhook.com/baseten-callback"
)
response = await async_llm.acomplete("Hello, world!")
request_id = response.text # Track this ID for webhook response
# Get available models dynamically (Model APIs only)
llm = Baseten(model_id="deepseek-ai/DeepSeek-V3-0324", model_apis=True)
available = llm.available_models # List[Model] - fetched dynamically
model_ids = [model.id for model in available]
print(f"Available models: {model_ids}")
```
"""
webhook_endpoint: Optional[str] = Field(
default=None, description="Webhook endpoint for async operations"
)
model_apis: bool = Field(
default=True,
description="Whether to use the model apis endpoint or the dedicated endpoint",
)
def __init__(
self,
model_id: str,
model_apis: bool = True,
webhook_endpoint: Optional[str] = None,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 10,
api_key: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
# Validate model_id if using model apis endpoint
if model_apis:
# Use dynamic validation with fallback to static validation
# We need to create a temporary client for validation
api_key_temp = get_from_param_or_env("api_key", api_key, "BASETEN_API_KEY")
# Import OpenAI here to avoid circular imports
from openai import OpenAI as OpenAIClient
temp_client = OpenAIClient(
api_key=api_key_temp,
base_url=MODEL_APIS_BASE,
)
validate_model_dynamic(temp_client, model_id)
# Determine API base URL based on endpoint type
if model_apis:
api_base = MODEL_APIS_BASE
else:
api_base = DEFAULT_SYNC_API_BASE.format(model_id=model_id)
api_key = get_from_param_or_env("api_key", api_key, "BASETEN_API_KEY")
super().__init__(
model=model_id, # model_id is either the Baseten model ID or the specific model APIs slug, stored in OpenAI class
temperature=temperature,
max_tokens=max_tokens,
api_base=api_base,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
callback_manager=callback_manager,
default_headers=default_headers,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
# Set webhook endpoint after parent initialization to avoid errors
self.webhook_endpoint = webhook_endpoint
self.model_apis = model_apis
@property
def available_models(self) -> List[Model]:
"""Get available models from Baseten Model APIs."""
if not self.model_apis:
# For dedicated deployments, return current model or empty list
return [Model(id=self.model)] if hasattr(self, "model") else []
# For Model APIs, fetch from the API dynamically
return get_available_models_dynamic(self._get_client())
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "Baseten_LLM"
async def acomplete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
"""Async completion - requires webhook_endpoint for async API."""
if not self.webhook_endpoint:
raise ValueError(
"webhook_endpoint must be provided for async operations with Baseten"
)
if self.model_apis:
raise ValueError(
"Async operations are not supported with model apis endpoints"
)
async with aiohttp.ClientSession() as session:
headers = {"Authorization": f"Api-Key {self.api_key}"}
payload = {
"model_input": {
"prompt": prompt,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
**kwargs,
},
"webhook_endpoint": self.webhook_endpoint,
}
async with session.post(
DEFAULT_ASYNC_API_BASE.format(model_id=self.model),
headers=headers,
json=payload,
) as response:
if response.status not in [200, 201]:
raise Exception(
f"Error from Baseten API: {await response.text()}, Response status: {response.status}"
)
result = await response.json()
request_id = result.get("request_id")
return CompletionResponse(
text=request_id, # Return request_id for tracking
raw=result,
additional_kwargs={"async_request": True},
)
@property
def metadata(self) -> LLMMetadata:
"""Get LLM metadata."""
return LLMMetadata(
num_output=self.max_tokens,
model_name=self.model,
is_chat_model=True, # Use chat completions for model APIs
)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-baseten/llama_index/llms/baseten/base.py",
"license": "MIT License",
"lines": 197,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
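A sketch of the webhook flow that `acomplete` implements (model ID, key, and webhook URL are placeholders): the coroutine returns immediately with a request ID, and Baseten later POSTs the completion to the webhook endpoint:

```python
import asyncio

from llama_index.llms.baseten import Baseten


async def main() -> None:
    llm = Baseten(
        model_id="YOUR_MODEL_ID",
        api_key="YOUR_API_KEY",
        model_apis=False,  # async_predict is only available on dedicated deployments
        webhook_endpoint="https://example.com/baseten-callback",
    )
    response = await llm.acomplete("Hello, world!")
    # `text` carries the request_id used to correlate the webhook payload.
    print(response.text, response.additional_kwargs)


asyncio.run(main())
```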
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-baseten/llama_index/llms/baseten/utils.py | from typing import List
import warnings
from llama_index.core.bridge.pydantic import BaseModel
# https://docs.baseten.co/development/model-apis/overview#supported-models
# Below is the current list of models supported by Baseten model APIs.
# Other dedicated models are also supported, but not listed here.
SUPPORTED_MODEL_SLUGS = [
"deepseek-ai/DeepSeek-R1",
"deepseek-ai/DeepSeek-R1-0528",
"deepseek-ai/DeepSeek-V3-0324",
"deepseek-ai/DeepSeek-V3.1",
"moonshotai/Kimi-k2-instruct-0905",
"Qwen/Qwen3-235B-A22B-Instruct-2507",
"Qwen/Qwen3-Coder-480B-A35B-Instruct",
"openai/gpt-oss-120b",
"ai-org/GLM-4.6",
]
class Model(BaseModel):
"""
Model information for Baseten models.
Args:
id: unique identifier for the model, passed as model parameter for requests
model_type: API type (defaults to "chat")
client: client name
"""
id: str
model_type: str = "chat"
client: str = "Baseten"
def __hash__(self) -> int:
return hash(self.id)
def validate_model_slug(model_id: str) -> None:
"""
Validate that the model_id is a supported model slug for Baseten Model APIs.
Args:
model_id: The model ID to validate
Raises:
ValueError: If the model_id is not a supported model slug
"""
if model_id not in SUPPORTED_MODEL_SLUGS:
raise ValueError(
f"Model '{model_id}' is not supported by Baseten Model APIs. "
f"Supported models are: {', '.join(SUPPORTED_MODEL_SLUGS)}"
)
def is_supported_model_slug(model_id: str) -> bool:
"""
Check if the model_id is a supported model slug for Baseten Model APIs.
Args:
model_id: The model ID to check
Returns:
True if the model_id is supported, False otherwise
"""
return model_id in SUPPORTED_MODEL_SLUGS
def get_supported_models() -> List[str]:
"""
Get a list of all supported model slugs for Baseten Model APIs.
Returns:
List of supported model slugs
"""
return SUPPORTED_MODEL_SLUGS.copy()
def get_available_models_dynamic(client) -> List[Model]:
"""
Dynamically fetch available models from Baseten Model APIs.
Args:
client: The OpenAI-compatible client instance
Returns:
List of Model objects representing available models
"""
models = []
try:
for element in client.models.list().data:
model = Model(id=element.id)
models.append(model)
# Filter out models that might not work properly with chat completions
# (Currently no exclusions, but this allows for future filtering)
exclude = set()
return [model for model in models if model.id not in exclude]
except Exception as e:
warnings.warn(
f"Failed to fetch models dynamically: {e}. Falling back to static list."
)
# Fallback to current static list
return [Model(id=slug) for slug in SUPPORTED_MODEL_SLUGS]
def validate_model_dynamic(client, model_name: str) -> None:
"""
Validate model against dynamically fetched list from Baseten Model APIs.
Args:
client: The OpenAI-compatible client instance
model_name: The model name to validate
Raises:
ValueError: If the model is not available
"""
try:
available_models = get_available_models_dynamic(client)
available_model_ids = [model.id for model in available_models]
if model_name not in available_model_ids:
# Try to find partial matches for helpful error messages
candidates = [
model_id for model_id in available_model_ids if model_name in model_id
]
if candidates:
suggestion = f"Did you mean one of: {candidates[:3]}"
else:
suggestion = f"Available models: {available_model_ids[:5]}{'...' if len(available_model_ids) > 5 else ''}"
raise ValueError(
f"Model '{model_name}' not found in available models. {suggestion}"
)
except Exception as e:
if "not found in available models" in str(e):
# Re-raise our validation error
raise
else:
# For other errors, fall back to static validation
warnings.warn(f"Dynamic validation failed: {e}. Using static validation.")
validate_model_slug(model_name)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-baseten/llama_index/llms/baseten/utils.py",
"license": "MIT License",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
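To illustrate the fallback behavior, a sketch using the same OpenAI-compatible client the integration builds internally (the key is a placeholder):

```python
from openai import OpenAI

from llama_index.llms.baseten.utils import (
    get_available_models_dynamic,
    validate_model_dynamic,
)

client = OpenAI(api_key="YOUR_API_KEY", base_url="https://inference.baseten.co/v1/")

# On API failure this warns and returns the static SUPPORTED_MODEL_SLUGS list.
models = get_available_models_dynamic(client)
print([m.id for m in models][:3])

# Raises ValueError with a "Did you mean ..." hint for near-miss model names.
validate_model_dynamic(client, "deepseek-ai/DeepSeek-V3-0324")
```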
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-baseten/tests/test_llms_baseten.py | from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.baseten import Baseten
def test_text_inference_embedding_class():
names_of_base_classes = [b.__name__ for b in Baseten.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-baseten/tests/test_llms_baseten.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/embeddings/llama-index-embeddings-heroku/examples/async_usage.py | """Async usage example for Heroku embeddings."""
import asyncio
from llama_index.embeddings.heroku import HerokuEmbedding
async def main():
"""Demonstrate async usage of Heroku embeddings."""
# Initialize the embedding model. This assumes the presence of EMBEDDING_MODEL_ID,
# EMBEDDING_KEY, and EMBEDDING_URL in the host environment
embedding_model = HerokuEmbedding()
try:
# Example texts to embed
texts = [
"Hello, world!",
"This is a test document about artificial intelligence.",
"Machine learning is a subset of AI.",
"Natural language processing helps computers understand human language.",
]
print("Generating embeddings asynchronously...")
# Get embeddings for individual texts asynchronously
for i, text in enumerate(texts):
embedding = await embedding_model.aget_text_embedding(text)
print(f"Text {i+1}: {text[:50]}...")
print(f" Embedding dimension: {len(embedding)}")
print(f" First 5 values: {embedding[:5]}")
print()
# Get embeddings for all texts at once asynchronously
print("Getting batch embeddings asynchronously...")
all_embeddings = await embedding_model.aget_text_embedding_batch(texts)
print(f"Generated {len(all_embeddings)} embeddings")
# Demonstrate similarity (cosine similarity)
from llama_index.core.base.embeddings.base import similarity
print("\nCalculating similarities...")
for i in range(len(texts)):
for j in range(i + 1, len(texts)):
sim = similarity(all_embeddings[i], all_embeddings[j])
print(f"Similarity between text {i+1} and text {j+1}: {sim:.4f}")
finally:
# Clean up async client
await embedding_model.aclose()
if __name__ == "__main__":
asyncio.run(main())
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/embeddings/llama-index-embeddings-heroku/examples/async_usage.py",
"license": "MIT License",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/embeddings/llama-index-embeddings-heroku/examples/basic_usage.py | """Basic usage example for Heroku embeddings."""
from llama_index.embeddings.heroku import HerokuEmbedding
def main():
"""Demonstrate basic usage of Heroku embeddings."""
# Initialize the embedding model. This assumes the presence of EMBEDDING_MODEL_ID,
# EMBEDDING_KEY, and EMBEDDING_URL in the host environment
embedding_model = HerokuEmbedding()
# Example texts to embed
texts = [
"Hello, world!",
"This is a test document about artificial intelligence.",
"Machine learning is a subset of AI.",
"Natural language processing helps computers understand human language.",
]
print("Generating embeddings...")
# Get embeddings for individual texts
for i, text in enumerate(texts):
embedding = embedding_model.get_text_embedding(text)
print(f"Text {i+1}: {text[:50]}...")
print(f" Embedding dimension: {len(embedding)}")
print(f" First 5 values: {embedding[:5]}")
print()
# Get embeddings for all texts at once
print("Getting batch embeddings...")
all_embeddings = embedding_model.get_text_embedding_batch(texts)
print(f"Generated {len(all_embeddings)} embeddings")
# Demonstrate similarity (cosine similarity)
from llama_index.core.base.embeddings.base import similarity
print("\nCalculating similarities...")
for i in range(len(texts)):
for j in range(i + 1, len(texts)):
sim = similarity(all_embeddings[i], all_embeddings[j])
print(f"Similarity between text {i+1} and text {j+1}: {sim:.4f}")
if __name__ == "__main__":
main()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/embeddings/llama-index-embeddings-heroku/examples/basic_usage.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/embeddings/llama-index-embeddings-heroku/llama_index/embeddings/heroku/base.py | """Heroku embeddings file."""
import logging
from typing import Any, Optional
import httpx
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
Embedding,
)
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
logger = logging.getLogger(__name__)
DEFAULT_HEROKU_API_BASE = "https://api.heroku.com"
DEFAULT_HEROKU_API_VERSION = "v1"
class HerokuEmbedding(BaseEmbedding):
"""
Heroku Managed Inference Embeddings Integration.
This class provides an interface to Heroku's Managed Inference API for embeddings.
It connects to your Heroku app's embedding endpoint for embedding models. For more
information about Heroku's embedding endpoint
see: https://devcenter.heroku.com/articles/heroku-inference-api-model-cohere-embed-multilingual
Args:
model (str, optional): The model to use. If not provided, will use EMBEDDING_MODEL_ID.
api_key (str, optional): The API key for Heroku embedding. Defaults to EMBEDDING_KEY.
base_url (str, optional): The base URL for embedding. Defaults to EMBEDDING_URL.
timeout (float, optional): Timeout for requests in seconds. Defaults to 60.0.
**kwargs: Additional keyword arguments.
Environment Variables:
- EMBEDDING_KEY: The API key for Heroku embedding
- EMBEDDING_URL: The base URL for embedding endpoint
- EMBEDDING_MODEL_ID: The model ID to use
Raises:
ValueError: If required environment variables are not set.
"""
model: Optional[str] = Field(
default=None, description="The model to use for embeddings."
)
api_key: Optional[str] = Field(
default=None, description="The API key for Heroku embedding."
)
base_url: Optional[str] = Field(
default=None, description="The base URL for embedding endpoint."
)
timeout: float = Field(default=60.0, description="Timeout for requests in seconds.")
_client: httpx.Client = PrivateAttr()
_aclient: httpx.AsyncClient = PrivateAttr()
def __init__(
self,
model: Optional[str] = None,
api_key: Optional[str] = None,
base_url: Optional[str] = None,
timeout: float = 60.0,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
"""
Initialize an instance of the HerokuEmbedding class.
Args:
model (str, optional): The model to use. If not provided, will use EMBEDDING_MODEL_ID.
api_key (str, optional): The API key for Heroku embedding. Defaults to EMBEDDING_KEY.
base_url (str, optional): The base URL for embedding. Defaults to EMBEDDING_URL.
timeout (float, optional): Timeout for requests in seconds. Defaults to 60.0.
embed_batch_size (int, optional): Batch size for embedding calls. Defaults to DEFAULT_EMBED_BATCH_SIZE.
callback_manager (Optional[CallbackManager], optional): Callback manager. Defaults to None.
**kwargs: Additional keyword arguments.
"""
# Get API key from parameter or environment
try:
api_key = get_from_param_or_env(
"api_key",
api_key,
"EMBEDDING_KEY",
)
except ValueError:
raise ValueError(
"API key is required. Set EMBEDDING_KEY environment variable or pass api_key parameter."
)
# Get embedding URL from parameter or environment
try:
base_url = get_from_param_or_env(
"base_url",
base_url,
"EMBEDDING_URL",
)
except ValueError:
raise ValueError(
"Embedding URL is required. Set EMBEDDING_URL environment variable or pass base_url parameter."
)
# Get model from parameter or environment
try:
model = get_from_param_or_env(
"model",
model,
"EMBEDDING_MODEL_ID",
)
except ValueError:
raise ValueError(
"Model is required. Set EMBEDDING_MODEL_ID environment variable or pass model parameter."
)
super().__init__(
model_name=model,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
**kwargs,
)
self.model = model
self.api_key = api_key
self.base_url = base_url
self.timeout = timeout
# Initialize HTTP clients
self._client = httpx.Client(
timeout=timeout,
headers={
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
"User-Agent": "llama-index-embeddings-heroku",
},
)
self._aclient = httpx.AsyncClient(
timeout=timeout,
headers={
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
"User-Agent": "llama-index-embeddings-heroku",
},
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "HerokuEmbedding"
def _get_query_embedding(self, query: str) -> Embedding:
"""Get query embedding."""
return self._get_text_embedding(query)
def _get_text_embedding(self, text: str) -> Embedding:
"""Get text embedding."""
try:
response = self._client.post(
f"{self.base_url}/v1/embeddings",
json={
"input": text,
"model": self.model,
},
)
response.raise_for_status()
data = response.json()
return data["data"][0]["embedding"]
except httpx.HTTPStatusError as e:
logger.error(f"HTTP error while embedding text: {e}")
raise ValueError(f"Unable to embed text: {e}")
except Exception as e:
logger.error(f"Error while embedding text: {e}")
raise ValueError(f"Unable to embed text: {e}")
async def _aget_query_embedding(self, query: str) -> Embedding:
"""Get query embedding asynchronously."""
return await self._aget_text_embedding(query)
async def _aget_text_embedding(self, text: str) -> Embedding:
"""Get text embedding asynchronously."""
try:
response = await self._aclient.post(
f"{self.base_url}/v1/embeddings",
json={
"input": text,
"model": self.model,
},
)
response.raise_for_status()
data = response.json()
return data["data"][0]["embedding"]
except httpx.HTTPStatusError as e:
logger.error(f"HTTP error while embedding text: {e}")
raise ValueError(f"Unable to embed text: {e}")
except Exception as e:
logger.error(f"Error while embedding text: {e}")
raise ValueError(f"Unable to embed text: {e}")
def __del__(self) -> None:
"""Clean up resources."""
if hasattr(self, "_client"):
self._client.close()
async def aclose(self) -> None:
"""Close async client."""
if hasattr(self, "_aclient"):
await self._aclient.aclose()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/embeddings/llama-index-embeddings-heroku/llama_index/embeddings/heroku/base.py",
"license": "MIT License",
"lines": 184,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
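A minimal construction sketch matching the environment-variable fallbacks above (all three values are placeholders); each embedding request is an OpenAI-style POST to `{base_url}/v1/embeddings`:

```python
import os

from llama_index.embeddings.heroku import HerokuEmbedding

os.environ.setdefault("EMBEDDING_KEY", "YOUR_INFERENCE_KEY")
os.environ.setdefault("EMBEDDING_URL", "https://us.inference.heroku.com")
os.environ.setdefault("EMBEDDING_MODEL_ID", "cohere-embed-multilingual")

embed_model = HerokuEmbedding(timeout=30.0)
vector = embed_model.get_text_embedding("Hello, Heroku!")
print(len(vector))
```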
run-llama/llama_index:llama-index-integrations/embeddings/llama-index-embeddings-heroku/tests/test_heroku_embeddings.py | """Test Heroku embeddings."""
import os
from unittest.mock import MagicMock, patch
import httpx
import pytest
from llama_index.embeddings.heroku.base import HerokuEmbedding
STUB_MODEL = "cohere-embed-multilingual-v3"
STUB_API_KEY = "test-api-key"
STUB_EMBEDDING_URL = "https://test-inference.heroku.com"
@pytest.fixture(name="heroku_embedding")
def fixture_heroku_embedding() -> HerokuEmbedding:
"""Create a HerokuEmbedding instance for testing."""
return HerokuEmbedding(
model=STUB_MODEL,
api_key=STUB_API_KEY,
base_url=STUB_EMBEDDING_URL,
)
@pytest.fixture(name="mock_response")
def fixture_mock_response() -> MagicMock:
"""Create a mock response for testing."""
mock_response = MagicMock()
mock_response.json.return_value = {
"data": [
{
"embedding": [0.1, 0.2, 0.3, 0.4, 0.5],
"index": 0,
"object": "embedding",
}
],
"model": STUB_MODEL,
"object": "list",
"usage": {"prompt_tokens": 5, "total_tokens": 5},
}
mock_response.raise_for_status.return_value = None
return mock_response
class TestHerokuEmbedding:
"""Test HerokuEmbedding class."""
def test_class_name(self, heroku_embedding: HerokuEmbedding) -> None:
"""Test class name."""
assert HerokuEmbedding.class_name() == "HerokuEmbedding"
assert heroku_embedding.class_name() == "HerokuEmbedding"
def test_init_with_parameters(self) -> None:
"""Test initialization with parameters."""
embedding = HerokuEmbedding(
model=STUB_MODEL,
api_key=STUB_API_KEY,
base_url=STUB_EMBEDDING_URL,
timeout=30.0,
)
assert embedding.model == STUB_MODEL
assert embedding.api_key == STUB_API_KEY
assert embedding.base_url == STUB_EMBEDDING_URL
assert embedding.timeout == 30.0
def test_init_with_environment_variables(self) -> None:
"""Test initialization with environment variables."""
with patch.dict(
os.environ,
{
"EMBEDDING_KEY": STUB_API_KEY,
"EMBEDDING_URL": STUB_EMBEDDING_URL,
"EMBEDDING_MODEL_ID": STUB_MODEL,
},
):
embedding = HerokuEmbedding()
assert embedding.model == STUB_MODEL
assert embedding.api_key == STUB_API_KEY
assert embedding.base_url == STUB_EMBEDDING_URL
def test_init_missing_api_key(self) -> None:
"""Test initialization with missing API key."""
with pytest.raises(ValueError, match="API key is required"):
HerokuEmbedding(
model=STUB_MODEL,
base_url=STUB_EMBEDDING_URL,
)
def test_init_missing_base_url(self) -> None:
"""Test initialization with missing embedding URL."""
with pytest.raises(ValueError, match="Embedding URL is required"):
HerokuEmbedding(
model=STUB_MODEL,
api_key=STUB_API_KEY,
)
def test_init_missing_model(self) -> None:
"""Test initialization with missing model."""
with pytest.raises(ValueError, match="Model is required"):
HerokuEmbedding(
api_key=STUB_API_KEY,
base_url=STUB_EMBEDDING_URL,
)
def test_get_text_embedding_success(
self, heroku_embedding: HerokuEmbedding, mock_response: MagicMock
) -> None:
"""Test successful text embedding."""
with patch.object(heroku_embedding._client, "post", return_value=mock_response):
embedding = heroku_embedding.get_text_embedding("test text")
assert embedding == [0.1, 0.2, 0.3, 0.4, 0.5]
def test_get_text_embedding_http_error(
self, heroku_embedding: HerokuEmbedding
) -> None:
"""Test text embedding with HTTP error."""
mock_response = MagicMock()
mock_response.raise_for_status.side_effect = httpx.HTTPStatusError(
"404 Not Found", request=MagicMock(), response=MagicMock()
)
with patch.object(heroku_embedding._client, "post", return_value=mock_response):
with pytest.raises(ValueError, match="Unable to embed text"):
heroku_embedding.get_text_embedding("test text")
def test_get_text_embedding_exception(
self, heroku_embedding: HerokuEmbedding
) -> None:
"""Test text embedding with general exception."""
with patch.object(
heroku_embedding._client, "post", side_effect=Exception("Network error")
):
with pytest.raises(ValueError, match="Unable to embed text"):
heroku_embedding.get_text_embedding("test text")
def test_get_query_embedding(self, heroku_embedding: HerokuEmbedding) -> None:
"""Test query embedding."""
with patch.object(
heroku_embedding, "_get_text_embedding", return_value=[0.1, 0.2, 0.3]
):
embedding = heroku_embedding.get_query_embedding("test query")
assert embedding == [0.1, 0.2, 0.3]
def test_get_text_embeddings(self, heroku_embedding: HerokuEmbedding) -> None:
"""Test batch text embeddings."""
texts = ["text1", "text2", "text3"]
with patch.object(
heroku_embedding, "_get_text_embedding", return_value=[0.1, 0.2, 0.3]
):
embeddings = heroku_embedding.get_text_embedding_batch(texts)
assert len(embeddings) == 3
assert all(embedding == [0.1, 0.2, 0.3] for embedding in embeddings)
@pytest.mark.asyncio
async def test_aget_text_embedding_success(
self, heroku_embedding: HerokuEmbedding, mock_response: MagicMock
) -> None:
"""Test successful async text embedding."""
with patch.object(
heroku_embedding._aclient, "post", return_value=mock_response
):
embedding = await heroku_embedding.aget_text_embedding("test text")
assert embedding == [0.1, 0.2, 0.3, 0.4, 0.5]
@pytest.mark.asyncio
async def test_aget_text_embedding_http_error(
self, heroku_embedding: HerokuEmbedding
) -> None:
"""Test async text embedding with HTTP error."""
mock_response = MagicMock()
mock_response.raise_for_status.side_effect = httpx.HTTPStatusError(
"404 Not Found", request=MagicMock(), response=MagicMock()
)
with patch.object(
heroku_embedding._aclient, "post", return_value=mock_response
):
with pytest.raises(ValueError, match="Unable to embed text"):
await heroku_embedding.aget_text_embedding("test text")
@pytest.mark.asyncio
async def test_aget_text_embedding_exception(
self, heroku_embedding: HerokuEmbedding
) -> None:
"""Test async text embedding with general exception."""
with patch.object(
heroku_embedding._aclient, "post", side_effect=Exception("Network error")
):
with pytest.raises(ValueError, match="Unable to embed text"):
await heroku_embedding.aget_text_embedding("test text")
@pytest.mark.asyncio
async def test_aget_query_embedding(
self, heroku_embedding: HerokuEmbedding
) -> None:
"""Test async query embedding."""
with patch.object(
heroku_embedding, "_aget_text_embedding", return_value=[0.1, 0.2, 0.3]
):
embedding = await heroku_embedding.aget_query_embedding("test query")
assert embedding == [0.1, 0.2, 0.3]
@pytest.mark.asyncio
async def test_aget_text_embeddings(
self, heroku_embedding: HerokuEmbedding
) -> None:
"""Test async batch text embeddings."""
texts = ["text1", "text2", "text3"]
with patch.object(
heroku_embedding, "_aget_text_embedding", return_value=[0.1, 0.2, 0.3]
):
embeddings = await heroku_embedding.aget_text_embedding_batch(texts)
assert len(embeddings) == 3
assert all(embedding == [0.1, 0.2, 0.3] for embedding in embeddings)
def test_cleanup_sync_client(self) -> None:
"""Test cleanup of sync client."""
embedding = HerokuEmbedding(
model=STUB_MODEL,
api_key=STUB_API_KEY,
base_url=STUB_EMBEDDING_URL,
)
with patch.object(embedding._client, "close") as mock_close:
del embedding
mock_close.assert_called_once()
@pytest.mark.asyncio
async def test_cleanup_async_client(self) -> None:
"""Test cleanup of async client."""
embedding = HerokuEmbedding(
model=STUB_MODEL,
api_key=STUB_API_KEY,
base_url=STUB_EMBEDDING_URL,
)
with patch.object(embedding._aclient, "aclose") as mock_aclose:
await embedding.aclose()
mock_aclose.assert_called_once()
def test_embedding_dimensions(self, heroku_embedding: HerokuEmbedding) -> None:
"""Test that embeddings have the expected dimensions."""
mock_response = MagicMock()
mock_response.json.return_value = {
"data": [
{
"embedding": [0.1] * 768, # Common embedding dimension
"index": 0,
"object": "embedding",
}
],
"model": STUB_MODEL,
"object": "list",
"usage": {"prompt_tokens": 5, "total_tokens": 5},
}
mock_response.raise_for_status.return_value = None
with patch.object(heroku_embedding._client, "post", return_value=mock_response):
embedding = heroku_embedding.get_text_embedding("test text")
assert len(embedding) == 768
assert all(isinstance(x, float) for x in embedding)
def test_batch_embedding_consistency(
self, heroku_embedding: HerokuEmbedding
) -> None:
"""Test that batch embeddings are consistent."""
texts = ["text1", "text2"]
mock_embedding = [0.1, 0.2, 0.3]
with patch.object(
heroku_embedding, "_get_text_embedding", return_value=mock_embedding
):
embeddings = heroku_embedding.get_text_embedding_batch(texts)
assert len(embeddings) == 2
assert embeddings[0] == embeddings[1] == mock_embedding
@pytest.mark.asyncio
async def test_async_batch_embedding_consistency(
self, heroku_embedding: HerokuEmbedding
) -> None:
"""Test that async batch embeddings are consistent."""
texts = ["text1", "text2"]
mock_embedding = [0.1, 0.2, 0.3]
with patch.object(
heroku_embedding, "_aget_text_embedding", return_value=mock_embedding
):
embeddings = await heroku_embedding.aget_text_embedding_batch(texts)
assert len(embeddings) == 2
assert embeddings[0] == embeddings[1] == mock_embedding
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/embeddings/llama-index-embeddings-heroku/tests/test_heroku_embeddings.py",
"license": "MIT License",
"lines": 253,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/retrievers/llama-index-retrievers-superlinked/examples/steam_games_example.py | """
Example: Superlinked + LlamaIndex custom retriever (Steam games).
This example shows how to:
- Build a Superlinked pipeline (schema, space, index, app)
- Define a parameterized Superlinked QueryDescriptor using sl.Param("query_text")
- Inject the Superlinked App and QueryDescriptor into the LlamaIndex retriever
- Retrieve nodes with real similarity scores and optional engine usage
Run:
python examples/steam_games_example.py [--csv /path/to/games.csv] [--top_k 5] [--query "strategic sci-fi game"]
If --csv is omitted, a tiny in-memory sample dataset is used.
"""
import argparse
from typing import Optional
import pandas as pd
import superlinked.framework as sl
from llama_index.retrievers.superlinked import SuperlinkedRetriever
try:
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.response_synthesizers import get_response_synthesizer
except Exception:
RetrieverQueryEngine = None # type: ignore
get_response_synthesizer = None # type: ignore
def build_dataframe(csv_path: Optional[str]) -> pd.DataFrame:
if csv_path:
df = pd.read_csv(csv_path)
else:
# Minimal fallback sample
df = pd.DataFrame(
[
{
"game_number": 1,
"name": "Star Tactics",
"desc_snippet": "Turn-based strategy in deep space.",
"game_details": "Tactical combat, fleet management",
"languages": "en",
"genre": "Strategy, Sci-Fi",
"game_description": "Engage in strategic battles among the stars.",
"original_price": 29.99,
"discount_price": 19.99,
},
{
"game_number": 2,
"name": "Wizard Party",
"desc_snippet": "Co-op party game with spells.",
"game_details": "Local co-op, party",
"languages": "en",
"genre": "Party, Casual, Magic",
"game_description": "Cast spells with friends in chaotic party modes.",
"original_price": 14.99,
"discount_price": 9.99,
},
]
)
required = [
"game_number",
"name",
"desc_snippet",
"game_details",
"languages",
"genre",
"game_description",
"original_price",
"discount_price",
]
missing = [c for c in required if c not in df.columns]
if missing:
raise ValueError(f"Missing required columns: {missing}")
df["combined_text"] = (
df["name"].astype(str)
+ " "
+ df["desc_snippet"].astype(str)
+ " "
+ df["genre"].astype(str)
+ " "
+ df["game_details"].astype(str)
+ " "
+ df["game_description"].astype(str)
)
return df
def build_superlinked_app(df: pd.DataFrame):
class GameSchema(sl.Schema):
id: sl.IdField
name: sl.String
desc_snippet: sl.String
game_details: sl.String
languages: sl.String
genre: sl.String
game_description: sl.String
original_price: sl.Float
discount_price: sl.Float
combined_text: sl.String
game = GameSchema()
text_space = sl.TextSimilaritySpace(
text=game.combined_text,
model="sentence-transformers/all-mpnet-base-v2",
)
index = sl.Index([text_space])
parser = sl.DataFrameParser(
game,
mapping={
game.id: "game_number",
game.name: "name",
game.desc_snippet: "desc_snippet",
game.game_details: "game_details",
game.languages: "languages",
game.genre: "genre",
game.game_description: "game_description",
game.original_price: "original_price",
game.discount_price: "discount_price",
game.combined_text: "combined_text",
},
)
source = sl.InMemorySource(schema=game, parser=parser)
executor = sl.InMemoryExecutor(sources=[source], indices=[index])
app = executor.run()
source.put([df])
# Build parameterized query using sl.Param("query_text")
query = (
sl.Query(index)
.find(game)
.similar(text_space, sl.Param("query_text"))
.select(
[
game.id,
game.name,
game.desc_snippet,
game.game_details,
game.languages,
game.genre,
game.game_description,
game.original_price,
game.discount_price,
]
)
# Do not set .limit() here; the retriever caps results via its top_k
)
return app, query, game
def run_demo(csv_path: Optional[str], top_k: int, query_text: str) -> None:
df = build_dataframe(csv_path)
app, query_descriptor, game = build_superlinked_app(df)
# Inject Superlinked App and QueryDescriptor into the LlamaIndex retriever
retriever = SuperlinkedRetriever(
sl_client=app,
sl_query=query_descriptor,
page_content_field="desc_snippet",
query_text_param="query_text",
metadata_fields=[
"id",
"name",
"genre",
"game_details",
"languages",
"game_description",
"original_price",
"discount_price",
],
top_k=top_k,
)
print(f"\nRetrieving for: {query_text!r}")
nodes = retriever.retrieve(query_text)
for i, nws in enumerate(nodes, 1):
print(f"#{i} score={nws.score:.4f} text={nws.node.text!r}")
print(f" metadata: {nws.node.metadata}")
# Optional: use LlamaIndex query engine if packages are available
if RetrieverQueryEngine and get_response_synthesizer:
print("\nBuilding RetrieverQueryEngine...")
try:
engine = RetrieverQueryEngine(
retriever=retriever, response_synthesizer=get_response_synthesizer()
)
response = engine.query(query_text)
print("\nEngine response:", response)
except Exception as e:
print("Engine invocation failed (likely missing LLM setup):", e)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--csv", type=str, default=None, help="Path to games CSV file")
parser.add_argument("--top_k", type=int, default=5, help="Max results to return")
parser.add_argument(
"--query",
type=str,
default="strategic sci-fi game",
help="Query text",
)
args = parser.parse_args()
run_demo(args.csv, args.top_k, args.query)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/retrievers/llama-index-retrievers-superlinked/examples/steam_games_example.py",
"license": "MIT License",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/retrievers/llama-index-retrievers-superlinked/tests/test_integration_superlinked_retriever.py | """Integration-like tests using only mocks to simulate Superlinked behavior."""
import pytest
from typing import Any, List
from unittest.mock import Mock
from llama_index.retrievers.superlinked import SuperlinkedRetriever
# Patch superlinked modules once for all tests
class MockApp:
pass
class MockQuery:
pass
@pytest.fixture(autouse=True)
def _patch_superlinked_modules(monkeypatch: Any) -> None:
import sys
mock_app_module = Mock()
mock_query_module = Mock()
mock_app_module.App = MockApp
mock_query_module.QueryDescriptor = MockQuery
sys.modules["superlinked.framework.dsl.app.app"] = mock_app_module
sys.modules["superlinked.framework.dsl.query.query_descriptor"] = mock_query_module
def _make_entries(docs: List[dict]) -> list:
entries = []
for d in docs:
m = Mock()
m.id = d.get("id")
m.fields = d
entries.append(m)
return entries
def test_basic_flow() -> None:
retriever = SuperlinkedRetriever(
sl_client=MockApp(),
sl_query=MockQuery(),
page_content_field="text",
top_k=4,
)
docs = [
{"id": "1", "text": "Eiffel Tower is in Paris", "category": "landmark"},
{"id": "2", "text": "Colosseum is in Rome", "category": "landmark"},
{"id": "3", "text": "Python is a language", "category": "technology"},
]
# attach scores as metadata
entries = _make_entries(docs)
for i, e in enumerate(entries):
e.metadata = Mock(score=1.0 - i * 0.1)
retriever.sl_client.query = Mock(return_value=Mock(entries=entries))
nodes = retriever.retrieve("landmarks")
assert len(nodes) == 3
assert any("Paris" in n.node.text for n in nodes)
assert all("id" in n.node.metadata for n in nodes)
def test_k_limit_and_metadata_subset() -> None:
retriever = SuperlinkedRetriever(
sl_client=MockApp(),
sl_query=MockQuery(),
page_content_field="text",
metadata_fields=["category"],
top_k=2,
)
docs = [
{"id": "1", "text": "doc1", "category": "A", "x": 1},
{"id": "2", "text": "doc2", "category": "B", "x": 2},
{"id": "3", "text": "doc3", "category": "C", "x": 3},
]
entries = _make_entries(docs)
for i, e in enumerate(entries):
e.metadata = Mock(score=0.9 - i * 0.1)
retriever.sl_client.query = Mock(return_value=Mock(entries=entries))
nodes = retriever.retrieve("q")
assert len(nodes) == 2
for n in nodes:
assert set(n.node.metadata.keys()) == {"id", "category"}
# verify scores present
assert all(isinstance(n.score, float) for n in nodes)
def test_error_returns_empty_list() -> None:
retriever = SuperlinkedRetriever(
sl_client=MockApp(),
sl_query=MockQuery(),
page_content_field="text",
)
retriever.sl_client.query = Mock(side_effect=Exception("boom"))
nodes = retriever.retrieve("q")
assert nodes == []
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/retrievers/llama-index-retrievers-superlinked/tests/test_integration_superlinked_retriever.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/retrievers/llama-index-retrievers-superlinked/tests/test_unit_superlinked_retriever.py | """Unit tests for SuperlinkedRetriever (LlamaIndex)."""
import pytest
from typing import Any
from unittest.mock import Mock
from llama_index.retrievers.superlinked import SuperlinkedRetriever
# Patch superlinked types before importing the retriever to satisfy validators
class MockApp:
pass
class MockQuery:
pass
@pytest.fixture(autouse=True)
def _patch_superlinked_modules(monkeypatch: Any) -> None:
import sys
mock_app_module = Mock()
mock_query_module = Mock()
mock_app_module.App = MockApp
mock_query_module.QueryDescriptor = MockQuery
sys.modules["superlinked.framework.dsl.app.app"] = mock_app_module
sys.modules["superlinked.framework.dsl.query.query_descriptor"] = mock_query_module
def test_retriever_validate_and_retrieve_success() -> None:
retriever = SuperlinkedRetriever(
sl_client=MockApp(),
sl_query=MockQuery(),
page_content_field="text",
top_k=4,
)
# Build fake Superlinked response
mock_entry1 = Mock()
mock_entry1.id = "1"
mock_entry1.fields = {"text": "Paris is beautiful.", "city": "Paris"}
mock_entry1.metadata = Mock(score=0.9)
mock_entry2 = Mock()
mock_entry2.id = "2"
mock_entry2.fields = {"text": "Rome has the Colosseum.", "city": "Rome"}
mock_entry2.metadata = Mock(score=0.8)
mock_result = Mock()
mock_result.entries = [mock_entry1, mock_entry2]
retriever.sl_client.query = Mock(return_value=mock_result)
nodes = retriever.retrieve("cities")
assert len(nodes) == 2
assert nodes[0].node.text in {"Paris is beautiful.", "Rome has the Colosseum."}
# metadata should include id and city
md = nodes[0].node.metadata
assert "id" in md and "city" in md
# scores should be propagated
scores = sorted([n.score for n in nodes], reverse=True)
assert scores == [0.9, 0.8]
def test_retriever_respects_k() -> None:
retriever = SuperlinkedRetriever(
sl_client=MockApp(),
sl_query=MockQuery(),
page_content_field="text",
top_k=1,
)
mock_entry = Mock()
mock_entry.id = "1"
mock_entry.fields = {"text": "A", "x": 1}
mock_entry2 = Mock()
mock_entry2.id = "2"
mock_entry2.fields = {"text": "B", "x": 2}
mock_result = Mock()
mock_result.entries = [mock_entry, mock_entry2]
retriever.sl_client.query = Mock(return_value=mock_result)
nodes = retriever.retrieve("q")
assert len(nodes) == 1
def test_retriever_metadata_fields_subset() -> None:
retriever = SuperlinkedRetriever(
sl_client=MockApp(),
sl_query=MockQuery(),
page_content_field="text",
metadata_fields=["city"],
)
mock_entry = Mock()
mock_entry.id = "1"
mock_entry.fields = {"text": "A", "city": "Paris", "drop": True}
mock_result = Mock(entries=[mock_entry])
retriever.sl_client.query = Mock(return_value=mock_result)
nodes = retriever.retrieve("q")
assert nodes[0].node.metadata == {"id": "1", "city": "Paris"}
def test_retriever_missing_page_content_skips() -> None:
retriever = SuperlinkedRetriever(
sl_client=MockApp(),
sl_query=MockQuery(),
page_content_field="text",
)
mock_entry = Mock()
mock_entry.id = "1"
mock_entry.fields = {"not_text": "oops"}
mock_result = Mock(entries=[mock_entry])
retriever.sl_client.query = Mock(return_value=mock_result)
nodes = retriever.retrieve("q")
assert nodes == []
def test_retriever_query_exception_returns_empty() -> None:
retriever = SuperlinkedRetriever(
sl_client=MockApp(),
sl_query=MockQuery(),
page_content_field="text",
)
retriever.sl_client.query = Mock(side_effect=Exception("failure"))
nodes = retriever.retrieve("q")
assert nodes == []
def test_query_text_param_is_used() -> None:
retriever = SuperlinkedRetriever(
sl_client=MockApp(),
sl_query=MockQuery(),
page_content_field="text",
query_text_param="search_term",
)
mock_result = Mock(entries=[])
retriever.sl_client.query = Mock(return_value=mock_result)
retriever.retrieve("hello")
retriever.sl_client.query.assert_called_once()
kwargs = retriever.sl_client.query.call_args.kwargs
assert kwargs["query_descriptor"] is retriever.sl_query
assert kwargs["search_term"] == "hello"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/retrievers/llama-index-retrievers-superlinked/tests/test_unit_superlinked_retriever.py",
"license": "MIT License",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/slides/image_extractor.py | """
Image extraction utilities for PowerPoint slides.
Handles image captioning using vision models.
"""
import logging
import tempfile
import os
from typing import Dict, Any
logger = logging.getLogger(__name__)
class ImageExtractor:
"""
Handles image extraction and captioning for PowerPoint slides.
Uses vision transformer models for image captioning.
"""
def __init__(self):
"""Initialize image extractor with vision models."""
self.vision_models = None
self._initialize_vision_models()
def _initialize_vision_models(self) -> None:
"""Initialize vision transformer models for image captioning."""
try:
import torch # noqa
from PIL import Image # noqa
from transformers import (
AutoTokenizer,
VisionEncoderDecoderModel,
ViTFeatureExtractor,
)
except ImportError:
raise ImportError(
"Missing required dependencies for image extraction and captioning.\n"
"Please install the following packages:\n"
" pip install 'torch>=2.7.1' 'transformers<4.50' 'pillow>=11.2.1'\n\n"
"Note: This feature requires PyTorch and transformers for AI-powered image captioning.\n"
"If you don't need image extraction, set extract_images=False when initializing PptxReader."
)
model = VisionEncoderDecoderModel.from_pretrained(
"nlpconnect/vit-gpt2-image-captioning"
)
feature_extractor = ViTFeatureExtractor.from_pretrained(
"nlpconnect/vit-gpt2-image-captioning"
)
tokenizer = AutoTokenizer.from_pretrained(
"nlpconnect/vit-gpt2-image-captioning"
)
self.vision_models = {
"feature_extractor": feature_extractor,
"model": model,
"tokenizer": tokenizer,
}
def caption_image_from_file(self, image_path: str) -> str:
"""
Generate caption for image from file path.
Args:
image_path: Path to image file
Returns:
Image caption text
"""
if not self.vision_models:
raise RuntimeError(
"Image captioning not available - vision models not loaded"
)
from PIL import Image
from llama_index.core.utils import infer_torch_device
model = self.vision_models["model"]
feature_extractor = self.vision_models["feature_extractor"]
tokenizer = self.vision_models["tokenizer"]
device = infer_torch_device()
model.to(device)
max_length = 16
num_beams = 4
gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
i_image = Image.open(image_path)
if i_image.mode != "RGB":
i_image = i_image.convert(mode="RGB")
pixel_values = feature_extractor(
images=[i_image], return_tensors="pt"
).pixel_values
pixel_values = pixel_values.to(device)
output_ids = model.generate(pixel_values, **gen_kwargs)
preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
return preds[0].strip()
def extract_image_data(self, shape, slide_number: int) -> Dict[str, Any]:
"""
Extract image data and caption from PowerPoint shape.
Args:
shape: PowerPoint shape containing image
slide_number: Slide number for context
Returns:
Dictionary with image metadata and caption
"""
# Use temp file approach like original code
image_bytes = shape.image.blob
f = tempfile.NamedTemporaryFile(
"wb", delete=False, suffix=f".{shape.image.ext}"
)
try:
f.write(image_bytes)
f.close()
caption = self.caption_image_from_file(f.name)
finally:
os.unlink(f.name)
return {
"type": "image",
"format": shape.image.ext,
"caption": caption,
"content": caption,
}
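# Usage sketch (illustrative): caption a single image from the command line.
# The default path below is a hypothetical placeholder; the optional
# torch/transformers/pillow extras must be installed, and the first run
# downloads the nlpconnect/vit-gpt2-image-captioning weights.
if __name__ == "__main__":
    import sys
    extractor = ImageExtractor()
    image_path = sys.argv[1] if len(sys.argv) > 1 else "slide_image.png"
    print(extractor.caption_image_from_file(image_path))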
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/slides/image_extractor.py",
"license": "MIT License",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-file/tests/generate_test_ppt.py | """
Generate a comprehensive PowerPoint presentation for testing the enhanced PptxReader.
This script creates a presentation with:
- Text content with formatting
- Tables with structured data
- Charts with series data
- Speaker notes
- Mixed content slides
Perfect for demonstrating the enhanced extraction capabilities.
"""
from pptx import Presentation
from pptx.chart.data import CategoryChartData
from pptx.enum.chart import XL_CHART_TYPE
from pptx.enum.text import PP_ALIGN
from pptx.util import Inches, Pt
def create_comprehensive_test_presentation(
filename="comprehensive_test_presentation.pptx",
):
"""Create a comprehensive test PowerPoint presentation."""
# Create presentation
prs = Presentation()
# Slide 1: Title slide with rich text
slide1 = prs.slides.add_slide(prs.slide_layouts[0]) # Title slide layout
title = slide1.shapes.title
subtitle = slide1.placeholders[1]
title.text = "Enhanced PowerPoint Reader Test"
subtitle.text = (
"Comprehensive Content Extraction Demo\nTesting Tables, Charts, Notes & Text"
)
# Add speaker notes
notes_slide = slide1.notes_slide
notes_slide.notes_text_frame.text = (
"Welcome to the comprehensive test presentation. "
"This presentation demonstrates the enhanced PowerPoint reader's ability to extract "
"various content types including formatted text, tables, charts, and speaker notes. "
"Each slide showcases different content extraction scenarios."
)
# Slide 2: Rich text content with formatting
slide2 = prs.slides.add_slide(prs.slide_layouts[1]) # Content layout
slide2.shapes.title.text = "Sales Performance Analysis"
content = slide2.placeholders[1]
tf = content.text_frame
tf.clear()
# Add formatted paragraphs
p1 = tf.paragraphs[0]
p1.text = "Executive Summary"
p1.font.bold = True
p1.font.size = Pt(18)
p1.alignment = PP_ALIGN.CENTER
p2 = tf.add_paragraph()
p2.text = "Our Q4 performance exceeded expectations with significant growth across all key metrics:"
p2.font.size = Pt(14)
p3 = tf.add_paragraph()
p3.text = "• Revenue increased by 25% year-over-year"
p3.level = 1
p4 = tf.add_paragraph()
p4.text = "• Customer acquisition grew by 40%"
p4.level = 1
p5 = tf.add_paragraph()
p5.text = "• Market share expanded from 15% to 22%"
p5.level = 1
p6 = tf.add_paragraph()
p6.text = "Key Success Factors"
p6.font.bold = True
p6.font.size = Pt(16)
p7 = tf.add_paragraph()
p7.text = "The remarkable growth can be attributed to our enhanced product offerings, strategic partnerships, and improved customer experience initiatives."
# Add speaker notes
slide2.notes_slide.notes_text_frame.text = (
"This slide presents our Q4 sales performance overview. "
"Key talking points: Emphasize the 25% revenue growth and 40% customer acquisition increase. "
"Mention that market share expansion from 15% to 22% demonstrates strong competitive positioning. "
"Be prepared to discuss the strategic initiatives that drove these results."
)
# Slide 3: Table with financial data
slide3 = prs.slides.add_slide(prs.slide_layouts[5]) # Blank layout
slide3.shapes.title.text = "Quarterly Financial Results"
# Add table
rows, cols = 5, 4
left = Inches(1)
top = Inches(2)
width = Inches(8)
height = Inches(3)
table = slide3.shapes.add_table(rows, cols, left, top, width, height).table
# Set table headers
headers = ["Quarter", "Revenue ($M)", "Profit ($M)", "Growth (%)"]
for i, header in enumerate(headers):
table.cell(0, i).text = header
table.cell(0, i).text_frame.paragraphs[0].font.bold = True
# Add data
data = [
["Q1 2023", "45.2", "8.1", "12%"],
["Q2 2023", "52.8", "10.3", "18%"],
["Q3 2023", "58.9", "12.7", "15%"],
["Q4 2023", "67.3", "15.4", "25%"],
]
for row_idx, row_data in enumerate(data, 1):
for col_idx, cell_data in enumerate(row_data):
table.cell(row_idx, col_idx).text = cell_data
# Add speaker notes
slide3.notes_slide.notes_text_frame.text = (
"This table shows our quarterly progression throughout 2023. "
"Notice the consistent growth trend with Q4 showing the strongest performance. "
"Revenue grew from $45.2M in Q1 to $67.3M in Q4, representing a 49% increase. "
"Profit margins improved significantly, reaching $15.4M in Q4. "
"The growth percentages show accelerating momentum, particularly in Q4 with 25% growth."
)
# Slide 4: Chart with sales data
slide4 = prs.slides.add_slide(prs.slide_layouts[5]) # Blank layout
slide4.shapes.title.text = "Monthly Sales Trends"
# Create chart data
chart_data = CategoryChartData()
chart_data.categories = ["Jan", "Feb", "Mar", "Apr", "May", "Jun"]
chart_data.add_series("Product A", (100, 125, 150, 175, 200, 225))
chart_data.add_series("Product B", (80, 90, 110, 140, 160, 180))
chart_data.add_series("Product C", (60, 75, 85, 95, 120, 140))
# Add chart
x, y, cx, cy = Inches(1), Inches(2), Inches(8), Inches(5)
chart = slide4.shapes.add_chart(
XL_CHART_TYPE.COLUMN_CLUSTERED, x, y, cx, cy, chart_data
).chart
chart.has_title = True
chart.chart_title.text_frame.text = "Sales Performance by Product Line"
# Add speaker notes
slide4.notes_slide.notes_text_frame.text = (
"This chart illustrates the monthly sales trends for our three main product lines. "
"Product A shows the strongest performance with consistent growth from 100 to 225 units. "
"Product B demonstrates steady improvement, reaching 180 units by June. "
"Product C shows accelerating growth, particularly in the last two months. "
"The overall trend indicates a healthy product portfolio with all lines contributing to growth."
)
# Slide 5: Mixed content slide
slide5 = prs.slides.add_slide(prs.slide_layouts[1]) # Content layout
slide5.shapes.title.text = "Regional Performance Summary"
# Add text content
content = slide5.placeholders[1]
tf = content.text_frame
tf.clear()
p1 = tf.paragraphs[0]
p1.text = "North America: Leading Market"
p1.font.bold = True
p1.font.size = Pt(16)
p2 = tf.add_paragraph()
p2.text = "Generated $28.5M in revenue (42% of total), driven by strong enterprise adoption and new partnership agreements."
p3 = tf.add_paragraph()
p3.text = "Europe: Emerging Opportunities"
p3.font.bold = True
p3.font.size = Pt(16)
p4 = tf.add_paragraph()
p4.text = "Achieved $18.7M in revenue (28% of total) with significant growth in Germany and UK markets."
# Add small table for regional breakdown
left = Inches(1)
top = Inches(4.5)
width = Inches(6)
height = Inches(1.5)
small_table = slide5.shapes.add_table(4, 3, left, top, width, height).table
# Headers
headers = ["Region", "Revenue ($M)", "Market Share"]
for i, header in enumerate(headers):
small_table.cell(0, i).text = header
small_table.cell(0, i).text_frame.paragraphs[0].font.bold = True
# Data
regional_data = [
["North America", "28.5", "42%"],
["Europe", "18.7", "28%"],
["Asia Pacific", "20.1", "30%"],
]
for row_idx, row_data in enumerate(regional_data, 1):
for col_idx, cell_data in enumerate(row_data):
small_table.cell(row_idx, col_idx).text = cell_data
# Add speaker notes
slide5.notes_slide.notes_text_frame.text = (
"This slide combines textual analysis with supporting data table. "
"North America remains our strongest market, but note the balanced distribution across regions. "
"Europe shows promising growth potential, especially in enterprise segments. "
"Asia Pacific, while showing strong numbers, presents opportunities for expansion. "
"The regional diversification reduces market concentration risk and provides multiple growth avenues."
)
# Slide 6: Future projections with line chart
slide6 = prs.slides.add_slide(prs.slide_layouts[5]) # Blank layout
slide6.shapes.title.text = "2024 Growth Projections"
# Create line chart data
line_chart_data = CategoryChartData()
line_chart_data.categories = ["Q1 2024", "Q2 2024", "Q3 2024", "Q4 2024"]
line_chart_data.add_series("Conservative", (70, 78, 85, 92))
line_chart_data.add_series("Optimistic", (75, 85, 95, 108))
line_chart_data.add_series("Stretch Goal", (80, 92, 105, 120))
# Add line chart
x, y, cx, cy = Inches(1), Inches(2), Inches(8), Inches(4)
line_chart = slide6.shapes.add_chart(
XL_CHART_TYPE.LINE, x, y, cx, cy, line_chart_data
).chart
line_chart.has_title = True
line_chart.chart_title.text_frame.text = "Revenue Projections ($M)"
# Add speaker notes
slide6.notes_slide.notes_text_frame.text = (
"Our 2024 projections show three scenarios based on market conditions and execution capabilities. "
"Conservative scenario assumes 15-20% growth, reaching $92M by Q4. "
"Optimistic scenario projects 25-30% growth, achieving $108M in Q4. "
"Stretch goal represents aggressive expansion with potential $120M Q4 revenue. "
"We're targeting the optimistic scenario while preparing contingencies for the conservative case."
)
# Slide 7: Customer Demographics Table
slide7 = prs.slides.add_slide(prs.slide_layouts[5]) # Blank layout
slide7.shapes.title.text = "Customer Demographics Analysis"
# Add demographics table
rows, cols = 6, 5
left = Inches(0.5)
top = Inches(2)
width = Inches(9)
height = Inches(3.5)
demo_table = slide7.shapes.add_table(rows, cols, left, top, width, height).table
# Set headers
demo_headers = [
"Age Group",
"Percentage",
"Revenue Share",
"Growth Rate",
"Retention",
]
for i, header in enumerate(demo_headers):
demo_table.cell(0, i).text = header
demo_table.cell(0, i).text_frame.paragraphs[0].font.bold = True
# Add demographic data
demo_data = [
["18-25", "15%", "12%", "45%", "78%"],
["26-35", "35%", "38%", "28%", "85%"],
["36-45", "28%", "32%", "15%", "92%"],
["46-55", "18%", "16%", "8%", "95%"],
["55+", "4%", "2%", "5%", "88%"],
]
for row_idx, row_data in enumerate(demo_data, 1):
for col_idx, cell_data in enumerate(row_data):
demo_table.cell(row_idx, col_idx).text = cell_data
slide7.notes_slide.notes_text_frame.text = (
"Customer demographics reveal interesting patterns in our user base. "
"The 26-35 age group represents our largest segment at 35% of customers and 38% of revenue. "
"Younger demographics (18-25) show highest growth at 45% but lower retention at 78%. "
"Older segments demonstrate higher retention rates, with 46-55 age group at 95% retention. "
"This data suggests opportunities for retention improvement in younger segments."
)
# Slide 8: Product Portfolio Pie Chart
slide8 = prs.slides.add_slide(prs.slide_layouts[5]) # Blank layout
slide8.shapes.title.text = "Product Portfolio Distribution"
# Create pie chart data
pie_chart_data = CategoryChartData()
pie_chart_data.categories = [
"Enterprise Software",
"Mobile Apps",
"Cloud Services",
"Consulting",
"Hardware",
]
pie_chart_data.add_series("Revenue Share", (45, 25, 18, 8, 4))
# Add pie chart
x, y, cx, cy = Inches(2), Inches(2), Inches(6), Inches(4.5)
pie_chart = slide8.shapes.add_chart(
XL_CHART_TYPE.PIE, x, y, cx, cy, pie_chart_data
).chart
pie_chart.has_title = True
pie_chart.chart_title.text_frame.text = "Revenue by Product Category"
slide8.notes_slide.notes_text_frame.text = (
"Our product portfolio shows strong diversification across five key categories. "
"Enterprise Software dominates with 45% of revenue, reflecting our B2B focus. "
"Mobile Apps contribute 25%, showing strong consumer market presence. "
"Cloud Services at 18% represent our fastest-growing segment. "
"Consulting services provide 8% steady revenue with high margins. "
"Hardware, while only 4%, offers strategic partnerships and ecosystem benefits."
)
# Slide 9: Competitive Analysis Matrix
slide9 = prs.slides.add_slide(prs.slide_layouts[1]) # Content layout
slide9.shapes.title.text = "Competitive Landscape Assessment"
# Add competitive analysis text
content = slide9.placeholders[1]
tf = content.text_frame
tf.clear()
p1 = tf.paragraphs[0]
p1.text = "Market Position Analysis"
p1.font.bold = True
p1.font.size = Pt(18)
p2 = tf.add_paragraph()
p2.text = "Competitive Advantages:"
p2.font.bold = True
p2.font.size = Pt(14)
p3 = tf.add_paragraph()
p3.text = "• Superior customer support with 24/7 availability"
p3.level = 1
p4 = tf.add_paragraph()
p4.text = "• Advanced AI-driven analytics capabilities"
p4.level = 1
p5 = tf.add_paragraph()
p5.text = "• Comprehensive integration ecosystem"
p5.level = 1
p6 = tf.add_paragraph()
p6.text = "Areas for Improvement:"
p6.font.bold = True
p6.font.size = Pt(14)
p7 = tf.add_paragraph()
p7.text = "• Mobile platform feature parity"
p7.level = 1
p8 = tf.add_paragraph()
p8.text = "• International market expansion"
p8.level = 1
# Add competitive matrix table
left = Inches(1)
top = Inches(4)
width = Inches(8)
height = Inches(2)
comp_table = slide9.shapes.add_table(4, 4, left, top, width, height).table
# Headers
comp_headers = ["Competitor", "Market Share", "Strengths", "Weaknesses"]
for i, header in enumerate(comp_headers):
comp_table.cell(0, i).text = header
comp_table.cell(0, i).text_frame.paragraphs[0].font.bold = True
# Competitive data
comp_data = [
["Company A", "35%", "Brand Recognition", "Limited Innovation"],
["Company B", "22%", "Cost Leadership", "Poor Support"],
["Our Company", "18%", "Technology Edge", "Market Penetration"],
]
for row_idx, row_data in enumerate(comp_data, 1):
for col_idx, cell_data in enumerate(row_data):
comp_table.cell(row_idx, col_idx).text = cell_data
slide9.notes_slide.notes_text_frame.text = (
"Competitive analysis reveals our strong technology position despite smaller market share. "
"Company A leads with 35% share but lacks innovation velocity. "
"Company B competes on price but suffers from support issues. "
"Our 18% share is offset by superior technology and customer satisfaction. "
"Focus should be on leveraging our tech advantages to gain market share."
)
# Slide 10: Financial KPIs Dashboard
slide10 = prs.slides.add_slide(prs.slide_layouts[5]) # Blank layout
slide10.shapes.title.text = "Key Performance Indicators Dashboard"
# Add KPI table
rows, cols = 8, 4
left = Inches(1)
top = Inches(1.5)
width = Inches(8)
height = Inches(4.5)
kpi_table = slide10.shapes.add_table(rows, cols, left, top, width, height).table
# KPI headers
kpi_headers = ["Metric", "Current", "Target", "Status"]
for i, header in enumerate(kpi_headers):
kpi_table.cell(0, i).text = header
kpi_table.cell(0, i).text_frame.paragraphs[0].font.bold = True
# KPI data
kpi_data = [
["Monthly Recurring Revenue", "$5.2M", "$6.0M", "On Track"],
["Customer Acquisition Cost", "$450", "$400", "Needs Work"],
["Lifetime Value", "$2,800", "$3,000", "Good"],
["Churn Rate", "3.2%", "2.5%", "Improving"],
["Net Promoter Score", "68", "70", "Close"],
["Gross Margin", "72%", "75%", "Improving"],
["Employee Satisfaction", "8.1/10", "8.5/10", "Good"],
]
for row_idx, row_data in enumerate(kpi_data, 1):
for col_idx, cell_data in enumerate(row_data):
kpi_table.cell(row_idx, col_idx).text = cell_data
slide10.notes_slide.notes_text_frame.text = (
"Our KPI dashboard shows mixed but generally positive performance. "
"MRR is tracking well toward $6M target, currently at $5.2M. "
"CAC needs attention at $450, above our $400 target. "
"LTV of $2,800 provides healthy unit economics with 6:1 LTV:CAC ratio. "
"Churn improvement from 4.1% to 3.2% shows retention initiatives working. "
"Focus areas: reduce CAC through channel optimization and improve NPS."
)
# Slide 11: Technology Roadmap
slide11 = prs.slides.add_slide(prs.slide_layouts[1]) # Content layout
slide11.shapes.title.text = "Technology Roadmap 2024-2025"
# Add roadmap content
content = slide11.placeholders[1]
tf = content.text_frame
tf.clear()
p1 = tf.paragraphs[0]
p1.text = "Q1 2024 Priorities"
p1.font.bold = True
p1.font.size = Pt(16)
p2 = tf.add_paragraph()
p2.text = "• AI-powered recommendation engine deployment"
p2.level = 1
p3 = tf.add_paragraph()
p3.text = "• Mobile app performance optimization"
p3.level = 1
p4 = tf.add_paragraph()
p4.text = "• Enhanced security framework implementation"
p4.level = 1
p5 = tf.add_paragraph()
p5.text = "Q2-Q3 2024 Initiatives"
p5.font.bold = True
p5.font.size = Pt(16)
p6 = tf.add_paragraph()
p6.text = "• Real-time analytics platform launch"
p6.level = 1
p7 = tf.add_paragraph()
p7.text = "• API ecosystem expansion"
p7.level = 1
p8 = tf.add_paragraph()
p8.text = "• Multi-tenant architecture migration"
p8.level = 1
p9 = tf.add_paragraph()
p9.text = "Q4 2024 & Beyond"
p9.font.bold = True
p9.font.size = Pt(16)
p10 = tf.add_paragraph()
p10.text = "• Machine learning automation suite"
p10.level = 1
p11 = tf.add_paragraph()
p11.text = "• Global infrastructure expansion"
p11.level = 1
p12 = tf.add_paragraph()
p12.text = "• Next-generation user interface rollout"
p12.level = 1
slide11.notes_slide.notes_text_frame.text = (
"Our technology roadmap focuses on three key themes: intelligence, performance, and scale. "
"Q1 priorities center on AI capabilities and mobile optimization for immediate user impact. "
"Mid-year initiatives build platform capabilities for long-term competitive advantage. "
"Q4 and beyond targets transformational capabilities including ML automation. "
"Each initiative aligns with customer feedback and market opportunities."
)
# Slide 12: Summary and Next Steps
slide12 = prs.slides.add_slide(prs.slide_layouts[1]) # Content layout
slide12.shapes.title.text = "Executive Summary & Action Items"
# Add summary content
content = slide12.placeholders[1]
tf = content.text_frame
tf.clear()
p1 = tf.paragraphs[0]
p1.text = "Key Achievements"
p1.font.bold = True
p1.font.size = Pt(18)
p2 = tf.add_paragraph()
p2.text = "✓ 25% revenue growth exceeding targets"
p2.font.size = Pt(14)
p3 = tf.add_paragraph()
p3.text = "✓ Successful market share expansion to 22%"
p3.font.size = Pt(14)
p4 = tf.add_paragraph()
p4.text = "✓ Strong customer retention improvements"
p4.font.size = Pt(14)
p5 = tf.add_paragraph()
p5.text = "Immediate Action Items"
p5.font.bold = True
p5.font.size = Pt(18)
p6 = tf.add_paragraph()
p6.text = "1. Optimize customer acquisition costs"
p6.level = 1
p6.font.size = Pt(14)
p7 = tf.add_paragraph()
p7.text = "2. Accelerate AI feature development"
p7.level = 1
p7.font.size = Pt(14)
p8 = tf.add_paragraph()
p8.text = "3. Expand European market presence"
p8.level = 1
p8.font.size = Pt(14)
p9 = tf.add_paragraph()
p9.text = "4. Enhance mobile platform capabilities"
p9.level = 1
p9.font.size = Pt(14)
# Add final summary table
left = Inches(1)
top = Inches(4.5)
width = Inches(8)
height = Inches(1.5)
summary_table = slide12.shapes.add_table(4, 3, left, top, width, height).table
# Summary headers
summary_headers = ["Priority", "Owner", "Timeline"]
for i, header in enumerate(summary_headers):
summary_table.cell(0, i).text = header
summary_table.cell(0, i).text_frame.paragraphs[0].font.bold = True
# Summary data
summary_data = [
["CAC Optimization", "Marketing Team", "Q1 2024"],
["AI Development", "Engineering Team", "Q2 2024"],
["European Expansion", "Sales Team", "Q1-Q2 2024"],
]
for row_idx, row_data in enumerate(summary_data, 1):
for col_idx, cell_data in enumerate(row_data):
summary_table.cell(row_idx, col_idx).text = cell_data
slide12.notes_slide.notes_text_frame.text = (
"This final slide summarizes our key achievements and establishes clear action items. "
"The 25% revenue growth demonstrates strong execution of our strategy. "
"Market share expansion to 22% positions us well for continued growth. "
"Action items are prioritized based on impact and feasibility. "
"CAC optimization is critical for sustainable growth economics. "
"AI development maintains our competitive technology advantage. "
"European expansion diversifies revenue streams and reduces market risk."
)
# Save presentation
prs.save(filename)
print(f"✅ Created comprehensive test presentation: {filename}")
print("\n📋 12-Slide Presentation Contents:")
print(" • Slide 1: Title slide with speaker notes")
print(" • Slide 2: Rich formatted text with bullet points")
print(" • Slide 3: Financial data table (4x4)")
print(" • Slide 4: Column chart with 3 data series")
print(" • Slide 5: Mixed content (text + table)")
print(" • Slide 6: Line chart with projections")
print(" • Slide 7: Customer demographics table (5x5)")
print(" • Slide 8: Product portfolio pie chart")
print(" • Slide 9: Competitive analysis matrix (mixed content)")
print(" • Slide 10: Financial KPIs dashboard (7x4 table)")
print(" • Slide 11: Technology roadmap (structured text)")
print(" • Slide 12: Summary and action items table")
print("\n🎯 Perfect for testing:")
print(" ✓ Text extraction and formatting preservation")
print(" ✓ Table data extraction with complete content")
print(" ✓ Chart metadata and series data extraction")
print(" ✓ Speaker notes extraction for all slides")
print(" ✓ Mixed content handling (text + tables + charts)")
print(" ✓ Multithreading with 12 slides for concurrency testing")
print(" ✓ Various chart types (column, line, pie)")
print(" ✓ Different table sizes and structures")
print(" ✓ Rich text formatting with hierarchical content")
print(" ✓ Comprehensive speaker notes for LLM consolidation")
print("\n🚀 Concurrency Testing Features:")
print(f" • 12 slides perfect for batch_size testing")
print(f" • Multiple content types per slide")
print(f" • Rich metadata for comprehensive extraction")
print(f" • Suitable for rate limit testing with LLM consolidation")
return filename
if __name__ == "__main__":
create_comprehensive_test_presentation()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-file/tests/generate_test_ppt.py",
"license": "MIT License",
"lines": 523,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-file/tests/test_slides.py | import pytest
from llama_index.readers.file.slides import PptxReader
try:
    from .generate_test_ppt import create_comprehensive_test_presentation
except ImportError:  # keeps the availability check in the fixture meaningful
    create_comprehensive_test_presentation = None
@pytest.fixture()
def pptx_file(tmp_path):
"""Create a temporary PowerPoint file for testing."""
if create_comprehensive_test_presentation is None:
pytest.skip("generate_test_ppt not available")
# Create test presentation in temp directory
file_path = tmp_path / "test_presentation.pptx"
create_comprehensive_test_presentation(str(file_path))
return file_path
def test_pptx_reader_init():
"""Test PptxReader initialization."""
reader = PptxReader(extract_images=False, num_workers=2)
assert reader.extract_images is False
assert reader.num_workers == 2
def test_load_data_pptx(pptx_file):
"""Test loading PowerPoint data."""
reader = PptxReader(extract_images=False, context_consolidation_with_llm=False)
documents = reader.load_data(pptx_file)
# Basic validation
assert len(documents) == 12 # Should have 12 slides
assert all(hasattr(doc, "text") for doc in documents)
assert all(hasattr(doc, "metadata") for doc in documents)
# Check first slide
first_slide = documents[0]
assert "Enhanced PowerPoint Reader Test" in first_slide.text
assert first_slide.metadata["page_label"] == 1
assert len(first_slide.metadata["notes"]) > 0 # Check notes content exists
def test_table_extraction(pptx_file):
"""Test table data extraction."""
reader = PptxReader()
documents = reader.load_data(pptx_file)
# Find slides with tables
table_slides = [doc for doc in documents if len(doc.metadata.get("tables", [])) > 0]
assert len(table_slides) >= 1
# Check table metadata
table_slide = table_slides[0]
tables = table_slide.metadata.get("tables", [])
assert len(tables) > 0
table = tables[0]
assert "headers" in table
assert "data" in table
assert "dimensions" in table
def test_chart_extraction(pptx_file):
"""Test chart metadata extraction."""
reader = PptxReader()
documents = reader.load_data(pptx_file)
# Find slides with charts
chart_slides = [doc for doc in documents if len(doc.metadata.get("charts", [])) > 0]
assert len(chart_slides) >= 1
# Check chart metadata
chart_slide = chart_slides[0]
charts = chart_slide.metadata.get("charts", [])
assert len(charts) > 0
chart = charts[0]
assert "chart_type" in chart
assert "series_info" in chart
def test_speaker_notes_extraction(pptx_file):
"""Test speaker notes extraction."""
reader = PptxReader()
documents = reader.load_data(pptx_file)
# All slides should have notes
slides_with_notes = [
doc for doc in documents if len(doc.metadata.get("notes", "")) > 0
]
assert len(slides_with_notes) == 12
# Check notes content
first_slide = documents[0]
notes = first_slide.metadata.get("notes", "")
assert len(notes) > 0
assert "comprehensive test presentation" in notes.lower()
def test_content_consolidation(pptx_file):
"""Test content consolidation structure."""
reader = PptxReader()
documents = reader.load_data(pptx_file)
# Check content structure
for doc in documents:
assert "-----" in doc.text # Section separators
assert len(doc.text) > 0 # Content should exist
def test_multithreading(pptx_file):
"""Test multithreaded processing."""
reader = PptxReader(num_workers=2, batch_size=4)
documents = reader.load_data(pptx_file)
# Should process successfully with threading
assert len(documents) == 12
assert all(doc.metadata.get("page_label") for doc in documents)
def test_llm_consolidation_with_settings_llm(pptx_file):
"""Test LLM consolidation when LLM is set in Settings but not passed directly."""
from llama_index.core import Settings
from llama_index.core.llms.mock import MockLLM
Settings.llm = MockLLM()
reader = PptxReader(
extract_images=False,
context_consolidation_with_llm=True, # Request LLM consolidation
llm=None, # Don't pass LLM directly
num_workers=2,
)
# Should still return results
documents = reader.load_data(pptx_file)
# Basic validation
assert len(documents) == 12
assert all(hasattr(doc, "text") for doc in documents)
def test_llm_consolidation_with_direct_llm(pptx_file):
"""Test LLM consolidation when LLM is passed directly to PptxReader."""
from llama_index.core.llms.mock import MockLLM
# Create MockLLM directly and pass it to the reader
mock_llm = MockLLM()
reader = PptxReader(
extract_images=False,
context_consolidation_with_llm=True, # Request LLM consolidation
llm=mock_llm, # Pass LLM directly
num_workers=2,
)
# Should use the directly passed LLM
assert reader.context_consolidation_with_llm is True
assert reader.llm is mock_llm # Should be the exact same instance
# Should process successfully
documents = reader.load_data(pptx_file)
# Basic validation
assert len(documents) == 12
assert all(hasattr(doc, "text") for doc in documents)
assert all(hasattr(doc, "metadata") for doc in documents)
# Content should be consolidated
for doc in documents:
assert "-----" in doc.text # Section separators should be there
assert len(doc.text) > 0 # Should have content
def test_title_detection_scoring(tmp_path):
"""Test that title detection correctly identifies titles using position and size scoring."""
from pptx import Presentation
from pptx.util import Inches
# Create a test presentation with different text positions
prs = Presentation()
slide = prs.slides.add_slide(prs.slide_layouts[6]) # Blank layout
# Add title at top (should win)
title_box = slide.shapes.add_textbox(
left=Inches(1), top=Inches(0.5), width=Inches(8), height=Inches(0.8)
)
title_box.text_frame.text = "Test Title"
# Add body text in middle (larger box, should lose)
body_box = slide.shapes.add_textbox(
left=Inches(1), top=Inches(2), width=Inches(8), height=Inches(2)
)
body_box.text_frame.text = (
"This is longer body text that should not be detected as title"
)
# Add footer at bottom (should lose due to position)
footer_box = slide.shapes.add_textbox(
left=Inches(1), top=Inches(6), width=Inches(8), height=Inches(0.5)
)
footer_box.text_frame.text = "Footer"
# Save test presentation
test_file = tmp_path / "title_test.pptx"
prs.save(str(test_file))
# Test title detection
reader = PptxReader(extract_images=False, context_consolidation_with_llm=False)
documents = reader.load_data(test_file)
# Should have one document
assert len(documents) == 1
doc = documents[0]
# Title should be detected correctly
assert doc.metadata["title"] == "Test Title"
def test_title_detection_edge_cases(tmp_path):
"""Test title detection edge cases through the public API."""
from pptx import Presentation
from pptx.util import Inches
# Test 1: Empty slide (no shapes)
prs = Presentation()
empty_slide = prs.slides.add_slide(prs.slide_layouts[6])
test_file = tmp_path / "empty_test.pptx"
prs.save(str(test_file))
reader = PptxReader(extract_images=False, context_consolidation_with_llm=False)
documents = reader.load_data(test_file)
assert len(documents) == 1
assert documents[0].metadata["title"] == "" # Empty slide should have empty title
# Test 2: Multiple titles at different positions (top should win)
prs2 = Presentation()
multi_title_slide = prs2.slides.add_slide(prs2.slide_layouts[6])
# Title 1 at very top
top_title = multi_title_slide.shapes.add_textbox(
left=Inches(1), top=Inches(0.2), width=Inches(8), height=Inches(0.6)
)
top_title.text_frame.text = "Top Title"
# Title 2 lower but smaller
lower_title = multi_title_slide.shapes.add_textbox(
left=Inches(1), top=Inches(1.5), width=Inches(6), height=Inches(0.4)
)
lower_title.text_frame.text = "Lower"
test_file2 = tmp_path / "multi_title_test.pptx"
prs2.save(str(test_file2))
documents2 = reader.load_data(test_file2)
assert len(documents2) == 1
assert documents2[0].metadata["title"] == "Top Title" # Top position should win
def test_raise_on_error_parameter(tmp_path):
"""Test raise_on_error parameter behavior with invalid files."""
# Test 1: raise_on_error=False (default) - should return empty list on error
reader_no_raise = PptxReader(extract_images=False, raise_on_error=False)
# Try to read a non-existent file
non_existent_file = tmp_path / "does_not_exist.pptx"
documents = reader_no_raise.load_data(non_existent_file)
assert documents == [] # Should return empty list, not raise error
# Test 2: raise_on_error=True - should raise ValueError on error
reader_with_raise = PptxReader(extract_images=False, raise_on_error=True)
# Try to read the same non-existent file, should raise ValueError
with pytest.raises(ValueError, match="Failed to extract data"):
reader_with_raise.load_data(non_existent_file)
# Test 3: Create an invalid file and test both behaviors
invalid_file = tmp_path / "invalid.pptx"
invalid_file.write_text("This is not a valid PowerPoint file")
# With raise_on_error=False, should return empty list
documents = reader_no_raise.load_data(invalid_file)
assert documents == []
# With raise_on_error=True, should raise ValueError
with pytest.raises(ValueError, match="Failed to extract data"):
reader_with_raise.load_data(invalid_file)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-file/tests/test_slides.py",
"license": "MIT License",
"lines": 216,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/graph_rag/llama-index-graph-rag-cognee/example.py | """
Simple example demonstrating the CogneeGraphRAG integration.
This script shows how to:
1. Initialize the CogneeGraphRAG
2. Add documents to the knowledge graph
3. Process the data into a graph
4. Search for information
5. Visualize the graph
Requirements:
- Set OPENAI_API_KEY environment variable
- Install the package: pip install llama-index-graph-rag-cognee
"""
import asyncio
import os
from llama_index.core import Document
from llama_index.graph_rag.cognee import CogneeGraphRAG
async def main():
# Check for API key
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
print("❌ Please set your OPENAI_API_KEY environment variable")
print(" export OPENAI_API_KEY='your-api-key-here'")
return
print("🚀 Initializing CogneeGraphRAG...")
# Initialize the GraphRAG system
cognee_rag = CogneeGraphRAG(
llm_api_key=api_key,
llm_provider="openai",
llm_model="gpt-4o-mini",
graph_db_provider="kuzu",
vector_db_provider="lancedb",
relational_db_provider="sqlite",
relational_db_name="cognee_example_db",
)
print("📄 Creating sample documents...")
# Create sample documents
documents = [
Document(
text="Apple Inc. is a multinational technology company headquartered in Cupertino, California. "
"It was founded by Steve Jobs, Steve Wozniak, and Ronald Wayne in 1976. "
"Apple is known for its consumer electronics, software, and online services."
),
Document(
text="Steve Jobs was the co-founder and longtime CEO of Apple Inc. "
"He was known for his innovation in personal computing, animated movies, and mobile phones. "
"Jobs passed away in 2011, leaving behind a legacy of revolutionary products."
),
Document(
text="The iPhone is Apple's flagship smartphone product, first released in 2007. "
"It revolutionized the mobile phone industry with its touchscreen interface "
"and App Store ecosystem. The iPhone runs on iOS operating system."
),
]
print("➕ Adding documents to the knowledge graph...")
# Add documents to the graph
await cognee_rag.add(documents, dataset_name="apple_knowledge")
print(" ✅ Documents added successfully")
print("🔄 Processing data into knowledge graph...")
# Process the data to create the knowledge graph
await cognee_rag.process_data("apple_knowledge")
print(" ✅ Data processed into graph")
print("🔍 Searching the knowledge graph...")
# Perform searches
queries = [
"Who founded Apple?",
"When was iPhone released?",
"What is Steve Jobs known for?",
]
for query in queries:
print(f"\n Query: {query}")
results = await cognee_rag.search(query)
if results:
print(f" Answer: {results[0] if isinstance(results, list) else results}")
else:
print(" No results found")
print("\n🕸️ Generating graph visualization...")
    # Create visualization (written to the current directory here; omitting
    # output_file_path falls back to the home directory)
try:
viz_path = await cognee_rag.visualize_graph(
open_browser=True, output_file_path="."
)
print(f" ✅ Graph visualization saved to: {viz_path}")
print(f" 🌐 Open the file in your browser to view the knowledge graph")
except Exception as e:
print(f" ⚠️ Visualization failed: {e}")
print("\n🎉 Example completed! The knowledge graph is ready for use.")
print("\n📚 Next steps:")
print(" - Add more documents with cognee_rag.add()")
print(" - Process with cognee_rag.process_data()")
print(" - Search with cognee_rag.search()")
print(" - Explore related nodes with cognee_rag.get_related_nodes()")
if __name__ == "__main__":
# Run the async main function
asyncio.run(main())
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/graph_rag/llama-index-graph-rag-cognee/example.py",
"license": "MIT License",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/graph_rag/llama-index-graph-rag-cognee/tests/test_visualize_graph.py | import os
import sys
import tempfile
from unittest.mock import AsyncMock
import pytest
from llama_index.graph_rag.cognee import CogneeGraphRAG
@pytest.mark.skipif(
sys.version_info < (3, 10), reason="mock strategy requires python3.10 or higher"
)
@pytest.mark.skipif(
os.getenv("OPENAI_API_KEY") is None,
reason="OPENAI_API_KEY not available to test Cognee integration",
)
@pytest.mark.asyncio
async def test_visualize_graph(monkeypatch):
# Instantiate cognee GraphRAG
cogneeRAG = CogneeGraphRAG(
llm_api_key=os.getenv("OPENAI_API_KEY", "your-api-key"),
llm_provider="openai",
llm_model="gpt-4o-mini",
graph_db_provider="kuzu",
vector_db_provider="lancedb",
relational_db_provider="sqlite",
relational_db_name="cognee_db",
)
# Mock cognee's visualize_graph function
mock_visualize = AsyncMock(return_value=None)
import cognee
monkeypatch.setattr(cognee, "visualize_graph", mock_visualize)
# Test with custom output path
with tempfile.TemporaryDirectory() as temp_dir:
result_path = await cogneeRAG.visualize_graph(
open_browser=False, output_file_path=temp_dir
)
# Verify the function was called
mock_visualize.assert_called_once()
# Verify the returned path is correct
expected_path = os.path.join(temp_dir, "graph_visualization.html")
assert result_path == expected_path
# Test with default path (home directory)
mock_visualize.reset_mock()
result_path = await cogneeRAG.visualize_graph(open_browser=False)
# Verify the function was called again
mock_visualize.assert_called_once()
# Verify the returned path points to home directory
home_dir = os.path.expanduser("~")
expected_path = os.path.join(home_dir, "graph_visualization.html")
assert result_path == expected_path
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/graph_rag/llama-index-graph-rag-cognee/tests/test_visualize_graph.py",
"license": "MIT License",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-mcp/llama_index/tools/mcp/tool_spec_mixins.py | from typing import Any, Dict, List, Union, Literal, Type, TYPE_CHECKING
from pydantic import Field
if TYPE_CHECKING:
from llama_index.tools.mcp.base import McpToolSpec
# Map JSON Schema types to Python types
json_type_mapping: Dict[str, Type] = {
"string": str,
"number": float,
"integer": int,
"boolean": bool,
"object": Dict,
"array": List,
}
class TypeResolutionMixin:
def _resolve_field_type(
self: "McpToolSpec",
field_schema: dict,
defs: dict,
) -> Any:
"""Resolve the Python type from a field schema."""
if "$ref" in field_schema:
return self._resolve_reference(field_schema, defs)
if "enum" in field_schema:
return Literal[tuple(field_schema["enum"])]
if "anyOf" in field_schema:
return self._resolve_union_type(field_schema, defs)
return self._resolve_basic_type(field_schema, defs)
def _resolve_reference(
self: "McpToolSpec",
field_schema: dict,
defs: dict,
) -> Any:
"""Resolve a $ref reference."""
ref_name = self._extract_ref_name(field_schema["$ref"])
if ref_name not in defs:
return self.properties_cache.get(ref_name)
ref_schema = defs[ref_name]
if "anyOf" in ref_schema:
return self._resolve_union_type(ref_schema, defs)
if self._is_simple_array(ref_schema):
return self._create_list_type(ref_schema, defs)
if self._is_simple_object(ref_schema):
return self._create_dict_type(ref_schema, defs)
return self.properties_cache.get(ref_name) or self._create_model(
ref_schema,
ref_name,
defs,
)
def _resolve_union_type(
self: "McpToolSpec",
schema: dict,
defs: dict,
) -> Any:
"""Resolve a Union type (anyOf)."""
union_types = [
self._resolve_union_option(option, defs) for option in schema["anyOf"]
]
return Union[tuple(union_types)] if len(union_types) > 1 else union_types[0]
def _resolve_union_option(
self: "McpToolSpec",
option: dict,
defs: dict,
) -> Any:
"""Resolve a single option in a union type."""
if "$ref" in option:
return self._resolve_reference(option, defs)
if "enum" in option:
return Literal[tuple(option["enum"])]
if option.get("type") == "null":
return type(None)
return self._resolve_basic_type(option, defs)
def _resolve_basic_type(
self: "McpToolSpec",
schema: dict,
defs: dict,
) -> Any:
"""Resolve a basic JSON Schema type."""
json_type = schema.get("type", "string")
json_type = json_type[0] if isinstance(json_type, list) else json_type
if self._is_simple_array(schema):
return self._create_list_type(schema, defs)
if self._is_simple_object(schema):
return self._create_dict_type(schema, defs)
return json_type_mapping.get(json_type, str)
class TypeCreationMixin:
def _create_list_type(self: "McpToolSpec", schema: dict, defs: dict) -> type:
"""Create a List type from schema."""
item_type = self._resolve_field_type(schema["items"], defs)
return List[item_type]
def _create_dict_type(self: "McpToolSpec", schema: dict, defs: dict) -> type:
"""Create a Dict type from schema."""
additional_props = schema.get("additionalProperties")
if additional_props is False or additional_props is None:
return Dict[str, Any]
if isinstance(additional_props, dict):
value_type = self._resolve_field_type(additional_props, defs)
return Dict[str, value_type]
return Dict[str, Any]
def _is_simple_array(self: "McpToolSpec", schema: dict) -> bool:
"""Check if schema is a simple array type."""
return schema.get("type") == "array" and "items" in schema
def _is_simple_object(self: "McpToolSpec", schema: dict) -> bool:
"""Check if schema is a simple object type."""
additional_props = schema.get("additionalProperties")
return (
schema.get("type") == "object"
and "additionalProperties" in schema
and additional_props is not False
and isinstance(additional_props, dict)
)
def _extract_ref_name(self: "McpToolSpec", ref_path: str) -> str:
"""Extract reference name from $ref path."""
return ref_path.split("#/$defs/")[-1]
class FieldExtractionMixin:
def _extract_fields(self: "McpToolSpec", schema: dict, defs: dict) -> dict:
"""Extract Pydantic fields from schema."""
properties = self._get_properties(schema)
required_fields = set(schema.get("required", []))
# For enum schemas, treat them as required by default
if "enum" in schema:
required_fields = {schema.get("title", "enum_field")}
fields = {}
for field_name, field_schema in properties.items():
field_type = self._resolve_field_type(field_schema, defs)
default_value, final_type = self._set_field_default(
field_name,
required_fields,
field_type,
field_schema,
)
fields[field_name] = (
final_type,
Field(default_value, description=field_schema.get("description", "")),
)
return fields
def _get_properties(self: "McpToolSpec", schema: dict) -> dict:
"""Get properties from schema, handling enum types."""
if "enum" in schema:
# For enum types, create a property with the schema name as the key
# This ensures the enum is treated as a required field
return {schema.get("title", "enum_field"): schema}
return schema.get("properties", {})
@staticmethod
def _set_field_default(
field: str,
required_fields: set[str],
ftype: Any,
field_schema: dict,
    ) -> tuple[Any, Any]:
"""Set default value and make type optional if needed."""
if field in required_fields:
return ..., ftype
default_value = field_schema.get("default")
if default_value is None:
ftype = ftype | type(None)
return default_value, ftype
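# Illustration (hypothetical, standalone): how the mixins above turn a simple
# JSON Schema into Pydantic field definitions. ``DemoSpec`` stands in for
# ``McpToolSpec`` and exists only for this sketch; simple schemas never reach
# ``_create_model`` or the reference cache, so the mixins work on their own.
if __name__ == "__main__":
    from pydantic import create_model
    class DemoSpec(TypeResolutionMixin, TypeCreationMixin, FieldExtractionMixin):
        properties_cache: Dict[str, Any] = {}
    schema = {
        "type": "object",
        "properties": {
            "name": {"type": "string", "description": "User name"},
            "age": {"type": "integer", "default": 0},
            "tags": {"type": "array", "items": {"type": "string"}},
        },
        "required": ["name"],
    }
    fields = DemoSpec()._extract_fields(schema, defs={})
    DemoModel = create_model("DemoModel", **fields)
    # name is required; age falls back to its schema default; tags is Optional[List[str]]
    print(DemoModel(name="alice", tags=["a", "b"]))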
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-mcp/llama_index/tools/mcp/tool_spec_mixins.py",
"license": "MIT License",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-mcp/tests/schemas.py | from typing import Literal, List
from pydantic import BaseModel
MethodType = Literal["POST", "GET", "UPDATE", "DELETE"]
XY = List[str]
class TestName(BaseModel):
name: str
class TestMethod(BaseModel):
method: MethodType
class TestList(BaseModel):
lst: List[int]
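# Illustration (not used by the tests themselves): the JSON Schema pydantic
# emits for these models is what the MCP tool-spec mixins consume, e.g. the
# Literal-typed ``method`` field surfaces as an "enum" entry in the schema.
if __name__ == "__main__":
    import json
    print(json.dumps(TestMethod.model_json_schema(), indent=2))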
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-mcp/tests/schemas.py",
"license": "MIT License",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-heroku/llama_index/llms/heroku/base.py | from typing import Any, Optional
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.base.llms.generic_utils import (
get_from_param_or_env,
)
from llama_index.llms.openai_like import OpenAILike
class Heroku(OpenAILike):
"""Heroku Managed Inference LLM Integration."""
_client: Any = PrivateAttr()
_aclient: Any = PrivateAttr()
def __init__(
self,
model: Optional[str] = None,
api_key: Optional[str] = None,
inference_url: Optional[str] = None,
max_tokens: Optional[int] = 1024,
is_chat_model: bool = True,
**kwargs: Any,
) -> None:
"""
Initialize an instance of the Heroku class.
This class provides an interface to Heroku's Managed Inference API.
It connects to your Heroku app's inference endpoints for chat and completion models.
Args:
model (str, optional): The model to use. If not provided, will use INFERENCE_MODEL_ID.
api_key (str, optional): The API key for Heroku inference. Defaults to INFERENCE_KEY.
inference_url (str, optional): The base URL for inference. Defaults to INFERENCE_URL.
max_tokens (int, optional): The maximum number of tokens to generate. Defaults to 1024.
**kwargs: Additional keyword arguments.
Environment Variables:
- INFERENCE_KEY: The API key for Heroku inference
- INFERENCE_URL: The base URL for inference endpoints
- INFERENCE_MODEL_ID: The model ID to use
Raises:
ValueError: If required environment variables are not set.
"""
# Get API key from parameter or environment
try:
api_key = get_from_param_or_env(
"api_key",
api_key,
"INFERENCE_KEY",
)
except ValueError:
raise ValueError(
"API key is required. Set INFERENCE_KEY environment variable or pass api_key parameter."
)
# Get inference URL from parameter or environment
try:
inference_url = get_from_param_or_env(
"inference_url",
inference_url,
"INFERENCE_URL",
)
except ValueError:
raise ValueError(
"Inference URL is required. Set INFERENCE_URL environment variable or pass inference_url parameter."
)
# Get model from parameter or environment
try:
model = get_from_param_or_env(
"model",
model,
"INFERENCE_MODEL_ID",
)
except ValueError:
raise ValueError(
"Model is required. Set INFERENCE_MODEL_ID environment variable or pass model parameter."
)
        # Construct the base URL for the API, tolerating a trailing slash on inference_url
        base_url = f"{inference_url.rstrip('/')}/v1"
super().__init__(
model=model,
api_key=api_key,
api_base=base_url,
max_tokens=max_tokens,
is_chat_model=is_chat_model,
default_headers={"User-Agent": "llama-index-llms-heroku"},
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "Heroku"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-heroku/llama_index/llms/heroku/base.py",
"license": "MIT License",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-heroku/tests/test_api_key.py | import os
import pytest
from typing import Any
from pytest_httpx import HTTPXMock
from llama_index.llms.heroku import Heroku
@pytest.fixture()
def mock_heroku_models(httpx_mock: HTTPXMock):
"""Mock Heroku models endpoint response."""
mock_response = {
"data": [
{
"id": "claude-3-5-haiku",
"object": "model",
"created": 1234567890,
"owned_by": "heroku",
"root": "claude-3-5-haiku",
}
]
}
httpx_mock.add_response(
url="https://test-app.herokuapp.com/v1/models",
method="GET",
json=mock_response,
status_code=200,
)
def get_api_key(instance: Any) -> str:
"""Helper function to get API key from instance."""
return instance.api_key
def test_create_without_api_key_raises_error() -> None:
"""Test that creating without API key raises ValueError."""
with pytest.raises(ValueError, match="API key is required"):
Heroku()
def test_create_without_inference_url_raises_error() -> None:
"""Test that creating without inference URL raises ValueError."""
with pytest.raises(ValueError, match="Inference URL is required"):
Heroku(api_key="test-key")
def test_create_without_model_raises_error() -> None:
"""Test that creating without model raises ValueError."""
with pytest.raises(ValueError, match="Model is required"):
Heroku(api_key="test-key", inference_url="https://test-app.herokuapp.com")
def test_create_with_all_parameters() -> None:
"""Test creating with all required parameters."""
instance = Heroku(
model="claude-3-5-haiku",
api_key="test-key",
inference_url="https://test-app.herokuapp.com",
)
assert instance.api_key == "test-key"
assert instance.api_base == "https://test-app.herokuapp.com/v1"
assert instance.model == "claude-3-5-haiku"
def test_api_key_from_environment() -> None:
"""Test that API key is read from environment variable."""
try:
os.environ["INFERENCE_KEY"] = "env-key"
os.environ["INFERENCE_URL"] = "https://test-app.herokuapp.com"
os.environ["INFERENCE_MODEL_ID"] = "claude-3-5-haiku"
instance = Heroku()
assert instance.api_key == "env-key"
assert instance.api_base == "https://test-app.herokuapp.com/v1"
assert instance.model == "claude-3-5-haiku"
finally:
# Clean up environment variables
for key in ["INFERENCE_KEY", "INFERENCE_URL", "INFERENCE_MODEL_ID"]:
if key in os.environ:
del os.environ[key]
def test_parameter_overrides_environment() -> None:
"""Test that parameters override environment variables."""
try:
os.environ["INFERENCE_KEY"] = "env-key"
os.environ["INFERENCE_URL"] = "https://env-app.herokuapp.com"
os.environ["INFERENCE_MODEL_ID"] = "env-model"
instance = Heroku(
model="param-model",
api_key="param-key",
inference_url="https://param-app.herokuapp.com",
)
assert instance.api_key == "param-key"
assert instance.api_base == "https://param-app.herokuapp.com/v1"
assert instance.model == "param-model"
finally:
# Clean up environment variables
for key in ["INFERENCE_KEY", "INFERENCE_URL", "INFERENCE_MODEL_ID"]:
if key in os.environ:
del os.environ[key]
def test_model_parameter_overrides_environment() -> None:
"""Test that model parameter overrides environment variable."""
try:
os.environ["INFERENCE_MODEL_ID"] = "env-model"
instance = Heroku(
model="explicit-model",
api_key="test-key",
inference_url="https://test-app.herokuapp.com",
)
assert instance.model == "explicit-model"
finally:
if "INFERENCE_MODEL_ID" in os.environ:
del os.environ["INFERENCE_MODEL_ID"]
def test_model_from_environment() -> None:
"""Test that model is read from environment variable when not provided."""
try:
os.environ["INFERENCE_MODEL_ID"] = "env-model"
instance = Heroku(
api_key="test-key", inference_url="https://test-app.herokuapp.com"
)
assert instance.model == "env-model"
finally:
if "INFERENCE_MODEL_ID" in os.environ:
del os.environ["INFERENCE_MODEL_ID"]
@pytest.mark.integration
def test_missing_api_key_error() -> None:
"""Test that missing API key results in proper error."""
with pytest.raises(ValueError, match="API key is required"):
Heroku(inference_url="https://test-app.herokuapp.com", model="test-model")
@pytest.mark.integration
def test_missing_inference_url_error() -> None:
"""Test that missing inference URL results in proper error."""
with pytest.raises(ValueError, match="Inference URL is required"):
Heroku(api_key="test-key", model="test-model")
@pytest.mark.integration
def test_missing_model_error() -> None:
"""Test that missing model results in proper error."""
with pytest.raises(ValueError, match="Model is required"):
Heroku(api_key="test-key", inference_url="https://test-app.herokuapp.com")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-heroku/tests/test_api_key.py",
"license": "MIT License",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-heroku/tests/test_integration.py | from typing import Annotated
import pytest
from pytest_httpx import HTTPXMock
from llama_index.llms.heroku import Heroku
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.core.tools import FunctionTool
@pytest.fixture()
def mock_heroku_chat_completion(httpx_mock: HTTPXMock):
"""Mock Heroku chat completion endpoint response."""
mock_response = {
"id": "chatcmpl-123",
"object": "chat.completion",
"created": 1677652288,
"model": "claude-3-5-haiku",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "Hello! I'm here to help you with any questions you might have.",
},
"finish_reason": "stop",
}
],
"usage": {"prompt_tokens": 9, "completion_tokens": 12, "total_tokens": 21},
}
httpx_mock.add_response(
url="https://test-app.herokuapp.com/v1/chat/completions",
method="POST",
json=mock_response,
status_code=200,
match_headers={"Authorization": "Bearer test-key"},
)
@pytest.fixture()
def mock_heroku_completion(httpx_mock: HTTPXMock):
"""Mock Heroku completion endpoint response."""
mock_response = {
"id": "cmpl-123",
"object": "text_completion",
"created": 1677652288,
"model": "claude-3-5-haiku",
"choices": [
{
"text": "This is a test completion response.",
"index": 0,
"logprobs": None,
"finish_reason": "stop",
}
],
"usage": {"prompt_tokens": 5, "completion_tokens": 8, "total_tokens": 13},
}
httpx_mock.add_response(
url="https://test-app.herokuapp.com/v1/completions",
method="POST",
json=mock_response,
status_code=200,
match_headers={"Authorization": "Bearer test-key"},
)
@pytest.fixture()
def mock_heroku_tool_call_completion(httpx_mock: HTTPXMock):
"""Mock Heroku tool call completion endpoint response."""
mock_response = {
"id": "chatcmpl-1839adcc2079997417288",
"object": "chat.completion",
"created": 1745617422,
"model": "claude-4-sonnet",
"system_fingerprint": "heroku-inf-1y38gdr",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "I'll help you check the current weather in Portland. Since Portland could refer to either Portland, Oregon or Portland, Maine, I should specify the state.\nI'll check Portland, OR as it's the larger and more commonly referenced Portland.",
"refusal": None,
"tool_calls": [
{
"id": "tooluse_aFByQsacQ_2BmYMGHvkBmg",
"type": "function",
"function": {
"name": "get_current_weather",
"arguments": '{"location":"Portland, OR"}',
},
}
],
},
"finish_reason": "tool_calls",
}
],
"usage": {"prompt_tokens": 407, "completion_tokens": 107, "total_tokens": 514},
}
# More flexible mock that matches any POST request to the chat completions endpoint
httpx_mock.add_response(
url="https://test-app.herokuapp.com/v1/chat/completions",
method="POST",
json=mock_response,
status_code=200,
match_headers={"Authorization": "Bearer test-key"},
)
@pytest.mark.usefixtures("mock_heroku_chat_completion")
def test_chat_completion() -> None:
"""Test chat completion functionality."""
llm = Heroku(
model="claude-3-5-haiku",
api_key="test-key",
inference_url="https://test-app.herokuapp.com",
is_chat_model=True,
)
messages = [ChatMessage(role=MessageRole.USER, content="Hello, how are you?")]
response = llm.chat(messages)
assert (
response.message.content
== "Hello! I'm here to help you with any questions you might have."
)
@pytest.mark.usefixtures("mock_heroku_completion")
def test_text_completion() -> None:
"""Test text completion functionality."""
llm = Heroku(
model="claude-3-5-haiku",
api_key="test-key",
inference_url="https://test-app.herokuapp.com",
is_chat_model=False,
)
response = llm.complete("Test prompt")
assert response.text == "This is a test completion response."
@pytest.mark.usefixtures("mock_heroku_chat_completion")
def test_chat_with_system_message() -> None:
"""Test chat with system message."""
llm = Heroku(
model="claude-3-5-haiku",
api_key="test-key",
inference_url="https://test-app.herokuapp.com",
is_chat_model=True,
)
messages = [
ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
ChatMessage(role=MessageRole.USER, content="Hello, how are you?"),
]
response = llm.chat(messages)
assert (
response.message.content
== "Hello! I'm here to help you with any questions you might have."
)
@pytest.mark.usefixtures("mock_heroku_chat_completion")
def test_chat_with_max_tokens() -> None:
"""Test chat with max_tokens parameter."""
llm = Heroku(
model="claude-3-5-haiku",
api_key="test-key",
inference_url="https://test-app.herokuapp.com",
is_chat_model=True,
max_tokens=50,
)
messages = [ChatMessage(role=MessageRole.USER, content="Hello, how are you?")]
response = llm.chat(messages)
assert (
response.message.content
== "Hello! I'm here to help you with any questions you might have."
)
def test_class_name() -> None:
"""Test that class_name returns correct value."""
llm = Heroku(
model="claude-3-5-haiku",
api_key="test-key",
inference_url="https://test-app.herokuapp.com",
is_chat_model=True,
)
assert llm.class_name() == "Heroku"
@pytest.mark.usefixtures("mock_heroku_tool_call_completion")
def test_chat_with_tool_call_completion() -> None:
"""Test chat with tool call completion."""
llm = Heroku(
model="claude-4-sonnet",
api_key="test-key",
inference_url="https://test-app.herokuapp.com",
is_chat_model=True,
)
weather_tool = FunctionTool.from_defaults(get_current_weather)
# Test direct tool calling with the LLM
messages = [
ChatMessage(
role=MessageRole.USER,
content="What is the weather in Portland?",
tools=[weather_tool],
)
]
response = llm.chat(messages)
# Verify the response contains tool calls
assert response.message.additional_kwargs.get("tool_calls") is not None
tool_calls = response.message.additional_kwargs["tool_calls"]
assert len(tool_calls) > 0
assert tool_calls[0].function.name == "get_current_weather"
def get_current_weather(
location: Annotated[str, "A city name and state, formatted like '<name>, <state>'"],
) -> str:
"""Get the current weather in a given location."""
return f"The current weather in {location} is sunny."
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-heroku/tests/test_integration.py",
"license": "MIT License",
"lines": 196,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/agent/react/test_prompt_customization.py | from llama_index.core import PromptTemplate
from llama_index.core.agent.workflow import ReActAgent
from textwrap import dedent
def test_partial_formatted_system_prompt():
"""Partially formatted context should be preserved."""
agent = ReActAgent()
prompt = PromptTemplate(
dedent(
"""\
Required template variables:
{tool_desc}
{tool_names}
Additional variables:
{dummy_var}
"""
)
)
dummy_var = "dummy_context"
agent.update_prompts({"react_header": prompt.partial_format(dummy_var=dummy_var)})
assert dummy_var in agent.formatter.system_header
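
# ---------------------------------------------------------------------------
# Companion sketch (deliberately not named test_* so pytest skips it; the
# behaviour it asserts is an assumption based on the test above):
# partial_format should only substitute the supplied variable, leaving the
# required ReAct placeholders verbatim in the stored system header until
# formatting time.
def _sketch_required_variables_preserved():
    agent = ReActAgent()
    prompt = PromptTemplate("{tool_desc}\n{tool_names}\n{dummy_var}")
    agent.update_prompts({"react_header": prompt.partial_format(dummy_var="ctx")})
    assert "{tool_desc}" in agent.formatter.system_header
    assert "{tool_names}" in agent.formatter.system_header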
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/agent/react/test_prompt_customization.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/llama_index/tools/aws_bedrock_agentcore/browser/base.py | import os
import logging
from typing import Dict, Optional
from urllib.parse import urlparse
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from bedrock_agentcore.tools.browser_client import BrowserClient
from .browser_session_manager import BrowserSessionManager
from .utils import aget_current_page, get_current_page
DEFAULT_BROWSER_IDENTIFIER = "aws.browser.v1"
DEFAULT_BROWSER_SESSION_TIMEOUT = 3600
DEFAULT_BROWSER_LIVE_VIEW_PRESIGNED_URL_TIMEOUT = 300
logger = logging.getLogger(__name__)
def get_aws_region() -> str:
"""Get the AWS region from environment variables or use default."""
return os.getenv("AWS_REGION") or os.getenv("AWS_DEFAULT_REGION") or "us-west-2"
class AgentCoreBrowserToolSpec(BaseToolSpec):
"""
AWS Bedrock AgentCore Browser Tool Spec.
This toolkit provides a set of tools for working with a remote browser environment:
* navigate_browser - Navigate to a URL
* click_element - Click on an element using CSS selectors
* extract_text - Extract all text from the current webpage
* extract_hyperlinks - Extract all hyperlinks from the current webpage
* get_elements - Get elements matching a CSS selector
* navigate_back - Navigate to the previous page
* current_webpage - Get information about the current webpage
The toolkit supports multiple threads by maintaining separate browser sessions for each thread ID.
"""
spec_functions = [
("navigate_browser", "anavigate_browser"),
("click_element", "aclick_element"),
("extract_text", "aextract_text"),
("extract_hyperlinks", "aextract_hyperlinks"),
("get_elements", "aget_elements"),
("navigate_back", "anavigate_back"),
("current_webpage", "acurrent_webpage"),
]
def __init__(self, region: Optional[str] = None) -> None:
"""
Initialize the AWS Bedrock AgentCore Browser Tool Spec.
Args:
region (Optional[str]): AWS region to use for Bedrock AgentCore services.
If not provided, will try to get it from environment variables.
"""
self.region = region if region is not None else get_aws_region()
self._browser_clients: Dict[str, BrowserClient] = {}
self._session_manager = BrowserSessionManager(region=self.region)
def _get_or_create_browser_client(
self, thread_id: str = "default"
) -> BrowserClient:
"""
Get or create a browser client for the specified thread.
Args:
thread_id: Thread ID for the browser session
Returns:
BrowserClient instance
"""
if thread_id in self._browser_clients:
return self._browser_clients[thread_id]
# Create a new browser client for this thread
browser_client = BrowserClient(self.region)
self._browser_clients[thread_id] = browser_client
return browser_client
def navigate_browser(
self,
url: str,
thread_id: str = "default",
) -> str:
"""
Navigate to a URL (synchronous version).
Args:
url (str): URL to navigate to.
thread_id (str): Thread ID for the browser session.
Returns:
str: Confirmation message.
"""
try:
# Validate URL scheme
parsed_url = urlparse(url)
if parsed_url.scheme not in ("http", "https"):
return f"URL scheme must be 'http' or 'https', got: {parsed_url.scheme}"
# Get browser and navigate to URL
browser = self._session_manager.get_sync_browser(thread_id)
page = get_current_page(browser)
response = page.goto(url)
status = response.status if response else "unknown"
# Release the browser
self._session_manager.release_sync_browser(thread_id)
return f"Navigated to {url} with status code {status}"
except Exception as e:
return f"Error navigating to URL: {e!s}"
async def anavigate_browser(
self,
url: str,
thread_id: str = "default",
) -> str:
"""
Navigate to a URL (asynchronous version).
Args:
url (str): URL to navigate to.
thread_id (str): Thread ID for the browser session.
Returns:
str: Confirmation message.
"""
try:
# Validate URL scheme
parsed_url = urlparse(url)
if parsed_url.scheme not in ("http", "https"):
return f"URL scheme must be 'http' or 'https', got: {parsed_url.scheme}"
# Get browser and navigate to URL
browser = await self._session_manager.get_async_browser(thread_id)
page = await aget_current_page(browser)
response = await page.goto(url)
status = response.status if response else "unknown"
# Release the browser
await self._session_manager.release_async_browser(thread_id)
return f"Navigated to {url} with status code {status}"
except Exception as e:
return f"Error navigating to URL: {e!s}"
def click_element(
self,
selector: str,
thread_id: str = "default",
) -> str:
"""
Click on an element with the given CSS selector (synchronous version).
Args:
selector (str): CSS selector for the element to click on.
thread_id (str): Thread ID for the browser session.
Returns:
str: Confirmation message.
"""
try:
# Get browser and click on element
browser = self._session_manager.get_sync_browser(thread_id)
page = get_current_page(browser)
try:
page.click(selector, timeout=5000)
result = f"Clicked on element with selector '{selector}'"
except Exception as click_error:
result = f"Unable to click on element with selector '{selector}': {click_error!s}"
# Release the browser
self._session_manager.release_sync_browser(thread_id)
return result
except Exception as e:
return f"Error clicking on element: {e!s}"
async def aclick_element(
self,
selector: str,
thread_id: str = "default",
) -> str:
"""
Click on an element with the given CSS selector (asynchronous version).
Args:
selector (str): CSS selector for the element to click on.
thread_id (str): Thread ID for the browser session.
Returns:
str: Confirmation message.
"""
try:
# Get browser and click on element
browser = await self._session_manager.get_async_browser(thread_id)
page = await aget_current_page(browser)
try:
await page.click(selector, timeout=5000)
result = f"Clicked on element with selector '{selector}'"
except Exception as click_error:
result = f"Unable to click on element with selector '{selector}': {click_error!s}"
# Release the browser
await self._session_manager.release_async_browser(thread_id)
return result
except Exception as e:
return f"Error clicking on element: {e!s}"
def extract_text(
self,
selector: Optional[str] = None,
thread_id: str = "default",
) -> str:
"""
Extract text from the current page (synchronous version).
Args:
selector (Optional[str]): CSS selector for the element to extract text from. If not provided, extracts text from the entire page.
thread_id (str): Thread ID for the browser session.
Returns:
str: The extracted text.
"""
try:
# Get browser and extract text
browser = self._session_manager.get_sync_browser(thread_id)
page = get_current_page(browser)
if selector:
try:
element = page.query_selector(selector)
if element:
text = element.text_content()
result = text if text else "Element found but contains no text"
else:
result = f"No element found with selector '{selector}'"
except Exception as selector_error:
result = f"Error extracting text from selector '{selector}': {selector_error!s}"
else:
# Extract text from the entire page
result = page.content()
# Release the browser
self._session_manager.release_sync_browser(thread_id)
return result
except Exception as e:
return f"Error extracting text: {e!s}"
async def aextract_text(
self,
selector: Optional[str] = None,
thread_id: str = "default",
) -> str:
"""
Extract text from the current page (asynchronous version).
Args:
selector (Optional[str]): CSS selector for the element to extract text from. If not provided, extracts text from the entire page.
thread_id (str): Thread ID for the browser session.
Returns:
str: The extracted text.
"""
try:
# Get browser and extract text
browser = await self._session_manager.get_async_browser(thread_id)
page = await aget_current_page(browser)
if selector:
try:
element = await page.query_selector(selector)
if element:
text = await element.text_content()
result = text if text else "Element found but contains no text"
else:
result = f"No element found with selector '{selector}'"
except Exception as selector_error:
result = f"Error extracting text from selector '{selector}': {selector_error!s}"
else:
# Extract text from the entire page
result = await page.content()
# Release the browser
await self._session_manager.release_async_browser(thread_id)
return result
except Exception as e:
return f"Error extracting text: {e!s}"
def extract_hyperlinks(
self,
thread_id: str = "default",
) -> str:
"""
Extract hyperlinks from the current page (synchronous version).
Args:
thread_id (str): Thread ID for the browser session.
Returns:
str: The extracted hyperlinks.
"""
try:
# Get browser and extract hyperlinks
browser = self._session_manager.get_sync_browser(thread_id)
page = get_current_page(browser)
# Extract all hyperlinks from the page
links = page.eval_on_selector_all(
"a[href]",
"""
(elements) => {
return elements.map(el => {
return {
text: el.innerText || el.textContent,
href: el.href
};
});
}
""",
)
# Format the links
formatted_links = []
for i, link in enumerate(links):
formatted_links.append(
f"{i + 1}. {link.get('text', 'No text')}: {link.get('href', 'No href')}"
)
result = (
"\n".join(formatted_links)
if formatted_links
else "No hyperlinks found on the page"
)
# Release the browser
self._session_manager.release_sync_browser(thread_id)
return result
except Exception as e:
return f"Error extracting hyperlinks: {e!s}"
async def aextract_hyperlinks(
self,
thread_id: str = "default",
) -> str:
"""
Extract hyperlinks from the current page (asynchronous version).
Args:
thread_id (str): Thread ID for the browser session.
Returns:
str: The extracted hyperlinks.
"""
try:
# Get browser and extract hyperlinks
browser = await self._session_manager.get_async_browser(thread_id)
page = await aget_current_page(browser)
# Extract all hyperlinks from the page
links = await page.eval_on_selector_all(
"a[href]",
"""
(elements) => {
return elements.map(el => {
return {
text: el.innerText || el.textContent,
href: el.href
};
});
}
""",
)
# Format the links
formatted_links = []
for i, link in enumerate(links):
formatted_links.append(
f"{i + 1}. {link.get('text', 'No text')}: {link.get('href', 'No href')}"
)
result = (
"\n".join(formatted_links)
if formatted_links
else "No hyperlinks found on the page"
)
# Release the browser
await self._session_manager.release_async_browser(thread_id)
return result
except Exception as e:
return f"Error extracting hyperlinks: {e!s}"
def get_elements(
self,
selector: str,
thread_id: str = "default",
) -> str:
"""
Get elements matching a CSS selector (synchronous version).
Args:
selector (str): CSS selector for the elements to get.
thread_id (str): Thread ID for the browser session.
Returns:
str: Information about the matching elements.
"""
try:
# Get browser and find elements
browser = self._session_manager.get_sync_browser(thread_id)
page = get_current_page(browser)
# Find elements matching the selector
elements = page.query_selector_all(selector)
if not elements:
result = f"No elements found matching selector '{selector}'"
else:
# Extract information about the elements
elements_info = []
for i, element in enumerate(elements):
tag_name = element.evaluate("el => el.tagName.toLowerCase()")
text = element.text_content() or ""
attributes = element.evaluate("""
(el) => {
const attrs = {};
for (const attr of el.attributes) {
attrs[attr.name] = attr.value;
}
return attrs;
}
""")
# Format element info
attr_str = ", ".join([f'{k}="{v}"' for k, v in attributes.items()])
elements_info.append(
f"{i + 1}. <{tag_name} {attr_str}>{text}</{tag_name}>"
)
result = (
f"Found {len(elements)} element(s) matching selector '{selector}':\n"
+ "\n".join(elements_info)
)
# Release the browser
self._session_manager.release_sync_browser(thread_id)
return result
except Exception as e:
return f"Error getting elements: {e!s}"
async def aget_elements(
self,
selector: str,
thread_id: str = "default",
) -> str:
"""
Get elements matching a CSS selector (asynchronous version).
Args:
selector (str): CSS selector for the elements to get.
thread_id (str): Thread ID for the browser session.
Returns:
str: Information about the matching elements.
"""
try:
# Get browser and find elements
browser = await self._session_manager.get_async_browser(thread_id)
page = await aget_current_page(browser)
# Find elements matching the selector
elements = await page.query_selector_all(selector)
if not elements:
result = f"No elements found matching selector '{selector}'"
else:
# Extract information about the elements
elements_info = []
for i, element in enumerate(elements):
tag_name = await element.evaluate("el => el.tagName.toLowerCase()")
text = await element.text_content() or ""
attributes = await element.evaluate("""
(el) => {
const attrs = {};
for (const attr of el.attributes) {
attrs[attr.name] = attr.value;
}
return attrs;
}
""")
# Format element info
attr_str = ", ".join([f'{k}="{v}"' for k, v in attributes.items()])
elements_info.append(
f"{i + 1}. <{tag_name} {attr_str}>{text}</{tag_name}>"
)
result = (
f"Found {len(elements)} element(s) matching selector '{selector}':\n"
+ "\n".join(elements_info)
)
# Release the browser
await self._session_manager.release_async_browser(thread_id)
return result
except Exception as e:
return f"Error getting elements: {e!s}"
def navigate_back(
self,
thread_id: str = "default",
) -> str:
"""
Navigate to the previous page (synchronous version).
Args:
thread_id (str): Thread ID for the browser session.
Returns:
str: Confirmation message.
"""
try:
# Get browser and navigate back
browser = self._session_manager.get_sync_browser(thread_id)
page = get_current_page(browser)
# Navigate back
response = page.go_back()
# Get the current URL after navigating back
current_url = page.url if response else "unknown"
# Release the browser
self._session_manager.release_sync_browser(thread_id)
if response:
return f"Navigated back to {current_url}"
else:
return "Could not navigate back (no previous page in history)"
except Exception as e:
return f"Error navigating back: {e!s}"
async def anavigate_back(
self,
thread_id: str = "default",
) -> str:
"""
Navigate to the previous page (asynchronous version).
Args:
thread_id (str): Thread ID for the browser session.
Returns:
str: Confirmation message.
"""
try:
# Get browser and navigate back
browser = await self._session_manager.get_async_browser(thread_id)
page = await aget_current_page(browser)
# Navigate back
response = await page.go_back()
# Get the current URL after navigating back
current_url = page.url if response else "unknown"
# Release the browser
await self._session_manager.release_async_browser(thread_id)
if response:
return f"Navigated back to {current_url}"
else:
return "Could not navigate back (no previous page in history)"
except Exception as e:
return f"Error navigating back: {e!s}"
def current_webpage(
self,
thread_id: str = "default",
) -> str:
"""
Get information about the current webpage (synchronous version).
Args:
thread_id (str): Thread ID for the browser session.
Returns:
str: Information about the current webpage.
"""
try:
# Get browser and get current webpage info
browser = self._session_manager.get_sync_browser(thread_id)
page = get_current_page(browser)
# Get the current URL
url = page.url
# Get the page title
title = page.title()
# Get basic page metrics
metrics = page.evaluate("""
() => {
return {
width: document.documentElement.clientWidth,
height: document.documentElement.clientHeight,
links: document.querySelectorAll('a').length,
images: document.querySelectorAll('img').length,
forms: document.querySelectorAll('form').length
}
}
""")
# Format the result
result = f"Current webpage information:\n"
result += f"URL: {url}\n"
result += f"Title: {title}\n"
result += f"Viewport size: {metrics['width']}x{metrics['height']}\n"
result += f"Links: {metrics['links']}\n"
result += f"Images: {metrics['images']}\n"
result += f"Forms: {metrics['forms']}"
# Release the browser
self._session_manager.release_sync_browser(thread_id)
return result
except Exception as e:
return f"Error getting current webpage information: {e!s}"
async def acurrent_webpage(
self,
thread_id: str = "default",
) -> str:
"""
Get information about the current webpage (asynchronous version).
Args:
thread_id (str): Thread ID for the browser session.
Returns:
str: Information about the current webpage.
"""
try:
# Get browser and get current webpage info
browser = await self._session_manager.get_async_browser(thread_id)
page = await aget_current_page(browser)
# Get the current URL
url = page.url
# Get the page title
title = await page.title()
# Get basic page metrics
metrics = await page.evaluate("""
() => {
return {
width: document.documentElement.clientWidth,
height: document.documentElement.clientHeight,
links: document.querySelectorAll('a').length,
images: document.querySelectorAll('img').length,
forms: document.querySelectorAll('form').length
}
}
""")
# Format the result
result = f"Current webpage information:\n"
result += f"URL: {url}\n"
result += f"Title: {title}\n"
result += f"Viewport size: {metrics['width']}x{metrics['height']}\n"
result += f"Links: {metrics['links']}\n"
result += f"Images: {metrics['images']}\n"
result += f"Forms: {metrics['forms']}"
# Release the browser
await self._session_manager.release_async_browser(thread_id)
return result
except Exception as e:
return f"Error getting current webpage information: {e!s}"
async def cleanup(self, thread_id: Optional[str] = None) -> None:
"""
        Clean up browser client resources.
Args:
thread_id: Optional thread ID to clean up. If None, cleans up all sessions.
"""
if thread_id:
# Clean up a specific thread's session
if thread_id in self._browser_clients:
try:
self._browser_clients[thread_id].stop()
del self._browser_clients[thread_id]
logger.info(f"Browser session for thread {thread_id} cleaned up")
except Exception as e:
logger.warning(
f"Error stopping browser for thread {thread_id}: {e}"
)
else:
# Clean up all sessions
thread_ids = list(self._browser_clients.keys())
for tid in thread_ids:
try:
self._browser_clients[tid].stop()
except Exception as e:
logger.warning(f"Error stopping browser for thread {tid}: {e}")
self._browser_clients = {}
logger.info("All browser sessions cleaned up")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/llama_index/tools/aws_bedrock_agentcore/browser/base.py",
"license": "MIT License",
"lines": 602,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/llama_index/tools/aws_bedrock_agentcore/browser/browser_session_manager.py | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Dict, Tuple
from bedrock_agentcore.tools.browser_client import BrowserClient
if TYPE_CHECKING:
from playwright.async_api import Browser as AsyncBrowser
from playwright.sync_api import Browser as SyncBrowser
logger = logging.getLogger(__name__)
class BrowserSessionManager:
"""
Manages browser sessions for different threads.
This class maintains separate browser sessions for different threads,
enabling concurrent usage of browsers in multi-threaded environments.
Browsers are created lazily only when needed by tools.
    Concurrency protection is built in: each browser session is tied to a
    specific thread_id and is guarded against concurrent usage.
When a browser is obtained via get_async_browser() or get_sync_browser(),
it is marked as "in use", and subsequent attempts to access the same
browser session will raise a RuntimeError until it is released.
"""
def __init__(self, region: str = "us-west-2"):
"""
Initialize the browser session manager.
Args:
region: AWS region for browser client
"""
self.region = region
self._async_sessions: Dict[str, Tuple[BrowserClient, AsyncBrowser, bool]] = {}
self._sync_sessions: Dict[str, Tuple[BrowserClient, SyncBrowser, bool]] = {}
async def get_async_browser(self, thread_id: str) -> AsyncBrowser:
"""
Get or create an async browser for the specified thread.
Args:
thread_id: Unique identifier for the thread requesting the browser
Returns:
An async browser instance specific to the thread
Raises:
RuntimeError: If the browser session is already in use by another caller
"""
if thread_id in self._async_sessions:
client, browser, in_use = self._async_sessions[thread_id]
if in_use:
raise RuntimeError(
f"Browser session for thread {thread_id} is already in use. "
"Use a different thread_id for concurrent operations."
)
self._async_sessions[thread_id] = (client, browser, True)
return browser
return await self._create_async_browser_session(thread_id)
def get_sync_browser(self, thread_id: str) -> SyncBrowser:
"""
Get or create a sync browser for the specified thread.
Args:
thread_id: Unique identifier for the thread requesting the browser
Returns:
A sync browser instance specific to the thread
Raises:
RuntimeError: If the browser session is already in use by another caller
"""
if thread_id in self._sync_sessions:
client, browser, in_use = self._sync_sessions[thread_id]
if in_use:
raise RuntimeError(
f"Browser session for thread {thread_id} is already in use. "
"Use a different thread_id for concurrent operations."
)
self._sync_sessions[thread_id] = (client, browser, True)
return browser
return self._create_sync_browser_session(thread_id)
async def _create_async_browser_session(self, thread_id: str) -> AsyncBrowser:
"""
Create a new async browser session for the specified thread.
Args:
thread_id: Unique identifier for the thread
Returns:
The newly created async browser instance
Raises:
Exception: If browser session creation fails
"""
browser_client = BrowserClient(region=self.region)
try:
# Start browser session
browser_client.start()
# Get WebSocket connection info
ws_url, headers = browser_client.generate_ws_headers()
logger.info(
f"Connecting to async WebSocket endpoint for thread {thread_id}: {ws_url}"
)
from playwright.async_api import async_playwright
# Connect to browser using Playwright
playwright = await async_playwright().start()
browser = await playwright.chromium.connect_over_cdp(
endpoint_url=ws_url, headers=headers, timeout=30000
)
logger.info(
f"Successfully connected to async browser for thread {thread_id}"
)
# Store session resources
self._async_sessions[thread_id] = (browser_client, browser, True)
return browser
except Exception as e:
logger.error(
f"Failed to create async browser session for thread {thread_id}: {e}"
)
# Clean up resources if session creation fails
if browser_client:
try:
browser_client.stop()
except Exception as cleanup_error:
logger.warning(f"Error cleaning up browser client: {cleanup_error}")
raise
def _create_sync_browser_session(self, thread_id: str) -> SyncBrowser:
"""
Create a new sync browser session for the specified thread.
Args:
thread_id: Unique identifier for the thread
Returns:
The newly created sync browser instance
Raises:
Exception: If browser session creation fails
"""
browser_client = BrowserClient(region=self.region)
try:
# Start browser session
browser_client.start()
# Get WebSocket connection info
ws_url, headers = browser_client.generate_ws_headers()
logger.info(
f"Connecting to sync WebSocket endpoint for thread {thread_id}: {ws_url}"
)
from playwright.sync_api import sync_playwright
# Connect to browser using Playwright
playwright = sync_playwright().start()
browser = playwright.chromium.connect_over_cdp(
endpoint_url=ws_url, headers=headers, timeout=30000
)
logger.info(
f"Successfully connected to sync browser for thread {thread_id}"
)
# Store session resources
self._sync_sessions[thread_id] = (browser_client, browser, True)
return browser
except Exception as e:
logger.error(
f"Failed to create sync browser session for thread {thread_id}: {e}"
)
# Clean up resources if session creation fails
if browser_client:
try:
browser_client.stop()
except Exception as cleanup_error:
logger.warning(f"Error cleaning up browser client: {cleanup_error}")
raise
async def release_async_browser(self, thread_id: str) -> None:
"""
Release the async browser session for the specified thread.
Args:
thread_id: Unique identifier for the thread
"""
if thread_id not in self._async_sessions:
logger.warning(f"No async browser session found for thread {thread_id}")
return
client, browser, in_use = self._async_sessions[thread_id]
if in_use:
self._async_sessions[thread_id] = (client, browser, False)
logger.info(f"Released async browser for thread {thread_id}")
def release_sync_browser(self, thread_id: str) -> None:
"""
Release the sync browser session for the specified thread.
Args:
thread_id: Unique identifier for the thread
"""
if thread_id not in self._sync_sessions:
logger.warning(f"No sync browser session found for thread {thread_id}")
return
client, browser, in_use = self._sync_sessions[thread_id]
if in_use:
self._sync_sessions[thread_id] = (client, browser, False)
logger.info(f"Released sync browser for thread {thread_id}")
async def close_async_browser(self, thread_id: str) -> None:
"""
Close the async browser session for the specified thread.
Args:
thread_id: Unique identifier for the thread
"""
if thread_id not in self._async_sessions:
logger.warning(f"No async browser session found for thread {thread_id}")
return
client, browser, _ = self._async_sessions[thread_id]
# Close browser
if browser:
try:
await browser.close()
except Exception as e:
logger.warning(
f"Error closing async browser for thread {thread_id}: {e}"
)
# Stop browser client
if client:
try:
client.stop()
except Exception as e:
logger.warning(
f"Error stopping browser client for thread {thread_id}: {e}"
)
# Remove session from dictionary
del self._async_sessions[thread_id]
logger.info(f"Async browser session cleaned up for thread {thread_id}")
def close_sync_browser(self, thread_id: str) -> None:
"""
Close the sync browser session for the specified thread.
Args:
thread_id: Unique identifier for the thread
"""
if thread_id not in self._sync_sessions:
logger.warning(f"No sync browser session found for thread {thread_id}")
return
client, browser, _ = self._sync_sessions[thread_id]
# Close browser
if browser:
try:
browser.close()
except Exception as e:
logger.warning(
f"Error closing sync browser for thread {thread_id}: {e}"
)
# Stop browser client
if client:
try:
client.stop()
except Exception as e:
logger.warning(
f"Error stopping browser client for thread {thread_id}: {e}"
)
# Remove session from dictionary
del self._sync_sessions[thread_id]
logger.info(f"Sync browser session cleaned up for thread {thread_id}")
async def close_all_browsers(self) -> None:
"""Close all browser sessions."""
# Close all async browsers
async_thread_ids = list(self._async_sessions.keys())
for thread_id in async_thread_ids:
await self.close_async_browser(thread_id)
# Close all sync browsers
sync_thread_ids = list(self._sync_sessions.keys())
for thread_id in sync_thread_ids:
self.close_sync_browser(thread_id)
logger.info("All browser sessions closed")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/llama_index/tools/aws_bedrock_agentcore/browser/browser_session_manager.py",
"license": "MIT License",
"lines": 248,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/llama_index/tools/aws_bedrock_agentcore/browser/utils.py | """Utility functions for browser tools."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Union
if TYPE_CHECKING:
from playwright.async_api import Browser as AsyncBrowser
from playwright.async_api import Page as AsyncPage
from playwright.sync_api import Browser as SyncBrowser
from playwright.sync_api import Page as SyncPage
async def aget_current_page(browser: Union[AsyncBrowser, Any]) -> AsyncPage:
"""
Asynchronously get the current page of the browser.
Args:
browser: The browser (AsyncBrowser) to get the current page from.
Returns:
AsyncPage: The current page.
"""
if not browser.contexts:
context = await browser.new_context()
return await context.new_page()
context = browser.contexts[0]
if not context.pages:
return await context.new_page()
return context.pages[-1]
def get_current_page(browser: Union[SyncBrowser, Any]) -> SyncPage:
"""
Get the current page of the browser.
Args:
browser: The browser to get the current page from.
Returns:
SyncPage: The current page.
"""
if not browser.contexts:
context = browser.new_context()
return context.new_page()
context = browser.contexts[0]
if not context.pages:
return context.new_page()
return context.pages[-1]
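
# ---------------------------------------------------------------------------
# Minimal sketch (assumption: a locally launched Playwright browser stands in
# for the remote CDP-connected browser these helpers normally receive).
if __name__ == "__main__":
    from playwright.sync_api import sync_playwright

    with sync_playwright() as p:
        browser = p.chromium.launch()
        # First call creates a fresh context and page; subsequent calls reuse
        # the most recently opened page of the first context.
        page = get_current_page(browser)
        page.goto("https://example.com")
        print(page.title())
        browser.close()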
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/llama_index/tools/aws_bedrock_agentcore/browser/utils.py",
"license": "MIT License",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/llama_index/tools/aws_bedrock_agentcore/code_interpreter/base.py | import json
import os
import logging
from typing import Dict, Optional, List
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from bedrock_agentcore.tools.code_interpreter_client import CodeInterpreter
DEFAULT_CODE_INTERPRETER_IDENTIFIER = "aws.codeinterpreter.v1"
DEFAULT_CODE_INTERPRETER_TIMEOUT = 900
logger = logging.getLogger(__name__)
def get_aws_region() -> str:
"""Get the AWS region from environment variables or use default."""
return os.getenv("AWS_REGION") or os.getenv("AWS_DEFAULT_REGION") or "us-west-2"
def extract_output_from_stream(response: Dict) -> str:
    """
    Extract output from a code interpreter response stream.
Extract output from code interpreter response stream
Args:
response: Response from code interpreter execution
Returns:
Extracted output as string
"""
output = []
for event in response["stream"]:
if "result" in event:
result = event["result"]
if "content" in result:
for content_item in result["content"]:
if content_item["type"] == "text":
output.append(content_item["text"])
if content_item["type"] == "resource":
resource = content_item["resource"]
if "text" in resource:
file_path = resource["uri"].replace("file://", "")
file_content = resource["text"]
output.append(
f"==== File: {file_path} ====\n{file_content}\n"
)
else:
output.append(json.dumps(resource))
return "\n".join(output)
class AgentCoreCodeInterpreterToolSpec(BaseToolSpec):
"""
AWS Bedrock AgentCore Code Interpreter Tool Spec.
This toolkit provides a set of tools for working with a remote code interpreter environment:
* execute_code - Run code in various languages (primarily Python)
* execute_command - Run shell commands
* read_files - Read content of files in the environment
* list_files - List files in directories
* delete_files - Remove files from the environment
* write_files - Create or update files
* start_command - Start long-running commands asynchronously
* get_task - Check status of async tasks
* stop_task - Stop running tasks
The toolkit lazily initializes the code interpreter session on first use.
It supports multiple threads by maintaining separate code interpreter sessions for each thread ID.
"""
spec_functions = [
("execute_code", "aexecute_code"),
("execute_command", "aexecute_command"),
("read_files", "aread_files"),
("list_files", "alist_files"),
("delete_files", "adelete_files"),
("write_files", "awrite_files"),
("start_command", "astart_command"),
("get_task", "aget_task"),
("stop_task", "astop_task"),
]
def __init__(self, region: Optional[str] = None) -> None:
"""
Initialize the AWS Bedrock AgentCore Code Interpreter Tool Spec.
Args:
region (Optional[str]): AWS region to use for Bedrock AgentCore services.
If not provided, will try to get it from environment variables.
"""
self.region = region if region is not None else get_aws_region()
self._code_interpreters: Dict[str, CodeInterpreter] = {}
def _get_or_create_interpreter(self, thread_id: str = "default") -> CodeInterpreter:
"""
Get or create a code interpreter for the specified thread.
Args:
thread_id: Thread ID for the code interpreter session
Returns:
CodeInterpreter instance
"""
if thread_id in self._code_interpreters:
return self._code_interpreters[thread_id]
# Create a new code interpreter for this thread
code_interpreter = CodeInterpreter(region=self.region)
code_interpreter.start()
logger.info(
f"Started code interpreter with session_id:{code_interpreter.session_id} for thread:{thread_id}"
)
# Store the interpreter
self._code_interpreters[thread_id] = code_interpreter
return code_interpreter
def execute_code(
self,
code: str,
language: str = "python",
clear_context: bool = False,
thread_id: str = "default",
) -> str:
"""
Execute code in the code interpreter sandbox (synchronous version).
Args:
code (str): The code to execute.
language (str): The programming language of the code. Default is "python".
clear_context (bool): Whether to clear execution context. Default is False.
thread_id (str): Thread ID for the code interpreter session. Default is "default".
Returns:
str: The result of the code execution.
"""
try:
# Get or create code interpreter
code_interpreter = self._get_or_create_interpreter(thread_id=thread_id)
# Execute code
response = code_interpreter.invoke(
method="executeCode",
params={
"code": code,
"language": language,
"clearContext": clear_context,
},
)
return extract_output_from_stream(response)
except Exception as e:
return f"Error executing code: {e!s}"
async def aexecute_code(
self,
code: str,
language: str = "python",
clear_context: bool = False,
thread_id: str = "default",
) -> str:
"""
Execute code in the code interpreter sandbox (asynchronous version).
Args:
code (str): The code to execute.
language (str): The programming language of the code. Default is "python".
clear_context (bool): Whether to clear execution context. Default is False.
thread_id (str): Thread ID for the code interpreter session. Default is "default".
Returns:
str: The result of the code execution.
"""
# Use the synchronous version as the underlying API is thread-safe
return self.execute_code(
code=code,
language=language,
clear_context=clear_context,
thread_id=thread_id,
)
def execute_command(
self,
command: str,
thread_id: str = "default",
) -> str:
"""
Execute a shell command in the code interpreter sandbox (synchronous version).
Args:
command (str): The command to execute.
thread_id (str): Thread ID for the code interpreter session. Default is "default".
Returns:
str: The result of the command execution.
"""
try:
# Get or create code interpreter
code_interpreter = self._get_or_create_interpreter(thread_id=thread_id)
# Execute command
response = code_interpreter.invoke(
method="executeCommand", params={"command": command}
)
return extract_output_from_stream(response)
except Exception as e:
return f"Error executing command: {e!s}"
async def aexecute_command(
self,
command: str,
thread_id: str = "default",
) -> str:
"""
Execute a shell command in the code interpreter sandbox (asynchronous version).
Args:
command (str): The command to execute.
thread_id (str): Thread ID for the code interpreter session. Default is "default".
Returns:
str: The result of the command execution.
"""
# Use the synchronous version as the underlying API is thread-safe
return self.execute_command(command=command, thread_id=thread_id)
def read_files(
self,
paths: List[str],
thread_id: str = "default",
) -> str:
"""
Read content of files in the environment (synchronous version).
Args:
paths (List[str]): List of file paths to read.
thread_id (str): Thread ID for the code interpreter session. Default is "default".
Returns:
str: The content of the files.
"""
try:
# Get or create code interpreter
code_interpreter = self._get_or_create_interpreter(thread_id=thread_id)
# Read files
response = code_interpreter.invoke(
method="readFiles", params={"paths": paths}
)
return extract_output_from_stream(response)
except Exception as e:
return f"Error reading files: {e!s}"
async def aread_files(
self,
paths: List[str],
thread_id: str = "default",
) -> str:
"""
Read content of files in the environment (asynchronous version).
Args:
paths (List[str]): List of file paths to read.
thread_id (str): Thread ID for the code interpreter session. Default is "default".
Returns:
str: The content of the files.
"""
# Use the synchronous version as the underlying API is thread-safe
return self.read_files(paths=paths, thread_id=thread_id)
def list_files(
self,
directory_path: str = "",
thread_id: str = "default",
) -> str:
"""
List files in directories in the environment (synchronous version).
Args:
directory_path (str): Path to the directory to list. Default is current directory.
thread_id (str): Thread ID for the code interpreter session. Default is "default".
Returns:
str: The list of files.
"""
try:
# Get or create code interpreter
code_interpreter = self._get_or_create_interpreter(thread_id=thread_id)
# List files
response = code_interpreter.invoke(
method="listFiles", params={"directoryPath": directory_path}
)
return extract_output_from_stream(response)
except Exception as e:
return f"Error listing files: {e!s}"
async def alist_files(
self,
directory_path: str = "",
thread_id: str = "default",
) -> str:
"""
List files in directories in the environment (asynchronous version).
Args:
directory_path (str): Path to the directory to list. Default is current directory.
thread_id (str): Thread ID for the code interpreter session. Default is "default".
Returns:
str: The list of files.
"""
# Use the synchronous version as the underlying API is thread-safe
return self.list_files(directory_path=directory_path, thread_id=thread_id)
def delete_files(
self,
paths: List[str],
thread_id: str = "default",
) -> str:
"""
Remove files from the environment (synchronous version).
Args:
paths (List[str]): List of file paths to delete.
thread_id (str): Thread ID for the code interpreter session. Default is "default".
Returns:
str: The result of the delete operation.
"""
try:
# Get or create code interpreter
code_interpreter = self._get_or_create_interpreter(thread_id=thread_id)
# Remove files
response = code_interpreter.invoke(
method="removeFiles", params={"paths": paths}
)
return extract_output_from_stream(response)
except Exception as e:
return f"Error deleting files: {e!s}"
async def adelete_files(
self,
paths: List[str],
thread_id: str = "default",
) -> str:
"""
Remove files from the environment (asynchronous version).
Args:
paths (List[str]): List of file paths to delete.
thread_id (str): Thread ID for the code interpreter session. Default is "default".
Returns:
str: The result of the delete operation.
"""
# Use the synchronous version as the underlying API is thread-safe
return self.delete_files(paths=paths, thread_id=thread_id)
def write_files(
self,
files: List[Dict[str, str]],
thread_id: str = "default",
) -> str:
"""
Create or update files in the environment (synchronous version).
Args:
files (List[Dict[str, str]]): List of dictionaries with path and text fields.
thread_id (str): Thread ID for the code interpreter session. Default is "default".
Returns:
str: The result of the write operation.
"""
try:
# Get or create code interpreter
code_interpreter = self._get_or_create_interpreter(thread_id=thread_id)
# Write files
response = code_interpreter.invoke(
method="writeFiles", params={"content": files}
)
return extract_output_from_stream(response)
except Exception as e:
return f"Error writing files: {e!s}"
async def awrite_files(
self,
files: List[Dict[str, str]],
thread_id: str = "default",
) -> str:
"""
Create or update files in the environment (asynchronous version).
Args:
files (List[Dict[str, str]]): List of dictionaries with path and text fields.
thread_id (str): Thread ID for the code interpreter session. Default is "default".
Returns:
str: The result of the write operation.
"""
# Use the synchronous version as the underlying API is thread-safe
return self.write_files(files=files, thread_id=thread_id)
def start_command(
self,
command: str,
thread_id: str = "default",
) -> str:
"""
Start a long-running command asynchronously (synchronous version).
Args:
command (str): The command to execute asynchronously.
thread_id (str): Thread ID for the code interpreter session. Default is "default".
Returns:
str: The task ID and status.
"""
try:
# Get or create code interpreter
code_interpreter = self._get_or_create_interpreter(thread_id=thread_id)
# Start command execution
response = code_interpreter.invoke(
method="startCommandExecution", params={"command": command}
)
return extract_output_from_stream(response)
except Exception as e:
return f"Error starting command: {e!s}"
async def astart_command(
self,
command: str,
thread_id: str = "default",
) -> str:
"""
Start a long-running command asynchronously (asynchronous version).
Args:
command (str): The command to execute asynchronously.
thread_id (str): Thread ID for the code interpreter session. Default is "default".
Returns:
str: The task ID and status.
"""
# Use the synchronous version as the underlying API is thread-safe
return self.start_command(command=command, thread_id=thread_id)
def get_task(
self,
task_id: str,
thread_id: str = "default",
) -> str:
"""
Check status of an async task (synchronous version).
Args:
task_id (str): The ID of the task to check.
thread_id (str): Thread ID for the code interpreter session. Default is "default".
Returns:
str: The task status.
"""
try:
# Get or create code interpreter
code_interpreter = self._get_or_create_interpreter(thread_id=thread_id)
# Get task status
response = code_interpreter.invoke(
method="getTask", params={"taskId": task_id}
)
return extract_output_from_stream(response)
except Exception as e:
return f"Error getting task status: {e!s}"
async def aget_task(
self,
task_id: str,
thread_id: str = "default",
) -> str:
"""
Check status of an async task (asynchronous version).
Args:
task_id (str): The ID of the task to check.
thread_id (str): Thread ID for the code interpreter session. Default is "default".
Returns:
str: The task status.
"""
# Use the synchronous version as the underlying API is thread-safe
return self.get_task(task_id=task_id, thread_id=thread_id)
def stop_task(
self,
task_id: str,
thread_id: str = "default",
) -> str:
"""
Stop a running task (synchronous version).
Args:
task_id (str): The ID of the task to stop.
thread_id (str): Thread ID for the code interpreter session. Default is "default".
Returns:
str: The result of the stop operation.
"""
try:
# Get or create code interpreter
code_interpreter = self._get_or_create_interpreter(thread_id=thread_id)
# Stop task
response = code_interpreter.invoke(
method="stopTask", params={"taskId": task_id}
)
return extract_output_from_stream(response)
except Exception as e:
return f"Error stopping task: {e!s}"
async def astop_task(
self,
task_id: str,
thread_id: str = "default",
) -> str:
"""
Stop a running task (asynchronous version).
Args:
task_id (str): The ID of the task to stop.
thread_id (str): Thread ID for the code interpreter session. Default is "default".
Returns:
str: The result of the stop operation.
"""
# Use the synchronous version as the underlying API is thread-safe
return self.stop_task(task_id=task_id, thread_id=thread_id)
async def cleanup(self, thread_id: Optional[str] = None) -> None:
"""
        Clean up code interpreter session resources.
Args:
thread_id: Optional thread ID to clean up. If None, cleans up all sessions.
"""
if thread_id:
# Clean up a specific thread's session
if thread_id in self._code_interpreters:
try:
self._code_interpreters[thread_id].stop()
del self._code_interpreters[thread_id]
logger.info(
f"Code interpreter session for thread {thread_id} cleaned up"
)
except Exception as e:
logger.warning(
f"Error stopping code interpreter for thread {thread_id}: {e}"
)
else:
# Clean up all sessions
thread_ids = list(self._code_interpreters.keys())
for tid in thread_ids:
try:
self._code_interpreters[tid].stop()
except Exception as e:
logger.warning(
f"Error stopping code interpreter for thread {tid}: {e}"
)
self._code_interpreters = {}
logger.info("All code interpreter sessions cleaned up")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/llama_index/tools/aws_bedrock_agentcore/code_interpreter/base.py",
"license": "MIT License",
"lines": 485,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_browser.py | import os
from unittest.mock import patch, MagicMock
from llama_index.tools.aws_bedrock_agentcore import AgentCoreBrowserToolSpec
from llama_index.tools.aws_bedrock_agentcore.browser.base import get_aws_region
from llama_index.tools.aws_bedrock_agentcore.browser.utils import get_current_page
class TestGetAwsRegion:
@patch.dict(os.environ, {"AWS_REGION": "us-east-1"})
def test_get_aws_region_from_aws_region(self):
assert get_aws_region() == "us-east-1"
@patch.dict(
os.environ, {"AWS_DEFAULT_REGION": "us-west-1", "AWS_REGION": ""}, clear=True
)
def test_get_aws_region_from_aws_default_region(self):
assert get_aws_region() == "us-west-1"
@patch.dict(os.environ, {}, clear=True)
def test_get_aws_region_default(self):
assert get_aws_region() == "us-west-2"
class TestBrowserUtils:
def test_get_current_page_no_contexts(self):
mock_browser = MagicMock()
mock_browser.contexts = []
mock_context = MagicMock()
mock_page = MagicMock()
mock_browser.new_context.return_value = mock_context
mock_context.new_page.return_value = mock_page
result = get_current_page(mock_browser)
assert result == mock_page
mock_browser.new_context.assert_called_once()
mock_context.new_page.assert_called_once()
def test_get_current_page_with_context_no_pages(self):
mock_browser = MagicMock()
mock_context = MagicMock()
mock_context.pages = []
mock_browser.contexts = [mock_context]
mock_page = MagicMock()
mock_context.new_page.return_value = mock_page
result = get_current_page(mock_browser)
assert result == mock_page
mock_browser.new_context.assert_not_called()
mock_context.new_page.assert_called_once()
def test_get_current_page_with_context_and_pages(self):
mock_browser = MagicMock()
mock_context = MagicMock()
mock_page1 = MagicMock()
mock_page2 = MagicMock()
mock_context.pages = [mock_page1, mock_page2]
mock_browser.contexts = [mock_context]
result = get_current_page(mock_browser)
assert result == mock_page2
mock_browser.new_context.assert_not_called()
mock_context.new_page.assert_not_called()
class TestAgentCoreBrowserToolSpec:
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.BrowserSessionManager")
def test_init(self, mock_browser_session_manager):
tool_spec = AgentCoreBrowserToolSpec(region="us-east-1")
assert tool_spec.region == "us-east-1"
assert tool_spec._browser_clients == {}
mock_browser_session_manager.assert_called_once_with(region="us-east-1")
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.get_aws_region")
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.BrowserSessionManager")
def test_init_default_region(
self, mock_browser_session_manager, mock_get_aws_region
):
mock_get_aws_region.return_value = "us-west-2"
tool_spec = AgentCoreBrowserToolSpec()
assert tool_spec.region == "us-west-2"
mock_get_aws_region.assert_called_once()
mock_browser_session_manager.assert_called_once_with(region="us-west-2")
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.BrowserClient")
def test_get_or_create_browser_client_new(self, mock_browser_client):
mock_instance = MagicMock()
mock_browser_client.return_value = mock_instance
tool_spec = AgentCoreBrowserToolSpec(region="us-east-1")
client = tool_spec._get_or_create_browser_client("test-thread")
assert client == mock_instance
assert "test-thread" in tool_spec._browser_clients
assert tool_spec._browser_clients["test-thread"] == mock_instance
mock_browser_client.assert_called_once_with("us-east-1")
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.BrowserClient")
def test_get_or_create_browser_client_existing(self, mock_browser_client):
mock_instance = MagicMock()
tool_spec = AgentCoreBrowserToolSpec(region="us-east-1")
tool_spec._browser_clients["test-thread"] = mock_instance
client = tool_spec._get_or_create_browser_client("test-thread")
assert client == mock_instance
mock_browser_client.assert_not_called()
def test_navigate_browser_invalid_url(self):
tool_spec = AgentCoreBrowserToolSpec()
result = tool_spec.navigate_browser(
url="ftp://example.com", thread_id="test-thread"
)
assert "URL scheme must be 'http' or 'https'" in result
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.get_current_page")
def test_navigate_browser(self, mock_get_current_page):
mock_session_manager = MagicMock()
mock_browser = MagicMock()
mock_page = MagicMock()
mock_response = MagicMock()
mock_response.status = 200
mock_session_manager.get_sync_browser.return_value = mock_browser
mock_get_current_page.return_value = mock_page
mock_page.goto.return_value = mock_response
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = tool_spec.navigate_browser(
url="https://example.com", thread_id="test-thread"
)
mock_session_manager.get_sync_browser.assert_called_once_with("test-thread")
mock_get_current_page.assert_called_once_with(mock_browser)
mock_page.goto.assert_called_once_with("https://example.com")
mock_session_manager.release_sync_browser.assert_called_once_with("test-thread")
assert "Navigated to https://example.com with status code 200" in result
def test_navigate_browser_exception(self):
mock_session_manager = MagicMock()
mock_session_manager.get_sync_browser.side_effect = Exception("Test error")
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = tool_spec.navigate_browser(
url="https://example.com", thread_id="test-thread"
)
assert "Error navigating to URL: Test error" in result
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.get_current_page")
def test_click_element(self, mock_get_current_page):
mock_session_manager = MagicMock()
mock_browser = MagicMock()
mock_page = MagicMock()
mock_session_manager.get_sync_browser.return_value = mock_browser
mock_get_current_page.return_value = mock_page
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = tool_spec.click_element(selector="#button", thread_id="test-thread")
mock_session_manager.get_sync_browser.assert_called_once_with("test-thread")
mock_get_current_page.assert_called_once_with(mock_browser)
mock_page.click.assert_called_once_with("#button", timeout=5000)
mock_session_manager.release_sync_browser.assert_called_once_with("test-thread")
assert "Clicked on element with selector '#button'" in result
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.get_current_page")
def test_click_element_not_found(self, mock_get_current_page):
mock_session_manager = MagicMock()
mock_browser = MagicMock()
mock_page = MagicMock()
mock_session_manager.get_sync_browser.return_value = mock_browser
mock_get_current_page.return_value = mock_page
mock_page.click.side_effect = Exception("Element not found")
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = tool_spec.click_element(selector="#button", thread_id="test-thread")
mock_session_manager.get_sync_browser.assert_called_once_with("test-thread")
mock_get_current_page.assert_called_once_with(mock_browser)
mock_page.click.assert_called_once_with("#button", timeout=5000)
mock_session_manager.release_sync_browser.assert_called_once_with("test-thread")
assert "Unable to click on element with selector '#button'" in result
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.get_current_page")
def test_extract_text_whole_page(self, mock_get_current_page):
mock_session_manager = MagicMock()
mock_browser = MagicMock()
mock_page = MagicMock()
mock_session_manager.get_sync_browser.return_value = mock_browser
mock_get_current_page.return_value = mock_page
mock_page.content.return_value = "<html><body>Hello World</body></html>"
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = tool_spec.extract_text(thread_id="test-thread")
mock_session_manager.get_sync_browser.assert_called_once_with("test-thread")
mock_get_current_page.assert_called_once_with(mock_browser)
mock_page.content.assert_called_once()
mock_session_manager.release_sync_browser.assert_called_once_with("test-thread")
assert result == "<html><body>Hello World</body></html>"
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.get_current_page")
def test_extract_text_with_selector(self, mock_get_current_page):
mock_session_manager = MagicMock()
mock_browser = MagicMock()
mock_page = MagicMock()
mock_element = MagicMock()
mock_session_manager.get_sync_browser.return_value = mock_browser
mock_get_current_page.return_value = mock_page
mock_page.query_selector.return_value = mock_element
mock_element.text_content.return_value = "Hello World"
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = tool_spec.extract_text(selector="#content", thread_id="test-thread")
mock_session_manager.get_sync_browser.assert_called_once_with("test-thread")
mock_get_current_page.assert_called_once_with(mock_browser)
mock_page.query_selector.assert_called_once_with("#content")
mock_element.text_content.assert_called_once()
mock_session_manager.release_sync_browser.assert_called_once_with("test-thread")
assert result == "Hello World"
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.get_current_page")
def test_extract_text_selector_not_found(self, mock_get_current_page):
mock_session_manager = MagicMock()
mock_browser = MagicMock()
mock_page = MagicMock()
mock_session_manager.get_sync_browser.return_value = mock_browser
mock_get_current_page.return_value = mock_page
mock_page.query_selector.return_value = None
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = tool_spec.extract_text(selector="#content", thread_id="test-thread")
mock_session_manager.get_sync_browser.assert_called_once_with("test-thread")
mock_get_current_page.assert_called_once_with(mock_browser)
mock_page.query_selector.assert_called_once_with("#content")
mock_session_manager.release_sync_browser.assert_called_once_with("test-thread")
assert "No element found with selector '#content'" in result
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.get_current_page")
def test_extract_hyperlinks(self, mock_get_current_page):
mock_session_manager = MagicMock()
mock_browser = MagicMock()
mock_page = MagicMock()
mock_session_manager.get_sync_browser.return_value = mock_browser
mock_get_current_page.return_value = mock_page
mock_page.eval_on_selector_all.return_value = [
{"text": "Link 1", "href": "https://example.com/1"},
{"text": "Link 2", "href": "https://example.com/2"},
]
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = tool_spec.extract_hyperlinks(thread_id="test-thread")
mock_session_manager.get_sync_browser.assert_called_once_with("test-thread")
mock_get_current_page.assert_called_once_with(mock_browser)
mock_page.eval_on_selector_all.assert_called_once()
mock_session_manager.release_sync_browser.assert_called_once_with("test-thread")
assert "1. Link 1: https://example.com/1" in result
assert "2. Link 2: https://example.com/2" in result
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.get_current_page")
def test_extract_hyperlinks_no_links(self, mock_get_current_page):
mock_session_manager = MagicMock()
mock_browser = MagicMock()
mock_page = MagicMock()
mock_session_manager.get_sync_browser.return_value = mock_browser
mock_get_current_page.return_value = mock_page
mock_page.eval_on_selector_all.return_value = []
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = tool_spec.extract_hyperlinks(thread_id="test-thread")
mock_session_manager.get_sync_browser.assert_called_once_with("test-thread")
mock_get_current_page.assert_called_once_with(mock_browser)
mock_page.eval_on_selector_all.assert_called_once()
mock_session_manager.release_sync_browser.assert_called_once_with("test-thread")
assert "No hyperlinks found on the page" in result
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.get_current_page")
def test_get_elements(self, mock_get_current_page):
mock_session_manager = MagicMock()
mock_browser = MagicMock()
mock_page = MagicMock()
mock_element1 = MagicMock()
mock_element2 = MagicMock()
mock_session_manager.get_sync_browser.return_value = mock_browser
mock_get_current_page.return_value = mock_page
mock_page.query_selector_all.return_value = [mock_element1, mock_element2]
mock_element1.evaluate.side_effect = [
"div",
{"id": "div1", "class": "container"},
]
mock_element1.text_content.return_value = "Content 1"
mock_element2.evaluate.side_effect = [
"div",
{"id": "div2", "class": "container"},
]
mock_element2.text_content.return_value = "Content 2"
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = tool_spec.get_elements(
selector="div.container", thread_id="test-thread"
)
mock_session_manager.get_sync_browser.assert_called_once_with("test-thread")
mock_get_current_page.assert_called_once_with(mock_browser)
mock_page.query_selector_all.assert_called_once_with("div.container")
mock_session_manager.release_sync_browser.assert_called_once_with("test-thread")
assert "Found 2 element(s) matching selector 'div.container'" in result
assert '1. <div id="div1", class="container">Content 1</div>' in result
assert '2. <div id="div2", class="container">Content 2</div>' in result
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.get_current_page")
def test_get_elements_not_found(self, mock_get_current_page):
mock_session_manager = MagicMock()
mock_browser = MagicMock()
mock_page = MagicMock()
mock_session_manager.get_sync_browser.return_value = mock_browser
mock_get_current_page.return_value = mock_page
mock_page.query_selector_all.return_value = []
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = tool_spec.get_elements(
selector="div.container", thread_id="test-thread"
)
mock_session_manager.get_sync_browser.assert_called_once_with("test-thread")
mock_get_current_page.assert_called_once_with(mock_browser)
mock_page.query_selector_all.assert_called_once_with("div.container")
mock_session_manager.release_sync_browser.assert_called_once_with("test-thread")
assert "No elements found matching selector 'div.container'" in result
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.get_current_page")
def test_navigate_back(self, mock_get_current_page):
mock_session_manager = MagicMock()
mock_browser = MagicMock()
mock_page = MagicMock()
mock_response = MagicMock()
mock_session_manager.get_sync_browser.return_value = mock_browser
mock_get_current_page.return_value = mock_page
mock_page.go_back.return_value = mock_response
mock_page.url = "https://example.com/previous"
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = tool_spec.navigate_back(thread_id="test-thread")
mock_session_manager.get_sync_browser.assert_called_once_with("test-thread")
mock_get_current_page.assert_called_once_with(mock_browser)
mock_page.go_back.assert_called_once()
mock_session_manager.release_sync_browser.assert_called_once_with("test-thread")
assert "Navigated back to https://example.com/previous" in result
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.get_current_page")
def test_navigate_back_no_history(self, mock_get_current_page):
mock_session_manager = MagicMock()
mock_browser = MagicMock()
mock_page = MagicMock()
mock_session_manager.get_sync_browser.return_value = mock_browser
mock_get_current_page.return_value = mock_page
mock_page.go_back.return_value = None
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = tool_spec.navigate_back(thread_id="test-thread")
mock_session_manager.get_sync_browser.assert_called_once_with("test-thread")
mock_get_current_page.assert_called_once_with(mock_browser)
mock_page.go_back.assert_called_once()
mock_session_manager.release_sync_browser.assert_called_once_with("test-thread")
assert "Could not navigate back" in result
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.get_current_page")
def test_current_webpage(self, mock_get_current_page):
mock_session_manager = MagicMock()
mock_browser = MagicMock()
mock_page = MagicMock()
mock_session_manager.get_sync_browser.return_value = mock_browser
mock_get_current_page.return_value = mock_page
mock_page.url = "https://example.com"
mock_page.title.return_value = "Example Website"
mock_page.evaluate.return_value = {
"width": 1024,
"height": 768,
"links": 10,
"images": 5,
"forms": 2,
}
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = tool_spec.current_webpage(thread_id="test-thread")
mock_session_manager.get_sync_browser.assert_called_once_with("test-thread")
mock_get_current_page.assert_called_once_with(mock_browser)
mock_page.title.assert_called_once()
mock_page.evaluate.assert_called_once()
mock_session_manager.release_sync_browser.assert_called_once_with("test-thread")
assert "URL: https://example.com" in result
assert "Title: Example Website" in result
assert "Viewport size: 1024x768" in result
assert "Links: 10" in result
assert "Images: 5" in result
assert "Forms: 2" in result
def test_cleanup_thread(self):
mock_browser_client = MagicMock()
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._browser_clients = {"test-thread": mock_browser_client}
# cleanup() is a coroutine, so this sync test simulates its two steps
# (stop the client, then drop it from the registry) instead of awaiting it;
# MagicMock already supplies a callable stop(), so no extra patching is needed
tool_spec._browser_clients["test-thread"].stop()
del tool_spec._browser_clients["test-thread"]
mock_browser_client.stop.assert_called_once()
assert "test-thread" not in tool_spec._browser_clients
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_browser.py",
"license": "MIT License",
"lines": 364,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_browser_async.py | import pytest
from unittest.mock import patch, MagicMock, AsyncMock
from llama_index.tools.aws_bedrock_agentcore import AgentCoreBrowserToolSpec
class TestAsyncBrowserFunctions:
@pytest.mark.asyncio
async def test_anavigate_browser_invalid_url(self):
"""Test anavigate_browser with an invalid URL scheme."""
tool_spec = AgentCoreBrowserToolSpec()
result = await tool_spec.anavigate_browser(
url="ftp://example.com", thread_id="test-thread"
)
assert "URL scheme must be 'http' or 'https'" in result
@pytest.mark.asyncio
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.aget_current_page")
async def test_anavigate_browser(self, mock_aget_current_page):
"""Test anavigate_browser with a valid URL."""
mock_session_manager = MagicMock()
mock_browser = AsyncMock()
mock_page = AsyncMock()
mock_response = AsyncMock()
mock_response.status = 200
mock_session_manager.get_async_browser = AsyncMock(return_value=mock_browser)
mock_session_manager.release_async_browser = AsyncMock()
mock_aget_current_page.return_value = mock_page
mock_page.goto = AsyncMock(return_value=mock_response)
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = await tool_spec.anavigate_browser(
url="https://example.com", thread_id="test-thread"
)
mock_session_manager.get_async_browser.assert_awaited_once_with("test-thread")
mock_aget_current_page.assert_awaited_once_with(mock_browser)
mock_page.goto.assert_awaited_once_with("https://example.com")
mock_session_manager.release_async_browser.assert_awaited_once_with(
"test-thread"
)
assert "Navigated to https://example.com with status code 200" in result
@pytest.mark.asyncio
async def test_anavigate_browser_exception(self):
"""Test anavigate_browser with an exception."""
mock_session_manager = MagicMock()
mock_session_manager.get_async_browser = AsyncMock(
side_effect=Exception("Test error")
)
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = await tool_spec.anavigate_browser(
url="https://example.com", thread_id="test-thread"
)
assert "Error navigating to URL: Test error" in result
@pytest.mark.asyncio
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.aget_current_page")
async def test_aclick_element(self, mock_aget_current_page):
"""Test aclick_element with a valid selector."""
mock_session_manager = MagicMock()
mock_browser = AsyncMock()
mock_page = AsyncMock()
mock_session_manager.get_async_browser = AsyncMock(return_value=mock_browser)
mock_session_manager.release_async_browser = AsyncMock()
mock_aget_current_page.return_value = mock_page
mock_page.click = AsyncMock()
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = await tool_spec.aclick_element(
selector="#button", thread_id="test-thread"
)
mock_session_manager.get_async_browser.assert_awaited_once_with("test-thread")
mock_aget_current_page.assert_awaited_once_with(mock_browser)
mock_page.click.assert_awaited_once_with("#button", timeout=5000)
mock_session_manager.release_async_browser.assert_awaited_once_with(
"test-thread"
)
assert "Clicked on element with selector '#button'" in result
@pytest.mark.asyncio
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.aget_current_page")
async def test_aclick_element_not_found(self, mock_aget_current_page):
"""Test aclick_element with a selector that doesn't match any elements."""
mock_session_manager = MagicMock()
mock_browser = AsyncMock()
mock_page = AsyncMock()
mock_session_manager.get_async_browser = AsyncMock(return_value=mock_browser)
mock_session_manager.release_async_browser = AsyncMock()
mock_aget_current_page.return_value = mock_page
mock_page.click = AsyncMock(side_effect=Exception("Element not found"))
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = await tool_spec.aclick_element(
selector="#button", thread_id="test-thread"
)
mock_session_manager.get_async_browser.assert_awaited_once_with("test-thread")
mock_aget_current_page.assert_awaited_once_with(mock_browser)
mock_page.click.assert_awaited_once_with("#button", timeout=5000)
mock_session_manager.release_async_browser.assert_awaited_once_with(
"test-thread"
)
assert "Unable to click on element with selector '#button'" in result
@pytest.mark.asyncio
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.aget_current_page")
async def test_aextract_text_whole_page(self, mock_aget_current_page):
"""Test aextract_text for the whole page."""
mock_session_manager = MagicMock()
mock_browser = AsyncMock()
mock_page = AsyncMock()
mock_session_manager.get_async_browser = AsyncMock(return_value=mock_browser)
mock_session_manager.release_async_browser = AsyncMock()
mock_aget_current_page.return_value = mock_page
mock_page.content = AsyncMock(
return_value="<html><body>Hello World</body></html>"
)
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = await tool_spec.aextract_text(thread_id="test-thread")
mock_session_manager.get_async_browser.assert_awaited_once_with("test-thread")
mock_aget_current_page.assert_awaited_once_with(mock_browser)
mock_page.content.assert_awaited_once()
mock_session_manager.release_async_browser.assert_awaited_once_with(
"test-thread"
)
assert result == "<html><body>Hello World</body></html>"
@pytest.mark.asyncio
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.aget_current_page")
async def test_aextract_text_with_selector(self, mock_aget_current_page):
"""Test aextract_text with a selector."""
mock_session_manager = MagicMock()
mock_browser = AsyncMock()
mock_page = AsyncMock()
mock_element = AsyncMock()
mock_session_manager.get_async_browser = AsyncMock(return_value=mock_browser)
mock_session_manager.release_async_browser = AsyncMock()
mock_aget_current_page.return_value = mock_page
mock_page.query_selector = AsyncMock(return_value=mock_element)
mock_element.text_content = AsyncMock(return_value="Hello World")
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = await tool_spec.aextract_text(
selector="#content", thread_id="test-thread"
)
mock_session_manager.get_async_browser.assert_awaited_once_with("test-thread")
mock_aget_current_page.assert_awaited_once_with(mock_browser)
mock_page.query_selector.assert_awaited_once_with("#content")
mock_element.text_content.assert_awaited_once()
mock_session_manager.release_async_browser.assert_awaited_once_with(
"test-thread"
)
assert result == "Hello World"
@pytest.mark.asyncio
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.aget_current_page")
async def test_aextract_text_selector_not_found(self, mock_aget_current_page):
"""Test aextract_text with a selector that doesn't match any elements."""
mock_session_manager = MagicMock()
mock_browser = AsyncMock()
mock_page = AsyncMock()
mock_session_manager.get_async_browser = AsyncMock(return_value=mock_browser)
mock_session_manager.release_async_browser = AsyncMock()
mock_aget_current_page.return_value = mock_page
mock_page.query_selector = AsyncMock(return_value=None)
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = await tool_spec.aextract_text(
selector="#content", thread_id="test-thread"
)
mock_session_manager.get_async_browser.assert_awaited_once_with("test-thread")
mock_aget_current_page.assert_awaited_once_with(mock_browser)
mock_page.query_selector.assert_awaited_once_with("#content")
mock_session_manager.release_async_browser.assert_awaited_once_with(
"test-thread"
)
assert "No element found with selector '#content'" in result
@pytest.mark.asyncio
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.aget_current_page")
async def test_aextract_hyperlinks(self, mock_aget_current_page):
"""Test aextract_hyperlinks."""
mock_session_manager = MagicMock()
mock_browser = AsyncMock()
mock_page = AsyncMock()
mock_session_manager.get_async_browser = AsyncMock(return_value=mock_browser)
mock_session_manager.release_async_browser = AsyncMock()
mock_aget_current_page.return_value = mock_page
mock_page.eval_on_selector_all = AsyncMock(
return_value=[
{"text": "Link 1", "href": "https://example.com/1"},
{"text": "Link 2", "href": "https://example.com/2"},
]
)
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = await tool_spec.aextract_hyperlinks(thread_id="test-thread")
mock_session_manager.get_async_browser.assert_awaited_once_with("test-thread")
mock_aget_current_page.assert_awaited_once_with(mock_browser)
mock_page.eval_on_selector_all.assert_awaited_once()
mock_session_manager.release_async_browser.assert_awaited_once_with(
"test-thread"
)
assert "1. Link 1: https://example.com/1" in result
assert "2. Link 2: https://example.com/2" in result
@pytest.mark.asyncio
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.aget_current_page")
async def test_aextract_hyperlinks_no_links(self, mock_aget_current_page):
"""Test aextract_hyperlinks when no links are found."""
mock_session_manager = MagicMock()
mock_browser = AsyncMock()
mock_page = AsyncMock()
mock_session_manager.get_async_browser = AsyncMock(return_value=mock_browser)
mock_session_manager.release_async_browser = AsyncMock()
mock_aget_current_page.return_value = mock_page
mock_page.eval_on_selector_all = AsyncMock(return_value=[])
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = await tool_spec.aextract_hyperlinks(thread_id="test-thread")
mock_session_manager.get_async_browser.assert_awaited_once_with("test-thread")
mock_aget_current_page.assert_awaited_once_with(mock_browser)
mock_page.eval_on_selector_all.assert_awaited_once()
mock_session_manager.release_async_browser.assert_awaited_once_with(
"test-thread"
)
assert "No hyperlinks found on the page" in result
@pytest.mark.asyncio
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.aget_current_page")
async def test_aget_elements(self, mock_aget_current_page):
"""Test aget_elements."""
mock_session_manager = MagicMock()
mock_browser = AsyncMock()
mock_page = AsyncMock()
mock_element1 = AsyncMock()
mock_element2 = AsyncMock()
mock_session_manager.get_async_browser = AsyncMock(return_value=mock_browser)
mock_session_manager.release_async_browser = AsyncMock()
mock_aget_current_page.return_value = mock_page
mock_page.query_selector_all = AsyncMock(
return_value=[mock_element1, mock_element2]
)
mock_element1.evaluate = AsyncMock(
side_effect=["div", {"id": "div1", "class": "container"}]
)
mock_element1.text_content = AsyncMock(return_value="Content 1")
mock_element2.evaluate = AsyncMock(
side_effect=["div", {"id": "div2", "class": "container"}]
)
mock_element2.text_content = AsyncMock(return_value="Content 2")
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = await tool_spec.aget_elements(
selector="div.container", thread_id="test-thread"
)
mock_session_manager.get_async_browser.assert_awaited_once_with("test-thread")
mock_aget_current_page.assert_awaited_once_with(mock_browser)
mock_page.query_selector_all.assert_awaited_once_with("div.container")
mock_session_manager.release_async_browser.assert_awaited_once_with(
"test-thread"
)
assert "Found 2 element(s) matching selector 'div.container'" in result
assert '1. <div id="div1", class="container">Content 1</div>' in result
assert '2. <div id="div2", class="container">Content 2</div>' in result
@pytest.mark.asyncio
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.aget_current_page")
async def test_aget_elements_not_found(self, mock_aget_current_page):
"""Test aget_elements when no elements are found."""
mock_session_manager = MagicMock()
mock_browser = AsyncMock()
mock_page = AsyncMock()
mock_session_manager.get_async_browser = AsyncMock(return_value=mock_browser)
mock_session_manager.release_async_browser = AsyncMock()
mock_aget_current_page.return_value = mock_page
mock_page.query_selector_all = AsyncMock(return_value=[])
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = await tool_spec.aget_elements(
selector="div.container", thread_id="test-thread"
)
mock_session_manager.get_async_browser.assert_awaited_once_with("test-thread")
mock_aget_current_page.assert_awaited_once_with(mock_browser)
mock_page.query_selector_all.assert_awaited_once_with("div.container")
mock_session_manager.release_async_browser.assert_awaited_once_with(
"test-thread"
)
assert "No elements found matching selector 'div.container'" in result
@pytest.mark.asyncio
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.aget_current_page")
async def test_anavigate_back(self, mock_aget_current_page):
"""Test anavigate_back."""
mock_session_manager = MagicMock()
mock_browser = AsyncMock()
mock_page = AsyncMock()
mock_response = AsyncMock()
mock_session_manager.get_async_browser = AsyncMock(return_value=mock_browser)
mock_session_manager.release_async_browser = AsyncMock()
mock_aget_current_page.return_value = mock_page
mock_page.go_back = AsyncMock(return_value=mock_response)
mock_page.url = "https://example.com/previous"
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = await tool_spec.anavigate_back(thread_id="test-thread")
mock_session_manager.get_async_browser.assert_awaited_once_with("test-thread")
mock_aget_current_page.assert_awaited_once_with(mock_browser)
mock_page.go_back.assert_awaited_once()
mock_session_manager.release_async_browser.assert_awaited_once_with(
"test-thread"
)
assert "Navigated back to https://example.com/previous" in result
@pytest.mark.asyncio
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.aget_current_page")
async def test_anavigate_back_no_history(self, mock_aget_current_page):
"""Test anavigate_back when there's no history."""
mock_session_manager = MagicMock()
mock_browser = AsyncMock()
mock_page = AsyncMock()
mock_session_manager.get_async_browser = AsyncMock(return_value=mock_browser)
mock_session_manager.release_async_browser = AsyncMock()
mock_aget_current_page.return_value = mock_page
mock_page.go_back = AsyncMock(return_value=None)
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = await tool_spec.anavigate_back(thread_id="test-thread")
mock_session_manager.get_async_browser.assert_awaited_once_with("test-thread")
mock_aget_current_page.assert_awaited_once_with(mock_browser)
mock_page.go_back.assert_awaited_once()
mock_session_manager.release_async_browser.assert_awaited_once_with(
"test-thread"
)
assert "Could not navigate back" in result
@pytest.mark.asyncio
@patch("llama_index.tools.aws_bedrock_agentcore.browser.base.aget_current_page")
async def test_acurrent_webpage(self, mock_aget_current_page):
"""Test acurrent_webpage."""
mock_session_manager = MagicMock()
mock_browser = AsyncMock()
mock_page = AsyncMock()
mock_session_manager.get_async_browser = AsyncMock(return_value=mock_browser)
mock_session_manager.release_async_browser = AsyncMock()
mock_aget_current_page.return_value = mock_page
mock_page.url = "https://example.com"
mock_page.title = AsyncMock(return_value="Example Website")
mock_page.evaluate = AsyncMock(
return_value={
"width": 1024,
"height": 768,
"links": 10,
"images": 5,
"forms": 2,
}
)
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._session_manager = mock_session_manager
result = await tool_spec.acurrent_webpage(thread_id="test-thread")
mock_session_manager.get_async_browser.assert_awaited_once_with("test-thread")
mock_aget_current_page.assert_awaited_once_with(mock_browser)
mock_page.title.assert_awaited_once()
mock_page.evaluate.assert_awaited_once()
mock_session_manager.release_async_browser.assert_awaited_once_with(
"test-thread"
)
assert "URL: https://example.com" in result
assert "Title: Example Website" in result
assert "Viewport size: 1024x768" in result
assert "Links: 10" in result
assert "Images: 5" in result
assert "Forms: 2" in result
@pytest.mark.asyncio
async def test_cleanup_specific_thread(self):
"""Test cleanup for a specific thread."""
mock_browser_client = MagicMock()
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._browser_clients = {"test-thread": mock_browser_client}
await tool_spec.cleanup(thread_id="test-thread")
mock_browser_client.stop.assert_called_once()
assert "test-thread" not in tool_spec._browser_clients
@pytest.mark.asyncio
async def test_cleanup_all_threads(self):
"""Test cleanup for all threads."""
mock_browser_client1 = MagicMock()
mock_browser_client2 = MagicMock()
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._browser_clients = {
"thread-1": mock_browser_client1,
"thread-2": mock_browser_client2,
}
await tool_spec.cleanup()
mock_browser_client1.stop.assert_called_once()
mock_browser_client2.stop.assert_called_once()
assert tool_spec._browser_clients == {}
@pytest.mark.asyncio
async def test_cleanup_with_exception(self):
"""Test cleanup when an exception occurs."""
mock_browser_client = MagicMock()
mock_browser_client.stop.side_effect = Exception("Test error")
tool_spec = AgentCoreBrowserToolSpec()
tool_spec._browser_clients = {"test-thread": mock_browser_client}
# Should not raise an exception
await tool_spec.cleanup(thread_id="test-thread")
mock_browser_client.stop.assert_called_once()
# Note: In the actual implementation, the thread is not removed from _browser_clients
# when an exception occurs during stop(), so we do not assert removal here
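# A minimal sketch, assuming the behavior the three cleanup tests above encode (an
# assumption reconstructed from their assertions, not the shipped implementation):
# stop() errors are swallowed, and an entry is only dropped from the registry after
# a successful stop.
async def _cleanup_sketch(tool_spec, thread_id=None):
    thread_ids = [thread_id] if thread_id else list(tool_spec._browser_clients)
    for tid in thread_ids:
        client = tool_spec._browser_clients.get(tid)
        if client is None:
            continue
        try:
            client.stop()
        except Exception:
            continue  # on failure the client stays registered, per the note above
        del tool_spec._browser_clients[tid]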
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_browser_async.py",
"license": "MIT License",
"lines": 392,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_browser_session_manager.py | import pytest
from unittest.mock import patch, MagicMock
from llama_index.tools.aws_bedrock_agentcore.browser.browser_session_manager import (
BrowserSessionManager,
)
class TestBrowserSessionManager:
def test_init(self):
manager = BrowserSessionManager(region="us-east-1")
assert manager.region == "us-east-1"
assert manager._async_sessions == {}
assert manager._sync_sessions == {}
def test_init_default_region(self):
manager = BrowserSessionManager()
assert manager.region == "us-west-2"
assert manager._async_sessions == {}
assert manager._sync_sessions == {}
@patch(
"llama_index.tools.aws_bedrock_agentcore.browser.browser_session_manager.BrowserClient"
)
def test_get_sync_browser_existing(self, mock_browser_client):
manager = BrowserSessionManager()
mock_browser = MagicMock()
mock_client = MagicMock()
manager._sync_sessions = {"test-thread": (mock_client, mock_browser, False)}
browser = manager.get_sync_browser("test-thread")
assert browser == mock_browser
assert manager._sync_sessions["test-thread"] == (
mock_client,
mock_browser,
True,
)
def test_get_sync_browser_in_use(self):
manager = BrowserSessionManager()
mock_browser = MagicMock()
mock_client = MagicMock()
manager._sync_sessions = {"test-thread": (mock_client, mock_browser, True)}
with pytest.raises(
RuntimeError,
match="Browser session for thread test-thread is already in use",
):
manager.get_sync_browser("test-thread")
@patch.object(BrowserSessionManager, "_create_sync_browser_session")
def test_get_sync_browser_new(self, mock_create_sync_browser):
mock_browser = MagicMock()
mock_create_sync_browser.return_value = mock_browser
manager = BrowserSessionManager()
browser = manager.get_sync_browser("test-thread")
assert browser == mock_browser
mock_create_sync_browser.assert_called_once_with("test-thread")
def test_release_sync_browser(self):
manager = BrowserSessionManager()
mock_browser = MagicMock()
mock_client = MagicMock()
manager._sync_sessions = {"test-thread": (mock_client, mock_browser, True)}
manager.release_sync_browser("test-thread")
assert manager._sync_sessions["test-thread"] == (
mock_client,
mock_browser,
False,
)
def test_release_sync_browser_not_found(self):
manager = BrowserSessionManager()
# Should not raise an exception
manager.release_sync_browser("test-thread")
def test_close_sync_browser(self):
manager = BrowserSessionManager()
mock_browser = MagicMock()
mock_client = MagicMock()
manager._sync_sessions = {"test-thread": (mock_client, mock_browser, False)}
manager.close_sync_browser("test-thread")
mock_browser.close.assert_called_once()
mock_client.stop.assert_called_once()
assert "test-thread" not in manager._sync_sessions
def test_close_sync_browser_not_found(self):
manager = BrowserSessionManager()
# Should not raise an exception
manager.close_sync_browser("test-thread")
def test_close_sync_browser_with_errors(self):
manager = BrowserSessionManager()
mock_browser = MagicMock()
mock_browser.close.side_effect = Exception("Browser close error")
mock_client = MagicMock()
mock_client.stop.side_effect = Exception("Client stop error")
manager._sync_sessions = {"test-thread": (mock_client, mock_browser, False)}
# Should not raise an exception
manager.close_sync_browser("test-thread")
mock_browser.close.assert_called_once()
mock_client.stop.assert_called_once()
assert "test-thread" not in manager._sync_sessions
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_browser_session_manager.py",
"license": "MIT License",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_browser_spec.py | from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.tools.aws_bedrock_agentcore import AgentCoreBrowserToolSpec
def test_class():
names_of_base_classes = [b.__name__ for b in AgentCoreBrowserToolSpec.__mro__]
assert BaseToolSpec.__name__ in names_of_base_classes
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_browser_spec.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_browser_utils.py | from unittest.mock import MagicMock
from llama_index.tools.aws_bedrock_agentcore.browser.utils import get_current_page
class TestGetCurrentPage:
def test_no_contexts(self):
mock_browser = MagicMock()
mock_browser.contexts = []
mock_context = MagicMock()
mock_page = MagicMock()
mock_browser.new_context.return_value = mock_context
mock_context.new_page.return_value = mock_page
result = get_current_page(mock_browser)
assert result == mock_page
mock_browser.new_context.assert_called_once()
mock_context.new_page.assert_called_once()
def test_context_no_pages(self):
mock_browser = MagicMock()
mock_context = MagicMock()
mock_context.pages = []
mock_browser.contexts = [mock_context]
mock_page = MagicMock()
mock_context.new_page.return_value = mock_page
result = get_current_page(mock_browser)
assert result == mock_page
mock_browser.new_context.assert_not_called()
mock_context.new_page.assert_called_once()
def test_context_with_pages(self):
mock_browser = MagicMock()
mock_context = MagicMock()
mock_page1 = MagicMock()
mock_page2 = MagicMock()
mock_context.pages = [mock_page1, mock_page2]
mock_browser.contexts = [mock_context]
result = get_current_page(mock_browser)
assert result == mock_page2 # Should return the last page
mock_browser.new_context.assert_not_called()
mock_context.new_page.assert_not_called()
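# A minimal sketch of the page-selection policy the three tests above pin down
# (inferred from the assertions; the real helper is imported from browser.utils):
def _get_current_page_sketch(browser):
    if not browser.contexts:
        return browser.new_context().new_page()
    context = browser.contexts[0]
    if not context.pages:
        return context.new_page()
    return context.pages[-1]  # most recently opened page wins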
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_browser_utils.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_code_interpreter.py | from unittest.mock import patch, MagicMock
import os
import json
from llama_index.tools.aws_bedrock_agentcore import AgentCoreCodeInterpreterToolSpec
from llama_index.tools.aws_bedrock_agentcore.code_interpreter.base import (
extract_output_from_stream,
get_aws_region,
)
class TestGetAwsRegion:
@patch.dict(os.environ, {"AWS_REGION": "us-east-1"})
def test_get_aws_region_from_aws_region(self):
assert get_aws_region() == "us-east-1"
@patch.dict(
os.environ, {"AWS_DEFAULT_REGION": "us-west-1", "AWS_REGION": ""}, clear=True
)
def test_get_aws_region_from_aws_default_region(self):
assert get_aws_region() == "us-west-1"
@patch.dict(os.environ, {}, clear=True)
def test_get_aws_region_default(self):
assert get_aws_region() == "us-west-2"
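# A minimal sketch of the region resolution these tests encode (the tested helper is
# imported from code_interpreter.base; this reconstruction is an assumption): a
# non-empty AWS_REGION wins, then AWS_DEFAULT_REGION, then the "us-west-2" fallback.
def _get_aws_region_sketch() -> str:
    return (
        os.environ.get("AWS_REGION")
        or os.environ.get("AWS_DEFAULT_REGION")
        or "us-west-2"
    )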
class TestExtractOutputFromStream:
def test_extract_output_text_only(self):
response = {
"stream": [
{
"result": {
"content": [
{"type": "text", "text": "Hello"},
{"type": "text", "text": " World"},
]
}
}
]
}
output = extract_output_from_stream(response)
assert output == "Hello\n World"
def test_extract_output_with_resource_text(self):
response = {
"stream": [
{
"result": {
"content": [
{"type": "text", "text": "Created file:"},
{
"type": "resource",
"resource": {
"uri": "file:///tmp/test.py",
"text": "print('Hello World')",
},
},
]
}
}
]
}
output = extract_output_from_stream(response)
assert (
output
== "Created file:\n==== File: /tmp/test.py ====\nprint('Hello World')\n"
)
def test_extract_output_with_resource_no_text(self):
resource_data = {"uri": "file:///tmp/image.png", "mime": "image/png"}
response = {
"stream": [
{
"result": {
"content": [
{"type": "text", "text": "Generated image:"},
{"type": "resource", "resource": resource_data},
]
}
}
]
}
output = extract_output_from_stream(response)
assert output == f"Generated image:\n{json.dumps(resource_data)}"
def test_extract_output_multiple_events(self):
response = {
"stream": [
{"result": {"content": [{"type": "text", "text": "First part"}]}},
{"result": {"content": [{"type": "text", "text": "Second part"}]}},
]
}
output = extract_output_from_stream(response)
assert output == "First part\nSecond part"
class TestAgentCoreCodeInterpreterToolSpec:
@patch(
"llama_index.tools.aws_bedrock_agentcore.code_interpreter.base.CodeInterpreter"
)
def test_init(self, mock_code_interpreter):
tool_spec = AgentCoreCodeInterpreterToolSpec(region="us-east-1")
assert tool_spec.region == "us-east-1"
assert tool_spec._code_interpreters == {}
@patch(
"llama_index.tools.aws_bedrock_agentcore.code_interpreter.base.get_aws_region"
)
def test_init_default_region(self, mock_get_aws_region):
mock_get_aws_region.return_value = "us-west-2"
tool_spec = AgentCoreCodeInterpreterToolSpec()
assert tool_spec.region == "us-west-2"
mock_get_aws_region.assert_called_once()
@patch(
"llama_index.tools.aws_bedrock_agentcore.code_interpreter.base.CodeInterpreter"
)
def test_get_or_create_interpreter_new(self, mock_code_interpreter):
mock_instance = MagicMock()
mock_code_interpreter.return_value = mock_instance
tool_spec = AgentCoreCodeInterpreterToolSpec(region="us-east-1")
interpreter = tool_spec._get_or_create_interpreter("test-thread")
assert interpreter == mock_instance
assert "test-thread" in tool_spec._code_interpreters
assert tool_spec._code_interpreters["test-thread"] == mock_instance
mock_code_interpreter.assert_called_once_with(region="us-east-1")
mock_instance.start.assert_called_once()
@patch(
"llama_index.tools.aws_bedrock_agentcore.code_interpreter.base.CodeInterpreter"
)
def test_get_or_create_interpreter_existing(self, mock_code_interpreter):
mock_instance = MagicMock()
tool_spec = AgentCoreCodeInterpreterToolSpec(region="us-east-1")
tool_spec._code_interpreters["test-thread"] = mock_instance
interpreter = tool_spec._get_or_create_interpreter("test-thread")
assert interpreter == mock_instance
mock_code_interpreter.assert_not_called()
mock_instance.start.assert_not_called()
@patch(
"llama_index.tools.aws_bedrock_agentcore.code_interpreter.base.extract_output_from_stream"
)
def test_execute_code(self, mock_extract_output):
mock_code_interpreter = MagicMock()
mock_response = {
"stream": [
{"result": {"content": [{"type": "text", "text": "Hello World"}]}}
]
}
mock_code_interpreter.invoke.return_value = mock_response
mock_extract_output.return_value = "Hello World"
tool_spec = AgentCoreCodeInterpreterToolSpec()
tool_spec._get_or_create_interpreter = MagicMock(
return_value=mock_code_interpreter
)
result = tool_spec.execute_code(
code="print('Hello World')",
language="python",
clear_context=True,
thread_id="test-thread",
)
tool_spec._get_or_create_interpreter.assert_called_once_with(
thread_id="test-thread"
)
mock_code_interpreter.invoke.assert_called_once_with(
method="executeCode",
params={
"code": "print('Hello World')",
"language": "python",
"clearContext": True,
},
)
mock_extract_output.assert_called_once_with(mock_response)
assert result == "Hello World"
@patch(
"llama_index.tools.aws_bedrock_agentcore.code_interpreter.base.extract_output_from_stream"
)
def test_execute_code_exception(self, mock_extract_output):
mock_code_interpreter = MagicMock()
mock_code_interpreter.invoke.side_effect = Exception("Test error")
tool_spec = AgentCoreCodeInterpreterToolSpec()
tool_spec._get_or_create_interpreter = MagicMock(
return_value=mock_code_interpreter
)
result = tool_spec.execute_code(
code="print('Hello World')", language="python", thread_id="test-thread"
)
assert "Error executing code: Test error" in result
mock_extract_output.assert_not_called()
@patch(
"llama_index.tools.aws_bedrock_agentcore.code_interpreter.base.extract_output_from_stream"
)
def test_execute_command(self, mock_extract_output):
mock_code_interpreter = MagicMock()
mock_response = {
"stream": [
{"result": {"content": [{"type": "text", "text": "command output"}]}}
]
}
mock_code_interpreter.invoke.return_value = mock_response
mock_extract_output.return_value = "command output"
tool_spec = AgentCoreCodeInterpreterToolSpec()
tool_spec._get_or_create_interpreter = MagicMock(
return_value=mock_code_interpreter
)
result = tool_spec.execute_command(command="ls -la", thread_id="test-thread")
tool_spec._get_or_create_interpreter.assert_called_once_with(
thread_id="test-thread"
)
mock_code_interpreter.invoke.assert_called_once_with(
method="executeCommand", params={"command": "ls -la"}
)
mock_extract_output.assert_called_once_with(mock_response)
assert result == "command output"
@patch(
"llama_index.tools.aws_bedrock_agentcore.code_interpreter.base.extract_output_from_stream"
)
def test_read_files(self, mock_extract_output):
mock_code_interpreter = MagicMock()
mock_response = {
"stream": [
{"result": {"content": [{"type": "text", "text": "file content"}]}}
]
}
mock_code_interpreter.invoke.return_value = mock_response
mock_extract_output.return_value = "file content"
tool_spec = AgentCoreCodeInterpreterToolSpec()
tool_spec._get_or_create_interpreter = MagicMock(
return_value=mock_code_interpreter
)
result = tool_spec.read_files(paths=["/tmp/test.txt"], thread_id="test-thread")
tool_spec._get_or_create_interpreter.assert_called_once_with(
thread_id="test-thread"
)
mock_code_interpreter.invoke.assert_called_once_with(
method="readFiles", params={"paths": ["/tmp/test.txt"]}
)
mock_extract_output.assert_called_once_with(mock_response)
assert result == "file content"
@patch(
"llama_index.tools.aws_bedrock_agentcore.code_interpreter.base.extract_output_from_stream"
)
def test_list_files(self, mock_extract_output):
mock_code_interpreter = MagicMock()
mock_response = {
"stream": [
{
"result": {
"content": [{"type": "text", "text": "file1.txt\nfile2.txt"}]
}
}
]
}
mock_code_interpreter.invoke.return_value = mock_response
mock_extract_output.return_value = "file1.txt\nfile2.txt"
tool_spec = AgentCoreCodeInterpreterToolSpec()
tool_spec._get_or_create_interpreter = MagicMock(
return_value=mock_code_interpreter
)
result = tool_spec.list_files(directory_path="/tmp", thread_id="test-thread")
tool_spec._get_or_create_interpreter.assert_called_once_with(
thread_id="test-thread"
)
mock_code_interpreter.invoke.assert_called_once_with(
method="listFiles", params={"directoryPath": "/tmp"}
)
mock_extract_output.assert_called_once_with(mock_response)
assert result == "file1.txt\nfile2.txt"
@patch(
"llama_index.tools.aws_bedrock_agentcore.code_interpreter.base.extract_output_from_stream"
)
def test_delete_files(self, mock_extract_output):
mock_code_interpreter = MagicMock()
mock_response = {
"stream": [
{"result": {"content": [{"type": "text", "text": "Files deleted"}]}}
]
}
mock_code_interpreter.invoke.return_value = mock_response
mock_extract_output.return_value = "Files deleted"
tool_spec = AgentCoreCodeInterpreterToolSpec()
tool_spec._get_or_create_interpreter = MagicMock(
return_value=mock_code_interpreter
)
result = tool_spec.delete_files(
paths=["/tmp/test.txt"], thread_id="test-thread"
)
tool_spec._get_or_create_interpreter.assert_called_once_with(
thread_id="test-thread"
)
mock_code_interpreter.invoke.assert_called_once_with(
method="removeFiles", params={"paths": ["/tmp/test.txt"]}
)
mock_extract_output.assert_called_once_with(mock_response)
assert result == "Files deleted"
@patch(
"llama_index.tools.aws_bedrock_agentcore.code_interpreter.base.extract_output_from_stream"
)
def test_write_files(self, mock_extract_output):
mock_code_interpreter = MagicMock()
mock_response = {
"stream": [
{"result": {"content": [{"type": "text", "text": "Files written"}]}}
]
}
mock_code_interpreter.invoke.return_value = mock_response
mock_extract_output.return_value = "Files written"
tool_spec = AgentCoreCodeInterpreterToolSpec()
tool_spec._get_or_create_interpreter = MagicMock(
return_value=mock_code_interpreter
)
files = [{"path": "/tmp/test.txt", "text": "Hello World"}]
result = tool_spec.write_files(files=files, thread_id="test-thread")
tool_spec._get_or_create_interpreter.assert_called_once_with(
thread_id="test-thread"
)
mock_code_interpreter.invoke.assert_called_once_with(
method="writeFiles", params={"content": files}
)
mock_extract_output.assert_called_once_with(mock_response)
assert result == "Files written"
@patch(
"llama_index.tools.aws_bedrock_agentcore.code_interpreter.base.extract_output_from_stream"
)
def test_start_command(self, mock_extract_output):
mock_code_interpreter = MagicMock()
mock_response = {
"stream": [
{
"result": {
"content": [{"type": "text", "text": "Task started: task-123"}]
}
}
]
}
mock_code_interpreter.invoke.return_value = mock_response
mock_extract_output.return_value = "Task started: task-123"
tool_spec = AgentCoreCodeInterpreterToolSpec()
tool_spec._get_or_create_interpreter = MagicMock(
return_value=mock_code_interpreter
)
result = tool_spec.start_command(command="sleep 10", thread_id="test-thread")
tool_spec._get_or_create_interpreter.assert_called_once_with(
thread_id="test-thread"
)
mock_code_interpreter.invoke.assert_called_once_with(
method="startCommandExecution", params={"command": "sleep 10"}
)
mock_extract_output.assert_called_once_with(mock_response)
assert result == "Task started: task-123"
@patch(
"llama_index.tools.aws_bedrock_agentcore.code_interpreter.base.extract_output_from_stream"
)
def test_get_task(self, mock_extract_output):
mock_code_interpreter = MagicMock()
mock_response = {
"stream": [
{
"result": {
"content": [{"type": "text", "text": "Task status: running"}]
}
}
]
}
mock_code_interpreter.invoke.return_value = mock_response
mock_extract_output.return_value = "Task status: running"
tool_spec = AgentCoreCodeInterpreterToolSpec()
tool_spec._get_or_create_interpreter = MagicMock(
return_value=mock_code_interpreter
)
result = tool_spec.get_task(task_id="task-123", thread_id="test-thread")
tool_spec._get_or_create_interpreter.assert_called_once_with(
thread_id="test-thread"
)
mock_code_interpreter.invoke.assert_called_once_with(
method="getTask", params={"taskId": "task-123"}
)
mock_extract_output.assert_called_once_with(mock_response)
assert result == "Task status: running"
@patch(
"llama_index.tools.aws_bedrock_agentcore.code_interpreter.base.extract_output_from_stream"
)
def test_stop_task(self, mock_extract_output):
mock_code_interpreter = MagicMock()
mock_response = {
"stream": [
{"result": {"content": [{"type": "text", "text": "Task stopped"}]}}
]
}
mock_code_interpreter.invoke.return_value = mock_response
mock_extract_output.return_value = "Task stopped"
tool_spec = AgentCoreCodeInterpreterToolSpec()
tool_spec._get_or_create_interpreter = MagicMock(
return_value=mock_code_interpreter
)
result = tool_spec.stop_task(task_id="task-123", thread_id="test-thread")
tool_spec._get_or_create_interpreter.assert_called_once_with(
thread_id="test-thread"
)
mock_code_interpreter.invoke.assert_called_once_with(
method="stopTask", params={"taskId": "task-123"}
)
mock_extract_output.assert_called_once_with(mock_response)
assert result == "Task stopped"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_code_interpreter.py",
"license": "MIT License",
"lines": 390,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_code_interpreter_spec.py | from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.tools.aws_bedrock_agentcore import AgentCoreCodeInterpreterToolSpec
def test_class():
names_of_base_classes = [
b.__name__ for b in AgentCoreCodeInterpreterToolSpec.__mro__
]
assert BaseToolSpec.__name__ in names_of_base_classes
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_code_interpreter_spec.py",
"license": "MIT License",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-vercel-ai-gateway/llama_index/llms/vercel_ai_gateway/base.py | from typing import Any, Dict, Optional
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.core.bridge.pydantic import Field
from llama_index.core.constants import (
DEFAULT_CONTEXT_WINDOW,
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.llms.openai_like import OpenAILike
DEFAULT_API_BASE = "https://ai-gateway.vercel.sh/v1"
DEFAULT_MODEL = "anthropic/claude-4-sonnet"
class VercelAIGateway(OpenAILike):
"""
Vercel AI Gateway LLM.
To instantiate the `VercelAIGateway` class, you will need to provide authentication credentials.
You can authenticate in the following ways (in order of precedence):
1. Pass an API key or OIDC token directly to the `api_key` parameter
2. Set the `VERCEL_AI_GATEWAY_API_KEY` environment variable
3. Set the `VERCEL_OIDC_TOKEN` environment variable
If you haven't obtained an API key or OIDC token yet, visit the Vercel AI Gateway docs
at https://vercel.com/ai-gateway for instructions. Once you have your credentials, you can use
the `VercelAIGateway` class to interact with the LLM for tasks like chatting, streaming, and
completing prompts.
Examples:
`pip install llama-index-llms-vercel-ai-gateway`
```python
from llama_index.llms.vercel_ai_gateway import VercelAIGateway
# Using API key directly
llm = VercelAIGateway(
api_key="<your-api-key>",
max_tokens=64000,
context_window=200000,
model="anthropic/claude-4-sonnet",
)
# Using OIDC token directly
llm = VercelAIGateway(
api_key="<your-oidc-token>",
max_tokens=64000,
context_window=200000,
model="anthropic/claude-4-sonnet",
)
# Using environment variables (VERCEL_AI_GATEWAY_API_KEY or VERCEL_OIDC_TOKEN)
llm = VercelAIGateway(
max_tokens=64000,
context_window=200000,
model="anthropic/claude-4-sonnet",
)
# Customizing headers (overrides default http-referer and x-title)
llm = VercelAIGateway(
api_key="<your-api-key>",
model="anthropic/claude-4-sonnet",
default_headers={
"http-referer": "https://myapp.com/",
"x-title": "My App"
}
)
response = llm.complete("Hello World!")
print(str(response))
```
"""
model: str = Field(
description="The model to use through Vercel AI Gateway. From your Vercel dashboard, go to the AI Gateway tab and select the Model List tab on the left dropdown to see the available models."
)
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model. From your Vercel dashboard, go to the AI Gateway tab and select the Model List tab on the left dropdown to see the available models and their context window sizes.",
gt=0,
)
is_chat_model: bool = Field(
default=True,
description=LLMMetadata.model_fields["is_chat_model"].description,
)
def __init__(
self,
model: str = DEFAULT_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 5,
api_base: Optional[str] = DEFAULT_API_BASE,
api_key: Optional[str] = None,
default_headers: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> None:
additional_kwargs = additional_kwargs or {}
api_base = get_from_param_or_env(
"api_base", api_base, "VERCEL_AI_GATEWAY_API_BASE"
)
# Resolve the API key in order of precedence: explicit param, then the
# VERCEL_AI_GATEWAY_API_KEY env var, then the VERCEL_OIDC_TOKEN env var
if api_key is None:
try:
api_key = get_from_param_or_env(
"api_key", None, "VERCEL_AI_GATEWAY_API_KEY"
)
except ValueError:
try:
api_key = get_from_param_or_env(
"oidc_token", None, "VERCEL_OIDC_TOKEN"
)
except ValueError:
pass
# Set up required Vercel AI Gateway headers
gateway_headers = {
"http-referer": "https://www.llamaindex.ai/",
"x-title": "LlamaIndex",
}
if default_headers:
gateway_headers.update(default_headers)
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=api_base,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
default_headers=gateway_headers,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "VercelAIGateway_LLM"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-vercel-ai-gateway/llama_index/llms/vercel_ai_gateway/base.py",
"license": "MIT License",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-vercel-ai-gateway/tests/test_vercel_ai_gateway.py | import inspect
import os
from collections.abc import AsyncGenerator, AsyncIterator
from unittest.mock import AsyncMock, patch
import pytest
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
CompletionResponse,
MessageRole,
)
from llama_index.llms.vercel_ai_gateway import VercelAIGateway
@pytest.fixture()
def vercel_ai_gateway_llm():
api_key = os.getenv("VERCEL_AI_GATEWAY_API_KEY") or os.getenv("VERCEL_OIDC_TOKEN")
if api_key is None:
pytest.skip(
"VERCEL_AI_GATEWAY_API_KEY or VERCEL_OIDC_TOKEN not set in environment"
)
return VercelAIGateway(api_key=api_key)
@pytest.fixture()
def mock_vercel_ai_gateway_llm():
return VercelAIGateway(api_key="test")
def test_get_context_window():
llm = VercelAIGateway(api_key="dummy", model="anthropic/claude-4-sonnet")
# 3900 is DEFAULT_CONTEXT_WINDOW from llama_index.core.constants
assert llm.context_window == 3900
llm.context_window = 200000
assert llm.context_window == 200000
def test_get_all_kwargs():
llm = VercelAIGateway(
api_key="dummy", additional_kwargs={"foo": "bar"}, temperature=0.7
)
# Test that additional_kwargs are accessible
assert llm.additional_kwargs["foo"] == "bar"
assert llm.temperature == 0.7
def test_initialization_with_api_key():
llm = VercelAIGateway(api_key="test-key")
assert llm.api_key == "test-key"
assert llm.model == "anthropic/claude-4-sonnet"
assert llm.api_base == "https://ai-gateway.vercel.sh/v1"
def test_initialization_with_custom_model():
llm = VercelAIGateway(api_key="test-key", model="openai/gpt-4")
assert llm.model == "openai/gpt-4"
def test_class_name():
llm = VercelAIGateway(api_key="test-key")
assert llm.class_name() == "VercelAIGateway_LLM"
def test_chat(vercel_ai_gateway_llm):
messages = [
ChatMessage(role="system", content="Be precise and concise."),
ChatMessage(role="user", content="Tell me 5 sentences about AI."),
]
response = vercel_ai_gateway_llm.chat(messages)
assert isinstance(response, ChatResponse)
assert response.message.content.strip()
def test_complete(vercel_ai_gateway_llm):
prompt = "Artificial Intelligence is a field that focuses on"
response = vercel_ai_gateway_llm.complete(prompt)
assert isinstance(response, CompletionResponse)
assert response.text.strip()
def test_stream_chat(vercel_ai_gateway_llm):
messages = [
ChatMessage(role="system", content="You are a helpful assistant."),
ChatMessage(
role="user", content="Name the first 5 elements in the periodic table."
),
]
stream = vercel_ai_gateway_llm.stream_chat(messages)
assert inspect.isgenerator(stream)
response = ""
for chunk in stream:
assert isinstance(chunk, ChatResponse)
assert chunk.delta is not None
response += chunk.delta
assert response.strip()
def test_stream_complete(vercel_ai_gateway_llm):
prompt = "List the first 5 planets in the solar system:"
stream = vercel_ai_gateway_llm.stream_complete(prompt)
assert inspect.isgenerator(stream)
response = ""
for chunk in stream:
assert isinstance(chunk, CompletionResponse)
assert chunk.delta is not None
response += chunk.delta
assert response.strip()
@pytest.mark.asyncio
async def test_achat(vercel_ai_gateway_llm):
messages = [
ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
ChatMessage(
role=MessageRole.USER,
content="What is the largest planet in our solar system?",
),
]
response = await vercel_ai_gateway_llm.achat(messages)
assert isinstance(response, ChatResponse)
assert response.message.content.strip()
@pytest.mark.asyncio
async def test_acomplete(vercel_ai_gateway_llm):
prompt = "The largest planet in our solar system is"
response = await vercel_ai_gateway_llm.acomplete(prompt)
assert isinstance(response, CompletionResponse)
assert response.text.strip()
@pytest.mark.asyncio
async def test_astream_chat(vercel_ai_gateway_llm):
messages = [
ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
ChatMessage(
role=MessageRole.USER,
content="Name the first 5 elements in the periodic table.",
),
]
stream = await vercel_ai_gateway_llm.astream_chat(messages)
assert isinstance(stream, AsyncIterator)
response = ""
async for chunk in stream:
assert isinstance(chunk, ChatResponse)
assert chunk.delta is not None
response += chunk.delta
assert response.strip()
@pytest.mark.asyncio
async def test_astream_complete(vercel_ai_gateway_llm):
prompt = "List the first 5 elements in the periodic table:"
stream = await vercel_ai_gateway_llm.astream_complete(prompt)
assert isinstance(stream, AsyncIterator)
response = ""
async for chunk in stream:
assert isinstance(chunk, CompletionResponse)
assert chunk.delta is not None
response += chunk.delta
assert response.strip()
def test_chat_mock(mock_vercel_ai_gateway_llm):
# Mock the client.chat.completions.create method that OpenAI base class calls
with patch.object(mock_vercel_ai_gateway_llm, "_get_client") as mock_get_client:
mock_client = mock_get_client.return_value
mock_response = type(
"MockResponse",
(),
{
"choices": [
type(
"MockChoice",
(),
{
"message": type(
"MockMessage",
(),
{
"content": "mock response",
"role": "assistant",
"tool_calls": None,
"function_call": None,
"audio": None,
},
)(),
"logprobs": None,
},
)()
],
"usage": type(
"MockUsage",
(),
{"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15},
)(),
},
)()
mock_client.chat.completions.create.return_value = mock_response
messages = [ChatMessage(role="user", content="Hi")]
result = mock_vercel_ai_gateway_llm.chat(messages)
assert result.message.content == "mock response"
def test_complete_mock(mock_vercel_ai_gateway_llm):
# Mock the client.chat.completions.create method since complete() converts to chat
with patch.object(mock_vercel_ai_gateway_llm, "_get_client") as mock_get_client:
mock_client = mock_get_client.return_value
mock_response = type(
"MockResponse",
(),
{
"choices": [
type(
"MockChoice",
(),
{
"message": type(
"MockMessage",
(),
{
"content": "mock completion",
"role": "assistant",
"tool_calls": None,
"function_call": None,
"audio": None,
},
)(),
"logprobs": None,
},
)()
],
"usage": type(
"MockUsage",
(),
{"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15},
)(),
},
)()
mock_client.chat.completions.create.return_value = mock_response
result = mock_vercel_ai_gateway_llm.complete("hello")
assert result.text == "mock completion"
@pytest.mark.asyncio
async def test_achat_mock(mock_vercel_ai_gateway_llm):
with patch.object(mock_vercel_ai_gateway_llm, "_get_aclient") as mock_get_aclient:
mock_client = mock_get_aclient.return_value
mock_response = type(
"MockResponse",
(),
{
"choices": [
type(
"MockChoice",
(),
{
"message": type(
"MockMessage",
(),
{
"content": "mock async",
"role": "assistant",
"tool_calls": None,
"function_call": None,
"audio": None,
},
)(),
"logprobs": None,
},
)()
],
"usage": type(
"MockUsage",
(),
{"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15},
)(),
},
)()
mock_client.chat.completions.create = AsyncMock(return_value=mock_response)
messages = [ChatMessage(role="user", content="Hi")]
result = await mock_vercel_ai_gateway_llm.achat(messages)
assert result.message.content == "mock async"
@pytest.mark.asyncio
async def test_acomplete_mock(mock_vercel_ai_gateway_llm):
with patch.object(mock_vercel_ai_gateway_llm, "_get_aclient") as mock_get_aclient:
mock_client = mock_get_aclient.return_value
mock_response = type(
"MockResponse",
(),
{
"choices": [
type(
"MockChoice",
(),
{
"message": type(
"MockMessage",
(),
{
"content": "mock async completion",
"role": "assistant",
"tool_calls": None,
"function_call": None,
"audio": None,
},
)(),
"logprobs": None,
},
)()
],
"usage": type(
"MockUsage",
(),
{"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15},
)(),
},
)()
mock_client.chat.completions.create = AsyncMock(return_value=mock_response)
result = await mock_vercel_ai_gateway_llm.acomplete("hello")
assert result.text == "mock async completion"
def test_stream_chat_mock(mock_vercel_ai_gateway_llm):
with patch.object(mock_vercel_ai_gateway_llm, "_get_client") as mock_get_client:
mock_client = mock_get_client.return_value
# Create mock streaming response
def mock_stream_response():
chunk1 = type(
"MockChunk",
(),
{
"choices": [
type(
"MockChoice",
(),
{
"delta": type(
"MockDelta",
(),
{
"content": "Hello ",
"tool_calls": None,
"function_call": None,
"role": None,
},
)()
},
)()
]
},
)()
chunk2 = type(
"MockChunk",
(),
{
"choices": [
type(
"MockChoice",
(),
{
"delta": type(
"MockDelta",
(),
{
"content": "world",
"tool_calls": None,
"function_call": None,
"role": None,
},
)()
},
)()
]
},
)()
yield chunk1
yield chunk2
mock_client.chat.completions.create.return_value = mock_stream_response()
messages = [ChatMessage(role="user", content="Hi")]
result = "".join(
chunk.delta for chunk in mock_vercel_ai_gateway_llm.stream_chat(messages)
)
assert result == "Hello world"
def test_stream_complete_mock(mock_vercel_ai_gateway_llm):
with patch.object(mock_vercel_ai_gateway_llm, "_get_client") as mock_get_client:
mock_client = mock_get_client.return_value
# Create mock streaming response
def mock_stream_response():
chunk1 = type(
"MockChunk",
(),
{
"choices": [
type(
"MockChoice",
(),
{
"delta": type(
"MockDelta",
(),
{
"content": "Hi ",
"tool_calls": None,
"function_call": None,
"role": None,
},
)()
},
)()
]
},
)()
chunk2 = type(
"MockChunk",
(),
{
"choices": [
type(
"MockChoice",
(),
{
"delta": type(
"MockDelta",
(),
{
"content": "there",
"tool_calls": None,
"function_call": None,
"role": None,
},
)()
},
)()
]
},
)()
yield chunk1
yield chunk2
mock_client.chat.completions.create.return_value = mock_stream_response()
result = "".join(
chunk.delta for chunk in mock_vercel_ai_gateway_llm.stream_complete("Yo")
)
assert result == "Hi there"
@pytest.mark.asyncio
async def test_astream_chat_mock(mock_vercel_ai_gateway_llm):
with patch.object(mock_vercel_ai_gateway_llm, "_get_aclient") as mock_get_aclient:
mock_client = mock_get_aclient.return_value
# Create mock async streaming response
async def mock_astream_response():
chunk1 = type(
"MockChunk",
(),
{
"choices": [
type(
"MockChoice",
(),
{
"delta": type(
"MockDelta",
(),
{
"content": "Mocked ",
"tool_calls": None,
"function_call": None,
"role": None,
},
)()
},
)()
]
},
)()
chunk2 = type(
"MockChunk",
(),
{
"choices": [
type(
"MockChoice",
(),
{
"delta": type(
"MockDelta",
(),
{
"content": "streamed ",
"tool_calls": None,
"function_call": None,
"role": None,
},
)()
},
)()
]
},
)()
chunk3 = type(
"MockChunk",
(),
{
"choices": [
type(
"MockChoice",
(),
{
"delta": type(
"MockDelta",
(),
{
"content": "chat",
"tool_calls": None,
"function_call": None,
"role": None,
},
)()
},
)()
]
},
)()
yield chunk1
yield chunk2
yield chunk3
mock_client.chat.completions.create = AsyncMock(
return_value=mock_astream_response()
)
messages = [
ChatMessage(role=MessageRole.USER, content="Test message 1"),
ChatMessage(role=MessageRole.USER, content="Test message 2"),
]
stream = await mock_vercel_ai_gateway_llm.astream_chat(messages)
assert isinstance(stream, AsyncGenerator)
full_response = ""
async for each in stream:
full_response += each.delta
assert full_response == "Mocked streamed chat"
@pytest.mark.asyncio
async def test_astream_complete_mock(mock_vercel_ai_gateway_llm):
with patch.object(mock_vercel_ai_gateway_llm, "_get_aclient") as mock_get_aclient:
mock_client = mock_get_aclient.return_value
# Create mock async streaming response
async def mock_astream_response():
chunk1 = type(
"MockChunk",
(),
{
"choices": [
type(
"MockChoice",
(),
{
"delta": type(
"MockDelta",
(),
{
"content": "Mocked ",
"tool_calls": None,
"function_call": None,
"role": None,
},
)()
},
)()
]
},
)()
chunk2 = type(
"MockChunk",
(),
{
"choices": [
type(
"MockChoice",
(),
{
"delta": type(
"MockDelta",
(),
{
"content": "streamed ",
"tool_calls": None,
"function_call": None,
"role": None,
},
)()
},
)()
]
},
)()
chunk3 = type(
"MockChunk",
(),
{
"choices": [
type(
"MockChoice",
(),
{
"delta": type(
"MockDelta",
(),
{
"content": "completion",
"tool_calls": None,
"function_call": None,
"role": None,
},
)()
},
)()
]
},
)()
yield chunk1
yield chunk2
yield chunk3
mock_client.chat.completions.create = AsyncMock(
return_value=mock_astream_response()
)
prompt = "Test prompt"
stream = await mock_vercel_ai_gateway_llm.astream_complete(prompt)
assert isinstance(stream, AsyncGenerator)
full_response = ""
async for each in stream:
full_response += each.delta
assert full_response == "Mocked streamed completion"
def test_environment_variable_fallback():
"""Test that the LLM can be initialized using environment variables."""
with patch.dict(os.environ, {"VERCEL_AI_GATEWAY_API_KEY": "env-key"}):
llm = VercelAIGateway()
assert llm.api_key == "env-key"
def test_oidc_token_fallback():
"""Test that the LLM falls back to OIDC token when API key is not available."""
with patch.dict(os.environ, {"VERCEL_OIDC_TOKEN": "oidc-token"}, clear=True):
llm = VercelAIGateway()
assert llm.api_key == "oidc-token"
def test_custom_api_base():
"""Test that custom API base can be set."""
custom_base = "https://custom.vercel.ai/v1"
llm = VercelAIGateway(api_key="test", api_base=custom_base)
assert llm.api_base == custom_base
def test_custom_context_window():
"""Test that custom context window can be set."""
llm = VercelAIGateway(api_key="test", context_window=100000)
assert llm.context_window == 100000
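# Note: the ad-hoc `type(...)` mock objects used throughout these tests could be
# factored into a small helper. A sketch of one possible factory (illustrative
# only; not used by the tests above):
#
#     def make_mock_chat_response(content: str):
#         message = type("MockMessage", (), {
#             "content": content, "role": "assistant",
#             "tool_calls": None, "function_call": None, "audio": None,
#         })()
#         choice = type("MockChoice", (), {"message": message, "logprobs": None})()
#         usage = type("MockUsage", (), {
#             "prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15,
#         })()
#         return type("MockResponse", (), {"choices": [choice], "usage": usage})()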
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-vercel-ai-gateway/tests/test_vercel_ai_gateway.py",
"license": "MIT License",
"lines": 611,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/llama_index/vector_stores/bigquery/base.py | """
Google BigQuery Vector Search.
BigQuery Vector Search is a fully managed feature of BigQuery that enables fast,
scalable similarity search over high-dimensional embeddings using approximate
nearest neighbor methods. For more information visit:
https://cloud.google.com/bigquery/docs/vector-search-intro
"""
import logging
from enum import Enum
from typing import Any, Dict, List, Optional, Union
from google.auth import credentials
from google.cloud import bigquery
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.schema import BaseNode, MetadataMode, TextNode
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryResult,
)
from llama_index.core.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
from pydantic import BaseModel
from llama_index.vector_stores.bigquery.utils import build_where_clause_and_params
_logger = logging.getLogger(__name__)
class _BigQueryRow(BaseModel):
node_id: str
embedding: List[float]
text: str
metadata: Dict[str, Any]
distance: Optional[float] = None
class DistanceType(str, Enum):
EUCLIDEAN = "EUCLIDEAN"
COSINE = "COSINE"
DOT_PRODUCT = "DOT_PRODUCT"
class BigQueryVectorStore(BasePydanticVectorStore):
"""
Vector store index using Google BigQuery.
Provides integration with BigQuery for storing and querying vector embeddings.
For more information, visit: https://cloud.google.com/bigquery/docs/vector-search-intro
Required IAM Permissions:
- `roles/bigquery.dataOwner` (BigQuery Data Owner)
- `roles/bigquery.dataEditor` (BigQuery Data Editor)
Examples:
`pip install llama-index-vector-stores-bigquery`
```python
from google.cloud.bigquery import Client
from llama_index.vector_stores.bigquery import BigQueryVectorStore
client = Client()
vector_store = BigQueryVectorStore(
table_id="my_bigquery_table",
dataset_id="my_bigquery_dataset",
bigquery_client=client,
)
```
"""
stores_text: bool = True
distance_type: DistanceType = DistanceType.EUCLIDEAN
_table: bigquery.Table = PrivateAttr()
_dataset: bigquery.Dataset = PrivateAttr()
_client: bigquery.Client = PrivateAttr()
_full_table_id: str = PrivateAttr()
def __init__(
self,
table_id: str,
dataset_id: str,
project_id: Optional[str] = None,
region: Optional[str] = None,
distance_type: Optional[DistanceType] = DistanceType.EUCLIDEAN,
auth_credentials: Optional[credentials.Credentials] = None,
bigquery_client: Optional[bigquery.Client] = None,
**kwargs: Any,
):
"""
Initialize a BigQuery Vector store.
If a `bigquery_client` is provided, it will be used directly. Otherwise, a client will be initialized using
the optional `project_id`, `region`, and/or `auth_credentials`. If none are provided, default credentials
will be used. For details on authentication, visit:
https://googleapis.dev/python/google-api-core/latest/auth.html
Args:
table_id: The ID of the BigQuery table to use for vector storage.
dataset_id: The ID of the dataset containing the table.
project_id: The GCP project ID. If not provided, it will be inferred from the client or environment.
region: Optionally specify a default location for datasets / tables.
distance_type: Optionally specify a distance type to use `EUCLIDEAN`, `COSINE`, or `DOT_PRODUCT`.
auth_credentials: Optional credentials object used to authenticate with BigQuery.
bigquery_client: An existing BigQuery client instance. If not provided, one will be created.
**kwargs: Additional keyword arguments passed to the parent class.
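        Example:
            A minimal sketch of explicit initialization (the project, dataset,
            and table names below are placeholders):
            ```python
            vector_store = BigQueryVectorStore(
                table_id="my_table",
                dataset_id="my_dataset",
                project_id="my-gcp-project",
                region="US",
                distance_type=DistanceType.COSINE,
            )
            ```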
"""
super().__init__(
**kwargs,
)
self._client: bigquery.Client = bigquery_client or self._initialize_client(
project_id, region, auth_credentials
)
self._dataset: bigquery.Dataset = self._create_dataset_if_not_exists(dataset_id)
self._table: bigquery.Table = self._create_table_if_not_exists(table_id)
self._full_table_id: str = (
f"{self._client.project}.{self._dataset.dataset_id}.{self._table.table_id}"
)
self.distance_type: DistanceType = DistanceType(distance_type)
@classmethod
def from_params(
cls,
table_id: str,
dataset_id: str,
project_id: Optional[str] = None,
region: Optional[str] = None,
distance_type: Optional[DistanceType] = DistanceType.EUCLIDEAN,
auth_credentials: Optional[credentials.Credentials] = None,
bigquery_client: Optional[bigquery.Client] = None,
) -> "BigQueryVectorStore":
"""
Initialize a BigQuery Vector store.
Args:
table_id: The ID of the BigQuery table to use for vector storage.
dataset_id: The ID of the dataset containing the table.
project_id: The GCP project ID. If not provided, it will be inferred from the client or environment.
region: Optionally specify a default location for datasets / tables.
distance_type: Optionally specify a distance type to use `EUCLIDEAN`, `COSINE`, or `DOT_PRODUCT`.
auth_credentials: Optional credentials object used to authenticate with BigQuery.
bigquery_client: An existing BigQuery client instance. If not provided, one will be created.
Returns:
BigQueryVectorStore
"""
return cls(
table_id=table_id,
dataset_id=dataset_id,
project_id=project_id,
region=region,
distance_type=distance_type,
auth_credentials=auth_credentials,
bigquery_client=bigquery_client,
)
@property
def client(self) -> Union[bigquery.Client, None]:
"""Return the BigQuery client."""
if not self._client:
return None
return self._client
@staticmethod
def _initialize_client(
project_id: Union[str, None],
region: Union[str, None],
auth_credentials: Union[credentials.Credentials, None],
) -> bigquery.Client:
"""
Initialize a new BigQuery client using the provided `project_id`, `region` and/or `auth_credentials`.
Defaults will be used in place of missing arguments. For details on authentication, see:
https://googleapis.dev/python/google-api-core/latest/auth.html
Args:
project_id: GCP project ID for the new client, or None to use default project resolution.
region: GCP region for the new client, or None to use default region.
auth_credentials: Credentials to authenticate the new client, or None to use default credentials.
Returns:
An initialized BigQuery client.
"""
return bigquery.Client(
project=project_id or None,
location=region or None,
credentials=auth_credentials or None,
)
@staticmethod
def _bigquery_row_to_node(row: _BigQueryRow) -> BaseNode:
"""
Convert a BigQuery row to a BaseNode object.
Args:
row: A row retrieved from BigQuery containing node_id, text,
metadata, embedding, and optional distance.
Returns:
Node object.
"""
node_id: str = row.node_id
text: str = row.text
metadata: Dict[str, Any] = row.metadata
embedding: List[float] = row.embedding
_: Union[float, None] = row.distance
try:
node = metadata_dict_to_node(metadata)
node.set_content(text)
node.embedding = embedding
except (ValueError, TypeError) as e:
node = TextNode(
id_=node_id,
text=text,
metadata=metadata,
embedding=embedding,
)
_logger.warning(
f"Failed to construct node {node_id} from metadata. Falling back to manual construction. Error: {e}"
)
return node
def _create_dataset_if_not_exists(self, dataset_id: str) -> bigquery.Dataset:
"""
Create a BigQuery dataset if it does not already exist.
For more details on creating datasets, visit:
https://cloud.google.com/bigquery/docs/datasets#create-dataset
Args:
dataset_id: The ID of the dataset to create.
Returns:
            The created or existing BigQuery dataset.
"""
dataset_ref = bigquery.dataset.DatasetReference(
project=self._client.project, dataset_id=dataset_id
)
return self._client.create_dataset(dataset_ref, exists_ok=True)
def _create_table_if_not_exists(self, table_id) -> bigquery.Table:
"""
Create a BigQuery table if it does not already exist.
For more information on creating tables, visit:
https://cloud.google.com/bigquery/docs/tables#create-table
Args:
table_id: The ID of the table to create.
Returns:
BigQuery table instance.
"""
schema = [
bigquery.SchemaField("node_id", "STRING", mode="REQUIRED"),
bigquery.SchemaField("text", "STRING", mode="REQUIRED"),
bigquery.SchemaField("metadata", "JSON"),
bigquery.SchemaField("embedding", "FLOAT", mode="REPEATED"),
]
table_ref = bigquery.TableReference.from_string(
f"{self._client.project}.{self._dataset.dataset_id}.{table_id}"
)
to_create = bigquery.Table(table_ref, schema=schema)
return self._client.create_table(to_create, exists_ok=True)
def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
"""
Add nodes to index.
Args:
nodes: List of nodes with embeddings.
Returns:
List of node IDs that were added.
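        Example:
            An illustrative call (assumes the store is initialized and the
            embedding dimensionality matches the table's data):
            ```python
            from llama_index.core.schema import TextNode
            node = TextNode(
                text="Lorem Ipsum",
                embedding=[0.1, 0.2, 0.3],
                metadata={"source": "docs"},
            )
            node_ids = vector_store.add([node])
            ```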
"""
node_ids: List[str] = []
json_records: List[Dict[str, Any]] = []
for node in nodes:
record = {
"node_id": node.node_id,
"text": node.get_content(metadata_mode=MetadataMode.NONE),
"embedding": node.get_embedding(),
"metadata": node_to_metadata_dict(
node, remove_text=True, flat_metadata=False
),
}
node_ids.append(node.node_id)
json_records.append(record)
job_config = bigquery.LoadJobConfig(schema=self._table.schema)
job = self._client.load_table_from_json(
json_rows=json_records, destination=self._table, job_config=job_config
)
job.result()
return node_ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
        Delete all nodes associated with the given ref_doc_id.
        Args:
            ref_doc_id: The doc_id of the document whose nodes should be deleted.
"""
query = f"""
DELETE FROM `{self._full_table_id}`
WHERE SAFE.JSON_VALUE(metadata, '$."doc_id"') = @to_delete;
"""
job_config = bigquery.QueryJobConfig(
query_parameters=[
bigquery.ScalarQueryParameter(
name="to_delete", type_="STRING", value=ref_doc_id
),
]
)
self._client.query_and_wait(query, job_config=job_config)
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""
Query the vector store using BigQuery's VECTOR_SEARCH to retrieve the top-k most similar nodes.
When `MetadataFilters` are provided and the table is indexed on relevant columns, BigQuery attempts to optimize
the search with pre-filtering before nearest neighbor search. If filters don't align with an index,
        post-filtering is applied after similarity search, potentially returning fewer than `similarity_top_k` results.
Consider increasing `similarity_top_k` when post-filtering is expected.
For more information on pre-filtering and post-filtering, see:
https://cloud.google.com/bigquery/docs/vector-index#pre-filters_and_post-filters
Assumes embeddings are normalized for similarity scoring.
Args:
query: Contains the query embedding, similarity_top_k value, and optional metadata filters.
Returns:
VectorStoreQueryResult
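        Example:
            A minimal sketch (assumes `vector_store` is an initialized
            `BigQueryVectorStore` storing embeddings of matching dimensionality):
            ```python
            from llama_index.core.vector_stores.types import VectorStoreQuery
            query = VectorStoreQuery(
                query_embedding=[0.1, 0.2, 0.3],
                similarity_top_k=5,
            )
            result = vector_store.query(query)
            for node, score in zip(result.nodes, result.similarities):
                print(node.node_id, score)
            ```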
"""
where_clause, query_params = build_where_clause_and_params(
filters=query.filters, node_ids=query.node_ids
)
base_table_query = f"""
SELECT
node_id,
text,
metadata,
embedding
FROM `{self._full_table_id}`
"""
if where_clause:
base_table_query += f" WHERE {where_clause}"
query_table_query = f"SELECT {query.query_embedding} AS input_embedding"
vector_search_query = f"""
SELECT base.node_id AS node_id,
base.text AS text,
base.metadata AS metadata,
base.embedding AS embedding,
distance
FROM
VECTOR_SEARCH(
({base_table_query}), 'embedding',
({query_table_query}), 'input_embedding',
top_k => @top_k,
distance_type => @distance_type
);
"""
query_params.extend(
[
bigquery.ScalarQueryParameter(
"top_k", type_="INTEGER", value=query.similarity_top_k
),
bigquery.ScalarQueryParameter(
"distance_type", type_="STRING", value=self.distance_type
),
]
)
job_config = bigquery.QueryJobConfig(
query_parameters=query_params,
)
rows: bigquery.table.RowIterator = self._client.query_and_wait(
vector_search_query, job_config=job_config
)
top_k_nodes: List[BaseNode] = []
top_k_scores: List[float] = []
top_k_ids: List[str] = []
for record in rows:
row = _BigQueryRow(
node_id=record.node_id,
text=record.text,
metadata=record.metadata,
embedding=record.embedding,
distance=record.distance,
)
node = self._bigquery_row_to_node(row)
node_id = record.node_id
# Assumes embeddings are normalized.
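            # Convert the raw distance into a score: inverse distance for
            # EUCLIDEAN, and a shift-and-scale mapping ((1 + d) / 2) for
            # COSINE and DOT_PRODUCT.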
score = (
1 / (1 + record.distance)
if self.distance_type == DistanceType.EUCLIDEAN
else (1 + record.distance) / 2
)
top_k_nodes.append(node)
top_k_scores.append(score)
top_k_ids.append(node_id)
return VectorStoreQueryResult(
nodes=top_k_nodes, similarities=top_k_scores, ids=top_k_ids
)
def get_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
) -> List[BaseNode]:
"""
Retrieve nodes from BigQuery using node IDs, metadata filters, or both.
If both `node_ids` and `filters` are provided, only nodes that satisfy
both conditions will be returned.
Args:
node_ids: Optional list of node IDs for retrieval.
            filters: Optional metadata filters for retrieval.
Returns:
A list of matching nodes.
Raises:
ValueError: If neither `node_ids` nor `filters` is provided.
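        Example:
            Illustrative usage combining both criteria (all values are
            placeholders):
            ```python
            from llama_index.core.vector_stores import MetadataFilter, MetadataFilters
            nodes = vector_store.get_nodes(
                node_ids=["node1", "node2"],
                filters=MetadataFilters(
                    filters=[MetadataFilter(key="author", value="a@example.com")]
                ),
            )
            ```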
"""
if not (node_ids or filters):
raise ValueError(
"get_nodes requires at least one filtering parameter: "
"'node_ids', 'filters', or both. Received neither."
)
where_clause, query_params = build_where_clause_and_params(node_ids, filters)
query = f"""
SELECT node_id,
text,
embedding,
metadata
FROM `{self._full_table_id}`
WHERE {where_clause};
"""
job_config = bigquery.QueryJobConfig(
query_parameters=query_params,
)
rows: bigquery.table.RowIterator = self._client.query_and_wait(
query, job_config=job_config
)
nodes: List[BaseNode] = []
for record in rows:
row = _BigQueryRow(
node_id=record.node_id,
text=record.text,
metadata=record.metadata,
embedding=record.embedding,
)
node = self._bigquery_row_to_node(row)
nodes.append(node)
return nodes
def delete_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None:
"""
Delete nodes from BigQuery based on node IDs, metadata filters, or both.
If both `node_ids` and `filters` are provided, only nodes matching both
criteria will be deleted.
Args:
node_ids: Optional list of node IDs to delete.
            filters: Optional metadata filters for deletion.
Raises:
            ValueError: If neither `node_ids` nor `filters` is provided.
"""
if not (node_ids or filters):
raise ValueError(
"delete_nodes requires at least one filtering parameter: "
"'node_ids', 'filters', or both. Received neither."
)
where_clause, query_params = build_where_clause_and_params(node_ids, filters)
query = f"""
DELETE FROM `{self._full_table_id}`
WHERE {where_clause};
"""
job_config = bigquery.QueryJobConfig(
query_parameters=query_params,
)
self._client.query_and_wait(query, job_config=job_config)
def clear(self) -> None:
"""
Clears the index.
This truncates the underlying table in BigQuery.
"""
query = f"""TRUNCATE TABLE `{self._full_table_id}`;"""
self._client.query_and_wait(query)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/llama_index/vector_stores/bigquery/base.py",
"license": "MIT License",
"lines": 444,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/llama_index/vector_stores/bigquery/utils.py | from typing import List, Optional, Tuple, Union
from google.cloud import bigquery
from llama_index.core.vector_stores.types import (
FilterOperator,
MetadataFilter,
MetadataFilters,
)
def build_where_clause_and_params(
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
) -> Tuple[
str, List[Union[bigquery.ScalarQueryParameter, bigquery.ArrayQueryParameter]]
]:
"""
Construct a parameterized SQL WHERE clause and corresponding query parameters.
The clause is built from the provided node IDs and metadata filters. Parameters
are returned separately to support safe, parameterized queries in BigQuery,
helping to prevent SQL injection. See:
https://cloud.google.com/bigquery/docs/parameterized-queries
If both `node_ids` and `filters` are provided, the resulting WHERE clause
combines conditions using AND logic.
Args:
node_ids: A list of node IDs to include in the filter.
filters: Metadata filters to apply to the query.
Returns:
A tuple (where_clause, query_params), where where_clause is a SQL WHERE clause string,
and query_params is a list of query parameters to bind to the query.
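    Example:
        A minimal sketch mirroring this package's tests (values are illustrative):
        ```python
        filters = MetadataFilters(
            filters=[MetadataFilter(key="author", value="a@example.com")]
        )
        where_clause, params = build_where_clause_and_params(
            node_ids=["node1"], filters=filters
        )
        # where_clause:
        # (SAFE.JSON_VALUE(metadata, '$."author"') = ?) AND node_id IN UNNEST(@node_ids)
        ```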
"""
query_params: List[
Union[bigquery.ScalarQueryParameter, bigquery.ArrayQueryParameter]
] = []
conditions: List[str] = []
if filters:
filter_where_clause, filter_query_params = (
_recursive_build_where_clause_from_filters(filters)
)
conditions.append(filter_where_clause)
query_params.extend(filter_query_params)
if node_ids:
conditions.append("node_id IN UNNEST(@node_ids)")
query_params.append(
bigquery.ArrayQueryParameter(
name="node_ids", array_type="STRING", values=node_ids
)
)
# if both `node_ids` and `filters` are provided, both criteria should be considered
where_clause = " AND ".join(conditions)
return where_clause, query_params
def _recursive_build_where_clause_from_filters(
meta_filters: MetadataFilters,
) -> Tuple[
str, List[Union[bigquery.ScalarQueryParameter, bigquery.ArrayQueryParameter]]
]:
"""
    Recursively construct a SQL WHERE clause and corresponding query parameters.
The provided MetadataFilters object may contain nested filter groups. This function
traverses them recursively to build a complete WHERE clause and the associated
query parameters for use in a parameterized BigQuery query.
Args:
        meta_filters: A potentially nested MetadataFilters filter object.
Returns:
A tuple (where_clause, query_params), where where_clause is a parameterized SQL WHERE clause string,
and query_params is a list of query parameters to bind to the query.
"""
filters_list: List[str] = []
query_params: List[
Union[bigquery.ScalarQueryParameter, bigquery.ArrayQueryParameter]
] = []
for filter_ in meta_filters.filters:
clause, params = (
_recursive_build_where_clause_from_filters(filter_)
if isinstance(filter_, MetadataFilters)
else _build_filter_clause(filter_)
)
filters_list.append(clause)
query_params.extend(params)
condition = f" {meta_filters.condition.value.upper()} "
filters_ = f"({condition.join(filters_list)})"
return filters_, query_params
def _build_filter_clause(
filter_: MetadataFilter,
) -> Tuple[
str, List[Union[bigquery.ScalarQueryParameter, bigquery.ArrayQueryParameter]]
]:
field = filter_.key
operator = filter_.operator
value = filter_.value
if operator == FilterOperator.IS_EMPTY:
clause = f"JSON_TYPE(JSON_QUERY(metadata, '$.\"{field}\"')) = 'null'"
params = []
elif operator == FilterOperator.IN or operator == FilterOperator.NIN:
bigquery_operator = _llama_to_bigquery_operator(operator)
clause = (
f"SAFE.JSON_VALUE(metadata, '$.\"{field}\"') {bigquery_operator} UNNEST(?)"
)
params = [
bigquery.ArrayQueryParameter(name=None, array_type="STRING", values=value)
]
    elif operator == FilterOperator.TEXT_MATCH:
        bigquery_operator = _llama_to_bigquery_operator(operator)
        # Bind the pattern as a positional parameter rather than interpolating it,
        # keeping the clause consistent with the returned parameter list and safe
        # from injection.
        clause = f"SAFE.JSON_VALUE(metadata, '$.\"{field}\"') {bigquery_operator} ?"
        params = [bigquery.ScalarQueryParameter(name=None, type_="STRING", value=value)]
    elif operator == FilterOperator.TEXT_MATCH_INSENSITIVE:
        bigquery_operator = _llama_to_bigquery_operator(operator)
        clause = f"LOWER(SAFE.JSON_VALUE(metadata, '$.\"{field}\"')) {bigquery_operator} LOWER(?)"
        params = [bigquery.ScalarQueryParameter(name=None, type_="STRING", value=value)]
else:
bigquery_operator = _llama_to_bigquery_operator(operator)
clause = f"SAFE.JSON_VALUE(metadata, '$.\"{field}\"') {bigquery_operator} ?"
params = [bigquery.ScalarQueryParameter(name=None, type_="STRING", value=value)]
return clause, params
def _llama_to_bigquery_operator(operator: FilterOperator) -> str:
operator_map = {
FilterOperator.EQ: "=",
FilterOperator.GT: ">",
FilterOperator.LT: "<",
FilterOperator.NE: "!=",
FilterOperator.GTE: ">=",
FilterOperator.LTE: "<=",
FilterOperator.IN: "IN",
FilterOperator.NIN: "NOT IN",
FilterOperator.TEXT_MATCH: "LIKE",
FilterOperator.TEXT_MATCH_INSENSITIVE: "LIKE",
FilterOperator.IS_EMPTY: "IS NULL",
}
try:
return operator_map[operator]
except KeyError:
raise ValueError(
f"Invalid operator `{operator.value}` is not a supported BigQuery operator."
)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/llama_index/vector_stores/bigquery/utils.py",
"license": "MIT License",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/sql_assertions.py | import sqlparse
def assert_equivalent_sql_statements(actual_query: str, expected_query: str):
def standardize_format(q: str) -> str:
"""Standardize SQL formatting for more reliable string comparison."""
formatted = sqlparse.format(
q,
strip_comments=True,
reindent=True,
indent_tabs=False,
)
return " ".join(formatted.lower().split())
formatted_query = standardize_format(actual_query)
formatted_expected = standardize_format(expected_query)
assert formatted_query == formatted_expected, (
f"\n[Actual Query]:\n{formatted_query}\n\n"
f"[Expected Query]:\n{formatted_expected}\n"
)
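# Example usage (illustrative): `standardize_format` lowercases, strips comments,
# and collapses whitespace, so these two statements compare as equivalent:
#
#     assert_equivalent_sql_statements(
#         "SELECT  node_id\nFROM `t`;",
#         "select node_id from `t`;",
#     )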
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/sql_assertions.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/test_add.py | import json
from llama_index.core.schema import TextNode
from llama_index.vector_stores.bigquery import BigQueryVectorStore
def test_add_sends_correct_node_data_to_bigquery(vector_store: BigQueryVectorStore):
"""It should insert nodes into BigQuery and return the corresponding IDs."""
# Given a list of nodes
nodes = [
TextNode(
id_="node-id",
text="Lorem Ipsum",
embedding=[0.1, 0.2, 0.3],
metadata={"source": "unit-test"},
)
]
# When the nodes are added to the vector store
result = vector_store.add(nodes)
# Then the load job should be triggered
vector_store.client.load_table_from_json.assert_called_once()
# And the correct node data should be sent to BigQuery
_, kwargs = vector_store.client.load_table_from_json.call_args
json_rows = kwargs["json_rows"]
expected_metadata = {
"source": "unit-test",
"_node_content": json.dumps(
{
"id_": "node-id",
"embedding": None,
"metadata": {"source": "unit-test"},
"excluded_embed_metadata_keys": [],
"excluded_llm_metadata_keys": [],
"relationships": {},
"metadata_template": "{key}: {value}",
"metadata_separator": "\n",
"text": "",
"mimetype": "text/plain",
"start_char_idx": None,
"end_char_idx": None,
"metadata_seperator": "\n",
"text_template": "{metadata_str}\n\n{content}",
"class_name": "TextNode",
}
),
"_node_type": "TextNode",
"document_id": "None",
"doc_id": "None",
"ref_doc_id": "None",
}
assert json_rows[0]["node_id"] == "node-id"
assert json_rows[0]["text"] == "Lorem Ipsum"
assert json_rows[0]["embedding"] == [0.1, 0.2, 0.3]
assert json_rows[0]["metadata"] == expected_metadata
# And the returned node ID list should match the inserted node IDs
assert result == ["node-id"]
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/test_add.py",
"license": "MIT License",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/test_delete.py | from google.cloud import bigquery
from llama_index.vector_stores.bigquery import BigQueryVectorStore
from sql_assertions import assert_equivalent_sql_statements
def test_delete_generates_correct_sql_and_params(vector_store: BigQueryVectorStore):
"""It should execute a parameterized DELETE query to remove nodes with the specified `ref_doc_id`."""
# Given a `ref_doc_id` to delete
ref_doc_id = "doc-1"
# When `delete` is called with the `ref_doc_id`
vector_store.delete(ref_doc_id)
# Then it should call BigQuery with the correct query parameters
vector_store.client.query_and_wait.assert_called_once()
args, kwargs = vector_store.client.query_and_wait.call_args
actual_query = args[0]
job_config = kwargs["job_config"]
expected_query_params = [
bigquery.ScalarQueryParameter(name="to_delete", type_="STRING", value="doc-1")
]
assert isinstance(job_config, bigquery.QueryJobConfig)
assert job_config.query_parameters == expected_query_params
# And the actual SQL query should match the expected SQL query
expected_query = """
DELETE FROM `mock-project.mock_dataset.mock_table`
WHERE SAFE.JSON_VALUE(metadata, '$."doc_id"') = @to_delete;
"""
assert_equivalent_sql_statements(actual_query, expected_query)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/test_delete.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/test_delete_nodes.py | import pytest
from google.cloud import bigquery
from llama_index.core.vector_stores import MetadataFilter, MetadataFilters
from llama_index.vector_stores.bigquery import BigQueryVectorStore
from sql_assertions import assert_equivalent_sql_statements
def test_delete_nodes_with_filters_generates_correct_sql_and_params(
vector_store: BigQueryVectorStore,
):
"""It should execute a parameterized DELETE query with correct filtering criteria."""
# Given filter criteria
filters = MetadataFilters(
filters=[
MetadataFilter(key="author", value="ceo@company.com"),
MetadataFilter(key="author", value="cfo@company.com"),
],
condition="or",
)
node_ids = ["node1", "node2"]
# When `delete` is called with the filter criteria
vector_store.delete_nodes(node_ids, filters)
# Then it should call BigQuery with the correct query parameters
vector_store.client.query_and_wait.assert_called_once()
args, kwargs = vector_store.client.query_and_wait.call_args
actual_query = args[0]
job_config = kwargs["job_config"]
expected_query_params = [
bigquery.ScalarQueryParameter(
name=None, type_="STRING", value="ceo@company.com"
),
bigquery.ScalarQueryParameter(
name=None, type_="STRING", value="cfo@company.com"
),
bigquery.ArrayQueryParameter(
name="node_ids", array_type="STRING", values=["node1", "node2"]
),
]
assert isinstance(job_config, bigquery.QueryJobConfig)
assert job_config.query_parameters == expected_query_params
# And the actual SQL query should match the expected SQL query
expected_query = """
DELETE FROM `mock-project.mock_dataset.mock_table`
WHERE (SAFE.JSON_VALUE(metadata, '$."author"') = ? OR SAFE.JSON_VALUE(metadata, '$."author"') = ?)
AND node_id IN UNNEST(@node_ids);
"""
assert_equivalent_sql_statements(actual_query, expected_query)
def test_delete_nodes_without_arguments_raises_value_error(
vector_store: BigQueryVectorStore,
):
"""It should raise a ValueError when neither `node_ids` nor `filters` is provided."""
# When `delete_nodes` is called without arguments, it should raise a ValueError
with pytest.raises(ValueError):
vector_store.delete_nodes()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/test_delete_nodes.py",
"license": "MIT License",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/test_get_nodes.py | import json
from unittest.mock import MagicMock, patch
import pytest
from google.cloud import bigquery
from llama_index.core.schema import TextNode
from llama_index.core.vector_stores import MetadataFilter, MetadataFilters
from llama_index.vector_stores.bigquery import BigQueryVectorStore
from sql_assertions import assert_equivalent_sql_statements
def test_get_nodes_with_filters_generates_correct_sql_and_params(
vector_store: BigQueryVectorStore,
):
"""It should execute a parameterized query to get nodes based on filter criteria"""
# Given filtering criteria
filters = MetadataFilters(
filters=[
MetadataFilter(key="author", value="ceo@company.com"),
MetadataFilter(key="author", value="cfo@company.com"),
],
condition="or",
)
node_ids = ["node1", "node2"]
# When `get_nodes` is called with the filtering criteria
vector_store.get_nodes(node_ids, filters)
# Then it should call BigQuery with the correct query parameters
vector_store.client.query_and_wait.assert_called_once()
args, kwargs = vector_store.client.query_and_wait.call_args
actual_query = args[0]
job_config = kwargs["job_config"]
expected_query_params = [
bigquery.ScalarQueryParameter(
name=None, type_="STRING", value="ceo@company.com"
),
bigquery.ScalarQueryParameter(
name=None, type_="STRING", value="cfo@company.com"
),
bigquery.ArrayQueryParameter(
name="node_ids", array_type="STRING", values=["node1", "node2"]
),
]
assert isinstance(job_config, bigquery.QueryJobConfig)
assert job_config.query_parameters == expected_query_params
# And the actual SQL query should match the expected SQL query
expected_query = """
SELECT node_id,
text,
embedding,
metadata
FROM `mock-project.mock_dataset.mock_table`
WHERE (SAFE.JSON_VALUE(metadata, '$."author"') = ? OR SAFE.JSON_VALUE(metadata, '$."author"') = ?)
AND node_id IN UNNEST(@node_ids);
"""
assert_equivalent_sql_statements(actual_query, expected_query)
def test_get_nodes_constructs_nodes_from_valid_metadata_row(
vector_store: BigQueryVectorStore,
):
"""It should construct a Node when metadata includes valid `_node_content` and `_node_type`."""
# Mock BigQuery returned record
mock_row = MagicMock()
mock_row.node_id = "node1"
mock_row.text = "Lorem Ipsum"
mock_row.embedding = [0.1, 0.2, 0.3]
mock_row.metadata = {
"author": "ceo@company.com",
"_node_content": json.dumps(
{
"id_": "node1",
"embedding": None,
"metadata": {"author": "ceo@company.com"},
"excluded_embed_metadata_keys": [],
"excluded_llm_metadata_keys": [],
"relationships": {},
"metadata_template": "{key}: {value}",
"metadata_separator": "\n",
"text": "",
"mimetype": "text/plain",
"start_char_idx": None,
"end_char_idx": None,
"metadata_seperator": "\n",
"text_template": "{metadata_str}\n\n{content}",
"class_name": "TextNode",
}
),
"_node_type": "TextNode",
"document_id": "None",
"doc_id": "None",
"ref_doc_id": "None",
}
vector_store.client.query_and_wait.return_value = [mock_row]
# When `get_nodes` is called
nodes = vector_store.get_nodes(node_ids=["node1"])
# Then a node should be returned corresponding to the record returned from BigQuery
assert nodes == [
TextNode(
id_="node1",
text="Lorem Ipsum",
embedding=[0.1, 0.2, 0.3],
metadata={"author": "ceo@company.com"},
)
]
@patch(
"llama_index.vector_stores.bigquery.base.metadata_dict_to_node",
side_effect=ValueError("_node_content not found in metadata dict."),
)
def test_get_nodes_falls_back_to_manual_textnode_on_metadata_parse_error(
mock_metadata_dict_to_node, vector_store: BigQueryVectorStore
):
"""It should fall back to constructing a TextNode when metadata lacks `_node_content` and `_node_type`."""
# Mock BigQuery returned record
mock_row = MagicMock()
mock_row.node_id = "node1"
mock_row.text = "This is a test node"
mock_row.embedding = [0.1, 0.2, 0.3]
mock_row.metadata = {"author": "ceo@company.com"}
vector_store.client.query_and_wait.return_value = [mock_row]
# When `get_nodes` is called and the parser raises an Exception
nodes = vector_store.get_nodes(node_ids=["node1"])
mock_metadata_dict_to_node.assert_called_once_with({"author": "ceo@company.com"})
    # (the ValueError side effect configured on the patch forces the fallback path)
# Then a fallback TextNode is constructed and returned
assert nodes == [
TextNode(
id_="node1",
text="This is a test node",
embedding=[0.1, 0.2, 0.3],
metadata={"author": "ceo@company.com"},
)
]
def test_get_nodes_without_arguments_raises_value_error(
vector_store: BigQueryVectorStore,
):
"""It should raise a ValueError when neither node_ids nor filters is provided."""
# When `get_nodes` is called without arguments, it should raise a `ValueError`
with pytest.raises(ValueError):
vector_store.get_nodes()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/test_get_nodes.py",
"license": "MIT License",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/test_parameterized_queries.py | import pytest
from google.cloud import bigquery
from google.cloud.bigquery import ArrayQueryParameter, ScalarQueryParameter
from llama_index.core.vector_stores import MetadataFilter, MetadataFilters
from llama_index.vector_stores.bigquery.utils import build_where_clause_and_params
from sql_assertions import assert_equivalent_sql_statements
def test_build_where_clause_and_params():
"""It should build a query from MetadataFilters and node IDs"""
# Given a list of `filters` and a list of `node_ids`
filters = MetadataFilters(
filters=[
MetadataFilter(key="author", value="ceo@company.com"),
MetadataFilter(key="author", value="cfo@company.com"),
],
condition="or",
)
node_ids = ["node1", "node2"]
# When the WHERE clause and query parameters are built
where_clause, query_params = build_where_clause_and_params(node_ids, filters)
# Then the SQL query WHERE clause should reflect both `filters` and `node_ids`,
query = f"""
SELECT *
FROM table
WHERE {where_clause}
"""
expected_query = """
SELECT *
FROM table
WHERE
(SAFE.JSON_VALUE(metadata, '$."author"') = ? OR SAFE.JSON_VALUE(metadata, '$."author"') = ?)
AND node_id IN UNNEST(@node_ids)
"""
assert_equivalent_sql_statements(query, expected_query)
# And the parameters should match the expected values
expected_query_params = [
ScalarQueryParameter(None, "STRING", "ceo@company.com"),
ScalarQueryParameter(None, "STRING", "cfo@company.com"),
ArrayQueryParameter("node_ids", "STRING", ["node1", "node2"]),
]
assert query_params == expected_query_params
def test_build_where_clause_and_params_with_nested_filters():
"""It should build a query from nested MetadataFilters and node IDs"""
# Given a nested list of `filters` and a list of `node_ids`
filters = MetadataFilters(
filters=[
MetadataFilters(
filters=[
MetadataFilter(
key="commit_date", value="2023-08-01", operator=">="
),
MetadataFilter(
key="commit_date", value="2023-08-15", operator="<="
),
],
condition="and",
),
MetadataFilters(
filters=[
MetadataFilter(key="author", value="mats@timescale.com"),
MetadataFilter(key="author", value="sven@timescale.com"),
],
condition="or",
),
],
condition="and",
)
node_ids = ["node1", "node2"]
# When the WHERE clause and query parameters are built
where_clause, query_params = build_where_clause_and_params(node_ids, filters)
# Then the SQL query WHERE clause should reflect both `filters` and `node_ids`,
query = f"""
SELECT *
FROM table
WHERE {where_clause}
"""
expected_query = """
SELECT *
FROM table
WHERE ((
SAFE.JSON_VALUE(metadata, '$."commit_date"') >= ? AND
SAFE.JSON_VALUE(metadata, '$."commit_date"') <= ?
) AND (
SAFE.JSON_VALUE(metadata, '$."author"') = ? OR
SAFE.JSON_VALUE(metadata, '$."author"') = ?
)) AND node_id IN UNNEST(@node_ids)
"""
assert_equivalent_sql_statements(query, expected_query)
# And the parameters should match the expected values
expected_query_params = [
ScalarQueryParameter(None, "STRING", "2023-08-01"),
ScalarQueryParameter(None, "STRING", "2023-08-15"),
ScalarQueryParameter(None, "STRING", "mats@timescale.com"),
ScalarQueryParameter(None, "STRING", "sven@timescale.com"),
ArrayQueryParameter("node_ids", "STRING", ["node1", "node2"]),
]
assert query_params == expected_query_params
@pytest.mark.parametrize(
(
"key",
"value",
"operator",
"expected_where_clause",
"expected_query_parameter",
),
[
(
"magna_carta",
"1215-12-15",
"==",
"(SAFE.JSON_VALUE(metadata, '$.\"magna_carta\"') = ?)",
[bigquery.ScalarQueryParameter(None, "STRING", "1215-12-15")],
),
(
"ramanujan",
1729,
"!=",
"(SAFE.JSON_VALUE(metadata, '$.\"ramanujan\"') != ?)",
[bigquery.ScalarQueryParameter(None, "STRING", 1729)],
),
(
"salary",
50_000,
">",
"(SAFE.JSON_VALUE(metadata, '$.\"salary\"') > ?)",
[bigquery.ScalarQueryParameter(None, "STRING", 50_000)],
),
(
"height",
6.5,
">=",
"(SAFE.JSON_VALUE(metadata, '$.\"height\"') >= ?)",
[bigquery.ScalarQueryParameter(None, "STRING", 6.5)],
),
(
"speed",
100,
"<",
"(SAFE.JSON_VALUE(metadata, '$.\"speed\"') < ?)",
[bigquery.ScalarQueryParameter(None, "STRING", 100)],
),
(
"weight",
120,
"<=",
"(SAFE.JSON_VALUE(metadata, '$.\"weight\"') <= ?)",
[bigquery.ScalarQueryParameter(None, "STRING", 120)],
),
(
"name",
["Alan Turing", "Grace Hopper"],
"in",
"(SAFE.JSON_VALUE(metadata, '$.\"name\"') IN UNNEST(?))",
[
bigquery.ArrayQueryParameter(
None, "STRING", ["Alan Turing", "Grace Hopper"]
)
],
),
(
"numbers",
[10, 20, 30],
"nin",
"(SAFE.JSON_VALUE(metadata, '$.\"numbers\"') NOT IN UNNEST(?))",
[bigquery.ArrayQueryParameter(None, "STRING", [10, 20, 30])],
),
(
"foo",
None,
"is_empty",
"(JSON_TYPE(JSON_QUERY(metadata, '$.\"foo\"')) = 'null')",
[],
),
],
)
def test_build_where_clause_and_params_with_single_filter(
key, value, operator, expected_where_clause, expected_query_parameter
):
"""It should construct a parameterized SQL WHERE clause and corresponding query parameters."""
# Given a MetadataFilters instance
filters = MetadataFilters(
filters=[MetadataFilter(key=key, value=value, operator=operator)]
)
# When the WHERE clause and query parameters are built
where_clause, query_params = build_where_clause_and_params(filters=filters)
# Then the WHERE clause should reflect the MetadataFilters
assert where_clause == expected_where_clause
# And the parameters should match the expected values
assert query_params == expected_query_parameter
def test_build_where_clause_and_params_without_args():
"""It should return empty where clause and parameters if no arguments are provided."""
# When there are no parameters
where_clause, query_params = build_where_clause_and_params()
# Then the results should be empty
assert query_params == []
assert where_clause == ""
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/test_parameterized_queries.py",
"license": "MIT License",
"lines": 195,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/test_vector_search.py | import json
from unittest.mock import MagicMock
import pytest
from google.cloud import bigquery
from llama_index.core.schema import TextNode
from llama_index.vector_stores.bigquery import BigQueryVectorStore
from llama_index.vector_stores.bigquery.base import DistanceType
from llama_index.core.vector_stores.types import (
VectorStoreQuery,
MetadataFilters,
MetadataFilter,
VectorStoreQueryResult,
)
from sql_assertions import assert_equivalent_sql_statements
def test_query_vector_search_generates_correct_sql_and_params(
vector_store: BigQueryVectorStore,
):
"""It should construct and execute a VECTOR_SEARCH query with correct parameters."""
# Given a VectorStoreQuery
query = VectorStoreQuery(
similarity_top_k=5,
query_embedding=[1.0, 2.0, 3.0],
)
# When `query` is called
vector_store.query(query)
# Then it should call BigQuery with the correct query parameters
vector_store.client.query_and_wait.assert_called_once()
args, kwargs = vector_store.client.query_and_wait.call_args
actual_query = args[0]
job_config = kwargs["job_config"]
expected_query_params = [
bigquery.ScalarQueryParameter("top_k", "INTEGER", 5),
bigquery.ScalarQueryParameter("distance_type", "STRING", "EUCLIDEAN"),
]
assert isinstance(job_config, bigquery.QueryJobConfig)
assert job_config.query_parameters == expected_query_params
# And the actual SQL query should match the expected SQL query
expected_query = f"""
SELECT base.node_id AS node_id,
base.text AS text,
base.metadata AS metadata,
base.embedding AS embedding,
distance
FROM VECTOR_SEARCH(
(
SELECT node_id,
text,
metadata,
embedding
FROM `mock-project.mock_dataset.mock_table` ), 'embedding',
( SELECT [1.0, 2.0, 3.0] AS input_embedding ), 'input_embedding',
top_k => @top_k,
distance_type => @distance_type
);
"""
assert_equivalent_sql_statements(actual_query, expected_query)
def test_query_vector_store_with_filters_generates_correct_sql_and_params(
vector_store: BigQueryVectorStore,
):
"""It should apply metadata filters and node ID constraints in the VECTOR_SEARCH query."""
# Given a VectorStoreQuery
query = VectorStoreQuery(
similarity_top_k=5,
query_embedding=[1.0, 2.0, 3.0],
filters=MetadataFilters(
filters=[
MetadataFilter(key="author", value="ceo@company.com"),
MetadataFilter(key="author", value="cfo@company.com"),
],
condition="or",
),
node_ids=["node1", "node2"],
)
# When `query` is called
vector_store.query(query)
# Then it should call BigQuery with the correct query parameters
vector_store.client.query_and_wait.assert_called_once()
args, kwargs = vector_store.client.query_and_wait.call_args
actual_query = args[0]
job_config = kwargs["job_config"]
expected_query_params = [
bigquery.ScalarQueryParameter(
name=None, type_="STRING", value="ceo@company.com"
),
bigquery.ScalarQueryParameter(
name=None, type_="STRING", value="cfo@company.com"
),
bigquery.ArrayQueryParameter(
name="node_ids", array_type="STRING", values=["node1", "node2"]
),
bigquery.ScalarQueryParameter("top_k", "INTEGER", 5),
bigquery.ScalarQueryParameter("distance_type", "STRING", "EUCLIDEAN"),
]
assert isinstance(job_config, bigquery.QueryJobConfig)
assert job_config.query_parameters == expected_query_params
# And the actual SQL query should match the expected SQL query
expected_query = f"""
SELECT base.node_id AS node_id,
base.text AS text,
base.metadata AS metadata,
base.embedding AS embedding,
distance
FROM
VECTOR_SEARCH(
(
SELECT node_id,
text,
metadata,
embedding
FROM `mock-project.mock_dataset.mock_table`
WHERE (SAFE.JSON_VALUE(metadata, '$."author"') = ? OR SAFE.JSON_VALUE(metadata, '$."author"') = ?)
AND node_id IN UNNEST(@node_ids)
), 'embedding',
(SELECT [1.0, 2.0, 3.0] AS input_embedding),
'input_embedding',
top_k => @top_k,
distance_type => @distance_type
);
"""
assert_equivalent_sql_statements(actual_query, expected_query)
@pytest.mark.parametrize(
("distance_type", "distance", "expected_similarity"),
[
(DistanceType.EUCLIDEAN, 1.5, 0.4),
(DistanceType.COSINE, 0.9974149030430577, 0.9987074515215288),
(DistanceType.DOT_PRODUCT, 17.0, 9.0),
],
)
def test_query_vector_store_result(
mock_bigquery_client,
distance_type: DistanceType,
distance: float,
expected_similarity: float,
):
"""It should return a VectorStoreQueryResult with correct nodes, IDs, and similarities based on distance type."""
# Mock BigQuery returned record
mock_row = MagicMock()
mock_row.node_id = "node1"
mock_row.text = "Lorem Ipsum"
mock_row.embedding = [0.1, 0.2, 0.3]
mock_row.metadata = {
"author": "ceo@company.com",
"_node_content": json.dumps(
{
"id_": "node1",
"embedding": None,
"metadata": {"author": "ceo@company.com"},
"excluded_embed_metadata_keys": [],
"excluded_llm_metadata_keys": [],
"relationships": {},
"metadata_template": "{key}: {value}",
"metadata_separator": "\n",
"text": "",
"mimetype": "text/plain",
"start_char_idx": None,
"end_char_idx": None,
"metadata_seperator": "\n",
"text_template": "{metadata_str}\n\n{content}",
"class_name": "TextNode",
}
),
"_node_type": "TextNode",
"document_id": "None",
"doc_id": "None",
"ref_doc_id": "None",
}
mock_row.distance = distance
# Given a vector store
vector_store = BigQueryVectorStore(
project_id="mock-project",
dataset_id="mock_dataset",
table_id="mock_table",
distance_type=distance_type,
bigquery_client=mock_bigquery_client,
)
vector_store.client.query_and_wait.return_value = [mock_row]
# And a VectorStoreQuery
query = VectorStoreQuery(
similarity_top_k=1,
query_embedding=[1.5, 2.5, 3.5],
)
# When `query` is called
results = vector_store.query(query)
# Then the correct VectorStoreQueryResult should be returned
expected_results = VectorStoreQueryResult(
nodes=[
TextNode(
id_="node1",
embedding=[0.1, 0.2, 0.3],
metadata={"author": "ceo@company.com"},
text="Lorem Ipsum",
)
],
similarities=[expected_similarity],
ids=["node1"],
)
assert results == expected_results
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/test_vector_search.py",
"license": "MIT License",
"lines": 197,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/test_vector_stores_bigquery.py | from llama_index.core.vector_stores.types import BasePydanticVectorStore
from llama_index.vector_stores.bigquery import BigQueryVectorStore
def test_class():
"""It should inherit from BasePydanticVectorStore"""
names_of_base_classes = [b.__name__ for b in BigQueryVectorStore.__mro__]
assert BasePydanticVectorStore.__name__ in names_of_base_classes
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/test_vector_stores_bigquery.py",
"license": "MIT License",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-github/llama_index/readers/github/repository/event.py | from typing import Optional
from llama_index.core.instrumentation.events import BaseEvent
from llama_index.core.schema import Document
# GitHub-specific LlamaIndex events
class GitHubRepositoryProcessingStartedEvent(BaseEvent):
"""Event dispatched when GitHub repository processing starts."""
repository_name: str
branch_or_commit: str
@classmethod
def class_name(cls) -> str:
return "GitHubRepositoryProcessingStartedEvent"
class GitHubRepositoryProcessingCompletedEvent(BaseEvent):
"""Event dispatched when GitHub repository processing completes."""
repository_name: str
branch_or_commit: str
total_documents: int = 0
@classmethod
def class_name(cls) -> str:
return "GitHubRepositoryProcessingCompletedEvent"
class GitHubTotalFilesToProcessEvent(BaseEvent):
"""Event dispatched with total number of files to process."""
repository_name: str
branch_or_commit: str
total_files: int
@classmethod
def class_name(cls) -> str:
return "GitHubTotalFilesToProcessEvent"
class GitHubFileProcessingStartedEvent(BaseEvent):
"""Event dispatched when file processing starts."""
file_path: str
file_type: str
@classmethod
def class_name(cls) -> str:
return "GitHubFileProcessingStartedEvent"
class GitHubFileProcessedEvent(BaseEvent):
"""Event dispatched when a file is successfully processed."""
file_path: str
file_type: str
file_size: Optional[int] = None
document: Optional[Document] = None
@classmethod
def class_name(cls) -> str:
return "GitHubFileProcessedEvent"
class GitHubFileSkippedEvent(BaseEvent):
"""Event dispatched when a file is skipped."""
file_path: str
file_type: str
reason: str = ""
@classmethod
def class_name(cls) -> str:
return "GitHubFileSkippedEvent"
class GitHubFileFailedEvent(BaseEvent):
"""Event dispatched when file processing fails."""
file_path: str
file_type: str
error: str = ""
@classmethod
def class_name(cls) -> str:
return "GitHubFileFailedEvent"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-github/llama_index/readers/github/repository/event.py",
"license": "MIT License",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-core/tests/llms/test_mock.py | import pytest
import json
from typing import Optional
from llama_index.core.llms import MockLLM
from llama_index.core.llms.mock import MockFunctionCallingLLM, BlockToContentCallback
from llama_index.core.llms.llm import ToolSelection
from llama_index.core.base.llms.types import (
ChatMessage,
TextBlock,
DocumentBlock,
ImageBlock,
ToolCallBlock,
ContentBlock,
)
@pytest.fixture()
def messages() -> list[ChatMessage]:
return [
ChatMessage(
role="user",
blocks=[
TextBlock(text="hello world"),
DocumentBlock(data=b"hello world"),
ImageBlock(image=b"1px"),
],
)
]
@pytest.fixture()
def tool_calls() -> list[ToolCallBlock]:
return [
ToolCallBlock(
tool_name="divide", tool_kwargs={"x": 6, "y": 2}, tool_call_id="1"
),
ToolCallBlock(
tool_name="divide",
tool_kwargs=json.dumps({"x": 6, "y": 2}),
tool_call_id="2",
),
ToolCallBlock(tool_name="divide", tool_kwargs="{", tool_call_id="3"),
ToolCallBlock(tool_name="hello", tool_kwargs={}, tool_call_id="4"),
ToolCallBlock(
tool_name="divide", tool_kwargs={"x": 1, "y": 0}, tool_call_id="5"
),
]
@pytest.fixture()
def blocks_to_content_callback() -> BlockToContentCallback:
def blocks_to_content(
blocks: list[ContentBlock], tool_calls: Optional[list[ToolCallBlock]] = None
) -> str:
def divide(x: int, y: int) -> int:
return int(x / y)
content = ""
for block in blocks:
if isinstance(block, TextBlock):
content += block.text
elif isinstance(block, ToolCallBlock):
if block.tool_name == "divide":
if isinstance(block.tool_kwargs, dict):
try:
content += f"<toolcall id={block.tool_call_id}>{divide(**block.tool_kwargs)}</toolcall>"
except Exception:
content += (
f"<toolcall id={block.tool_call_id}>error</toolcall>"
)
else:
try:
args = json.loads(block.tool_kwargs)
content += f"<toolcall id={block.tool_call_id}>{divide(**args)}</toolcall>"
except Exception:
content += (
f"<toolcall id={block.tool_call_id}>error</toolcall>"
)
else:
continue
return content
return blocks_to_content
def test_mock_llm_stream_complete_empty_prompt_no_max_tokens() -> None:
"""
Test that MockLLM.stream_complete with an empty prompt and max_tokens=None
does not raise a validation error.
This test case is based on issue #19353.
"""
llm = MockLLM(max_tokens=None)
response_gen = llm.stream_complete("")
# Consume the generator to trigger the potential error
responses = list(response_gen)
# Check that we received a single, empty response
assert len(responses) == 1
assert responses[0].text == ""
assert responses[0].delta == ""
def test_mock_function_calling_llm_init() -> None:
llm = MockFunctionCallingLLM()
assert llm.metadata.is_function_calling_model
def test_mock_function_calling_llm_sync_methods(messages: list[ChatMessage]) -> None:
llm = MockFunctionCallingLLM(max_tokens=200)
result = llm.chat(messages)
assert (
result.message.content
== "hello world<document>hello world</document><image>1px</image>"
)
cont = ""
stream = llm.stream_chat(messages)
for s in stream:
cont += s.message.content or ""
assert cont == "hello world<document>hello world</document><image>1px</image>"
@pytest.mark.asyncio
async def test_mock_function_calling_llm_async_methods(
messages: list[ChatMessage],
) -> None:
llm = MockFunctionCallingLLM(max_tokens=200)
result = await llm.achat(messages)
assert (
result.message.content
== "hello world<document>hello world</document><image>1px</image>"
)
cont = ""
stream = await llm.astream_chat(messages)
async for s in stream:
cont += s.message.content or ""
assert cont == "hello world<document>hello world</document><image>1px</image>"
def test_mock_function_calling_llm_tool_calls(
tool_calls: list[ToolCallBlock],
) -> None:
llm = MockFunctionCallingLLM(max_tokens=200)
result = llm.chat(messages=[ChatMessage(role="user", blocks=tool_calls)])
assert result.message.content == "<empty>"
assert llm.tool_calls == tool_calls
def test_mock_function_calling_llm_custom_callback(
tool_calls: list[ToolCallBlock],
blocks_to_content_callback: BlockToContentCallback,
) -> None:
llm = MockFunctionCallingLLM(
max_tokens=200, blocks_to_content_callback=blocks_to_content_callback
)
blocks = [TextBlock(text="hello world"), *tool_calls]
result = llm.chat(messages=[ChatMessage(role="user", blocks=blocks)])
assert (
result.message.content
== "hello world<toolcall id=1>3</toolcall><toolcall id=2>3</toolcall><toolcall id=3>error</toolcall><toolcall id=5>error</toolcall>"
)
@pytest.mark.asyncio
async def test_mock_function_calling_llm_astream_chat_with_tools(
messages: list[ChatMessage],
) -> None:
"""Test that astream_chat_with_tools works correctly."""
llm = MockFunctionCallingLLM(max_tokens=200)
# Mock tools list (can be empty for this test)
tools = []
cont = ""
stream = await llm.astream_chat_with_tools(tools=tools, chat_history=messages)
async for s in stream:
cont += s.message.content or ""
assert cont == "hello world<document>hello world</document><image>1px</image>"
def test_mock_function_calling_llm_get_tool_calls_from_response() -> None:
"""Test that get_tool_calls_from_response extracts tool calls correctly."""
llm = MockFunctionCallingLLM(max_tokens=200)
# Create a response with tool calls in additional_kwargs
tool_selection = ToolSelection(
tool_id="test_id",
tool_name="test_tool",
tool_kwargs={"arg1": "value1"},
)
from llama_index.core.base.llms.types import ChatResponse
response = ChatResponse(
message=ChatMessage(
role="assistant",
blocks=[
ToolCallBlock(
tool_call_id="test_id",
tool_name="test_tool",
tool_kwargs={"arg1": "value1"},
)
],
)
)
tool_calls = llm.get_tool_calls_from_response(response)
assert len(tool_calls) == 1
assert tool_calls[0].tool_id == tool_selection.tool_id
assert tool_calls[0].tool_name == tool_selection.tool_name
assert tool_calls[0].tool_kwargs == tool_selection.tool_kwargs
def test_mock_function_calling_llm_get_tool_calls_from_response_empty() -> None:
"""Test that get_tool_calls_from_response returns empty list when no tool calls."""
llm = MockFunctionCallingLLM(max_tokens=200)
from llama_index.core.base.llms.types import ChatResponse
response = ChatResponse(
message=ChatMessage(
role="assistant",
content="test",
additional_kwargs={},
)
)
tool_calls = llm.get_tool_calls_from_response(response)
assert len(tool_calls) == 0
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/llms/test_mock.py",
"license": "MIT License",
"lines": 192,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-gemini-live/llama_index/voice_agents/gemini_live/audio_interface.py | import pyaudio
import asyncio
from typing import Any, Optional
from typing_extensions import override
from google.genai.live import AsyncSession
from llama_index.core.voice_agents import BaseVoiceAgentInterface
FORMAT = pyaudio.paInt16
CHANNELS = 1
SEND_SAMPLE_RATE = 16000
RECEIVE_SAMPLE_RATE = 24000
CHUNK_SIZE = 1024
pya = pyaudio.PyAudio()
class GeminiLiveVoiceAgentInterface(BaseVoiceAgentInterface):
def __init__(self) -> None:
self.audio_in_queue: Optional[asyncio.Queue] = None
self.out_queue: Optional[asyncio.Queue] = None
self.session: Optional[AsyncSession] = None
self.audio_stream: Optional[pyaudio.Stream] = None
def _speaker_callback(self, *args: Any, **kwargs: Any) -> Any:
"""
Callback function for the audio output device.
Args:
*args: Can take any positional argument.
**kwargs: Can take any keyword argument.
Returns:
out (Any): This function can return any output.
"""
@override
async def _microphone_callback(self) -> None:
"""
Callback function for the audio input device.
Args:
*args: Can take any positional argument.
**kwargs: Can take any keyword argument.
Returns:
out (Any): This function can return any output.
"""
mic_info = pya.get_default_input_device_info()
self.audio_stream = await asyncio.to_thread(
pya.open,
format=FORMAT,
channels=CHANNELS,
rate=SEND_SAMPLE_RATE,
input=True,
input_device_index=mic_info["index"],
frames_per_buffer=CHUNK_SIZE,
)
if __debug__:
kwargs = {"exception_on_overflow": False}
else:
kwargs = {}
while True:
data = await asyncio.to_thread(self.audio_stream.read, CHUNK_SIZE, **kwargs)
await self.out_queue.put({"data": data, "mime_type": "audio/pcm"})
@override
def start(self, session: AsyncSession) -> None:
"""
Start the interface.
Args:
session (AsyncSession): the session to which the API is bound.
"""
self.session = session
self.audio_in_queue = asyncio.Queue()
self.out_queue = asyncio.Queue(maxsize=5)
def stop(self) -> None:
"""
Stop the interface.
Args:
None
Returns:
out (None): This function does not return anything.
"""
if self.audio_stream:
self.audio_stream.close()
else:
raise ValueError("Audio stream has never been opened, cannot be closed.")
    def interrupt(self) -> None:
        """
        Interrupt the interface by discarding one pending audio chunk.
        Note:
            ``get_nowait`` raises ``asyncio.QueueEmpty`` on an empty queue,
            so callers should check ``audio_in_queue.empty()`` first.
        Returns:
            out (None): This function does not return anything.
        """
        self.audio_in_queue.get_nowait()
@override
async def output(self, *args: Any, **kwargs: Any) -> Any:
"""
Process and output the audio.
Args:
*args: Can take any positional argument.
**kwargs: Can take any keyword argument.
Returns:
out (Any): This function can return any output.
"""
stream = await asyncio.to_thread(
pya.open,
format=FORMAT,
channels=CHANNELS,
rate=RECEIVE_SAMPLE_RATE,
output=True,
)
while True:
bytestream = await self.audio_in_queue.get()
await asyncio.to_thread(stream.write, bytestream)
@override
async def receive(self, data: bytes) -> Any:
"""
Receive audio data.
Args:
data (Any): received audio data (generally as bytes or str, but it is kept open also to other types).
*args: Can take any positional argument.
**kwargs: Can take any keyword argument.
Returns:
out (Any): This function can return any output.
"""
self.audio_in_queue.put_nowait(data)
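# --- Hedged usage sketch (illustrative; not part of the original module) ---
# The interface is driven by two long-running coroutines: the microphone
# callback fills ``out_queue`` while ``output`` drains ``audio_in_queue``.
# A caller that owns a live session might wire them up roughly like this;
# both loops run forever, so this only returns on cancellation:
async def _example_run(session: AsyncSession) -> None:
    interface = GeminiLiveVoiceAgentInterface()
    interface.start(session=session)
    await asyncio.gather(
        interface._microphone_callback(),
        interface.output(),
    )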
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-gemini-live/llama_index/voice_agents/gemini_live/audio_interface.py",
"license": "MIT License",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-gemini-live/llama_index/voice_agents/gemini_live/base.py | import asyncio
from importlib.metadata import PackageNotFoundError, version
import logging
from typing import Optional, Any, List, Dict, Callable
from typing_extensions import override
from .audio_interface import GeminiLiveVoiceAgentInterface
from .utils import tools_to_gemini_tools, tools_to_functions_dict
from .events import (
TextReceivedEvent,
AudioReceivedEvent,
ToolCallEvent,
ToolCallResultEvent,
)
from google.genai.live import AsyncSession
from google.genai import Client, types
from llama_index.core.llms import ChatMessage, AudioBlock, TextBlock
from llama_index.core.voice_agents import BaseVoiceAgent
from llama_index.core.tools import BaseTool
DEFAULT_MODEL = "models/gemini-2.0-flash-live-001"
logging.basicConfig(
level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s"
)
class GeminiLiveVoiceAgent(BaseVoiceAgent):
"""
Gemini Live Voice Agent.
"""
def __init__(
self,
model: Optional[str] = None,
interface: Optional[GeminiLiveVoiceAgentInterface] = None,
api_key: Optional[str] = None,
tools: Optional[List[BaseTool]] = None,
):
self.model: str = model or DEFAULT_MODEL
self._client: Optional[Client] = None
self.session: Optional[AsyncSession] = None
self._quitflag: bool = False
interface = interface or GeminiLiveVoiceAgentInterface()
super().__init__(api_key=api_key, tools=tools, interface=interface)
        if self.tools is not None:
            self.gemini_tools: List[Dict[str, List[Dict[str, str]]]] = (
                tools_to_gemini_tools(self.tools)
            )
self._functions_dict: Dict[
str, Callable[[Dict[str, Any], str, str], types.FunctionResponse]
] = tools_to_functions_dict(self.tools)
else:
self.gemini_tools = []
self._functions_dict = {}
@property
def client(self) -> Client:
if not self._client:
try:
package_v = version("llama-index-voice-agents-gemini-live")
except PackageNotFoundError:
package_v = "0.0.0"
self._client = Client(
api_key=self.api_key,
http_options={
"api_version": "v1beta",
"headers": {"x-goog-api-client": f"llamaindex/{package_v}"},
},
)
return self._client
def _signal_exit(self):
logging.info("Preparing exit...")
self._quitflag = True
@override
async def _start(self, session: AsyncSession) -> None:
"""
Start the voice agent.
"""
self.interface.start(session=session)
async def _run_loop(self) -> None:
logging.info("The agent is ready for the conversation")
logging.info("Type q and press enter to stop the conversation at any time")
while not self._quitflag:
text = await asyncio.to_thread(
input,
"",
)
if text == "q":
self._signal_exit()
await self.session.send(input=text or ".", end_of_turn=True)
logging.info("Session has been successfully closed")
await self.interrupt()
await self.stop()
async def send(self) -> None:
"""
Send audio to the websocket underlying the voice agent.
"""
while True:
msg = await self.interface.out_queue.get()
await self.session.send(input=msg)
@override
async def handle_message(self) -> Any:
"""
Handle incoming message.
Args:
message (Any): incoming message (should be dict, but it is kept open also for other types).
*args: Can take any positional argument.
**kwargs: Can take any keyword argument.
Returns:
out (Any): This function can return any output.
"""
while True:
turn = self.session.receive()
async for response in turn:
if response.server_content:
if data := response.data:
await self.interface.receive(data=data)
self._messages.append(
ChatMessage(
role="assistant", blocks=[AudioBlock(audio=data)]
)
)
self._events.append(
AudioReceivedEvent(type_t="audio_received", data=data)
)
continue
if text := response.text:
self._messages.append(
ChatMessage(role="assistant", blocks=[TextBlock(text=text)])
)
self._events.append(
TextReceivedEvent(type_t="text_received", text=text)
)
elif tool_call := response.tool_call:
function_responses: List[types.FunctionResponse] = []
for fn_call in tool_call.function_calls:
self._events.append(
ToolCallEvent(
type_t="tool_call",
tool_name=fn_call.name,
tool_args=fn_call.args,
)
)
result = self._functions_dict[fn_call.name](
fn_call.args, fn_call.id, fn_call.name
)
self._events.append(
ToolCallResultEvent(
type_t="tool_call_result",
tool_name=result.name,
tool_result=result.response,
)
)
function_responses.append(result)
await self.session.send_tool_response(
function_responses=function_responses
)
while not self.interface.audio_in_queue.empty():
await self.interrupt()
    async def start(self) -> None:
try:
async with (
self.client.aio.live.connect(
model=self.model,
config={
"response_modalities": ["AUDIO"],
"tools": self.gemini_tools,
},
) as session,
asyncio.TaskGroup() as tg,
):
self.session = session
await self._start(session=session)
_run_loop = tg.create_task(self._run_loop())
tg.create_task(self.send())
tg.create_task(self.interface._microphone_callback())
tg.create_task(self.handle_message())
tg.create_task(self.interface.output())
await _run_loop
raise asyncio.CancelledError("User requested exit")
except asyncio.CancelledError:
pass
        except ExceptionGroup as eg:
            logging.error("Voice agent task group failed: %s", eg.exceptions)
            await self.stop()
async def interrupt(self) -> None:
"""
Interrupt the input/output audio flow.
Args:
None
Returns:
out (None): This function does not return anything.
"""
self.interface.interrupt()
async def stop(self) -> None:
"""
Stop the conversation with the voice agent.
Args:
None
Returns:
out (None): This function does not return anything.
"""
self.interface.stop()
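# --- Hedged usage sketch (illustrative; not part of the original module) ---
# Running the agent end-to-end with a simple FunctionTool; this assumes a
# valid Google API key plus a working microphone and speaker:
if __name__ == "__main__":
    from llama_index.core.tools import FunctionTool
    def get_weather(location: str) -> str:
        """Get the weather."""
        return f"The weather at {location} is sunny"
    agent = GeminiLiveVoiceAgent(
        api_key="YOUR_GOOGLE_API_KEY",  # assumption: replace with a real key
        tools=[FunctionTool.from_defaults(fn=get_weather)],
    )
    asyncio.run(agent.start())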
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-gemini-live/llama_index/voice_agents/gemini_live/base.py",
"license": "MIT License",
"lines": 196,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-gemini-live/llama_index/voice_agents/gemini_live/events.py | from llama_index.core.voice_agents import BaseVoiceAgentEvent
from typing import Dict, Any
class AudioSentEvent(BaseVoiceAgentEvent):
data: bytes
class AudioReceivedEvent(BaseVoiceAgentEvent):
data: bytes
class TextSentEvent(BaseVoiceAgentEvent):
text: str
class TextReceivedEvent(BaseVoiceAgentEvent):
text: str
class ToolCallEvent(BaseVoiceAgentEvent):
tool_name: str
tool_args: Dict[str, Any]
class ToolCallResultEvent(BaseVoiceAgentEvent):
tool_name: str
tool_result: Any
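# Illustrative construction (mirrors how ``base.py`` emits these events;
# the ``type_t`` field is inherited from ``BaseVoiceAgentEvent``):
#   ToolCallEvent(type_t="tool_call", tool_name="divide", tool_args={"x": 6, "y": 2})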
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-gemini-live/llama_index/voice_agents/gemini_live/events.py",
"license": "MIT License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-gemini-live/llama_index/voice_agents/gemini_live/utils.py | from llama_index.core.tools import BaseTool
from typing import Dict, List, Callable, Any
from google.genai import types
def tool_to_fn(
tool: BaseTool,
) -> Callable[[Dict[str, Any], str, str], types.FunctionResponse]:
    """Wrap a LlamaIndex tool as a callable returning a Gemini ``FunctionResponse``."""
    def fn(args: Dict[str, Any], id_: str, name: str) -> types.FunctionResponse:
return types.FunctionResponse(
id=id_, name=name, response={"result": tool(**args).raw_output}
)
return fn
def tools_to_gemini_tools(
tools: List[BaseTool],
) -> List[Dict[str, List[Dict[str, str]]]]:
    """Convert LlamaIndex tools to Gemini's function-declaration format."""
    d = {"function_declarations": []}
for tool in tools:
d["function_declarations"].append(
{
"name": tool.metadata.get_name(),
"description": tool.metadata.description,
"parameters": tool.metadata.get_parameters_dict(),
}
)
return [d]
def tools_to_functions_dict(
tools: List[BaseTool],
) -> Dict[str, Callable[[Dict[str, Any], str, str], types.FunctionResponse]]:
    """Map each tool's name to a callable that runs it and wraps the result."""
    tools_dict = {}
for tool in tools:
tools_dict.update({tool.metadata.get_name(): tool_to_fn(tool)})
return tools_dict
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-gemini-live/llama_index/voice_agents/gemini_live/utils.py",
"license": "MIT License",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-gemini-live/tests/test_audio_interface.py | import pytest
import asyncio
from llama_index.core.voice_agents.interface import BaseVoiceAgentInterface
from llama_index.voice_agents.gemini_live.audio_interface import (
GeminiLiveVoiceAgentInterface,
)
from google.genai.live import AsyncSession
from typing_extensions import override
class MockSession(AsyncSession):
@override
def __init__(self):
pass
@pytest.mark.asyncio
async def test_audio_interface():
interface = GeminiLiveVoiceAgentInterface()
assert isinstance(interface, BaseVoiceAgentInterface)
assert interface.audio_in_queue is None
assert interface.out_queue is None
assert interface.session is None
assert interface.audio_stream is None
interface.start(session=MockSession())
assert isinstance(interface.session, MockSession)
assert isinstance(interface.out_queue, asyncio.Queue)
assert isinstance(interface.audio_in_queue, asyncio.Queue)
await interface.receive(data=b"hello world")
assert await interface.audio_in_queue.get() == b"hello world"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-gemini-live/tests/test_audio_interface.py",
"license": "MIT License",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-gemini-live/tests/test_utils.py | import pytest
from llama_index.voice_agents.gemini_live.utils import (
tools_to_gemini_tools,
tool_to_fn,
tools_to_functions_dict,
)
from llama_index.core.tools import FunctionTool
from typing import List, Dict, Any
from google.genai import types
def get_weather(location: str) -> str:
"""Get the weather."""
return "The weather at " + location + " is sunny"
def add(i: int, j: int) -> int:
"""Add two numbers."""
return i + j
@pytest.fixture()
def tools() -> List[FunctionTool]:
return [
FunctionTool.from_defaults(
name="get_weather",
description="Get the weather.",
fn=get_weather,
),
FunctionTool.from_defaults(
name="add",
description="Add two numbers.",
fn=add,
),
]
@pytest.fixture()
def function_declarations(
tools: List[FunctionTool],
) -> List[Dict[str, List[Dict[str, Any]]]]:
return [
{
"function_declarations": [
{
"name": tool.metadata.get_name(),
"description": tool.metadata.description,
"parameters": tool.metadata.get_parameters_dict(),
}
for tool in tools
]
}
]
def test_tools_to_gemini_tools(
tools: List[FunctionTool],
function_declarations: List[Dict[str, List[Dict[str, Any]]]],
):
assert tools_to_gemini_tools(tools) == function_declarations
def test_tool_to_fn(tools: List[FunctionTool]):
t0 = tool_to_fn(tools[0])
assert callable(tool_to_fn(tools[0]))
fr0 = t0({"location": "Frankfurt"}, "fn-001", "get_weather")
assert isinstance(fr0, types.FunctionResponse)
assert fr0.response == {"result": "The weather at Frankfurt is sunny"}
assert fr0.id == "fn-001"
assert fr0.name == "get_weather"
def test_tools_to_fn_dict(tools: List[FunctionTool]):
td = tools_to_functions_dict(tools)
assert len(td) == 2
assert "get_weather" in td
assert callable(td["get_weather"])
assert "add" in td
assert callable(td["add"])
assert td["add"]({"i": 2, "j": 3}, "fn-002", "add") == types.FunctionResponse(
id="fn-002", name="add", response={"result": 5}
)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-gemini-live/tests/test_utils.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-jira-issue/llama_index/tools/jira_issue/base.py | """Jira tool spec."""
import os
from typing import Optional, Dict, Any, Literal
from jira import JIRA
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class JiraIssueToolSpec(BaseToolSpec):
"""Atlassian Jira Issue Tool Spec."""
spec_functions = [
"search_issues",
"create_issue",
"add_comment_to_issue",
"update_issue_summary",
"update_issue_assignee",
"update_issue_status",
"update_issue_due_date",
"delete_issue",
]
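    # NOTE: the defaults below are read from the environment once, at import
    # time; export JIRA_ACCOUNT_EMAIL / JIRA_API_KEY / JIRA_SERVER_URL before
    # importing this module, or pass the credentials explicitly.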
def __init__(
self,
email: str = os.environ.get("JIRA_ACCOUNT_EMAIL", ""),
api_key: Optional[str] = os.environ.get("JIRA_API_KEY", ""),
server_url: Optional[str] = os.environ.get("JIRA_SERVER_URL", ""),
) -> None:
if email and api_key and server_url:
self.jira = JIRA(
basic_auth=(email, api_key),
server=server_url,
)
else:
raise Exception("Please provide Jira credentials to continue.")
def search_issues(self, jql_str: str) -> Dict[str, Any]:
"""
Search for JIRA issues using JQL.
Args:
jql_str (str): JQL query string to search for issues.
Returns:
Dict[str, Any]: A dictionary containing the search results or error message.
"""
try:
issues = self.jira.search_issues(jql_str)
if issues:
return {
"error": False,
"message": "Issues found",
"issues": [
{
"key": issue.key,
"summary": issue.fields.summary,
"status": issue.fields.status.name,
"assignee": issue.fields.assignee.displayName
if issue.fields.assignee
else None,
}
for issue in issues
],
}
else:
return {
"error": True,
"message": "No issues found.",
}
except Exception as e:
return {
"error": True,
"message": f"Failed to search issues: {e!s}",
}
def create_issue(
self,
project_key: str = "KAN",
summary: str = "New Issue",
description: Optional[str] = None,
issue_type: Literal["Task", "Bug", "Epic"] = "Task",
) -> Dict[str, Any]:
"""
Create a new JIRA issue.
Args:
project_key (str): The key of the project to create the issue in (default is "KAN").
summary (str): The summary of the new issue (default is "New Issue").
description (Optional[str]): The description of the new issue.
issue_type (str): The type of the issue to create, can be "Task", "Bug", or "Epic" (default is "Task").
Returns:
Dict[str, Any]: A dictionary indicating success or failure of the operation.
"""
try:
new_issue = self.jira.create_issue(
project=project_key,
summary=summary,
description=description,
issuetype={"name": issue_type},
)
return {
"error": False,
"message": f"Issue {new_issue.key} created successfully.",
"issue_key": new_issue.key,
}
except Exception as e:
return {
"error": True,
"message": f"Failed to create new issue: {e!s}",
}
def add_comment_to_issue(self, issue_key: str, comment: str) -> Dict[str, Any]:
"""
Add a comment to a JIRA issue.
Args:
issue_key (str): The key of the JIRA issue to comment on.
comment (str): The comment text to add.
Returns:
Dict[str, Any]: A dictionary indicating success or failure of the operation.
"""
try:
issue = self.jira.issue(issue_key)
self.jira.add_comment(issue, comment)
return {"error": False, "message": f"Comment added to issue {issue_key}."}
except Exception as e:
return {
"error": True,
"message": f"Failed to add comment to issue {issue_key}: {e!s}",
}
def update_issue_summary(
self, issue_key: str, new_summary: str, notify: bool = False
) -> Dict[str, Any]:
"""
Update the summary of a JIRA issue.
Args:
issue_key (str): The key of the JIRA issue to update.
new_summary (str): The new summary text for the issue.
notify (bool): Whether to email watchers of the issue about the update.
Returns:
Dict[str, Any]: A dictionary indicating success or failure of the operation.
"""
try:
issue = self.jira.issue(issue_key)
issue.update(summary=new_summary, notify=notify)
return {"error": False, "message": f"Issue {issue_key} summary updated."}
except Exception as e:
return {
"error": True,
"message": f"Failed to update issue {issue_key}: {e!s}",
}
    def update_issue_assignee(
        self, issue_key: str, assignee_full_name: str
    ) -> Dict[str, Any]:
"""
Update the assignee of the Jira issue using the assignee's full name.
Args:
issue_key (str): The key of the Jira issue to update.
assignee_full_name (str): The full name of the user to assign the issue to.
Returns:
Dict[str, Any]: A dictionary indicating success or failure of the operation.
"""
try:
# Search for users by display name
users = self.jira.search_users(query=assignee_full_name)
# Find exact match for the full name
target_user = None
for user in users:
if user.displayName.lower() == assignee_full_name.lower():
target_user = user
break
if not target_user:
return {
"error": True,
"message": f"User with full name '{assignee_full_name}' not found",
}
# Get the issue
issue = self.jira.issue(issue_key)
issue.update(assignee={"accountId": target_user.accountId})
return {
"error": False,
"message": f"Issue {issue_key} successfully assigned to {assignee_full_name}",
}
except Exception as e:
return {
"error": True,
"message": f"An error occurred while updating the assignee: {e!s}",
}
def update_issue_status(
self, issue_key: str, new_status: Literal["To Do", "In Progress", "Done"]
) -> Dict[str, Any]:
"""
Update the status of a JIRA issue.
Args:
issue_key (str): The key of the JIRA issue to update.
new_status (str): The new status to set for the issue.
Returns:
Dict[str, Any]: A dictionary indicating success or failure of the operation.
"""
try:
issue = self.jira.issue(issue_key)
transitions = self.jira.transitions(issue)
transition_id = next(
(t["id"] for t in transitions if t["name"] == new_status), None
)
if transition_id:
self.jira.transition_issue(issue, transition_id)
return {
"error": False,
"message": f"Issue {issue_key} status updated to {new_status}.",
}
else:
available_statuses = [t["name"] for t in transitions]
return {
"error": True,
"message": f"Status '{new_status}' not available for issue {issue_key}. Available transitions: {available_statuses}",
}
except Exception as e:
return {
"error": True,
"message": f"Failed to update status for issue {issue_key}: {e!s}",
}
def update_issue_due_date(
self, issue_key: str, due_date: Optional[str] = None
) -> Dict[str, Any]:
"""
Update the due date of a JIRA issue.
Args:
issue_key (str): The key of the JIRA issue to update.
due_date (Optional[str]): The new due date in 'YYYY-MM-DD' format.
Returns:
Dict[str, Any]: A dictionary indicating success or failure of the operation.
"""
if due_date:
try:
from datetime import datetime
datetime.strptime(due_date, "%Y-%m-%d")
except ValueError:
return {
"error": True,
"message": "Invalid date format. Use YYYY-MM-DD.",
}
try:
issue = self.jira.issue(issue_key)
issue.update(duedate=due_date)
return {
"error": False,
"message": f"Issue {issue_key} due date {'updated' if due_date else 'cleared'}.",
}
except Exception as e:
return {
"error": True,
"message": f"Failed to update due date for issue {issue_key}: {e!s}",
}
def delete_issue(self, issue_key: str) -> Dict[str, Any]:
"""
Delete a JIRA issue.
Args:
issue_key (str): The key of the JIRA issue to delete.
Returns:
Dict[str, Any]: A dictionary indicating success or failure of the operation.
"""
try:
issue = self.jira.issue(issue_key)
issue.delete()
return {
"error": False,
"message": f"Issue {issue_key} deleted successfully.",
}
except Exception as e:
return {
"error": True,
"message": f"Failed to delete issue {issue_key}: {e!s}",
}
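# --- Hedged usage sketch (illustrative; not part of the original module) ---
# ``BaseToolSpec`` exposes ``to_tool_list()``, so the spec can be handed to
# an agent as ordinary function tools:
if __name__ == "__main__":
    tool_spec = JiraIssueToolSpec(
        email="you@example.com",  # assumption: replace with real credentials
        api_key="YOUR_JIRA_API_KEY",
        server_url="https://your-domain.atlassian.net",
    )
    print([t.metadata.name for t in tool_spec.to_tool_list()])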
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-jira-issue/llama_index/tools/jira_issue/base.py",
"license": "MIT License",
"lines": 260,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |