sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-seltz/llama_index/tools/seltz/base.py | """Seltz tool spec."""
from typing import List, Optional
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from seltz import Includes, Seltz
class SeltzToolSpec(BaseToolSpec):
    """
    Seltz web knowledge tool spec.

    Seltz provides fast, up-to-date web data with context-engineered
    web content and sources for real-time AI reasoning.
    """

    spec_functions = ["search"]

    def __init__(self, api_key: str) -> None:
        """
        Initialize with parameters.

        Args:
            api_key: Seltz API key. Obtain one at https://www.seltz.ai/

        """
        self.client = Seltz(api_key=api_key)

    def search(self, query: str, max_documents: Optional[int] = 10) -> List[Document]:
        """
        Search the web using Seltz and return relevant documents with sources.

        Args:
            query: The search query text.
            max_documents: Maximum number of documents to return (default: 10).

        Returns:
            A list of Document objects containing web content and source URLs.

        """
        # Only build an Includes payload when a document cap was requested.
        includes: Optional[Includes] = None
        if max_documents:
            includes = Includes(max_documents=max_documents)

        response = self.client.search(query, includes=includes)

        results: List[Document] = []
        for doc in response.documents:
            results.append(
                Document(text=doc.content or "", metadata={"url": doc.url or ""})
            )
        return results
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-seltz/llama_index/tools/seltz/base.py",
"license": "MIT License",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-seltz/tests/test_tools_seltz.py | import os
from unittest.mock import Mock, patch
import pytest
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.tools.seltz import SeltzToolSpec
def test_class():
    """SeltzToolSpec must inherit from BaseToolSpec."""
    ancestor_names = {base.__name__ for base in SeltzToolSpec.__mro__}
    assert BaseToolSpec.__name__ in ancestor_names
def test_spec_functions():
    """The search method must be exposed to agents via spec_functions."""
    exposed = SeltzToolSpec.spec_functions
    assert "search" in exposed
@patch("llama_index.tools.seltz.base.Seltz")
def test_init(mock_seltz):
    """Constructing the spec should build a Seltz client with the given key."""
    spec = SeltzToolSpec(api_key="test-key")
    mock_seltz.assert_called_once_with(api_key="test-key")
    assert spec.client is mock_seltz.return_value
@patch("llama_index.tools.seltz.base.Includes")
@patch("llama_index.tools.seltz.base.Seltz")
def test_search(mock_seltz, mock_includes_class):
    """search() forwards query/includes to the client and maps hits to Documents."""
    # Two fake Seltz result documents with content and source URLs.
    mock_doc1 = Mock()
    mock_doc1.content = "Result content 1"
    mock_doc1.url = "https://example1.com"
    mock_doc2 = Mock()
    mock_doc2.content = "Result content 2"
    mock_doc2.url = "https://example2.com"
    mock_response = Mock()
    mock_response.documents = [mock_doc1, mock_doc2]
    mock_client = Mock()
    mock_client.search.return_value = mock_response
    mock_seltz.return_value = mock_client
    mock_includes = Mock()
    mock_includes_class.return_value = mock_includes

    tool = SeltzToolSpec(api_key="test-key")
    results = tool.search("test query", max_documents=5)

    # The includes payload and the client call must carry the caller's arguments.
    mock_includes_class.assert_called_once_with(max_documents=5)
    mock_client.search.assert_called_once_with("test query", includes=mock_includes)
    assert len(results) == 2
    assert all(isinstance(doc, Document) for doc in results)
    assert results[0].text == "Result content 1"
    assert results[0].metadata["url"] == "https://example1.com"
    assert results[1].text == "Result content 2"
    assert results[1].metadata["url"] == "https://example2.com"
@patch("llama_index.tools.seltz.base.Includes")
@patch("llama_index.tools.seltz.base.Seltz")
def test_search_default_max_documents(mock_seltz, mock_includes_class):
    """Omitting max_documents should fall back to the default cap of 10."""
    mock_response = Mock()
    mock_response.documents = []
    mock_client = Mock()
    mock_client.search.return_value = mock_response
    mock_seltz.return_value = mock_client
    mock_includes = Mock()
    mock_includes_class.return_value = mock_includes

    tool = SeltzToolSpec(api_key="test-key")
    tool.search("test query")

    # Default argument value must flow into the Includes payload.
    mock_includes_class.assert_called_once_with(max_documents=10)
    mock_client.search.assert_called_once_with("test query", includes=mock_includes)
@patch("llama_index.tools.seltz.base.Seltz")
def test_search_empty_results(mock_seltz):
    """A search that yields no hits should return an empty list."""
    empty_response = Mock()
    empty_response.documents = []
    fake_client = Mock()
    fake_client.search.return_value = empty_response
    mock_seltz.return_value = fake_client

    spec = SeltzToolSpec(api_key="test-key")
    assert spec.search("no results query") == []
# -- Integration tests --
# These tests hit the real Seltz API and require a valid SELTZ_API_KEY
# environment variable. They are skipped by default in CI.
@pytest.mark.skipif(
    not os.environ.get("SELTZ_API_KEY"),
    reason="SELTZ_API_KEY not set",
)
def test_integration_search():
    """Integration test: perform a real search against the Seltz API."""
    tool = SeltzToolSpec(api_key=os.environ["SELTZ_API_KEY"])
    results = tool.search("what is llama index?", max_documents=3)
    # Live API: assert only on shape/invariants, not on specific content.
    assert isinstance(results, list)
    assert len(results) > 0
    assert all(isinstance(doc, Document) for doc in results)
    assert all(doc.text for doc in results)
    assert all(doc.metadata.get("url") for doc in results)
@pytest.mark.skipif(
    not os.environ.get("SELTZ_API_KEY"),
    reason="SELTZ_API_KEY not set",
)
def test_integration_search_returns_documents():
    """Integration test: verify search returns well-formed Document objects."""
    tool = SeltzToolSpec(api_key=os.environ["SELTZ_API_KEY"])
    docs = tool.search("artificial intelligence", max_documents=5)
    assert isinstance(docs, list)
    assert len(docs) > 0
    # Every hit carries text and a source URL.
    assert all(isinstance(d, Document) for d in docs)
    assert all(d.text for d in docs)
    assert all(d.metadata.get("url") for d in docs)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-seltz/tests/test_tools_seltz.py",
"license": "MIT License",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-google-genai/tests/test_base_cleanup.py | from types import SimpleNamespace
import pytest
from llama_index.llms.google_genai import base as base_mod
from llama_index.llms.google_genai.base import GoogleGenAI
class FakeChat:
    """Synchronous chat double: optionally raises on send, or hands back a canned stream."""

    def __init__(self, send_message_exc=None, stream_iter=None):
        # Exception raised by send_message, and iterator returned by the stream API.
        self._send_message_exc = send_message_exc
        self._stream_iter = stream_iter

    def send_message(self, *_args, **_kwargs):
        if self._send_message_exc:
            raise self._send_message_exc
        return SimpleNamespace()

    def send_message_stream(self, *_args, **_kwargs):
        return self._stream_iter
class FakeAioChat:
    """Async chat double mirroring FakeChat for the aio client surface."""

    def __init__(self, send_message_exc=None, stream_aiter=None):
        # Exception raised by send_message, and async iterator for streaming.
        self._send_message_exc = send_message_exc
        self._stream_aiter = stream_aiter

    async def send_message(self, *_args, **_kwargs):
        if self._send_message_exc:
            raise self._send_message_exc
        return SimpleNamespace()

    async def send_message_stream(self, *_args, **_kwargs):
        return self._stream_aiter
class FakeClient:
    """Minimal stand-in for the google-genai client surface the LLM touches."""

    def __init__(self, chat: FakeChat, aio_chat: FakeAioChat):
        def make_sync_chat(**_kwargs):
            return chat

        def make_async_chat(**_kwargs):
            return aio_chat

        # Mirror client.chats.create(...) and client.aio.chats.create(...).
        self.chats = SimpleNamespace(create=make_sync_chat)
        self.aio = SimpleNamespace(
            chats=SimpleNamespace(create=make_async_chat)
        )
def _make_llm(file_mode="fileapi"):
    """Build a GoogleGenAI instance without running pydantic validation or network setup."""
    # model_construct skips validators/__init__; set only the attrs the
    # chat/cleanup code paths under test actually read.
    llm = GoogleGenAI.model_construct()
    object.__setattr__(llm, "model", "gemini-2.0-flash")
    object.__setattr__(llm, "file_mode", file_mode)
    object.__setattr__(llm, "max_retries", 0)
    object.__setattr__(llm, "_generation_config", {})
    return llm
def test_chat_bubbles_up_cleanup_error_if_delete_fails(monkeypatch):
    """
    Test that if cleanup fails, the cleanup exception (RuntimeError) is raised.

    Note: In Python, if an exception occurs in 'finally', it supersedes
    any exception that occurred in 'try'.
    """

    async def fake_prepare_chat_params(*_args, **_kwargs):
        # Returns (message, chat kwargs, uploaded file names needing cleanup).
        return "hello", {}, ["file1"]

    monkeypatch.setattr(base_mod, "prepare_chat_params", fake_prepare_chat_params)
    # 1. Force the model call to fail (ValueError)
    chat = FakeChat(send_message_exc=ValueError("boom"))
    aio_chat = FakeAioChat()
    llm = _make_llm(file_mode="fileapi")
    llm._client = FakeClient(chat=chat, aio_chat=aio_chat)

    # 2. Force delete to fail (RuntimeError)
    def fake_delete_uploaded_files(_names, _client):
        raise RuntimeError("delete failed")

    monkeypatch.setattr(base_mod, "delete_uploaded_files", fake_delete_uploaded_files)
    # 3. We expect the RuntimeError (cleanup failure) to be the one raised/visible
    with pytest.raises(RuntimeError, match="delete failed"):
        llm._chat([])
@pytest.mark.asyncio
async def test_achat_bubbles_up_cleanup_error_if_delete_fails(monkeypatch):
    """Async variant: a failing async cleanup supersedes the model-call error."""

    async def fake_prepare_chat_params(*_args, **_kwargs):
        return "hello", {}, ["file1"]

    monkeypatch.setattr(base_mod, "prepare_chat_params", fake_prepare_chat_params)
    # The async chat path fails; the sync double stays inert.
    chat = FakeChat()
    aio_chat = FakeAioChat(send_message_exc=ValueError("boom"))
    llm = _make_llm(file_mode="fileapi")
    llm._client = FakeClient(chat=chat, aio_chat=aio_chat)

    async def fake_adelete_uploaded_files(_names, _client):
        raise RuntimeError("delete failed")

    monkeypatch.setattr(base_mod, "adelete_uploaded_files", fake_adelete_uploaded_files)
    with pytest.raises(RuntimeError, match="delete failed"):
        await llm._achat([])
def test_stream_chat_runs_cleanup(monkeypatch):
    """Streaming chat must delete uploaded files once the stream is exhausted."""

    async def fake_prepare_chat_params(*_args, **_kwargs):
        return "hello", {}, ["file1"]

    monkeypatch.setattr(base_mod, "prepare_chat_params", fake_prepare_chat_params)

    class Chunk:
        # Minimal shape of a streamed Gemini response chunk.
        def __init__(self):
            part = SimpleNamespace(text="hi")
            content = SimpleNamespace(parts=[part])
            cand = SimpleNamespace(content=content)
            self.candidates = [cand]

    stream_iter = iter([Chunk()])
    chat = FakeChat(stream_iter=stream_iter)
    aio_chat = FakeAioChat()
    llm = _make_llm(file_mode="fileapi")
    llm._client = FakeClient(chat=chat, aio_chat=aio_chat)
    monkeypatch.setattr(
        base_mod,
        "chat_from_gemini_response",
        lambda *_args, **_kwargs: SimpleNamespace(delta=None),
    )
    deleted = {"called": False}

    def fake_delete_uploaded_files(names, _client):
        # Cleanup must receive exactly the files prepare_chat_params reported.
        assert names == ["file1"]
        deleted["called"] = True

    monkeypatch.setattr(base_mod, "delete_uploaded_files", fake_delete_uploaded_files)
    gen = llm._stream_chat([])
    _ = next(gen)
    # Draining the generator triggers the cleanup path.
    with pytest.raises(StopIteration):
        next(gen)
    assert deleted["called"] is True
@pytest.mark.asyncio
async def test_astream_chat_runs_cleanup(monkeypatch):
    """Async streaming chat must delete uploaded files after the stream ends."""

    async def fake_prepare_chat_params(*_args, **_kwargs):
        return "hello", {}, ["file1"]

    monkeypatch.setattr(base_mod, "prepare_chat_params", fake_prepare_chat_params)

    class Chunk:
        # Minimal shape of a streamed Gemini response chunk.
        def __init__(self):
            part = SimpleNamespace(text="hi")
            content = SimpleNamespace(parts=[part])
            cand = SimpleNamespace(content=content)
            self.candidates = [cand]

    async def stream_aiter():
        yield Chunk()

    chat = FakeChat()
    aio_chat = FakeAioChat(stream_aiter=stream_aiter())
    llm = _make_llm(file_mode="fileapi")
    llm._client = FakeClient(chat=chat, aio_chat=aio_chat)
    monkeypatch.setattr(
        base_mod,
        "chat_from_gemini_response",
        lambda *_args, **_kwargs: SimpleNamespace(delta=None),
    )
    deleted = {"called": False}

    async def fake_adelete_uploaded_files(names, _client):
        assert names == ["file1"]
        deleted["called"] = True

    monkeypatch.setattr(base_mod, "adelete_uploaded_files", fake_adelete_uploaded_files)
    agen = await llm._astream_chat([])
    item = await agen.__anext__()
    assert item is not None
    # Exhausting the async generator triggers cleanup.
    with pytest.raises(StopAsyncIteration):
        await agen.__anext__()
    assert deleted["called"] is True
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-google-genai/tests/test_base_cleanup.py",
"license": "MIT License",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-microsoft-sharepoint/llama_index/readers/microsoft_sharepoint/event.py | """SharePoint Reader Events and Types for LlamaIndex."""
from enum import Enum
from typing import Any, Optional
from llama_index.core.instrumentation.events import BaseEvent
class FileType(Enum):
    """Enum for file types supported by custom parsers."""

    IMAGE = "image"
    DOCUMENT = "document"
    TEXT = "text"
    HTML = "html"
    CSV = "csv"
    # NOTE: value is the short form "md", unlike the other spelled-out values.
    MARKDOWN = "md"
    SPREADSHEET = "spreadsheet"
    PRESENTATION = "presentation"
    PDF = "pdf"
    JSON = "json"
    # TXT ("txt") coexists with TEXT ("text"); both denote plain-text content.
    TXT = "txt"
    UNKNOWN = "unknown"
class TotalPagesToProcessEvent(BaseEvent):
    """Event emitted when the total number of pages to process is known."""

    # Total count of SharePoint pages the reader will attempt to process.
    total_pages: int

    @classmethod
    def class_name(cls) -> str:
        """Return the serialized identifier for this event class."""
        return "TotalPagesToProcessEvent"
class PageDataFetchStartedEvent(BaseEvent):
    """Event emitted when fetching data for a page starts."""

    # Identifier of the page whose fetch has begun.
    page_id: str

    @classmethod
    def class_name(cls) -> str:
        """Return the serialized identifier for this event class."""
        return "PageDataFetchStartedEvent"
class PageDataFetchCompletedEvent(BaseEvent):
    """Event emitted when fetching data for a page completes."""

    # Identifier of the fetched page.
    page_id: str
    # The resulting document, when one was produced (typed loosely as Any).
    document: Optional[Any] = None

    @classmethod
    def class_name(cls) -> str:
        """Return the serialized identifier for this event class."""
        return "PageDataFetchCompletedEvent"
class PageSkippedEvent(BaseEvent):
    """Event emitted when a page is skipped (e.g., filtered out by callback)."""

    # Identifier of the skipped page.
    page_id: str

    @classmethod
    def class_name(cls) -> str:
        """Return the serialized identifier for this event class."""
        return "PageSkippedEvent"
class PageFailedEvent(BaseEvent):
    """Event emitted when processing a page fails."""

    # Identifier of the page that failed.
    page_id: str
    # Human-readable description of the failure.
    error: str

    @classmethod
    def class_name(cls) -> str:
        """Return the serialized identifier for this event class."""
        return "PageFailedEvent"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-microsoft-sharepoint/llama_index/readers/microsoft_sharepoint/event.py",
"license": "MIT License",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-yugabytedb/llama_index/vector_stores/yugabytedb/base.py | import logging
import re
from typing import (
Any,
Dict,
List,
NamedTuple,
Optional,
Type,
Union,
TYPE_CHECKING,
Set,
Tuple,
Literal,
)
import pgvector # noqa
from pgvector.sqlalchemy import HALFVEC
import psycopg2 # noqa
import sqlalchemy
import sqlalchemy.ext.asyncio
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.schema import BaseNode, MetadataMode, TextNode
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
FilterOperator,
MetadataFilters,
MetadataFilter,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.core.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
if TYPE_CHECKING:
from sqlalchemy.sql.selectable import Select
# Permitted PostgreSQL type names for indexed metadata keys
# (must match the pg_type_map inside get_data_model).
PGType = Literal[
    "text",
    "int",
    "integer",
    "numeric",
    "float",
    "double precision",
    "boolean",
    "date",
    "timestamp",
    "uuid",
]
class DBEmbeddingRow(NamedTuple):
    """One query-result row: node id, text, metadata, and similarity score."""

    node_id: str
    text: str
    metadata: dict
    similarity: float
_logger = logging.getLogger(__name__)
def get_data_model(
    base: Type,
    index_name: str,
    schema_name: str,
    hybrid_search: bool,
    text_search_config: str,
    cache_okay: bool,
    embed_dim: int = 1536,
    use_jsonb: bool = False,
    use_halfvec: bool = False,
    indexed_metadata_keys: Optional[Set[Tuple[str, PGType]]] = None,
) -> Any:
    """
    Create a dynamic SQLAlchemy model bound to a new table.

    Args:
        base: Declarative base class the generated model extends.
        index_name: Suffix used to derive the table, class, and index names.
        schema_name: Database schema the table is created in.
        hybrid_search: If True, add a generated tsvector column for full-text search.
        text_search_config: Postgres text-search configuration for to_tsvector.
        cache_okay: Value for the TSVector TypeDecorator's cache_ok flag.
        embed_dim: Dimensionality of the embedding column.
        use_jsonb: Store metadata as JSONB instead of JSON.
        use_halfvec: Use half-precision HALFVEC embeddings instead of Vector.
        indexed_metadata_keys: (key, pg_type) pairs to btree-index inside metadata_.

    Raises:
        ValueError: If an indexed metadata key uses an unsupported pg_type.
    """
    from pgvector.sqlalchemy import Vector
    from sqlalchemy import Column, Computed
    from sqlalchemy.dialects.postgresql import (
        BIGINT,
        JSON,
        JSONB,
        TSVECTOR,
        VARCHAR,
        UUID,
        DOUBLE_PRECISION,
    )
    from sqlalchemy import cast, column
    from sqlalchemy import String, Integer, Numeric, Float, Boolean, Date, DateTime
    from sqlalchemy.schema import Index
    from sqlalchemy.types import TypeDecorator

    # Map of supported PGType strings to SQLAlchemy column types.
    pg_type_map = {
        "text": String,
        "int": Integer,
        "integer": Integer,
        "numeric": Numeric,
        "float": Float,
        "double precision": DOUBLE_PRECISION,  # or Float(precision=53)
        "boolean": Boolean,
        "date": Date,
        "timestamp": DateTime,
        "uuid": UUID,
    }

    indexed_metadata_keys = indexed_metadata_keys or set()
    # check that types are in pg_type_map
    for key, pg_type in indexed_metadata_keys:
        if pg_type not in pg_type_map:
            raise ValueError(
                f"Invalid type {pg_type} for key {key}. "
                f"Must be one of {list(pg_type_map.keys())}"
            )

    class TSVector(TypeDecorator):
        # Wraps TSVECTOR so the cache_ok flag can be set per call.
        impl = TSVECTOR
        cache_ok = cache_okay

    tablename = "data_%s" % index_name  # dynamic table name
    class_name = "Data%s" % index_name  # dynamic class name
    indexname = "%s_idx" % index_name  # dynamic index name

    metadata_dtype = JSONB if use_jsonb else JSON

    # Choose embedding column precision (half-precision saves storage).
    if use_halfvec:
        embedding_col = Column(HALFVEC(embed_dim))  # type: ignore
    else:
        embedding_col = Column(Vector(embed_dim))  # type: ignore

    # Expression (btree) indexes over selected metadata_ keys, cast to their PG type.
    metadata_indices = [
        Index(
            f"{indexname}_{key}_{pg_type.replace(' ', '_')}",
            cast(column("metadata_").op("->>")(key), pg_type_map[pg_type]),
            postgresql_using="btree",
        )
        for key, pg_type in indexed_metadata_keys
    ]

    if hybrid_search:

        class HybridAbstractData(base):  # type: ignore
            __abstract__ = True  # this line is necessary
            id = Column(BIGINT, primary_key=True, autoincrement=True)
            text = Column(VARCHAR, nullable=False)
            metadata_ = Column(metadata_dtype)
            node_id = Column(VARCHAR)
            embedding = embedding_col
            # Generated (persisted) tsvector column for full-text search.
            text_search_tsv = Column(  # type: ignore
                TSVector(),
                Computed(
                    "to_tsvector('%s', text)" % text_search_config, persisted=True
                ),
            )

        model = type(
            class_name,
            (HybridAbstractData,),
            {
                "__tablename__": tablename,
                "__table_args__": (*metadata_indices, {"schema": schema_name}),
            },
        )

        # GIN index over the tsvector column for fast full-text search.
        Index(
            indexname,
            model.text_search_tsv,  # type: ignore
            postgresql_using="gin",
        )
        # Btree index over ref_doc_id for delete-by-document lookups.
        Index(
            f"{indexname}_1",
            model.metadata_["ref_doc_id"].astext,  # type: ignore
            postgresql_using="btree",
        )
    else:

        class AbstractData(base):  # type: ignore
            __abstract__ = True  # this line is necessary
            id = Column(BIGINT, primary_key=True, autoincrement=True)
            text = Column(VARCHAR, nullable=False)
            metadata_ = Column(metadata_dtype)
            node_id = Column(VARCHAR)
            embedding = embedding_col

        model = type(
            class_name,
            (AbstractData,),
            {
                "__tablename__": tablename,
                "__table_args__": (*metadata_indices, {"schema": schema_name}),
            },
        )

        # Btree index over ref_doc_id for delete-by-document lookups.
        Index(
            f"{indexname}_1",
            model.metadata_["ref_doc_id"].astext,  # type: ignore
            postgresql_using="btree",
        )

    return model
class YBVectorStore(BasePydanticVectorStore):
    """
    Yugabytedb Vector Store.

    Examples:
        `pip install llama-index-vector-stores-yugabytedb`

        ```python
        from llama_index.vector_stores.yugabytedb import YBVectorStore

        # Create YBVectorStore instance
        vector_store = YBVectorStore.from_params(
            database="vector_db",
            host="localhost",
            password="password",
            port=5432,
            user="yugabytedb",
            table_name="paul_graham_essay",
            embed_dim=1536,  # openai embedding dimension
            use_halfvec=True,  # Enable half precision
        )
        ```

    """

    stores_text: bool = True
    flat_metadata: bool = False

    # Connection and table configuration (validated by pydantic).
    connection_string: str
    table_name: str
    schema_name: str
    embed_dim: int
    hybrid_search: bool
    text_search_config: str
    cache_ok: bool
    perform_setup: bool
    debug: bool
    use_jsonb: bool
    create_engine_kwargs: Dict
    initialization_fail_on_error: bool = False
    indexed_metadata_keys: Optional[Set[Tuple[str, PGType]]] = None
    hnsw_kwargs: Optional[Dict[str, Any]]
    use_halfvec: bool = False

    # Private SQLAlchemy state: declarative base, dynamic model class,
    # engine, session factory, and lazy-initialization flag.
    _base: Any = PrivateAttr()
    _table_class: Any = PrivateAttr()
    _engine: Optional[sqlalchemy.engine.Engine] = PrivateAttr(default=None)
    _session: sqlalchemy.orm.Session = PrivateAttr()
    _is_initialized: bool = PrivateAttr(default=False)
    def __init__(
        self,
        connection_string: Optional[Union[str, sqlalchemy.engine.URL]] = None,
        table_name: Optional[str] = None,
        schema_name: Optional[str] = None,
        hybrid_search: bool = False,
        text_search_config: str = "english",
        embed_dim: int = 1536,
        cache_ok: bool = False,
        perform_setup: bool = True,
        debug: bool = False,
        use_jsonb: bool = False,
        hnsw_kwargs: Optional[Dict[str, Any]] = None,
        create_engine_kwargs: Optional[Dict[str, Any]] = None,
        initialization_fail_on_error: bool = False,
        use_halfvec: bool = False,
        engine: Optional[sqlalchemy.engine.Engine] = None,
        indexed_metadata_keys: Optional[Set[Tuple[str, PGType]]] = None,
    ) -> None:
        """
        Constructor.

        Args:
            connection_string (Union[str, sqlalchemy.engine.URL]): Connection string to yugabytedb db.
            table_name (str): Table name.
            schema_name (str): Schema name.
            hybrid_search (bool, optional): Enable hybrid search. Defaults to False.
            text_search_config (str, optional): Text search configuration. Defaults to "english".
            embed_dim (int, optional): Embedding dimensions. Defaults to 1536.
            cache_ok (bool, optional): Enable cache. Defaults to False.
            perform_setup (bool, optional): If db should be set up. Defaults to True.
            debug (bool, optional): Debug mode. Defaults to False.
            use_jsonb (bool, optional): Use JSONB instead of JSON. Defaults to False.
            hnsw_kwargs (Optional[Dict[str, Any]], optional): HNSW kwargs, a dict that
                contains "hnsw_ef_construction", "hnsw_ef_search", "hnsw_m", and optionally "hnsw_dist_method". Defaults to None,
                which turns off HNSW search.
            create_engine_kwargs (Optional[Dict[str, Any]], optional): Engine parameters to pass to create_engine. Defaults to None.
            initialization_fail_on_error (bool, optional): Re-raise setup errors instead of only logging them. Defaults to False.
            use_halfvec (bool, optional): If `True`, use half-precision vectors. Defaults to False.
            engine (Optional[sqlalchemy.engine.Engine], optional): SQLAlchemy engine instance to use. Defaults to None.
            indexed_metadata_keys (Optional[List[Tuple[str, str]]], optional): Set of metadata keys with their type to index. Defaults to None.

        Raises:
            ValueError: If hybrid_search is enabled without a text_search_config.
        """
        # Fold identifiers to lowercase and apply defaults.
        table_name = table_name.lower() if table_name else "llamaindex"
        schema_name = schema_name.lower() if schema_name else "public"

        if hybrid_search and text_search_config is None:
            raise ValueError(
                "Sparse vector index creation requires "
                "a text search configuration specification."
            )

        from sqlalchemy.orm import declarative_base

        super().__init__(
            connection_string=str(connection_string),
            table_name=table_name,
            schema_name=schema_name,
            hybrid_search=hybrid_search,
            text_search_config=text_search_config,
            embed_dim=embed_dim,
            cache_ok=cache_ok,
            perform_setup=perform_setup,
            debug=debug,
            use_jsonb=use_jsonb,
            hnsw_kwargs=hnsw_kwargs,
            create_engine_kwargs=create_engine_kwargs or {},
            initialization_fail_on_error=initialization_fail_on_error,
            use_halfvec=use_halfvec,
            indexed_metadata_keys=indexed_metadata_keys,
        )

        # sqlalchemy model
        self._base = declarative_base()
        self._table_class = get_data_model(
            self._base,
            table_name,
            schema_name,
            hybrid_search,
            text_search_config,
            cache_ok,
            embed_dim=embed_dim,
            use_jsonb=use_jsonb,
            use_halfvec=use_halfvec,
            indexed_metadata_keys=indexed_metadata_keys,
        )
        # Honor a caller-supplied engine; otherwise one is created lazily in _connect.
        if engine is not None:
            self._engine = engine
    async def close(self) -> None:
        """Release database resources (sessions and the engine), if initialized."""
        if not self._is_initialized:
            return
        # NOTE(review): _session is a sessionmaker, while close_all() looks like
        # the (deprecated) Session.close_all classmethod — confirm this attribute
        # actually exists on the sessionmaker at runtime.
        self._session.close_all()
        if self._engine:
            self._engine.dispose()
    @classmethod
    def class_name(cls) -> str:
        """Return the identifier LlamaIndex uses for this vector-store class."""
        return "YBVectorStore"
    @classmethod
    def from_params(
        cls,
        host: Optional[str] = None,
        port: Optional[str] = None,
        database: Optional[str] = None,
        user: Optional[str] = None,
        password: Optional[str] = None,
        load_balance: Optional[bool] = False,
        topology: Optional[str] = None,
        yb_servers_refresh_interval: Optional[int] = 300,
        fallback_to_topology_keys_only: Optional[bool] = False,
        failed_host_ttl_seconds: Optional[int] = 5,
        table_name: str = "llamaindex",
        schema_name: str = "public",
        connection_string: Optional[Union[str, sqlalchemy.engine.URL]] = None,
        hybrid_search: bool = False,
        text_search_config: str = "english",
        embed_dim: int = 1536,
        cache_ok: bool = False,
        perform_setup: bool = True,
        debug: bool = False,
        use_jsonb: bool = False,
        hnsw_kwargs: Optional[Dict[str, Any]] = None,
        create_engine_kwargs: Optional[Dict[str, Any]] = None,
        use_halfvec: bool = False,
        indexed_metadata_keys: Optional[Set[Tuple[str, PGType]]] = None,
    ) -> "YBVectorStore":
        """
        Construct from params.

        Args:
            host (Optional[str], optional): Host of yugabytedb connection. Defaults to None.
            port (Optional[str], optional): Port of yugabytedb connection. Defaults to None.
            database (Optional[str], optional): Postgres DB name. Defaults to None.
            user (Optional[str], optional): Postgres username. Defaults to None.
            password (Optional[str], optional): Postgres password. Defaults to None.
            load_balance (Optional[bool], optional): YugabyteDB smart-driver load balancing flag. Defaults to False.
            topology (Optional[str], optional): Value for the driver's topology_keys parameter. Defaults to None.
            yb_servers_refresh_interval (Optional[int], optional): Driver server-list refresh interval. Defaults to 300.
            fallback_to_topology_keys_only (Optional[bool], optional): Driver fallback flag. Defaults to False.
            failed_host_ttl_seconds (Optional[int], optional): Driver failed-host TTL. Defaults to 5.
            table_name (str): Table name. Defaults to "llamaindex".
            schema_name (str): Schema name. Defaults to "public".
            connection_string (Union[str, sqlalchemy.engine.URL]): Connection string to yugabytedb db
            hybrid_search (bool, optional): Enable hybrid search. Defaults to False.
            text_search_config (str, optional): Text search configuration. Defaults to "english".
            embed_dim (int, optional): Embedding dimensions. Defaults to 1536.
            cache_ok (bool, optional): Enable cache. Defaults to False.
            perform_setup (bool, optional): If db should be set up. Defaults to True.
            debug (bool, optional): Debug mode. Defaults to False.
            use_jsonb (bool, optional): Use JSONB instead of JSON. Defaults to False.
            hnsw_kwargs (Optional[Dict[str, Any]], optional): HNSW kwargs, a dict that
                contains "hnsw_ef_construction", "hnsw_ef_search", "hnsw_m", and optionally "hnsw_dist_method". Defaults to None,
                which turns off HNSW search.
            create_engine_kwargs (Optional[Dict[str, Any]], optional): Engine parameters to pass to create_engine. Defaults to None.
            use_halfvec (bool, optional): If `True`, use half-precision vectors. Defaults to False.
            indexed_metadata_keys (Optional[Set[Tuple[str, str]]], optional): Set of metadata keys to index. Defaults to None.

        Returns:
            YBVectorStore: Instance of YBVectorStore constructed from params.
        """
        from urllib.parse import urlencode

        # Build YugabyteDB smart-driver query parameters for the connection URL.
        query_params = {"load_balance": str(load_balance)}
        if topology is not None:
            query_params["topology_keys"] = topology
        if yb_servers_refresh_interval is not None:
            query_params["yb_servers_refresh_interval"] = yb_servers_refresh_interval
        if fallback_to_topology_keys_only:
            query_params["fallback_to_topology_keys_only"] = (
                fallback_to_topology_keys_only
            )
        if failed_host_ttl_seconds is not None:
            query_params["failed_host_ttl_seconds"] = failed_host_ttl_seconds
        query_str = urlencode(query_params)

        # An explicit connection_string wins over the assembled URL.
        conn_str = (
            connection_string
            or f"yugabytedb+psycopg2://{user}:{password}@{host}:{port}/{database}?{query_str}"
        )

        return cls(
            connection_string=conn_str,
            table_name=table_name,
            schema_name=schema_name,
            hybrid_search=hybrid_search,
            text_search_config=text_search_config,
            embed_dim=embed_dim,
            cache_ok=cache_ok,
            perform_setup=perform_setup,
            debug=debug,
            use_jsonb=use_jsonb,
            hnsw_kwargs=hnsw_kwargs,
            create_engine_kwargs=create_engine_kwargs,
            use_halfvec=use_halfvec,
            indexed_metadata_keys=indexed_metadata_keys,
        )
@property
def client(self) -> Any:
if not self._is_initialized:
return None
return self._engine
def _connect(self) -> Any:
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
self._engine = self._engine or create_engine(
self.connection_string, echo=self.debug, **self.create_engine_kwargs
)
self._session = sessionmaker(self._engine)
    def _create_schema_if_not_exists(self) -> bool:
        """
        Create the schema if it does not exist.

        Returns True if the schema was created, False if it already existed.

        Raises:
            ValueError: If schema_name is not a valid SQL identifier (it is
                interpolated into DDL below, so it must be validated first).
        """
        if not re.match(r"^[A-Za-z_][A-Za-z0-9_]*$", self.schema_name):
            raise ValueError(f"Invalid schema_name: {self.schema_name}")
        with self._session() as session, session.begin():
            # Check if the specified schema exists with "CREATE" statement
            check_schema_statement = sqlalchemy.text(
                f"SELECT schema_name FROM information_schema.schemata WHERE schema_name = :schema_name"
            ).bindparams(schema_name=self.schema_name)
            result = session.execute(check_schema_statement).fetchone()
            # If the schema does not exist, then create it
            schema_doesnt_exist = result is None
            if schema_doesnt_exist:
                create_schema_statement = sqlalchemy.text(
                    # DDL won't tolerate quoted string literal here for schema_name,
                    # so use a format string to embed the schema_name directly, instead of a param.
                    f"CREATE SCHEMA IF NOT EXISTS {self.schema_name}"
                )
                session.execute(create_schema_statement)
                session.commit()
        return schema_doesnt_exist
def _create_tables_if_not_exists(self) -> None:
with self._session() as session, session.begin():
self._base.metadata.create_all(session.connection())
def _create_extension(self) -> None:
import sqlalchemy
with self._session() as session, session.begin():
statement = sqlalchemy.text("CREATE EXTENSION IF NOT EXISTS vector")
session.execute(statement)
session.commit()
def _create_hnsw_index(self) -> None:
import sqlalchemy
if (
"hnsw_ef_construction" not in self.hnsw_kwargs
or "hnsw_m" not in self.hnsw_kwargs
):
raise ValueError(
"Make sure hnsw_ef_search, hnsw_ef_construction, and hnsw_m are in hnsw_kwargs."
)
hnsw_ef_construction = self.hnsw_kwargs.pop("hnsw_ef_construction")
hnsw_m = self.hnsw_kwargs.pop("hnsw_m")
# If user didn’t specify an operator, pick a default based on whether halfvec is used
if "hnsw_dist_method" in self.hnsw_kwargs:
hnsw_dist_method = self.hnsw_kwargs.pop("hnsw_dist_method")
else:
if self.use_halfvec:
hnsw_dist_method = "halfvec_l2_ops"
else:
# Default to vector_cosine_ops
hnsw_dist_method = "vector_cosine_ops"
index_name = f"{self._table_class.__tablename__}_embedding_idx"
with self._session() as session, session.begin():
statement = sqlalchemy.text(
f"CREATE INDEX IF NOT EXISTS {index_name} "
f"ON {self.schema_name}.{self._table_class.__tablename__} "
f"USING ybhnsw (embedding {hnsw_dist_method}) "
f"WITH (m = {hnsw_m}, ef_construction = {hnsw_ef_construction})"
)
session.execute(statement)
session.commit()
    def _initialize(self) -> None:
        """
        Connect and run one-time setup: schema, extension, tables, HNSW index.

        Each setup step is best-effort: failures are logged and, when
        initialization_fail_on_error is set, re-raised.
        """
        fail_on_error = self.initialization_fail_on_error
        if not self._is_initialized:
            self._connect()
            if self.perform_setup:
                try:
                    self._create_schema_if_not_exists()
                except Exception as e:
                    _logger.warning(f"PG Setup: Error creating schema: {e}")
                    if fail_on_error:
                        raise
                try:
                    self._create_extension()
                except Exception as e:
                    _logger.warning(f"PG Setup: Error creating extension: {e}")
                    if fail_on_error:
                        raise
                try:
                    self._create_tables_if_not_exists()
                except Exception as e:
                    _logger.warning(f"PG Setup: Error creating tables: {e}")
                    if fail_on_error:
                        raise
                # HNSW indexing is optional; None means it is turned off.
                if self.hnsw_kwargs is not None:
                    try:
                        self._create_hnsw_index()
                    except Exception as e:
                        _logger.warning(f"PG Setup: Error creating HNSW index: {e}")
                        if fail_on_error:
                            raise
            self._is_initialized = True
    def _node_to_table_row(self, node: BaseNode) -> Any:
        """Convert a BaseNode into a row object of the dynamic table model."""
        return self._table_class(
            node_id=node.node_id,
            embedding=node.get_embedding(),
            # Store bare text; metadata lives separately in the metadata_ column.
            text=node.get_content(metadata_mode=MetadataMode.NONE),
            metadata_=node_to_metadata_dict(
                node,
                remove_text=True,
                flat_metadata=self.flat_metadata,
            ),
        )
def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
self._initialize()
ids = []
with self._session() as session, session.begin():
for node in nodes:
ids.append(node.node_id)
item = self._node_to_table_row(node)
session.add(item)
session.commit()
return ids
def _to_postgres_operator(self, operator: FilterOperator) -> str:
if operator == FilterOperator.EQ:
return "="
elif operator == FilterOperator.GT:
return ">"
elif operator == FilterOperator.LT:
return "<"
elif operator == FilterOperator.NE:
return "!="
elif operator == FilterOperator.GTE:
return ">="
elif operator == FilterOperator.LTE:
return "<="
elif operator == FilterOperator.IN:
return "IN"
elif operator == FilterOperator.NIN:
return "NOT IN"
elif operator == FilterOperator.CONTAINS:
return "@>"
elif operator == FilterOperator.TEXT_MATCH:
return "LIKE"
elif operator == FilterOperator.TEXT_MATCH_INSENSITIVE:
return "ILIKE"
elif operator == FilterOperator.IS_EMPTY:
return "IS NULL"
else:
_logger.warning(f"Unknown operator: {operator}, fallback to '='")
return "="
    def _build_filter_clause(self, filter_: MetadataFilter) -> Any:
        """
        Build a textual SQL condition for a single metadata filter.

        NOTE(review): filter keys and values are interpolated directly into the
        SQL text rather than bound as parameters — a SQL-injection risk if
        filters ever carry untrusted input. Flagged here, not changed.
        """
        from sqlalchemy import text

        if filter_.operator in [FilterOperator.IN, FilterOperator.NIN]:
            # Expects a single value in the metadata, and a list to compare
            # In Python, to create a tuple with a single element, you need to include a comma after the element
            # This code will correctly format the IN clause whether there is one element or multiple elements in the list:
            filter_value = ", ".join(f"'{e}'" for e in filter_.value)
            return text(
                f"metadata_->>'{filter_.key}' "
                f"{self._to_postgres_operator(filter_.operator)} "
                f"({filter_value})"
            )
        elif filter_.operator == FilterOperator.CONTAINS:
            # Expects a list stored in the metadata, and a single value to compare
            return text(
                f"metadata_::jsonb->'{filter_.key}' "
                f"{self._to_postgres_operator(filter_.operator)} "
                f"'[\"{filter_.value}\"]'"
            )
        elif (
            filter_.operator == FilterOperator.TEXT_MATCH
            or filter_.operator == FilterOperator.TEXT_MATCH_INSENSITIVE
        ):
            # Where the operator is text_match or ilike, we need to wrap the filter in '%' characters
            return text(
                f"metadata_->>'{filter_.key}' "
                f"{self._to_postgres_operator(filter_.operator)} "
                f"'%{filter_.value}%'"
            )
        elif filter_.operator == FilterOperator.IS_EMPTY:
            # Where the operator is is_empty, we need to check if the metadata is null
            return text(
                f"metadata_->>'{filter_.key}' "
                f"{self._to_postgres_operator(filter_.operator)}"
            )
        else:
            # Check if value is a number. If so, cast the metadata value to a float
            # This is necessary because the metadata is stored as a string
            try:
                return text(
                    f"(metadata_->>'{filter_.key}')::float "
                    f"{self._to_postgres_operator(filter_.operator)} "
                    f"{float(filter_.value)}"
                )
            except ValueError:
                # If not a number, then treat it as a string
                return text(
                    f"metadata_->>'{filter_.key}' "
                    f"{self._to_postgres_operator(filter_.operator)} "
                    f"'{filter_.value}'"
                )
def _recursively_apply_filters(self, filters: MetadataFilters) -> Any:
    """
    Return a sqlalchemy where clause combining all nested filters.

    Leaf ``MetadataFilter`` entries are rendered by ``_build_filter_clause``;
    nested ``MetadataFilters`` groups recurse, joined with the group's
    AND/OR condition.

    Raises:
        ValueError: if ``filters.condition`` is neither "and" nor "or".
    """
    import sqlalchemy

    sqlalchemy_conditions = {
        "or": sqlalchemy.sql.or_,
        "and": sqlalchemy.sql.and_,
    }

    if filters.condition not in sqlalchemy_conditions:
        raise ValueError(
            f"Invalid condition: {filters.condition}. "
            f"Must be one of {list(sqlalchemy_conditions.keys())}"
        )

    return sqlalchemy_conditions[filters.condition](
        *(
            (
                self._build_filter_clause(filter_)
                if not isinstance(filter_, MetadataFilters)
                else self._recursively_apply_filters(filter_)
            )
            for filter_ in filters.filters
        )
    )
def _apply_filters_and_limit(
    self,
    stmt: "Select",
    limit: int,
    metadata_filters: Optional[MetadataFilters] = None,
) -> Any:
    """Attach the optional metadata WHERE clause and a LIMIT to a statement."""
    if not metadata_filters:
        return stmt.limit(limit)  # type: ignore
    filtered = stmt.where(  # type: ignore
        self._recursively_apply_filters(metadata_filters)
    )
    return filtered.limit(limit)  # type: ignore
def _build_query(
    self,
    embedding: Optional[List[float]],
    limit: int = 10,
    metadata_filters: Optional[MetadataFilters] = None,
) -> Any:
    """Build a dense-vector SELECT ordered by ascending cosine distance."""
    from sqlalchemy import select, text

    distance_col = self._table_class.embedding.cosine_distance(embedding).label(
        "distance"
    )
    stmt = select(  # type: ignore
        self._table_class.id,
        self._table_class.node_id,
        self._table_class.text,
        self._table_class.metadata_,
        distance_col,
    ).order_by(text("distance asc"))

    return self._apply_filters_and_limit(stmt, limit, metadata_filters)
def _query_with_score(
    self,
    embedding: Optional[List[float]],
    limit: int = 10,
    metadata_filters: Optional[MetadataFilters] = None,
    **kwargs: Any,
) -> List[DBEmbeddingRow]:
    """Run a dense similarity query and map result rows to DBEmbeddingRow."""
    stmt = self._build_query(embedding, limit, metadata_filters)
    rows: List[DBEmbeddingRow] = []
    with self._session() as session, session.begin():
        for item in session.execute(stmt).all():
            # cosine distance -> similarity; a NULL distance maps to 0.
            similarity = (1 - item.distance) if item.distance is not None else 0
            rows.append(
                DBEmbeddingRow(
                    node_id=item.node_id,
                    text=item.text,
                    metadata=item.metadata_,
                    similarity=similarity,
                )
            )
    return rows
def _build_sparse_query(
    self,
    query_str: Optional[str],
    limit: int,
    metadata_filters: Optional[MetadataFilters] = None,
) -> Any:
    """
    Build a full-text (tsvector) SELECT ordered by descending ``ts_rank``.

    The plain query string is normalized with ``plainto_tsquery`` and the
    resulting '&' connectors are replaced with '|' so terms are OR-ed,
    trading precision for recall.

    Raises:
        ValueError: if ``query_str`` is None.
    """
    from sqlalchemy import select, type_coerce
    from sqlalchemy.sql import func, text
    from sqlalchemy.types import UserDefinedType

    # sqlalchemy has no native type for Postgres' regconfig, so declare one
    # just for coercing the text-search-config argument.
    class REGCONFIG(UserDefinedType):
        # The TypeDecorator.cache_ok class-level flag indicates if this custom TypeDecorator is safe to be used as part of a cache key.
        # If the TypeDecorator is not guaranteed to produce the same bind/result behavior and SQL generation every time,
        # this flag should be set to False; otherwise if the class produces the same behavior each time, it may be set to True.
        cache_ok = True

        def get_col_spec(self, **kw: Any) -> str:
            return "regconfig"

    if query_str is None:
        raise ValueError("query_str must be specified for a sparse vector query.")

    # Replace '&' with '|' to perform an OR search for higher recall
    config_type_coerce = type_coerce(self.text_search_config, REGCONFIG)
    ts_query = func.to_tsquery(
        config_type_coerce,
        func.replace(
            func.text(func.plainto_tsquery(config_type_coerce, query_str)),
            "&",
            "|",
        ),
    )
    stmt = (
        select(  # type: ignore
            self._table_class.id,
            self._table_class.node_id,
            self._table_class.text,
            self._table_class.metadata_,
            func.ts_rank(self._table_class.text_search_tsv, ts_query).label("rank"),
        )
        .where(self._table_class.text_search_tsv.op("@@")(ts_query))
        .order_by(text("rank desc"))
    )

    # type: ignore
    return self._apply_filters_and_limit(stmt, limit, metadata_filters)
def _sparse_query_with_rank(
    self,
    query_str: Optional[str] = None,
    limit: int = 10,
    metadata_filters: Optional[MetadataFilters] = None,
) -> List[DBEmbeddingRow]:
    """Run a full-text query; ts_rank doubles as the similarity score."""
    stmt = self._build_sparse_query(query_str, limit, metadata_filters)
    rows: List[DBEmbeddingRow] = []
    with self._session() as session, session.begin():
        for item in session.execute(stmt).all():
            rows.append(
                DBEmbeddingRow(
                    node_id=item.node_id,
                    text=item.text,
                    metadata=item.metadata_,
                    similarity=item.rank,
                )
            )
    return rows
def _hybrid_query(
    self, query: VectorStoreQuery, **kwargs: Any
) -> List[DBEmbeddingRow]:
    """Combine dense and sparse results (dense first), deduped by node_id."""
    if query.alpha is not None:
        # Weighted blending is not implemented for this backend.
        _logger.warning(
            "yugabytedb hybrid search does not support alpha parameter."
        )

    sparse_limit = query.sparse_top_k or query.similarity_top_k

    dense_rows = self._query_with_score(
        query.query_embedding,
        query.similarity_top_k,
        query.filters,
        **kwargs,
    )
    sparse_rows = self._sparse_query_with_rank(
        query.query_str, sparse_limit, query.filters
    )

    return _dedup_results(dense_rows + sparse_rows)
def _db_rows_to_query_result(
    self, rows: List[DBEmbeddingRow]
) -> VectorStoreQueryResult:
    """Convert DB rows into a VectorStoreQueryResult (nodes/similarities/ids)."""
    nodes = []
    similarities = []
    ids = []
    for row in rows:
        try:
            node = metadata_dict_to_node(row.metadata)
            node.set_content(str(row.text))
        except Exception:
            # NOTE: deprecated legacy logic for backward compatibility
            node = TextNode(
                id_=row.node_id,
                text=row.text,
                metadata=row.metadata,
            )
        nodes.append(node)
        similarities.append(row.similarity)
        ids.append(row.node_id)

    return VectorStoreQueryResult(
        nodes=nodes,
        similarities=similarities,
        ids=ids,
    )
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
    """Dispatch a VectorStoreQuery to the hybrid, sparse, or dense path."""
    self._initialize()

    if query.mode == VectorStoreQueryMode.HYBRID:
        rows = self._hybrid_query(query, **kwargs)
    elif query.mode in [
        VectorStoreQueryMode.SPARSE,
        VectorStoreQueryMode.TEXT_SEARCH,
    ]:
        # sparse_top_k falls back to similarity_top_k when unset.
        rows = self._sparse_query_with_rank(
            query.query_str,
            query.sparse_top_k or query.similarity_top_k,
            query.filters,
        )
    elif query.mode == VectorStoreQueryMode.DEFAULT:
        rows = self._query_with_score(
            query.query_embedding,
            query.similarity_top_k,
            query.filters,
            **kwargs,
        )
    else:
        raise ValueError(f"Invalid query mode: {query.mode}")

    return self._db_rows_to_query_result(rows)
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
    """Delete every row whose metadata ``doc_id`` equals ref_doc_id."""
    from sqlalchemy import delete

    self._initialize()
    condition = self._table_class.metadata_["doc_id"].astext == ref_doc_id
    with self._session() as session, session.begin():
        session.execute(delete(self._table_class).where(condition))
        session.commit()
def delete_nodes(
    self,
    node_ids: Optional[List[str]] = None,
    filters: Optional[MetadataFilters] = None,
    **delete_kwargs: Any,
) -> None:
    """
    Deletes nodes.

    Args:
        node_ids (Optional[List[str]], optional): IDs of nodes to delete. Defaults to None.
        filters (Optional[MetadataFilters], optional): Metadata filters. Defaults to None.

    """
    # Without any selector a bare DELETE would wipe the table, so bail out.
    if not node_ids and not filters:
        return

    from sqlalchemy import delete

    self._initialize()
    with self._session() as session, session.begin():
        stmt = delete(self._table_class)

        if node_ids:
            stmt = stmt.where(self._table_class.node_id.in_(node_ids))
        if filters:
            stmt = stmt.where(self._recursively_apply_filters(filters))

        session.execute(stmt)
        session.commit()
def clear(self) -> None:
    """Clears table."""
    from sqlalchemy import delete

    self._initialize()
    with self._session() as session, session.begin():
        session.execute(delete(self._table_class))
        session.commit()
def get_nodes(
    self,
    node_ids: Optional[List[str]] = None,
    filters: Optional[MetadataFilters] = None,
) -> List[BaseNode]:
    """Get nodes from vector store."""
    assert node_ids is not None or filters is not None, (
        "Either node_ids or filters must be provided"
    )

    self._initialize()
    from sqlalchemy import select

    stmt = select(
        self._table_class.node_id,
        self._table_class.text,
        self._table_class.metadata_,
        self._table_class.embedding,
    )
    if node_ids:
        stmt = stmt.where(self._table_class.node_id.in_(node_ids))
    if filters:
        stmt = stmt.where(self._recursively_apply_filters(filters))

    result: List[BaseNode] = []
    with self._session() as session, session.begin():
        for row in session.execute(stmt).fetchall():
            try:
                node = metadata_dict_to_node(row.metadata_)
                node.set_content(str(row.text))
                node.embedding = row.embedding
            except Exception:
                # Legacy rows whose metadata lacks serialized node info.
                node = TextNode(
                    id_=row.node_id,
                    text=row.text,
                    metadata=row.metadata_,
                    embedding=row.embedding,
                )
            result.append(node)
    return result
def _dedup_results(results: List[DBEmbeddingRow]) -> List[DBEmbeddingRow]:
    """Drop duplicate rows by node_id, keeping the first occurrence in order."""
    unique: dict = {}
    for row in results:
        # dicts preserve insertion order, so the first row for an id wins.
        unique.setdefault(row.node_id, row)
    return list(unique.values())
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-yugabytedb/llama_index/vector_stores/yugabytedb/base.py",
"license": "MIT License",
"lines": 893,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-yugabytedb/tests/test_vector_stores_yugabytedb.py | from llama_index.core.vector_stores.types import BasePydanticVectorStore
from llama_index.vector_stores.yugabytedb import YBVectorStore
def test_class():
    """YBVectorStore must inherit from BasePydanticVectorStore."""
    base_names = {cls.__name__ for cls in YBVectorStore.__mro__}
    assert BasePydanticVectorStore.__name__ in base_names
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-yugabytedb/tests/test_vector_stores_yugabytedb.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-yugabytedb/tests/test_yugabytedb.py | import asyncio
from typing import Any, Dict, Generator, List, Union, Optional
import pytest
from llama_index.core.schema import (
BaseNode,
IndexNode,
NodeRelationship,
RelatedNodeInfo,
TextNode,
)
from llama_index.core.vector_stores.types import (
ExactMatchFilter,
FilterOperator,
MetadataFilter,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
)
from llama_index.vector_stores.yugabytedb import YBVectorStore
# from testing find install here https://github.com/pgvector/pgvector#installation-notes
# Connection parameters for a locally running YugabyteDB (YSQL listens on 5433).
PARAMS: Dict[str, Union[str, int]] = {
    "host": "localhost",
    "user": "yugabyte",
    "password": "yugabyte",
    "port": 5433,
    "load_balance": "True",
}
# Databases, table, and schema created and dropped by the fixtures below.
TEST_DB = "test_vector_db"
TEST_DB_HNSW = "test_vector_db_hnsw"
TEST_TABLE_NAME = "lorem_ipsum"
TEST_SCHEMA_NAME = "test"
# Tiny embedding dimension keeps the sample vectors cheap to build.
TEST_EMBED_DIM = 2
# Probe for the optional DB stack and a live server; tests skip when absent.
try:
    import pgvector  # noqa
    import psycopg2
    import sqlalchemy
    import sqlalchemy.ext.asyncio  # noqa

    # connection check
    conn__ = psycopg2.connect(**PARAMS)  # type: ignore
    conn__.close()

    yugabytedb_not_available = False
    print("yugabytedb available")
# ImportError is a subclass of Exception, so the original
# ``except (ImportError, Exception)`` tuple was redundant; one catch-all
# covers both missing drivers and failed connections.
except Exception as e:
    yugabytedb_not_available = True
    print("yugabytedb not available, Exception: ", e)
def _get_sample_vector(num: float) -> List[float]:
"""
Get sample embedding vector of the form [num, 1, 1, ..., 1]
where the length of the vector is TEST_EMBED_DIM.
"""
return [num] + [1.0] * (TEST_EMBED_DIM - 1)
@pytest.fixture(scope="session")
def conn() -> Any:
    """Session-wide raw psycopg2 connection used for DB create/drop DDL."""
    import psycopg2

    return psycopg2.connect(**PARAMS)  # type: ignore
@pytest.fixture()
def db(conn: Any) -> Generator:
    """Create the plain test database, yield, then drop it on teardown."""
    conn.autocommit = True
    with conn.cursor() as c:
        c.execute(f"DROP DATABASE IF EXISTS {TEST_DB}")
        c.execute(f"CREATE DATABASE {TEST_DB}")
        conn.commit()
    yield
    with conn.cursor() as c:
        c.execute(f"DROP DATABASE {TEST_DB}")
        conn.commit()
@pytest.fixture()
def db_hnsw(conn: Any) -> Generator:
    """Create the HNSW test database, yield, then drop it on teardown."""
    conn.autocommit = True
    with conn.cursor() as c:
        c.execute(f"DROP DATABASE IF EXISTS {TEST_DB_HNSW}")
        c.execute(f"CREATE DATABASE {TEST_DB_HNSW}")
        conn.commit()
    yield
    with conn.cursor() as c:
        c.execute(f"DROP DATABASE {TEST_DB_HNSW}")
        conn.commit()
@pytest.fixture()
def yb(db: None) -> Any:
    """Basic vector store (no hybrid search, no HNSW index)."""
    yb = YBVectorStore.from_params(
        **PARAMS,  # type: ignore
        database=TEST_DB,
        table_name=TEST_TABLE_NAME,
        schema_name=TEST_SCHEMA_NAME,
        embed_dim=TEST_EMBED_DIM,
    )

    yield yb

    asyncio.run(yb.close())
@pytest.fixture()
def yb_hybrid(db: None) -> Any:
    """Vector store with hybrid (dense + full-text) search enabled."""
    yb = YBVectorStore.from_params(
        **PARAMS,  # type: ignore
        database=TEST_DB,
        table_name=TEST_TABLE_NAME,
        schema_name=TEST_SCHEMA_NAME,
        hybrid_search=True,
        embed_dim=TEST_EMBED_DIM,
    )

    yield yb

    asyncio.run(yb.close())
@pytest.fixture()
def yb_indexed_metadata(db: None) -> Any:
    """Hybrid store with extra indexes on selected metadata keys."""
    yb = YBVectorStore.from_params(
        **PARAMS,  # type: ignore
        database=TEST_DB,
        table_name=TEST_TABLE_NAME,
        schema_name=TEST_SCHEMA_NAME,
        hybrid_search=True,
        embed_dim=TEST_EMBED_DIM,
        indexed_metadata_keys=[("test_text", "text"), ("test_int", "int")],
    )

    yield yb

    asyncio.run(yb.close())
@pytest.fixture()
def yb_hnsw(db_hnsw: None) -> Any:
    """Vector store backed by an HNSW index."""
    yb = YBVectorStore.from_params(
        **PARAMS,  # type: ignore
        database=TEST_DB_HNSW,
        table_name=TEST_TABLE_NAME,
        schema_name=TEST_SCHEMA_NAME,
        embed_dim=TEST_EMBED_DIM,
        hnsw_kwargs={"hnsw_m": 16, "hnsw_ef_construction": 64},
    )

    yield yb

    asyncio.run(yb.close())
@pytest.fixture()
def yb_hnsw_hybrid(db_hnsw: None) -> Any:
    """HNSW-indexed vector store with hybrid search enabled."""
    yb = YBVectorStore.from_params(
        **PARAMS,  # type: ignore
        database=TEST_DB_HNSW,
        table_name=TEST_TABLE_NAME,
        schema_name=TEST_SCHEMA_NAME,
        embed_dim=TEST_EMBED_DIM,
        hybrid_search=True,
        hnsw_kwargs={"hnsw_m": 16, "hnsw_ef_construction": 64},
    )

    yield yb

    asyncio.run(yb.close())
@pytest.fixture()
def yb_hnsw_multiple(db_hnsw: None) -> Generator[List[YBVectorStore], None, None]:
    """
    This creates multiple instances of YBVectorStore.

    Used to verify that concurrent initialization against the same
    table/index does not conflict.
    """
    ybs = []
    for _ in range(2):
        yb = YBVectorStore.from_params(
            **PARAMS,  # type: ignore
            database=TEST_DB_HNSW,
            table_name=TEST_TABLE_NAME,
            schema_name=TEST_SCHEMA_NAME,
            embed_dim=TEST_EMBED_DIM,
            hnsw_kwargs={"hnsw_m": 16, "hnsw_ef_construction": 64},
        )
        ybs.append(yb)

    yield ybs

    for yb in ybs:
        asyncio.run(yb.close())
@pytest.fixture(scope="session")
def node_embeddings() -> List[TextNode]:
    """Four small TextNodes with varied metadata used by the filter tests."""
    return [
        TextNode(
            text="lorem ipsum",
            id_="aaa",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="aaa")},
            extra_info={"test_num": 1},
            embedding=_get_sample_vector(1.0),
        ),
        TextNode(
            text="dolor sit amet",
            id_="bbb",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="bbb")},
            extra_info={"test_key": "test_value"},
            embedding=_get_sample_vector(0.1),
        ),
        TextNode(
            text="consectetur adipiscing elit",
            id_="ccc",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="ccc")},
            extra_info={"test_key_list": ["test_value"]},
            embedding=_get_sample_vector(0.1),
        ),
        TextNode(
            text="sed do eiusmod tempor",
            id_="ddd",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="ccc")},
            extra_info={"test_key_2": "test_val_2"},
            embedding=_get_sample_vector(0.1),
        ),
    ]
@pytest.fixture(scope="session")
def hybrid_node_embeddings() -> List[TextNode]:
    """Nodes whose text mentions 'fox' in two entries, for sparse/hybrid tests."""
    return [
        TextNode(
            text="lorem ipsum",
            id_="aaa",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="aaa")},
            embedding=_get_sample_vector(0.1),
        ),
        TextNode(
            text="dolor sit amet",
            id_="bbb",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="bbb")},
            extra_info={"test_key": "test_value"},
            embedding=_get_sample_vector(1.0),
        ),
        TextNode(
            text="The quick brown fox jumped over the lazy dog.",
            id_="ccc",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="ccc")},
            embedding=_get_sample_vector(5.0),
        ),
        TextNode(
            text="The fox and the hound",
            id_="ddd",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="ddd")},
            extra_info={"test_key": "test_value"},
            embedding=_get_sample_vector(10.0),
        ),
    ]
@pytest.fixture(scope="session")
def index_node_embeddings() -> List[TextNode]:
    """Mix of TextNodes and one IndexNode to verify node-type round-tripping."""
    return [
        TextNode(
            text="lorem ipsum",
            id_="aaa",
            embedding=_get_sample_vector(0.1),
        ),
        TextNode(
            text="dolor sit amet",
            id_="bbb",
            extra_info={"test_key": "test_value"},
            embedding=_get_sample_vector(1.0),
        ),
        IndexNode(
            text="The quick brown fox jumped over the lazy dog.",
            id_="aaa_ref",
            index_id="aaa",
            embedding=_get_sample_vector(5.0),
        ),
    ]
@pytest.fixture()
def yb_halfvec(db: None) -> Any:
    """Vector store using the half-precision (halfvec) column type."""
    yb = YBVectorStore.from_params(
        **PARAMS,  # type: ignore
        database=TEST_DB,
        table_name=TEST_TABLE_NAME + "_halfvec",
        schema_name=TEST_SCHEMA_NAME,
        embed_dim=TEST_EMBED_DIM,
        use_halfvec=True,
    )

    yield yb

    asyncio.run(yb.close())
@pytest.fixture()
def yb_halfvec_hybrid(db: None) -> Any:
    """Halfvec store with hybrid search enabled."""
    yb = YBVectorStore.from_params(
        **PARAMS,  # type: ignore
        database=TEST_DB,
        table_name=TEST_TABLE_NAME + "_halfvec_hybrid",
        schema_name=TEST_SCHEMA_NAME,
        embed_dim=TEST_EMBED_DIM,
        hybrid_search=True,
        use_halfvec=True,
    )

    yield yb

    asyncio.run(yb.close())
@pytest.fixture()
def yb_hnsw_halfvec(db_hnsw: None) -> Any:
    """Halfvec store backed by an HNSW index."""
    yb = YBVectorStore.from_params(
        **PARAMS,  # type: ignore
        database=TEST_DB_HNSW,
        table_name=TEST_TABLE_NAME + "_hnsw_halfvec",
        schema_name=TEST_SCHEMA_NAME,
        embed_dim=TEST_EMBED_DIM,
        use_halfvec=True,
        hnsw_kwargs={"hnsw_m": 16, "hnsw_ef_construction": 64},
    )

    yield yb

    asyncio.run(yb.close())
@pytest.fixture()
def yb_hnsw_hybrid_halfvec(db_hnsw: None) -> Any:
    """Halfvec store with both HNSW indexing and hybrid search."""
    yb = YBVectorStore.from_params(
        **PARAMS,  # type: ignore
        database=TEST_DB_HNSW,
        table_name=TEST_TABLE_NAME + "_hnsw_halfvec_hybrid",
        schema_name=TEST_SCHEMA_NAME,
        embed_dim=TEST_EMBED_DIM,
        hybrid_search=True,
        use_halfvec=True,
        hnsw_kwargs={"hnsw_m": 16, "hnsw_ef_construction": 64},
    )

    yield yb

    asyncio.run(yb.close())
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.asyncio
async def test_instance_creation(db: None) -> None:
    """Constructing a store must not eagerly open a client connection."""
    yb = YBVectorStore.from_params(
        **PARAMS,  # type: ignore
        database=TEST_DB,
        table_name=TEST_TABLE_NAME,
        schema_name=TEST_SCHEMA_NAME,
    )
    assert isinstance(yb, YBVectorStore)
    assert yb.client is None
    await yb.close()
@pytest.fixture()
def yb_fixture(request):
    """Resolve an indirect parametrization name to the matching store fixture."""
    if request.param == "yb":
        return request.getfixturevalue("yb")
    if request.param == "yb_halfvec":
        return request.getfixturevalue("yb_halfvec")
    raise ValueError(f"Unknown param: {request.param}")
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.asyncio
@pytest.mark.parametrize("yb_fixture", ["yb", "yb_halfvec"], indirect=True)
async def test_add_to_db_and_query(
    yb_fixture: YBVectorStore, node_embeddings: List[TextNode]
) -> None:
    """Top-1 dense query returns the node whose embedding matches exactly."""
    yb_fixture.add(node_embeddings)
    assert isinstance(yb_fixture, YBVectorStore)
    assert hasattr(yb_fixture, "_engine")
    q = VectorStoreQuery(query_embedding=_get_sample_vector(1.0), similarity_top_k=1)
    res = yb_fixture.query(q)
    assert res.nodes
    assert len(res.nodes) == 1
    assert res.nodes[0].node_id == "aaa"
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.asyncio
async def test_query_hnsw(yb_hnsw: YBVectorStore, node_embeddings: List[TextNode]):
    """Same top-1 dense query, but served through the HNSW index."""
    yb_hnsw.add(node_embeddings)
    assert isinstance(yb_hnsw, YBVectorStore)
    assert hasattr(yb_hnsw, "_engine")
    q = VectorStoreQuery(query_embedding=_get_sample_vector(1.0), similarity_top_k=1)
    res = yb_hnsw.query(q)
    assert res.nodes
    assert len(res.nodes) == 1
    assert res.nodes[0].node_id == "aaa"
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.asyncio
@pytest.mark.parametrize("yb_fixture", ["yb", "yb_halfvec"], indirect=True)
async def test_add_to_db_and_query_with_metadata_filters(
    yb_fixture: YBVectorStore, node_embeddings: List[TextNode]
) -> None:
    """An exact-match metadata filter narrows results to the single match."""
    yb_fixture.add(node_embeddings)
    assert isinstance(yb_fixture, YBVectorStore)
    assert hasattr(yb_fixture, "_engine")
    filters = MetadataFilters(
        filters=[ExactMatchFilter(key="test_key", value="test_value")]
    )
    q = VectorStoreQuery(
        query_embedding=_get_sample_vector(0.5), similarity_top_k=10, filters=filters
    )
    res = yb_fixture.query(q)
    assert res.nodes
    assert len(res.nodes) == 1
    assert res.nodes[0].node_id == "bbb"
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.asyncio
@pytest.mark.parametrize("yb_fixture", ["yb", "yb_halfvec"], indirect=True)
async def test_add_to_db_and_query_with_metadata_filters_with_in_operator(
    yb_fixture: YBVectorStore, node_embeddings: List[TextNode]
) -> None:
    """IN filter with a multi-element list matches the node whose value is listed."""
    yb_fixture.add(node_embeddings)
    assert isinstance(yb_fixture, YBVectorStore)
    assert hasattr(yb_fixture, "_engine")
    filters = MetadataFilters(
        filters=[
            MetadataFilter(
                key="test_key",
                value=["test_value", "another_value"],
                operator=FilterOperator.IN,
            )
        ]
    )
    q = VectorStoreQuery(
        query_embedding=_get_sample_vector(0.5), similarity_top_k=10, filters=filters
    )
    res = yb_fixture.query(q)
    assert res.nodes
    assert len(res.nodes) == 1
    assert res.nodes[0].node_id == "bbb"
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.asyncio
@pytest.mark.parametrize("yb_fixture", ["yb", "yb_halfvec"], indirect=True)
async def test_add_to_db_and_query_with_metadata_filters_with_in_operator_and_single_element(
    yb_fixture: YBVectorStore, node_embeddings: List[TextNode]
) -> None:
    """IN filter with a one-element list still renders valid SQL and matches."""
    yb_fixture.add(node_embeddings)
    assert isinstance(yb_fixture, YBVectorStore)
    assert hasattr(yb_fixture, "_engine")
    filters = MetadataFilters(
        filters=[
            MetadataFilter(
                key="test_key",
                value=["test_value"],
                operator=FilterOperator.IN,
            )
        ]
    )
    q = VectorStoreQuery(
        query_embedding=_get_sample_vector(0.5), similarity_top_k=10, filters=filters
    )
    res = yb_fixture.query(q)
    assert res.nodes
    assert len(res.nodes) == 1
    assert res.nodes[0].node_id == "bbb"
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.asyncio
@pytest.mark.parametrize("yb_fixture", ["yb", "yb_halfvec"], indirect=True)
async def test_add_to_db_and_query_with_metadata_filters_with_contains_operator(
    yb_fixture: YBVectorStore, node_embeddings: List[TextNode]
) -> None:
    """CONTAINS filter matches a node whose metadata holds a list with the value."""
    yb_fixture.add(node_embeddings)
    assert isinstance(yb_fixture, YBVectorStore)
    assert hasattr(yb_fixture, "_engine")
    filters = MetadataFilters(
        filters=[
            MetadataFilter(
                key="test_key_list",
                value="test_value",
                operator=FilterOperator.CONTAINS,
            )
        ]
    )
    q = VectorStoreQuery(
        query_embedding=_get_sample_vector(0.5), similarity_top_k=10, filters=filters
    )
    res = yb_fixture.query(q)
    assert res.nodes
    assert len(res.nodes) == 1
    assert res.nodes[0].node_id == "ccc"
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.asyncio
@pytest.mark.parametrize("yb_fixture", ["yb", "yb_halfvec"], indirect=True)
async def test_add_to_db_and_query_with_metadata_filters_with_is_empty(
    yb_fixture: YBVectorStore, node_embeddings: List[TextNode]
) -> None:
    """IS_EMPTY filter matches every node missing the given metadata key."""
    yb_fixture.add(node_embeddings)
    assert isinstance(yb_fixture, YBVectorStore)
    assert hasattr(yb_fixture, "_engine")
    filters = MetadataFilters(
        filters=[
            MetadataFilter(
                key="nonexistent_key", value=None, operator=FilterOperator.IS_EMPTY
            )
        ]
    )
    q = VectorStoreQuery(
        query_embedding=_get_sample_vector(0.5), similarity_top_k=10, filters=filters
    )
    res = yb_fixture.query(q)
    assert res.nodes
    # All nodes should match since none have the nonexistent_key
    assert len(res.nodes) == len(node_embeddings)
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.asyncio
@pytest.mark.parametrize("yb_fixture", ["yb", "yb_halfvec"], indirect=True)
async def test_add_to_db_query_and_delete(
    yb_fixture: YBVectorStore, node_embeddings: List[TextNode]
) -> None:
    """Add nodes and query for the nearest 0.1-embedding node.

    NOTE(review): despite the name, no delete is exercised here — the
    deletion half of this test appears to be missing; confirm intent.
    """
    yb_fixture.add(node_embeddings)
    assert isinstance(yb_fixture, YBVectorStore)
    assert hasattr(yb_fixture, "_engine")

    q = VectorStoreQuery(query_embedding=_get_sample_vector(0.1), similarity_top_k=1)

    res = yb_fixture.query(q)
    assert res.nodes
    assert len(res.nodes) == 1
    # Three nodes share the 0.1 embedding, so any of them may rank first.
    assert res.nodes[0].node_id in {"bbb", "ccc", "ddd"}
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.asyncio
async def test_sparse_query(
    yb_hybrid: YBVectorStore, hybrid_node_embeddings: List[TextNode]
) -> None:
    """SPARSE mode ranks the two 'fox' nodes by full-text relevance."""
    yb_hybrid.add(hybrid_node_embeddings)
    assert isinstance(yb_hybrid, YBVectorStore)
    assert hasattr(yb_hybrid, "_engine")

    # text search should work when query is a sentence and not just a single word
    q = VectorStoreQuery(
        query_embedding=_get_sample_vector(0.1),
        query_str="who is the fox?",
        sparse_top_k=2,
        mode=VectorStoreQueryMode.SPARSE,
    )

    res = yb_hybrid.query(q)
    assert res.nodes
    assert len(res.nodes) == 2
    assert res.nodes[0].node_id == "ccc"
    assert res.nodes[1].node_id == "ddd"
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.asyncio
async def test_hybrid_query_plain(
    yb_hybrid: YBVectorStore,
    hybrid_node_embeddings: List[TextNode],
) -> None:
    """Hybrid (dense + sparse) query against the non-HNSW store.

    Renamed from ``test_hybrid_query``: a later test reused that exact name,
    which silently shadowed this one so it never ran. The body also branched
    on an undefined ``use_async`` flag (a NameError had it ever executed);
    only the synchronous ``query`` path exists here, so that branch was
    removed.
    """
    yb_hybrid.add(hybrid_node_embeddings)
    assert isinstance(yb_hybrid, YBVectorStore)
    assert hasattr(yb_hybrid, "_engine")

    q = VectorStoreQuery(
        query_embedding=_get_sample_vector(0.1),
        query_str="fox",
        similarity_top_k=2,
        mode=VectorStoreQueryMode.HYBRID,
        sparse_top_k=1,
    )
    res = yb_hybrid.query(q)
    assert res.nodes
    assert len(res.nodes) == 3
    assert res.nodes[0].node_id == "aaa"
    assert res.nodes[1].node_id == "bbb"
    assert res.nodes[2].node_id == "ccc"

    # if sparse_top_k is not specified, it should default to similarity_top_k
    q = VectorStoreQuery(
        query_embedding=_get_sample_vector(0.1),
        query_str="fox",
        similarity_top_k=2,
        mode=VectorStoreQueryMode.HYBRID,
    )
    res = yb_hybrid.query(q)
    assert res.nodes
    assert len(res.nodes) == 4
    assert res.nodes[0].node_id == "aaa"
    assert res.nodes[1].node_id == "bbb"
    assert res.nodes[2].node_id == "ccc"
    assert res.nodes[3].node_id == "ddd"

    # text search should work when query is a sentence and not just a single word
    q = VectorStoreQuery(
        query_embedding=_get_sample_vector(0.1),
        query_str="who is the fox?",
        similarity_top_k=2,
        mode=VectorStoreQueryMode.HYBRID,
    )
    res = yb_hybrid.query(q)
    assert res.nodes
    assert len(res.nodes) == 4
    assert res.nodes[0].node_id == "aaa"
    assert res.nodes[1].node_id == "bbb"
    assert res.nodes[2].node_id == "ccc"
    assert res.nodes[3].node_id == "ddd"
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.asyncio
async def test_hybrid_query(
    yb_hnsw_hybrid: YBVectorStore, hybrid_node_embeddings: List[TextNode]
) -> None:
    """Hybrid query behavior against the HNSW-indexed hybrid store."""
    yb_hnsw_hybrid.add(hybrid_node_embeddings)
    assert isinstance(yb_hnsw_hybrid, YBVectorStore)
    assert hasattr(yb_hnsw_hybrid, "_engine")

    q = VectorStoreQuery(
        query_embedding=_get_sample_vector(0.1),
        query_str="fox",
        similarity_top_k=2,
        mode=VectorStoreQueryMode.HYBRID,
        sparse_top_k=1,
    )
    res = yb_hnsw_hybrid.query(q)
    assert res.nodes
    assert len(res.nodes) == 3
    assert res.nodes[0].node_id == "aaa"
    assert res.nodes[1].node_id == "bbb"
    assert res.nodes[2].node_id == "ccc"

    # if sparse_top_k is not specified, it should default to similarity_top_k
    q = VectorStoreQuery(
        query_embedding=_get_sample_vector(0.1),
        query_str="fox",
        similarity_top_k=2,
        mode=VectorStoreQueryMode.HYBRID,
    )
    res = yb_hnsw_hybrid.query(q)
    assert res.nodes
    assert len(res.nodes) == 4
    assert res.nodes[0].node_id == "aaa"
    assert res.nodes[1].node_id == "bbb"
    assert res.nodes[2].node_id == "ccc"
    assert res.nodes[3].node_id == "ddd"

    # text search should work when query is a sentence and not just a single word
    q = VectorStoreQuery(
        query_embedding=_get_sample_vector(0.1),
        query_str="who is the fox?",
        similarity_top_k=2,
        mode=VectorStoreQueryMode.HYBRID,
    )
    res = yb_hnsw_hybrid.query(q)
    assert res.nodes
    assert len(res.nodes) == 4
    assert res.nodes[0].node_id == "aaa"
    assert res.nodes[1].node_id == "bbb"
    assert res.nodes[2].node_id == "ccc"
    assert res.nodes[3].node_id == "ddd"
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.asyncio
async def test_add_to_db_and_hybrid_query_with_metadata_filters(
    yb_hybrid: YBVectorStore, hybrid_node_embeddings: List[TextNode]
) -> None:
    """Metadata filters apply to both the dense and sparse halves of hybrid."""
    yb_hybrid.add(hybrid_node_embeddings)
    assert isinstance(yb_hybrid, YBVectorStore)
    assert hasattr(yb_hybrid, "_engine")
    filters = MetadataFilters(
        filters=[ExactMatchFilter(key="test_key", value="test_value")]
    )
    q = VectorStoreQuery(
        query_embedding=_get_sample_vector(0.1),
        query_str="fox",
        similarity_top_k=10,
        filters=filters,
        mode=VectorStoreQueryMode.HYBRID,
    )
    res = yb_hybrid.query(q)
    assert res.nodes
    assert len(res.nodes) == 2
    assert res.nodes[0].node_id == "bbb"
    assert res.nodes[1].node_id == "ddd"
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
def test_hybrid_query_fails_if_no_query_str_provided(
    yb_hybrid: YBVectorStore, hybrid_node_embeddings: List[TextNode]
) -> None:
    """A HYBRID-mode query without ``query_str`` must raise with a clear message."""
    q = VectorStoreQuery(
        query_embedding=_get_sample_vector(1.0),
        similarity_top_k=10,
        mode=VectorStoreQueryMode.HYBRID,
    )

    with pytest.raises(Exception) as exc:
        yb_hybrid.query(q)

    # str(exc) stringifies pytest's ExceptionInfo wrapper, not the raised
    # exception, so the original comparison could never hold; compare the
    # actual exception's message via exc.value instead.
    assert str(exc.value) == "query_str must be specified for a sparse vector query."
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.asyncio
@pytest.mark.parametrize("yb_fixture", ["yb", "yb_halfvec"], indirect=True)
async def test_add_to_db_and_query_index_nodes(
    yb_fixture: YBVectorStore, index_node_embeddings: List[BaseNode]
) -> None:
    """IndexNodes must round-trip through the store as IndexNode, not TextNode."""
    yb_fixture.add(index_node_embeddings)
    assert isinstance(yb_fixture, YBVectorStore)
    assert hasattr(yb_fixture, "_engine")
    q = VectorStoreQuery(query_embedding=_get_sample_vector(5.0), similarity_top_k=2)
    res = yb_fixture.query(q)
    assert res.nodes
    assert len(res.nodes) == 2
    assert res.nodes[0].node_id == "aaa_ref"
    assert isinstance(res.nodes[0], IndexNode)
    assert hasattr(res.nodes[0], "index_id")
    assert res.nodes[1].node_id == "bbb"
    assert isinstance(res.nodes[1], TextNode)
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.asyncio
@pytest.mark.parametrize("yb_fixture", ["yb", "yb_halfvec"], indirect=True)
async def test_delete_nodes(
    yb_fixture: YBVectorStore, node_embeddings: List[BaseNode]
) -> None:
    """delete_nodes is a no-op without args, tolerant of unknown ids, and removes listed ids."""
    yb_fixture.add(node_embeddings)
    assert isinstance(yb_fixture, YBVectorStore)
    assert hasattr(yb_fixture, "_engine")
    q = VectorStoreQuery(query_embedding=_get_sample_vector(0.5), similarity_top_k=10)

    # test deleting nothing
    yb_fixture.delete_nodes()
    res = yb_fixture.query(q)
    assert all(i in res.ids for i in ["aaa", "bbb", "ccc"])

    # test deleting element that doesn't exist
    yb_fixture.delete_nodes(["asdf"])
    res = yb_fixture.query(q)
    assert all(i in res.ids for i in ["aaa", "bbb", "ccc"])

    # test deleting list
    yb_fixture.delete_nodes(["aaa", "bbb"])
    res = yb_fixture.query(q)
    assert all(i not in res.ids for i in ["aaa", "bbb"])
    assert "ccc" in res.ids
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.asyncio
@pytest.mark.parametrize("yb_fixture", ["yb", "yb_halfvec"], indirect=True)
async def test_delete_nodes_metadata(
    yb_fixture: YBVectorStore, node_embeddings: List[BaseNode]
) -> None:
    """delete_nodes with filters deletes only ids that also satisfy the filter."""
    yb_fixture.add(node_embeddings)
    assert isinstance(yb_fixture, YBVectorStore)
    assert hasattr(yb_fixture, "_engine")
    q = VectorStoreQuery(query_embedding=_get_sample_vector(0.5), similarity_top_k=10)

    # test deleting multiple IDs but only one satisfies filter
    filters = MetadataFilters(
        filters=[
            MetadataFilter(
                key="test_key",
                value=["test_value", "another_value"],
                operator=FilterOperator.IN,
            )
        ]
    )
    yb_fixture.delete_nodes(["aaa", "bbb"], filters=filters)
    res = yb_fixture.query(q)
    assert all(i in res.ids for i in ["aaa", "ccc", "ddd"])
    assert "bbb" not in res.ids

    # test deleting one ID which satisfies the filter
    filters = MetadataFilters(
        filters=[
            MetadataFilter(
                key="test_num",
                value=1,
                operator=FilterOperator.EQ,
            )
        ]
    )
    yb_fixture.delete_nodes(["aaa"], filters=filters)
    res = yb_fixture.query(q)
    assert all(i not in res.ids for i in ["bbb", "aaa"])
    assert all(i in res.ids for i in ["ccc", "ddd"])

    # test deleting one ID which doesn't satisfy the filter
    filters = MetadataFilters(
        filters=[
            MetadataFilter(
                key="test_num",
                value=1,
                operator=FilterOperator.EQ,
            )
        ]
    )
    yb_fixture.delete_nodes(["ccc"], filters=filters)
    res = yb_fixture.query(q)
    assert all(i not in res.ids for i in ["bbb", "aaa"])
    assert all(i in res.ids for i in ["ccc", "ddd"])

    # test deleting purely based on filters
    filters = MetadataFilters(
        filters=[
            MetadataFilter(
                key="test_key_2",
                value="test_val_2",
                operator=FilterOperator.EQ,
            )
        ]
    )
    yb_fixture.delete_nodes(filters=filters)
    res = yb_fixture.query(q)
    assert all(i not in res.ids for i in ["bbb", "aaa", "ddd"])
    assert "ccc" in res.ids
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.asyncio
async def test_hnsw_index_creation(
    yb_hnsw_multiple: List[YBVectorStore],
    node_embeddings: List[TextNode],
) -> None:
    """
    This test will make sure that creating multiple YBVectorStores handles db initialization properly.
    """
    # calling add will make the db initialization run
    for yb in yb_hnsw_multiple:
        yb.add(node_embeddings)
    # these are the actual table and index names that YBVectorStore automatically created
    data_test_table_name = f"data_{TEST_TABLE_NAME}"
    data_test_index_name = f"data_{TEST_TABLE_NAME}_embedding_idx"
    # create a connection to the TEST_DB_HNSW database to make sure that one, and only one, index was created
    with psycopg2.connect(**PARAMS, database=TEST_DB_HNSW) as hnsw_conn:
        with hnsw_conn.cursor() as c:
            # The LIKE '<name>%' pattern presumably also catches suffixed
            # duplicates (e.g. *_1) a re-creation bug would leave — TODO confirm.
            c.execute(
                f"SELECT COUNT(*) FROM pg_indexes WHERE schemaname = '{TEST_SCHEMA_NAME}' AND tablename = '{data_test_table_name}' AND indexname LIKE '{data_test_index_name}%';"
            )
            index_count = c.fetchone()[0]
            assert index_count == 1, (
                f"Expected exactly one '{data_test_index_name}' index, but found {index_count}."
            )
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.asyncio
@pytest.mark.parametrize("yb_fixture", ["yb", "yb_halfvec"], indirect=True)
async def test_clear(
    yb_fixture: YBVectorStore, node_embeddings: List[BaseNode]
) -> None:
    """clear() must remove every stored node from the table."""
    expected_ids = ("bbb", "aaa", "ddd", "ccc")
    yb_fixture.add(node_embeddings)
    assert isinstance(yb_fixture, YBVectorStore)
    assert hasattr(yb_fixture, "_engine")
    query = VectorStoreQuery(
        query_embedding=_get_sample_vector(0.5), similarity_top_k=10
    )
    # All four nodes are retrievable before the wipe...
    before = yb_fixture.query(query)
    for node_id in expected_ids:
        assert node_id in before.ids
    yb_fixture.clear()
    # ...and none afterwards.
    after = yb_fixture.query(query)
    for node_id in expected_ids:
        assert node_id not in after.ids
    assert len(after.ids) == 0
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.parametrize(
    ("node_ids", "filters", "expected_node_ids"),
    [
        # ids only, no filters: both returned
        (["aaa", "bbb"], None, ["aaa", "bbb"]),
        # filters only: metadata selects a single node
        (
            None,
            MetadataFilters(
                filters=[
                    MetadataFilter(
                        key="test_num",
                        value=1,
                        operator=FilterOperator.EQ,
                    )
                ]
            ),
            ["aaa"],
        ),
        # ids AND filters: only the intersection survives
        (
            ["bbb", "ccc"],
            MetadataFilters(
                filters=[
                    MetadataFilter(
                        key="test_key",
                        value="test_value",
                        operator=FilterOperator.EQ,
                    )
                ]
            ),
            ["bbb"],
        ),
        # id present but filter excludes it: empty result
        (
            ["ccc"],
            MetadataFilters(
                filters=[
                    MetadataFilter(
                        key="test_key",
                        value="test_value",
                        operator=FilterOperator.EQ,
                    )
                ]
            ),
            [],
        ),
        # filter matches nothing at all: empty result
        (
            ["aaa", "bbb"],
            MetadataFilters(
                filters=[
                    MetadataFilter(
                        key="test_num",
                        value=999,
                        operator=FilterOperator.EQ,
                    )
                ]
            ),
            [],
        ),
    ],
)
def test_get_nodes_parametrized(
    yb: YBVectorStore,
    node_embeddings: List[TextNode],
    node_ids: Optional[List[str]],
    filters: Optional[MetadataFilters],
    expected_node_ids: List[str],
) -> None:
    """Test get_nodes method with various combinations of node_ids and filters."""
    yb.add(node_embeddings)
    nodes = yb.get_nodes(node_ids=node_ids, filters=filters)
    retrieved_ids = [node.node_id for node in nodes]
    # Compare as sets (order unspecified) but also lengths to rule out duplicates.
    assert set(retrieved_ids) == set(expected_node_ids)
    assert len(retrieved_ids) == len(expected_node_ids)
@pytest.mark.skipif(yugabytedb_not_available, reason="yugabytedb is not available")
@pytest.mark.asyncio
async def test_custom_engines(db: None, node_embeddings: List[TextNode]) -> None:
    """Test that YBVectorStore works correctly with custom engines."""
    from sqlalchemy import create_engine
    # Create custom engines
    engine = create_engine(
        f"postgresql+psycopg2://{PARAMS['user']}:{PARAMS['password']}@{PARAMS['host']}:{PARAMS['port']}/{TEST_DB}",
        echo=False,
    )
    # Create YBVectorStore with custom engines
    yb = YBVectorStore(embed_dim=TEST_EMBED_DIM, engine=engine)
    # Test sync add
    yb.add(node_embeddings[0:4])
    # Query to verify nodes were added correctly
    q = VectorStoreQuery(query_embedding=_get_sample_vector(0.5), similarity_top_k=10)
    # Test sync query
    res = yb.query(q)
    assert len(res.nodes) == 4
    assert set(res.ids) == {"aaa", "bbb", "ccc", "ddd"}
    # Clean up
    # NOTE(review): close() is awaited, then the externally owned engine is
    # disposed by the test itself — presumably the store does not dispose
    # engines it did not create; confirm against YBVectorStore.close().
    await yb.close()
    engine.dispose()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-yugabytedb/tests/test_yugabytedb.py",
"license": "MIT License",
"lines": 868,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/callbacks/test_token_budget.py | import pytest
from unittest.mock import Mock
from types import SimpleNamespace
from llama_index.core.callbacks import CallbackManager
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.callbacks.token_counting import TokenCountingHandler
from llama_index.core.llms import CompletionResponse
def test_token_budget_enforcement():
    """Test that the TokenCountingHandler enforces the budget in on_event_end."""
    # Tokenizer stub: every invocation reports exactly 5 tokens.
    tokenizer_stub = Mock(return_value=[1, 2, 3, 4, 5])
    counter = TokenCountingHandler(tokenizer=tokenizer_stub, token_budget=15)
    # The handler reads completions from an object with a .raw attribute,
    # so a plain string will not do.
    completion = CompletionResponse(text="generated text")

    def _payload():
        # Fresh payload per event, mirroring real callback usage.
        return {
            EventPayload.PROMPT: "input prompt",
            EventPayload.COMPLETION: completion,
        }

    # Event #1: 5 prompt + 5 completion = 10 tokens — under the budget of 15.
    counter.on_event_end(event_type=CBEventType.LLM, payload=_payload())
    # Event #2 pushes the running total to 20 tokens and must raise.
    with pytest.raises(ValueError) as excinfo:
        counter.on_event_end(event_type=CBEventType.LLM, payload=_payload())
    message = str(excinfo.value)
    assert "Token budget exceeded" in message
    assert "Limit: 15" in message
def test_token_budget_via_callback_manager():
    """The budget must also trip when events arrive via a CallbackManager."""
    mock_tokenizer = Mock()
    mock_tokenizer.return_value = [1, 2, 3, 4, 5]
    handler = TokenCountingHandler(tokenizer=mock_tokenizer, token_budget=15)
    cm = CallbackManager([handler])
    # minimal object that satisfies your get_tokens_from_response fallback behavior
    resp = SimpleNamespace(raw=None, additional_kwargs={})
    # first event -> 10 tokens (5 prompt + 5 completion) OK
    cm.on_event_end(
        CBEventType.LLM,
        payload={EventPayload.PROMPT: "p", EventPayload.COMPLETION: resp},
    )
    # second event -> total 20 tokens => should exceed
    with pytest.raises(ValueError):
        cm.on_event_end(
            CBEventType.LLM,
            payload={EventPayload.PROMPT: "p", EventPayload.COMPLETION: resp},
        )
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/callbacks/test_token_budget.py",
"license": "MIT License",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/test_types.py | from contextvars import ContextVar
from typing import Any, Dict, List
from llama_index.core.types import Thread
def test_thread_with_no_target() -> None:
    """Test that creating a Thread with target=None does not crash."""
    worker = Thread(target=None)
    worker.start()
    # join() returning (rather than raising) is the success condition.
    worker.join()
def test_thread_with_target() -> None:
    """A Thread constructed with a callable target must execute it."""
    calls: List[int] = []
    worker = Thread(target=lambda: calls.append(1))
    worker.start()
    worker.join()
    # Exactly one invocation, observed through the shared list.
    assert calls == [1]
def test_thread_context_copy() -> None:
    """Test that the context is copied to the new thread."""
    var = ContextVar("var", default=0)
    var.set(1)
    results: Dict[str, Any] = {}
    def target_fn() -> None:
        # Reads the ContextVar from inside the worker thread.
        results["value"] = var.get()
    t = Thread(target=target_fn)
    t.start()
    t.join()
    # It should copy the context where var=1.
    # If it didn't use copy_context(), it might still work in some threaded envs
    # depending on how context vars propagate, but Thread implementation explicitly uses copy_context().run
    assert results["value"] == 1
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/test_types.py",
"license": "MIT License",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/llama_index/core/agent/workflow/agent_context.py | """
Agent context protocol and simple implementation for non-workflow usage.
This module provides a minimal duck-typed protocol that `take_step` implementations
use, allowing agents to work both in full workflow contexts and in simpler
scenarios like `LLM.predict_and_call`.
"""
from dataclasses import dataclass, field
from typing import Any, Protocol, runtime_checkable
from workflows.context.state_store import DictState, InMemoryStateStore
@runtime_checkable
class AgentContext(Protocol):
    """
    Minimal context interface for agent take_step implementations.

    This protocol defines the subset of Context that agents actually use,
    allowing for both full workflow Context and lightweight alternatives.

    Decorated with @runtime_checkable so implementations can be verified
    with isinstance() checks at runtime (structural, method-presence only).
    """
    @property
    def store(self) -> InMemoryStateStore[Any]:
        """Access the key-value store for agent state."""
        ...
    @property
    def is_running(self) -> bool:
        """Check if the workflow is actively running (for event writing)."""
        ...
    def write_event_to_stream(self, event: Any) -> None:
        """Write an event to the output stream."""
        ...
def _default_store() -> InMemoryStateStore[DictState]:
    """Build a fresh in-memory store seeded with an empty DictState."""
    empty_state = DictState()
    return InMemoryStateStore(empty_state)
@dataclass(frozen=True)
class SimpleAgentContext:
    """
    Lightweight context for agents used outside workflows.

    This implementation satisfies the AgentContext protocol with minimal
    overhead, suitable for use in `LLM.predict_and_call` and similar
    non-workflow scenarios where a full Context is not needed.
    """
    # default_factory gives each instance its own store — no shared state.
    store: InMemoryStateStore[DictState] = field(default_factory=_default_store)
    # Always False here: no workflow is driving this context.
    is_running: bool = False
    def write_event_to_stream(self, event: Any) -> None:
        """No-op - events are discarded in non-workflow usage."""
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/llama_index/core/agent/workflow/agent_context.py",
"license": "MIT License",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-vllm/tests/test_vllm_server_modes.py | import json
from typing import Any, Dict, List, Optional
import pytest
import requests
from llama_index.llms.vllm import VllmServer
class FakeResponse:
    """Minimal stand-in for a requests.Response used by these tests.

    Holds a canned JSON payload, a status code, and optional streaming
    byte chunks, exposing only the surface the vLLM client touches.
    """

    def __init__(
        self,
        json_data: Optional[Dict[str, Any]] = None,
        status_code: int = 200,
        iter_lines_data: Optional[List[bytes]] = None,
    ) -> None:
        self.status_code = status_code
        self._json = json_data or {}
        self._iter_lines = iter_lines_data or []
        # requests exposes the raw body as bytes; mirror that here.
        self.content = json.dumps(self._json).encode("utf-8")

    def json(self) -> Dict[str, Any]:
        """Return the canned JSON payload."""
        return self._json

    def iter_lines(self, **_: Any):
        """Yield the configured raw byte chunks, mimicking streaming."""
        yield from self._iter_lines

    def raise_for_status(self) -> None:
        """Mirror requests' behavior: raise HTTPError on 4xx/5xx codes."""
        if self.status_code >= 400:
            raise requests.HTTPError(f"HTTP {self.status_code}")
def test_openai_like_complete(monkeypatch):
    """In OpenAI-like mode, complete() must POST a chat-style payload."""
    # Captures the kwargs of the (single) requests.post call for assertions.
    recorded = {}
    def fake_post(url, headers=None, json=None, stream=False, timeout=None):
        recorded["url"] = url
        recorded["headers"] = headers
        recorded["json"] = json
        recorded["stream"] = stream
        recorded["timeout"] = timeout
        return FakeResponse({"choices": [{"message": {"content": "hello"}}]})
    monkeypatch.setattr("requests.post", fake_post)
    llm = VllmServer(
        api_url="http://mock/chat", openai_like=True, api_headers={"X-Test": "1"}
    )
    result = llm.complete("hi")
    assert result.text == "hello"
    assert recorded["url"] == "http://mock/chat"
    # Non-streaming call, prompt wrapped as a chat message, custom header forwarded.
    assert recorded["stream"] is False
    assert recorded["json"]["messages"][0]["content"] == "hi"
    assert recorded["headers"]["X-Test"] == "1"
def test_openai_like_stream(monkeypatch):
    """Streaming mode must accumulate SSE deltas and stop at [DONE]."""
    # Two content deltas followed by the SSE terminator line.
    chunks = [
        b'data: {"choices":[{"delta":{"content":"he"}}]}\n',
        b'data: {"choices":[{"delta":{"content":"llo"}}]}\n',
        b"data: [DONE]\n",
    ]
    def fake_post(url, headers=None, json=None, stream=False, timeout=None):
        return FakeResponse(iter_lines_data=chunks, json_data={"choices": []})
    monkeypatch.setattr("requests.post", fake_post)
    llm = VllmServer(api_url="http://mock/chat", openai_like=True)
    outputs = list(llm.stream_complete("hi"))
    # Per-chunk deltas plus the running concatenation on the final item.
    assert [o.delta for o in outputs] == ["he", "llo"]
    assert outputs[-1].text == "hello"
def test_native_http_error(monkeypatch):
    """Native (non-OpenAI) mode must surface HTTP errors from the server."""
    monkeypatch.setattr(
        "requests.post",
        lambda url, headers=None, json=None, stream=False, timeout=None: FakeResponse(
            status_code=500
        ),
    )
    llm = VllmServer(api_url="http://mock/native", openai_like=False)
    with pytest.raises(requests.HTTPError):
        llm.complete("hi")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-vllm/tests/test_vllm_server_modes.py",
"license": "MIT License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-oceanbase/tests/test_vector_stores_oceanbase_unit.py | from typing import Any, Dict, List
import sys
import types
import pytest
from llama_index.core.bridge.pydantic import ValidationError
from llama_index.core.schema import TextNode
from llama_index.core.vector_stores.types import (
MetadataFilter,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
)
from llama_index.core.vector_stores.utils import node_to_metadata_dict
if "pyobvector" not in sys.modules:
pyobvector = types.ModuleType("pyobvector")
class DummyObVecClient:
pass
class DummyVECTOR:
def __init__(self, dim: int) -> None:
self.dim = dim
class DummySparseVector:
def __call__(self) -> "DummySparseVector":
return self
class DummyFtsIndexParam:
def __init__(self, *args: Any, **kwargs: Any) -> None:
pass
class DummyFtsParser:
NGRAM = "NGRAM"
class DummyMatchAgainst:
def __init__(self, *args: Any, **kwargs: Any) -> None:
pass
def label(self, *args: Any, **kwargs: Any) -> "DummyMatchAgainst":
return self
def desc(self) -> "DummyMatchAgainst":
return self
pyobvector.ObVecClient = DummyObVecClient
pyobvector.VECTOR = DummyVECTOR
pyobvector.SPARSE_VECTOR = DummySparseVector
pyobvector.FtsIndexParam = DummyFtsIndexParam
pyobvector.FtsParser = DummyFtsParser
pyobvector.MatchAgainst = DummyMatchAgainst
client_mod = types.ModuleType("pyobvector.client")
index_param_mod = types.ModuleType("pyobvector.client.index_param")
class DummyVecIndexType:
HNSW = "HNSW"
HNSW_SQ = "HNSW_SQ"
IVFFLAT = "IVFFLAT"
IVFSQ = "IVFSQ"
IVFPQ = "IVFPQ"
DAAT = "DAAT"
index_param_mod.VecIndexType = DummyVecIndexType
client_mod.index_param = index_param_mod
sys.modules["pyobvector"] = pyobvector
sys.modules["pyobvector.client"] = client_mod
sys.modules["pyobvector.client.index_param"] = index_param_mod
from llama_index.vector_stores.oceanbase import base as ob_base
class DummyResult:
    """Tiny cursor-result stub: fetchall() replays the rows it was given."""

    def __init__(self, rows: List[tuple[Any, ...]]):
        # Keep the caller's list by reference, like a real result proxy.
        self._rows = rows

    def fetchall(self) -> List[tuple[Any, ...]]:
        """Return every stored row (DB-API fetchall semantics)."""
        return self._rows
class DummyClient:
    """Stub ObVecClient: serves canned rows and records ef_search settings."""

    def __init__(self, rows: List[tuple[Any, ...]] | None = None) -> None:
        self.rows = [] if rows is None else rows
        # Last value passed to set_ob_hnsw_ef_search, for assertions.
        self.last_ef_search: int | None = None

    def ann_search(self, *args: Any, **kwargs: Any) -> DummyResult:
        """Ignore the query and return the configured rows."""
        return DummyResult(self.rows)

    def set_ob_hnsw_ef_search(self, value: int) -> None:
        """Record the requested HNSW ef_search parameter."""
        self.last_ef_search = value
def make_store() -> ob_base.OceanBaseVectorStore:
    """Build a store via __new__, bypassing __init__ (and its DB setup),
    then hand-populate the private fields the unit tests exercise."""
    store = ob_base.OceanBaseVectorStore.__new__(ob_base.OceanBaseVectorStore)
    # Column-name configuration.
    store._metadata_field = "metadata"
    store._doc_id_field = "doc_id"
    store._primary_field = "id"
    store._text_field = "document"
    store._vector_field = "embedding"
    store._sparse_vector_field = "sparse_embedding"
    store._fulltext_field = "fulltext_content"
    # Enable all retrieval modalities so every query path is testable.
    store._include_sparse = True
    store._include_fulltext = True
    store._normalize = False
    store._vidx_metric_type = "l2"
    store._index_type = "HNSW"
    store._hnsw_ef_search = -1
    store._table_name = "test_table"
    # No real database: DummyClient answers ann_search with canned rows.
    store._client = DummyClient()
    return store
def test_escape_json_path_segment():
    """Plain segments pass through, hyphenated ones get quoted, empty raises."""
    cases = [("foo", "foo"), ("foo-bar", '"foo-bar"')]
    for raw, expected in cases:
        assert ob_base._escape_json_path_segment(raw) == expected
    with pytest.raises(ValueError):
        ob_base._escape_json_path_segment("")
def test_enhance_filter_key_quotes_segments():
    """Dotted keys become JSON paths with unsafe segments quoted."""
    enhanced = make_store()._enhance_filter_key("foo.bar-baz")
    assert enhanced.startswith("metadata->'$.")
    assert '"bar-baz"' in enhanced
def test_to_oceanbase_filter_builds_params():
    """Mixed operators (==, in, is_empty, text_match) must all bind params."""
    store = make_store()
    params: Dict[str, Any] = {}
    expanding: set[str] = set()
    # Nested group plus four operator kinds under an OR condition.
    filters = MetadataFilters(
        filters=[
            MetadataFilter(key="theme", value="FOO", operator="=="),
            MetadataFilter(key="location", value=[1, 2], operator="in"),
            MetadataFilters(
                filters=[
                    MetadataFilter(key="location", value=None, operator="is_empty"),
                ],
                condition="and",
            ),
            MetadataFilter(key="name", value="bar", operator="text_match"),
        ],
        condition="or",
    )
    clause = store._to_oceanbase_filter(
        filters, params=params, expanding_params=expanding
    )
    assert "metadata" in clause
    # Each operator leaves its bound value behind; text_match appends '%'.
    assert any(value == "FOO" for value in params.values())
    assert any(value == [1, 2] for value in params.values())
    assert any(value == "bar%" for value in params.values())
    assert any(name.startswith("in_") for name in expanding)
def test_to_oceanbase_filter_not_condition_and_empty_in():
    """NOT wraps the clause; nin against an empty list degenerates to 1=1."""
    store = make_store()
    params: Dict[str, Any] = {}
    expanding: set[str] = set()
    filters = MetadataFilters(
        filters=[
            MetadataFilter(key="score", value=1, operator=">"),
        ],
        condition="not",
    )
    clause = store._to_oceanbase_filter(
        filters, params=params, expanding_params=expanding
    )
    assert clause.startswith("NOT")
    # Reset state, then check the empty-list nin short-circuit.
    params = {}
    expanding = set()
    filters = MetadataFilters(
        filters=[
            MetadataFilter(key="tags", value=[], operator="nin"),
        ]
    )
    clause = store._to_oceanbase_filter(
        filters, params=params, expanding_params=expanding
    )
    assert clause == "1=1"
def test_to_oceanbase_filter_invalid_in_value():
    """A non-list value used with the `in` operator must be rejected."""
    store = make_store()
    bad_filters = MetadataFilters(
        filters=[MetadataFilter(key="tags", value="not-a-list", operator="in")]
    )
    with pytest.raises(ValueError):
        store._to_oceanbase_filter(bad_filters, params={}, expanding_params=set())
def test_build_where_clause_with_doc_and_node_ids():
    """doc_ids, node_ids, and metadata filters all bind into one WHERE clause."""
    store = make_store()
    filters = MetadataFilters(
        filters=[
            MetadataFilter(key="score", value=1, operator=">="),
        ]
    )
    clause = store._build_where_clause(
        filters=filters, doc_ids=["doc-1", "doc-2"], node_ids=["node-1"]
    )
    assert clause is not None
    # Inspect the compiled bind parameters rather than SQL text.
    params = clause.compile().params
    assert any(value == [1] or value == 1 for value in params.values())
    assert ["doc-1", "doc-2"] in params.values()
    assert ["node-1"] in params.values()
    # An explicitly empty doc_ids list can never match: clause becomes 1=0.
    empty_doc_clause = store._build_where_clause(doc_ids=[])
    assert empty_doc_clause is not None
    assert empty_doc_clause.text == "1=0"
def test_normalize_vector():
    """Zero vectors stay zero; a 3-4-5 vector normalizes to (0.6, 0.8)."""
    assert ob_base._normalize([0.0, 0.0]) == [0.0, 0.0]
    assert ob_base._normalize([3.0, 4.0]) == pytest.approx([0.6, 0.8], rel=1e-6)
def test_parse_distance_to_similarity_cosine():
    """For cosine metric, similarity is 1 - distance."""
    store = make_store()
    store._vidx_metric_type = "cosine"
    similarity = store._parse_distance_to_similarities(0.2)
    assert similarity == pytest.approx(0.8, rel=1e-6)
def test_parse_metric_type_to_dist_func():
    """Each metric string maps to its SQL distance function; unknown raises."""
    store = make_store()
    store._vidx_metric_type = "l2"
    assert store._parse_metric_type_str_to_dist_func()(1, 2).name == "l2_distance"
    store._vidx_metric_type = "inner_product"
    # Inner product is negated so that smaller is still "closer".
    assert (
        store._parse_metric_type_str_to_dist_func()(1, 2).name
        == "negative_inner_product"
    )
    store._vidx_metric_type = "cosine"
    assert store._parse_metric_type_str_to_dist_func()(1, 2).name == "cosine_distance"
    store._vidx_metric_type = "invalid"
    with pytest.raises(ValueError):
        store._parse_metric_type_str_to_dist_func()
def test_query_dense_records_and_ef_search():
    """Dense query parses rows into records and forwards efSearch to the client."""
    store = make_store()
    # Serialize a TextNode the way the store persists metadata.
    metadata = node_to_metadata_dict(TextNode(text="text-1"), remove_text=True)
    store._client = DummyClient(
        rows=[
            ("id-1", "text-1", metadata, 0.0),
        ]
    )
    query = VectorStoreQuery(query_embedding=[0.1, 0.2], similarity_top_k=1)
    records = store._query_dense_records(
        query, search_param={"efSearch": 32}, where_clause=None
    )
    # efSearch must reach the underlying client.
    assert store._client.last_ef_search == 32
    assert len(records) == 1
    assert records[0]["id"] == "id-1"
    # l2 distance 0.0 converts to similarity 1.0.
    assert records[0]["score"] == pytest.approx(1.0, rel=1e-6)
def test_query_sparse_records_parses_metadata():
    """Sparse query must deserialize stored metadata and score the row."""
    store = make_store()
    metadata = node_to_metadata_dict(TextNode(text="text-1"), remove_text=True)
    store._client = DummyClient(
        rows=[
            ("id-1", "text-1", metadata, 1.0),
        ]
    )
    records = store._query_sparse_records(
        sparse_query={1: 1.0}, top_k=1, where_clause=None
    )
    assert len(records) == 1
    assert records[0]["id"] == "id-1"
    # Raw value 1.0 comes back negated as the sparse score.
    assert records[0]["score"] == pytest.approx(-1.0, rel=1e-6)
def test_query_sparse_and_fulltext_requires_flags():
    """Sparse/fulltext queries must fail if the store was built without them."""
    store = make_store()
    store._include_sparse = False
    with pytest.raises(ValueError):
        store._query_sparse_records(sparse_query={1: 1.0}, top_k=1, where_clause=None)
    store._include_fulltext = False
    with pytest.raises(ValueError):
        store._query_fulltext_records(fulltext_query="foo", top_k=1, where_clause=None)
def test_hybrid_fusion_weights_and_ranking():
    """Fusion over three modalities returns exactly top_k ranked candidates."""
    store = make_store()
    # doc-2 appears in two modalities; doc-1 and doc-3 in one each.
    records_by_modality = {
        "vector": [
            {"id": "doc-1", "node": "v1", "score": 0.9, "modality": "vector"},
            {"id": "doc-2", "node": "v2", "score": 0.8, "modality": "vector"},
        ],
        "sparse": [
            {"id": "doc-2", "node": "s2", "score": 0.7, "modality": "sparse"},
        ],
        "fulltext": [
            {"id": "doc-3", "node": "f3", "score": 0.6, "modality": "fulltext"},
        ],
    }
    result = store._fuse_hybrid_records(records_by_modality, top_k=2, alpha=None)
    assert len(result.ids) == 2
    # Only membership is asserted — exact ranking depends on fusion weights.
    assert result.ids[0] in {"doc-1", "doc-2", "doc-3"}
def test_normalize_hybrid_weights_alpha():
    """alpha assigns weight to the dense modality; the rest share 1 - alpha."""
    normalized = make_store()._normalize_hybrid_weights(
        ["vector", "fulltext"], alpha=0.8
    )
    assert normalized["vector"] == pytest.approx(0.8, rel=1e-6)
    assert normalized["fulltext"] == pytest.approx(0.2, rel=1e-6)
def test_query_mode_validations_and_outputs():
    """Each query mode validates its required inputs and routes to its backend."""
    store = make_store()
    # Stub the SQL-facing helpers so only mode dispatch/validation is exercised.
    store._build_where_clause = lambda *args, **kwargs: None
    store._query_dense_records = lambda *args, **kwargs: [
        {"id": "doc-1", "node": "n1", "score": 0.9, "modality": "vector"}
    ]
    store._query_sparse_records = lambda *args, **kwargs: [
        {"id": "doc-2", "node": "n2", "score": 0.8, "modality": "sparse"}
    ]
    store._query_fulltext_records = lambda *args, **kwargs: [
        {"id": "doc-3", "node": "n3", "score": 0.7, "modality": "fulltext"}
    ]
    # DEFAULT mode requires a query embedding.
    with pytest.raises(ValueError):
        store.query(
            VectorStoreQuery(mode=VectorStoreQueryMode.DEFAULT, similarity_top_k=1)
        )
    result = store.query(
        VectorStoreQuery(
            mode=VectorStoreQueryMode.DEFAULT,
            query_embedding=[0.1],
            similarity_top_k=1,
        )
    )
    assert result.ids == ["doc-1"]
    # SPARSE mode requires an explicit sparse_query kwarg.
    with pytest.raises(ValueError):
        store.query(
            VectorStoreQuery(mode=VectorStoreQueryMode.SPARSE, similarity_top_k=1)
        )
    result = store.query(
        VectorStoreQuery(mode=VectorStoreQueryMode.SPARSE, similarity_top_k=1),
        sparse_query={1: 1.0},
    )
    assert result.ids == ["doc-2"]
    # TEXT_SEARCH mode requires query_str.
    with pytest.raises(ValueError):
        store.query(
            VectorStoreQuery(
                mode=VectorStoreQueryMode.TEXT_SEARCH,
                query_str=None,
                similarity_top_k=1,
            )
        )
    result = store.query(
        VectorStoreQuery(
            mode=VectorStoreQueryMode.TEXT_SEARCH,
            query_str="foo",
            similarity_top_k=1,
        )
    )
    assert result.ids == ["doc-3"]
    # HYBRID mode needs inputs for its modalities; hybrid_top_k caps the fusion.
    with pytest.raises(ValueError):
        store.query(
            VectorStoreQuery(mode=VectorStoreQueryMode.HYBRID, similarity_top_k=1)
        )
    result = store.query(
        VectorStoreQuery(
            mode=VectorStoreQueryMode.HYBRID,
            query_embedding=[0.1],
            query_str="foo",
            similarity_top_k=2,
            hybrid_top_k=1,
        ),
        sparse_query={1: 1.0},
    )
    assert len(result.ids) == 1
def test_init_validations(monkeypatch: pytest.MonkeyPatch) -> None:
    """Constructor must reject inconsistent flags and unknown metric/index types."""
    import pyobvector
    class StubClient:
        pass
    # Accept the stub client and skip real table/index creation.
    monkeypatch.setattr(pyobvector, "ObVecClient", StubClient)
    monkeypatch.setattr(
        ob_base.OceanBaseVectorStore, "_create_table_with_index", lambda self: None
    )
    # Fulltext without sparse support is an invalid combination.
    with pytest.raises(ValidationError):
        ob_base.OceanBaseVectorStore(
            client=StubClient(),
            dim=8,
            include_fulltext=True,
            include_sparse=False,
        )
    with pytest.raises(ValidationError):
        ob_base.OceanBaseVectorStore(
            client=StubClient(),
            dim=8,
            vidx_metric_type="bad",
        )
    with pytest.raises(ValidationError):
        ob_base.OceanBaseVectorStore(
            client=StubClient(),
            dim=8,
            index_type="bad",
        )
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-oceanbase/tests/test_vector_stores_oceanbase_unit.py",
"license": "MIT License",
"lines": 350,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-mcp-discovery/examples/basic_usage.py | import asyncio
from llama_index.core.agent import ReActAgent
from llama_index.core.llms import MockLLM
from llama_index.tools.mcp_discovery import MCPDiscoveryTool
async def main():
    """Demonstrate wiring MCPDiscoveryTool into a ReAct agent (MockLLM, offline)."""
    # 1. Initialize the tool spec; api_url would point at a live MCP
    #    discovery service in a real deployment.
    print("Initializing MCP Discovery Tool...")
    tool_spec = MCPDiscoveryTool(api_url="https://demo.mcp-server.com/api")
    # 2. Expand the spec into individual FunctionTools the agent can call.
    tools = tool_spec.to_tool_list()
    print(f"Loaded tools: {[t.metadata.name for t in tools]}")
    # 3. Build the agent; MockLLM keeps this example self-contained.
    llm = MockLLM()
    agent = ReActAgent.from_tools(tools, llm=llm, verbose=True)
    print(
        "\nAgent is ready! It can now call 'discover_tools' when asked to find new capabilities."
    )
    # Example async interaction (Mocked)
    # response = await agent.achat("I need a tool to calculate the square root of a number.")
if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-mcp-discovery/examples/basic_usage.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-mcp-discovery/llama_index/tools/mcp_discovery/base.py | """MCP Discovery tool spec."""
import aiohttp
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class MCPDiscoveryTool(BaseToolSpec):
    """
    MCP Discovery Tool.

    This tool queries the MCP Discovery API for autonomous tool recommendations.
    It accepts a natural language description of the need and returns a
    human-readable list of recommended MCP servers with name, category, and description.

    Attributes:
        api_url: The URL of the MCP discovery API endpoint.
    """

    spec_functions = ["discover_tools"]

    def __init__(self, api_url: str) -> None:
        """
        Initialize the MCP Discovery Tool.

        Args:
            api_url: The URL of the MCP discovery API endpoint.
        """
        self.api_url = api_url

    async def discover_tools(self, user_request: str, limit: int = 5) -> str:
        """
        Discover tools based on a natural language request.

        This method allows an agent to discover needed tools without human
        intervention. It POSTs the request to the MCP discovery API and returns
        formatted tool recommendations.

        Args:
            user_request: Natural language description of the tool needed.
            limit: Maximum number of tool recommendations to return. Defaults to 5.

        Returns:
            A formatted string containing the discovered tools with their names,
            descriptions, and categories. Returns an error message if the request fails.

        Example:
            >>> tool = MCPDiscoveryTool(api_url="http://localhost:8000/api")
            >>> result = await tool.discover_tools("I need a math calculator", limit=3)
            >>> print(result)
            Found 2 tools:
            1. Name: math-calculator,
               Description: A tool for calculations,
               Category: math
        """
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(
                    self.api_url, json={"need": user_request, "limit": limit}
                ) as response:
                    data = await response.json()
            tools_json = data.get("recommendations", [])
            num = data.get("total_found", -1)
            # Fall back to a generic header when the API omits total_found.
            if num == -1:
                tools = "Following tools are found:\n"
            else:
                tools = f"Found {num} tools:\n"
            for ind, rec in enumerate(tools_json, start=1):
                tools += f"{ind}. Name: {rec.get('name')},\n"
                tools += f"   Description: {rec.get('description')},\n"
                tools += f"   Category: {rec.get('category')}\n\n"
            # Strip unconditionally: previously only the non-empty branch was
            # stripped, so an empty result carried a trailing newline.
            return tools.strip()
        except Exception as e:
            # Agents consume this string directly, so failures are reported
            # in-band instead of raising.
            return f"Error discovering tools: {e}"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-mcp-discovery/llama_index/tools/mcp_discovery/base.py",
"license": "MIT License",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-mcp-discovery/tests/test_mcp_discovery.py | import sys
import asyncio
from unittest.mock import MagicMock
# --- MOCKING DEPENDENCIES START ---
# We mock external dependencies to ensure tests run even in minimal environments.
# Ordering matters: sys.modules must be patched BEFORE the package import below.
# 1. Mock 'aiohttp'
mock_aiohttp = MagicMock()
sys.modules["aiohttp"] = mock_aiohttp
# 2. Mock 'llama_index.core' and 'BaseToolSpec'
# We need strictly what the base.py imports:
# "from llama_index.core.tools.tool_spec.base import BaseToolSpec"
mock_core = MagicMock()
sys.modules["llama_index.core"] = mock_core
sys.modules["llama_index.core.tools"] = mock_core
sys.modules["llama_index.core.tools.tool_spec"] = mock_core
sys.modules["llama_index.core.tools.tool_spec.base"] = mock_core
# Define a real class for BaseToolSpec so inheritance works
class MockBaseToolSpec:
    # Mirrors the real attribute subclasses override.
    spec_functions = []
mock_core.BaseToolSpec = MockBaseToolSpec
# --- MOCKING DEPENDENCIES END ---
# Inherit import paths are resolved relative to the package root usually
# Try correct import based on file structure
try:
    from llama_index.tools.mcp_discovery.base import MCPDiscoveryTool
except ImportError:
    # Fallback if running from a different root
    sys.path.append("llama_index/tools/mcp_discovery")
    from base import MCPDiscoveryTool
# Helper for async context managers (async with ...)
class AsyncContextManager:
def __init__(self, return_value=None, error=None):
self.return_value = return_value
self.error = error
async def __aenter__(self):
if self.error:
raise self.error
return self.return_value
async def __aexit__(self, exc_type, exc_value, traceback):
pass
def test_discover_tools_success():
    """Two recommendations from the API must be rendered into the summary."""
    tool = MCPDiscoveryTool(api_url="http://test-api.com")

    # Mock Data: shape mirrors the discovery API response payload.
    mock_data = {
        "recommendations": [
            {"name": "test-tool", "description": "A test tool", "category": "Testing"},
            {"name": "fancy-tool", "description": "A fancy tool", "category": "Fancy"},
        ],
        "total_found": 2,
    }

    # Setup mocks for aiohttp: response.json() must be awaitable.
    mock_response = MagicMock()

    async def get_json():
        return mock_data

    mock_response.json.side_effect = get_json

    # session.post(...) and ClientSession() are both used as async context
    # managers in the code under test.
    mock_session = MagicMock()
    mock_session.post.return_value = AsyncContextManager(return_value=mock_response)
    sys.modules["aiohttp"].ClientSession.return_value = AsyncContextManager(
        return_value=mock_session
    )

    # Run the coroutine under test.
    result = asyncio.run(tool.discover_tools("help me", limit=2))

    # Assertions: both tools and the count appear in the formatted output.
    assert "Found 2 tools" in result
    assert "1. Name: test-tool" in result
    assert "2. Name: fancy-tool" in result
    assert "A test tool" in result
def test_discover_tools_empty():
    """No recommendations returned -> the summary reports zero tools."""
    tool = MCPDiscoveryTool(api_url="http://test-api.com")

    empty_payload = {"recommendations": [], "total_found": 0}

    async def payload_json():
        return empty_payload

    response_stub = MagicMock()
    response_stub.json.side_effect = payload_json

    session_stub = MagicMock()
    session_stub.post.return_value = AsyncContextManager(return_value=response_stub)
    sys.modules["aiohttp"].ClientSession.return_value = AsyncContextManager(
        return_value=session_stub
    )

    summary = asyncio.run(tool.discover_tools("unlikely query", limit=5))
    assert "Found 0 tools" in summary or "Following tools are found" in summary
def test_discover_tools_error():
    """A transport failure is reported as an error string, not raised."""
    agent = MCPDiscoveryTool(api_url="http://test-api.com")

    # Make entering the ClientSession context raise immediately.
    sys.modules["aiohttp"].ClientSession.return_value = AsyncContextManager(
        error=Exception("Connection refused")
    )

    outcome = asyncio.run(agent.discover_tools("crash"))
    assert "Error discovering tools: Connection refused" in outcome
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-mcp-discovery/tests/test_mcp_discovery.py",
"license": "MIT License",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/voice_agents/llama-index-voice-agents-gemini-live/tests/test_gemini_live.py | from unittest.mock import MagicMock, patch
from llama_index.voice_agents.gemini_live.base import GeminiLiveVoiceAgent
def test_client_header_initialization():
    """The agent must forward a llamaindex user-agent header to the Client."""
    with patch("llama_index.voice_agents.gemini_live.base.Client") as client_cls:
        client_cls.return_value = MagicMock()

        agent = GeminiLiveVoiceAgent(api_key="test-key")
        # Access the lazy property to trigger client construction.
        _ = agent.client

        # Inspect the keyword arguments the Client constructor received.
        _, kwargs = client_cls.call_args
        header_value = kwargs["http_options"]["headers"]["x-goog-api-client"]
        assert header_value.startswith("llamaindex/")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/voice_agents/llama-index-voice-agents-gemini-live/tests/test_gemini_live.py",
"license": "MIT License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-volcenginemysql/examples/volcengine_mysql_vector_store_demo.py | from __future__ import annotations
import asyncio
from typing import List
import os

from volcenginesdkarkruntime import Ark

from llama_index.core import Document, StorageContext, VectorStoreIndex, Settings
from llama_index.core.embeddings import BaseEmbedding
from llama_index.core.vector_stores.types import VectorStoreQuery
from llama_index.vector_stores.volcengine_mysql import VolcengineMySQLVectorStore

# Dimension of the vectors produced by the Ark embedding endpoint; must match
# the `embed_dim` passed to the vector store below.
EMBED_DIM: int = 2048

# Required environment variables for this demo:
# - ARK_API_KEY: API key for Volcengine Ark.
# - ARK_EMBEDDING_MODEL: embedding endpoint/model ID used by ArkEmbedding.
# - ARK_LLM_MODEL: chat completion model endpoint ID used by call_ark_llm.
# - VEM_HOST, VEM_PORT, VEM_USER, VEM_PASSWORD, VEM_DATABASE: MySQL
#   connection info for VolcengineMySQLVectorStore.
# - VEM_TABLE (optional): MySQL table name for the vector store; defaults to
#   "llamaindex_demo" if not set.

# Fail fast at import time when required credentials/models are missing.
EMBEDDING_MODEL = os.getenv("ARK_EMBEDDING_MODEL")
if not EMBEDDING_MODEL:
    raise RuntimeError("Please set ARK_EMBEDDING_MODEL environment variable.")

LLM_MODEL = os.getenv("ARK_LLM_MODEL")
if not LLM_MODEL:
    raise RuntimeError("Please set ARK_LLM_MODEL environment variable.")

ARK_API_KEY = os.getenv("ARK_API_KEY")
if not ARK_API_KEY:
    raise RuntimeError("Please set ARK_API_KEY environment variable for Ark.")

"""Ark client configured identically to the working sample in ~/dev/test/ark.py.
By relying on the SDK defaults (no custom base_url) we avoid proxy issues that
occur when overriding the API host, and we let the SDK route requests correctly
for both embedding and chat endpoints.
"""
ARK_CLIENT = Ark(api_key=ARK_API_KEY)
class ArkEmbedding(BaseEmbedding):
    """Embedding model backed by Volcengine Ark multimodal embeddings.

    Only text inputs are sent; each text is wrapped as a multimodal
    "text" block because the endpoint expects the multimodal format.
    """

    def __init__(self, client: Ark, model: str = EMBEDDING_MODEL) -> None:
        super().__init__()
        # NOTE(review): BaseEmbedding is a pydantic model; assigning
        # undeclared underscore attributes relies on the installed pydantic
        # version tolerating dynamic private attrs — confirm.
        self._client = client
        self._model = model

    def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:  # type: ignore[override]
        """Embed each text individually; vectors are returned in input order."""
        if not texts:
            return []
        vectors: List[List[float]] = []
        for text in texts:
            try:
                # Match the working multimodal embeddings usage from
                # ~/dev/test/ark.py while still only sending text: each input
                # is wrapped as a multimodal "text" block to conform to the
                # endpoint.
                resp = self._client.multimodal_embeddings.create(
                    model=self._model,
                    input=[{"type": "text", "text": text}],
                )
                vectors.append(resp.data.embedding)
            except Exception as exc:  # pragma: no cover
                raise RuntimeError(f"Ark embeddings request failed: {exc}") from exc
        return vectors

    def _get_text_embedding(self, text: str) -> List[float]:  # type: ignore[override]
        # Single-text convenience wrapper around the batch method.
        return self._get_text_embeddings([text])[0]

    def _get_query_embedding(self, query: str) -> List[float]:  # type: ignore[override]
        # Queries are embedded exactly like documents.
        return self._get_text_embedding(query)

    async def _aget_query_embedding(self, query: str) -> List[float]:  # type: ignore[override]
        # No native async API on the client; delegate to the sync path.
        return self._get_query_embedding(query)
def call_ark_llm(prompt: str) -> str:
    """Send *prompt* to the configured Ark chat model and return its reply text."""
    user_message = {
        "role": "user",
        "content": [
            {"type": "text", "text": prompt},
        ],
    }
    response = ARK_CLIENT.chat.completions.create(
        model=LLM_MODEL,
        messages=[user_message],
        reasoning_effort="medium",
        extra_headers={"x-is-encrypted": "true"},
    )
    first_choice = response.choices[0]
    return first_choice.message.content
async def run_async_query_demo(
    vector_store: VolcengineMySQLVectorStore, embed_model: ArkEmbedding, question: str
) -> None:
    """Run the similarity search again through the async query API."""
    print(f"\n=== Async Similarity search for: {question!r} ===")

    qvec = await embed_model.aget_query_embedding(question)
    search_request = VectorStoreQuery(
        query_embedding=qvec,
        similarity_top_k=3,
    )
    hits = await vector_store.aquery(search_request)

    print(f"Top {len(hits.nodes)} results (fetched asynchronously):")
    for rank, node in enumerate(hits.nodes, 1):
        score = hits.similarities[rank - 1] if hits.similarities else None
        if score is None:
            print(f" {rank}.")
        else:
            print(f" {rank}. similarity={score:.4f}")
        print(f" text={node.get_content()[:120]}...")
        print(f" metadata={node.metadata}")

    # Explicitly close the async engine to release connections before the
    # loop closes; otherwise a 'RuntimeError: Event loop is closed' can
    # appear during cleanup.
    await vector_store.aclose()
def build_vector_store() -> VolcengineMySQLVectorStore:
    """Initialize VolcengineMySQLVectorStore from VEM_* environment variables.

    Reads VEM_HOST, VEM_PORT, VEM_USER, VEM_PASSWORD, VEM_DATABASE (all
    required) and VEM_TABLE (optional, defaults to "llamaindex_demo").

    Returns:
        A configured VolcengineMySQLVectorStore with HNSW/L2 indexing.

    Raises:
        RuntimeError: If any required connection variable is missing or
            VEM_PORT is not an integer.
    """
    # Read each variable exactly once (the original called os.getenv twice
    # for VEM_PORT) and validate before parsing so a missing variable gets
    # the friendly message instead of an int() traceback.
    host = os.getenv("VEM_HOST")
    port_raw = os.getenv("VEM_PORT")
    user = os.getenv("VEM_USER")
    password = os.getenv("VEM_PASSWORD")
    database = os.getenv("VEM_DATABASE")
    table_name = os.getenv("VEM_TABLE", "llamaindex_demo")

    if not all([host, port_raw, user, password, database]):
        raise RuntimeError(
            "Please set VEM_HOST, VEM_PORT, VEM_USER, VEM_PASSWORD, VEM_DATABASE for the demo."
        )
    try:
        port = int(port_raw)
    except ValueError as exc:
        raise RuntimeError(f"VEM_PORT must be an integer, got {port_raw!r}.") from exc

    return VolcengineMySQLVectorStore.from_params(
        host=host,
        port=port,
        user=user,
        password=password,
        database=database,
        table_name=table_name,
        embed_dim=EMBED_DIM,
        # Override default SSL-related connect args to match the direct
        # PyMySQL usage known to work against this instance.
        connection_args={"read_timeout": 30},
        ann_index_algorithm="hnsw",
        ann_index_distance="l2",
        ann_m=16,
        ef_search=20,
        perform_setup=True,
        debug=False,
    )
def prepare_documents() -> List[Document]:
    """Build the small demo corpus that gets indexed into the vector store."""
    corpus = [
        (
            "veDB for MySQL is a cloud-native, high-performance "
            "database service from Volcengine. It provides automatic "
            "scaling, high availability, and excellent performance "
            "for modern applications.",
            {"source": "product_docs", "category": "database"},
        ),
        (
            "LlamaIndex is a framework for building LLM applications "
            "over your own data sources and knowledge bases.",
            {"source": "framework_docs", "category": "framework"},
        ),
        (
            "Vector databases enable efficient similarity search for AI "
            "applications by storing high-dimensional embeddings.",
            {"source": "ai_docs", "category": "ai"},
        ),
    ]
    return [Document(text=body, metadata=meta) for body, meta in corpus]
def run_demo() -> None:
    """End-to-end demo: index documents, query them, answer via the Ark LLM."""
    print("Initializing Ark embedding model...")
    embed_model = ArkEmbedding(client=ARK_CLIENT, model=EMBEDDING_MODEL)
    Settings.embed_model = embed_model

    print("Configuring VolcengineMySQLVectorStore...")
    vector_store = build_vector_store()

    print("Preparing documents...")
    docs = prepare_documents()

    print("Building VectorStoreIndex backed by VolcengineMySQLVectorStore...")
    storage_context = StorageContext.from_defaults(vector_store=vector_store)
    index = VectorStoreIndex.from_documents(
        docs,
        storage_context=storage_context,
        embed_model=embed_model,
    )
    print("Index built and stored in MySQL vector table.")

    # Demonstrate a direct vector-store query (bypassing the index retriever).
    question = "What is veDB for MySQL?"
    print(f"\n=== Similarity search for: {question!r} ===")
    query_embedding = embed_model.get_text_embedding(question)
    vs_query = VectorStoreQuery(
        query_embedding=query_embedding,
        similarity_top_k=3,
    )
    result = vector_store.query(vs_query)
    print(f"Top {len(result.nodes)} results:")
    for i, node in enumerate(result.nodes, 1):
        # `similarities` is parallel to `nodes` but may be empty depending on
        # the store implementation, hence the guard.
        sim = result.similarities[i - 1] if result.similarities else None
        if sim is not None:
            print(f" {i}. similarity={sim:.4f}")
        else:
            print(f" {i}.")
        print(f" text={node.get_content()[:120]}...")
        print(f" metadata={node.metadata}")

    # Simple RAG-style call to the Ark LLM using the retrieved context.
    context_text = "\n\n".join(node.get_content() for node in result.nodes)
    prompt = f"""You are a helpful AI assistant. Use the following context to answer the question.
If you don't know the answer, just say you don't know. Do not make up answers.
Context:
{context_text}
Question: {question}
Answer:"""
    print("\n=== Ark LLM answer ===")
    answer = call_ark_llm(prompt)
    print(answer)

    # Run the async query demo (this also disposes the async engine).
    asyncio.run(run_async_query_demo(vector_store, embed_model, question))

    # Cleanup: drop the demo table after the RAG test finishes.
    print("\nCleaning up demo table ...")
    vector_store.drop()
    print("Cleanup complete.")
# Script entry point: run the full end-to-end demo.
if __name__ == "__main__":
    run_demo()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-volcenginemysql/examples/volcengine_mysql_vector_store_demo.py",
"license": "MIT License",
"lines": 217,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-volcenginemysql/llama_index/vector_stores/volcengine_mysql/base.py | """
Volcengine RDS MySQL VectorStore integration for LlamaIndex.
"""
from __future__ import annotations
import json
import logging
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Sequence, Union
from urllib.parse import quote_plus
import sqlalchemy
from sqlalchemy.ext.asyncio import create_async_engine
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.schema import BaseNode, MetadataMode, TextNode
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
FilterCondition,
FilterOperator,
MetadataFilter,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.core.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
_logger = logging.getLogger(__name__)
@dataclass
class DBEmbeddingRow:
    """Internal helper struct representing a row fetched from the DB."""

    node_id: str  # LlamaIndex node id from the `node_id` column
    text: str  # raw node text from the `text` column
    metadata: Dict[str, Any]  # decoded JSON from the `metadata` column
    similarity: float  # similarity score computed by the query
class VolcengineMySQLVectorStore(BasePydanticVectorStore):
    """
    Volcengine RDS MySQL Vector Store.

    LlamaIndex vector store implementation backed by Volcengine RDS
    MySQL with native vector index support (``VECTOR(N)`` + HNSW ANN).

    Capabilities
    ~~~~~~~~~~~~
    - Vector column: ``VECTOR(embed_dim)``.
    - Vector index: ``VECTOR INDEX (embedding) USING HNSW`` or a vector
      index with ``SECONDARY_ENGINE_ATTRIBUTE`` specifying algorithm,
      ``M``, and distance metric, for example::

          SECONDARY_ENGINE_ATTRIBUTE='{"algorithm": "hnsw", "M": "16", "distance": "l2"}'

    - Distance functions:
      - ``L2_DISTANCE(embedding, TO_VECTOR('[...]'))``
      - ``COSINE_DISTANCE(embedding, TO_VECTOR('[...]'))``
    - Server parameters (depending on configuration):
      - ``loose_vector_index_enabled``
      - ``loose_hnsw_ef_search`` and other HNSW-related options.

    Differences from :class:`MariaDBVectorStore`
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    - Uses MySQL ``VECTOR`` columns and ``TO_VECTOR``/``L2_DISTANCE``
      functions instead of MariaDB's ``VECTOR(...)`` together with
      ``VEC_FromText``/``VEC_DISTANCE_COSINE``.
    - Uses ``JSON_EXTRACT`` / ``JSON_UNQUOTE`` to filter on the metadata
      JSON column.
    - Optionally uses ``loose_hnsw_ef_search`` to control ANN search
      breadth.
    """

    # LlamaIndex protocol flags
    stores_text: bool = True  # node text is persisted next to the vector
    flat_metadata: bool = False  # metadata is stored as (possibly nested) JSON

    # Pydantic model fields ( persisted configuration )
    connection_string: str  # SQLAlchemy URL, e.g. mysql+pymysql://user:pwd@host/db
    connection_args: Dict[str, Any]  # extra driver connect args (SSL, timeouts)
    table_name: str  # destination table for vectors
    database: str  # schema name (bookkeeping only; URL decides the target)
    embed_dim: int  # dimension of the VECTOR column
    ann_index_algorithm: str  # vector index algorithm (currently "hnsw")
    ann_index_distance: str  # distance metric: "l2" or "cosine"
    ann_m: int  # HNSW M parameter (max neighbors per node)
    ef_search: int  # HNSW ef_search breadth at query time
    perform_setup: bool  # create table/index and validate on first use
    debug: bool  # enable SQLAlchemy SQL echo

    # Runtime-only attributes (excluded from the pydantic schema)
    _engine: Any = PrivateAttr()  # sync SQLAlchemy engine
    _aengine: Any = PrivateAttr()  # async SQLAlchemy engine
    _is_initialized: bool = PrivateAttr(default=False)
def __init__(
    self,
    connection_string: Union[str, sqlalchemy.engine.URL],
    connection_args: Optional[Dict[str, Any]] = None,
    table_name: str = "llamaindex",
    database: Optional[str] = None,
    embed_dim: int = 1536,
    ann_index_algorithm: str = "hnsw",
    ann_index_distance: str = "l2",
    ann_m: int = 16,
    ef_search: int = 20,
    perform_setup: bool = True,
    debug: bool = False,
) -> None:
    """
    Constructor.

    Args:
        connection_string: SQLAlchemy/MySQL connection string, for
            example ``mysql+pymysql://user:pwd@host:3306/database``.
        connection_args: Extra connection arguments passed to
            SQLAlchemy. Defaults to SSL ``PREFERRED`` plus a 30s read
            timeout when omitted.
        table_name: Name of the table used to store vectors. Defaults
            to ``"llamaindex"``.
        database: Name of the database/schema (for bookkeeping only;
            the actual target is taken from the connection string).
        embed_dim: Embedding dimension. Must match the upstream
            embedding model dimension.
        ann_index_algorithm: Vector index algorithm. RDS MySQL
            currently supports ``"hnsw"``. Normalized to lowercase.
        ann_index_distance: Distance metric, ``"l2"`` or
            ``"cosine"``. Normalized to lowercase.
        ann_m: HNSW parameter ``M`` (maximum number of neighbors per
            node). Affects recall and performance.
        ef_search: HNSW ``ef_search`` parameter controlling search
            breadth at query time.
        perform_setup: If ``True``, perform basic capability checks
            and create the table/index on initialization.
        debug: If ``True``, enable SQLAlchemy SQL logging.
    """
    # All persisted configuration goes through the pydantic base
    # constructor; connections are created lazily on first use.
    super().__init__(
        connection_string=str(connection_string),
        connection_args=connection_args
        or {
            "ssl": {"ssl_mode": "PREFERRED"},
            "read_timeout": 30,
        },
        table_name=table_name,
        database=database or "",
        embed_dim=embed_dim,
        ann_index_algorithm=ann_index_algorithm.lower(),
        ann_index_distance=ann_index_distance.lower(),
        ann_m=ann_m,
        ef_search=ef_search,
        perform_setup=perform_setup,
        debug=debug,
    )
    # Private attrs: engines are created lazily by _initialize/_ainitialize.
    self._engine = None
    self._aengine = None
    self._is_initialized = False
# ------------------------------------------------------------------
# LlamaIndex base metadata
# ------------------------------------------------------------------
@classmethod
def class_name(cls) -> str:
    """Identifier reported to LlamaIndex for this vector store type."""
    type_name = "VolcengineMySQLVectorStore"
    return type_name
@property
def client(self) -> Any:  # type: ignore[override]
    """Underlying synchronous SQLAlchemy engine, or None before initialization."""
    return self._engine if self._is_initialized else None
@property
def aclient(self) -> Any:  # type: ignore[override]
    """Underlying async SQLAlchemy engine, or None before initialization."""
    return self._aengine if self._is_initialized else None
def close(self) -> None:
    """Dispose the sync SQLAlchemy engine and mark the store uninitialized.

    No-op when the store was never initialized.
    """
    if not self._is_initialized:
        return
    # Explicit guard instead of `assert`: asserts are stripped under -O,
    # and a missing engine here should be a harmless no-op, not a crash.
    if self._engine is not None:
        self._engine.dispose()
    self._is_initialized = False
async def aclose(self) -> None:
    """Dispose the async SQLAlchemy engine, if one was created."""
    if self._aengine is None:
        return
    await self._aengine.dispose()
    self._aengine = None
# ------------------------------------------------------------------
# Factory construction
# ------------------------------------------------------------------
@classmethod
def from_params(
    cls,
    host: Optional[str] = None,
    port: Optional[int] = None,
    database: Optional[str] = None,
    user: Optional[str] = None,
    password: Optional[str] = None,
    table_name: str = "llamaindex",
    connection_string: Optional[Union[str, sqlalchemy.engine.URL]] = None,
    connection_args: Optional[Dict[str, Any]] = None,
    embed_dim: int = 1536,
    ann_index_algorithm: str = "hnsw",
    ann_index_distance: str = "l2",
    ann_m: int = 16,
    ef_search: int = 20,
    perform_setup: bool = True,
    debug: bool = False,
) -> "VolcengineMySQLVectorStore":
    """
    Construct a vector store from basic connection parameters.

    Args:
        host: Hostname of the Volcengine RDS MySQL instance.
        port: Port of the MySQL instance (typically 3306).
        database: Database/schema name.
        user: Database username.
        password: Database password (URL-escaped automatically).
        table_name: Name of the table used to store vectors.
        connection_string: Optional full SQLAlchemy connection string.
            If provided, it takes precedence over ``host``/``user``/
            ``password``/``database``.
        connection_args: Optional dict of extra SQLAlchemy connection
            arguments.
        embed_dim: Embedding dimension.
        ann_index_algorithm: Vector index algorithm, typically
            ``"hnsw"``.
        ann_index_distance: Distance metric, ``"l2"`` or
            ``"cosine"``.
        ann_m: HNSW ``M`` parameter.
        ef_search: HNSW ``ef_search`` parameter.
        perform_setup: Whether to create the table/index and validate
            configuration on initialization.
        debug: Whether to emit SQL debug logs.

    Raises:
        ValueError: If no ``connection_string`` is given and any of
            host/port/database/user is missing.
    """
    if connection_string is None:
        if not all([host, port, database, user]):
            raise ValueError(
                "host/port/database/user must all be provided, or pass a full connection_string instead."
            )
        # quote_plus escapes special characters (e.g. '@', '/') so the
        # password cannot corrupt the URL.
        password_safe = quote_plus(password or "")
        connection_string = (
            f"mysql+pymysql://{user}:{password_safe}@{host}:{port}/{database}"
        )
    return cls(
        connection_string=connection_string,
        connection_args=connection_args,
        table_name=table_name,
        database=database,
        embed_dim=embed_dim,
        ann_index_algorithm=ann_index_algorithm,
        ann_index_distance=ann_index_distance,
        ann_m=ann_m,
        ef_search=ef_search,
        perform_setup=perform_setup,
        debug=debug,
    )
# ------------------------------------------------------------------
# Internal initialization & DDL
# ------------------------------------------------------------------
def _connect(self) -> None:
    """Build the synchronous SQLAlchemy engine from the stored settings."""
    engine = sqlalchemy.create_engine(
        self.connection_string,
        connect_args=self.connection_args,
        echo=self.debug,
    )
    self._engine = engine
def _aconnect(self) -> None:
    """Create the async SQLAlchemy engine (idempotent).

    Derives the async connection string from the sync one by swapping
    the driver in the dialect prefix, and strips driver args that
    aiomysql does not accept.
    """
    if self._aengine is not None:
        return
    # Swap the sync driver for the async one. Target the dialect prefix
    # ("mysql+pymysql") rather than any occurrence of "pymysql", so a
    # password, host, or database name containing that substring is never
    # mangled. Replace only the first occurrence for the same reason.
    async_conn_str = self.connection_string.replace(
        "mysql+pymysql", "mysql+aiomysql", 1
    )
    # aiomysql does not support 'read_timeout', which is commonly used
    # with pymysql — filter out incompatible args.
    # NOTE(review): aiomysql's 'ssl' argument format also differs from
    # pymysql's dict form — confirm if SSL connect args are used async.
    filtered_args = {
        k: v for k, v in self.connection_args.items() if k != "read_timeout"
    }
    self._aengine = create_async_engine(
        async_conn_str,
        connect_args=filtered_args,
        echo=self.debug,
    )
def _validate_server_capability(self) -> None:
    """
    Validate that the MySQL server supports Volcengine vector index.

    The current implementation performs only a basic check:

    - Run ``SHOW VARIABLES LIKE 'loose_vector_index_enabled'`` and
      verify that the value is ``ON``.
    - If the variable is missing or disabled, raise an error and ask
      the user to enable it in the RDS console or parameter template.

    This method can be extended to also inspect ``SELECT VERSION()``
    and enforce a minimum server version if needed.

    Raises:
        ValueError: If the vector index feature is missing or disabled.
    """
    assert self._engine is not None
    with self._engine.connect() as connection:
        # SHOW VARIABLES returns (name, value) rows; row[1] is the value.
        result = connection.execute(
            sqlalchemy.text("SHOW VARIABLES LIKE :var"),
            {"var": "loose_vector_index_enabled"},
        )
        row = result.fetchone()
        if not row or str(row[1]).upper() != "ON":
            raise ValueError(
                "Volcengine MySQL vector index is not enabled: please set loose_vector_index_enabled to ON."
            )
def _create_table_if_not_exists(self) -> None:
    """
    Create table with a VECTOR column and HNSW vector index if needed.

    Example schema::

        CREATE TABLE IF NOT EXISTS `table_name` (
            id BIGINT PRIMARY KEY AUTO_INCREMENT,
            node_id VARCHAR(255) NOT NULL,
            text LONGTEXT,
            metadata JSON,
            embedding VECTOR(1536) NOT NULL,
            INDEX idx_node_id (node_id),
            VECTOR INDEX idx_embedding (embedding)
                SECONDARY_ENGINE_ATTRIBUTE='{"algorithm": "hnsw", "M": "16", "distance": "l2"}'
        ) ENGINE = InnoDB;

    Notes
    -----
    - Vector indexes can typically only be created on empty tables, so
      let this class create the table *before* any data is written.
    - If the table already exists without a vector index, this method
      will NOT run ``ALTER TABLE ... ADD VECTOR INDEX`` on existing
      data (to avoid long locks); migrate manually in that case.
    """
    assert self._engine is not None
    # Build the SECONDARY_ENGINE_ATTRIBUTE JSON with json.dumps instead of
    # manual concatenation; all values are strings, as MySQL expects.
    # Output is byte-identical to the hand-built string.
    sec_attr = json.dumps(
        {
            "algorithm": self.ann_index_algorithm,
            "M": str(self.ann_m),
            "distance": self.ann_index_distance,
        }
    )
    create_stmt = f"""
        CREATE TABLE IF NOT EXISTS `{self.table_name}` (
            id BIGINT PRIMARY KEY AUTO_INCREMENT,
            node_id VARCHAR(255) NOT NULL,
            text LONGTEXT,
            metadata JSON,
            embedding VECTOR({self.embed_dim}) NOT NULL,
            INDEX idx_node_id (node_id),
            VECTOR INDEX idx_embedding (embedding)
                SECONDARY_ENGINE_ATTRIBUTE='{sec_attr}'
        ) ENGINE = InnoDB
    """
    with self._engine.connect() as connection:
        connection.execute(sqlalchemy.text(create_stmt))
        connection.commit()
def _initialize(self) -> None:
    """Lazily create the sync engine and run one-time table setup."""
    if self._engine is None:
        self._connect()
    if not self._is_initialized:
        if self.perform_setup:
            self._validate_server_capability()
            self._create_table_if_not_exists()
        self._is_initialized = True
async def _ainitialize(self) -> None:
    """Lazily create the async engine and run one-time table setup."""
    if self._aengine is None:
        self._aconnect()
    if not self._is_initialized:
        if self.perform_setup:
            await self._avalidate_server_capability()
            await self._acreate_table_if_not_exists()
        self._is_initialized = True
async def _avalidate_server_capability(self) -> None:
    """Async version of _validate_server_capability.

    Raises:
        ValueError: If the vector index feature is missing or disabled.
    """
    assert self._aengine is not None
    async with self._aengine.connect() as connection:
        # SHOW VARIABLES returns (name, value) rows; row[1] is the value.
        result = await connection.execute(
            sqlalchemy.text("SHOW VARIABLES LIKE :var"),
            {"var": "loose_vector_index_enabled"},
        )
        row = result.fetchone()
        if not row or str(row[1]).upper() != "ON":
            raise ValueError(
                "Volcengine MySQL vector index is not enabled: please set loose_vector_index_enabled to ON."
            )
async def _acreate_table_if_not_exists(self) -> None:
    """Async version of _create_table_if_not_exists.

    Creates the vector table with an HNSW vector index if it does not
    already exist; see the sync variant for schema notes.
    """
    assert self._aengine is not None
    # Build the SECONDARY_ENGINE_ATTRIBUTE JSON with json.dumps instead of
    # manual concatenation; all values are strings, as MySQL expects.
    # Output is byte-identical to the hand-built string.
    sec_attr = json.dumps(
        {
            "algorithm": self.ann_index_algorithm,
            "M": str(self.ann_m),
            "distance": self.ann_index_distance,
        }
    )
    create_stmt = f"""
        CREATE TABLE IF NOT EXISTS `{self.table_name}` (
            id BIGINT PRIMARY KEY AUTO_INCREMENT,
            node_id VARCHAR(255) NOT NULL,
            text LONGTEXT,
            metadata JSON,
            embedding VECTOR({self.embed_dim}) NOT NULL,
            INDEX idx_node_id (node_id),
            VECTOR INDEX idx_embedding (embedding)
                SECONDARY_ENGINE_ATTRIBUTE='{sec_attr}'
        ) ENGINE = InnoDB
    """
    async with self._aengine.connect() as connection:
        await connection.execute(sqlalchemy.text(create_stmt))
        await connection.commit()
# ------------------------------------------------------------------
# Helpers for (de)serializing nodes and filters
# ------------------------------------------------------------------
def _node_to_table_row(self, node: BaseNode) -> Dict[str, Any]:
    """Serialize *node* into the column values used by add/async_add."""
    serialized_meta = node_to_metadata_dict(
        node,
        remove_text=True,
        flat_metadata=self.flat_metadata,
    )
    return {
        "node_id": node.node_id,
        "text": node.get_content(metadata_mode=MetadataMode.NONE),
        "embedding": node.get_embedding(),
        "metadata": serialized_meta,
    }
def _to_mysql_operator(self, operator: FilterOperator) -> str:
    """Translate a LlamaIndex FilterOperator into its SQL spelling.

    Unknown operators log a warning and fall back to equality.
    """
    sql_by_operator = {
        FilterOperator.EQ: "=",
        FilterOperator.GT: ">",
        FilterOperator.LT: "<",
        FilterOperator.NE: "!=",
        FilterOperator.GTE: ">=",
        FilterOperator.LTE: "<=",
        FilterOperator.IN: "IN",
        FilterOperator.NIN: "NOT IN",
    }
    try:
        return sql_by_operator[operator]
    except KeyError:
        _logger.warning("Unsupported operator: %s, fallback to '='", operator)
        return "="
def _build_filter_clause(
    self,
    filter_: MetadataFilter,
    params: Dict[str, Any],
    param_counter: List[int],
) -> str:
    """
    Build a single metadata filter expression for the JSON column.

    Rules:

    - For string values use ``JSON_UNQUOTE(JSON_EXTRACT(...))`` in
      comparisons.
    - For numeric values compare the result of ``JSON_EXTRACT(...)``
      directly.
    - For ``IN``/``NIN`` operators build a ``(v1, v2, ...)`` value
      list.

    Args:
        filter_: The single filter to render.
        params: Output dict of bind-parameter values, filled in place.
        param_counter: One-element list used as a mutable counter so
            parameter names stay unique across nested calls.
    """
    # NOTE(review): the metadata key is interpolated directly into the
    # JSON path — keys are assumed to come from trusted application code,
    # not user input; confirm before exposing filters to untrusted callers.
    key_expr = f"JSON_EXTRACT(metadata, '$.{filter_.key}')"
    value = filter_.value
    if filter_.operator in [FilterOperator.IN, FilterOperator.NIN]:
        assert isinstance(value, list), (
            "The value for an IN/NIN filter must be a list"
        )
        param_keys: List[str] = []
        for v in value:
            param_name = f"filter_param_{param_counter[0]}"
            param_counter[0] += 1
            # For IN/NIN, we always compare as strings after JSON_UNQUOTE
            if isinstance(v, str):
                params[param_name] = v
            else:
                params[param_name] = str(v)
            param_keys.append(f":{param_name}")
        filter_value = f"({', '.join(param_keys)})"
        return f"JSON_UNQUOTE({key_expr}) {self._to_mysql_operator(filter_.operator)} {filter_value}"
    # Scalar comparison
    param_name = f"filter_param_{param_counter[0]}"
    param_counter[0] += 1
    params[param_name] = value
    if isinstance(value, str):
        expr = f"JSON_UNQUOTE({key_expr}) {self._to_mysql_operator(filter_.operator)} :{param_name}"
    else:
        # For numeric or other non-string values, compare the JSON_EXTRACT
        # result directly.
        expr = (
            f"{key_expr} {self._to_mysql_operator(filter_.operator)} :{param_name}"
        )
    return expr
def _filters_to_where_clause(
    self,
    filters: MetadataFilters,
    params: Dict[str, Any],
    param_counter: List[int],
) -> str:
    """Convert a MetadataFilters tree into a SQL WHERE clause (without 'WHERE').

    Args:
        filters: Possibly-nested filter tree combined with AND/OR.
        params: Output dict of bind-parameter values, filled in place.
        param_counter: One-element list used as a mutable counter so
            nested calls generate unique parameter names.

    Returns:
        The rendered boolean expression; nested groups are parenthesized.

    Raises:
        ValueError: On an unsupported condition or filter node type.
    """
    conditions_map = {
        FilterCondition.OR: "OR",
        FilterCondition.AND: "AND",
    }
    if filters.condition not in conditions_map:
        raise ValueError(
            f"Unsupported condition: {filters.condition}. "
            f"Must be one of {list(conditions_map.keys())}"
        )
    clauses: List[str] = []
    for f in filters.filters:
        if isinstance(f, MetadataFilter):
            clauses.append(self._build_filter_clause(f, params, param_counter))
        elif isinstance(f, MetadataFilters):
            sub = self._filters_to_where_clause(f, params, param_counter)
            if sub:
                clauses.append(f"({sub})")
        else:
            # BUG FIX: the first literal was missing its f-prefix, so the
            # message printed "{type(f)}" verbatim instead of the type.
            raise ValueError(
                f"Unsupported filter type: {type(f)}. Must be one of "
                "MetadataFilter, MetadataFilters"
            )
    return f" {conditions_map[filters.condition]} ".join(clauses)
def _db_rows_to_query_result(
    self, rows: List[DBEmbeddingRow]
) -> VectorStoreQueryResult:
    """Translate fetched DB rows into a LlamaIndex VectorStoreQueryResult."""
    nodes: List[BaseNode] = []
    similarities: List[float] = []
    ids: List[str] = []
    for row in rows:
        meta = row.metadata or {}
        # Rows written through `add` carry the serialized node under the
        # "_node_content" key; rebuild the original node in that case.
        # Otherwise fall back to a plain TextNode so results are still
        # meaningful when only custom metadata was stored.
        if isinstance(meta, dict) and meta.get("_node_content") is not None:
            rebuilt = metadata_dict_to_node(meta)
            rebuilt.set_content(str(row.text))
        else:
            rebuilt = TextNode(text=str(row.text), id_=row.node_id, metadata=meta)
        nodes.append(rebuilt)
        ids.append(row.node_id)
        similarities.append(row.similarity)
    return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)
# ------------------------------------------------------------------
# Public API: get_nodes / add / delete / query
# ------------------------------------------------------------------
def get_nodes(
    self,
    node_ids: Optional[List[str]] = None,
    filters: Optional[MetadataFilters] = None,
) -> List[BaseNode]:  # type: ignore[override]
    """
    Get nodes by ``node_ids``.

    Note:
        The current implementation only supports exact lookup by
        ``node_ids`` and ignores the ``filters`` argument.
    """
    self._initialize()
    if not node_ids:
        return []
    # Use an expanding bind parameter so the IN list is rendered safely.
    stmt_str = (
        f"SELECT text, metadata FROM `{self.table_name}` WHERE node_id IN :node_ids"
    )
    stmt = sqlalchemy.text(stmt_str).bindparams(
        sqlalchemy.bindparam("node_ids", expanding=True)
    )
    assert self._engine is not None
    with self._engine.connect() as connection:
        result = connection.execute(stmt, {"node_ids": node_ids})
    nodes: List[BaseNode] = []
    for item in result:
        # The JSON column may come back as a str or an already-decoded dict
        # depending on the driver; normalize to a dict.
        raw_meta = item.metadata
        metadata = json.loads(raw_meta) if isinstance(raw_meta, str) else raw_meta
        # Rebuild the original node when the serialized node content is
        # present; otherwise return a plain TextNode around the stored text.
        if isinstance(metadata, dict) and metadata.get("_node_content") is not None:
            node = metadata_dict_to_node(metadata)
            node.set_content(str(item.text))
        else:
            node = TextNode(text=str(item.text), metadata=metadata or {})
        nodes.append(node)
    return nodes
def add(
    self,
    nodes: Sequence[BaseNode],
    **kwargs: Any,
) -> List[str]:  # type: ignore[override]
    """
    Add nodes with embeddings into the MySQL vector store.

    Expectations:

    - Each :class:`BaseNode` in ``nodes`` must already contain an
      ``embedding`` (normally computed by the index or embedding
      model upstream).
    - The embedding is serialized as a JSON array string and passed
      to ``TO_VECTOR(:embedding)`` when inserting into the
      ``VECTOR`` column.
    - Rows are inserted in batch using ``executemany`` semantics to
      reduce round trips.

    Returns:
        The ids of the inserted nodes, in input order.
    """
    self._initialize()
    if not nodes:
        return []
    ids: List[str] = []
    rows: List[Dict[str, Any]] = []
    for node in nodes:
        ids.append(node.node_id)
        item = self._node_to_table_row(node)
        rows.append(
            {
                "node_id": item["node_id"],
                "text": item["text"],
                # TO_VECTOR expects a string like "[1.0,2.0,...]"
                "embedding": json.dumps(item["embedding"]),
                "metadata": json.dumps(item["metadata"]),
            }
        )
    insert_stmt = sqlalchemy.text(
        f"""
        INSERT INTO `{self.table_name}` (node_id, text, embedding, metadata)
        VALUES (:node_id, :text, TO_VECTOR(:embedding), :metadata)
        """
    )
    assert self._engine is not None
    with self._engine.connect() as connection:
        # Passing a list of dicts triggers executemany batching.
        connection.execute(insert_stmt, rows)
        connection.commit()
    return ids
async def async_add( # type: ignore[override]
self,
nodes: Sequence[BaseNode],
**kwargs: Any,
) -> List[str]:
"""
Add nodes with embeddings into the MySQL vector store asynchronously.
"""
await self._ainitialize()
if not nodes:
return []
ids: List[str] = []
rows: List[Dict[str, Any]] = []
for node in nodes:
ids.append(node.node_id)
item = self._node_to_table_row(node)
rows.append(
{
"node_id": item["node_id"],
"text": item["text"],
# TO_VECTOR expects a string like "[1.0,2.0,...]"
"embedding": json.dumps(item["embedding"]),
"metadata": json.dumps(item["metadata"]),
}
)
insert_stmt = sqlalchemy.text(
f"""
INSERT INTO `{self.table_name}` (node_id, text, embedding, metadata)
VALUES (:node_id, :text, TO_VECTOR(:embedding), :metadata)
"""
)
async with self._aengine.connect() as connection:
await connection.execute(insert_stmt, rows)
await connection.commit()
return ids
def delete(
self,
ref_doc_id: str,
**delete_kwargs: Any,
) -> None: # type: ignore[override]
"""Delete all nodes whose metadata.ref_doc_id equals the given value."""
self._initialize()
if not ref_doc_id:
return
stmt = sqlalchemy.text(
f"""
DELETE FROM `{self.table_name}`
WHERE JSON_EXTRACT(metadata, '$.ref_doc_id') = :doc_id
"""
)
assert self._engine is not None
with self._engine.connect() as connection:
connection.execute(stmt, {"doc_id": ref_doc_id})
connection.commit()
async def adelete( # type: ignore[override]
self,
ref_doc_id: str,
**delete_kwargs: Any,
) -> None:
"""Async wrapper around :meth:`delete`."""
await self._ainitialize()
if not ref_doc_id:
return
stmt = sqlalchemy.text(
f"""
DELETE FROM `{self.table_name}`
WHERE JSON_EXTRACT(metadata, '$.ref_doc_id') = :doc_id
"""
)
async with self._aengine.connect() as connection:
await connection.execute(stmt, {"doc_id": ref_doc_id})
await connection.commit()
def delete_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None: # type: ignore[override]
"""
Delete nodes by ``node_ids``.
Note:
The current implementation only supports deletion by
``node_ids`` and ignores ``filters``.
"""
self._initialize()
if not node_ids:
return
stmt_str = f"DELETE FROM `{self.table_name}` WHERE node_id IN :node_ids"
stmt = sqlalchemy.text(stmt_str).bindparams(
sqlalchemy.bindparam("node_ids", expanding=True)
)
assert self._engine is not None
with self._engine.connect() as connection:
connection.execute(stmt, {"node_ids": node_ids})
connection.commit()
async def adelete_nodes( # type: ignore[override]
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None:
"""Async wrapper around :meth:`delete_nodes`."""
await self._ainitialize()
if not node_ids:
return
stmt_str = f"DELETE FROM `{self.table_name}` WHERE node_id IN :node_ids"
stmt = sqlalchemy.text(stmt_str).bindparams(
sqlalchemy.bindparam("node_ids", expanding=True)
)
async with self._aengine.connect() as connection:
await connection.execute(stmt, {"node_ids": node_ids})
await connection.commit()
def count(self) -> int:
"""Return total number of rows in the table."""
self._initialize()
stmt = sqlalchemy.text(f"SELECT COUNT(*) FROM `{self.table_name}`")
assert self._engine is not None
with self._engine.connect() as connection:
result = connection.execute(stmt)
value = result.scalar()
return int(value or 0)
def drop(self) -> None:
"""Drop the underlying table and dispose the engine."""
self._initialize()
stmt = sqlalchemy.text(f"DROP TABLE IF EXISTS `{self.table_name}`")
assert self._engine is not None
with self._engine.connect() as connection:
connection.execute(stmt)
connection.commit()
self.close()
def clear(self) -> None: # type: ignore[override]
"""Delete all rows from the table (keep schema & indexes)."""
self._initialize()
stmt = sqlalchemy.text(f"DELETE FROM `{self.table_name}`")
assert self._engine is not None
with self._engine.connect() as connection:
connection.execute(stmt)
connection.commit()
async def aclear(self) -> None: # type: ignore[override]
"""Async wrapper around :meth:`clear`."""
await self._ainitialize()
stmt = sqlalchemy.text(f"DELETE FROM `{self.table_name}`")
async with self._aengine.connect() as connection:
await connection.execute(stmt)
await connection.commit()
def _build_distance_expression(self) -> str:
"""
Return the SQL distance expression template used in ORDER BY.
The returned string uses a named bind parameter ``:query_embedding``
(serialized JSON array string) and the ``embedding`` column.
"""
if self.ann_index_distance == "cosine":
func_name = "COSINE_DISTANCE"
else:
# Default to L2 distance
func_name = "L2_DISTANCE"
return f"{func_name}(embedding, TO_VECTOR(:query_embedding))"
    def query(  # type: ignore[override]
        self,
        query: VectorStoreQuery,
        **kwargs: Any,
    ) -> VectorStoreQueryResult:
        """
        Execute a vector similarity search on Volcengine RDS MySQL.

        - Only ``VectorStoreQueryMode.DEFAULT`` is supported.
        - The database-side vector index and distance functions are used
          to perform ANN/KNN search.
        - :class:`MetadataFilters` are translated into a ``WHERE``
          clause over the JSON ``metadata`` column.
        - Returned similarities are computed as ``1 / (1 + distance)``.

        Args:
            query: The query; ``query_embedding`` must be provided.
            **kwargs: Unused; accepted for interface compatibility.

        Returns:
            VectorStoreQueryResult: top ``similarity_top_k`` matches,
            ordered by ascending distance.

        Raises:
            NotImplementedError: If ``query.mode`` is not DEFAULT.
            ValueError: If ``query.query_embedding`` is missing.
        """
        if query.mode != VectorStoreQueryMode.DEFAULT:
            raise NotImplementedError(f"Query mode {query.mode} not available.")
        if query.query_embedding is None:
            raise ValueError(
                "VolcengineMySQLVectorStore only supports embedding-based queries; query_embedding must be provided"
            )
        self._initialize()
        distance_expr = self._build_distance_expression()
        base_stmt = f"""
        SELECT
            node_id,
            text,
            metadata,
            {distance_expr} AS distance
        FROM `{self.table_name}`
        """
        # Metadata filters
        params = {
            "query_embedding": json.dumps(query.query_embedding),
            "limit": int(query.similarity_top_k),
        }
        if query.filters is not None:
            # param_counter is a single-element list so the recursive filter
            # builder can mutate it in place while generating unique names.
            param_counter = [0]
            where_clause = self._filters_to_where_clause(
                query.filters, params, param_counter
            )
            if where_clause:
                base_stmt += f" WHERE {where_clause}"
        base_stmt += " ORDER BY distance LIMIT :limit"
        rows: List[DBEmbeddingRow] = []
        assert self._engine is not None
        with self._engine.connect() as connection:
            # Optionally set ef_search, which affects recall and latency
            if self.ef_search:
                try:
                    connection.execute(
                        sqlalchemy.text(
                            "SET SESSION loose_hnsw_ef_search = :ef_search"
                        ),
                        {"ef_search": int(self.ef_search)},
                    )
                except Exception:  # pragma: no cover - tolerate cases where the parameter does not exist
                    _logger.warning(
                        "Failed to set loose_hnsw_ef_search, continue without it.",
                        exc_info=True,
                    )
            result = connection.execute(sqlalchemy.text(base_stmt), params)
            for item in result:
                # metadata may come back as a JSON string or an already-decoded
                # dict depending on the driver; normalize to a dict.
                raw_meta = item.metadata
                metadata = (
                    json.loads(raw_meta) if isinstance(raw_meta, str) else raw_meta
                )
                distance = float(item.distance) if item.distance is not None else 0.0
                # Map distance (0 = identical) onto a (0, 1] similarity score.
                similarity = 1.0 / (1.0 + distance)
                rows.append(
                    DBEmbeddingRow(
                        node_id=item.node_id,
                        text=item.text,
                        metadata=metadata,
                        similarity=similarity,
                    )
                )
        return self._db_rows_to_query_result(rows)
    async def aquery(  # type: ignore[override]
        self,
        query: VectorStoreQuery,
        **kwargs: Any,
    ) -> VectorStoreQueryResult:
        """
        Async wrapper around :meth:`query`.

        Same semantics and SQL as the sync path, executed on the async
        engine created by ``_ainitialize``.

        Raises:
            NotImplementedError: If ``query.mode`` is not DEFAULT.
            ValueError: If ``query.query_embedding`` is missing.
        """
        if query.mode != VectorStoreQueryMode.DEFAULT:
            raise NotImplementedError(f"Query mode {query.mode} not available.")
        if query.query_embedding is None:
            raise ValueError(
                "VolcengineMySQLVectorStore only supports embedding-based queries; query_embedding must be provided"
            )
        await self._ainitialize()
        distance_expr = self._build_distance_expression()
        base_stmt = f"""
        SELECT
            node_id,
            text,
            metadata,
            {distance_expr} AS distance
        FROM `{self.table_name}`
        """
        # Metadata filters
        params = {
            "query_embedding": json.dumps(query.query_embedding),
            "limit": int(query.similarity_top_k),
        }
        if query.filters is not None:
            param_counter = [0]
            where_clause = self._filters_to_where_clause(
                query.filters, params, param_counter
            )
            if where_clause:
                base_stmt += f" WHERE {where_clause}"
        base_stmt += " ORDER BY distance LIMIT :limit"
        rows: List[DBEmbeddingRow] = []
        async with self._aengine.connect() as connection:
            # Optionally set ef_search, which affects recall and latency
            if self.ef_search:
                try:
                    await connection.execute(
                        sqlalchemy.text(
                            "SET SESSION loose_hnsw_ef_search = :ef_search"
                        ),
                        {"ef_search": int(self.ef_search)},
                    )
                except Exception:
                    _logger.warning(
                        "Failed to set loose_hnsw_ef_search, continue without it.",
                        exc_info=True,
                    )
            result = await connection.execute(sqlalchemy.text(base_stmt), params)
            # AsyncResult rows are synchronously iterable once awaited.
            for item in result:
                raw_meta = item.metadata
                metadata = (
                    json.loads(raw_meta) if isinstance(raw_meta, str) else raw_meta
                )
                distance = float(item.distance) if item.distance is not None else 0.0
                # Map distance (0 = identical) onto a (0, 1] similarity score.
                similarity = 1.0 / (1.0 + distance)
                rows.append(
                    DBEmbeddingRow(
                        node_id=item.node_id,
                        text=item.text,
                        metadata=metadata,
                        similarity=similarity,
                    )
                )
        return self._db_rows_to_query_result(rows)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-volcenginemysql/llama_index/vector_stores/volcengine_mysql/base.py",
"license": "MIT License",
"lines": 918,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-volcenginemysql/tests/test_vector_stores_volcenginemysql.py | """
Tests for `VolcengineMySQLVectorStore`.
These tests are intentionally written as "SQL-construction" unit tests:
- No real database is contacted.
- We patch `sqlalchemy.create_engine()` and assert that the resulting SQLAlchemy
statements contain the expected clauses.
This keeps the suite deterministic and runnable in CI without provisioning a
Volcengine MySQL instance.
"""
import unittest
from unittest.mock import AsyncMock, MagicMock, Mock, patch
import pytest
from llama_index.core.schema import TextNode
from llama_index.core.vector_stores.types import (
FilterOperator,
MetadataFilter,
MetadataFilters,
VectorStoreQuery,
)
from llama_index.vector_stores.volcengine_mysql import VolcengineMySQLVectorStore
# A syntactically-valid connection string used across unit tests. We never
# connect to the target; `sqlalchemy.create_engine` is always patched.
_TEST_CONN_STR = "mysql+pymysql://user:pass@localhost:3306/db"
def test_vector_store_reports_class_name() -> None:
    """`class_name()` is used for registry / serialization roundtrips."""
    # Static contract: changing this string would break persisted indexes.
    assert VolcengineMySQLVectorStore.class_name() == "VolcengineMySQLVectorStore"
def test_client_property_tracks_engine_lifecycle() -> None:
    """`client` should be non-None after initialization and cleared on close()."""
    # Engine creation, server validation and DDL are all patched out so the
    # lifecycle can be exercised without a live database.
    with patch("sqlalchemy.create_engine") as mock_create_engine:
        with patch.object(VolcengineMySQLVectorStore, "_validate_server_capability"):
            with patch.object(
                VolcengineMySQLVectorStore, "_create_table_if_not_exists"
            ):
                store = VolcengineMySQLVectorStore(
                    connection_string=_TEST_CONN_STR,
                    perform_setup=True,
                )
                # Initially None because initialization is lazy
                assert store.client is None
                # Trigger initialization
                store._initialize()
                assert store.client is not None
                store.close()
                assert store.client is None
def test_constructor_with_setup_runs_validation_and_table_creation() -> None:
    """When setup is requested, the store validates server features and creates tables."""
    with patch("sqlalchemy.create_engine") as mock_create_engine:
        with patch.object(
            VolcengineMySQLVectorStore, "_validate_server_capability"
        ) as mock_validate:
            with patch.object(
                VolcengineMySQLVectorStore, "_create_table_if_not_exists"
            ) as mock_create_table:
                store = VolcengineMySQLVectorStore(
                    connection_string=_TEST_CONN_STR,
                    perform_setup=True,
                )
                # Initialize to trigger calls (the constructor itself is lazy).
                store._initialize()
                mock_create_engine.assert_called_once()
                mock_validate.assert_called_once()
                mock_create_table.assert_called_once()
                assert store._is_initialized is True
def test_constructor_without_setup_skips_validation_and_ddl() -> None:
    """When setup is disabled, we still create an engine but skip DDL and validation."""
    with patch("sqlalchemy.create_engine") as mock_create_engine:
        with patch.object(
            VolcengineMySQLVectorStore, "_validate_server_capability"
        ) as mock_validate:
            with patch.object(
                VolcengineMySQLVectorStore, "_create_table_if_not_exists"
            ) as mock_create_table:
                store = VolcengineMySQLVectorStore(
                    connection_string=_TEST_CONN_STR,
                    perform_setup=False,
                )
                # Initialize to trigger calls
                store._initialize()
                # Connection is established but setup steps are skipped
                # (perform_setup=False targets pre-provisioned tables).
                mock_create_engine.assert_called_once()
                mock_validate.assert_not_called()
                mock_create_table.assert_not_called()
                assert store._is_initialized is True
def test_from_params_builds_connection_string_and_forwards_kwargs() -> None:
    """`from_params()` should synthesize a MySQL connection string and pass through options."""
    # Patching __init__ lets us inspect exactly what from_params forwards
    # without constructing a real store.
    with patch.object(
        VolcengineMySQLVectorStore, "__init__", return_value=None
    ) as mock_init:
        VolcengineMySQLVectorStore.from_params(
            host="localhost",
            port=3306,
            user="user",
            password="password",
            database="db",
            table_name="custom_table",
        )
        mock_init.assert_called_once()
        call_kwargs = mock_init.call_args[1]
        assert (
            "mysql+pymysql://user:password@localhost:3306/db"
            in call_kwargs["connection_string"]
        )
        assert call_kwargs["table_name"] == "custom_table"
def test_validate_server_capability_accepts_enabled_flag_and_rejects_disabled() -> None:
    """The feature flag `loose_vector_index_enabled` must be ON for vector search."""
    mock_engine = MagicMock()
    mock_connection = MagicMock()
    mock_result = MagicMock()
    # Successful case: SHOW VARIABLES-style (name, value) row reports ON.
    mock_result.fetchone.return_value = ["loose_vector_index_enabled", "ON"]
    mock_connection.execute.return_value = mock_result
    mock_engine.connect.return_value.__enter__.return_value = mock_connection
    with patch("sqlalchemy.create_engine", return_value=mock_engine):
        store = VolcengineMySQLVectorStore(
            connection_string=_TEST_CONN_STR,
            perform_setup=False,
        )
        store._engine = mock_engine
        store._validate_server_capability()
    # Failure case: the same probe reporting OFF must raise.
    mock_result.fetchone.return_value = ["loose_vector_index_enabled", "OFF"]
    with patch("sqlalchemy.create_engine", return_value=mock_engine):
        store = VolcengineMySQLVectorStore(
            connection_string=_TEST_CONN_STR,
            perform_setup=False,
        )
        store._engine = mock_engine
        with pytest.raises(
            ValueError, match="Volcengine MySQL vector index is not enabled"
        ):
            store._validate_server_capability()
def test_create_table_if_missing_emits_vector_index_ddl() -> None:
    """The DDL should include a vector column and the corresponding VECTOR INDEX."""
    mock_engine = MagicMock()
    mock_connection = MagicMock()
    mock_engine.connect.return_value.__enter__.return_value = mock_connection
    with patch("sqlalchemy.create_engine", return_value=mock_engine):
        store = VolcengineMySQLVectorStore(
            connection_string=_TEST_CONN_STR,
            table_name="test_table",
            perform_setup=False,
        )
        store._engine = mock_engine
        store._create_table_if_not_exists()
        mock_connection.execute.assert_called_once()
        # Inspect the emitted DDL statement text rather than hitting a server.
        stmt = mock_connection.execute.call_args[0][0]
        assert "CREATE TABLE IF NOT EXISTS `test_table`" in str(stmt)
        assert "VECTOR INDEX idx_embedding (embedding)" in str(stmt)
def test_add_inserts_rows_and_returns_node_ids() -> None:
    """`add()` should batch-insert rows and return the IDs in the same order."""
    mock_engine = MagicMock()
    mock_connection = MagicMock()
    mock_engine.connect.return_value.__enter__.return_value = mock_connection
    with patch("sqlalchemy.create_engine", return_value=mock_engine):
        store = VolcengineMySQLVectorStore(
            connection_string=_TEST_CONN_STR,
            perform_setup=False,
        )
        # In production these are produced by an index / embedding model. In the unit
        # test we provide fixed vectors so we can assert on the SQL payload.
        nodes = [
            TextNode(
                text="test text 1",
                id_="id1",
                embedding=[1.0, 2.0],
                metadata={"key": "val1"},
            ),
            TextNode(
                text="test text 2",
                id_="id2",
                embedding=[3.0, 4.0],
                metadata={"key": "val2"},
            ),
        ]
        ids = store.add(nodes)
        assert ids == ["id1", "id2"]
        # A single execute call with a list of param dicts = one batch insert.
        mock_connection.execute.assert_called_once()
        # Verify call args
        args = mock_connection.execute.call_args
        assert "INSERT INTO `llamaindex`" in str(args[0][0])
        assert len(args[0][1]) == 2  # 2 rows
def test_delete_by_ref_doc_id_builds_expected_where_clause() -> None:
    """`delete()` removes rows by `ref_doc_id` stored inside the JSON metadata."""
    mock_engine = MagicMock()
    mock_connection = MagicMock()
    mock_engine.connect.return_value.__enter__.return_value = mock_connection
    with patch("sqlalchemy.create_engine", return_value=mock_engine):
        store = VolcengineMySQLVectorStore(
            connection_string=_TEST_CONN_STR,
            perform_setup=False,
        )
        store.delete(ref_doc_id="ref_id_1")
        mock_connection.execute.assert_called_once()
        # The predicate must target the JSON metadata column, not a real column.
        stmt = mock_connection.execute.call_args[0][0]
        assert "DELETE FROM `llamaindex`" in str(stmt)
        assert "JSON_EXTRACT(metadata, '$.ref_doc_id') = :doc_id" in str(stmt)
def test_delete_nodes_uses_in_clause_for_node_ids() -> None:
    """`delete_nodes()` should translate a list of IDs into a SQL IN clause."""
    mock_engine = MagicMock()
    mock_connection = MagicMock()
    mock_engine.connect.return_value.__enter__.return_value = mock_connection
    with patch("sqlalchemy.create_engine", return_value=mock_engine):
        store = VolcengineMySQLVectorStore(
            connection_string=_TEST_CONN_STR,
            perform_setup=False,
        )
        store.delete_nodes(node_ids=["id1", "id2"])
        mock_connection.execute.assert_called_once()
        stmt = mock_connection.execute.call_args[0][0]
        assert "DELETE FROM `llamaindex`" in str(stmt)
        # Note: SQLAlchemy expands IN parameters into a special
        # "__[POSTCOMPILE_...]" placeholder. We assert on the existence of the
        # IN clause rather than matching the parameter token.
        assert "WHERE node_id IN" in str(stmt)
def test_query_returns_scored_nodes_from_distance() -> None:
    """`query()` should return nodes + similarities derived from the distance metric."""
    mock_engine = MagicMock()
    mock_connection = MagicMock()
    mock_result = MagicMock()
    # The vector store reads columns from SQLAlchemy result rows by attribute.
    # We model a single returned row to keep the unit test focused.
    row = Mock()
    row.node_id = "id1"
    row.text = "text1"
    row.metadata = '{"key": "val"}'
    row.distance = 0.1
    mock_result.__iter__.return_value = [row]
    mock_connection.execute.return_value = mock_result
    mock_engine.connect.return_value.__enter__.return_value = mock_connection
    with patch("sqlalchemy.create_engine", return_value=mock_engine):
        store = VolcengineMySQLVectorStore(
            connection_string=_TEST_CONN_STR,
            perform_setup=False,
        )
        query = VectorStoreQuery(
            query_embedding=[1.0, 2.0],
            similarity_top_k=2,
        )
        result = store.query(query)
        assert len(result.nodes) == 1
        assert result.nodes[0].node_id == "id1"
        # Implementation detail: similarity is derived from distance.
        # Similarity = 1 / (1 + distance) = 1 / 1.1 ~= 0.909
        assert pytest.approx(result.similarities[0], 0.001) == 0.909
        mock_connection.execute.assert_called()
        # Verify query structure (last call: an optional SET SESSION may precede it).
        stmt = str(mock_connection.execute.call_args_list[-1][0][0])
        assert "SELECT" in stmt
        assert "L2_DISTANCE" in stmt  # Default is l2
        assert "ORDER BY distance LIMIT :limit" in stmt
def test_query_applies_metadata_filters_as_json_extract_predicates() -> None:
    """Metadata filters are stored in a JSON column and translated into predicates."""
    mock_engine = MagicMock()
    mock_connection = MagicMock()
    mock_result = MagicMock()
    # No rows needed; the test only inspects the generated SQL.
    mock_result.__iter__.return_value = []
    mock_connection.execute.return_value = mock_result
    mock_engine.connect.return_value.__enter__.return_value = mock_connection
    with patch("sqlalchemy.create_engine", return_value=mock_engine):
        store = VolcengineMySQLVectorStore(
            connection_string=_TEST_CONN_STR,
            perform_setup=False,
        )
        # Use a mix of string and numeric filters to exercise both quoting and
        # numeric comparison paths.
        filters = MetadataFilters(
            filters=[
                MetadataFilter(key="key1", value="val1", operator=FilterOperator.EQ),
                MetadataFilter(key="key2", value=10, operator=FilterOperator.GT),
            ]
        )
        query = VectorStoreQuery(
            query_embedding=[1.0, 2.0],
            filters=filters,
        )
        store.query(query)
        stmt = str(mock_connection.execute.call_args_list[-1][0][0])
        assert "WHERE" in stmt
        assert (
            "JSON_UNQUOTE(JSON_EXTRACT(metadata, '$.key1')) = :filter_param_0" in stmt
        )
        assert "JSON_EXTRACT(metadata, '$.key2') > :filter_param_1" in stmt
def test_get_nodes_fetches_text_and_metadata_for_ids() -> None:
    """`get_nodes()` should fetch the stored payload for the requested IDs."""
    mock_engine = MagicMock()
    mock_connection = MagicMock()
    mock_result = MagicMock()
    # Rows are read by attribute (text / metadata); model one result row.
    row = Mock()
    row.text = "text1"
    row.metadata = '{"key": "val"}'
    mock_result.__iter__.return_value = [row]
    mock_connection.execute.return_value = mock_result
    mock_engine.connect.return_value.__enter__.return_value = mock_connection
    with patch("sqlalchemy.create_engine", return_value=mock_engine):
        store = VolcengineMySQLVectorStore(
            connection_string=_TEST_CONN_STR,
            perform_setup=False,
        )
        nodes = store.get_nodes(node_ids=["id1"])
        assert len(nodes) == 1
        assert nodes[0].text == "text1"
        stmt = str(mock_connection.execute.call_args[0][0])
        # Similar to delete_nodes(), SQLAlchemy rewrites the IN parameter
        # placeholder, so we avoid matching the exact token.
        assert "SELECT text, metadata FROM `llamaindex`" in stmt
        assert "WHERE node_id IN" in stmt
def test_clear_deletes_all_rows_from_table() -> None:
    """`clear()` is a convenience that wipes all entries (but keeps the table)."""
    mock_engine = MagicMock()
    mock_connection = MagicMock()
    mock_engine.connect.return_value.__enter__.return_value = mock_connection
    with patch("sqlalchemy.create_engine", return_value=mock_engine):
        store = VolcengineMySQLVectorStore(
            connection_string=_TEST_CONN_STR,
            perform_setup=False,
        )
        store.clear()
        # DELETE (not DROP/TRUNCATE) keeps the schema and vector index intact.
        stmt = str(mock_connection.execute.call_args[0][0])
        assert "DELETE FROM `llamaindex`" in stmt
def test_drop_removes_table_and_disposes_engine() -> None:
    """`drop()` should issue DROP TABLE and release engine resources."""
    mock_engine = MagicMock()
    mock_connection = MagicMock()
    mock_engine.connect.return_value.__enter__.return_value = mock_connection
    with patch("sqlalchemy.create_engine", return_value=mock_engine):
        store = VolcengineMySQLVectorStore(
            connection_string=_TEST_CONN_STR,
            perform_setup=False,
        )
        store.drop()
        stmt = str(mock_connection.execute.call_args[0][0])
        assert "DROP TABLE IF EXISTS `llamaindex`" in stmt
        # Verify engine was disposed (drop() calls close() afterwards).
        mock_engine.dispose.assert_called_once()
class TestAsyncVolcengineMySQLVectorStore(unittest.IsolatedAsyncioTestCase):
    """
    Async-path unit tests.

    Mirrors the sync tests above, but patches the module-level
    ``create_async_engine`` factory and asserts via ``assert_awaited_*``
    that the store goes through the async connection API.
    """

    async def test_async_add_inserts_rows_and_returns_node_ids(self) -> None:
        """`async_add()` should batch-insert rows and return the IDs using async engine."""
        mock_engine = MagicMock()
        mock_connection = AsyncMock()
        # Mock engine.connect() context manager
        mock_engine.connect.return_value.__aenter__.return_value = mock_connection
        mock_engine.connect.return_value.__aexit__.return_value = None
        with patch(
            "llama_index.vector_stores.volcengine_mysql.base.create_async_engine",
            return_value=mock_engine,
        ) as mock_create:
            store = VolcengineMySQLVectorStore(
                connection_string=_TEST_CONN_STR,
                perform_setup=False,
            )
            nodes = [
                TextNode(
                    text="test text 1",
                    id_="id1",
                    embedding=[1.0, 2.0],
                    metadata={"key": "val1"},
                )
            ]
            ids = await store.async_add(nodes)
            # Check async engine creation: the sync driver (pymysql) should be
            # swapped for the async one (aiomysql) in the connection string.
            mock_create.assert_called_once()
            expected_conn_str = _TEST_CONN_STR.replace("pymysql", "aiomysql")
            assert mock_create.call_args[0][0] == expected_conn_str
            assert ids == ["id1"]
            mock_connection.execute.assert_awaited_once()
            mock_connection.commit.assert_awaited_once()
            args = mock_connection.execute.call_args
            assert "INSERT INTO `llamaindex`" in str(args[0][0])

    async def test_adelete_by_ref_doc_id_uses_async_execution(self) -> None:
        """`adelete()` removes rows by `ref_doc_id` using async engine."""
        mock_engine = MagicMock()
        mock_connection = AsyncMock()
        mock_engine.connect.return_value.__aenter__.return_value = mock_connection
        mock_engine.connect.return_value.__aexit__.return_value = None
        with patch(
            "llama_index.vector_stores.volcengine_mysql.base.create_async_engine",
            return_value=mock_engine,
        ):
            store = VolcengineMySQLVectorStore(
                connection_string=_TEST_CONN_STR,
                perform_setup=False,
            )
            await store.adelete(ref_doc_id="ref_id_1")
            mock_connection.execute.assert_awaited_once()
            mock_connection.commit.assert_awaited_once()
            stmt = mock_connection.execute.call_args[0][0]
            assert "DELETE FROM `llamaindex`" in str(stmt)
            assert "JSON_EXTRACT(metadata, '$.ref_doc_id') = :doc_id" in str(stmt)

    async def test_adelete_nodes_uses_in_clause_async(self) -> None:
        """`adelete_nodes()` should use IN clause and async execution."""
        mock_engine = MagicMock()
        mock_connection = AsyncMock()
        mock_engine.connect.return_value.__aenter__.return_value = mock_connection
        mock_engine.connect.return_value.__aexit__.return_value = None
        with patch(
            "llama_index.vector_stores.volcengine_mysql.base.create_async_engine",
            return_value=mock_engine,
        ):
            store = VolcengineMySQLVectorStore(
                connection_string=_TEST_CONN_STR,
                perform_setup=False,
            )
            await store.adelete_nodes(node_ids=["id1", "id2"])
            mock_connection.execute.assert_awaited_once()
            stmt = mock_connection.execute.call_args[0][0]
            assert "DELETE FROM `llamaindex`" in str(stmt)
            assert "WHERE node_id IN" in str(stmt)

    async def test_aclear_deletes_all_rows_async(self) -> None:
        """`aclear()` deletes all rows using async engine."""
        mock_engine = MagicMock()
        mock_connection = AsyncMock()
        mock_engine.connect.return_value.__aenter__.return_value = mock_connection
        mock_engine.connect.return_value.__aexit__.return_value = None
        with patch(
            "llama_index.vector_stores.volcengine_mysql.base.create_async_engine",
            return_value=mock_engine,
        ):
            store = VolcengineMySQLVectorStore(
                connection_string=_TEST_CONN_STR,
                perform_setup=False,
            )
            await store.aclear()
            mock_connection.execute.assert_awaited_once()
            stmt = mock_connection.execute.call_args[0][0]
            assert "DELETE FROM `llamaindex`" in str(stmt)

    async def test_aquery_returns_scored_nodes_async(self) -> None:
        """`aquery()` should return nodes + similarities using async engine."""
        mock_engine = MagicMock()
        mock_connection = AsyncMock()
        mock_result = MagicMock()
        # Mock result iteration for async
        row = Mock()
        row.node_id = "id1"
        row.text = "text1"
        row.metadata = '{"key": "val"}'
        row.distance = 0.1
        # Async result is typically iterable, but here we mock the execute return value
        # The code does `result = await connection.execute(...)` and then `for item in result:`
        # Standard SQLAlchemy AsyncResult is synchronous iterable after await.
        mock_result.__iter__.return_value = [row]
        mock_connection.execute.return_value = mock_result
        mock_engine.connect.return_value.__aenter__.return_value = mock_connection
        mock_engine.connect.return_value.__aexit__.return_value = None
        with patch(
            "llama_index.vector_stores.volcengine_mysql.base.create_async_engine",
            return_value=mock_engine,
        ):
            store = VolcengineMySQLVectorStore(
                connection_string=_TEST_CONN_STR,
                perform_setup=False,
            )
            query = VectorStoreQuery(
                query_embedding=[1.0, 2.0],
                similarity_top_k=2,
            )
            result = await store.aquery(query)
            assert len(result.nodes) == 1
            assert result.nodes[0].node_id == "id1"
            # Similarity = 1 / (1 + 0.1) ~= 0.909
            assert pytest.approx(result.similarities[0], 0.001) == 0.909
            # We expect at least one execute call (query).
            # We might have an optional SET SESSION call if ef_search is set (default 20).
            assert mock_connection.execute.await_count >= 1
            # Verify the SELECT query
            stmt = str(mock_connection.execute.call_args_list[-1][0][0])
            assert "SELECT" in stmt
            assert "L2_DISTANCE" in stmt
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-volcenginemysql/tests/test_vector_stores_volcenginemysql.py",
"license": "MIT License",
"lines": 466,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-datasets/llama_index/readers/datasets/base.py | """Datasets reader."""
from typing import List, Optional, Any, Iterable, Dict, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from datasets import Dataset, IterableDataset, load_dataset, Split
class DatasetsReader(BaseReader):
    """
    Datasets reader.

    Load HuggingFace datasets as documents. Each dataset sample becomes one
    :class:`Document`: the full sample is stored as metadata, and one field
    may optionally be promoted to the document id and/or text.
    """

    @staticmethod
    def _make_document(
        sample: Dict[str, Any],
        doc_id_key: Optional[str] = None,
        text_key: Optional[str] = None,
    ) -> Document:
        """
        Convert a single dataset sample into a Document.

        Args:
            sample: One dataset row; stored verbatim as document metadata.
            doc_id_key: Optional key in ``sample`` to use as the document id.
            text_key: Optional key in ``sample`` to use as the document text.

        Returns:
            Document: The converted document.

        Raises:
            KeyError: If ``doc_id_key`` or ``text_key`` is provided but not
                present in ``sample``.
        """
        kwargs: Dict[str, Any] = {"metadata": sample}
        if doc_id_key:
            if doc_id_key not in sample:
                msg = f"Document id key '{doc_id_key}' not found."
                raise KeyError(msg)
            kwargs["id_"] = sample[doc_id_key]
        if text_key:
            if text_key not in sample:
                msg = f"Text key '{text_key}' not found."
                raise KeyError(msg)
            kwargs["text"] = sample[text_key]
        return Document(**kwargs)

    def load_data(
        self,
        *args: Any,
        dataset: Optional[Dataset] = None,
        split: Union[Split, str] = Split.TRAIN,
        doc_id_key: Optional[str] = None,
        text_key: Optional[str] = None,
        **load_kwargs: Any,
    ) -> List[Document]:
        """
        Load data from the dataset eagerly.

        Args:
            *args: Positional arguments to pass to load_dataset.
            dataset (Optional[Dataset]): The dataset to load. load_dataset is skipped if provided. Optional.
            split (Union[Split, str]): The split to load. Default: Split.TRAIN.
            doc_id_key (Optional[str]): The key of the doc_id in samples. Optional.
            text_key (Optional[str]): The key of the text in samples. Optional.
            **load_kwargs: Keyword arguments to pass to load_dataset.

        Returns:
            List[Document]: A list of documents, fully materialized.

        Raises:
            KeyError: If ``doc_id_key`` or ``text_key`` is missing in a sample.
        """
        if dataset is None:
            # streaming=False downloads the split and materializes it in memory.
            dataset = load_dataset(*args, **load_kwargs, split=split, streaming=False)
        return [
            self._make_document(sample, doc_id_key=doc_id_key, text_key=text_key)
            for sample in dataset
        ]

    def lazy_load_data(
        self,
        *args: Any,
        dataset: Optional[IterableDataset] = None,
        split: Union[Split, str] = Split.TRAIN,
        doc_id_key: Optional[str] = None,
        text_key: Optional[str] = None,
        **load_kwargs: Any,
    ) -> Iterable[Document]:
        """
        Lazily load data from the dataset.

        Args:
            *args: Positional arguments to pass to load_dataset.
            dataset (Optional[IterableDataset]): The dataset to load. load_dataset is skipped if provided. Optional.
            split (Union[Split, str]): The split to load. Default: Split.TRAIN.
            doc_id_key (Optional[str]): The key of the doc_id in samples. Optional.
            text_key (Optional[str]): The key of the text in samples. Optional.
            **load_kwargs: Keyword arguments to pass to load_dataset.

        Returns:
            Iterable[Document]: A generator of documents, produced lazily as
            the underlying (streaming) dataset is iterated.

        Raises:
            KeyError: If ``doc_id_key`` or ``text_key`` is missing in a sample
                (raised during iteration, not at call time).
        """
        if dataset is None:
            # streaming=True returns an IterableDataset that yields samples on demand.
            dataset = load_dataset(*args, **load_kwargs, split=split, streaming=True)
        # Return Document generator
        return (
            self._make_document(sample, doc_id_key=doc_id_key, text_key=text_key)
            for sample in dataset
        )
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-datasets/llama_index/readers/datasets/base.py",
"license": "MIT License",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-datasets/tests/test_readers_datasets.py | import pytest
from unittest.mock import MagicMock, patch
from typing import Iterable
from datasets import Dataset
from llama_index.readers.datasets import DatasetsReader
@pytest.fixture
def reader():
    """A fresh, stateless DatasetsReader for each test."""
    return DatasetsReader()
@pytest.fixture
def sample_data():
    """Two dataset-like rows with an id field, a text field and extra metadata."""
    return [
        {"id": "doc_1", "content": "This is the first document.", "extra": "A"},
        {"id": "doc_2", "content": "This is the second document.", "extra": "B"},
    ]
# --- load_data tests ---
def test_load_data_with_preloaded_dataset(reader, sample_data):
    """Test load_data with a preloaded dataset."""
    # Mocking a Dataset object that behaves like a list
    mock_dataset = MagicMock(spec=Dataset)
    mock_dataset.__iter__.return_value = iter(sample_data)
    docs = reader.load_data(dataset=mock_dataset, text_key="content")
    assert isinstance(docs, list)
    assert len(docs) == 2
    assert docs[0].text == "This is the first document."
    # The entire sample row is preserved as document metadata.
    assert docs[0].metadata == sample_data[0]
    assert docs[1].text == "This is the second document."
@patch("llama_index.readers.datasets.base.load_dataset")
def test_load_data_from_huggingface(mock_hf_load, reader, sample_data):
    """Test load_data with a huggingface dataset."""
    # Setup the mock to return our sample data when iterated
    mock_ds_instance = MagicMock(spec=Dataset)
    mock_ds_instance.__iter__.return_value = iter(sample_data)
    mock_hf_load.return_value = mock_ds_instance
    dataset_name = "some/dataset"
    split_name = "validation"
    docs = reader.load_data(
        dataset_name, split=split_name, text_key="content", doc_id_key="id"
    )
    assert len(docs) == 2
    assert docs[0].id_ == "doc_1"
    assert docs[0].text == "This is the first document."
    # Eager path must request a non-streaming dataset.
    mock_hf_load.assert_called_once_with(
        dataset_name, split=split_name, streaming=False
    )
# --- lazy_load_data tests ---
def test_lazy_load_data_with_preloaded_dataset(reader, sample_data):
    """lazy_load_data should stay lazy when handed an existing iterable dataset."""
    # An IterableDataset behaves like a plain iterator over samples.
    streamed_samples = iter(sample_data)
    doc_stream = reader.lazy_load_data(dataset=streamed_samples, text_key="content")
    # Still a lazy iterable, not an eagerly-built list.
    assert isinstance(doc_stream, Iterable)
    assert not isinstance(doc_stream, list)
    materialized = list(doc_stream)
    assert len(materialized) == 2
    assert materialized[0].text == sample_data[0]["content"]
@patch("llama_index.readers.datasets.base.load_dataset")
def test_lazy_load_data_from_huggingface(mock_hf_load, reader, sample_data):
    """lazy_load_data should stream a hub dataset instead of loading it eagerly."""
    # Streaming loads yield samples one at a time.
    mock_hf_load.return_value = iter(sample_data)
    name = "some/streamed_dataset"
    doc_stream = reader.lazy_load_data(name, text_key="content")
    assert isinstance(doc_stream, Iterable)
    # load_dataset is invoked up-front, in streaming mode, before consumption.
    mock_hf_load.assert_called_once_with(name, split="train", streaming=True)
    # Pull just the first document off the stream.
    assert next(doc_stream).text == "This is the first document."
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-datasets/tests/test_readers_datasets.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-apertis/llama_index/llms/apertis/base.py | from typing import Any, Dict, Optional
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.core.bridge.pydantic import Field
from llama_index.core.constants import (
DEFAULT_CONTEXT_WINDOW,
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.llms.openai_like import OpenAILike
# OpenAI-compatible gateway endpoint used when no api_base is supplied.
DEFAULT_API_BASE = "https://api.apertis.ai/v1"
# Model identifier used when the caller does not pick one explicitly.
DEFAULT_MODEL = "gpt-5.2"
class Apertis(OpenAILike):
    """
    Apertis LLM.

    Apertis provides a unified API gateway to access multiple LLM providers
    including OpenAI, Anthropic, Google, and more through an OpenAI-compatible
    interface.

    Supported Endpoints:
    - `/v1/chat/completions` - OpenAI Chat Completions format (default)
    - `/v1/responses` - OpenAI Responses format compatible
    - `/v1/messages` - Anthropic format compatible

    To instantiate the `Apertis` class, you will need an API key. You can set
    the API key either as an environment variable `APERTIS_API_KEY` or directly
    in the class constructor.
    You can obtain an API key at https://api.apertis.ai/token

    Examples:
        `pip install llama-index-llms-apertis`
        ```python
        from llama_index.llms.apertis import Apertis
        llm = Apertis(
            api_key="<your-api-key>",
            model="gpt-5.2",
        )
        response = llm.complete("Hello World!")
        print(str(response))
        ```
    """

    model: str = Field(
        description="The model to use. Supports models from OpenAI, Anthropic, Google, and more."
    )
    context_window: int = Field(
        default=DEFAULT_CONTEXT_WINDOW,
        description="The maximum number of context tokens for the model.",
        gt=0,
    )
    # Apertis fronts chat-style endpoints, so default to the chat interface.
    is_chat_model: bool = Field(
        default=True,
        description=LLMMetadata.model_fields["is_chat_model"].description,
    )

    def __init__(
        self,
        model: str = DEFAULT_MODEL,
        temperature: float = DEFAULT_TEMPERATURE,
        max_tokens: int = DEFAULT_NUM_OUTPUTS,
        additional_kwargs: Optional[Dict[str, Any]] = None,
        max_retries: int = 5,
        api_base: Optional[str] = DEFAULT_API_BASE,
        api_key: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Create an Apertis client, resolving credentials from params or env.

        Explicit `api_base`/`api_key` arguments take precedence; otherwise the
        `APERTIS_API_BASE` / `APERTIS_API_KEY` environment variables are used.
        """
        additional_kwargs = additional_kwargs or {}
        # Fall back to environment variables when values are not passed in.
        api_base = get_from_param_or_env("api_base", api_base, "APERTIS_API_BASE")
        api_key = get_from_param_or_env("api_key", api_key, "APERTIS_API_KEY")
        super().__init__(
            model=model,
            temperature=temperature,
            max_tokens=max_tokens,
            api_base=api_base,
            api_key=api_key,
            additional_kwargs=additional_kwargs,
            max_retries=max_retries,
            **kwargs,
        )

    @classmethod
    def class_name(cls) -> str:
        """Return the canonical class name used in serialization/registries."""
        return "Apertis_LLM"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-apertis/llama_index/llms/apertis/base.py",
"license": "MIT License",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-apertis/tests/test_llms_apertis.py | from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.apertis import Apertis
def test_llm_class():
    """Apertis must sit somewhere in the BaseLLM class hierarchy."""
    ancestor_names = {base.__name__ for base in Apertis.__mro__}
    assert BaseLLM.__name__ in ancestor_names
def test_class_name():
    """class_name should report the canonical identifier."""
    reported = Apertis.class_name()
    assert reported == "Apertis_LLM"
def test_default_model():
    """Omitting `model` should fall back to the documented default."""
    client = Apertis(api_key="test-key")
    assert client.model == "gpt-5.2"
def test_custom_model():
    """An explicitly requested model should be kept verbatim."""
    requested = "claude-sonnet-4.5"
    client = Apertis(api_key="test-key", model=requested)
    assert client.model == requested
def test_api_base():
    """The client should default to the public Apertis endpoint."""
    client = Apertis(api_key="test-key")
    assert client.api_base == "https://api.apertis.ai/v1"
def test_custom_api_base():
    """A caller-supplied base URL should override the default endpoint."""
    override = "https://custom.api.example.com/v1"
    client = Apertis(api_key="test-key", api_base=override)
    assert client.api_base == override
def test_is_chat_model():
    """Apertis defaults to the chat-completions style interface."""
    client = Apertis(api_key="test-key")
    assert client.is_chat_model is True
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-apertis/tests/test_llms_apertis.py",
"license": "MIT License",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-parallel-web-systems/llama_index/tools/parallel_web_systems/base.py | """Parallel AI tool spec"""
from typing import Any, Dict, List, Optional, Union
import httpx
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class ParallelWebSystemsToolSpec(BaseToolSpec):
    """
    Parallel AI tool spec.

    This tool provides access to Parallel Web Systems Search and Extract
    APIs, enabling LLM agents to perform web research and content extraction.

    The Search API returns structured, compressed excerpts from web search
    results optimized for LLM consumption.
    The Extract API converts public URLs into clean, LLM-optimized markdown,
    including JavaScript-heavy pages and PDFs.
    """

    # Methods exposed to agents as callable tools.
    spec_functions = [
        "search",
        "extract",
    ]

    def __init__(self, api_key: str, base_url: Optional[str] = None) -> None:
        """
        Initialize with parameters.

        Args:
            api_key: Your Parallel AI API key from https://platform.parallel.ai/
            base_url: Optional custom base URL for the API

        """
        self.api_key = api_key
        self.base_url = base_url or "https://api.parallel.ai"

    async def search(
        self,
        objective: Optional[str] = None,
        search_queries: Optional[List[str]] = None,
        max_results: int = 10,
        mode: Optional[str] = None,
        excerpts: Optional[Dict[str, Any]] = None,
        source_policy: Optional[Dict[str, Any]] = None,
        fetch_policy: Optional[Dict[str, Any]] = None,
    ) -> List[Document]:
        """
        Search the web using Parallel Search API.

        Returns structured, compressed excerpts optimized for LLM consumption.
        At least one of `objective` or `search_queries` must be provided.

        Args:
            objective: Natural-language description of what the web search is
                trying to find. This can include guidance about preferred sources
                or freshness.
            search_queries: Optional list of traditional keyword search queries
                to guide the search. May contain search operators. Max 5 queries,
                200 chars each.
            max_results: Upper bound on the number of results to return (1-40).
                The default is 10.
            mode: Search mode preset. 'one-shot' returns more comprehensive results
                and longer excerpts for single response answers. 'agentic' returns
                more concise, token-efficient results for use in an agentic loop.
            excerpts: Optional settings to configure excerpt generation.
                Example: {'max_chars_per_result': 1500}
            source_policy: Optional source policy governing domain and date
                preferences in search results.
            fetch_policy: Policy for cached vs live content.
                Example: {'max_age_seconds': 86400, 'timeout_seconds': 60}

        Returns:
            A list of Document objects containing search results with excerpts
            and metadata including url, title, and publish_date. Returns an
            empty list if the API call fails.

        Raises:
            ValueError: If neither `objective` nor `search_queries` is given.

        """
        if not objective and not search_queries:
            raise ValueError(
                "At least one of 'objective' or 'search_queries' must be provided"
            )
        # The beta header opts into the 2025-10-10 search/extract API version.
        headers = {
            "x-api-key": self.api_key,
            "Content-Type": "application/json",
            "parallel-beta": "search-extract-2025-10-10",
        }
        # Only include optional fields that were actually supplied.
        payload: Dict[str, Any] = {
            "max_results": max_results,
        }
        if objective:
            payload["objective"] = objective
        if search_queries:
            payload["search_queries"] = search_queries
        if mode:
            payload["mode"] = mode
        if excerpts:
            payload["excerpts"] = excerpts
        if source_policy:
            payload["source_policy"] = source_policy
        if fetch_policy:
            payload["fetch_policy"] = fetch_policy
        try:
            async with httpx.AsyncClient() as client:
                response = await client.post(
                    f"{self.base_url}/v1beta/search",
                    headers=headers,
                    json=payload,
                    timeout=60,
                )
                response.raise_for_status()
                data = response.json()
                documents = []
                for result in data.get("results", []):
                    # combine excerpts into the document text
                    excerpts_list = result.get("excerpts", [])
                    text = "\n\n".join(excerpts_list) if excerpts_list else ""
                    doc = Document(
                        text=text,
                        metadata={
                            "url": result.get("url"),
                            "title": result.get("title"),
                            "publish_date": result.get("publish_date"),
                            "search_id": data.get("search_id"),
                        },
                    )
                    documents.append(doc)
                return documents
        except Exception as e:
            # Best-effort tool: report the failure and return no documents
            # rather than crashing the agent loop.
            print(f"Error calling Parallel AI Search API: {e}")
            return []

    async def extract(
        self,
        urls: List[str],
        objective: Optional[str] = None,
        search_queries: Optional[List[str]] = None,
        excerpts: Union[bool, Dict[str, Any]] = True,
        full_content: Union[bool, Dict[str, Any]] = False,
        fetch_policy: Optional[Dict[str, Any]] = None,
    ) -> List[Document]:
        """
        Extract clean, structured content from web pages using Parallel AI's Extract API.

        Converts public URLs into clean, LLM-optimized markdown including
        JavaScript-heavy pages and PDFs.

        Args:
            urls: List of URLs to extract content from.
            objective: Natural language objective to focus extraction on specific
                topics. The returned excerpts will be relevant to this objective.
            search_queries: Specific keyword queries to focus extraction.
            excerpts: Include relevant excerpts. Can be True/False or a dict with
                settings like {'max_chars_per_result': 2000}. Excerpts are focused
                on objective/queries if provided.
            full_content: Include full page content. Can be True/False or a dict
                with settings like {'max_chars_per_result': 3000}.
            fetch_policy: Cache vs live content policy.
                Example: {'max_age_seconds': 86400, 'timeout_seconds': 60,
                'disable_cache_fallback': False}

        Returns:
            A list of Document objects containing extracted content with metadata
            including url, title, publish_date, and excerpts. Per-URL extraction
            errors are returned as Documents describing the error; a failed API
            call returns an empty list.

        """
        headers = {
            "x-api-key": self.api_key,
            "Content-Type": "application/json",
            "parallel-beta": "search-extract-2025-10-10",
        }
        payload: Dict[str, Any] = {
            "urls": urls,
            "excerpts": excerpts,
            "full_content": full_content,
        }
        if objective:
            payload["objective"] = objective
        if search_queries:
            payload["search_queries"] = search_queries
        if fetch_policy:
            payload["fetch_policy"] = fetch_policy
        try:
            async with httpx.AsyncClient() as client:
                response = await client.post(
                    f"{self.base_url}/v1beta/extract",
                    headers=headers,
                    json=payload,
                    timeout=60,
                )
                response.raise_for_status()
                data = response.json()
                documents = []
                for result in data.get("results", []):
                    # Use full_content if available, otherwise combine excerpts
                    full_text = result.get("full_content")
                    excerpts_list = result.get("excerpts", [])
                    if full_text:
                        text = full_text
                    elif excerpts_list:
                        text = "\n\n".join(excerpts_list)
                    else:
                        text = ""
                    doc = Document(
                        text=text,
                        metadata={
                            "url": result.get("url"),
                            "title": result.get("title"),
                            "publish_date": result.get("publish_date"),
                            "extract_id": data.get("extract_id"),
                            "excerpts": excerpts_list,
                        },
                    )
                    documents.append(doc)
                # handle any errors in response
                for error in data.get("errors", []):
                    doc = Document(
                        text=f"Error extracting content: {error.get('content', 'Unknown error')}",
                        metadata={
                            "url": error.get("url"),
                            "error_type": error.get("error_type"),
                            "extract_id": data.get("extract_id"),
                        },
                    )
                    documents.append(doc)
                return documents
        except Exception as e:
            # Best-effort tool: report the failure and return no documents
            # rather than crashing the agent loop.
            print(f"Error calling Parallel AI Extract API: {e}")
            return []
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-parallel-web-systems/llama_index/tools/parallel_web_systems/base.py",
"license": "MIT License",
"lines": 210,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-parallel-web-systems/tests/test_tools_parallel_web_systems.py | """Tests for Parallel AI tool spec."""
import pytest
from unittest.mock import AsyncMock, MagicMock, patch
from llama_index.core.schema import Document
from llama_index.tools.parallel_web_systems import ParallelWebSystemsToolSpec
class TestParallelWebSystemsToolSpec:
    """Test suite for ParallelWebSystemsToolSpec."""

    def test_initialization(self) -> None:
        """Test that the tool spec initializes correctly."""
        api_key = "test-api-key"
        tool = ParallelWebSystemsToolSpec(api_key=api_key)
        assert tool.api_key == api_key
        assert tool.base_url == "https://api.parallel.ai"

    def test_initialization_with_custom_base_url(self) -> None:
        """Test initialization with a custom base URL."""
        api_key = "test-api-key"
        custom_url = "https://custom.api.com"
        tool = ParallelWebSystemsToolSpec(api_key=api_key, base_url=custom_url)
        assert tool.api_key == api_key
        assert tool.base_url == custom_url

    @pytest.mark.asyncio
    async def test_search_requires_objective_or_queries(self) -> None:
        """Test that search raises error when neither objective nor search_queries provided."""
        tool = ParallelWebSystemsToolSpec(api_key="test-key")
        with pytest.raises(ValueError) as exc_info:
            await tool.search()
        assert (
            "At least one of 'objective' or 'search_queries' must be provided"
            in str(exc_info.value)
        )

    @patch("llama_index.tools.parallel_web_systems.base.httpx.AsyncClient")
    @pytest.mark.asyncio
    async def test_search_with_objective(self, mock_async_client: MagicMock) -> None:
        """Test successful search operation with objective."""
        mock_response = MagicMock()
        mock_response.json.return_value = {
            "search_id": "search_123",
            "results": [
                {
                    "url": "https://example.com/1",
                    "title": "Test Title 1",
                    "publish_date": "2024-01-15",
                    "excerpts": ["Sample excerpt 1", "Sample excerpt 2"],
                },
                {
                    "url": "https://example.com/2",
                    "title": "Test Title 2",
                    "publish_date": "2024-01-16",
                    "excerpts": ["Another excerpt"],
                },
            ],
        }
        # AsyncClient is used as an async context manager, so patch the object
        # returned by __aenter__.
        mock_client = AsyncMock()
        mock_client.post = AsyncMock(return_value=mock_response)
        mock_async_client.return_value.__aenter__.return_value = mock_client
        tool = ParallelWebSystemsToolSpec(api_key="test-key")
        results = await tool.search(
            objective="What was the GDP of France in 2023?", max_results=5
        )
        # Assertions
        assert len(results) == 2
        assert all(isinstance(doc, Document) for doc in results)
        # Excerpts are joined into a single document text.
        assert "Sample excerpt 1" in results[0].text
        assert "Sample excerpt 2" in results[0].text
        assert results[0].metadata["url"] == "https://example.com/1"
        assert results[0].metadata["title"] == "Test Title 1"
        assert results[0].metadata["search_id"] == "search_123"

    @patch("llama_index.tools.parallel_web_systems.base.httpx.AsyncClient")
    @pytest.mark.asyncio
    async def test_search_with_queries(self, mock_async_client: MagicMock) -> None:
        """Test search with search_queries."""
        mock_response = MagicMock()
        mock_response.json.return_value = {"search_id": "search_456", "results": []}
        mock_client = AsyncMock()
        mock_client.post = AsyncMock(return_value=mock_response)
        mock_async_client.return_value.__aenter__.return_value = mock_client
        tool = ParallelWebSystemsToolSpec(api_key="test-key")
        # Should complete without raising even though no results come back.
        await tool.search(
            search_queries=["renewable energy 2024", "solar power"],
            max_results=10,
            mode="agentic",
        )

    @patch("llama_index.tools.parallel_web_systems.base.httpx.AsyncClient")
    @pytest.mark.asyncio
    async def test_search_api_error(self, mock_async_client: MagicMock, capsys) -> None:
        """Test search handles API errors gracefully."""
        mock_client = AsyncMock()
        mock_client.post = AsyncMock(side_effect=Exception("API Error"))
        mock_async_client.return_value.__aenter__.return_value = mock_client
        tool = ParallelWebSystemsToolSpec(api_key="test-key")
        results = await tool.search(objective="test query")
        # Failures are swallowed: empty result list plus a printed diagnostic.
        assert results == []
        captured = capsys.readouterr()
        assert "Error calling Parallel AI Search API" in captured.out

    @patch("llama_index.tools.parallel_web_systems.base.httpx.AsyncClient")
    @pytest.mark.asyncio
    async def test_extract_success(self, mock_async_client: MagicMock) -> None:
        """Test successful extract operation."""
        mock_response = MagicMock()
        mock_response.json.return_value = {
            "extract_id": "extract_123",
            "results": [
                {
                    "url": "https://en.wikipedia.org/wiki/AI",
                    "title": "Artificial intelligence - Wikipedia",
                    "publish_date": "2024-01-15",
                    "excerpts": ["AI excerpt 1", "AI excerpt 2"],
                    "full_content": None,
                },
            ],
            "errors": [],
        }
        mock_client = AsyncMock()
        mock_client.post = AsyncMock(return_value=mock_response)
        mock_async_client.return_value.__aenter__.return_value = mock_client
        tool = ParallelWebSystemsToolSpec(api_key="test-key")
        results = await tool.extract(
            urls=["https://en.wikipedia.org/wiki/AI"],
            objective="What are the main applications of AI?",
        )
        assert len(results) == 1
        assert isinstance(results[0], Document)
        # With full_content absent, excerpts make up the document text.
        assert "AI excerpt 1" in results[0].text
        assert results[0].metadata["url"] == "https://en.wikipedia.org/wiki/AI"
        assert results[0].metadata["extract_id"] == "extract_123"

    @patch("llama_index.tools.parallel_web_systems.base.httpx.AsyncClient")
    @pytest.mark.asyncio
    async def test_extract_with_full_content(
        self, mock_async_client: MagicMock
    ) -> None:
        """Test extract with full content enabled."""
        mock_response = MagicMock()
        mock_response.json.return_value = {
            "extract_id": "extract_456",
            "results": [
                {
                    "url": "https://example.com/article",
                    "title": "Test Article",
                    "full_content": "# Full Article Content\n\nThis is the full content...",
                    "excerpts": [],
                },
            ],
            "errors": [],
        }
        mock_client = AsyncMock()
        mock_client.post = AsyncMock(return_value=mock_response)
        mock_async_client.return_value.__aenter__.return_value = mock_client
        tool = ParallelWebSystemsToolSpec(api_key="test-key")
        results = await tool.extract(
            urls=["https://example.com/article"],
            full_content=True,
            excerpts=False,
        )
        assert len(results) == 1
        # full_content takes precedence over excerpts as document text.
        assert "Full Article Content" in results[0].text

    @patch("llama_index.tools.parallel_web_systems.base.httpx.AsyncClient")
    @pytest.mark.asyncio
    async def test_extract_handles_errors(self, mock_async_client: MagicMock) -> None:
        """Test extract handles URL errors gracefully."""
        mock_response = MagicMock()
        mock_response.json.return_value = {
            "extract_id": "extract_789",
            "results": [],
            "errors": [
                {
                    "url": "https://invalid-url.com/",
                    "error_type": "fetch_failed",
                    "content": "Failed to fetch URL",
                },
            ],
        }
        mock_client = AsyncMock()
        mock_client.post = AsyncMock(return_value=mock_response)
        mock_async_client.return_value.__aenter__.return_value = mock_client
        tool = ParallelWebSystemsToolSpec(api_key="test-key")
        results = await tool.extract(urls=["https://invalid-url.com/"])
        # Per-URL failures become error Documents rather than exceptions.
        assert len(results) == 1
        assert "Error extracting content" in results[0].text
        assert results[0].metadata["error_type"] == "fetch_failed"

    @patch("llama_index.tools.parallel_web_systems.base.httpx.AsyncClient")
    @pytest.mark.asyncio
    async def test_extract_api_error(
        self, mock_async_client: MagicMock, capsys
    ) -> None:
        """Test extract handles API errors gracefully."""
        mock_client = AsyncMock()
        mock_client.post = AsyncMock(side_effect=Exception("Network Error"))
        mock_async_client.return_value.__aenter__.return_value = mock_client
        tool = ParallelWebSystemsToolSpec(api_key="test-key")
        results = await tool.extract(urls=["https://example.com"])
        assert results == []
        captured = capsys.readouterr()
        assert "Error calling Parallel AI Extract API" in captured.out

    def test_spec_functions_list(self) -> None:
        """Test that spec_functions contains expected methods."""
        expected_functions = ["search", "extract"]
        assert ParallelWebSystemsToolSpec.spec_functions == expected_functions
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-parallel-web-systems/tests/test_tools_parallel_web_systems.py",
"license": "MIT License",
"lines": 197,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/agent/react/test_react_chat_formatter.py | from typing import List, Optional, Sequence
from llama_index.core.agent.react.formatter import ReActChatFormatter
from llama_index.core.agent.react.types import (
BaseReasoningStep,
)
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.llms import MessageRole
from llama_index.core.tools import BaseTool
class MockReActChatFormatter(ReActChatFormatter):
    """Test double whose format() ignores all inputs and returns one canned system message."""

    def format(
        self,
        tools: Sequence[BaseTool],
        chat_history: List[ChatMessage],
        current_reasoning: Optional[List[BaseReasoningStep]] = None,
    ) -> List[ChatMessage]:
        canned = ChatMessage(role=MessageRole.SYSTEM, content="mock data!")
        return [canned]
def test_inheritance_react_chat_formatter():
    """Subclass overrides of format() must survive both factory constructors."""
    for formatter in (
        MockReActChatFormatter.from_defaults(),
        MockReActChatFormatter.from_context("mock context!"),
    ):
        messages = formatter.format([], [])
        assert messages[0].content == "mock data!"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/agent/react/test_react_chat_formatter.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/llama_index/core/chat_engine/multi_modal_condense_plus_context.py | import logging
from typing import Any, List, Optional, Sequence, Tuple, Union
from llama_index.core.base.response.schema import RESPONSE_TYPE, Response
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.callbacks import trace_method
from llama_index.core.chat_engine.types import (
AgentChatResponse,
BaseChatEngine,
StreamingAgentChatResponse,
ToolOutput,
)
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
MessageRole,
)
from llama_index.core.base.response.schema import (
StreamingResponse,
AsyncStreamingResponse,
)
from llama_index.core.base.llms.generic_utils import messages_to_history_str
from llama_index.core.indices.query.schema import QueryBundle
from llama_index.core.llms import LLM, TextBlock, ChatMessage, ImageBlock
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.prompts import PromptTemplate
from llama_index.core.schema import ImageNode, NodeWithScore, MetadataMode
from llama_index.core.base.llms.generic_utils import image_node_to_image_block
from llama_index.core.memory import BaseMemory, Memory
from llama_index.core.chat_engine.multi_modal_context import _get_image_and_text_nodes
from llama_index.core.llms.llm import (
astream_chat_response_to_tokens,
stream_chat_response_to_tokens,
)
from llama_index.core.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from llama_index.core.chat_engine.condense_plus_context import (
DEFAULT_CONDENSE_PROMPT_TEMPLATE,
)
from llama_index.core.settings import Settings
from llama_index.core.base.base_retriever import BaseRetriever
logger = logging.getLogger(__name__)
class MultiModalCondensePlusContextChatEngine(BaseChatEngine):
"""
Multi-Modal Condensed Conversation & Context Chat Engine.
First condense a conversation and latest user message to a standalone question
Then build a context for the standalone question from a retriever,
Then pass the context along with prompt and user message to LLM to generate a response.
"""
    def __init__(
        self,
        retriever: BaseRetriever,
        multi_modal_llm: LLM,
        memory: BaseMemory,
        context_prompt: Optional[Union[str, PromptTemplate]] = None,
        condense_prompt: Optional[Union[str, PromptTemplate]] = None,
        system_prompt: Optional[str] = None,
        skip_condense: bool = False,
        node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
        callback_manager: Optional[CallbackManager] = None,
        verbose: bool = False,
    ):
        """Initialize the engine.

        Args:
            retriever: Retriever used to fetch context nodes.
            multi_modal_llm: LLM used for both condensing and answering.
            memory: Chat memory storing the conversation history.
            context_prompt: Prompt (template or str) used to build the final
                context-augmented question. Defaults to the default text QA prompt.
            condense_prompt: Prompt (template or str) used to condense history plus
                the latest message into a standalone question.
            system_prompt: Optional system message prepended to LLM calls.
            skip_condense: If True, skip the condensing step entirely.
            node_postprocessors: Applied in order to retrieved nodes.
            callback_manager: Callback manager, also propagated to postprocessors.
            verbose: If True, print the condensed question to stdout.
        """
        self._retriever = retriever
        self._multi_modal_llm = multi_modal_llm
        self._memory = memory
        # Accept raw strings for prompts and wrap them in PromptTemplate.
        context_prompt = context_prompt or DEFAULT_TEXT_QA_PROMPT
        if isinstance(context_prompt, str):
            context_prompt = PromptTemplate(context_prompt)
        self._context_prompt_template = context_prompt
        condense_prompt = condense_prompt or DEFAULT_CONDENSE_PROMPT_TEMPLATE
        if isinstance(condense_prompt, str):
            condense_prompt = PromptTemplate(condense_prompt)
        self._condense_prompt_template = condense_prompt
        self._system_prompt = system_prompt
        self._skip_condense = skip_condense
        self._node_postprocessors = node_postprocessors or []
        self.callback_manager = callback_manager or CallbackManager([])
        # Share one callback manager across all postprocessors.
        for node_postprocessor in self._node_postprocessors:
            node_postprocessor.callback_manager = self.callback_manager
        self._verbose = verbose
    @classmethod
    def from_defaults(
        cls,
        retriever: BaseRetriever,
        multi_modal_llm: Optional[LLM] = None,
        chat_history: Optional[List[ChatMessage]] = None,
        memory: Optional[BaseMemory] = None,
        system_prompt: Optional[str] = None,
        context_prompt: Optional[Union[str, PromptTemplate]] = None,
        condense_prompt: Optional[Union[str, PromptTemplate]] = None,
        skip_condense: bool = False,
        node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
        verbose: bool = False,
        **kwargs: Any,
    ) -> "MultiModalCondensePlusContextChatEngine":
        """Initialize a MultiModalCondensePlusContextChatEngine from default parameters.

        Falls back to ``Settings.llm`` when no LLM is given, and builds a default
        memory sized to the LLM's context window (minus a 256-token margin for
        the response) when no memory is supplied.
        """
        multi_modal_llm = multi_modal_llm or Settings.llm
        chat_history = chat_history or []
        memory = memory or Memory.from_defaults(
            chat_history=chat_history,
            token_limit=multi_modal_llm.metadata.context_window - 256,
        )
        return cls(
            retriever=retriever,
            multi_modal_llm=multi_modal_llm,
            memory=memory,
            context_prompt=context_prompt,
            condense_prompt=condense_prompt,
            skip_condense=skip_condense,
            callback_manager=Settings.callback_manager,
            node_postprocessors=node_postprocessors,
            system_prompt=system_prompt,
            verbose=verbose,
        )
def _condense_question(
self, chat_history: List[ChatMessage], latest_message: str
) -> str:
"""Condense a conversation history and latest user message to a standalone question."""
if self._skip_condense or len(chat_history) == 0:
return latest_message
chat_history_str = messages_to_history_str(chat_history)
logger.debug(chat_history_str)
llm_input = self._condense_prompt_template.format(
chat_history=chat_history_str, question=latest_message
)
return str(self._multi_modal_llm.complete(llm_input))
async def _acondense_question(
self, chat_history: List[ChatMessage], latest_message: str
) -> str:
"""Condense a conversation history and latest user message to a standalone question."""
if self._skip_condense or len(chat_history) == 0:
return latest_message
chat_history_str = messages_to_history_str(chat_history)
logger.debug(chat_history_str)
llm_input = self._condense_prompt_template.format(
chat_history=chat_history_str, question=latest_message
)
return str(await self._multi_modal_llm.acomplete(llm_input))
def _get_nodes(self, message: str) -> List[NodeWithScore]:
"""Generate context information from a message."""
nodes = self._retriever.retrieve(message)
for postprocessor in self._node_postprocessors:
nodes = postprocessor.postprocess_nodes(
nodes, query_bundle=QueryBundle(message)
)
return nodes
async def _aget_nodes(self, message: str) -> List[NodeWithScore]:
"""Generate context information from a message."""
nodes = await self._retriever.aretrieve(message)
for postprocessor in self._node_postprocessors:
nodes = postprocessor.postprocess_nodes(
nodes, query_bundle=QueryBundle(message)
)
return nodes
    def _run_c2(
        self,
        message: str,
        chat_history: Optional[List[ChatMessage]] = None,
    ) -> Tuple[ToolOutput, List[NodeWithScore]]:
        """Run the condense + context ("c2") pipeline for one user message.

        Optionally overwrites memory with the supplied history, condenses the
        conversation into a standalone question, retrieves context nodes for
        it, and packages the retrieval as a ToolOutput for the chat response.
        """
        if chat_history is not None:
            # Caller-supplied history replaces whatever is in memory.
            self._memory.set(chat_history)
        chat_history = self._memory.get(input=message)
        # Condense conversation history and latest message to a standalone question
        condensed_question = self._condense_question(chat_history, message)  # type: ignore
        logger.info(f"Condensed question: {condensed_question}")
        if self._verbose:
            print(f"Condensed question: {condensed_question}")
        # get the context nodes using the condensed question
        context_nodes = self._get_nodes(condensed_question)
        context_source = ToolOutput(
            tool_name="retriever",
            content=str(context_nodes),
            raw_input={"message": condensed_question},
            raw_output=context_nodes,
        )
        return context_source, context_nodes
    async def _arun_c2(
        self,
        message: str,
        chat_history: Optional[List[ChatMessage]] = None,
    ) -> Tuple[ToolOutput, List[NodeWithScore]]:
        """Async variant of the condense + context ("c2") pipeline.

        Optionally overwrites memory with the supplied history, condenses the
        conversation into a standalone question, retrieves context nodes for
        it, and packages the retrieval as a ToolOutput for the chat response.
        """
        if chat_history is not None:
            # Caller-supplied history replaces whatever is in memory.
            await self._memory.aset(chat_history)
        chat_history = await self._memory.aget(input=message)
        # Condense conversation history and latest message to a standalone question
        condensed_question = await self._acondense_question(chat_history, message)  # type: ignore
        logger.info(f"Condensed question: {condensed_question}")
        if self._verbose:
            print(f"Condensed question: {condensed_question}")
        # get the context nodes using the condensed question
        context_nodes = await self._aget_nodes(condensed_question)
        context_source = ToolOutput(
            tool_name="retriever",
            content=str(context_nodes),
            raw_input={"message": condensed_question},
            raw_output=context_nodes,
        )
        return context_source, context_nodes
    def synthesize(
        self,
        query_str: str,
        nodes: List[NodeWithScore],
        additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
        streaming: bool = False,
    ) -> RESPONSE_TYPE:
        """Generate an answer from retrieved nodes with the multi-modal LLM.

        Text nodes are stitched into the context prompt; image nodes become
        image blocks on the final user message.

        NOTE(review): `additional_source_nodes` is accepted but never used in
        this body — confirm whether it should be merged into `nodes`.
        NOTE(review): when `self._system_prompt` is None a system message with
        None content is still sent — verify downstream LLMs tolerate this.
        """
        image_nodes, text_nodes = _get_image_and_text_nodes(nodes)
        # Concatenate all text-node contents into one context string.
        context_str = "\n\n".join(
            [r.get_content(metadata_mode=MetadataMode.LLM) for r in text_nodes]
        )
        fmt_prompt = self._context_prompt_template.format(
            context_str=context_str, query_str=query_str
        )
        # Image nodes ride along as image blocks on the user message.
        blocks: List[Union[ImageBlock, TextBlock]] = [
            image_node_to_image_block(image_node.node)
            for image_node in image_nodes
            if isinstance(image_node.node, ImageNode)
        ]
        blocks.append(TextBlock(text=fmt_prompt))
        chat_history = self._memory.get(
            input=query_str,
        )
        if streaming:
            llm_stream = self._multi_modal_llm.stream_chat(
                [
                    ChatMessage(role="system", content=self._system_prompt),
                    *chat_history,
                    ChatMessage(role="user", blocks=blocks),
                ]
            )
            stream_tokens = stream_chat_response_to_tokens(llm_stream)
            return StreamingResponse(
                response_gen=stream_tokens,
                source_nodes=nodes,
                metadata={"text_nodes": text_nodes, "image_nodes": image_nodes},
            )
        else:
            llm_response = self._multi_modal_llm.chat(
                [
                    ChatMessage(role="system", content=self._system_prompt),
                    *chat_history,
                    ChatMessage(role="user", blocks=blocks),
                ]
            )
            output = llm_response.message.content or ""
            return Response(
                response=output,
                source_nodes=nodes,
                metadata={"text_nodes": text_nodes, "image_nodes": image_nodes},
            )
    async def asynthesize(
        self,
        query_str: str,
        nodes: List[NodeWithScore],
        additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
        streaming: bool = False,
    ) -> RESPONSE_TYPE:
        """
        Async counterpart of ``synthesize``.

        Same prompt construction (text context + image blocks + chat history)
        but uses the LLM's async chat/stream APIs and async memory access.

        Args:
            query_str: The user query to answer.
            nodes: Retrieved nodes (mix of image and text) used as context.
            additional_source_nodes: Not referenced by this implementation.
            streaming: When True, return an AsyncStreamingResponse; otherwise
                a fully materialized Response.

        Returns:
            RESPONSE_TYPE: AsyncStreamingResponse or Response with the same
            source nodes and "text_nodes" / "image_nodes" metadata split.
        """
        # Split retrieval results by modality.
        image_nodes, text_nodes = _get_image_and_text_nodes(nodes)
        context_str = "\n\n".join(
            [r.get_content(metadata_mode=MetadataMode.LLM) for r in text_nodes]
        )
        fmt_prompt = self._context_prompt_template.format(
            context_str=context_str, query_str=query_str
        )
        # Image blocks first, then one text block holding context + query.
        blocks: List[Union[ImageBlock, TextBlock]] = [
            image_node_to_image_block(image_node.node)
            for image_node in image_nodes
            if isinstance(image_node.node, ImageNode)
        ]
        blocks.append(TextBlock(text=fmt_prompt))
        chat_history = await self._memory.aget(
            input=query_str,
        )
        if streaming:
            llm_stream = await self._multi_modal_llm.astream_chat(
                [
                    ChatMessage(role="system", content=self._system_prompt),
                    *chat_history,
                    ChatMessage(role="user", blocks=blocks),
                ]
            )
            stream_tokens = await astream_chat_response_to_tokens(llm_stream)
            return AsyncStreamingResponse(
                response_gen=stream_tokens,
                source_nodes=nodes,
                metadata={"text_nodes": text_nodes, "image_nodes": image_nodes},
            )
        else:
            llm_response = await self._multi_modal_llm.achat(
                [
                    ChatMessage(role="system", content=self._system_prompt),
                    *chat_history,
                    ChatMessage(role="user", blocks=blocks),
                ]
            )
            # LLM content may be None; normalize to empty string.
            output = llm_response.message.content or ""
            return Response(
                response=output,
                source_nodes=nodes,
                metadata={"text_nodes": text_nodes, "image_nodes": image_nodes},
            )
@trace_method("chat")
def chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> AgentChatResponse:
context_source, context_nodes = self._run_c2(message, chat_history)
response = self.synthesize(message, nodes=context_nodes, streaming=False)
user_message = ChatMessage(content=str(message), role=MessageRole.USER)
assistant_message = ChatMessage(
content=str(response), role=MessageRole.ASSISTANT
)
self._memory.put(user_message)
self._memory.put(assistant_message)
assert context_source.tool_name == "retriever"
# re-package raw_outputs here to provide image nodes and text nodes separately
context_source.raw_output = response.metadata
return AgentChatResponse(
response=str(response),
sources=[context_source],
source_nodes=context_nodes,
)
    @trace_method("chat")
    def stream_chat(
        self, message: str, chat_history: Optional[List[ChatMessage]] = None
    ) -> StreamingAgentChatResponse:
        """
        Streaming chat turn: retrieve context, stream the synthesized answer.

        The returned response wraps a generator; the user/assistant messages
        are written to memory only after the stream is fully consumed, which
        is why is_writing_to_memory=False is passed (this method manages
        memory itself inside the generator).
        """
        context_source, context_nodes = self._run_c2(message, chat_history)
        response = self.synthesize(message, nodes=context_nodes, streaming=True)
        assert isinstance(response, StreamingResponse)

        def wrapped_gen(response: StreamingResponse) -> ChatResponseGen:
            # Accumulate tokens so each yielded ChatResponse carries the
            # full text so far; memory is updated once the stream ends.
            full_response = ""
            for token in response.response_gen:
                full_response += token
                yield ChatResponse(
                    message=ChatMessage(
                        content=full_response, role=MessageRole.ASSISTANT
                    ),
                    delta=token,
                )
            user_message = ChatMessage(content=str(message), role=MessageRole.USER)
            assistant_message = ChatMessage(
                content=full_response, role=MessageRole.ASSISTANT
            )
            self._memory.put(user_message)
            self._memory.put(assistant_message)

        assert context_source.tool_name == "retriever"
        # re-package raw_outputs here to provide image nodes and text nodes separately
        context_source.raw_output = response.metadata
        return StreamingAgentChatResponse(
            chat_stream=wrapped_gen(response),
            sources=[context_source],
            source_nodes=context_nodes,
            is_writing_to_memory=False,
        )
@trace_method("chat")
async def achat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> AgentChatResponse:
context_source, context_nodes = await self._arun_c2(message, chat_history)
response = await self.asynthesize(message, nodes=context_nodes, streaming=False)
user_message = ChatMessage(content=str(message), role=MessageRole.USER)
assistant_message = ChatMessage(
content=str(response), role=MessageRole.ASSISTANT
)
await self._memory.aput(user_message)
await self._memory.aput(assistant_message)
assert context_source.tool_name == "retriever"
# re-package raw_outputs here to provide image nodes and text nodes separately
context_source.raw_output = response.metadata
return AgentChatResponse(
response=str(response),
sources=[context_source],
source_nodes=context_nodes,
)
    @trace_method("chat")
    async def astream_chat(
        self, message: str, chat_history: Optional[List[ChatMessage]] = None
    ) -> StreamingAgentChatResponse:
        """
        Async streaming chat turn.

        As with stream_chat, memory is written by the wrapped generator after
        the token stream is exhausted, so is_writing_to_memory=False is set
        on the returned response.
        """
        context_source, context_nodes = await self._arun_c2(message, chat_history)
        response = await self.asynthesize(message, nodes=context_nodes, streaming=True)
        assert isinstance(response, AsyncStreamingResponse)

        async def wrapped_gen(response: AsyncStreamingResponse) -> ChatResponseAsyncGen:
            # Accumulate tokens; commit the exchange to memory at stream end.
            full_response = ""
            async for token in response.async_response_gen():
                full_response += token
                yield ChatResponse(
                    message=ChatMessage(
                        content=full_response, role=MessageRole.ASSISTANT
                    ),
                    delta=token,
                )
            user_message = ChatMessage(content=message, role=MessageRole.USER)
            assistant_message = ChatMessage(
                content=full_response, role=MessageRole.ASSISTANT
            )
            await self._memory.aput(user_message)
            await self._memory.aput(assistant_message)

        assert context_source.tool_name == "retriever"
        # re-package raw_outputs here to provide image nodes and text nodes separately
        context_source.raw_output = response.metadata
        return StreamingAgentChatResponse(
            achat_stream=wrapped_gen(response),
            sources=[context_source],
            source_nodes=context_nodes,
            is_writing_to_memory=False,
        )
    def reset(self) -> None:
        """Reset the engine by clearing the conversation memory."""
        # Clear chat history
        self._memory.reset()
    @property
    def chat_history(self) -> List[ChatMessage]:
        """Get chat history (all messages currently stored in memory)."""
        return self._memory.get_all()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/llama_index/core/chat_engine/multi_modal_condense_plus_context.py",
"license": "MIT License",
"lines": 411,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-core/tests/chat_engine/test_mm_condense_plus_context.py | import pytest
from llama_index.core import MockEmbedding
from llama_index.core.embeddings import MockMultiModalEmbedding
from llama_index.core.chat_engine.multi_modal_condense_plus_context import (
MultiModalCondensePlusContextChatEngine,
)
from llama_index.core.indices import MultiModalVectorStoreIndex
from llama_index.core.llms.mock import MockLLMWithChatMemoryOfLastCall
from llama_index.core.schema import Document, ImageDocument
from llama_index.core.llms import TextBlock, ImageBlock
from llama_index.core.chat_engine.types import ChatMode
# System prompt passed to the engine; tests assert it appears in mock responses.
SYSTEM_PROMPT = "Talk like a pirate."
@pytest.fixture()
def chat_engine() -> MultiModalCondensePlusContextChatEngine:
    """Build a condense+context multimodal chat engine over one text doc and one tiny image."""
    # Base64 string for a 1×1 transparent PNG
    tiny_png_b64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR4nGMAAQAABQABDQottAAAAABJRU5ErkJggg=="
    image_doc = ImageDocument(image=tiny_png_b64, metadata={"file_name": "tiny.png"})
    text_embedder = MockEmbedding(embed_dim=3)
    image_embedder = MockMultiModalEmbedding(embed_dim=3)
    mm_index = MultiModalVectorStoreIndex.from_documents(
        [Document.example(), image_doc],
        image_embed_model=image_embedder,
        embed_model=text_embedder,
    )
    engine = mm_index.as_chat_engine(
        similarity_top_k=2,
        image_similarity_top_k=1,
        chat_mode=ChatMode.CONDENSE_PLUS_CONTEXT,
        llm=MockLLMWithChatMemoryOfLastCall(),
        system_prompt=SYSTEM_PROMPT,
    )
    # Guard against as_chat_engine returning a different engine type.
    assert isinstance(engine, MultiModalCondensePlusContextChatEngine)
    return engine
def test_chat(chat_engine: MultiModalCondensePlusContextChatEngine):
    """Sync chat: response content, sources split by modality, LLM message shape."""
    response = chat_engine.chat("Hello World!")
    assert SYSTEM_PROMPT in str(response)
    assert "Hello World!" in str(response)
    assert len(chat_engine.chat_history) == 2
    assert len(response.source_nodes) == 2  # one image and one text
    assert len(response.sources) == 1
    assert response.sources[0].tool_name == "retriever"
    assert len(response.sources[0].raw_output["image_nodes"]) == 1
    assert len(response.sources[0].raw_output["text_nodes"]) == 1
    llm = chat_engine._multi_modal_llm
    assert len(llm.last_chat_messages) == 2  # system prompt and user message
    assert (
        len(llm.last_chat_messages[1].blocks) == 2
    )  # user message consisting of text block containing text context and query and image block
    # Block order is not asserted strictly — either (image, text) or (text, image).
    assert (
        isinstance(llm.last_chat_messages[1].blocks[0], ImageBlock)
        and isinstance(llm.last_chat_messages[1].blocks[1], TextBlock)
    ) or (
        isinstance(llm.last_chat_messages[1].blocks[0], TextBlock)
        and isinstance(llm.last_chat_messages[1].blocks[1], ImageBlock)
    )
    assert "chat" in llm.last_called_chat_function
    # Second turn: prior exchange must be visible in the condensed context.
    response = chat_engine.chat("What is the capital of the moon?")
    assert SYSTEM_PROMPT in str(response)
    assert "Hello World!" in str(response)
    assert "What is the capital of the moon?" in str(response)
    assert len(chat_engine.chat_history) == 4
def test_chat_stream(chat_engine: MultiModalCondensePlusContextChatEngine):
    """Sync streaming chat: token stream yields, then memory and sources are intact."""
    response = chat_engine.stream_chat("Hello World!")
    # Drain the stream; memory is only written once the generator finishes.
    num_iters = 0
    for _ in response.response_gen:
        num_iters += 1
    assert num_iters > 10
    assert SYSTEM_PROMPT in str(response)
    assert "Hello World!" in str(response)
    assert len(chat_engine.chat_history) == 2
    assert len(response.source_nodes) == 2  # one image and one text
    assert len(response.sources) == 1
    assert response.sources[0].tool_name == "retriever"
    assert len(response.sources[0].raw_output["image_nodes"]) == 1
    assert len(response.sources[0].raw_output["text_nodes"]) == 1
    llm = chat_engine._multi_modal_llm
    assert len(llm.last_chat_messages) == 2  # system prompt and user message
    assert (
        len(llm.last_chat_messages[1].blocks) == 2
    )  # user message consisting of text block containing text context and query and image block
    # Block order is not asserted strictly — either (image, text) or (text, image).
    assert (
        isinstance(llm.last_chat_messages[1].blocks[0], ImageBlock)
        and isinstance(llm.last_chat_messages[1].blocks[1], TextBlock)
    ) or (
        isinstance(llm.last_chat_messages[1].blocks[0], TextBlock)
        and isinstance(llm.last_chat_messages[1].blocks[1], ImageBlock)
    )
    assert "stream_chat" in llm.last_called_chat_function
    # Second turn: prior exchange must be visible in the condensed context.
    response = chat_engine.stream_chat("What is the capital of the moon?")
    num_iters = 0
    for _ in response.response_gen:
        num_iters += 1
    assert num_iters > 10
    assert SYSTEM_PROMPT in str(response)
    assert "Hello World!" in str(response)
    assert "What is the capital of the moon?" in str(response)
    assert len(chat_engine.chat_history) == 4
@pytest.mark.asyncio
async def test_achat(chat_engine: MultiModalCondensePlusContextChatEngine):
    """Async chat: mirrors test_chat but via achat()."""
    response = await chat_engine.achat("Hello World!")
    assert SYSTEM_PROMPT in str(response)
    assert "Hello World!" in str(response)
    assert len(chat_engine.chat_history) == 2
    assert len(response.source_nodes) == 2  # one image and one text
    assert len(response.sources) == 1
    assert response.sources[0].tool_name == "retriever"
    assert len(response.sources[0].raw_output["image_nodes"]) == 1
    assert len(response.sources[0].raw_output["text_nodes"]) == 1
    llm = chat_engine._multi_modal_llm
    assert len(llm.last_chat_messages) == 2  # system prompt and user message
    assert (
        len(llm.last_chat_messages[1].blocks) == 2
    )  # user message consisting of text block containing text context and query and image block
    # Block order is not asserted strictly — either (image, text) or (text, image).
    assert (
        isinstance(llm.last_chat_messages[1].blocks[0], ImageBlock)
        and isinstance(llm.last_chat_messages[1].blocks[1], TextBlock)
    ) or (
        isinstance(llm.last_chat_messages[1].blocks[0], TextBlock)
        and isinstance(llm.last_chat_messages[1].blocks[1], ImageBlock)
    )
    assert "achat" in llm.last_called_chat_function
    # Second turn: prior exchange must be visible in the condensed context.
    response = await chat_engine.achat("What is the capital of the moon?")
    assert SYSTEM_PROMPT in str(response)
    assert "Hello World!" in str(response)
    assert "What is the capital of the moon?" in str(response)
    assert len(chat_engine.chat_history) == 4
@pytest.mark.asyncio
async def test_chat_astream(chat_engine: MultiModalCondensePlusContextChatEngine):
    """Async streaming chat: mirrors test_chat_stream but via astream_chat()."""
    response = await chat_engine.astream_chat("Hello World!")
    # Drain the async stream; memory is only written once the generator finishes.
    num_iters = 0
    async for _ in response.async_response_gen():
        num_iters += 1
    assert num_iters > 10
    assert SYSTEM_PROMPT in str(response)
    assert "Hello World!" in str(response)
    assert len(chat_engine.chat_history) == 2
    assert len(response.source_nodes) == 2  # one image and one text
    assert len(response.sources) == 1
    assert response.sources[0].tool_name == "retriever"
    assert len(response.sources[0].raw_output["image_nodes"]) == 1
    assert len(response.sources[0].raw_output["text_nodes"]) == 1
    llm = chat_engine._multi_modal_llm
    assert len(llm.last_chat_messages) == 2  # system prompt and user message
    assert (
        len(llm.last_chat_messages[1].blocks) == 2
    )  # user message consisting of text block containing text context and query and image block
    # Block order is not asserted strictly — either (image, text) or (text, image).
    assert (
        isinstance(llm.last_chat_messages[1].blocks[0], ImageBlock)
        and isinstance(llm.last_chat_messages[1].blocks[1], TextBlock)
    ) or (
        isinstance(llm.last_chat_messages[1].blocks[0], TextBlock)
        and isinstance(llm.last_chat_messages[1].blocks[1], ImageBlock)
    )
    assert "astream_chat" in llm.last_called_chat_function
    # Second turn: prior exchange must be visible in the condensed context.
    response = await chat_engine.astream_chat("What is the capital of the moon?")
    num_iters = 0
    async for _ in response.async_response_gen():
        num_iters += 1
    assert num_iters > 10
    assert SYSTEM_PROMPT in str(response)
    assert "Hello World!" in str(response)
    assert "What is the capital of the moon?" in str(response)
    assert len(chat_engine.chat_history) == 4
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/chat_engine/test_mm_condense_plus_context.py",
"license": "MIT License",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/ingestion/llama-index-ingestion-ray/llama_index/ingestion/ray/base.py | import logging
import ray
import asyncio
from llama_index.core.ingestion import IngestionPipeline, DocstoreStrategy
from llama_index.ingestion.ray.transform import RayTransformComponent
from llama_index.ingestion.ray.utils import ray_serialize_node, ray_deserialize_node
from typing import Any, List, Optional, Sequence
from llama_index.core.ingestion.cache import IngestionCache
from llama_index.core.constants import (
DEFAULT_PIPELINE_NAME,
DEFAULT_PROJECT_NAME,
)
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.readers.base import ReaderConfig
from llama_index.core.schema import (
BaseNode,
Document,
)
from llama_index.core.storage.docstore import (
BaseDocumentStore,
)
from llama_index.core.vector_stores.types import BasePydanticVectorStore
# Module-level instrumentation dispatcher and logger for this integration.
dispatcher = get_dispatcher(__name__)
logger = logging.getLogger(__name__)
def run_transformations(
    nodes: Sequence[BaseNode],
    transformations: Sequence[RayTransformComponent],
    **kwargs: Any,
) -> Sequence[BaseNode]:
    """
    Apply a chain of Ray transformations to a set of nodes.

    Nodes are serialized into a Ray dataset, each component is applied in
    order (lazily, as Ray datasets are), and the final rows are materialized
    and deserialized back into node objects.

    Args:
        nodes: The nodes to transform.
        transformations: The transformations to apply to the nodes.
        **kwargs: Extra keyword arguments forwarded to every component call.

    Returns:
        The transformed nodes.
    """
    dataset = ray.data.from_items([ray_serialize_node(n) for n in nodes])
    for component in transformations:
        dataset = component(dataset, **kwargs)
    # take_all() triggers execution of the lazy pipeline.
    return [ray_deserialize_node(row) for row in dataset.take_all()]
async def arun_transformations(
    nodes: Sequence[BaseNode],
    transformations: Sequence[RayTransformComponent],
    **kwargs: Any,
) -> Sequence[BaseNode]:
    """
    Async variant of ``run_transformations``.

    Identical pipeline construction, but the blocking take_all() call is
    pushed onto a worker thread so the event loop is not blocked while Ray
    executes the transformations.

    Args:
        nodes: The nodes to transform.
        transformations: The transformations to apply to the nodes.
        **kwargs: Extra keyword arguments forwarded to every component call.

    Returns:
        The transformed nodes.
    """
    dataset = ray.data.from_items([ray_serialize_node(n) for n in nodes])
    for component in transformations:
        dataset = component(dataset, **kwargs)
    materialized_rows = await asyncio.to_thread(dataset.take_all)
    return [ray_deserialize_node(row) for row in materialized_rows]
class RayIngestionPipeline(IngestionPipeline):
    """
    An ingestion pipeline that can be applied to data using a Ray cluster.

    Args:
        name (str, optional):
            Unique name of the ingestion pipeline. Defaults to DEFAULT_PIPELINE_NAME.
        project_name (str, optional):
            Unique name of the project. Defaults to DEFAULT_PROJECT_NAME.
        transformations (List[RayTransformComponent], optional):
            Ray transformations to apply to the data. Defaults to None.
        documents (Optional[Sequence[Document]], optional):
            Documents to ingest. Defaults to None.
        readers (Optional[List[ReaderConfig]], optional):
            Reader to use to read the data. Defaults to None.
        vector_store (Optional[BasePydanticVectorStore], optional):
            Vector store to use to store the data. Defaults to None.
        docstore (Optional[BaseDocumentStore], optional):
            Document store to use for de-duping with a vector store. Defaults to None.
        docstore_strategy (DocstoreStrategy, optional):
            Document de-dup strategy. Defaults to DocstoreStrategy.UPSERTS.

    Examples:
        ```python
        import ray
        from llama_index.core import Document
        from llama_index.embeddings.openai import OpenAIEmbedding
        from llama_index.core.extractors import TitleExtractor
        from llama_index.ingestion.ray import RayIngestionPipeline, RayTransformComponent

        # Start a new cluster (or connect to an existing one)
        ray.init()

        # Create transformations
        transformations=[
            RayTransformComponent(
                transform_class=TitleExtractor,
                map_batches_kwargs={
                    "batch_size": 10, # Define the batch size
                },
            ),
            RayTransformComponent(
                transform_class=OpenAIEmbedding,
                map_batches_kwargs={
                    "batch_size": 10,
                },
            ),
        ]

        # Create the Ray ingestion pipeline
        pipeline = RayIngestionPipeline(
            transformations=transformations
        )

        # Run the pipeline with many documents
        nodes = pipeline.run(documents=[Document.example()] * 100)
        ```

    """

    # Overrides the base field so only Ray-wrapped components are accepted.
    transformations: List[RayTransformComponent] = Field(
        description="Transformations to apply to the data with Ray"
    )

    def __init__(
        self,
        name: str = DEFAULT_PIPELINE_NAME,
        project_name: str = DEFAULT_PROJECT_NAME,
        transformations: Optional[List[RayTransformComponent]] = None,
        readers: Optional[List[ReaderConfig]] = None,
        documents: Optional[Sequence[Document]] = None,
        vector_store: Optional[BasePydanticVectorStore] = None,
        docstore: Optional[BaseDocumentStore] = None,
        docstore_strategy: DocstoreStrategy = DocstoreStrategy.UPSERTS,
    ) -> None:
        # Calls BaseModel.__init__ directly (not IngestionPipeline.__init__)
        # so the parent's transformation defaults are bypassed.
        BaseModel.__init__(
            self,
            name=name,
            project_name=project_name,
            transformations=transformations,
            readers=readers,
            documents=documents,
            vector_store=vector_store,
            cache=IngestionCache(),
            docstore=docstore,
            docstore_strategy=docstore_strategy,
            disable_cache=True,  # Caching is disabled as Ray processes transformations lazily
        )

    @dispatcher.span
    def run(
        self,
        show_progress: bool = False,
        documents: Optional[List[Document]] = None,
        nodes: Optional[Sequence[BaseNode]] = None,
        store_doc_text: bool = True,
        **kwargs: Any,
    ) -> Sequence[BaseNode]:
        """
        Run a series of transformations on a set of nodes.

        If a vector store is provided, nodes with embeddings will be added to the vector store.

        If a vector store + docstore are provided, the docstore will be used to de-duplicate documents.

        Args:
            show_progress (bool, optional): Shows execution progress bar(s). Defaults to False.
            documents (Optional[List[Document]], optional): Set of documents to be transformed. Defaults to None.
            nodes (Optional[Sequence[BaseNode]], optional): Set of nodes to be transformed. Defaults to None.
            store_doc_text (bool, optional): Whether to store the document texts. Defaults to True.

        Returns:
            Sequence[BaseNode]: The set of transformed Nodes/Documents
        """
        input_nodes = self._prepare_inputs(documents, nodes)

        # check if we need to dedup
        if self.docstore is not None and self.vector_store is not None:
            if self.docstore_strategy in (
                DocstoreStrategy.UPSERTS,
                DocstoreStrategy.UPSERTS_AND_DELETE,
            ):
                # NOTE(review): unlike arun(), store_doc_text is not forwarded
                # to the dedup handlers here — confirm base-class defaults
                # make the sync and async paths equivalent.
                nodes_to_run = self._handle_upserts(input_nodes)
            elif self.docstore_strategy == DocstoreStrategy.DUPLICATES_ONLY:
                nodes_to_run = self._handle_duplicates(input_nodes)
            else:
                raise ValueError(f"Invalid docstore strategy: {self.docstore_strategy}")
        elif self.docstore is not None and self.vector_store is None:
            # Upsert strategies need a vector store to delete from; degrade
            # gracefully to duplicate detection only.
            if self.docstore_strategy == DocstoreStrategy.UPSERTS:
                logger.info(
                    "Docstore strategy set to upserts, but no vector store. "
                    "Switching to duplicates_only strategy."
                )
                self.docstore_strategy = DocstoreStrategy.DUPLICATES_ONLY
            elif self.docstore_strategy == DocstoreStrategy.UPSERTS_AND_DELETE:
                logger.info(
                    "Docstore strategy set to upserts and delete, but no vector store. "
                    "Switching to duplicates_only strategy."
                )
                self.docstore_strategy = DocstoreStrategy.DUPLICATES_ONLY
            nodes_to_run = self._handle_duplicates(input_nodes)
        else:
            nodes_to_run = input_nodes

        # Execute the Ray pipeline over the (possibly de-duplicated) nodes.
        nodes = run_transformations(
            nodes_to_run,
            self.transformations,
            show_progress=show_progress,
            **kwargs,
        )

        if self.vector_store is not None:
            # Only nodes that actually received embeddings are persisted.
            nodes_with_embeddings = [n for n in nodes if n.embedding is not None]
            if nodes_with_embeddings:
                self.vector_store.add(nodes_with_embeddings)

        if self.docstore is not None:
            self._update_docstore(nodes_to_run, store_doc_text=store_doc_text)

        return nodes

    @dispatcher.span
    async def arun(
        self,
        show_progress: bool = False,
        documents: Optional[List[Document]] = None,
        nodes: Optional[Sequence[BaseNode]] = None,
        store_doc_text: bool = True,
        **kwargs: Any,
    ) -> Sequence[BaseNode]:
        """
        Run a series of transformations on a set of nodes.

        If a vector store is provided, nodes with embeddings will be added to the vector store.

        If a vector store + docstore are provided, the docstore will be used to de-duplicate documents.

        Args:
            show_progress (bool, optional): Shows execution progress bar(s). Defaults to False.
            documents (Optional[List[Document]], optional): Set of documents to be transformed. Defaults to None.
            nodes (Optional[Sequence[BaseNode]], optional): Set of nodes to be transformed. Defaults to None.
            store_doc_text (bool, optional): Whether to store the document texts. Defaults to True.

        Returns:
            Sequence[BaseNode]: The set of transformed Nodes/Documents
        """
        input_nodes = self._prepare_inputs(documents, nodes)

        # check if we need to dedup
        if self.docstore is not None and self.vector_store is not None:
            if self.docstore_strategy in (
                DocstoreStrategy.UPSERTS,
                DocstoreStrategy.UPSERTS_AND_DELETE,
            ):
                nodes_to_run = await self._ahandle_upserts(
                    input_nodes, store_doc_text=store_doc_text
                )
            elif self.docstore_strategy == DocstoreStrategy.DUPLICATES_ONLY:
                nodes_to_run = await self._ahandle_duplicates(
                    input_nodes, store_doc_text=store_doc_text
                )
            else:
                raise ValueError(f"Invalid docstore strategy: {self.docstore_strategy}")
        elif self.docstore is not None and self.vector_store is None:
            # Upsert strategies need a vector store to delete from; degrade
            # gracefully to duplicate detection only.
            if self.docstore_strategy == DocstoreStrategy.UPSERTS:
                logger.info(
                    "Docstore strategy set to upserts, but no vector store. "
                    "Switching to duplicates_only strategy."
                )
                self.docstore_strategy = DocstoreStrategy.DUPLICATES_ONLY
            elif self.docstore_strategy == DocstoreStrategy.UPSERTS_AND_DELETE:
                logger.info(
                    "Docstore strategy set to upserts and delete, but no vector store. "
                    "Switching to duplicates_only strategy."
                )
                self.docstore_strategy = DocstoreStrategy.DUPLICATES_ONLY
            nodes_to_run = await self._ahandle_duplicates(
                input_nodes, store_doc_text=store_doc_text
            )
        else:
            nodes_to_run = input_nodes

        # Execute the Ray pipeline without blocking the event loop.
        nodes = await arun_transformations(  # type: ignore
            nodes_to_run,
            self.transformations,
            show_progress=show_progress,
            **kwargs,
        )

        if self.vector_store is not None:
            # Only nodes that actually received embeddings are persisted.
            nodes_with_embeddings = [n for n in nodes if n.embedding is not None]
            if nodes_with_embeddings:
                await self.vector_store.async_add(nodes_with_embeddings)

        if self.docstore is not None:
            await self._aupdate_docstore(nodes_to_run, store_doc_text=store_doc_text)

        return nodes
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/ingestion/llama-index-ingestion-ray/llama_index/ingestion/ray/base.py",
"license": "MIT License",
"lines": 269,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/ingestion/llama-index-ingestion-ray/llama_index/ingestion/ray/transform.py | from typing import Type, Any, Dict, Optional
import ray
from llama_index.core.schema import TransformComponent
from pydantic import BaseModel
import pyarrow as pa
from llama_index.ingestion.ray.utils import (
ray_deserialize_node_batch,
ray_serialize_node_batch,
)
class TransformActor:
    """Ray actor that builds a TransformComponent once and applies it per batch."""

    def __init__(
        self,
        transform_class: Type[TransformComponent],
        transform_kwargs: Dict[str, Any],
    ):
        # Instantiate the transformation a single time for this actor.
        self.transform = transform_class(**transform_kwargs)

    def __call__(self, batch: pa.Table, **kwargs) -> pa.Table:
        """Apply the wrapped transformation to one serialized batch of nodes."""
        incoming_nodes = ray_deserialize_node_batch(batch)
        outgoing_nodes = self.transform(incoming_nodes, **kwargs)
        return ray_serialize_node_batch(outgoing_nodes)
class RayTransformComponent(BaseModel):
    """
    A wrapper around transformations that enables execution in Ray.

    Args:
        transform_class (Type[TransformComponent]): The transformation class to wrap.
        transform_kwargs (Optional[Dict[str, Any]], optional): The keyword arguments to pass to the transformation __init__ function.
        map_batches_kwargs (Optional[Dict[str, Any]], optional): The keyword arguments to pass to ray.data.Dataset.map_batches (see https://docs.ray.io/en/latest/data/api/doc/ray.data.Dataset.map_batches.html for details)
    """

    transform_class: Type[TransformComponent]
    transform_kwargs: Dict[str, Any]
    map_batches_kwargs: Dict[str, Any]

    def __init__(
        self,
        transform_class: Type[TransformComponent],
        map_batches_kwargs: Optional[Dict[str, Any]] = None,
        transform_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Convenience: any extra keyword arguments are treated as
        # transform_kwargs when that parameter is not given explicitly,
        # e.g. RayTransformComponent(SentenceSplitter, chunk_size=25).
        super().__init__(
            transform_class=transform_class,
            transform_kwargs=transform_kwargs or kwargs,
            map_batches_kwargs=map_batches_kwargs or {},
        )

    def __call__(self, dataset: ray.data.Dataset, **kwargs) -> ray.data.Dataset:
        """Run the transformation on the given ray dataset."""
        # map_batches instantiates TransformActor with the stored class and
        # init kwargs, feeds it pyarrow-format batches, and forwards any
        # runtime kwargs to each __call__.
        return dataset.map_batches(
            TransformActor,
            fn_constructor_kwargs={
                "transform_class": self.transform_class,
                "transform_kwargs": self.transform_kwargs,
            },
            fn_kwargs=kwargs,
            batch_format="pyarrow",
            **self.map_batches_kwargs,
        )
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/ingestion/llama-index-ingestion-ray/llama_index/ingestion/ray/transform.py",
"license": "MIT License",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/ingestion/llama-index-ingestion-ray/llama_index/ingestion/ray/utils.py | import pyarrow as pa
from llama_index.core.schema import BaseNode
from typing import Any, Sequence, List, Dict
import importlib
def ray_serialize_node(node: BaseNode) -> Dict[str, Any]:
    """Serialize a node into a plain dict suitable for shipping to a Ray actor."""
    # Detach the embedding so it is not baked into the JSON payload.
    saved_embedding = node.embedding
    node.embedding = None
    try:
        payload = node.to_json()
    finally:
        # Always restore the embedding to avoid mutating the input object
        node.embedding = saved_embedding
    return {
        "module": node.__class__.__module__,
        "class_name": node.__class__.__name__,
        "data": payload,
        "embedding": saved_embedding,
    }
def ray_serialize_node_batch(nodes: Sequence[BaseNode]) -> pa.Table:
    """Serialize a batch of nodes into a pyarrow table for Ray transport."""
    module_col: List[str] = []
    class_col: List[str] = []
    json_col: List[str] = []
    embedding_col: List[Any] = []
    for node in nodes:
        # Capture the embedding, then strip it so it stays out of the JSON
        # payload (stored as its own column instead, saving space).
        saved_embedding = node.embedding
        embedding_col.append(saved_embedding)
        node.embedding = None
        try:
            module_col.append(node.__class__.__module__)
            class_col.append(node.__class__.__name__)
            json_col.append(node.to_json())
        finally:
            # Restore so the caller's nodes are never destructively mutated.
            node.embedding = saved_embedding
    return pa.Table.from_pydict(
        {
            "module": module_col,
            "class_name": class_col,
            "data": json_col,  # column of JSON strings
            "embedding": embedding_col,  # column of float lists (or nulls)
        }
    )
def ray_deserialize_node(serialized_node: Dict[str, Any]) -> BaseNode:
    """Rebuild a node from the dict produced by ray_serialize_node."""
    # Resolve the concrete node class from the recorded module/class names.
    node_cls = getattr(
        importlib.import_module(serialized_node["module"]),
        serialized_node["class_name"],
    )
    node = node_cls.from_json(serialized_node["data"])
    # Re-attach the embedding that was stripped during serialization.
    embedding = serialized_node.get("embedding")
    if embedding is not None:
        node.embedding = embedding
    return node
def ray_deserialize_node_batch(table: pa.Table) -> List[BaseNode]:
    """Deserialize every row of a pyarrow table back into node objects."""
    return list(map(ray_deserialize_node, table.to_pylist()))
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/ingestion/llama-index-ingestion-ray/llama_index/ingestion/ray/utils.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/ingestion/llama-index-ingestion-ray/tests/test_pipeline.py | from typing import Sequence, Any
from pathlib import Path
import pytest
import ray.exceptions
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
from llama_index.core.extractors import KeywordExtractor
from llama_index.core.llms.mock import MockLLM
from llama_index.core.node_parser import SentenceSplitter, MarkdownElementNodeParser
from llama_index.core.readers import ReaderConfig, StringIterableReader
from llama_index.core.schema import Document, BaseNode, TransformComponent
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.ingestion.ray import RayIngestionPipeline, RayTransformComponent
def test_build_pipeline() -> None:
    """Pipeline construction accepts readers, documents, and Ray-wrapped transforms."""
    pipeline = RayIngestionPipeline(
        readers=[
            ReaderConfig(
                reader=StringIterableReader(),
                reader_kwargs={"texts": ["This is a test."]},
            )
        ],
        documents=[Document.example()],
        transformations=[
            RayTransformComponent(SentenceSplitter),
            RayTransformComponent(KeywordExtractor, llm=MockLLM()),
            RayTransformComponent(MockEmbedding, embed_dim=8),
        ],
    )
    assert len(pipeline.transformations) == 3
def test_run_pipeline() -> None:
    """run() produces one node per input source and the extractor adds metadata."""
    pipeline = RayIngestionPipeline(
        readers=[
            ReaderConfig(
                reader=StringIterableReader(),
                reader_kwargs={"texts": ["This is a test."]},
            )
        ],
        documents=[Document.example()],
        transformations=[
            RayTransformComponent(SentenceSplitter),
            RayTransformComponent(KeywordExtractor, llm=MockLLM()),
        ],
    )
    nodes = pipeline.run()
    # One node from the reader text, one from the example document.
    assert len(nodes) == 2
    assert len(nodes[0].metadata) > 0
def test_run_pipeline_with_ref_doc_id():
    """Nodes produced by the Ray pipeline keep their source document id."""
    documents = [
        Document(text="one", doc_id="1"),
    ]
    pipeline = RayIngestionPipeline(
        documents=documents,
        transformations=[
            RayTransformComponent(MarkdownElementNodeParser, llm=MockLLM()),
            RayTransformComponent(SentenceSplitter),
            RayTransformComponent(MockEmbedding, embed_dim=8),
        ],
    )
    nodes = pipeline.run()
    assert len(nodes) == 1
    # Provenance must survive serialization across Ray workers.
    assert nodes[0].ref_doc_id == "1"
def test_save_load_pipeline(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
    """Docstore de-dup works, and the persisted state survives a save/load round trip."""
    monkeypatch.chdir(tmp_path)
    documents = [
        Document(text="one", doc_id="1"),
        Document(text="two", doc_id="2"),
        Document(text="one", doc_id="1"),  # duplicate of the first doc
    ]
    pipeline = RayIngestionPipeline(
        transformations=[
            RayTransformComponent(SentenceSplitter, chunk_size=25, chunk_overlap=0),
        ],
        docstore=SimpleDocumentStore(),
    )
    nodes = pipeline.run(documents=documents)
    assert len(nodes) == 2
    assert pipeline.docstore is not None
    assert len(pipeline.docstore.docs) == 2

    # dedup will catch the last node
    nodes = pipeline.run(documents=[documents[-1]])
    assert len(nodes) == 0
    assert pipeline.docstore is not None
    assert len(pipeline.docstore.docs) == 2

    # test save/load
    pipeline.persist("./test_pipeline")

    pipeline2 = RayIngestionPipeline(
        transformations=[
            RayTransformComponent(SentenceSplitter, chunk_size=25, chunk_overlap=0),
        ],
    )
    pipeline2.load("./test_pipeline")

    # Bug fix: exercise the *loaded* pipeline — the original re-ran `pipeline`
    # here, so the restored docstore was never actually tested.
    nodes = pipeline2.run(documents=[documents[-1]])
    assert len(nodes) == 0
    assert pipeline2.docstore is not None
    assert len(pipeline2.docstore.docs) == 2
def test_pipeline_with_transform_error() -> None:
    """A failing transform surfaces as a RayTaskError and nothing is committed to the docstore."""

    class RaisingTransform(TransformComponent):
        # Always fails, simulating a broken transformation on a Ray worker.
        def __call__(
            self, nodes: Sequence[BaseNode], **kwargs: Any
        ) -> Sequence[BaseNode]:
            raise RuntimeError

    document1 = Document.example()
    document1.id_ = "1"
    pipeline = RayIngestionPipeline(
        transformations=[
            RayTransformComponent(SentenceSplitter, chunk_size=25, chunk_overlap=0),
            RayTransformComponent(RaisingTransform),
        ],
        docstore=SimpleDocumentStore(),
    )
    # Ray wraps worker exceptions in RayTaskError.
    with pytest.raises(ray.exceptions.RayTaskError):
        pipeline.run(documents=[document1])
    # The failed run must not have left the document in the docstore.
    assert pipeline.docstore.get_node("1", raise_error=False) is None
@pytest.mark.asyncio
async def test_arun_pipeline() -> None:
    """Async pipeline run produces the same nodes as the sync variant."""
    reader_cfg = ReaderConfig(
        reader=StringIterableReader(),
        reader_kwargs={"texts": ["This is a test."]},
    )
    pipeline = RayIngestionPipeline(
        readers=[reader_cfg],
        documents=[Document.example()],
        transformations=[
            RayTransformComponent(SentenceSplitter),
            RayTransformComponent(KeywordExtractor, llm=MockLLM()),
        ],
    )

    produced = await pipeline.arun()

    # One node per input (reader text + example document).
    assert len(produced) == 2
    # The keyword extractor populated metadata.
    assert len(produced[0].metadata) > 0
@pytest.mark.asyncio
async def test_arun_pipeline_with_ref_doc_id() -> None:
    """Async run: produced nodes keep the originating document id.

    Adds the missing ``-> None`` annotation for consistency with the
    sibling tests in this module.
    """
    documents = [
        Document(text="one", doc_id="1"),
    ]
    pipeline = RayIngestionPipeline(
        documents=documents,
        transformations=[
            RayTransformComponent(MarkdownElementNodeParser, llm=MockLLM()),
            RayTransformComponent(SentenceSplitter),
            RayTransformComponent(MockEmbedding, embed_dim=8),
        ],
    )
    nodes = await pipeline.arun()
    assert len(nodes) == 1
    # ref_doc_id must point back at the source document.
    assert nodes[0].ref_doc_id == "1"
@pytest.mark.asyncio
async def test_async_pipeline_with_transform_error() -> None:
    """An async run with a failing transform raises and commits nothing."""

    class ExplodingTransform(TransformComponent):
        def __call__(
            self, nodes: Sequence[BaseNode], **kwargs: Any
        ) -> Sequence[BaseNode]:
            raise RuntimeError

    doc = Document.example()
    doc.id_ = "1"
    pipeline = RayIngestionPipeline(
        transformations=[
            RayTransformComponent(SentenceSplitter, chunk_size=25, chunk_overlap=0),
            RayTransformComponent(ExplodingTransform),
        ],
        docstore=SimpleDocumentStore(),
    )

    # Note: the async path surfaces the plain RuntimeError (not RayTaskError).
    with pytest.raises(RuntimeError):
        await pipeline.arun(documents=[doc])

    # The failed document must not be in the docstore.
    assert pipeline.docstore.get_node("1", raise_error=False) is None
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/ingestion/llama-index-ingestion-ray/tests/test_pipeline.py",
"license": "MIT License",
"lines": 166,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-alibabacloud-mysql/llama_index/vector_stores/alibabacloud_mysql/base.py | """Alibaba Cloud MySQL Vector Store."""
import json
import logging
import re
from typing import Any, Dict, List, NamedTuple, Optional, Literal, Sequence
from urllib.parse import quote_plus
import sqlalchemy
import sqlalchemy.ext.asyncio
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.schema import BaseNode, MetadataMode
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
FilterCondition,
FilterOperator,
MetadataFilter,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.core.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
class DBEmbeddingRow(NamedTuple):
    """One row fetched back from the vector table, plus its query score."""

    node_id: str  # value of the `node_id` column
    text: str  # raw node text (metadata stripped)
    metadata: dict  # deserialized JSON `metadata` column
    similarity: float  # score derived from the SQL vector distance
# Module-level logger, named after this module per logging convention.
_logger = logging.getLogger(__name__)
class AlibabaCloudMySQLVectorStore(BasePydanticVectorStore):
    """
    Alibaba Cloud MySQL Vector Store.

    Stores nodes in a MySQL table with a VECTOR column and runs similarity
    search through the RDS MySQL ``VEC_DISTANCE_*`` SQL functions.

    Examples:
        ```python
        from llama_index.vector_stores.alibabacloud_mysql import AlibabaCloudMySQLVectorStore

        # Create AlibabaCloudMySQLVectorStore instance
        vector_store = AlibabaCloudMySQLVectorStore(
            table_name="llama_index_vectorstore",
            host="localhost",
            port=3306,
            user="llamaindex",
            password="password",
            database="vectordb",
            embed_dim=1536, # OpenAI embedding dimension
            default_m=6,
            distance_method="COSINE"
        )
        ```
    """

    # Node text is stored in the table, so queries can return full content.
    stores_text: bool = True
    flat_metadata: bool = False

    # SQLAlchemy DSN assembled in __init__ from host/port/user/password/database.
    connection_string: str
    table_name: str = "llama_index_table"
    database: str
    embed_dim: int = 1536
    # M value interpolated into the VECTOR INDEX DDL (see _create_table_if_not_exists).
    default_m: int = 6
    distance_method: Literal["EUCLIDEAN", "COSINE"] = "COSINE"
    # When True, vector support is verified and the table created on init.
    perform_setup: bool = True
    debug: bool = False

    # Lazily-populated SQLAlchemy handles (see _connect / _initialize).
    _engine: Any = PrivateAttr()
    _async_engine: Any = PrivateAttr()
    _session: Any = PrivateAttr()
    _async_session: Any = PrivateAttr()
    _table_class: Any = PrivateAttr()
    _is_initialized: bool = PrivateAttr(default=False)
    def _validate_identifier(self, name: str) -> str:
        """Validate *name* as a safe SQL identifier and return it unchanged.

        Raises:
            ValueError: If *name* is not a valid SQL identifier.
        """
        # Only letters, digits and underscores are allowed (standard SQL
        # identifier rules); this guards the f-string table names used in
        # the DDL/DML below against SQL injection.
        if not re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", name):
            raise ValueError(f"Invalid identifier: {name}")
        return name
def _validate_positive_int(self, value: int, param_name: str) -> int:
if not isinstance(value, int) or value <= 0:
raise ValueError(f"Expected positive int for {param_name}, got {value}")
return value
    def _validate_table_name(self, table_name: str) -> str:
        """Validate the table name as a safe SQL identifier (delegates to _validate_identifier)."""
        return self._validate_identifier(table_name)
    def __init__(
        self,
        host: str,
        port: int,
        user: str,
        password: str,
        database: str,
        table_name: str = "llama_index_table",
        embed_dim: int = 1536,
        default_m: int = 6,
        distance_method: Literal["EUCLIDEAN", "COSINE"] = "COSINE",
        perform_setup: bool = True,
        debug: bool = False,
    ) -> None:
        """
        Constructor.

        Args:
            host (str): Host of Alibaba Cloud MySQL connection.
            port (int): Port of Alibaba Cloud MySQL connection.
            user (str): Alibaba Cloud MySQL username.
            password (str): Alibaba Cloud MySQL password.
            database (str): Alibaba Cloud MySQL DB name.
            table_name (str, optional): Table name for the vector store. Defaults to "llama_index_table".
            embed_dim (int, optional): Embedding dimensions. Defaults to 1536.
            default_m (int, optional): Default M value for the vector index. Defaults to 6.
            distance_method (Literal["EUCLIDEAN", "COSINE"], optional): Vector distance type. Defaults to COSINE.
            perform_setup (bool, optional): If DB should be set up. Defaults to True.
            debug (bool, optional): If debug logging should be enabled. Defaults to False.

        Raises:
            ValueError: If table_name is not a valid SQL identifier, or if
                embed_dim/default_m are not positive integers.
        """
        # Validate table_name, embed_dim, and default_m before they are ever
        # interpolated into SQL statements.
        self._validate_table_name(table_name)
        self._validate_positive_int(embed_dim, "embed_dim")
        self._validate_positive_int(default_m, "default_m")

        # Create connection string; URL-encode the password so special
        # characters do not corrupt the DSN.
        password_safe = quote_plus(password)
        connection_string = (
            f"mysql+pymysql://{user}:{password_safe}@{host}:{port}/{database}"
        )

        super().__init__(
            connection_string=connection_string,
            table_name=table_name,
            database=database,
            embed_dim=embed_dim,
            default_m=default_m,
            distance_method=distance_method,
            perform_setup=perform_setup,
            debug=debug,
        )

        # Private attrs: connection machinery is populated by _initialize below.
        self._engine = None
        self._async_engine = None
        self._session = None
        self._async_session = None
        self._table_class = None
        self._is_initialized = False

        self._initialize()
    @classmethod
    def class_name(cls) -> str:
        """Return the canonical class name string."""
        return "AlibabaCloudMySQLVectorStore"
    @classmethod
    def from_params(
        cls,
        host: str,
        port: int,
        user: str,
        password: str,
        database: str,
        table_name: str = "llama_index_table",
        embed_dim: int = 1536,
        default_m: int = 6,
        distance_method: Literal["EUCLIDEAN", "COSINE"] = "COSINE",
        perform_setup: bool = True,
        debug: bool = False,
    ) -> "AlibabaCloudMySQLVectorStore":
        """
        Construct from params.

        Args:
            host (str): Host of Alibaba Cloud MySQL connection.
            port (int): Port of Alibaba Cloud MySQL connection.
            user (str): Alibaba Cloud MySQL username.
            password (str): Alibaba Cloud MySQL password.
            database (str): Alibaba Cloud MySQL DB name.
            table_name (str, optional): Table name for the vector store. Defaults to "llama_index_table".
            embed_dim (int, optional): Embedding dimensions. Defaults to 1536.
            default_m (int, optional): Default M value for the vector index. Defaults to 6.
            distance_method (Literal["EUCLIDEAN", "COSINE"], optional): Vector distance type. Defaults to COSINE.
            perform_setup (bool, optional): If DB should be set up. Defaults to True.
            debug (bool, optional): If debug logging should be enabled. Defaults to False.

        Returns:
            AlibabaCloudMySQLVectorStore: Instance of AlibabaCloudMySQLVectorStore constructed from params.
        """
        # Thin alternate constructor: forwards everything to __init__.
        return cls(
            host=host,
            port=port,
            user=user,
            password=password,
            database=database,
            table_name=table_name,
            embed_dim=embed_dim,
            default_m=default_m,
            distance_method=distance_method,
            perform_setup=perform_setup,
            debug=debug,
        )
@property
def client(self) -> Any:
"""Return the SQLAlchemy engine."""
if not self._is_initialized:
return None
return self._engine
def _connect(self) -> None:
"""Create SQLAlchemy engines and sessions."""
from sqlalchemy import create_engine
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy.orm import sessionmaker
# Create sync engine
self._engine = create_engine(
self.connection_string,
echo=self.debug,
)
# Create async engine
async_connection_string = self.connection_string.replace(
"mysql+pymysql://", "mysql+aiomysql://"
)
self._async_engine = create_async_engine(
async_connection_string,
echo=self.debug,
)
# Create session makers
self._session = sessionmaker(self._engine)
self._async_session = sessionmaker(self._async_engine, class_=AsyncSession)
def _check_vector_support(self) -> None:
"""Check if the MySQL server supports vector operations."""
from sqlalchemy import text
with self._session() as session:
try:
# Check MySQL version
# Try to execute a simple vector function to verify support
result = session.execute(
text("SELECT VEC_FromText('[1,2,3]') IS NOT NULL as vector_support")
)
vector_result = result.fetchone()
if not vector_result or not vector_result[0]:
raise ValueError(
"RDS MySQL Vector functions are not available."
" Please ensure you're using RDS MySQL 8.0.36+ with Vector support."
)
# Check rds_release_date >= 20251031
result = session.execute(text("SHOW VARIABLES LIKE 'rds_release_date'"))
rds_release_result = result.fetchone()
if not rds_release_result:
raise ValueError(
"Unable to retrieve rds_release_date variable. "
"Your MySQL instance may not Alibaba Cloud RDS MySQL instance."
)
rds_release_date = rds_release_result[1]
if int(rds_release_date) < 20251031:
raise ValueError(
f"Alibaba Cloud MySQL rds_release_date must be 20251031 or later, found: {rds_release_date}."
)
except Exception as e:
if "FUNCTION" in str(e) and "VEC_FromText" in str(e):
raise ValueError(
"RDS MySQL Vector functions are not available."
" Please ensure you're using RDS MySQL 8.0.36+ with Vector support."
) from e
raise
    def _create_table_if_not_exists(self) -> None:
        """Create the backing table (VECTOR column + vector index) if missing."""
        from sqlalchemy import text

        with self._session() as session:
            # Create table with VECTOR data type for Alibaba Cloud MySQL.
            # table_name/embed_dim/default_m are validated in __init__ and
            # distance_method is Literal-constrained, so the f-string
            # interpolation below is safe from SQL injection.
            stmt = text(f"""
                CREATE TABLE IF NOT EXISTS `{self.table_name}` (
                    id VARCHAR(36) PRIMARY KEY,
                    node_id VARCHAR(255) NOT NULL,
                    text LONGTEXT,
                    metadata JSON,
                    embedding VECTOR({self.embed_dim}) NOT NULL,
                    INDEX `node_id_index` (node_id),
                    VECTOR INDEX (embedding) M={self.default_m} DISTANCE={self.distance_method}
                ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
            """)
            session.execute(stmt)
            session.commit()
def _initialize(self) -> None:
if not self._is_initialized:
self._connect()
if self.perform_setup:
self._check_vector_support()
self._create_table_if_not_exists()
self._is_initialized = True
def _node_to_table_row(self, node: BaseNode) -> Dict[str, Any]:
return {
"node_id": node.node_id,
"text": node.get_content(metadata_mode=MetadataMode.NONE),
"embedding": node.get_embedding(),
"metadata": node_to_metadata_dict(
node,
remove_text=True,
flat_metadata=self.flat_metadata,
),
}
def _to_mysql_operator(self, operator: FilterOperator) -> str:
if operator == FilterOperator.EQ:
return "="
elif operator == FilterOperator.GT:
return ">"
elif operator == FilterOperator.LT:
return "<"
elif operator == FilterOperator.NE:
return "!="
elif operator == FilterOperator.GTE:
return ">="
elif operator == FilterOperator.LTE:
return "<="
elif operator == FilterOperator.IN:
return "IN"
elif operator == FilterOperator.NIN:
return "NOT IN"
else:
_logger.warning("Unsupported operator: %s, fallback to '='", operator)
return "="
def _build_filter_clause(
self, filter_: MetadataFilter, global_param_counter: List[int]
) -> tuple[str, dict]:
params = {}
if filter_.operator in [FilterOperator.IN, FilterOperator.NIN]:
# For IN/NIN operators, we need multiple placeholders
placeholders = []
for i in range(len(filter_.value)):
param_name = f"param_{global_param_counter[0]}"
global_param_counter[0] += 1
placeholders.append(f":{param_name}")
params[param_name] = filter_.value[i]
filter_value = f"({','.join(placeholders)})"
elif isinstance(filter_.value, (list, tuple)):
# For list/tuple values, we also need multiple placeholders
placeholders = []
for i in range(len(filter_.value)):
param_name = f"param_{global_param_counter[0]}"
global_param_counter[0] += 1
placeholders.append(f":{param_name}")
params[param_name] = filter_.value[i]
filter_value = f"({','.join(placeholders)})"
else:
# For single value, create a single parameter
param_name = f"param_{global_param_counter[0]}"
global_param_counter[0] += 1
filter_value = f":{param_name}"
params[param_name] = filter_.value
clause = f"JSON_VALUE(metadata, '$.{filter_.key}') {self._to_mysql_operator(filter_.operator)} {filter_value}"
return clause, params
def _filters_to_where_clause(
self, filters: MetadataFilters, global_param_counter: List[int]
) -> tuple[str, dict]:
conditions = {
FilterCondition.OR: "OR",
FilterCondition.AND: "AND",
}
if filters.condition not in conditions:
raise ValueError(
f"Unsupported condition: {filters.condition}. "
f"Must be one of {list(conditions.keys())}"
)
clauses: List[str] = []
all_params = {}
for filter_ in filters.filters:
if isinstance(filter_, MetadataFilter):
clause, filter_params = self._build_filter_clause(
filter_, global_param_counter
)
clauses.append(clause)
all_params.update(filter_params)
continue
if isinstance(filter_, MetadataFilters):
subclause, subparams = self._filters_to_where_clause(
filter_, global_param_counter
)
if subclause:
clauses.append(f"({subclause})")
all_params.update(subparams)
continue
raise ValueError(
f"Unsupported filter type: {type(filter_)}. Must be one of {MetadataFilter}, {MetadataFilters}"
)
return f" {conditions[filters.condition]} ".join(clauses), all_params
def _db_rows_to_query_result(
self, rows: List[DBEmbeddingRow]
) -> VectorStoreQueryResult:
nodes = []
similarities = []
ids = []
for db_embedding_row in rows:
node = metadata_dict_to_node(db_embedding_row.metadata)
node.set_content(str(db_embedding_row.text))
similarities.append(db_embedding_row.similarity)
ids.append(db_embedding_row.node_id)
nodes.append(node)
return VectorStoreQueryResult(
nodes=nodes,
similarities=similarities,
ids=ids,
)
def get_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
) -> List[BaseNode]:
"""Get nodes from vector store."""
self._initialize()
nodes: List[BaseNode] = []
with self._session() as session:
if node_ids:
# Using parameterized query to prevent SQL injection
placeholders = ",".join([f":node_id_{i}" for i in range(len(node_ids))])
params = {f"node_id_{i}": node_id for i, node_id in enumerate(node_ids)}
stmt = sqlalchemy.text(
f"""SELECT text, metadata FROM `{self.table_name}` WHERE node_id IN ({placeholders})"""
)
result = session.execute(stmt, params)
else:
stmt = sqlalchemy.text(
f"""SELECT text, metadata FROM `{self.table_name}`"""
)
result = session.execute(stmt)
for item in result:
node = metadata_dict_to_node(
json.loads(item[1]) if isinstance(item[1], str) else item[1]
)
node.set_content(str(item[0]))
nodes.append(node)
return nodes
    def add(
        self,
        nodes: List[BaseNode],
        **add_kwargs: Any,
    ) -> List[str]:
        """Insert (or upsert) nodes into the table and return their ids.

        Args:
            nodes: Nodes with embeddings already computed.
            **add_kwargs: Unused; accepted for interface compatibility.

        Returns:
            The node ids of the inserted nodes, in input order.
        """
        self._initialize()
        ids = []
        with self._session() as session:
            for node in nodes:
                ids.append(node.node_id)
                item = self._node_to_table_row(node)
                # The embedding is serialized to JSON text and converted
                # server-side via VEC_FromText.
                # NOTE(review): `id` is always a fresh UUID(), so the
                # ON DUPLICATE KEY UPDATE branch likely never fires for the
                # primary key -- confirm the intended upsert key.
                stmt = sqlalchemy.text(f"""
                    INSERT INTO `{self.table_name}` (id, node_id, text, embedding, metadata)
                    VALUES (
                        UUID(),
                        :node_id,
                        :text,
                        VEC_FromText(:embedding),
                        :metadata
                    )
                    ON DUPLICATE KEY UPDATE
                        text = VALUES(text),
                        embedding = VALUES(embedding),
                        metadata = VALUES(metadata)
                """)
                session.execute(
                    stmt,
                    {
                        "node_id": item["node_id"],
                        "text": item["text"],
                        "embedding": json.dumps(item["embedding"]),
                        "metadata": json.dumps(item["metadata"]),
                    },
                )
            session.commit()
        return ids
    async def async_add(
        self,
        nodes: Sequence[BaseNode],
        **kwargs: Any,
    ) -> List[str]:
        """
        Async wrapper around :meth:`add`.

        Args:
            nodes: Nodes with embeddings already computed.
            **kwargs: Unused; accepted for interface compatibility.

        Returns:
            The node ids of the inserted nodes, in input order.
        """
        self._initialize()
        if not nodes:
            return []
        ids: List[str] = []
        async with self._async_session() as session:
            for node in nodes:
                ids.append(node.node_id)
                item = self._node_to_table_row(node)
                # Same upsert statement as the sync add().
                stmt = sqlalchemy.text(f"""
                    INSERT INTO `{self.table_name}` (id, node_id, text, embedding, metadata)
                    VALUES (
                        UUID(),
                        :node_id,
                        :text,
                        VEC_FromText(:embedding),
                        :metadata
                    )
                    ON DUPLICATE KEY UPDATE
                        text = VALUES(text),
                        embedding = VALUES(embedding),
                        metadata = VALUES(metadata)
                """)
                await session.execute(
                    stmt,
                    {
                        "node_id": item["node_id"],
                        "text": item["text"],
                        "embedding": json.dumps(item["embedding"]),
                        "metadata": json.dumps(item["metadata"]),
                    },
                )
            await session.commit()
        return ids
    def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
        """Run a top-k vector similarity search (DEFAULT mode only).

        Args:
            query: Query embedding, top-k, and optional metadata filters.
            **kwargs: Unused; accepted for interface compatibility.

        Returns:
            Matching nodes with similarity scores computed as ``1 - distance``.

        Raises:
            NotImplementedError: For any mode other than DEFAULT.
        """
        if query.mode != VectorStoreQueryMode.DEFAULT:
            raise NotImplementedError(f"Query mode {query.mode} not available.")

        self._initialize()

        # Using specified distance function for vector similarity search.
        distance_func = (
            "VEC_DISTANCE_COSINE"
            if self.distance_method == "COSINE"
            else "VEC_DISTANCE_EUCLIDEAN"
        )

        where_clause = ""
        params = {
            "query_embedding": json.dumps(query.query_embedding),
            "limit": query.similarity_top_k,
        }

        if query.filters:
            # Use a global counter to ensure unique parameter names.
            global_param_counter = [0]  # Use a list to make it mutable
            where_clause, filter_params = self._filters_to_where_clause(
                query.filters, global_param_counter
            )
            where_clause = f"WHERE {where_clause}"
            params.update(filter_params)

        stmt = sqlalchemy.text(f"""
            SELECT
                node_id,
                text,
                embedding,
                metadata,
                {distance_func}(embedding, VEC_FromText(:query_embedding)) AS distance
            FROM `{self.table_name}`
            {where_clause}
            ORDER BY distance
            LIMIT :limit
        """)

        with self._session() as session:
            result = session.execute(stmt, params)
            results = result.fetchall()

            rows = []
            for item in results:
                rows.append(
                    DBEmbeddingRow(
                        node_id=item[0],
                        text=item[1],
                        metadata=json.loads(item[3])
                        if isinstance(item[3], str)
                        else item[3],
                        # NOTE(review): similarity = 1 - distance is natural
                        # for cosine distance but can go negative for
                        # EUCLIDEAN -- confirm intended scoring for both modes.
                        similarity=(1 - item[4]) if item[4] is not None else 0,
                    )
                )

            return self._db_rows_to_query_result(rows)
    async def aquery(
        self, query: VectorStoreQuery, **kwargs: Any
    ) -> VectorStoreQueryResult:
        """Async wrapper around :meth:`query`.

        Args:
            query: Query embedding, top-k, and optional metadata filters.
            **kwargs: Unused; accepted for interface compatibility.

        Returns:
            Matching nodes with similarity scores computed as ``1 - distance``.

        Raises:
            NotImplementedError: For any mode other than DEFAULT.
        """
        if query.mode != VectorStoreQueryMode.DEFAULT:
            raise NotImplementedError(f"Query mode {query.mode} not available.")

        self._initialize()

        # Using specified distance function for vector similarity search.
        distance_func = (
            "VEC_DISTANCE_COSINE"
            if self.distance_method == "COSINE"
            else "VEC_DISTANCE_EUCLIDEAN"
        )

        where_clause = ""
        params = {
            "query_embedding": json.dumps(query.query_embedding),
            "limit": query.similarity_top_k,
        }

        if query.filters:
            # Use a global counter to ensure unique parameter names.
            global_param_counter = [0]  # Use a list to make it mutable
            where_clause, filter_params = self._filters_to_where_clause(
                query.filters, global_param_counter
            )
            where_clause = f"WHERE {where_clause}"
            params.update(filter_params)

        stmt = sqlalchemy.text(f"""
            SELECT
                node_id,
                text,
                embedding,
                metadata,
                {distance_func}(embedding, VEC_FromText(:query_embedding)) AS distance
            FROM `{self.table_name}`
            {where_clause}
            ORDER BY distance
            LIMIT :limit
        """)

        async with self._async_session() as session:
            result = await session.execute(stmt, params)
            results = result.fetchall()

            rows = []
            for item in results:
                rows.append(
                    DBEmbeddingRow(
                        node_id=item[0],
                        text=item[1],
                        metadata=json.loads(item[3])
                        if isinstance(item[3], str)
                        else item[3],
                        # NOTE(review): see the same scoring note in query().
                        similarity=(1 - item[4]) if item[4] is not None else 0,
                    )
                )

            return self._db_rows_to_query_result(rows)
    def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """Delete all nodes that originated from the given reference document.

        Args:
            ref_doc_id: Source document id stored under ``metadata.ref_doc_id``.
            **delete_kwargs: Unused; accepted for interface compatibility.
        """
        self._initialize()
        with self._session() as session:
            # Delete based on ref_doc_id in metadata (JSON_EXTRACT lookup).
            stmt = sqlalchemy.text(
                f"""DELETE FROM `{self.table_name}` WHERE JSON_EXTRACT(metadata, '$.ref_doc_id') = :doc_id"""
            )
            session.execute(stmt, {"doc_id": ref_doc_id})
            session.commit()
    async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """Async wrapper around :meth:`delete`.

        Args:
            ref_doc_id: Source document id stored under ``metadata.ref_doc_id``.
            **delete_kwargs: Unused; accepted for interface compatibility.
        """
        self._initialize()
        async with self._async_session() as session:
            # Delete based on ref_doc_id in metadata (JSON_EXTRACT lookup).
            stmt = sqlalchemy.text(
                f"""DELETE FROM `{self.table_name}` WHERE JSON_EXTRACT(metadata, '$.ref_doc_id') = :doc_id"""
            )
            await session.execute(stmt, {"doc_id": ref_doc_id})
            await session.commit()
def delete_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None:
self._initialize()
with self._session() as session:
if node_ids:
# Using parameterized query to prevent SQL injection
placeholders = ",".join([f":node_id_{i}" for i in range(len(node_ids))])
params = {f"node_id_{i}": node_id for i, node_id in enumerate(node_ids)}
stmt = sqlalchemy.text(
f"""DELETE FROM `{self.table_name}` WHERE node_id IN ({placeholders})"""
)
session.execute(stmt, params)
session.commit()
async def adelete_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None:
"""Async wrapper around :meth:`delete_nodes`."""
self._initialize()
async with self._async_session() as session:
if node_ids:
# Using parameterized query to prevent SQL injection
placeholders = ",".join([f":node_id_{i}" for i in range(len(node_ids))])
params = {f"node_id_{i}": node_id for i, node_id in enumerate(node_ids)}
stmt = sqlalchemy.text(
f"""DELETE FROM `{self.table_name}` WHERE node_id IN ({placeholders})"""
)
await session.execute(stmt, params)
await session.commit()
def count(self) -> int:
self._initialize()
with self._session() as session:
stmt = sqlalchemy.text(
f"""SELECT COUNT(*) as count FROM `{self.table_name}`"""
)
result = session.execute(stmt)
row = result.fetchone()
return row[0] if row else 0
    def drop(self) -> None:
        """Drop the backing table entirely and close all connections."""
        self._initialize()
        with self._session() as session:
            stmt = sqlalchemy.text(f"""DROP TABLE IF EXISTS `{self.table_name}`""")
            session.execute(stmt)
            session.commit()
        # The table is gone, so tear down the engines as well.
        self.close()
    def clear(self) -> None:
        """Delete every row from the table, keeping the table itself."""
        self._initialize()
        with self._session() as session:
            stmt = sqlalchemy.text(f"""DELETE FROM `{self.table_name}`""")
            session.execute(stmt)
            session.commit()
    async def aclear(self) -> None:
        """Async wrapper around :meth:`clear`: delete every row, keep the table."""
        self._initialize()
        async with self._async_session() as session:
            stmt = sqlalchemy.text(f"""DELETE FROM `{self.table_name}`""")
            await session.execute(stmt)
            await session.commit()
    def close(self) -> None:
        """Dispose both engines, handling async disposal from a sync context.

        Safe to call multiple times; no-op when not initialized.
        """
        if not self._is_initialized:
            return

        if self._engine:
            self._engine.dispose()

        if self._async_engine:
            import asyncio

            try:
                # Try to run the async disposal.
                loop = asyncio.get_event_loop()
                if not loop.is_running():
                    asyncio.run(self._async_engine.dispose())
                else:
                    # Already inside a running loop: asyncio.run would raise
                    # here, so run the disposal on a throwaway thread that
                    # owns its own event loop.
                    import concurrent.futures

                    with concurrent.futures.ThreadPoolExecutor() as executor:
                        future = executor.submit(
                            asyncio.run, self._async_engine.dispose()
                        )
                        future.result()
            except RuntimeError:
                # No usable event loop in this thread: create one.
                asyncio.run(self._async_engine.dispose())

        self._is_initialized = False
    async def aclose(self) -> None:
        """Async variant of :meth:`close`: dispose engines from inside a loop."""
        if not self._is_initialized:
            return
        if self._engine:
            self._engine.dispose()
        if self._async_engine:
            # Awaiting dispose avoids the thread gymnastics needed in close().
            await self._async_engine.dispose()
        self._is_initialized = False
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-alibabacloud-mysql/llama_index/vector_stores/alibabacloud_mysql/base.py",
"license": "MIT License",
"lines": 706,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-alibabacloud-mysql/tests/test_alibabacloud_mysql.py | """Unit tests for AlibabaCloudMySQLVectorStore to improve test coverage."""
import json
from unittest.mock import Mock, patch
import pytest
from llama_index.core.schema import TextNode
from llama_index.core.vector_stores.types import (
FilterOperator,
MetadataFilter,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
)
from llama_index.vector_stores.alibabacloud_mysql import AlibabaCloudMySQLVectorStore
def test_class_name() -> None:
    """class_name() reports the canonical class identifier."""
    expected = "AlibabaCloudMySQLVectorStore"
    assert AlibabaCloudMySQLVectorStore.class_name() == expected
def test_client_property() -> None:
    """client exposes the engine once initialized, otherwise None."""
    with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=False,  # avoid touching a real database
        )

        # Initialized: the (mocked) engine is visible.
        store._is_initialized = True
        store._engine = Mock()
        assert store.client is not None

        # Not initialized: the property hides the engine.
        store._is_initialized = False
        assert store.client is None
def test_create_engine() -> None:
    """Engines are created eagerly during construction (via _connect)."""
    # _connect is deliberately NOT mocked so the real engines are built;
    # with perform_setup=False no connection is actually opened.
    vs = AlibabaCloudMySQLVectorStore(
        table_name="test_table",
        host="localhost",
        port=3306,
        user="test_user",
        password="test_password",
        database="test_db",
        embed_dim=1536,
        default_m=6,
        distance_method="COSINE",
        perform_setup=False,
    )

    # Both the sync and the async engine must exist after __init__.
    assert vs._engine is not None
    assert vs._async_engine is not None
def test_get_connection_context_manager() -> None:
    """The session factory works as a context manager yielding the connection.

    Cleanup: removed the unused ``mock_execute_result`` local and the unused
    ``as mock_connect`` alias from the original.
    """
    mock_conn = Mock()

    with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=False,  # Don't perform setup to avoid DB calls
        )

        # Mock the session maker to return our mock connection.
        mock_session = Mock()
        mock_session.return_value.__enter__ = Mock(return_value=mock_conn)
        mock_session.return_value.__exit__ = Mock(return_value=None)
        store._session = mock_session

        # Entering the context manager must yield the mocked connection.
        with store._session() as conn:
            assert conn == mock_conn

        # Verify the session context manager was used.
        mock_session.assert_called_once()
def test_check_vector_support_success() -> None:
    """_check_vector_support passes when functions exist and the release is new enough."""
    session = Mock()
    # First query: vector probe; second query: rds_release_date variable.
    session.execute.return_value.fetchone.side_effect = [
        [True],
        ["rds_release_date", "20251031"],
    ]

    with patch.object(
        AlibabaCloudMySQLVectorStore, "_session"
    ) as session_maker, patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        session_maker.return_value.__enter__.return_value = session
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=False,  # setup is exercised manually below
        )
        # Should not raise any exception.
        store._check_vector_support()
def test_check_vector_support_no_vector_functions() -> None:
    """A falsy probe result means vector functions are unavailable."""
    session = Mock()
    session.execute.return_value.fetchone.return_value = [False]

    with patch.object(
        AlibabaCloudMySQLVectorStore, "_session"
    ) as session_maker, patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        session_maker.return_value.__enter__.return_value = session
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=False,  # setup is exercised manually below
        )

        with pytest.raises(
            ValueError, match="RDS MySQL Vector functions are not available"
        ):
            store._check_vector_support()
def test_check_vector_support_old_release_date() -> None:
    """A release date older than 20251031 is rejected."""
    session = Mock()
    # Vector probe succeeds, but the instance reports a day-too-old release.
    session.execute.return_value.fetchone.side_effect = [
        [True],
        ["rds_release_date", "20251030"],
    ]

    with patch.object(
        AlibabaCloudMySQLVectorStore, "_session"
    ) as session_maker, patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        session_maker.return_value.__enter__.return_value = session
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=False,  # setup is exercised manually below
        )

        with pytest.raises(
            ValueError, match="rds_release_date must be 20251031 or later"
        ):
            store._check_vector_support()
def test_check_vector_support_no_release_date_variable() -> None:
    """A missing rds_release_date variable is reported as a clear error."""
    session = Mock()
    # Vector probe succeeds; SHOW VARIABLES returns nothing.
    session.execute.return_value.fetchone.side_effect = [
        [True],
        None,
    ]

    with patch.object(
        AlibabaCloudMySQLVectorStore, "_session"
    ) as session_maker, patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        session_maker.return_value.__enter__.return_value = session
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=False,  # setup is exercised manually below
        )

        with pytest.raises(
            ValueError, match="Unable to retrieve rds_release_date variable"
        ):
            store._check_vector_support()
def test_check_vector_support_function_error() -> None:
    """A 'FUNCTION ... VEC_FromText' DB error is translated into a ValueError."""
    session = Mock()
    session.execute.side_effect = Exception(
        "FUNCTION test.VEC_FromText does not exist"
    )

    with patch.object(
        AlibabaCloudMySQLVectorStore, "_session"
    ) as session_maker, patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        session_maker.return_value.__enter__.return_value = session
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=False,  # setup is exercised manually below
        )

        with pytest.raises(
            ValueError, match="RDS MySQL Vector functions are not available"
        ):
            store._check_vector_support()
def test_initialize_invalid_distance_method() -> None:
    """An invalid distance_method is rejected during construction."""
    bad_kwargs = dict(
        table_name="test_table",
        host="localhost",
        port=3306,
        user="test_user",
        password="test_password",
        database="test_db",
        distance_method="INVALID",
    )
    # Pydantic should reject the invalid Literal during initialization.
    with pytest.raises(Exception):
        AlibabaCloudMySQLVectorStore(**bad_kwargs)
def test_initialize_success() -> None:
    """With perform_setup=True both setup hooks run exactly once."""
    with patch.object(
        AlibabaCloudMySQLVectorStore, "_check_vector_support"
    ) as check_mock, patch.object(
        AlibabaCloudMySQLVectorStore, "_create_table_if_not_exists"
    ) as create_mock, patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=True,
        )

        # Both setup steps ran and the store ended up initialized.
        check_mock.assert_called_once()
        create_mock.assert_called_once()
        assert store._is_initialized is True
def test_initialize_without_setup() -> None:
    """With perform_setup=False neither setup hook is invoked."""
    with patch.object(
        AlibabaCloudMySQLVectorStore, "_check_vector_support"
    ) as check_mock, patch.object(
        AlibabaCloudMySQLVectorStore, "_create_table_if_not_exists"
    ) as create_mock, patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=False,
        )

        # Setup steps skipped, but the store still reports initialized.
        check_mock.assert_not_called()
        create_mock.assert_not_called()
        assert store._is_initialized is True
def test_node_to_table_row() -> None:
    """A TextNode should round-trip into the flat dict used for SQL inserts."""
    with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=False,  # avoid touching a real database
        )
        sample = TextNode(
            text="test text",
            id_="test-id",
            metadata={"key": "value"},
            embedding=[1.0, 2.0, 3.0],
        )
        table_row = store._node_to_table_row(sample)
        # Every scalar column must carry the node's value verbatim.
        expected = {
            "node_id": "test-id",
            "text": "test text",
            "embedding": [1.0, 2.0, 3.0],
        }
        for column, value in expected.items():
            assert table_row[column] == value
        # Metadata is serialized but must still mention the original key.
        assert "key" in table_row["metadata"]
def test_to_mysql_operator() -> None:
    """Each supported FilterOperator maps to its SQL symbol; unknowns fall back to '='."""
    with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=False,  # avoid touching a real database
        )
        operator_map = {
            FilterOperator.EQ: "=",
            FilterOperator.GT: ">",
            FilterOperator.LT: "<",
            FilterOperator.NE: "!=",
            FilterOperator.GTE: ">=",
            FilterOperator.LTE: "<=",
            FilterOperator.IN: "IN",
            FilterOperator.NIN: "NOT IN",
        }
        for op, symbol in operator_map.items():
            assert store._to_mysql_operator(op) == symbol
        # An unrecognized operator logs a warning and degrades to equality.
        with patch(
            "llama_index.vector_stores.alibabacloud_mysql.base._logger"
        ) as logger_mock:
            assert store._to_mysql_operator("UNSUPPORTED") == "="
            logger_mock.warning.assert_called_once()
def test_build_filter_clause() -> None:
    """Single metadata filters should render JSON_VALUE clauses with named params."""
    with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=False,  # avoid touching a real database
        )
        # Equality filter.
        eq_filter = MetadataFilter(
            key="category", value="test", operator=FilterOperator.EQ
        )
        clause, params = store._build_filter_clause(eq_filter, [0])
        assert "JSON_VALUE(metadata, '$.category') =" in clause
        # Parameters are a dict suitable for SQLAlchemy named binding.
        assert isinstance(params, dict)
        # IN filter with a list value.
        in_filter = MetadataFilter(
            key="category", value=["test1", "test2"], operator=FilterOperator.IN
        )
        clause, params = store._build_filter_clause(in_filter, [0])
        assert "JSON_VALUE(metadata, '$.category') IN" in clause
        assert isinstance(params, dict)
def test_filters_to_where_clause() -> None:
    """AND/OR filter groups should compose into a single WHERE clause."""
    with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=False,  # avoid touching a real database
        )
        # Two filters joined with AND.
        and_group = MetadataFilters(
            filters=[
                MetadataFilter(
                    key="category", value="test", operator=FilterOperator.EQ
                ),
                MetadataFilter(key="priority", value=1, operator=FilterOperator.GT),
            ],
            condition="and",
        )
        clause, params = store._filters_to_where_clause(and_group, [0])
        assert "JSON_VALUE(metadata, '$.category') =" in clause
        assert "JSON_VALUE(metadata, '$.priority') >" in clause
        assert "AND" in clause
        # Parameters are a dict suitable for SQLAlchemy named binding.
        assert isinstance(params, dict)
        # Two filters joined with OR.
        or_group = MetadataFilters(
            filters=[
                MetadataFilter(
                    key="category", value="test", operator=FilterOperator.EQ
                ),
                MetadataFilter(
                    key="type", value="document", operator=FilterOperator.EQ
                ),
            ],
            condition="or",
        )
        clause, params = store._filters_to_where_clause(or_group, [0])
        assert "OR" in clause
def test_db_rows_to_query_result() -> None:
    """DBEmbeddingRow entries convert into parallel nodes/similarities/ids lists."""
    with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=False,  # avoid touching a real database
        )
        from llama_index.vector_stores.alibabacloud_mysql.base import DBEmbeddingRow

        rows = [
            DBEmbeddingRow(
                node_id="test-id-1",
                text="test text 1",
                metadata={
                    "_node_content": '{"id_": "test-id-1", "text": "test text 1"}'
                },
                similarity=0.9,
            ),
            DBEmbeddingRow(
                node_id="test-id-2",
                text="test text 2",
                metadata={
                    "_node_content": '{"id_": "test-id-2", "text": "test text 2"}'
                },
                similarity=0.8,
            ),
        ]
        result = store._db_rows_to_query_result(rows)
        # All three parallel lists must line up pairwise.
        assert len(result.nodes) == len(result.similarities) == len(result.ids) == 2
        assert list(result.similarities) == [0.9, 0.8]
        assert list(result.ids) == ["test-id-1", "test-id-2"]
def test_query_unsupported_mode() -> None:
    """Non-default query modes are rejected with NotImplementedError."""
    with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=False,  # avoid touching a real database
        )
        unsupported = VectorStoreQuery(
            query_embedding=[1.0, 2.0, 3.0],
            mode=VectorStoreQueryMode.TEXT_SEARCH,  # not implemented by this store
        )
        expected_msg = f"Query mode {VectorStoreQueryMode.TEXT_SEARCH} not available"
        with pytest.raises(NotImplementedError, match=expected_msg):
            store.query(unsupported)
def test_close() -> None:
    """close() flips the initialized flag back to False."""
    with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=False,  # avoid touching a real database
        )
        store._is_initialized = True  # simulate an initialized store
        store.close()
        assert store._is_initialized is False
def test_from_params() -> None:
    """from_params should forward every argument verbatim to __init__."""
    kwargs = dict(
        host="localhost",
        port=3306,
        user="test_user",
        password="test_password",
        database="test_db",
        table_name="custom_table",
        embed_dim=512,
        default_m=10,
        distance_method="EUCLIDEAN",
        perform_setup=False,
        debug=False,
    )
    with patch.object(
        AlibabaCloudMySQLVectorStore, "__init__", return_value=None
    ) as init_mock:
        AlibabaCloudMySQLVectorStore.from_params(**kwargs)
        # Exactly the same keyword arguments must reach the constructor.
        init_mock.assert_called_once_with(**kwargs)
def test_get_nodes() -> None:
    """Test get_nodes method.

    Verifies that get_nodes() can be called with and without node_ids, that it
    returns a list, and that the generated SQL uses SQLAlchemy named parameters
    (and omits the WHERE clause when no ids are given).
    """
    from unittest.mock import MagicMock

    mock_session = Mock()
    # BUG FIX: dunder methods such as __iter__ are looked up on the *type*, so
    # assigning `mock_result.__iter__ = Mock(...)` on a plain Mock instance has
    # no effect on iter(mock_result). Use MagicMock, which supports configuring
    # magic methods, so iterating the result genuinely yields nothing.
    mock_result = MagicMock()
    mock_result.__iter__.return_value = iter([])  # empty result set
    mock_session.execute.return_value = mock_result
    with patch.object(AlibabaCloudMySQLVectorStore, "_session") as mock_session_maker:
        mock_session_maker.return_value.__enter__.return_value = mock_session
        with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
            store = AlibabaCloudMySQLVectorStore(
                table_name="test_table",
                host="localhost",
                port=3306,
                user="test_user",
                password="test_password",
                database="test_db",
                perform_setup=False,  # Don't perform setup to avoid DB calls
            )
            # Test get_nodes with node_ids - just verify the method doesn't error
            nodes = store.get_nodes(node_ids=["test-id-1", "test-id-2"])
            # We can't verify exact count due to mock complexity, just that it returns a list
            assert isinstance(nodes, list)
            mock_session.execute.assert_called()
            # Check that the query uses SQLAlchemy named parameters
            args = mock_session.execute.call_args
            if args and args[0]:
                query = str(args[0][0])  # First argument of the call
                assert ":node_id_0" in query or ":node_id_1" in query
            # Test get_nodes without node_ids
            mock_session.reset_mock()
            nodes = store.get_nodes()
            assert isinstance(nodes, list)
            call_args = mock_session.execute.call_args
            if call_args and call_args[0]:
                query = str(call_args[0][0])  # First argument of the call
                # When no node_ids provided, query should not have WHERE clause
                assert "WHERE node_id IN" not in query
def test_add() -> None:
    """add() executes one INSERT per node and returns every node id."""
    session_mock = Mock()
    with patch.object(AlibabaCloudMySQLVectorStore, "_session") as session_maker:
        session_maker.return_value.__enter__.return_value = session_mock
        with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
            store = AlibabaCloudMySQLVectorStore(
                table_name="test_table",
                host="localhost",
                port=3306,
                user="test_user",
                password="test_password",
                database="test_db",
                perform_setup=False,  # avoid touching a real database
            )
            nodes = [
                TextNode(
                    text="test text 1",
                    id_="test-id-1",
                    metadata={"key": "value1"},
                    embedding=[1.0, 2.0, 3.0],
                ),
                TextNode(
                    text="test text 2",
                    id_="test-id-2",
                    metadata={"key": "value2"},
                    embedding=[4.0, 5.0, 6.0],
                ),
            ]
            returned_ids = store.add(nodes)
            assert len(returned_ids) == 2
            assert {"test-id-1", "test-id-2"} <= set(returned_ids)
            # One execute per inserted node.
            assert session_mock.execute.call_count == 2
def test_query() -> None:
    """query() turns COSINE distances into similarities via 1 - distance."""
    session_mock = Mock()
    result_mock = Mock()

    class FakeRow:
        """Minimal stand-in for a SQLAlchemy row: attribute and index access."""

        def __init__(self, node_id, text, embedding, metadata, distance):
            self.node_id = node_id
            self.text = text
            self.embedding = embedding
            self.metadata = metadata
            self.distance = distance
            # Column order mirrors the SELECT list used by the store.
            self._columns = (node_id, text, embedding, metadata, distance)

        def __getitem__(self, index):
            if not 0 <= index < len(self._columns):
                raise IndexError("Index out of range")
            return self._columns[index]

    rows = [
        FakeRow(
            "test-id-1",
            "test text 1",
            "[1.0, 2.0, 3.0]",
            json.dumps(
                {"_node_content": '{"id_": "test-id-1", "text": "test text 1"}'}
            ),
            0.1,
        ),
        FakeRow(
            "test-id-2",
            "test text 2",
            "[4.0, 5.0, 6.0]",
            json.dumps(
                {"_node_content": '{"id_": "test-id-2", "text": "test text 2"}'}
            ),
            0.2,
        ),
    ]
    result_mock.fetchall.return_value = rows
    session_mock.execute.return_value = result_mock
    with patch.object(AlibabaCloudMySQLVectorStore, "_session") as session_maker:
        session_maker.return_value.__enter__.return_value = session_mock
        with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
            store = AlibabaCloudMySQLVectorStore(
                table_name="test_table",
                host="localhost",
                port=3306,
                user="test_user",
                password="test_password",
                database="test_db",
                distance_method="COSINE",
                perform_setup=False,  # avoid touching a real database
            )
            result = store.query(
                VectorStoreQuery(query_embedding=[1.0, 2.0, 3.0], similarity_top_k=2)
            )
            assert len(result.nodes) == 2
            assert len(result.ids) == 2
            # similarity = 1 - distance for cosine distance.
            assert list(result.similarities) == [0.9, 0.8]
            session_mock.execute.assert_called_once()
            executed_sql = str(session_mock.execute.call_args[0][0])
            assert "VEC_DISTANCE_COSINE" in executed_sql
def test_query_with_filters() -> None:
    """query() with metadata filters adds a JSON_VALUE predicate to the SQL."""
    session_mock = Mock()
    result_mock = Mock()

    class FakeRow:
        """Minimal stand-in for a SQLAlchemy row: attribute and index access."""

        def __init__(self, node_id, text, embedding, metadata, distance):
            self.node_id = node_id
            self.text = text
            self.embedding = embedding
            self.metadata = metadata
            self.distance = distance
            # Column order mirrors the SELECT list used by the store.
            self._columns = (node_id, text, embedding, metadata, distance)

        def __getitem__(self, index):
            if not 0 <= index < len(self._columns):
                raise IndexError("Index out of range")
            return self._columns[index]

    result_mock.fetchall.return_value = [
        FakeRow(
            "test-id-1",
            "test text 1",
            "[1.0, 2.0, 3.0]",
            json.dumps(
                {"_node_content": '{"id_": "test-id-1", "text": "test text 1"}'}
            ),
            0.1,
        )
    ]
    session_mock.execute.return_value = result_mock
    with patch.object(AlibabaCloudMySQLVectorStore, "_session") as session_maker:
        session_maker.return_value.__enter__.return_value = session_mock
        with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
            store = AlibabaCloudMySQLVectorStore(
                table_name="test_table",
                host="localhost",
                port=3306,
                user="test_user",
                password="test_password",
                database="test_db",
                distance_method="EUCLIDEAN",
                perform_setup=False,  # avoid touching a real database
            )
            filtered_query = VectorStoreQuery(
                query_embedding=[1.0, 2.0, 3.0],
                similarity_top_k=1,
                filters=MetadataFilters(
                    filters=[
                        MetadataFilter(
                            key="category", value="test", operator=FilterOperator.EQ
                        )
                    ]
                ),
            )
            result = store.query(filtered_query)
            assert len(result.nodes) == 1
            session_mock.execute.assert_called_once()
            executed_sql = str(session_mock.execute.call_args[0][0])
            # Both the distance function and the filter predicate must appear.
            assert "VEC_DISTANCE_EUCLIDEAN" in executed_sql
            assert "JSON_VALUE(metadata, '$.category') =" in executed_sql
def test_delete() -> None:
    """delete() filters rows by the ref_doc_id stored in the metadata JSON."""
    session_mock = Mock()
    with patch.object(AlibabaCloudMySQLVectorStore, "_session") as session_maker:
        session_maker.return_value.__enter__.return_value = session_mock
        with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
            store = AlibabaCloudMySQLVectorStore(
                table_name="test_table",
                host="localhost",
                port=3306,
                user="test_user",
                password="test_password",
                database="test_db",
                perform_setup=False,  # avoid touching a real database
            )
            store.delete("test-ref-doc-id")
            session_mock.execute.assert_called_once()
            executed_sql = str(session_mock.execute.call_args[0][0])
            assert "JSON_EXTRACT(metadata, '$.ref_doc_id') =" in executed_sql
def test_delete_nodes() -> None:
    """delete_nodes() deletes by node_id membership."""
    session_mock = Mock()
    with patch.object(AlibabaCloudMySQLVectorStore, "_session") as session_maker:
        session_maker.return_value.__enter__.return_value = session_mock
        with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
            store = AlibabaCloudMySQLVectorStore(
                table_name="test_table",
                host="localhost",
                port=3306,
                user="test_user",
                password="test_password",
                database="test_db",
                perform_setup=False,  # avoid touching a real database
            )
            store.delete_nodes(node_ids=["test-id-1", "test-id-2"])
            session_mock.execute.assert_called_once()
            executed_sql = str(session_mock.execute.call_args[0][0])
            assert "node_id IN" in executed_sql
def test_count() -> None:
    """count() issues a COUNT(*) query and returns the scalar result."""
    session_mock = Mock()
    result_mock = Mock()
    result_mock.fetchone.return_value = [5]  # database reports five rows
    session_mock.execute.return_value = result_mock
    with patch.object(AlibabaCloudMySQLVectorStore, "_session") as session_maker:
        session_maker.return_value.__enter__.return_value = session_mock
        with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
            store = AlibabaCloudMySQLVectorStore(
                table_name="test_table",
                host="localhost",
                port=3306,
                user="test_user",
                password="test_password",
                database="test_db",
                perform_setup=False,  # avoid touching a real database
            )
            assert store.count() == 5
            session_mock.execute.assert_called_once()
            executed_sql = str(session_mock.execute.call_args[0][0])
            assert "COUNT(*)" in executed_sql
def test_drop() -> None:
    """drop() issues DROP TABLE IF EXISTS."""
    session_mock = Mock()
    with patch.object(AlibabaCloudMySQLVectorStore, "_session") as session_maker:
        session_maker.return_value.__enter__.return_value = session_mock
        with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
            store = AlibabaCloudMySQLVectorStore(
                table_name="test_table",
                host="localhost",
                port=3306,
                user="test_user",
                password="test_password",
                database="test_db",
                perform_setup=False,  # avoid touching a real database
            )
            store.drop()
            session_mock.execute.assert_called_once()
            executed_sql = str(session_mock.execute.call_args[0][0])
            assert "DROP TABLE IF EXISTS" in executed_sql
def test_clear() -> None:
    """clear() issues DELETE FROM (keeping the table itself)."""
    session_mock = Mock()
    with patch.object(AlibabaCloudMySQLVectorStore, "_session") as session_maker:
        session_maker.return_value.__enter__.return_value = session_mock
        with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
            store = AlibabaCloudMySQLVectorStore(
                table_name="test_table",
                host="localhost",
                port=3306,
                user="test_user",
                password="test_password",
                database="test_db",
                perform_setup=False,  # avoid touching a real database
            )
            store.clear()
            session_mock.execute.assert_called_once()
            executed_sql = str(session_mock.execute.call_args[0][0])
            assert "DELETE FROM" in executed_sql
def test_create_table_if_not_exists() -> None:
    """_create_table_if_not_exists emits CREATE TABLE with a VECTOR column."""
    session_mock = Mock()
    with patch.object(AlibabaCloudMySQLVectorStore, "_session") as session_maker:
        session_maker.return_value.__enter__.return_value = session_mock
        with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
            store = AlibabaCloudMySQLVectorStore(
                table_name="test_table",
                host="localhost",
                port=3306,
                user="test_user",
                password="test_password",
                database="test_db",
                embed_dim=1536,
                default_m=6,
                distance_method="COSINE",
                perform_setup=False,  # avoid touching a real database
            )
            store._create_table_if_not_exists()
            session_mock.execute.assert_called_once()
            executed_sql = str(session_mock.execute.call_args[0][0])
            assert "CREATE TABLE IF NOT EXISTS" in executed_sql
            assert "VECTOR" in executed_sql
def test_validate_identifier() -> None:
    """Valid SQL identifiers pass through unchanged; anything else raises ValueError."""
    with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=False,  # avoid touching a real database
        )
        # Letters, digits, underscores; must not start with a digit.
        for good in ("valid_table", "_table_name", "table123", "Table_Name_123", "a"):
            assert store._validate_identifier(good) == good
        # Hyphens, dots, spaces, leading digits, and empty names all fail.
        for bad in ("123invalid", "invalid-table", "invalid.table", "", "table name"):
            with pytest.raises(ValueError, match=f"Invalid identifier: {bad}"):
                store._validate_identifier(bad)
def test_validate_positive_int() -> None:
    """Positive integers pass through; zero and negatives raise ValueError."""
    with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=False,  # avoid touching a real database
        )
        for good in (1, 100, 999999):
            assert store._validate_positive_int(good, "test_param") == good
        # (value, how it appears in the error message)
        for bad, shown in ((0, "0"), (-1, "-1"), (-100, "-100"), (0.0, "0")):
            with pytest.raises(
                ValueError,
                match=f"Expected positive int for test_param, got {shown}",
            ):
                store._validate_positive_int(bad, "test_param")
def test_validate_table_name() -> None:
    """_validate_table_name delegates to the identifier validator."""
    with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=False,  # avoid touching a real database
        )
        for good in ("valid_table", "_my_table", "Table123"):
            assert store._validate_table_name(good) == good
        # Invalid names surface _validate_identifier's ValueError.
        with pytest.raises(ValueError, match="Invalid identifier: invalid-table"):
            store._validate_table_name("invalid-table")
def test_init_validation_table_name() -> None:
    """__init__ validates table_name through the identifier rules."""
    with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        store = AlibabaCloudMySQLVectorStore(
            table_name="valid_table_name",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            perform_setup=False,
        )
        assert store.table_name == "valid_table_name"
    # Hyphenated names fail validation before any connection attempt.
    with pytest.raises(ValueError, match="Invalid identifier: invalid-table-name"):
        AlibabaCloudMySQLVectorStore(
            table_name="invalid-table-name",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
        )
def test_init_validation_embed_dim() -> None:
    """__init__ rejects non-positive embed_dim values."""
    with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            embed_dim=128,  # valid positive integer
            perform_setup=False,
        )
        assert store.embed_dim == 128
    # Zero and negative dimensions fail before any connection attempt.
    for bad in (0, -128):
        with pytest.raises(
            ValueError, match=f"Expected positive int for embed_dim, got {bad}"
        ):
            AlibabaCloudMySQLVectorStore(
                table_name="test_table",
                host="localhost",
                port=3306,
                user="test_user",
                password="test_password",
                database="test_db",
                embed_dim=bad,
            )
def test_init_validation_default_m() -> None:
    """__init__ rejects non-positive default_m values."""
    with patch.object(AlibabaCloudMySQLVectorStore, "_connect"):
        store = AlibabaCloudMySQLVectorStore(
            table_name="test_table",
            host="localhost",
            port=3306,
            user="test_user",
            password="test_password",
            database="test_db",
            default_m=8,  # valid positive integer
            perform_setup=False,
        )
        assert store.default_m == 8
    # Zero and negative values fail before any connection attempt.
    for bad in (0, -5):
        with pytest.raises(
            ValueError, match=f"Expected positive int for default_m, got {bad}"
        ):
            AlibabaCloudMySQLVectorStore(
                table_name="test_table",
                host="localhost",
                port=3306,
                user="test_user",
                password="test_password",
                database="test_db",
                default_m=bad,
            )
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-alibabacloud-mysql/tests/test_alibabacloud_mysql.py",
"license": "MIT License",
"lines": 1009,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-alibabacloud-mysql/tests/test_vector_stores_alibabacloud_mysql.py | from llama_index.core.vector_stores.types import BasePydanticVectorStore
from llama_index.vector_stores.alibabacloud_mysql import AlibabaCloudMySQLVectorStore
def test_class():
    """The store must inherit from BasePydanticVectorStore somewhere in its MRO."""
    ancestor_names = {base.__name__ for base in AlibabaCloudMySQLVectorStore.__mro__}
    assert BasePydanticVectorStore.__name__ in ancestor_names
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-alibabacloud-mysql/tests/test_vector_stores_alibabacloud_mysql.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-vertexaivectorsearch/llama_index/vector_stores/vertexaivectorsearch/_v2_operations.py | """
V2 operations for Vertex AI Vector Search.
This module contains all v2-specific operations and is imported lazily
only when api_version="v2" is used.
"""
import logging
import time
from functools import wraps
from typing import Any, Callable, List, Optional, TypeVar
from llama_index.core.schema import BaseNode, TextNode
from llama_index.core.vector_stores.types import (
VectorStoreQuery,
VectorStoreQueryResult,
VectorStoreQueryMode,
MetadataFilters,
)
from llama_index.core.vector_stores.utils import node_to_metadata_dict
_logger = logging.getLogger(__name__)
T = TypeVar("T")
def retry(max_attempts: int = 3, delay: float = 1.0) -> Callable:
    """
    Retry decorator with exponential backoff.

    Args:
        max_attempts: Total number of times the wrapped callable may be tried.
        delay: Initial sleep in seconds; doubled after every failed attempt.

    Returns:
        A decorator that wraps the target callable with retry behavior.
    """

    def decorator(func: Callable[..., T]) -> Callable[..., T]:
        @wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> T:
            if max_attempts < 1:
                # Mirrors the original guard for a non-positive retry budget.
                raise RuntimeError("Retry logic error")
            failures = 0
            while True:
                try:
                    return func(*args, **kwargs)
                except Exception as exc:
                    failures += 1
                    if failures >= max_attempts:
                        # Budget exhausted: propagate the last exception.
                        raise
                    backoff = delay * (2 ** (failures - 1))
                    _logger.warning(
                        f"Attempt {failures}/{max_attempts} failed: {exc}. "
                        f"Retrying in {backoff}s..."
                    )
                    time.sleep(backoff)

        return wrapper

    return decorator
def _import_v2_sdk():
"""
Import v2 SDK with proper error handling.
Returns:
The vectorsearch_v1beta module
Raises:
ImportError: If google-cloud-vectorsearch is not installed
"""
try:
from google.cloud import vectorsearch_v1beta
return vectorsearch_v1beta
except ImportError as e:
raise ImportError(
"v2 operations require google-cloud-vectorsearch. "
"Install with: pip install google-cloud-vectorsearch"
) from e
# =============================================================================
# Helper Functions for Hybrid Search
# =============================================================================
def _calculate_rrf_weights(alpha: float, num_searches: int = 2) -> List[float]:
"""
Calculate RRF weights from alpha value.
Args:
alpha: Weight for vector search (0 = text only, 1 = vector only)
num_searches: Number of searches being combined
Returns:
List of weights [vector_weight, text_weight]
"""
if num_searches == 2:
return [alpha, 1.0 - alpha]
return [1.0 / num_searches] * num_searches
def _convert_filters_to_v2(filters: Optional[MetadataFilters]) -> Optional[dict]:
    """
    Translate LlamaIndex MetadataFilters into the V2 filter dict format.

    V2 filter format:
        - Simple: {"field": {"$eq": "value"}}
        - AND: {"$and": [{...}, {...}]}
        - OR: {"$or": [{...}, {...}]}

    Args:
        filters: LlamaIndex MetadataFilters object (possibly nested).

    Returns:
        Equivalent V2 filter dict, or None when there is nothing to filter.
    """
    if filters is None or not filters.filters:
        return None

    from llama_index.core.vector_stores.types import (
        FilterOperator,
        FilterCondition,
        MetadataFilter,
    )

    operator_to_v2 = {
        FilterOperator.EQ: "$eq",
        FilterOperator.NE: "$ne",
        FilterOperator.GT: "$gt",
        FilterOperator.GTE: "$gte",
        FilterOperator.LT: "$lt",
        FilterOperator.LTE: "$lte",
        FilterOperator.IN: "$in",
        FilterOperator.NIN: "$nin",
        FilterOperator.CONTAINS: "$contains",
    }

    def translate(item) -> Optional[dict]:
        # Nested groups recurse; leaf filters map operator -> value.
        # Unknown operators degrade to equality, matching the operator map's default.
        if isinstance(item, MetadataFilters):
            return _convert_filters_to_v2(item)
        return {item.key: {operator_to_v2.get(item.operator, "$eq"): item.value}}

    if len(filters.filters) == 1:
        return translate(filters.filters[0])

    combinator = "$and" if filters.condition == FilterCondition.AND else "$or"
    return {combinator: [translate(f) for f in filters.filters]}
def _merge_results_rrf(
    store: Any,
    vector_results: List[Any],
    text_results: List[Any],
    alpha: float,
    top_k: int,
    k: int = 60,
) -> VectorStoreQueryResult:
    """
    Merge vector and text search results using Reciprocal Rank Fusion.

    RRF formula: score = sum(1 / (k + rank_i)) for each result list
    With alpha weighting: final_score = alpha * vector_rrf + (1-alpha) * text_rrf

    Args:
        store: VertexAIVectorStore instance
        vector_results: Results from vector search
        text_results: Results from text search
        alpha: Weight for vector results (0=text only, 1=vector only)
        top_k: Number of results to return
        k: RRF constant (default 60)

    Returns:
        Merged VectorStoreQueryResult
    """
    # Build score maps: id -> (rrf_score, result_object)
    # The node id is the last path segment of the data object's resource name.
    vector_scores = {}
    for rank, result in enumerate(vector_results):
        node_id = result.data_object.name.split("/")[-1]
        rrf_score = 1.0 / (k + rank + 1)
        vector_scores[node_id] = (rrf_score * alpha, result)
    text_scores = {}
    for rank, result in enumerate(text_results):
        node_id = result.data_object.name.split("/")[-1]
        rrf_score = 1.0 / (k + rank + 1)
        text_scores[node_id] = (rrf_score * (1 - alpha), result)
    # Merge scores
    # NOTE(review): iterating a set fixes no order before the sort; the sort is
    # stable, so results with exactly equal fused scores may appear in varying
    # relative order across runs — confirm whether deterministic tie-breaking
    # is required.
    all_ids = set(vector_scores.keys()) | set(text_scores.keys())
    merged_scores = []
    for node_id in all_ids:
        v_score, v_result = vector_scores.get(node_id, (0, None))
        t_score, t_result = text_scores.get(node_id, (0, None))
        total_score = v_score + t_score
        # Prefer the vector-side result object when the id appears in both lists.
        result = v_result or t_result
        merged_scores.append((total_score, node_id, result))
    # Sort by score descending and take top_k
    merged_scores.sort(key=lambda x: x[0], reverse=True)
    merged_scores = merged_scores[:top_k]
    # Build result
    nodes = []
    ids = []
    similarities = []
    for score, node_id, result in merged_scores:
        data_obj = result.data_object
        # Extract embedding
        # Defensive hasattr-probing: only surface a dense embedding when the
        # store's configured embedding field is present on the data object.
        embedding = None
        if hasattr(data_obj, "vectors") and data_obj.vectors:
            if store.embedding_field in data_obj.vectors:
                vector_data = data_obj.vectors[store.embedding_field]
                if hasattr(vector_data, "dense") and vector_data.dense:
                    if hasattr(vector_data.dense, "values"):
                        embedding = list(vector_data.dense.values)
        # Extract metadata
        metadata = dict(data_obj.data) if data_obj.data else {}
        # Node text comes from the store's configured text key, defaulting to "".
        node = TextNode(
            text=metadata.get(store.text_key, ""),
            id_=node_id,
            metadata=metadata,
            embedding=embedding,
        )
        nodes.append(node)
        ids.append(node_id)
        similarities.append(score)
    return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)
def _build_ranker(
    store: Any,
    query: VectorStoreQuery,
    num_searches: int,
) -> Any:
    """
    Construct the ranker used to combine multiple search result lists.

    Args:
        store: VertexAIVectorStore instance supplying ranker configuration.
        query: The query being executed (source of the alpha blend weight).
        num_searches: How many individual searches are being combined.

    Returns:
        A Ranker configured for either Vertex reranking or RRF fusion.
    """
    sdk = _import_v2_sdk()

    if store.hybrid_ranker != "vertex":
        # Reciprocal Rank Fusion (the default): derive per-search weights
        # from the query's alpha, or the store-level default when unset.
        blend = query.alpha if query.alpha is not None else store.default_hybrid_alpha
        return sdk.Ranker(
            rrf=sdk.ReciprocalRankFusion(
                weights=_calculate_rrf_weights(blend, num_searches)
            )
        )

    # Vertex reranker: configured field names are wrapped as "{{ field }}"
    # templates; unset fields stay None.
    title_field = store.vertex_ranker_title_field
    content_field = store.vertex_ranker_content_field
    return sdk.Ranker(
        vertex=sdk.VertexRanker(
            query=query.query_str or "",
            model=store.vertex_ranker_model,
            title_template=f"{{{{ {title_field} }}}}" if title_field else None,
            content_template=f"{{{{ {content_field} }}}}" if content_field else None,
        )
    )
def add_v2(
    store: Any,
    nodes: List[BaseNode],
    is_complete_overwrite: bool = False,
    **add_kwargs: Any,
) -> List[str]:
    """
    Add nodes to collection using v2 API.

    Args:
        store: The VertexAIVectorStore instance
        nodes: List of nodes with embeddings already computed
        is_complete_overwrite: Whether to overwrite existing data
            (currently unused by the v2 batch-create path)
        **add_kwargs: Additional keyword arguments (unused)

    Returns:
        List of node IDs that were written
    """
    vectorsearch = _import_v2_sdk()

    _logger.info(f"Adding {len(nodes)} nodes to v2 collection: {store.collection_id}")

    # Get or create v2 client
    from llama_index.vector_stores.vertexaivectorsearch._sdk_manager import (
        VectorSearchSDKManager,
    )

    sdk_manager = VectorSearchSDKManager(
        project_id=store.project_id,
        region=store.region,
        credentials_path=store.credentials_path,
    )

    clients = sdk_manager.get_v2_client()
    data_object_client = clients["data_object_service_client"]

    # Parent path of the collection all data objects belong to.
    parent = f"projects/{store.project_id}/locations/{store.region}/collections/{store.collection_id}"

    # Convert nodes to v2 data objects
    batch_requests = []
    ids = []

    for node in nodes:
        node_id = node.node_id
        metadata = node_to_metadata_dict(node, remove_text=False, flat_metadata=False)
        embedding = node.get_embedding()

        # Metadata becomes the "data" field. The dense embedding is written
        # under the store's configured embedding field so the read paths
        # (which look up store.embedding_field) find it regardless of the
        # field name — previously this was hard-coded to "embedding".
        data_object = {
            "data": metadata,
            "vectors": {
                store.embedding_field: {"dense": {"values": embedding}}
            },
        }

        batch_requests.append({"data_object_id": node_id, "data_object": data_object})
        ids.append(node_id)

    # Batch create data objects, store.batch_size objects per request.
    batch_size = store.batch_size
    for i in range(0, len(batch_requests), batch_size):
        batch = batch_requests[i : i + batch_size]
        _logger.info(f"Creating batch {i // batch_size + 1} ({len(batch)} objects)")

        request = vectorsearch.BatchCreateDataObjectsRequest(
            parent=parent, requests=batch
        )

        try:
            response = data_object_client.batch_create_data_objects(request)
            _logger.debug(f"Batch create response: {response}")
        except Exception as e:
            _logger.error(f"Failed to create data objects batch: {e}")
            raise

    return ids
def delete_v2(
    store: Any,
    ref_doc_id: str,
    **delete_kwargs: Any,
) -> None:
    """
    Delete nodes using ref_doc_id with v2 API.

    Queries the collection for data objects whose ``ref_doc_id`` field
    matches, then batch-deletes every match. A no-op (beyond logging)
    when nothing matches.

    Args:
        store: The VertexAIVectorStore instance
        ref_doc_id: The document ID to delete
        **delete_kwargs: Additional keyword arguments (unused)
    """
    vectorsearch = _import_v2_sdk()

    _logger.info(f"Deleting nodes with ref_doc_id: {ref_doc_id} from v2 collection")

    # Get v2 client
    from llama_index.vector_stores.vertexaivectorsearch._sdk_manager import (
        VectorSearchSDKManager,
    )

    sdk_manager = VectorSearchSDKManager(
        project_id=store.project_id,
        region=store.region,
        credentials_path=store.credentials_path,
    )

    clients = sdk_manager.get_v2_client()
    data_object_client = clients["data_object_service_client"]
    search_client = clients["data_object_search_service_client"]

    # Build parent path
    parent = f"projects/{store.project_id}/locations/{store.region}/collections/{store.collection_id}"

    # Query for data objects with matching ref_doc_id; only the ref_doc_id
    # data field is requested since we just need the resource names.
    query_request = vectorsearch.QueryDataObjectsRequest(
        parent=parent,
        filter={"ref_doc_id": {"$eq": ref_doc_id}},
        output_fields=vectorsearch.OutputFields(
            data_fields=["ref_doc_id"], metadata_fields=["*"]
        ),
    )

    try:
        # Execute query
        results = search_client.query_data_objects(query_request)

        # Build batch delete requests keyed on each match's resource name.
        delete_requests = []
        for data_object in results:
            delete_requests.append(
                vectorsearch.DeleteDataObjectRequest(name=data_object.name)
            )

        # Batch delete
        if delete_requests:
            batch_delete_request = vectorsearch.BatchDeleteDataObjectsRequest(
                parent=parent, requests=delete_requests
            )
            # NOTE(review): response is currently unused; kept for parity
            # with add_v2's debug logging style.
            response = data_object_client.batch_delete_data_objects(
                batch_delete_request
            )
            _logger.info(
                f"Deleted {len(delete_requests)} data objects with ref_doc_id: {ref_doc_id}"
            )
        else:
            _logger.info(f"No data objects found with ref_doc_id: {ref_doc_id}")
    except Exception as e:
        _logger.error(f"Failed to delete data objects: {e}")
        raise
# =============================================================================
# Result Processing Helpers
# =============================================================================
def _process_search_results(
    store: Any,
    results: Any,
) -> VectorStoreQueryResult:
    """
    Convert raw single-search API results into a VectorStoreQueryResult.

    Args:
        store: VertexAIVectorStore instance (supplies field names)
        results: Iterable of search results from the API

    Returns:
        VectorStoreQueryResult with nodes, similarities, and IDs
    """
    nodes = []
    ids = []
    scores = []

    for hit in results:
        obj = hit.data_object

        # Dense embedding lives under the store's configured vector field,
        # when the API returned vectors at all.
        vector = None
        vectors = getattr(obj, "vectors", None)
        if vectors and store.embedding_field in vectors:
            dense = getattr(vectors[store.embedding_field], "dense", None)
            if dense and hasattr(dense, "values"):
                vector = list(dense.values)

        meta = dict(obj.data) if obj.data else {}

        # Prefer the explicit data_object_id; fall back to the trailing
        # segment of the resource name.
        node_id = obj.name.split("/")[-1]
        if getattr(obj, "data_object_id", None):
            node_id = obj.data_object_id

        nodes.append(
            TextNode(
                text=meta.get(store.text_key, ""),
                id_=node_id,
                metadata=meta,
                embedding=vector,
            )
        )
        ids.append(node_id)

        # Similarity: prefer distance, then score, else a neutral 1.0.
        if hasattr(hit, "distance"):
            scores.append(hit.distance)
        elif hasattr(hit, "score"):
            scores.append(hit.score)
        else:
            scores.append(1.0)

    return VectorStoreQueryResult(
        nodes=nodes,
        similarities=scores,
        ids=ids,
    )
def _process_batch_search_results(
    store: Any,
    batch_results: Any,
    top_k: int,
) -> VectorStoreQueryResult:
    """
    Process batch search (RRF/VertexRanker combined) results.

    Two response shapes are handled: a flat ``combined_results`` list
    (ranker-combined output), or a nested ``results`` list of per-search
    responses. The per-result conversion logic is shared via a nested
    helper to avoid the previous ~30-line duplication.

    Args:
        store: VertexAIVectorStore instance (supplies field names)
        batch_results: Batch search results from the API
        top_k: Maximum number of results to return

    Returns:
        VectorStoreQueryResult with nodes, similarities, and IDs
    """
    top_k_nodes = []
    top_k_ids = []
    top_k_scores = []

    def _append_result(result: Any, use_rank_score: bool) -> None:
        """Convert one API result into a node/id/score triple and append it."""
        data_obj = result.data_object

        # Extract embedding from the configured vector field, if present.
        embedding = None
        if hasattr(data_obj, "vectors") and data_obj.vectors:
            if store.embedding_field in data_obj.vectors:
                vector_data = data_obj.vectors[store.embedding_field]
                if hasattr(vector_data, "dense") and vector_data.dense:
                    if hasattr(vector_data.dense, "values"):
                        embedding = list(vector_data.dense.values)

        # Extract metadata
        metadata = dict(data_obj.data) if data_obj.data else {}

        # Prefer explicit data_object_id over the resource-name suffix.
        node_id = data_obj.name.split("/")[-1]
        if hasattr(data_obj, "data_object_id") and data_obj.data_object_id:
            node_id = data_obj.data_object_id

        top_k_nodes.append(
            TextNode(
                text=metadata.get(store.text_key, ""),
                id_=node_id,
                metadata=metadata,
                embedding=embedding,
            )
        )
        top_k_ids.append(node_id)

        # Score preference: distance, then score, then (combined results
        # only) rank_score, else a neutral 1.0.
        score = 1.0
        if hasattr(result, "distance"):
            score = result.distance
        elif hasattr(result, "score"):
            score = result.score
        elif use_rank_score and hasattr(result, "rank_score"):
            score = result.rank_score
        top_k_scores.append(score)

    if hasattr(batch_results, "combined_results") and batch_results.combined_results:
        # Flat list of ranker-combined results.
        for result in batch_results.combined_results:
            if len(top_k_nodes) >= top_k:
                break
            _append_result(result, use_rank_score=True)
    elif hasattr(batch_results, "results"):
        # Alternative structure: one response per search, each with results.
        for response in batch_results.results:
            if hasattr(response, "results"):
                for result in response.results:
                    if len(top_k_nodes) >= top_k:
                        break
                    _append_result(result, use_rank_score=False)

    return VectorStoreQueryResult(
        nodes=top_k_nodes,
        similarities=top_k_scores,
        ids=top_k_ids,
    )
# =============================================================================
# Query Mode Implementations
# =============================================================================
def _query_v2_default(
    store: Any,
    query: VectorStoreQuery,
    clients: dict,
    parent: str,
    **kwargs: Any,
) -> VectorStoreQueryResult:
    """
    Execute the default dense-vector similarity search.

    Args:
        store: VertexAIVectorStore instance
        query: The vector store query (must carry query_embedding)
        clients: V2 client dictionary
        parent: Parent resource path of the collection
        **kwargs: Additional arguments (unused)

    Returns:
        VectorStoreQueryResult

    Raises:
        ValueError: If query.query_embedding is missing.
    """
    sdk = _import_v2_sdk()
    search_client = clients["data_object_search_service_client"]

    if query.query_embedding is None:
        raise ValueError(
            "query_embedding is required for DEFAULT (vector) search mode. "
            "Use TEXT_SEARCH mode if you only have a text query."
        )

    vector_search = sdk.VectorSearch(
        search_field=store.embedding_field,
        vector=sdk.DenseVector(values=query.query_embedding),
        top_k=query.similarity_top_k,
        output_fields=sdk.OutputFields(
            data_fields=["*"],
            vector_fields=["*"],
            metadata_fields=["*"],
        ),
    )

    # Attach the metadata filter only when one was actually supplied.
    metadata_filter = _convert_filters_to_v2(query.filters)
    if metadata_filter:
        vector_search.filter = metadata_filter

    request = sdk.SearchDataObjectsRequest(parent=parent, vector_search=vector_search)

    try:
        return _process_search_results(
            store, search_client.search_data_objects(request)
        )
    except Exception as e:
        _logger.error(f"Failed to execute DEFAULT search: {e}")
        raise
def _query_v2_text_search(
    store: Any,
    query: VectorStoreQuery,
    clients: dict,
    parent: str,
    **kwargs: Any,
) -> VectorStoreQueryResult:
    """
    Execute a full-text keyword search (no vectors involved).

    Args:
        store: VertexAIVectorStore instance
        query: The vector store query (must carry query_str)
        clients: V2 client dictionary
        parent: Parent resource path of the collection
        **kwargs: Additional arguments (unused)

    Returns:
        VectorStoreQueryResult

    Raises:
        ValueError: If query_str or store.text_search_fields is missing.
    """
    sdk = _import_v2_sdk()
    search_client = clients["data_object_search_service_client"]

    if query.query_str is None:
        raise ValueError("TEXT_SEARCH mode requires query_str.")
    if store.text_search_fields is None:
        raise ValueError(
            "TEXT_SEARCH mode requires text_search_fields to be configured "
            "in the constructor."
        )

    # sparse_top_k takes precedence for keyword-only searches.
    result_count = query.sparse_top_k or query.similarity_top_k

    request = sdk.SearchDataObjectsRequest(
        parent=parent,
        text_search=sdk.TextSearch(
            search_text=query.query_str,
            data_field_names=store.text_search_fields,
            top_k=result_count,
            output_fields=sdk.OutputFields(
                data_fields=["*"],
                vector_fields=["*"],
                metadata_fields=["*"],
            ),
        ),
    )

    try:
        return _process_search_results(
            store, search_client.search_data_objects(request)
        )
    except Exception as e:
        _logger.error(f"Failed to execute TEXT_SEARCH: {e}")
        raise
def _query_v2_hybrid(
    store: Any,
    query: VectorStoreQuery,
    clients: dict,
    parent: str,
    **kwargs: Any,
) -> VectorStoreQueryResult:
    """
    Execute hybrid search: VectorSearch + TextSearch with ranker.

    Runs a dense vector search and a full-text search as two separate
    requests, then merges the results client-side with Reciprocal Rank
    Fusion. Falls back to vector-only search when query_str or the
    store's text_search_fields is missing.

    Args:
        store: VertexAIVectorStore instance
        query: The vector store query
        clients: V2 client dictionary
        parent: Parent resource path
        **kwargs: Additional arguments (forwarded on fallback)

    Returns:
        VectorStoreQueryResult

    Raises:
        ValueError: If hybrid is not enabled on the store, or
            query_embedding is missing.
    """
    vectorsearch = _import_v2_sdk()
    search_client = clients["data_object_search_service_client"]

    # Validate requirements
    if not store.enable_hybrid:
        raise ValueError(
            "HYBRID mode requires enable_hybrid=True in the VertexAIVectorStore "
            "constructor."
        )
    if query.query_embedding is None:
        raise ValueError("HYBRID mode requires query_embedding (dense vector).")
    if query.query_str is None:
        _logger.warning(
            "HYBRID mode without query_str - falling back to vector-only search"
        )
        return _query_v2_default(store, query, clients, parent, **kwargs)
    if store.text_search_fields is None:
        _logger.warning(
            "No text_search_fields configured - falling back to vector-only search"
        )
        return _query_v2_default(store, query, clients, parent, **kwargs)

    # Build filter (applied to the vector leg only; text search takes none)
    v2_filter = _convert_filters_to_v2(query.filters)

    # Calculate top_k values: sparse/hybrid fall back to similarity_top_k
    top_k = query.similarity_top_k
    sparse_top_k = query.sparse_top_k or top_k
    hybrid_top_k = query.hybrid_top_k or top_k

    # Build output fields (request everything; results are trimmed later)
    output_fields = vectorsearch.OutputFields(
        data_fields=["*"],
        vector_fields=["*"],
        metadata_fields=["*"],
    )

    # Build vector search request
    vector_search_kwargs = {
        "search_field": store.embedding_field,
        "vector": vectorsearch.DenseVector(values=query.query_embedding),
        "top_k": top_k,
        "output_fields": output_fields,
    }
    if v2_filter:
        vector_search_kwargs["filter"] = v2_filter

    # Note: BatchSearchDataObjectsRequest.Search only supports vector_search,
    # not text_search. For true hybrid, we run separate searches and merge.
    try:
        # Run vector search
        vector_request = vectorsearch.SearchDataObjectsRequest(
            parent=parent,
            vector_search=vectorsearch.VectorSearch(**vector_search_kwargs),
        )
        vector_results = list(search_client.search_data_objects(vector_request))

        # Run text search
        text_request = vectorsearch.SearchDataObjectsRequest(
            parent=parent,
            text_search=vectorsearch.TextSearch(
                search_text=query.query_str,
                data_field_names=store.text_search_fields,
                top_k=sparse_top_k,
                output_fields=output_fields,
            ),
        )
        text_results = list(search_client.search_data_objects(text_request))

        # Merge results using RRF; alpha weights vector vs. text legs
        alpha = query.alpha if query.alpha is not None else store.default_hybrid_alpha
        return _merge_results_rrf(
            store, vector_results, text_results, alpha, hybrid_top_k
        )
    except Exception as e:
        _logger.error(f"Failed to execute HYBRID search: {e}")
        raise
def _query_v2_semantic_hybrid(
    store: Any,
    query: VectorStoreQuery,
    clients: dict,
    parent: str,
    **kwargs: Any,
) -> VectorStoreQueryResult:
    """
    Execute semantic hybrid: VectorSearch + SemanticSearch with ranker.

    Issues a single batch request combining an (optional) dense vector
    search with a server-side semantic search; the server combines both
    result lists using the ranker built by ``_build_ranker``.

    Args:
        store: VertexAIVectorStore instance
        query: The vector store query (must carry query_str)
        clients: V2 client dictionary
        parent: Parent resource path
        **kwargs: Additional arguments (unused)

    Returns:
        VectorStoreQueryResult

    Raises:
        ValueError: If hybrid is not enabled on the store, or query_str
            is missing.
    """
    vectorsearch = _import_v2_sdk()
    search_client = clients["data_object_search_service_client"]

    if not store.enable_hybrid:
        raise ValueError(
            "SEMANTIC_HYBRID mode requires enable_hybrid=True in the constructor."
        )
    if query.query_str is None:
        raise ValueError("SEMANTIC_HYBRID mode requires query_str.")

    # Calculate top_k values
    top_k = query.similarity_top_k
    hybrid_top_k = query.hybrid_top_k or top_k

    # Build filter (applied only to the vector leg, mirroring HYBRID mode)
    v2_filter = _convert_filters_to_v2(query.filters)

    # Build output fields
    output_fields = vectorsearch.OutputFields(
        data_fields=["*"],
        vector_fields=["*"],
        metadata_fields=["*"],
    )

    searches = []

    # Add vector search if embedding provided; semantic-only otherwise
    if query.query_embedding is not None:
        vector_search_kwargs = {
            "search_field": store.embedding_field,
            "vector": vectorsearch.DenseVector(values=query.query_embedding),
            "top_k": top_k,
            "output_fields": output_fields,
        }
        if v2_filter:
            vector_search_kwargs["filter"] = v2_filter
        searches.append(
            vectorsearch.Search(
                vector_search=vectorsearch.VectorSearch(**vector_search_kwargs)
            )
        )

    # Add semantic search (server-side embedding of query_str)
    searches.append(
        vectorsearch.Search(
            semantic_search=vectorsearch.SemanticSearch(
                search_text=query.query_str,
                search_field=store.embedding_field,
                task_type=store.semantic_task_type,
                top_k=top_k,
                output_fields=output_fields,
            )
        )
    )

    # Build ranker sized to the number of legs actually in the batch
    ranker = _build_ranker(store, query, num_searches=len(searches))

    # Execute batch search; the server combines legs via the ranker
    batch_request = vectorsearch.BatchSearchDataObjectsRequest(
        parent=parent,
        searches=searches,
        combine=vectorsearch.BatchSearchDataObjectsRequest.CombineResultsOptions(
            ranker=ranker,
            top_k=hybrid_top_k,
            output_fields=output_fields,
        ),
    )

    try:
        results = search_client.batch_search_data_objects(batch_request)
        return _process_batch_search_results(store, results, hybrid_top_k)
    except Exception as e:
        _logger.error(f"Failed to execute SEMANTIC_HYBRID search: {e}")
        raise
# =============================================================================
# Main Query Function with Mode Routing
# =============================================================================
def query_v2(
    store: Any,
    query: VectorStoreQuery,
    **kwargs: Any,
) -> VectorStoreQueryResult:
    """
    Query a v2 collection, routing to the handler for the query's mode.

    Args:
        store: The VertexAIVectorStore instance
        query: The vector store query with mode specification
        **kwargs: Additional keyword arguments forwarded to the handler

    Returns:
        Vector store query result

    Supported modes:
        - DEFAULT: Dense vector similarity search
        - TEXT_SEARCH: Full-text keyword search
        - HYBRID: Dense vector + text search with RRF/VertexRanker
        - SEMANTIC_HYBRID: Dense vector + semantic search with ranker
        - SPARSE: (Phase 2) Sparse vector search — not yet implemented
    """
    _logger.info(
        f"Querying v2 collection: {store.collection_id} with mode: {query.mode}"
    )

    # Build the v2 clients and the collection's parent resource path.
    from llama_index.vector_stores.vertexaivectorsearch._sdk_manager import (
        VectorSearchSDKManager,
    )

    sdk_manager = VectorSearchSDKManager(
        project_id=store.project_id,
        region=store.region,
        credentials_path=store.credentials_path,
    )
    clients = sdk_manager.get_v2_client()
    parent = f"projects/{store.project_id}/locations/{store.region}/collections/{store.collection_id}"

    if query.mode == VectorStoreQueryMode.SPARSE:
        raise NotImplementedError(
            "SPARSE mode is planned for Phase 2 and requires a sparse vector field "
            "configured in the collection schema. Consider using TEXT_SEARCH mode "
            "for keyword search or HYBRID mode for combined vector + keyword search."
        )

    # Mode -> handler dispatch; anything unlisted falls back to DEFAULT.
    handlers = {
        VectorStoreQueryMode.DEFAULT: _query_v2_default,
        VectorStoreQueryMode.HYBRID: _query_v2_hybrid,
        VectorStoreQueryMode.TEXT_SEARCH: _query_v2_text_search,
        VectorStoreQueryMode.SEMANTIC_HYBRID: _query_v2_semantic_hybrid,
    }
    handler = handlers.get(query.mode)
    if handler is None:
        _logger.warning(
            f"Query mode {query.mode} not explicitly supported, falling back to DEFAULT"
        )
        handler = _query_v2_default

    return handler(store, query, clients, parent, **kwargs)
def delete_nodes_v2(
    store: Any,
    node_ids: Optional[List[str]] = None,
    filters: Optional[MetadataFilters] = None,
    **kwargs: Any,
) -> None:
    """
    Delete nodes by IDs or filters using v2 API.

    Args:
        store: The VertexAIVectorStore instance
        node_ids: List of node IDs to delete
        filters: Metadata filters to select nodes for deletion
            (not yet implemented — logged and ignored)
        **kwargs: Additional keyword arguments (unused)

    Raises:
        ValueError: If neither node_ids nor filters is provided.
    """
    vectorsearch = _import_v2_sdk()

    _logger.info(f"Deleting nodes from v2 collection: {store.collection_id}")

    # Get v2 client
    from llama_index.vector_stores.vertexaivectorsearch._sdk_manager import (
        VectorSearchSDKManager,
    )

    sdk_manager = VectorSearchSDKManager(
        project_id=store.project_id,
        region=store.region,
        credentials_path=store.credentials_path,
    )

    clients = sdk_manager.get_v2_client()
    data_object_client = clients["data_object_service_client"]

    # Fully-qualified collection path; individual data objects live under
    # "<collection>/dataObjects/<id>".
    collection_name = f"projects/{store.project_id}/locations/{store.region}/collections/{store.collection_id}"

    if node_ids is not None:
        # Delete by node IDs
        _logger.info(f"Deleting {len(node_ids)} nodes by ID")

        delete_requests = [
            vectorsearch.DeleteDataObjectRequest(
                name=f"{collection_name}/dataObjects/{node_id}"
            )
            for node_id in node_ids
        ]

        try:
            if delete_requests:
                batch_delete_request = vectorsearch.BatchDeleteDataObjectsRequest(
                    parent=collection_name, requests=delete_requests
                )
                data_object_client.batch_delete_data_objects(batch_delete_request)
                _logger.info(f"Deleted {len(delete_requests)} data objects by ID")
            else:
                _logger.info("No data objects to delete")
        except Exception as e:
            _logger.error(f"Failed to delete data objects by ID: {e}")
            raise

    elif filters is not None:
        # Delete by filters - need to query first then delete
        _logger.info("Deleting nodes matching filters")
        # TODO: Convert LlamaIndex MetadataFilters to the v2 filter format,
        # then query-and-batch-delete as delete_v2 does for ref_doc_id.
        _logger.warning(
            "Filter-based deletion not yet implemented. "
            "LlamaIndex MetadataFilters need conversion to v2 filter format."
        )

    else:
        raise ValueError("Either node_ids or filters must be provided")
def clear_v2(store: Any) -> None:
    """
    Clear all nodes from the collection using v2 API.

    Pages through every data object in the collection (100 per page) and
    batch-deletes each page.

    Args:
        store: The VertexAIVectorStore instance
    """
    vectorsearch = _import_v2_sdk()

    _logger.info(f"Clearing all nodes from v2 collection: {store.collection_id}")

    # Get v2 client
    from llama_index.vector_stores.vertexaivectorsearch._sdk_manager import (
        VectorSearchSDKManager,
    )

    sdk_manager = VectorSearchSDKManager(
        project_id=store.project_id,
        region=store.region,
        credentials_path=store.credentials_path,
    )

    clients = sdk_manager.get_v2_client()
    data_object_client = clients["data_object_service_client"]
    search_client = clients["data_object_search_service_client"]

    # Build parent path
    parent = f"projects/{store.project_id}/locations/{store.region}/collections/{store.collection_id}"

    try:
        # Query all data objects (no filter => everything in the collection)
        query_request = vectorsearch.QueryDataObjectsRequest(
            parent=parent,
            page_size=100,  # Process in batches
            output_fields=vectorsearch.OutputFields(metadata_fields=["*"]),
        )

        # Iterate through pages and delete all
        total_deleted = 0
        paged_response = search_client.query_data_objects(query_request)

        for page in paged_response.pages:
            delete_requests = []
            for data_object in page.data_objects:
                delete_requests.append(
                    vectorsearch.DeleteDataObjectRequest(name=data_object.name)
                )

            # Batch delete this page
            if delete_requests:
                batch_delete_request = vectorsearch.BatchDeleteDataObjectsRequest(
                    parent=parent, requests=delete_requests
                )
                data_object_client.batch_delete_data_objects(batch_delete_request)
                total_deleted += len(delete_requests)

        _logger.info(f"Cleared {total_deleted} data objects from collection")
    except Exception as e:
        _logger.error(f"Failed to clear collection: {e}")
        raise
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-vertexaivectorsearch/llama_index/vector_stores/vertexaivectorsearch/_v2_operations.py",
"license": "MIT License",
"lines": 957,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-aibadgr/llama_index/llms/aibadgr/base.py | import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
class AIBadgr(OpenAILike):
    """
    AI Badgr LLM (Budget/Utility, OpenAI-compatible).

    AI Badgr exposes OpenAI-compatible API endpoints with tier-based model
    naming. Pass a tier name (basic, normal, premium) or a power-user model
    name (phi-3-mini, mistral-7b, llama3-8b-instruct); OpenAI model names
    are also accepted and mapped automatically.

    Examples:
        `pip install llama-index-llms-aibadgr`

        ```python
        from llama_index.llms.aibadgr import AIBadgr

        # Set up the AIBadgr class with the required model and API key
        llm = AIBadgr(model="premium", api_key="your_api_key")

        # Call the complete method with a query
        response = llm.complete("Explain the importance of low latency LLMs")

        print(response)
        ```
    """

    def __init__(
        self,
        model: str,
        api_key: Optional[str] = None,
        api_base: str = "https://aibadgr.com/api/v1",
        is_chat_model: bool = True,
        **kwargs: Any,
    ) -> None:
        # Environment variables fill in anything not passed explicitly:
        # AIBADGR_API_KEY backs the key; AIBADGR_BASE_URL overrides the base.
        resolved_key = api_key or os.environ.get("AIBADGR_API_KEY", None)
        resolved_base = os.environ.get("AIBADGR_BASE_URL", api_base)
        super().__init__(
            model=model,
            api_key=resolved_key,
            api_base=resolved_base,
            is_chat_model=is_chat_model,
            **kwargs,
        )

    @classmethod
    def class_name(cls) -> str:
        """Get class name."""
        return "AIBadgr"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-aibadgr/llama_index/llms/aibadgr/base.py",
"license": "MIT License",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-aibadgr/tests/test_llms_aibadgr.py | from llama_index.llms.aibadgr import AIBadgr
def test_class_name():
    """AIBadgr reports its class name and inherits from OpenAILike."""
    assert AIBadgr.class_name() == "AIBadgr"
    mro_names = {base.__name__ for base in AIBadgr.__mro__}
    assert "OpenAILike" in mro_names
def test_initialization():
    """AIBadgr accepts default and custom construction parameters."""
    default_base = "https://aibadgr.com/api/v1"

    # Premium model with the default base URL.
    premium = AIBadgr(model="premium", api_key="test_key")
    assert (premium.model, premium.api_key, premium.api_base) == (
        "premium",
        "test_key",
        default_base,
    )

    # Another tier name is passed through untouched.
    assert AIBadgr(model="basic", api_key="test_key").model == "basic"

    # An explicit api_base overrides the default.
    custom = AIBadgr(
        model="premium", api_key="test_key", api_base="https://custom.url/api/v1"
    )
    assert custom.api_base == "https://custom.url/api/v1"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-aibadgr/tests/test_llms_aibadgr.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-bedrock-converse/tests/test_thinking_delta.py | from unittest.mock import MagicMock
import pytest
from llama_index.core.base.llms.types import (
ChatMessage,
MessageRole,
ThinkingBlock,
TextBlock,
)
from llama_index.llms.bedrock_converse import BedrockConverse
@pytest.fixture
def mock_bedrock_client():
    """A MagicMock standing in for the boto3 Bedrock runtime client."""
    return MagicMock()
@pytest.fixture
def bedrock_with_thinking(mock_bedrock_client):
    """A BedrockConverse LLM with extended thinking enabled, backed by a mock client."""
    return BedrockConverse(
        model="us.anthropic.claude-sonnet-4-20250514-v1:0",
        thinking={"type": "enabled", "budget_tokens": 1024},
        client=mock_bedrock_client,
    )
def test_thinking_delta_populated_in_stream_chat(
    bedrock_with_thinking, mock_bedrock_client
):
    """Streaming chat surfaces reasoning text via additional_kwargs["thinking_delta"]."""
    # Two reasoning deltas, one plain-text delta, then usage metadata.
    mock_bedrock_client.converse_stream.return_value = {
        "stream": [
            {
                "contentBlockDelta": {
                    "delta": {
                        "reasoningContent": {
                            "text": "Let me think",
                            "signature": "sig1",
                        }
                    },
                    "contentBlockIndex": 0,
                }
            },
            {
                "contentBlockDelta": {
                    "delta": {
                        "reasoningContent": {
                            "text": " about this",
                            "signature": "sig2",
                        }
                    },
                    "contentBlockIndex": 0,
                }
            },
            {
                "contentBlockDelta": {
                    "delta": {"text": "The answer is"},
                    "contentBlockIndex": 1,
                }
            },
            {
                "metadata": {
                    "usage": {
                        "inputTokens": 10,
                        "outputTokens": 20,
                        "totalTokens": 30,
                    }
                }
            },
        ]
    }

    messages = [ChatMessage(role=MessageRole.USER, content="Test")]
    responses = list(bedrock_with_thinking.stream_chat(messages))

    assert len(responses) > 0

    # Each reasoning event becomes one response carrying a thinking_delta.
    thinking_responses = [
        r for r in responses if r.additional_kwargs.get("thinking_delta") is not None
    ]
    assert len(thinking_responses) == 2
    assert thinking_responses[0].additional_kwargs["thinking_delta"] == "Let me think"
    assert thinking_responses[1].additional_kwargs["thinking_delta"] == " about this"

    # Plain-text deltas must not carry a thinking_delta.
    text_responses = [
        r
        for r in responses
        if r.delta and r.additional_kwargs.get("thinking_delta") is None
    ]
    assert len(text_responses) >= 1
    assert text_responses[0].delta == "The answer is"
@pytest.mark.asyncio
async def test_thinking_delta_populated_in_astream_chat(
    bedrock_with_thinking, monkeypatch
):
    """Async streaming chat surfaces reasoning text via thinking_delta."""
    # Same event sequence as the sync streaming test.
    events = [
        {
            "contentBlockDelta": {
                "delta": {
                    "reasoningContent": {
                        "text": "Let me think",
                        "signature": "sig1",
                    }
                },
                "contentBlockIndex": 0,
            }
        },
        {
            "contentBlockDelta": {
                "delta": {
                    "reasoningContent": {
                        "text": " about this",
                        "signature": "sig2",
                    }
                },
                "contentBlockIndex": 0,
            }
        },
        {
            "contentBlockDelta": {
                "delta": {"text": "The answer is"},
                "contentBlockIndex": 1,
            }
        },
        {
            "metadata": {
                "usage": {
                    "inputTokens": 10,
                    "outputTokens": 20,
                    "totalTokens": 30,
                }
            }
        },
    ]

    async def _fake_converse_with_retry_async(**_kwargs):
        # Replace the Bedrock call with an async generator over canned events.
        async def _gen():
            for event in events:
                yield event

        return _gen()

    monkeypatch.setattr(
        "llama_index.llms.bedrock_converse.base.converse_with_retry_async",
        _fake_converse_with_retry_async,
    )

    messages = [ChatMessage(role=MessageRole.USER, content="Test")]
    response_stream = await bedrock_with_thinking.astream_chat(messages)
    responses = [r async for r in response_stream]

    assert len(responses) > 0

    thinking_responses = [
        r for r in responses if r.additional_kwargs.get("thinking_delta") is not None
    ]
    assert len(thinking_responses) == 2
    assert thinking_responses[0].additional_kwargs["thinking_delta"] == "Let me think"
    assert thinking_responses[1].additional_kwargs["thinking_delta"] == " about this"

    text_responses = [
        r
        for r in responses
        if r.delta and r.additional_kwargs.get("thinking_delta") is None
    ]
    assert len(text_responses) >= 1
    assert text_responses[0].delta == "The answer is"
def test_thinking_delta_none_for_non_thinking_content(
    bedrock_with_thinking, mock_bedrock_client
):
    """Responses for ordinary text content carry no thinking_delta."""
    mock_bedrock_client.converse_stream.return_value = {
        "stream": [
            {
                "contentBlockStart": {
                    "start": {"text": ""},
                    "contentBlockIndex": 0,
                }
            },
            {
                "contentBlockDelta": {
                    "delta": {"text": "Regular text"},
                    "contentBlockIndex": 0,
                }
            },
            {
                "metadata": {
                    "usage": {
                        "inputTokens": 10,
                        "outputTokens": 20,
                        "totalTokens": 30,
                    }
                }
            },
        ]
    }

    messages = [ChatMessage(role=MessageRole.USER, content="Test")]
    responses = list(bedrock_with_thinking.stream_chat(messages))

    # Every response that carries a text delta must leave thinking_delta unset.
    text_responses = [r for r in responses if r.delta]
    assert all(
        r.additional_kwargs.get("thinking_delta") is None for r in text_responses
    )
def test_thinking_block_in_message_blocks(bedrock_with_thinking, mock_bedrock_client):
    """The final streamed message accumulates ThinkingBlock and TextBlock entries."""
    mock_bedrock_client.converse_stream.return_value = {
        "stream": [
            {
                "contentBlockDelta": {
                    "delta": {
                        "reasoningContent": {
                            "text": "Thinking content",
                            "signature": "sig",
                        }
                    },
                    "contentBlockIndex": 0,
                }
            },
            {
                "contentBlockDelta": {
                    "delta": {"text": "Text content"},
                    "contentBlockIndex": 1,
                }
            },
            {
                "metadata": {
                    "usage": {
                        "inputTokens": 10,
                        "outputTokens": 20,
                        "totalTokens": 30,
                    }
                }
            },
        ]
    }

    messages = [ChatMessage(role=MessageRole.USER, content="Test")]
    responses = list(bedrock_with_thinking.stream_chat(messages))

    # The last streamed response holds the fully accumulated message.
    final_response = responses[-1]
    assert len(final_response.message.blocks) >= 2

    thinking_blocks = [
        b for b in final_response.message.blocks if isinstance(b, ThinkingBlock)
    ]
    assert len(thinking_blocks) == 1
    assert thinking_blocks[0].content == "Thinking content"

    text_blocks = [b for b in final_response.message.blocks if isinstance(b, TextBlock)]
    assert len(text_blocks) >= 1
def test_thinking_delta_populated_in_chat(bedrock_with_thinking, mock_bedrock_client):
    """Non-streaming chat keeps thinking out of additional_kwargs but in blocks."""
    reasoning_content = {
        "reasoningContent": {
            "reasoningText": {
                "text": "I am thinking",
                "signature": "sig",
            }
        }
    }
    mock_bedrock_client.converse.return_value = {
        "output": {
            "message": {
                "role": "assistant",
                "content": [reasoning_content, {"text": "The answer is 42"}],
            }
        },
        "usage": {"inputTokens": 10, "outputTokens": 20, "totalTokens": 30},
    }

    reply = bedrock_with_thinking.chat(
        [ChatMessage(role=MessageRole.USER, content="Test")]
    )

    # thinking_delta is a streaming-only kwarg; it must be absent here.
    assert "thinking_delta" not in reply.additional_kwargs
    # The reasoning instead surfaces as a ThinkingBlock on the message.
    thinking_blocks = [
        b for b in reply.message.blocks if isinstance(b, ThinkingBlock)
    ]
    assert thinking_blocks
    assert thinking_blocks[0].content == "I am thinking"
def test_thinking_block_round_trip(bedrock_with_thinking, mock_bedrock_client):
    """ThinkingBlock + TextBlock survive conversion to Bedrock Converse format."""
    from llama_index.llms.bedrock_converse.utils import messages_to_converse_messages

    assistant_turn = ChatMessage(
        role=MessageRole.ASSISTANT,
        blocks=[
            ThinkingBlock(
                content="I need to calculate",
                additional_information={"signature": "sig123"},
            ),
            TextBlock(text="It is the meaning of life"),
        ],
    )
    history = [
        ChatMessage(role=MessageRole.USER, content="Explain 42"),
        assistant_turn,
        ChatMessage(role=MessageRole.USER, content="Thanks"),
    ]

    converted, _ = messages_to_converse_messages(history, "some-model")

    # The assistant turn must translate to two Bedrock content blocks:
    # reasoningContent first, then the plain text.
    bedrock_assistant = converted[1]
    assert bedrock_assistant["role"] == "assistant"
    assert len(bedrock_assistant["content"]) == 2
    assert "reasoningContent" in bedrock_assistant["content"][0]
    reasoning = bedrock_assistant["content"][0]["reasoningContent"]["reasoningText"]
    assert reasoning["text"] == "I need to calculate"
    assert reasoning["signature"] == "sig123"
    assert bedrock_assistant["content"][1]["text"] == "It is the meaning of life"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-bedrock-converse/tests/test_thinking_delta.py",
"license": "MIT License",
"lines": 285,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-typecast/llama_index/tools/typecast/base.py | """Typecast text to speech tool spec."""
from typing import List, Optional
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from typecast.client import Typecast
from typecast.models import Output, Prompt, TTSRequest, VoicesV2Filter
class TypecastToolSpec(BaseToolSpec):
    """
    Tool spec wrapping the Typecast text-to-speech API.

    Provides voice discovery (V2 API) and speech synthesis with
    emotion, volume, pitch and tempo control.
    """

    spec_functions = ["get_voices", "get_voice", "text_to_speech"]

    def __init__(self, api_key: str, host: Optional[str] = None) -> None:
        """
        Initialize with parameters.

        Args:
            api_key (str): Your Typecast API key
            host (Optional[str]): The base url of Typecast API (default: https://api.typecast.ai)

        """
        self.api_key = api_key
        self.host = host

    def _client(self) -> "Typecast":
        """Build a fresh Typecast API client from the stored credentials."""
        return Typecast(host=self.host, api_key=self.api_key)

    def get_voices(
        self,
        model: Optional[str] = None,
        gender: Optional[str] = None,
        age: Optional[str] = None,
        use_case: Optional[str] = None,
    ) -> List[dict]:
        """
        Get list of available voices from Typecast (V2 API).

        Args:
            model (Optional[str]): Filter by model name (e.g., "ssfm-v21", "ssfm-v30")
            gender (Optional[str]): Filter by gender ("male" or "female")
            age (Optional[str]): Filter by age group ("child", "teenager", "young_adult", "middle_age", "elder")
            use_case (Optional[str]): Filter by use case category (e.g., "Audiobook", "Game", "Podcast")

        Returns:
            List[dict]: One dict per voice (voice_id, voice_name, models,
                plus optional gender, age and use_cases fields).

        Raises:
            TypecastError: If API request fails

        """
        # Only build a filter object when at least one criterion was given;
        # otherwise query the API unfiltered (filter=None).
        voice_filter = None
        if model or gender or age or use_case:
            voice_filter = VoicesV2Filter(
                model=model,
                gender=gender,
                age=age,
                use_cases=use_case,
            )
        voices = self._client().voices_v2(filter=voice_filter)
        return [voice.model_dump() for voice in voices]

    def get_voice(self, voice_id: str) -> dict:
        """
        Get details of a specific voice from Typecast (V2 API).

        Args:
            voice_id (str): The voice ID to look up (e.g., "tc_62a8975e695ad26f7fb514d1")

        Returns:
            dict: Voice details (voice_id, voice_name, models, plus optional
                gender, age and use_cases fields).

        Raises:
            NotFoundError: If voice not found
            TypecastError: If API request fails

        """
        return self._client().voice_v2(voice_id).model_dump()

    def text_to_speech(
        self,
        text: str,
        voice_id: str,
        output_path: str,
        model: str = "ssfm-v21",
        language: Optional[str] = None,
        emotion_preset: Optional[str] = "normal",
        emotion_intensity: Optional[float] = 1.0,
        volume: Optional[int] = 100,
        audio_pitch: Optional[int] = 0,
        audio_tempo: Optional[float] = 1.0,
        audio_format: Optional[str] = "wav",
        seed: Optional[int] = None,
    ) -> str:
        """
        Convert text to speech via the Typecast API and save it to disk.

        Args:
            text (str): The text to convert to speech
            voice_id (str): The voice ID to use (e.g., "tc_62a8975e695ad26f7fb514d1")
            output_path (str): Path to save the audio file
            model (str): Voice model name (default: "ssfm-v21")
            language (Optional[str]): Language code (ISO 639-3, e.g., "eng", "kor")
            emotion_preset (Optional[str]): Emotion preset (normal, happy, sad, angry)
            emotion_intensity (Optional[float]): Emotion intensity (0.0 to 2.0)
            volume (Optional[int]): Volume (0 to 200, default: 100)
            audio_pitch (Optional[int]): Audio pitch (-12 to 12, default: 0)
            audio_tempo (Optional[float]): Audio tempo (0.5 to 2.0, default: 1.0)
            audio_format (Optional[str]): Audio format (wav or mp3, default: wav)
            seed (Optional[int]): Random seed for reproducible results

        Returns:
            str: Path to the generated audio file

        Raises:
            ValueError: If text, voice_id or output_path is missing
            TypecastError: If the API call fails (see typecast.exceptions for
                the specific subclasses: BadRequestError, UnauthorizedError,
                PaymentRequiredError, NotFoundError, UnprocessableEntityError,
                InternalServerError)
            IOError: If file save fails

        """
        # Fail fast on obviously invalid arguments before any network call.
        if not text or not text.strip():
            raise ValueError("Text cannot be empty")
        if not voice_id:
            raise ValueError("Voice ID is required")
        if not output_path:
            raise ValueError("Output path is required")

        request = TTSRequest(
            voice_id=voice_id,
            text=text,
            model=model,
            language=language,
            prompt=Prompt(
                emotion_preset=emotion_preset,
                emotion_intensity=emotion_intensity,
            ),
            output=Output(
                volume=volume,
                audio_pitch=audio_pitch,
                audio_tempo=audio_tempo,
                audio_format=audio_format,
            ),
            seed=seed,
        )
        audio = self._client().text_to_speech(request)

        with open(output_path, "wb") as fp:
            fp.write(audio.audio_data)
        return output_path
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-typecast/llama_index/tools/typecast/base.py",
"license": "MIT License",
"lines": 157,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-typecast/tests/test_tools_typecast.py | import pytest
from unittest.mock import Mock, patch, mock_open
from llama_index.tools.typecast import TypecastToolSpec
def test_class_inheritance():
    """TypecastToolSpec must derive from BaseToolSpec."""
    from llama_index.core.tools.tool_spec.base import BaseToolSpec

    assert BaseToolSpec.__name__ in [c.__name__ for c in TypecastToolSpec.__mro__]


def test_spec_functions():
    """All three tool entry points are advertised."""
    for fn_name in ("get_voices", "get_voice", "text_to_speech"):
        assert fn_name in TypecastToolSpec.spec_functions


def test_initialization():
    """The constructor stores the key and host verbatim."""
    spec = TypecastToolSpec(api_key="test-key", host="https://test.api")
    assert spec.api_key == "test-key"
    assert spec.host == "https://test.api"
@patch("llama_index.tools.typecast.base.Typecast")
def test_get_voices_success(mock_typecast):
    """Unfiltered voice listing returns dumped V2 voice models."""
    voice = Mock()
    voice.model_dump.return_value = {
        "voice_id": "tc_123",
        "voice_name": "Test Voice",
        "models": [{"version": "ssfm-v21", "emotions": ["normal", "happy"]}],
        "gender": "female",
        "age": "young_adult",
        "use_cases": ["Audiobook", "Podcast"],
    }
    mock_typecast.return_value.voices_v2.return_value = [voice]

    voices = TypecastToolSpec(api_key="test-key").get_voices()

    assert len(voices) == 1
    assert voices[0]["voice_name"] == "Test Voice"
    assert voices[0]["models"][0]["version"] == "ssfm-v21"
    # No filter parameters were given, so the endpoint is hit unfiltered.
    mock_typecast.return_value.voices_v2.assert_called_once_with(filter=None)


@patch("llama_index.tools.typecast.base.Typecast")
@patch("llama_index.tools.typecast.base.VoicesV2Filter")
def test_get_voices_with_filters(mock_filter, mock_typecast):
    """Filter parameters are forwarded to the V2 voices endpoint."""
    voice = Mock()
    voice.model_dump.return_value = {
        "voice_id": "tc_123",
        "voice_name": "Test Voice",
        "models": [{"version": "ssfm-v30", "emotions": ["normal", "happy"]}],
        "gender": "female",
        "age": "young_adult",
    }
    mock_typecast.return_value.voices_v2.return_value = [voice]

    voices = TypecastToolSpec(api_key="test-key").get_voices(
        model="ssfm-v30", gender="female", age="young_adult"
    )

    assert len(voices) == 1
    mock_typecast.return_value.voices_v2.assert_called_once()


@patch("llama_index.tools.typecast.base.Typecast")
def test_get_voices_failure(mock_typecast):
    """API errors propagate unchanged to the caller."""
    from typecast.exceptions import TypecastError

    mock_typecast.return_value.voices_v2.side_effect = TypecastError("API Error")

    with pytest.raises(TypecastError) as exc_info:
        TypecastToolSpec(api_key="test-key").get_voices()
    assert "API Error" in str(exc_info.value)
@patch("llama_index.tools.typecast.base.Typecast")
def test_get_voice_success(mock_typecast):
    """Single-voice lookup returns the dumped V2 voice model."""
    voice = Mock()
    voice.model_dump.return_value = {
        "voice_id": "tc_123",
        "voice_name": "Test Voice",
        "models": [
            {"version": "ssfm-v21", "emotions": ["normal", "happy", "sad"]},
            {"version": "ssfm-v30", "emotions": ["normal", "happy", "sad", "whisper"]},
        ],
        "gender": "female",
        "age": "young_adult",
        "use_cases": ["Audiobook", "Podcast"],
    }
    mock_typecast.return_value.voice_v2.return_value = voice

    result = TypecastToolSpec(api_key="test-key").get_voice("tc_123")

    assert result["voice_id"] == "tc_123"
    assert result["voice_name"] == "Test Voice"
    assert len(result["models"]) == 2
    assert "happy" in result["models"][0]["emotions"]


@patch("llama_index.tools.typecast.base.Typecast")
def test_get_voice_not_found(mock_typecast):
    """A missing voice surfaces as NotFoundError."""
    from typecast.exceptions import NotFoundError

    mock_typecast.return_value.voice_v2.side_effect = NotFoundError("Voice not found")

    with pytest.raises(NotFoundError) as exc_info:
        TypecastToolSpec(api_key="test-key").get_voice("invalid_id")
    assert "Voice not found" in str(exc_info.value)
@patch("llama_index.tools.typecast.base.Typecast")
@patch("builtins.open", new_callable=mock_open)
def test_text_to_speech_success(mock_file, mock_typecast):
    """Synthesized audio bytes are written to the requested path."""
    tts_response = Mock()
    tts_response.audio_data = b"audio data"
    mock_typecast.return_value.text_to_speech.return_value = tts_response

    saved = TypecastToolSpec(api_key="test-key").text_to_speech(
        text="Hello world", voice_id="voice1", output_path="output.wav"
    )

    assert saved == "output.wav"
    mock_file.assert_called_once_with("output.wav", "wb")


def test_text_to_speech_validation():
    """Empty text or a missing voice id is rejected before any API call."""
    tool = TypecastToolSpec(api_key="test-key")

    with pytest.raises(ValueError, match="Text cannot be empty"):
        tool.text_to_speech(text="", voice_id="voice1", output_path="out.wav")
    with pytest.raises(ValueError, match="Voice ID is required"):
        tool.text_to_speech(text="Hello", voice_id="", output_path="out.wav")


@patch("llama_index.tools.typecast.base.Typecast")
@patch("builtins.open", new_callable=mock_open)
def test_text_to_speech_with_seed(mock_file, mock_typecast):
    """The seed parameter ends up on the outgoing TTS request."""
    tts_response = Mock()
    tts_response.audio_data = b"audio data"
    mock_typecast.return_value.text_to_speech.return_value = tts_response

    saved = TypecastToolSpec(api_key="test-key").text_to_speech(
        text="Hello world",
        voice_id="voice1",
        output_path="output.wav",
        seed=42,
    )

    assert saved == "output.wav"
    call_args = mock_typecast.return_value.text_to_speech.call_args
    assert call_args is not None
    # The request object is the single positional argument.
    assert call_args[0][0].seed == 42


@patch("llama_index.tools.typecast.base.Typecast")
@patch("builtins.open", new_callable=mock_open)
def test_text_to_speech_with_all_parameters(mock_file, mock_typecast):
    """Every optional knob is forwarded onto the TTS request object."""
    tts_response = Mock()
    tts_response.audio_data = b"audio data"
    mock_typecast.return_value.text_to_speech.return_value = tts_response

    saved = TypecastToolSpec(api_key="test-key").text_to_speech(
        text="Hello world",
        voice_id="voice1",
        output_path="output.mp3",
        model="ssfm-v21",
        language="eng",
        emotion_preset="happy",
        emotion_intensity=1.5,
        volume=120,
        audio_pitch=2,
        audio_tempo=1.2,
        audio_format="mp3",
        seed=42,
    )

    assert saved == "output.mp3"
    request = mock_typecast.return_value.text_to_speech.call_args[0][0]
    assert request.text == "Hello world"
    assert request.voice_id == "voice1"
    assert request.language == "eng"
    assert request.prompt.emotion_preset == "happy"
    assert request.prompt.emotion_intensity == 1.5
    assert request.output.volume == 120
    assert request.output.audio_pitch == 2
    assert request.output.audio_tempo == 1.2
    assert request.output.audio_format == "mp3"
    assert request.seed == 42
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-typecast/tests/test_tools_typecast.py",
"license": "MIT License",
"lines": 176,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-airweave/llama_index/tools/airweave/base.py | """Airweave tool spec."""
import warnings
from typing import Any, Dict, List, Optional
from airweave import AirweaveSDK, SearchRequest
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class AirweaveToolSpec(BaseToolSpec):
    """
    Airweave tool spec for searching collections.

    Airweave is an open-source platform that makes any app searchable
    for your agent by syncing data from various sources.

    To use this tool, you need:
    1. An Airweave account and API key
    2. At least one collection set up with data

    Get started at https://airweave.ai/
    """

    spec_functions = [
        "search_collection",
        "advanced_search_collection",
        "search_and_generate_answer",
        "list_collections",
        "get_collection_info",
    ]

    def __init__(
        self,
        api_key: str,
        base_url: Optional[str] = None,
        framework_name: str = "llamaindex",
        framework_version: str = "0.1.0",
    ) -> None:
        """
        Create the SDK client from Airweave API credentials.

        Args:
            api_key: Your Airweave API key from the dashboard
            base_url: Optional custom base URL for self-hosted instances
            framework_name: Framework name for analytics (default: "llamaindex")
            framework_version: Framework version for analytics

        """
        sdk_kwargs: Dict[str, Any] = {
            "api_key": api_key,
            "framework_name": framework_name,
            "framework_version": framework_version,
        }
        # Only override the endpoint when a custom one was supplied.
        if base_url:
            sdk_kwargs["base_url"] = base_url
        self.client = AirweaveSDK(**sdk_kwargs)

    def search_collection(
        self,
        collection_id: str,
        query: str,
        limit: Optional[int] = 10,
        offset: Optional[int] = 0,
    ) -> List[Document]:
        """
        Search a specific Airweave collection with a natural language query.

        Simple entry point for common use cases. For reranking, answer
        generation or strategy control, use ``advanced_search_collection``.

        Args:
            collection_id: The readable ID of the collection to search
                (e.g., 'finance-data-ab123')
            query: The search query in natural language
            limit: Maximum number of results to return (default: 10)
            offset: Number of results to skip for pagination (default: 0)

        Returns:
            List of Document objects containing search results with metadata

        """
        search_request = SearchRequest(query=query, limit=limit, offset=offset)
        raw = self.client.collections.search(
            readable_id=collection_id,
            request=search_request,
        )
        return self._parse_search_response(raw, collection_id)

    def advanced_search_collection(
        self,
        collection_id: str,
        query: str,
        limit: Optional[int] = 10,
        offset: Optional[int] = 0,
        retrieval_strategy: Optional[str] = None,
        temporal_relevance: Optional[float] = None,
        expand_query: Optional[bool] = None,
        interpret_filters: Optional[bool] = None,
        rerank: Optional[bool] = None,
        generate_answer: Optional[bool] = None,
    ) -> Dict[str, Any]:
        """
        Advanced search with full control over retrieval parameters.

        Args:
            collection_id: The readable ID of the collection
            query: The search query in natural language
            limit: Maximum number of results to return (default: 10)
            offset: Number of results to skip for pagination (default: 0)
            retrieval_strategy: "hybrid" (semantic + keyword, the server
                default), "neural" (pure embedding search) or "keyword" (BM25)
            temporal_relevance: Recency bias from 0.0 (none) to 1.0
                (only recent content matters)
            expand_query: Generate query variations for better recall
            interpret_filters: Extract structured filters from natural language
            rerank: Use LLM-based reranking for improved relevance
            generate_answer: Generate a natural language answer from results

        Returns:
            Dict with a 'documents' list and, when one was produced,
            an 'answer' string.

        """
        request_kwargs: Dict[str, Any] = {
            "query": query,
            "limit": limit,
            "offset": offset,
        }
        if retrieval_strategy:
            request_kwargs["retrieval_strategy"] = retrieval_strategy
        # These knobs are forwarded only when explicitly set, so the
        # server-side defaults stay in effect otherwise.
        optional_knobs = {
            "temporal_relevance": temporal_relevance,
            "expand_query": expand_query,
            "interpret_filters": interpret_filters,
            "rerank": rerank,
            "generate_answer": generate_answer,
        }
        request_kwargs.update(
            {name: value for name, value in optional_knobs.items() if value is not None}
        )

        raw = self.client.collections.search(
            readable_id=collection_id,
            request=SearchRequest(**request_kwargs),
        )

        outcome: Dict[str, Any] = {
            "documents": self._parse_search_response(raw, collection_id),
        }
        # Attach the generated answer only when the API actually produced one.
        if getattr(raw, "completion", None):
            outcome["answer"] = raw.completion
        return outcome

    def search_and_generate_answer(
        self,
        collection_id: str,
        query: str,
        limit: Optional[int] = 10,
        use_reranking: bool = True,
    ) -> Optional[str]:
        """
        Search collection and generate a natural language answer (RAG-style).

        Convenience wrapper combining search with answer generation, for
        agents that need direct answers rather than raw documents.

        Args:
            collection_id: The readable ID of the collection
            query: The search query / question in natural language
            limit: Maximum number of results to consider (default: 10)
            use_reranking: Whether to use LLM reranking (default: True)

        Returns:
            Natural language answer generated from the search results,
            or None if no answer could be generated (with a warning)

        """
        raw = self.client.collections.search(
            readable_id=collection_id,
            request=SearchRequest(
                query=query,
                limit=limit,
                generate_answer=True,
                rerank=use_reranking,
            ),
        )
        answer = getattr(raw, "completion", None)
        if answer:
            return answer
        # Best-effort: warn rather than raise when the API yields no answer.
        warnings.warn(
            "No answer could be generated from the search results", UserWarning
        )
        return None

    def _parse_search_response(
        self, response: Any, collection_id: str
    ) -> List[Document]:
        """Convert a raw Airweave search response into LlamaIndex Documents."""
        docs: List[Document] = []
        for item in getattr(response, "results", None) or []:
            # Results may be plain dicts or attribute-style objects;
            # support both shapes for text extraction.
            if isinstance(item, dict):
                body = item.get("content") or item.get("text") or str(item)
            elif hasattr(item, "content"):
                body = item.content
            elif hasattr(item, "text"):
                body = item.text
            else:
                body = str(item)

            meta: Dict[str, Any] = {"collection_id": collection_id}
            if isinstance(item, dict):
                if "metadata" in item:
                    meta.update(item["metadata"])
                if "score" in item:
                    meta["score"] = item["score"]
                if "source" in item:
                    meta["source"] = item["source"]
                if "id" in item:
                    meta["result_id"] = item["id"]
            else:
                if hasattr(item, "metadata") and item.metadata:
                    meta.update(item.metadata)
                if hasattr(item, "score"):
                    meta["score"] = item.score
                if hasattr(item, "source"):
                    meta["source"] = item.source
                if hasattr(item, "id"):
                    meta["result_id"] = item.id

            docs.append(Document(text=body, metadata=meta))
        return docs

    def list_collections(
        self,
        skip: Optional[int] = 0,
        limit: Optional[int] = 100,
    ) -> List[Dict[str, Any]]:
        """
        List all collections available in your Airweave organization.

        Useful for discovering what collections are available to search.

        Args:
            skip: Number of collections to skip for pagination (default: 0)
            limit: Maximum number of collections to return, 1-1000 (default: 100)

        Returns:
            List of dictionaries with collection information

        """
        summaries: List[Dict[str, Any]] = []
        for coll in self.client.collections.list(skip=skip, limit=limit):
            readable = (
                coll.readable_id if hasattr(coll, "readable_id") else str(coll.id)
            )
            created = str(coll.created_at) if hasattr(coll, "created_at") else None
            summaries.append(
                {"id": readable, "name": coll.name, "created_at": created}
            )
        return summaries

    def get_collection_info(self, collection_id: str) -> Dict[str, Any]:
        """
        Get detailed information about a specific collection.

        Args:
            collection_id: The readable ID of the collection

        Returns:
            Dictionary with detailed collection information

        """
        collection = self.client.collections.get(readable_id=collection_id)
        return {
            "id": (
                collection.readable_id
                if hasattr(collection, "readable_id")
                else str(collection.id)
            ),
            "name": collection.name,
            "created_at": (
                str(collection.created_at)
                if hasattr(collection, "created_at")
                else None
            ),
            "description": getattr(collection, "description", None),
        }
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-airweave/llama_index/tools/airweave/base.py",
"license": "MIT License",
"lines": 262,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-airweave/tests/test_tools_airweave.py | """Tests for Airweave tool spec."""
from unittest.mock import MagicMock, Mock, patch
import pytest
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.tools.airweave import AirweaveToolSpec
def test_class_inheritance() -> None:
    """AirweaveToolSpec must derive from BaseToolSpec."""
    assert BaseToolSpec.__name__ in [c.__name__ for c in AirweaveToolSpec.__mro__]


@pytest.fixture()
def mock_airweave_sdk():
    """Patch the AirweaveSDK class used by the tool spec."""
    with patch("llama_index.tools.airweave.base.AirweaveSDK") as mock_sdk:
        yield mock_sdk
def test_initialization(mock_airweave_sdk) -> None:
    """Default construction passes only key + framework metadata to the SDK."""
    spec = AirweaveToolSpec(api_key="test-api-key")

    assert spec is not None
    mock_airweave_sdk.assert_called_once_with(
        api_key="test-api-key",
        framework_name="llamaindex",
        framework_version="0.1.0",
    )


def test_initialization_with_base_url(mock_airweave_sdk) -> None:
    """A custom base_url is forwarded to the SDK constructor."""
    spec = AirweaveToolSpec(
        api_key="test-api-key", base_url="https://custom.airweave.com"
    )

    assert spec is not None
    mock_airweave_sdk.assert_called_once_with(
        api_key="test-api-key",
        framework_name="llamaindex",
        framework_version="0.1.0",
        base_url="https://custom.airweave.com",
    )


def test_spec_functions() -> None:
    """The advertised tool functions match the public API, in order."""
    assert AirweaveToolSpec.spec_functions == [
        "search_collection",
        "advanced_search_collection",
        "search_and_generate_answer",
        "list_collections",
        "get_collection_info",
    ]
def test_search_collection(mock_airweave_sdk) -> None:
    """Attribute-style results become Documents with score/source metadata."""
    sdk = MagicMock()

    hit = Mock()
    hit.content = "This is test content about LLMs"
    hit.score = 0.95
    hit.source = "test-source.pdf"
    hit.id = "result-123"
    hit.metadata = None  # None avoids Mock auto-attribute surprises

    api_response = Mock()
    api_response.results = [hit]
    sdk.collections.search.return_value = api_response
    mock_airweave_sdk.return_value = sdk

    docs = AirweaveToolSpec(api_key="test-key").search_collection(
        collection_id="test-collection", query="test query", limit=5
    )

    assert len(docs) == 1
    assert docs[0].text == "This is test content about LLMs"
    assert docs[0].metadata["score"] == 0.95
    assert docs[0].metadata["source"] == "test-source.pdf"
    assert docs[0].metadata["collection_id"] == "test-collection"
    assert docs[0].metadata["result_id"] == "result-123"


def test_search_collection_empty_results(mock_airweave_sdk) -> None:
    """An empty result set yields an empty document list."""
    sdk = MagicMock()
    api_response = Mock()
    api_response.results = []
    sdk.collections.search.return_value = api_response
    mock_airweave_sdk.return_value = sdk

    docs = AirweaveToolSpec(api_key="test-key").search_collection(
        collection_id="test-collection", query="nonexistent query"
    )

    assert len(docs) == 0
def test_advanced_search_collection(mock_airweave_sdk) -> None:
    """Advanced search returns documents plus the generated answer."""
    sdk = MagicMock()

    hit = Mock()
    hit.content = "Test content"
    hit.score = 0.95
    hit.metadata = None  # None avoids Mock auto-attribute surprises

    api_response = Mock()
    api_response.results = [hit]
    api_response.completion = "Generated answer from AI"
    sdk.collections.search.return_value = api_response
    mock_airweave_sdk.return_value = sdk

    outcome = AirweaveToolSpec(api_key="test-key").advanced_search_collection(
        collection_id="test-collection",
        query="test query",
        limit=5,
        retrieval_strategy="hybrid",
        temporal_relevance=0.5,
        expand_query=True,
        rerank=True,
        generate_answer=True,
    )

    assert "documents" in outcome
    assert "answer" in outcome
    assert len(outcome["documents"]) == 1
    assert outcome["answer"] == "Generated answer from AI"


def test_advanced_search_no_answer(mock_airweave_sdk) -> None:
    """Without answer generation the result dict carries no 'answer' key."""
    sdk = MagicMock()

    hit = Mock()
    hit.content = "Test content"
    hit.metadata = None  # None avoids Mock auto-attribute surprises

    api_response = Mock()
    api_response.results = [hit]
    api_response.completion = None  # explicitly no generated answer
    sdk.collections.search.return_value = api_response
    mock_airweave_sdk.return_value = sdk

    outcome = AirweaveToolSpec(api_key="test-key").advanced_search_collection(
        collection_id="test-collection",
        query="test query",
        limit=5,
    )

    assert "documents" in outcome
    assert "answer" not in outcome
    assert len(outcome["documents"]) == 1
def test_search_and_generate_answer(mock_airweave_sdk) -> None:
    """The RAG helper returns the completion text directly."""
    sdk = MagicMock()
    api_response = Mock()
    api_response.completion = "This is the generated answer"
    sdk.collections.search.return_value = api_response
    mock_airweave_sdk.return_value = sdk

    answer = AirweaveToolSpec(api_key="test-key").search_and_generate_answer(
        collection_id="test-collection",
        query="What is the answer?",
    )

    assert answer == "This is the generated answer"


def test_search_and_generate_answer_no_completion(mock_airweave_sdk) -> None:
    """No completion -> a UserWarning is emitted and None is returned."""
    sdk = MagicMock()
    api_response = Mock(spec=[])  # bare Mock: no auto-created attributes
    api_response.completion = None
    sdk.collections.search.return_value = api_response
    mock_airweave_sdk.return_value = sdk

    with pytest.warns(
        UserWarning, match="No answer could be generated from the search results"
    ):
        answer = AirweaveToolSpec(api_key="test-key").search_and_generate_answer(
            collection_id="test-collection",
            query="What is the answer?",
        )

    assert answer is None
def test_list_collections(mock_airweave_sdk) -> None:
"""Test list_collections method."""
# Setup mock
mock_client = MagicMock()
mock_collection1 = Mock()
mock_collection1.readable_id = "finance-data"
mock_collection1.name = "Finance Data"
mock_collection1.created_at = "2024-01-01T00:00:00"
mock_collection2 = Mock()
mock_collection2.readable_id = "support-tickets"
mock_collection2.name = "Support Tickets"
mock_collection2.created_at = "2024-01-02T00:00:00"
mock_client.collections.list.return_value = [mock_collection1, mock_collection2]
mock_airweave_sdk.return_value = mock_client
# Test
tool_spec = AirweaveToolSpec(api_key="test-key")
collections = tool_spec.list_collections()
# Assertions
assert len(collections) == 2
assert collections[0]["id"] == "finance-data"
assert collections[0]["name"] == "Finance Data"
assert collections[1]["id"] == "support-tickets"
assert collections[1]["name"] == "Support Tickets"
def test_get_collection_info(mock_airweave_sdk) -> None:
"""Test get_collection_info method."""
# Setup mock
mock_client = MagicMock()
mock_collection = Mock()
mock_collection.readable_id = "test-collection"
mock_collection.name = "Test Collection"
mock_collection.created_at = "2024-01-01T00:00:00"
mock_collection.description = "A test collection"
mock_client.collections.get.return_value = mock_collection
mock_airweave_sdk.return_value = mock_client
# Test
tool_spec = AirweaveToolSpec(api_key="test-key")
info = tool_spec.get_collection_info(collection_id="test-collection")
# Assertions
assert info["id"] == "test-collection"
assert info["name"] == "Test Collection"
assert info["description"] == "A test collection"
mock_client.collections.get.assert_called_once_with(readable_id="test-collection")
def test_parse_dict_results(mock_airweave_sdk) -> None:
    """Search results supplied as plain dicts should still parse into Documents."""
    client = MagicMock()

    response = Mock()
    response.results = [
        {
            "content": "Dict result content",
            "score": 0.88,
            "source": "dict-source",
            "id": "dict-123",
            "metadata": {"custom_field": "custom_value"},
        }
    ]
    client.collections.search.return_value = response
    mock_airweave_sdk.return_value = client

    tool_spec = AirweaveToolSpec(api_key="test-key")
    docs = tool_spec.search_collection(collection_id="test-collection", query="test")

    assert len(docs) == 1
    first = docs[0]
    assert first.text == "Dict result content"
    assert first.metadata["score"] == 0.88
    # Nested "metadata" keys from the raw dict are flattened into doc metadata.
    assert first.metadata["custom_field"] == "custom_value"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-airweave/tests/test_tools_airweave.py",
"license": "MIT License",
"lines": 225,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-ovhcloud/llama_index/llms/ovhcloud/base.py | import os
from typing import Any, Callable, Dict, List, Optional, Sequence
from llama_index.core.base.llms.types import (
ChatMessage,
LLMMetadata,
)
from llama_index.core.callbacks import CallbackManager
from llama_index.core.constants import DEFAULT_NUM_OUTPUTS, DEFAULT_TEMPERATURE
from llama_index.core.types import BaseOutputParser, PydanticProgramMode
from llama_index.llms.openai import OpenAI
from .utils import Model
DEFAULT_API_BASE = "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1"
class OVHcloud(OpenAI):
    """
    OVHcloud AI Endpoints LLM.

    OVHcloud AI Endpoints provides OpenAI-compatible API endpoints for various models.
    You can use the API for free with rate limits if no API key is provided or if it's
    an empty string. Otherwise, generate an API key from the OVHcloud manager at
    https://ovh.com/manager in the Public Cloud section, AI & Machine Learning, AI Endpoints.

    Args:
        model (str): The model name to use (e.g., "llama-3.1-8b-instruct").
            Availability is not checked eagerly; an invalid model surfaces as
            an API error on first use. Use ``available_models`` to inspect
            the catalog.
        temperature (float): The temperature to use for generation
        max_tokens (int): The maximum number of tokens to generate
        additional_kwargs (Optional[Dict[str, Any]]): Additional kwargs for the API
        max_retries (int): The maximum number of retries to make
        api_key (Optional[str]): The OVHcloud API key. If not provided or empty string,
            the API can be used for free with rate limits.
        callback_manager (Optional[CallbackManager]): Callback manager for logging
        default_headers (Optional[Dict[str, str]]): Default headers for API requests
        system_prompt (Optional[str]): System prompt for chat
        messages_to_prompt (Optional[Callable]): Function to format messages to prompt
        completion_to_prompt (Optional[Callable]): Function to format completion prompt
        pydantic_program_mode (PydanticProgramMode): Mode for Pydantic handling
        output_parser (Optional[BaseOutputParser]): Parser for model outputs
        api_base (Optional[str]): Override the default API base URL

    Examples:
        `pip install llama-index-llms-ovhcloud`

        ```python
        from llama_index.llms.ovhcloud import OVHcloud

        # Using with API key
        llm = OVHcloud(
            model="llama-3.1-8b-instruct",
            api_key="YOUR_API_KEY",
        )
        response = llm.complete("Hello, world!")

        # Using without API key (free with rate limits)
        llm = OVHcloud(
            model="llama-3.1-8b-instruct",
            api_key="",  # or omit api_key parameter
        )
        response = llm.complete("Hello, world!")

        # Get available models dynamically
        llm = OVHcloud(model="llama-3.1-8b-instruct")
        available = llm.available_models  # List[Model]
        print(f"Available models: {[model.id for model in available]}")

        # Chat messages
        from llama_index.core.llms import ChatMessage

        messages = [
            ChatMessage(role="system", content="You are a helpful assistant"),
            ChatMessage(role="user", content="What is the capital of France?"),
        ]
        response = llm.chat(messages)
        print(response)
        ```

    """

    def __init__(
        self,
        model: str,
        temperature: float = DEFAULT_TEMPERATURE,
        max_tokens: int = DEFAULT_NUM_OUTPUTS,
        additional_kwargs: Optional[Dict[str, Any]] = None,
        max_retries: int = 10,
        api_key: Optional[str] = None,
        callback_manager: Optional[CallbackManager] = None,
        default_headers: Optional[Dict[str, str]] = None,
        system_prompt: Optional[str] = None,
        messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
        completion_to_prompt: Optional[Callable[[str], str]] = None,
        pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
        output_parser: Optional[BaseOutputParser] = None,
        api_base: Optional[str] = None,
    ) -> None:
        additional_kwargs = additional_kwargs or {}
        callback_manager = callback_manager or CallbackManager([])

        # API key resolution: an explicit argument wins; otherwise fall back
        # to the OVHCLOUD_API_KEY environment variable. An empty string is a
        # deliberate, valid value that selects the rate-limited free tier.
        if api_key is None:
            api_key = os.environ.get("OVHCLOUD_API_KEY", "")

        api_base = api_base or DEFAULT_API_BASE

        # No eager model validation: the previous implementation only
        # constructed a throwaway client without ever querying it, so it
        # validated nothing. Invalid models surface naturally as API errors
        # on first use, and `available_models` exposes the live catalog.
        super().__init__(
            model=model,
            temperature=temperature,
            max_tokens=max_tokens,
            api_base=api_base,
            api_key=api_key,
            additional_kwargs=additional_kwargs,
            max_retries=max_retries,
            callback_manager=callback_manager,
            default_headers=default_headers,
            system_prompt=system_prompt,
            messages_to_prompt=messages_to_prompt,
            completion_to_prompt=completion_to_prompt,
            pydantic_program_mode=pydantic_program_mode,
            output_parser=output_parser,
        )

    @property
    def available_models(self) -> List[Model]:
        """
        Get available models from OVHcloud AI Endpoints.

        Queries the OpenAI-compatible ``/models`` listing endpoint. On any
        failure (network, auth, parsing) falls back to the configured model
        so callers always receive a usable list.
        """
        try:
            # AI Endpoints is OpenAI-compatible, so the standard model
            # listing is available on the underlying SDK client.
            response = self._get_client().models.list()
            return [Model(id=m.id) for m in response.data]
        except Exception:
            # Best-effort fallback: report at least the configured model.
            return [Model(id=self.model)] if hasattr(self, "model") else []

    @classmethod
    def class_name(cls) -> str:
        """Get class name."""
        return "OVHcloud_LLM"

    @property
    def metadata(self) -> LLMMetadata:
        """Get LLM metadata."""
        return LLMMetadata(
            num_output=self.max_tokens,
            model_name=self.model,
            is_chat_model=True,
        )
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-ovhcloud/llama_index/llms/ovhcloud/base.py",
"license": "MIT License",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-ovhcloud/llama_index/llms/ovhcloud/utils.py | from llama_index.core.bridge.pydantic import BaseModel
# OVHcloud AI Endpoints supported models
# This list can be updated based on the catalog at:
# https://www.ovhcloud.com/en/public-cloud/ai-endpoints/catalog/
# NOTE(review): currently empty — model validation appears to rely on the
# live API rather than this static list; confirm whether it is still needed.
SUPPORTED_MODEL_SLUGS: list[str] = [
    # Add model slugs here as they become available
    # Example format: "model-name"
]
class Model(BaseModel):
    """
    Metadata describing a single OVHcloud AI Endpoints model.

    Attributes:
        id: Unique identifier, passed as the ``model`` parameter on requests.
        model_type: API flavour served by the endpoint (defaults to "chat").
        client: Name of the serving client/provider.
    """

    id: str
    model_type: str = "chat"
    client: str = "OVHcloud"

    def __hash__(self) -> int:
        # Identity is determined by the model id alone, which keeps
        # instances usable as set members and dict keys.
        return self.id.__hash__()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-ovhcloud/llama_index/llms/ovhcloud/utils.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-ovhcloud/tests/test_coverage_comprehensive.py | #!/usr/bin/env python3
"""
Comprehensive test coverage for OVHcloud AI Endpoints dynamic validation functions.
This file ensures all lines in utils.py and base.py are covered.
"""
import sys
from unittest.mock import Mock, patch
from llama_index.llms.ovhcloud.utils import Model
from llama_index.llms.ovhcloud.base import OVHcloud
def test_model_class():
    """Exercise Model construction defaults, custom values, and hashing."""
    print("Testing Model class...")

    # Defaults apply when only the id is given.
    default_model = Model(id="test-model")
    assert default_model.id == "test-model"
    assert default_model.model_type == "chat"
    assert default_model.client == "OVHcloud"

    # Explicit values override every default.
    custom_model = Model(id="custom-model", model_type="completion", client="Custom")
    assert custom_model.id == "custom-model"
    assert custom_model.model_type == "completion"
    assert custom_model.client == "Custom"

    # Hashing keys on the id: duplicates collapse inside a set.
    duplicate = Model(id="test-model")
    assert hash(default_model) == hash(duplicate)
    assert len({default_model, custom_model, duplicate}) == 2

    print("✅ Model class tests passed")
def test_ovhcloud_class():
    """Exercise OVHcloud construction and the available_models fallback."""
    print("Testing OVHcloud class...")

    # When model listing cannot succeed, the property falls back to the
    # configured model.
    llm = OVHcloud(model="test-model", api_key="fake-api-key")
    models = llm.available_models
    assert [m.id for m in models] == ["test-model"]

    # Construction works with a patched OpenAI client class.
    with patch("openai.OpenAI") as mock_client_class:
        mock_client_class.return_value = Mock()
        OVHcloud(model="test-model", api_key="fake-api-key")

    # Construction also works with an empty API key (free tier).
    OVHcloud(model="test-model", api_key="")

    print("✅ OVHcloud class tests passed")
def main():
    """Run every coverage test; return True on success, False on any failure."""
    print("🧪 Running Comprehensive Coverage Tests")
    print("=" * 50)
    try:
        test_model_class()
        test_ovhcloud_class()
    except Exception as e:
        # Report and keep going so the caller gets a clean boolean result.
        print(f"❌ Test failed: {e}")
        import traceback

        traceback.print_exc()
        return False
    print("=" * 50)
    print("🎉 All coverage tests passed!")
    return True
if __name__ == "__main__":
    # Allow running this coverage script directly; the process exit code
    # mirrors the overall test result (0 = success, 1 = failure).
    success = main()
    sys.exit(0 if success else 1)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-ovhcloud/tests/test_coverage_comprehensive.py",
"license": "MIT License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-ovhcloud/tests/test_llms_ovhcloud.py | from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.ovhcloud import OVHcloud
def test_text_inference_embedding_class():
    # NOTE(review): the name looks copy-pasted from an embedding test; what it
    # actually verifies is that OVHcloud has BaseLLM somewhere in its MRO.
    ancestor_names = [base.__name__ for base in OVHcloud.__mro__]
    assert BaseLLM.__name__ in ancestor_names
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-ovhcloud/tests/test_llms_ovhcloud.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-confluence/llama_index/readers/confluence/html_parser.py | class HtmlTextParser:
def __init__(self):
try:
from markdownify import markdownify # noqa: F401
except ImportError:
raise ImportError(
"`markdownify` package not found, please run `pip install markdownify`"
)
def convert(self, html: str) -> str:
from markdownify import markdownify
if not html:
return ""
return markdownify(
html,
heading_style="ATX", # Use # for headings instead of underlines
bullets="*", # Use * for unordered lists
strip=["script", "style"], # Remove script and style tags for security
)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-confluence/llama_index/readers/confluence/html_parser.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-confluence/tests/test_html_parser.py | import pytest
from llama_index.readers.confluence.html_parser import HtmlTextParser
class TestHtmlTextParser:
    """
    Behavioral tests for HtmlTextParser's HTML -> Markdown conversion.

    Most assertions are substring checks ("content survives conversion")
    rather than exact-output checks, so they stay robust across markdownify
    versions. A few (link format, heading markers) pin Markdown syntax.
    """

    def test_parser_initialization(self):
        """The parser constructs cleanly (markdownify is importable)."""
        parser = HtmlTextParser()
        assert parser is not None

    # --- Inline formatting and headings ------------------------------------

    @pytest.mark.parametrize(
        ("html_input", "expected_contains"),
        [
            ("<p>Simple text</p>", "Simple text"),
            ("<p>First paragraph</p><p>Second paragraph</p>", "First paragraph"),
            ("<h1>Main Title</h1>", "Main Title"),
            ("<h2>Subtitle</h2>", "Subtitle"),
            ("<h3>Section</h3>", "Section"),
            ("<strong>Bold text</strong>", "Bold text"),
            ("<b>Bold text</b>", "Bold text"),
            ("<em>Italic text</em>", "Italic text"),
            ("<i>Italic text</i>", "Italic text"),
            ("<code>inline code</code>", "inline code"),
        ],
    )
    def test_basic_formatting(self, html_input, expected_contains):
        """Text content of common inline and heading tags survives conversion."""
        parser = HtmlTextParser()
        result = parser.convert(html_input)
        assert expected_contains in result

    @pytest.mark.parametrize(
        ("html_input", "expected_marker"),
        [
            ("<h1>Title</h1>", "#"),
            ("<h2>Subtitle</h2>", "##"),
            ("<h3>Section</h3>", "###"),
            ("<strong>Bold</strong>", "**"),
            ("<b>Bold</b>", "**"),
            ("<em>Italic</em>", "*"),
        ],
    )
    def test_markdown_style_conversion(self, html_input, expected_marker):
        """Tags are rendered with their Markdown syntax markers (ATX headings, ** bold)."""
        parser = HtmlTextParser()
        result = parser.convert(html_input)
        assert expected_marker in result

    # --- Lists -------------------------------------------------------------

    @pytest.mark.parametrize(
        ("html_input", "expected_contains"),
        [
            ("<ul><li>Item 1</li><li>Item 2</li></ul>", "Item 1"),
            ("<ol><li>First</li><li>Second</li></ol>", "First"),
            (
                "<ul><li>Parent<ul><li>Child</li></ul></li></ul>",
                "Parent",
            ),
        ],
    )
    def test_lists(self, html_input, expected_contains):
        """List item text survives conversion for flat and nested lists."""
        parser = HtmlTextParser()
        result = parser.convert(html_input)
        assert expected_contains in result

    def test_unordered_list_structure(self):
        """Unordered lists keep every item and use a recognized bullet marker."""
        parser = HtmlTextParser()
        html = "<ul><li>Item 1</li><li>Item 2</li><li>Item 3</li></ul>"
        result = parser.convert(html)
        assert "Item 1" in result
        assert "Item 2" in result
        assert "Item 3" in result
        # Parser is configured with bullets="*", but accept any common marker.
        assert any(marker in result for marker in ["*", "-", "+"])

    def test_ordered_list_structure(self):
        """Ordered lists keep every item."""
        parser = HtmlTextParser()
        html = "<ol><li>First item</li><li>Second item</li></ol>"
        result = parser.convert(html)
        assert "First item" in result
        assert "Second item" in result

    # --- Links -------------------------------------------------------------

    @pytest.mark.parametrize(
        ("html_input", "expected_text", "expected_marker"),
        [
            ('<a href="https://example.com">Link text</a>', "Link text", "example.com"),
            ('<a href="/page">Internal link</a>', "Internal link", "/page"),
            ('<a href="url"><strong>Bold link</strong></a>', "Bold link", "url"),
        ],
    )
    def test_links(self, html_input, expected_text, expected_marker):
        """Both link text and href survive conversion."""
        parser = HtmlTextParser()
        result = parser.convert(html_input)
        assert expected_text in result
        assert expected_marker in result

    def test_link_markdown_format(self):
        """Links are rendered in [text](url) Markdown form."""
        parser = HtmlTextParser()
        html = '<a href="https://example.com">Example</a>'
        result = parser.convert(html)
        assert "[Example](https://example.com)" in result

    # --- Code --------------------------------------------------------------

    @pytest.mark.parametrize(
        ("html_input", "expected_contains"),
        [
            ("<pre>Code block</pre>", "Code block"),
            ("<pre><code>def foo():</code></pre>", "def foo():"),
            ("<p>Use <code>print()</code> function</p>", "print()"),
        ],
    )
    def test_code_blocks(self, html_input, expected_contains):
        """Code content survives conversion for both block and inline code."""
        parser = HtmlTextParser()
        result = parser.convert(html_input)
        assert expected_contains in result

    # --- Tables ------------------------------------------------------------

    def test_table_basic(self):
        """Header and body cell text of a thead/tbody table survives conversion."""
        parser = HtmlTextParser()
        html = """
        <table>
            <thead>
                <tr><th>Header 1</th><th>Header 2</th></tr>
            </thead>
            <tbody>
                <tr><td>Cell 1</td><td>Cell 2</td></tr>
            </tbody>
        </table>
        """
        result = parser.convert(html)
        assert "Header 1" in result
        assert "Header 2" in result
        assert "Cell 1" in result
        assert "Cell 2" in result

    def test_table_multiple_rows(self):
        """All rows of a table without explicit thead/tbody survive conversion."""
        parser = HtmlTextParser()
        html = """
        <table>
            <tr><th>Name</th><th>Age</th></tr>
            <tr><td>Alice</td><td>30</td></tr>
            <tr><td>Bob</td><td>25</td></tr>
        </table>
        """
        result = parser.convert(html)
        assert "Name" in result
        assert "Age" in result
        assert "Alice" in result
        assert "Bob" in result
        assert "30" in result
        assert "25" in result

    # --- Composite documents ------------------------------------------------

    def test_mixed_formatting(self):
        """A document mixing headings, emphasis, lists, code, and links converts intact."""
        parser = HtmlTextParser()
        html = """
        <h1>Main Title</h1>
        <p>This is a paragraph with <strong>bold</strong> and <em>italic</em> text.</p>
        <ul>
            <li>First item</li>
            <li>Second item with <code>code</code></li>
        </ul>
        <p>A <a href="https://example.com">link</a> in a paragraph.</p>
        """
        result = parser.convert(html)
        assert "Main Title" in result
        assert "paragraph" in result
        assert "bold" in result
        assert "italic" in result
        assert "First item" in result
        assert "Second item" in result
        assert "code" in result
        assert "[link](https://example.com)" in result

    def test_nested_lists(self):
        """Parent and child items of a nested list all survive conversion."""
        parser = HtmlTextParser()
        html = """
        <ul>
            <li>Parent item 1
                <ul>
                    <li>Child item 1</li>
                    <li>Child item 2</li>
                </ul>
            </li>
            <li>Parent item 2</li>
        </ul>
        """
        result = parser.convert(html)
        assert "Parent item 1" in result
        assert "Child item 1" in result
        assert "Child item 2" in result
        assert "Parent item 2" in result

    # --- Edge cases ---------------------------------------------------------

    def test_empty_paragraph(self):
        """An empty paragraph converts without error."""
        parser = HtmlTextParser()
        html = "<p></p>"
        result = parser.convert(html)
        assert result is not None

    def test_empty_string(self):
        """Empty input converts without error (convert() short-circuits to "")."""
        parser = HtmlTextParser()
        result = parser.convert("")
        assert result is not None

    def test_special_characters(self):
        """HTML entities (&lt;, &amp;, &quot;) are handled without error."""
        parser = HtmlTextParser()
        html = "<p>&lt;div&gt; &amp; &quot;test&quot;</p>"
        result = parser.convert(html)
        assert result is not None
        assert len(result) > 0

    def test_line_breaks(self):
        """Text on either side of <br/> tags survives conversion."""
        parser = HtmlTextParser()
        html = "<p>Line 1<br/>Line 2<br/>Line 3</p>"
        result = parser.convert(html)
        assert "Line 1" in result
        assert "Line 2" in result
        assert "Line 3" in result

    def test_div_elements(self):
        """Plain <div> content survives conversion."""
        parser = HtmlTextParser()
        html = "<div>Content in div</div>"
        result = parser.convert(html)
        assert "Content in div" in result

    def test_nested_formatting(self):
        """Nested inline tags (bold inside italic) keep their text content."""
        parser = HtmlTextParser()
        html = "<p><strong><em>Bold and italic</em></strong></p>"
        result = parser.convert(html)
        assert "Bold and italic" in result

    def test_confluence_style_content(self):
        """A realistic Confluence-style page converts with all content intact."""
        parser = HtmlTextParser()
        html = """
        <h1>Page Title</h1>
        <p>Introduction paragraph with <strong>important</strong> information.</p>
        <h2>Section 1</h2>
        <p>Section content with a <a href="/link">link</a>.</p>
        <ul>
            <li>Bullet point 1</li>
            <li>Bullet point 2</li>
        </ul>
        <h2>Section 2</h2>
        <p>Code example: <code>function()</code></p>
        <pre><code>def example():
    return True</code></pre>
        """
        result = parser.convert(html)
        assert "Page Title" in result
        assert "Introduction" in result
        assert "important" in result
        assert "Section 1" in result
        assert "Section 2" in result
        assert "link" in result
        assert "Bullet point 1" in result
        assert "function()" in result
        assert "def example():" in result

    def test_whitespace_handling(self):
        """Words separated by runs of whitespace survive conversion."""
        parser = HtmlTextParser()
        html = "<p>Text   with   multiple   spaces</p>"
        result = parser.convert(html)
        assert "Text" in result
        assert "spaces" in result

    def test_convert_returns_string(self):
        """convert() always returns a str."""
        parser = HtmlTextParser()
        result = parser.convert("<p>Test</p>")
        assert isinstance(result, str)

    def test_multiple_headings(self):
        """Heading levels h1-h4 all keep their text."""
        parser = HtmlTextParser()
        html = """
        <h1>H1 Title</h1>
        <h2>H2 Subtitle</h2>
        <h3>H3 Section</h3>
        <h4>H4 Subsection</h4>
        """
        result = parser.convert(html)
        assert "H1 Title" in result
        assert "H2 Subtitle" in result
        assert "H3 Section" in result
        assert "H4 Subsection" in result
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-confluence/tests/test_html_parser.py",
"license": "MIT License",
"lines": 251,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/llama_index/core/chat_engine/multi_modal_context.py | from typing import Any, List, Optional, Sequence, Tuple, Union
from llama_index.core.base.response.schema import RESPONSE_TYPE, Response
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.callbacks import trace_method
from llama_index.core.chat_engine.types import (
AgentChatResponse,
BaseChatEngine,
StreamingAgentChatResponse,
ToolOutput,
)
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
MessageRole,
)
from llama_index.core.base.response.schema import (
StreamingResponse,
AsyncStreamingResponse,
)
from llama_index.core.indices.query.schema import QueryBundle, QueryType
from llama_index.core.llms import LLM, TextBlock, ChatMessage, ImageBlock
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.prompts import PromptTemplate
from llama_index.core.schema import ImageNode, NodeWithScore, MetadataMode
from llama_index.core.base.llms.generic_utils import image_node_to_image_block
from llama_index.core.memory import BaseMemory, Memory
# from llama_index.core.query_engine.multi_modal import _get_image_and_text_nodes
from llama_index.core.llms.llm import (
astream_chat_response_to_tokens,
stream_chat_response_to_tokens,
)
from llama_index.core.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from llama_index.core.settings import Settings
from llama_index.core.base.base_retriever import BaseRetriever
def _get_image_and_text_nodes(
    nodes: List[NodeWithScore],
) -> Tuple[List[NodeWithScore], List[NodeWithScore]]:
    """Partition retrieved nodes into (image nodes, text nodes), preserving order."""
    image_nodes = [n for n in nodes if isinstance(n.node, ImageNode)]
    text_nodes = [n for n in nodes if not isinstance(n.node, ImageNode)]
    return image_nodes, text_nodes
def _ensure_query_bundle(str_or_query_bundle: QueryType) -> QueryBundle:
    """Wrap a plain string query into a QueryBundle; pass bundles through untouched."""
    if not isinstance(str_or_query_bundle, str):
        return str_or_query_bundle
    return QueryBundle(str_or_query_bundle)
class MultiModalContextChatEngine(BaseChatEngine):
"""
Multimodal Context Chat Engine.
Assumes that retrieved text context fits within context window of LLM, along with images.
This class closely relates to the non-multimodal version, ContextChatEngine.
Args:
retriever (MultiModalVectorIndexRetriever): A retriever object.
multi_modal_llm (LLM): A multimodal LLM model.
memory (BaseMemory): Chat memory buffer to store the history.
system_prompt (str): System prompt.
context_template (Optional[Union[str, PromptTemplate]]): Prompt Template to embed query and context.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): Node Postprocessors.
callback_manager (Optional[CallbackManager]): A callback manager.
"""
def __init__(
self,
retriever: BaseRetriever,
multi_modal_llm: LLM,
memory: BaseMemory,
system_prompt: str,
context_template: Optional[Union[str, PromptTemplate]] = None,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
self._retriever = retriever
self._multi_modal_llm = multi_modal_llm
context_template = context_template or DEFAULT_TEXT_QA_PROMPT
if isinstance(context_template, str):
context_template = PromptTemplate(context_template)
self._context_template = context_template
self._memory = memory
self._system_prompt = system_prompt
self._node_postprocessors = node_postprocessors or []
self.callback_manager = callback_manager or CallbackManager([])
for node_postprocessor in self._node_postprocessors:
node_postprocessor.callback_manager = self.callback_manager
@classmethod
def from_defaults(
cls,
retriever: BaseRetriever,
chat_history: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
system_prompt: Optional[str] = None,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
context_template: Optional[Union[str, PromptTemplate]] = None,
multi_modal_llm: Optional[LLM] = None,
**kwargs: Any,
) -> "MultiModalContextChatEngine":
"""Initialize a MultiModalContextChatEngine from default parameters."""
multi_modal_llm = multi_modal_llm or Settings.llm
chat_history = chat_history or []
memory = memory or Memory.from_defaults(
chat_history=chat_history,
token_limit=multi_modal_llm.metadata.context_window - 256,
)
system_prompt = system_prompt or ""
node_postprocessors = node_postprocessors or []
return cls(
retriever,
multi_modal_llm=multi_modal_llm,
memory=memory,
system_prompt=system_prompt,
node_postprocessors=node_postprocessors,
callback_manager=Settings.callback_manager,
context_template=context_template,
)
def _apply_node_postprocessors(
self, nodes: List[NodeWithScore], query_bundle: QueryBundle
) -> List[NodeWithScore]:
for node_postprocessor in self._node_postprocessors:
nodes = node_postprocessor.postprocess_nodes(
nodes, query_bundle=query_bundle
)
return nodes
def _get_nodes(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
nodes = self._retriever.retrieve(query_bundle)
return self._apply_node_postprocessors(nodes, query_bundle=query_bundle)
async def _aget_nodes(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
nodes = await self._retriever.aretrieve(query_bundle)
return self._apply_node_postprocessors(nodes, query_bundle=query_bundle)
def synthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
streaming: bool = False,
) -> RESPONSE_TYPE:
image_nodes, text_nodes = _get_image_and_text_nodes(nodes)
context_str = "\n\n".join(
[r.get_content(metadata_mode=MetadataMode.LLM) for r in text_nodes]
)
fmt_prompt = self._context_template.format(
context_str=context_str, query_str=query_bundle.query_str
)
blocks: List[Union[ImageBlock, TextBlock]] = [
image_node_to_image_block(image_node.node)
for image_node in image_nodes
if isinstance(image_node.node, ImageNode)
]
blocks.append(TextBlock(text=fmt_prompt))
chat_history = self._memory.get(
input=str(query_bundle),
)
if streaming:
llm_stream = self._multi_modal_llm.stream_chat(
[
ChatMessage(role="system", content=self._system_prompt),
*chat_history,
ChatMessage(role="user", blocks=blocks),
]
)
stream_tokens = stream_chat_response_to_tokens(llm_stream)
return StreamingResponse(
response_gen=stream_tokens,
source_nodes=nodes,
metadata={"text_nodes": text_nodes, "image_nodes": image_nodes},
)
else:
llm_response = self._multi_modal_llm.chat(
[
ChatMessage(role="system", content=self._system_prompt),
*chat_history,
ChatMessage(role="user", blocks=blocks),
]
)
output = llm_response.message.content or ""
return Response(
response=output,
source_nodes=nodes,
metadata={"text_nodes": text_nodes, "image_nodes": image_nodes},
)
async def asynthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
streaming: bool = False,
) -> RESPONSE_TYPE:
image_nodes, text_nodes = _get_image_and_text_nodes(nodes)
context_str = "\n\n".join(
[r.get_content(metadata_mode=MetadataMode.LLM) for r in text_nodes]
)
fmt_prompt = self._context_template.format(
context_str=context_str, query_str=query_bundle.query_str
)
blocks: List[Union[ImageBlock, TextBlock]] = [
image_node_to_image_block(image_node.node)
for image_node in image_nodes
if isinstance(image_node.node, ImageNode)
]
blocks.append(TextBlock(text=fmt_prompt))
chat_history = await self._memory.aget(
input=str(query_bundle),
)
if streaming:
llm_stream = await self._multi_modal_llm.astream_chat(
[
ChatMessage(role="system", content=self._system_prompt),
*chat_history,
ChatMessage(role="user", blocks=blocks),
]
)
stream_tokens = await astream_chat_response_to_tokens(llm_stream)
return AsyncStreamingResponse(
response_gen=stream_tokens,
source_nodes=nodes,
metadata={"text_nodes": text_nodes, "image_nodes": image_nodes},
)
else:
llm_response = await self._multi_modal_llm.achat(
[
ChatMessage(role="system", content=self._system_prompt),
*chat_history,
ChatMessage(role="user", blocks=blocks),
]
)
output = llm_response.message.content or ""
return Response(
response=output,
source_nodes=nodes,
metadata={"text_nodes": text_nodes, "image_nodes": image_nodes},
)
@trace_method("chat")
def chat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
prev_chunks: Optional[List[NodeWithScore]] = None,
) -> AgentChatResponse:
if chat_history is not None:
self._memory.set(chat_history)
# get nodes and postprocess them
nodes = self._get_nodes(_ensure_query_bundle(message))
if len(nodes) == 0 and prev_chunks is not None:
nodes = prev_chunks
response = self.synthesize(
_ensure_query_bundle(message), nodes=nodes, streaming=False
)
user_message = ChatMessage(content=str(message), role=MessageRole.USER)
ai_message = ChatMessage(content=str(response), role=MessageRole.ASSISTANT)
self._memory.put(user_message)
self._memory.put(ai_message)
return AgentChatResponse(
response=str(response),
sources=[
ToolOutput(
tool_name="retriever",
content=str(nodes),
raw_input={"message": message},
raw_output=response.metadata,
)
],
source_nodes=response.source_nodes,
)
@trace_method("chat")
def stream_chat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
prev_chunks: Optional[List[NodeWithScore]] = None,
) -> StreamingAgentChatResponse:
if chat_history is not None:
self._memory.set(chat_history)
# get nodes and postprocess them
nodes = self._get_nodes(_ensure_query_bundle(message))
if len(nodes) == 0 and prev_chunks is not None:
nodes = prev_chunks
response = self.synthesize(
_ensure_query_bundle(message), nodes=nodes, streaming=True
)
assert isinstance(response, StreamingResponse)
def wrapped_gen(response: StreamingResponse) -> ChatResponseGen:
full_response = ""
for token in response.response_gen:
full_response += token
yield ChatResponse(
message=ChatMessage(
content=full_response, role=MessageRole.ASSISTANT
),
delta=token,
)
user_message = ChatMessage(content=str(message), role=MessageRole.USER)
ai_message = ChatMessage(content=full_response, role=MessageRole.ASSISTANT)
self._memory.put(user_message)
self._memory.put(ai_message)
return StreamingAgentChatResponse(
chat_stream=wrapped_gen(response),
sources=[
ToolOutput(
tool_name="retriever",
content=str(nodes),
raw_input={"message": message},
raw_output=response.metadata,
)
],
source_nodes=response.source_nodes,
is_writing_to_memory=False,
)
@trace_method("chat")
async def achat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
prev_chunks: Optional[List[NodeWithScore]] = None,
) -> AgentChatResponse:
if chat_history is not None:
await self._memory.aset(chat_history)
# get nodes and postprocess them
nodes = await self._aget_nodes(_ensure_query_bundle(message))
if len(nodes) == 0 and prev_chunks is not None:
nodes = prev_chunks
response = await self.asynthesize(
_ensure_query_bundle(message), nodes=nodes, streaming=False
)
user_message = ChatMessage(content=str(message), role=MessageRole.USER)
ai_message = ChatMessage(content=str(response), role=MessageRole.ASSISTANT)
await self._memory.aput(user_message)
await self._memory.aput(ai_message)
return AgentChatResponse(
response=str(response),
sources=[
ToolOutput(
tool_name="retriever",
content=str(nodes),
raw_input={"message": message},
raw_output=response.metadata,
)
],
source_nodes=response.source_nodes,
)
    @trace_method("chat")
    async def astream_chat(
        self,
        message: str,
        chat_history: Optional[List[ChatMessage]] = None,
        prev_chunks: Optional[List[NodeWithScore]] = None,
    ) -> StreamingAgentChatResponse:
        """Async streaming chat turn.

        Memory is written INSIDE the wrapped generator, after the stream is
        fully consumed — hence ``is_writing_to_memory=False`` below, which
        tells StreamingAgentChatResponse not to write memory itself.
        """
        if chat_history is not None:
            await self._memory.aset(chat_history)
        # get nodes and postprocess them
        nodes = await self._aget_nodes(_ensure_query_bundle(message))
        if len(nodes) == 0 and prev_chunks is not None:
            nodes = prev_chunks
        response = await self.asynthesize(
            _ensure_query_bundle(message), nodes=nodes, streaming=True
        )
        assert isinstance(response, AsyncStreamingResponse)

        async def wrapped_gen(response: AsyncStreamingResponse) -> ChatResponseAsyncGen:
            # Accumulate tokens so each yielded ChatResponse carries the
            # full message so far, plus the incremental delta.
            full_response = ""
            async for token in response.async_response_gen():
                full_response += token
                yield ChatResponse(
                    message=ChatMessage(
                        content=full_response, role=MessageRole.ASSISTANT
                    ),
                    delta=token,
                )

            # Only after the stream is exhausted do we know the complete
            # assistant message; record the exchange now.
            user_message = ChatMessage(content=str(message), role=MessageRole.USER)
            ai_message = ChatMessage(content=full_response, role=MessageRole.ASSISTANT)
            await self._memory.aput(user_message)
            await self._memory.aput(ai_message)

        return StreamingAgentChatResponse(
            achat_stream=wrapped_gen(response),
            sources=[
                ToolOutput(
                    tool_name="retriever",
                    content=str(nodes),
                    raw_input={"message": message},
                    raw_output=response.metadata,
                )
            ],
            source_nodes=response.source_nodes,
            is_writing_to_memory=False,
        )
    def reset(self) -> None:
        """Clear the conversation history stored in the engine's memory."""
        self._memory.reset()
    @property
    def chat_history(self) -> List[ChatMessage]:
        """Get chat history (all messages currently held in memory)."""
        return self._memory.get_all()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/llama_index/core/chat_engine/multi_modal_context.py",
"license": "MIT License",
"lines": 392,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-core/tests/chat_engine/test_multi_modal_context.py | import pytest
from llama_index.core import MockEmbedding
from llama_index.core.embeddings import MockMultiModalEmbedding
from llama_index.core.chat_engine.multi_modal_context import (
MultiModalContextChatEngine,
)
from llama_index.core.indices import MultiModalVectorStoreIndex
from llama_index.core.llms.mock import MockLLMWithChatMemoryOfLastCall
from llama_index.core.schema import Document, ImageDocument, QueryBundle
from llama_index.core.llms import TextBlock, ImageBlock
from llama_index.core.chat_engine.types import ChatMode
SYSTEM_PROMPT = "Talk like a pirate."
@pytest.fixture()
def chat_engine() -> MultiModalContextChatEngine:
    """Build a multi-modal CONTEXT chat engine over one text + one image doc.

    Uses mock embeddings and a mock LLM that records the messages it was
    last called with, so tests can inspect the assembled prompt.
    """
    # Base64 string for a 1×1 transparent PNG
    base64_str = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR4nGMAAQAABQABDQottAAAAABJRU5ErkJggg=="
    img = ImageDocument(image=base64_str, metadata={"file_name": "tiny.png"})
    embed_model_text = MockEmbedding(embed_dim=3)
    embed_model_image = MockMultiModalEmbedding(embed_dim=3)
    index = MultiModalVectorStoreIndex.from_documents(
        [Document.example(), img],
        image_embed_model=embed_model_image,
        embed_model=embed_model_text,
    )
    fixture = index.as_chat_engine(
        similarity_top_k=2,
        image_similarity_top_k=1,
        chat_mode=ChatMode.CONTEXT,
        llm=MockLLMWithChatMemoryOfLastCall(),
        system_prompt=SYSTEM_PROMPT,
    )
    # as_chat_engine dispatches on chat_mode; make sure we got the
    # multi-modal variant before handing it to the tests.
    assert isinstance(fixture, MultiModalContextChatEngine)
    return fixture
def test_chat(chat_engine: MultiModalContextChatEngine):
    """Sync chat: retrieval, prompt assembly, memory, and QueryBundle input."""
    response = chat_engine.chat("Hello World!")
    # The mock LLM's output contains the prompt text, so the system prompt
    # and the user query must both appear in the response string.
    assert SYSTEM_PROMPT in str(response)
    assert "Hello World!" in str(response)
    assert len(chat_engine.chat_history) == 2
    assert len(response.source_nodes) == 2  # one image and one text
    assert len(response.sources) == 1
    assert response.sources[0].tool_name == "retriever"
    assert len(response.sources[0].raw_output["image_nodes"]) == 1
    assert len(response.sources[0].raw_output["text_nodes"]) == 1
    llm = chat_engine._multi_modal_llm
    assert len(llm.last_chat_messages) == 2  # system prompt and user message
    assert (
        len(llm.last_chat_messages[1].blocks) == 2
    )  # user message consisting of text block containing text context and query and image block
    # Block ordering is not guaranteed; accept (image, text) or (text, image).
    assert (
        isinstance(llm.last_chat_messages[1].blocks[0], ImageBlock)
        and isinstance(llm.last_chat_messages[1].blocks[1], TextBlock)
    ) or (
        isinstance(llm.last_chat_messages[1].blocks[0], TextBlock)
        and isinstance(llm.last_chat_messages[1].blocks[1], ImageBlock)
    )
    assert "chat" in llm.last_called_chat_function
    # Second turn: the first exchange must be folded back into the prompt.
    response = chat_engine.chat("What is the capital of the moon?")
    assert SYSTEM_PROMPT in str(response)
    assert "Hello World!" in str(response)
    assert "What is the capital of the moon?" in str(response)
    assert len(chat_engine.chat_history) == 4
    # QueryBundle input is accepted in addition to plain strings.
    chat_engine.reset()
    q = QueryBundle("Hello World through QueryBundle")
    response = chat_engine.chat(q)
    assert str(q) in str(response)
    assert len(chat_engine.chat_history) == 2
    assert str(q) in str(chat_engine.chat_history[0])
def test_chat_stream(chat_engine: MultiModalContextChatEngine):
    """Sync streaming chat: same contract as test_chat, token-by-token."""
    response = chat_engine.stream_chat("Hello World!")
    # Count yielded tokens to confirm the answer was actually streamed.
    num_iters = 0
    for _ in response.response_gen:
        num_iters += 1
    assert num_iters > 10
    assert SYSTEM_PROMPT in str(response)
    assert "Hello World!" in str(response)
    assert len(chat_engine.chat_history) == 2
    assert len(response.source_nodes) == 2  # one image and one text
    assert len(response.sources) == 1
    assert response.sources[0].tool_name == "retriever"
    assert len(response.sources[0].raw_output["image_nodes"]) == 1
    assert len(response.sources[0].raw_output["text_nodes"]) == 1
    llm = chat_engine._multi_modal_llm
    assert len(llm.last_chat_messages) == 2  # system prompt and user message
    assert (
        len(llm.last_chat_messages[1].blocks) == 2
    )  # user message consisting of text block containing text context and query and image block
    # Block ordering is not guaranteed; accept (image, text) or (text, image).
    assert (
        isinstance(llm.last_chat_messages[1].blocks[0], ImageBlock)
        and isinstance(llm.last_chat_messages[1].blocks[1], TextBlock)
    ) or (
        isinstance(llm.last_chat_messages[1].blocks[0], TextBlock)
        and isinstance(llm.last_chat_messages[1].blocks[1], ImageBlock)
    )
    assert "stream_chat" in llm.last_called_chat_function
    # Second turn: history from the first turn must reach the prompt.
    response = chat_engine.stream_chat("What is the capital of the moon?")
    num_iters = 0
    for _ in response.response_gen:
        num_iters += 1
    assert num_iters > 10
    assert SYSTEM_PROMPT in str(response)
    assert "Hello World!" in str(response)
    assert "What is the capital of the moon?" in str(response)
    assert len(chat_engine.chat_history) == 4
    # QueryBundle input is accepted in addition to plain strings.
    chat_engine.reset()
    q = QueryBundle("Hello World through QueryBundle")
    response = chat_engine.stream_chat(q)
    num_iters = 0
    for _ in response.response_gen:
        num_iters += 1
    assert num_iters > 10
    assert str(q) in str(response)
    assert len(chat_engine.chat_history) == 2
    assert str(q) in str(chat_engine.chat_history[0])
@pytest.mark.asyncio
async def test_achat(chat_engine: MultiModalContextChatEngine):
    """Async chat: mirrors test_chat through the achat() entry point."""
    response = await chat_engine.achat("Hello World!")
    assert SYSTEM_PROMPT in str(response)
    assert "Hello World!" in str(response)
    assert len(chat_engine.chat_history) == 2
    assert len(response.source_nodes) == 2  # one image and one text
    assert len(response.sources) == 1
    assert response.sources[0].tool_name == "retriever"
    assert len(response.sources[0].raw_output["image_nodes"]) == 1
    assert len(response.sources[0].raw_output["text_nodes"]) == 1
    llm = chat_engine._multi_modal_llm
    assert len(llm.last_chat_messages) == 2  # system prompt and user message
    assert (
        len(llm.last_chat_messages[1].blocks) == 2
    )  # user message consisting of text block containing text context and query and image block
    # Block ordering is not guaranteed; accept (image, text) or (text, image).
    assert (
        isinstance(llm.last_chat_messages[1].blocks[0], ImageBlock)
        and isinstance(llm.last_chat_messages[1].blocks[1], TextBlock)
    ) or (
        isinstance(llm.last_chat_messages[1].blocks[0], TextBlock)
        and isinstance(llm.last_chat_messages[1].blocks[1], ImageBlock)
    )
    assert "achat" in llm.last_called_chat_function
    # Second turn: prior exchange must be included in the prompt.
    response = await chat_engine.achat("What is the capital of the moon?")
    assert SYSTEM_PROMPT in str(response)
    assert "Hello World!" in str(response)
    assert "What is the capital of the moon?" in str(response)
    assert len(chat_engine.chat_history) == 4
    # QueryBundle input is accepted in addition to plain strings.
    chat_engine.reset()
    q = QueryBundle("Hello World through QueryBundle")
    response = await chat_engine.achat(q)
    assert str(q) in str(response)
    assert len(chat_engine.chat_history) == 2
    assert str(q) in str(chat_engine.chat_history[0])
@pytest.mark.asyncio
async def test_chat_astream(chat_engine: MultiModalContextChatEngine):
    """Async streaming chat: mirrors test_chat through astream_chat()."""
    response = await chat_engine.astream_chat("Hello World!")
    # Count yielded tokens to confirm the answer was actually streamed.
    num_iters = 0
    async for _ in response.async_response_gen():
        num_iters += 1
    assert num_iters > 10
    assert SYSTEM_PROMPT in str(response)
    assert "Hello World!" in str(response)
    assert len(chat_engine.chat_history) == 2
    assert len(response.source_nodes) == 2  # one image and one text
    assert len(response.sources) == 1
    assert response.sources[0].tool_name == "retriever"
    assert len(response.sources[0].raw_output["image_nodes"]) == 1
    assert len(response.sources[0].raw_output["text_nodes"]) == 1
    llm = chat_engine._multi_modal_llm
    assert len(llm.last_chat_messages) == 2  # system prompt and user message
    assert (
        len(llm.last_chat_messages[1].blocks) == 2
    )  # user message consisting of text block containing text context and query and image block
    # Block ordering is not guaranteed; accept (image, text) or (text, image).
    assert (
        isinstance(llm.last_chat_messages[1].blocks[0], ImageBlock)
        and isinstance(llm.last_chat_messages[1].blocks[1], TextBlock)
    ) or (
        isinstance(llm.last_chat_messages[1].blocks[0], TextBlock)
        and isinstance(llm.last_chat_messages[1].blocks[1], ImageBlock)
    )
    assert "astream_chat" in llm.last_called_chat_function
    # Second turn: prior exchange must be included in the prompt.
    response = await chat_engine.astream_chat("What is the capital of the moon?")
    num_iters = 0
    async for _ in response.async_response_gen():
        num_iters += 1
    assert num_iters > 10
    assert SYSTEM_PROMPT in str(response)
    assert "Hello World!" in str(response)
    assert "What is the capital of the moon?" in str(response)
    assert len(chat_engine.chat_history) == 4
    # QueryBundle input is accepted in addition to plain strings.
    chat_engine.reset()
    q = QueryBundle("Hello World through QueryBundle")
    response = await chat_engine.astream_chat(q)
    num_iters = 0
    async for _ in response.async_response_gen():
        num_iters += 1
    assert num_iters > 10
    assert str(q) in str(response)
    assert len(chat_engine.chat_history) == 2
    assert str(q) in str(chat_engine.chat_history[0])
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/chat_engine/test_multi_modal_context.py",
"license": "MIT License",
"lines": 195,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/scrapy_web/base.py | from typing import List, Optional, Union
from multiprocessing import Process, Queue
from scrapy.spiders import Spider
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from .utils import run_spider_process, load_scrapy_settings
class ScrapyWebReader(BasePydanticReader):
    """
    Scrapy web page reader.

    Reads pages from the web by running a Scrapy spider in a child process
    and converting the scraped items into ``Document`` objects.

    Args:
        project_path (Optional[str]): The path to the Scrapy project for
            loading the project settings (with middlewares and pipelines).
            The project path should contain the `scrapy.cfg` file.
            Settings will be set to empty if path not specified or not found.
            Defaults to "".
        metadata_keys (Optional[List[str]]): List of keys to use
            as document metadata from the scraped item. Defaults to [].
        keep_keys (bool): Whether to keep metadata keys in items.
            Defaults to False.
    """

    # Pydantic copies field defaults per instance, so the mutable default
    # declared on the model field is safe.
    project_path: Optional[str] = ""
    metadata_keys: Optional[List[str]] = []
    keep_keys: bool = False

    def __init__(
        self,
        project_path: Optional[str] = "",
        metadata_keys: Optional[List[str]] = None,
        keep_keys: bool = False,
    ):
        # Default is ``None`` (not a shared mutable ``[]``) to avoid the
        # mutable-default-argument pitfall; it is normalized to [] here,
        # which preserves the previous observable behavior.
        super().__init__(
            project_path=project_path,
            metadata_keys=[] if metadata_keys is None else metadata_keys,
            keep_keys=keep_keys,
        )

    @classmethod
    def class_name(cls) -> str:
        """Return the serialization name of this reader class."""
        return "ScrapyWebReader"

    def load_data(self, spider: Union[Spider, str]) -> List[Document]:
        """
        Load data from the input spider.

        Args:
            spider (Union[Spider, str]): The Scrapy spider class or
                the spider name from the project to use for scraping.
                A spider *name* is only valid together with ``project_path``.

        Returns:
            List[Document]: List of documents extracted from the web pages.

        Raises:
            ValueError: If a spider name string is given without a
                project path to resolve it against.
        """
        if not self._is_spider_correct_type(spider):
            raise ValueError(
                "Invalid spider type. Provide a Spider class or spider name with project path."
            )

        documents_queue = Queue()
        config = {
            "keep_keys": self.keep_keys,
            "metadata_keys": self.metadata_keys,
            "settings": load_scrapy_settings(self.project_path),
        }
        # Running each spider in a separate process as Scrapy uses
        # twisted reactor which can only be run once in a process
        process = Process(
            target=run_spider_process, args=(spider, documents_queue, config)
        )
        process.start()
        process.join()

        # The child process pushes a single list of documents; an empty
        # queue means the crawl produced nothing (or failed).
        if documents_queue.empty():
            return []
        return documents_queue.get()

    def _is_spider_correct_type(self, spider: Union[Spider, str]) -> bool:
        # A string spider name can only be resolved within a project.
        return not (isinstance(spider, str) and not self.project_path)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/scrapy_web/base.py",
"license": "MIT License",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/scrapy_web/utils.py | import json
import os
from multiprocessing import Queue
from typing import Dict
from scrapy.spiders import signals, Spider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from llama_index.core.schema import Document
def load_scrapy_settings(project_path: str) -> Dict:
    """
    Load Scrapy settings from the given project path.

    Returns an empty dict when the path is falsy, does not exist, or the
    project settings cannot be read. The working directory is restored
    even if loading fails.
    """
    if not project_path or not os.path.exists(project_path):
        return {}

    previous_dir = os.getcwd()
    # get_project_settings() resolves the project relative to the cwd,
    # so temporarily switch into the project directory.
    os.chdir(project_path)
    try:
        try:
            loaded = get_project_settings() or {}
        except Exception:
            loaded = {}
    finally:
        os.chdir(previous_dir)
    return loaded
def run_spider_process(spider: Spider, documents_queue: Queue, config: Dict):
    """
    Run a Scrapy crawl in the current process and push the resulting
    documents onto ``documents_queue`` as a single list.
    """
    collected = []

    def _on_item_scraped(item, response, spider):
        # Convert each scraped item to a Document as it arrives.
        collected.append(item_to_document(dict(item), config))

    crawler_process = CrawlerProcess(settings=config["settings"])
    crawler = crawler_process.create_crawler(spider)
    crawler.signals.connect(_on_item_scraped, signal=signals.item_scraped)
    crawler_process.crawl(crawler)
    # Blocks until the crawl finishes (starts the twisted reactor).
    crawler_process.start()

    documents_queue.put(collected)
def item_to_document(item: Dict, config: Dict) -> Dict:
    """
    Convert one scraped item into a ``Document`` carrying the configured
    metadata; metadata keys are extracted before (optionally) being
    stripped from the item body.
    """
    doc_metadata = setup_metadata(item, config)
    cleaned_item = remove_metadata_keys(item, config)
    return Document(text=json.dumps(cleaned_item), metadata=doc_metadata)
def setup_metadata(item: Dict, config: Dict) -> Dict:
    """
    Build the document metadata by copying the configured metadata keys
    out of the scraped item (missing keys are silently skipped).
    """
    return {key: item[key] for key in config["metadata_keys"] if key in item}
def remove_metadata_keys(item: Dict, config: Dict) -> Dict:
    """
    Strip the configured metadata keys out of the scraped item unless
    ``keep_keys`` is set. Mutates *item* in place and returns it.
    """
    if config["keep_keys"]:
        return item
    for key in config["metadata_keys"]:
        item.pop(key, None)
    return item
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/scrapy_web/utils.py",
"license": "MIT License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-web/tests/test_scrapy_web_reader.py | import io
import pytest
import zipfile
import requests
from llama_index.readers.web import ScrapyWebReader
try:
from scrapy.spiders import Spider
SCRAPY_AVAILABLE = True
except ImportError:
SCRAPY_AVAILABLE = False
pytestmark = pytest.mark.skipif(not SCRAPY_AVAILABLE, reason="Scrapy not installed")
# Resolve the base class lazily: when Scrapy is not installed, ``Spider``
# is unbound and ``class SampleSpider(Spider)`` would raise NameError at
# import/collection time — before the skipif markers get a chance to skip
# the tests. Falling back to ``object`` keeps the module importable; the
# conditional never evaluates ``Spider`` when SCRAPY_AVAILABLE is False.
class SampleSpider(Spider if SCRAPY_AVAILABLE else object):
    name = "sample_spider"
    start_urls = ["http://quotes.toscrape.com"]

    def parse(self, response):
        # Yield one item per quote block on the page.
        for q in response.css("div.quote"):
            yield {
                "text": q.css("span.text::text").get(),
                "author": q.css(".author::text").get(),
            }
# NOTE(review): the module-level ``pytestmark`` already applies the skipif
# to every test in this module; decorating again is redundant but harmless.
@pytestmark
def test_scrapy_web_reader_with_spider_class():
    """Load documents via a Spider class (no project path needed).

    Network test: crawls quotes.toscrape.com.
    """
    reader = ScrapyWebReader()
    docs = reader.load_data(SampleSpider)
    assert isinstance(docs, list)
    assert len(docs) > 0
@pytestmark
def test_scrapy_web_reader_with_zip_project(tmp_path):
    """Load documents via a spider *name* resolved from a real Scrapy project.

    Network test: downloads the quotesbot project from GitHub, extracts it,
    and runs its "toscrape-css" spider through the reader.
    """
    project_zip_url = (
        "https://github.com/scrapy/quotesbot/archive/refs/heads/master.zip"
    )
    response = requests.get(project_zip_url)
    response.raise_for_status()
    with zipfile.ZipFile(io.BytesIO(response.content)) as zf:
        zf.extractall(tmp_path)
    project_dir = tmp_path / "quotesbot-master"
    reader = ScrapyWebReader(project_path=str(project_dir))
    docs = reader.load_data("toscrape-css")
    assert isinstance(docs, list)
    assert len(docs) > 0
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-web/tests/test_scrapy_web_reader.py",
"license": "MIT License",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-couchbase/tests/test_couchbase_query_vector_store.py | """Test Couchbase Query Vector Store functionality using GSI."""
from __future__ import annotations
import os
import json
from typing import Any, List
from datetime import timedelta
import pytest
import time
from llama_index.core.schema import MetadataMode, TextNode, Document
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.vector_stores.types import (
VectorStoreQuery,
MetadataFilters,
MetadataFilter,
FilterOperator,
)
from llama_index.vector_stores.couchbase import CouchbaseQueryVectorStore
from llama_index.vector_stores.couchbase.base import QueryVectorSearchType
from llama_index.vector_stores.couchbase.base import QueryVectorSearchSimilarity
from llama_index.core.storage.storage_context import StorageContext
from llama_index.core import VectorStoreIndex
from datetime import timedelta
from couchbase.auth import PasswordAuthenticator
from couchbase.cluster import Cluster
from couchbase.options import ClusterOptions
from couchbase.logic.options import KnownConfigProfiles
from couchbase.options import QueryOptions
CONNECTION_STRING = os.getenv("COUCHBASE_CONNECTION_STRING", "")
BUCKET_NAME = os.getenv("COUCHBASE_BUCKET_NAME", "")
SCOPE_NAME = os.getenv("COUCHBASE_SCOPE_NAME", "")
COLLECTION_NAME = os.getenv("COUCHBASE_COLLECTION_NAME", "")
USERNAME = os.getenv("COUCHBASE_USERNAME", "")
PASSWORD = os.getenv("COUCHBASE_PASSWORD", "")
INDEX_NAME = os.getenv("COUCHBASE_INDEX_NAME", "test_vector_index")
SLEEP_DURATION = 5 # Increased for GSI indexing
EMBEDDING_DIMENSION = 1536
def set_all_env_vars() -> bool:
    """Return True when every required Couchbase env variable is non-empty."""
    required = (
        CONNECTION_STRING,
        BUCKET_NAME,
        SCOPE_NAME,
        COLLECTION_NAME,
        USERNAME,
        PASSWORD,
    )
    return all(required)
def text_to_embedding(text: str) -> List[float]:
    """Map *text* to a deterministic embedding built from character codes.

    The vector is truncated or zero-padded to exactly EMBEDDING_DIMENSION.
    """
    codes = [float(ord(ch)) for ch in text]
    # A negative repeat count yields an empty pad when the text is longer
    # than the embedding dimension.
    padding = [0.0] * (EMBEDDING_DIMENSION - len(codes))
    return codes[:EMBEDDING_DIMENSION] + padding
def get_cluster() -> Any:
    """Get a couchbase cluster object connected with the env credentials."""
    auth = PasswordAuthenticator(USERNAME, PASSWORD)
    options = ClusterOptions(authenticator=auth)
    # WAN profile relaxes timeouts, suited to remote/cloud clusters.
    options.apply_profile(KnownConfigProfiles.WanDevelopment)
    connect_string = CONNECTION_STRING
    cluster = Cluster(connect_string, options)
    # Wait until the cluster is ready for use.
    cluster.wait_until_ready(timedelta(seconds=5))
    return cluster
@pytest.fixture()
def cluster() -> Cluster:
    """Get a couchbase cluster object (fresh connection per test)."""
    return get_cluster()
def delete_documents(
    client: Any, bucket_name: str, scope_name: str, collection_name: str
) -> None:
    """Delete all the documents in the collection.

    The N1QL statement is built by f-string from test-controlled
    identifiers (env vars), not user input.
    """
    query = f"DELETE FROM `{bucket_name}`.`{scope_name}`.`{collection_name}`"
    client.query(query).execute()
@pytest.fixture(scope="session")
def node_embeddings() -> list[TextNode]:
    """Return a list of TextNodes with embeddings.

    Embeddings are deterministic (derived from character codes via
    text_to_embedding), so nearest-neighbour results are reproducible.
    """
    return [
        TextNode(
            text="foo",
            id_="12c70eed-5779-4008-aba0-596e003f6443",
            metadata={
                "genre": "Mystery",
                "pages": 10,
                "rating": 4.5,
            },
            embedding=text_to_embedding("foo"),
        ),
        TextNode(
            text="bar",
            id_="f7d81cb3-bb42-47e6-96f5-17db6860cd11",
            metadata={
                "genre": "Comedy",
                "pages": 5,
                "rating": 3.2,
            },
            embedding=text_to_embedding("bar"),
        ),
        TextNode(
            text="baz",
            id_="469e9537-7bc5-4669-9ff6-baa0ed086236",
            metadata={
                "genre": "Thriller",
                "pages": 20,
                "rating": 4.8,
            },
            embedding=text_to_embedding("baz"),
        ),
    ]
def create_scope_and_collection(
    cluster: Cluster, bucket_name: str, scope_name: str, collection_name: str
) -> None:
    """Create scope and collection (plus a primary index) if they don't exist.

    Best-effort: each step tolerates an already-exists error, and any other
    failure is swallowed on the assumption the objects are already usable.
    """
    try:
        from couchbase.exceptions import (
            ScopeAlreadyExistsException,
            CollectionAlreadyExistsException,
            QueryIndexAlreadyExistsException,
        )

        bucket = cluster.bucket(bucket_name)
        # Create scope if it doesn't exist
        try:
            bucket.collections().create_scope(scope_name=scope_name)
        except ScopeAlreadyExistsException:
            pass
        # Create collection if it doesn't exist
        try:
            bucket.collections().create_collection(
                collection_name=collection_name, scope_name=scope_name
            )
        except CollectionAlreadyExistsException:
            pass
        # Primary index is required for the DELETE cleanup queries.
        try:
            bucket.scope(scope_name).collection(
                collection_name
            ).query_indexes().create_primary_index()
        except QueryIndexAlreadyExistsException:
            pass
    except Exception as e:
        # Log the error but don't fail - collection might already exist
        pass
def create_vector_index(
    cluster: Any,
    bucket_name: str,
    scope_name: str,
    collection_name: str,
    index_name: str,
    embedding_key: str = "embedding",
) -> None:
    """Create GSI vector index for the collection.

    Skips creation when an index with the same name already exists.
    Seeds the collection with 2000 filler documents before creating the
    index — presumably so the IVF quantizer has enough vectors to train
    on (TODO confirm against Couchbase GSI vector index requirements).
    """
    try:
        from couchbase.options import QueryOptions

        bucket = cluster.bucket(bucket_name)
        scope = bucket.scope(scope_name)
        # Check if index already exists
        try:
            query = f"SELECT name FROM system:indexes WHERE keyspace_id = '{collection_name}' AND name = '{index_name}'"
            result = scope.query(query).execute()
            if len(list(result.rows())) > 0:
                return  # Index already exists
        except Exception:
            pass
        # Index creation options: IVF with product quantization, cosine metric.
        with_opts = json.dumps(
            {
                "dimension": EMBEDDING_DIMENSION,
                "description": "IVF1024,PQ32x8",
                "similarity": "cosine",
            }
        )
        collection = scope.collection(collection_name)
        # Seed documents inserted before index creation (see docstring).
        docs = {}
        for i in range(2000):
            docs[f"large_batch_{i}"] = {
                "text": f"document_{i}",
                "embedding": text_to_embedding(f"document_{i}"),
                "metadata": {
                    "batch_id": "large",
                    "doc_num": i,
                },
            }
        result = collection.insert_multi(docs)
        if not result.all_ok:
            raise Exception(f"Error inserting documents: {result.exceptions}")
        # Create vector index
        create_index_query = f"""
            CREATE INDEX {index_name}
            ON `{bucket_name}`.`{scope_name}`.`{collection_name}` ({embedding_key} VECTOR)
            USING GSI WITH {with_opts}
        """
        result = scope.query(
            create_index_query, QueryOptions(timeout=timedelta(seconds=300))
        ).execute()
        # Fixed wait for the index build to settle before tests query it.
        time.sleep(15)
    except Exception:
        # Re-raised unchanged: index creation is a hard prerequisite.
        raise
def drop_vector_index(
    cluster: Any,
    bucket_name: str,
    scope_name: str,
    collection_name: str,
    index_name: str,
) -> None:
    """Drop the GSI vector index (best-effort; missing index is ignored)."""
    try:
        from couchbase.options import QueryOptions

        bucket = cluster.bucket(bucket_name)
        scope = bucket.scope(scope_name)
        drop_index_query = f"DROP INDEX `{index_name}` on `{bucket_name}`.`{scope_name}`.`{collection_name}`"
        scope.query(
            drop_index_query, QueryOptions(timeout=timedelta(seconds=60))
        ).execute()
    except Exception as e:
        # Index might not exist or already dropped
        pass
@pytest.mark.skipif(
not set_all_env_vars(), reason="missing Couchbase environment variables"
)
class TestCouchbaseQueryVectorStore:
    @classmethod
    def setup_class(cls) -> None:
        """Set up test class: connect once, ensure keyspace and vector index."""
        cls.cluster = get_cluster()
        # Create scope and collection if they don't exist
        create_scope_and_collection(
            cls.cluster, BUCKET_NAME, SCOPE_NAME, COLLECTION_NAME
        )
        # Create vector index for testing
        create_vector_index(
            cls.cluster, BUCKET_NAME, SCOPE_NAME, COLLECTION_NAME, INDEX_NAME
        )
    @classmethod
    def teardown_class(cls) -> None:
        """Clean up after all tests (best-effort: failures are ignored)."""
        try:
            # Drop the vector index
            drop_vector_index(
                cls.cluster, BUCKET_NAME, SCOPE_NAME, COLLECTION_NAME, INDEX_NAME
            )
            delete_documents(cls.cluster, BUCKET_NAME, SCOPE_NAME, COLLECTION_NAME)
        except Exception:
            pass
    def setup_method(self) -> None:
        """Per-test setup: empty the collection and build a fresh ANN store."""
        # Delete all the documents in the collection
        delete_documents(self.cluster, BUCKET_NAME, SCOPE_NAME, COLLECTION_NAME)
        self.vector_store = CouchbaseQueryVectorStore(
            cluster=self.cluster,
            bucket_name=BUCKET_NAME,
            scope_name=SCOPE_NAME,
            collection_name=COLLECTION_NAME,
            search_type=QueryVectorSearchType.ANN,
            similarity=QueryVectorSearchSimilarity.DOT,
            nprobes=50,
        )
    def test_initialization_default_params(self) -> None:
        """Constructor stores search params and falls back to default field names."""
        vector_store = CouchbaseQueryVectorStore(
            cluster=self.cluster,
            bucket_name=BUCKET_NAME,
            scope_name=SCOPE_NAME,
            collection_name=COLLECTION_NAME,
            search_type=QueryVectorSearchType.ANN,
            similarity=QueryVectorSearchSimilarity.COSINE,
            nprobes=50,
        )
        assert vector_store._search_type == QueryVectorSearchType.ANN
        assert vector_store._similarity == QueryVectorSearchSimilarity.COSINE
        assert vector_store._nprobes == 50
        # Default document field names.
        assert vector_store._text_key == "text"
        assert vector_store._embedding_key == "embedding"
        assert vector_store._metadata_key == "metadata"
    def test_initialization_custom_params(self) -> None:
        """Custom field names, string similarity, and query options are honored."""
        custom_timeout = timedelta(seconds=120)
        vector_store = CouchbaseQueryVectorStore(
            cluster=self.cluster,
            bucket_name=BUCKET_NAME,
            scope_name=SCOPE_NAME,
            collection_name=COLLECTION_NAME,
            search_type=QueryVectorSearchType.KNN,
            similarity="euclidean",
            text_key="content",
            embedding_key="vector",
            metadata_key="meta",
            query_options=QueryOptions(timeout=custom_timeout),
        )
        assert vector_store._search_type == QueryVectorSearchType.KNN
        # Lowercase similarity string is coerced to the enum.
        assert vector_store._similarity == QueryVectorSearchSimilarity.EUCLIDEAN
        assert vector_store._text_key == "content"
        assert vector_store._embedding_key == "vector"
        assert vector_store._metadata_key == "meta"
        assert vector_store._query_options["timeout"] == custom_timeout
    def test_initialization_with_string_search_type(self) -> None:
        """Uppercase string search type/similarity are coerced; nprobes optional."""
        vector_store = CouchbaseQueryVectorStore(
            cluster=self.cluster,
            bucket_name=BUCKET_NAME,
            scope_name=SCOPE_NAME,
            collection_name=COLLECTION_NAME,
            search_type="KNN",
            similarity="EUCLIDEAN",
        )
        assert vector_store._search_type == QueryVectorSearchType.KNN
        assert vector_store._similarity == QueryVectorSearchSimilarity.EUCLIDEAN
        # nprobes defaults to None when not supplied.
        assert vector_store._nprobes is None
    def test_add_documents(self, node_embeddings: List[TextNode]) -> None:
        """Test adding documents to Couchbase query vector store."""
        input_doc_ids = [node_embedding.id_ for node_embedding in node_embeddings]
        # Add nodes to the couchbase vector store
        doc_ids = self.vector_store.add(node_embeddings)
        # Ensure that all nodes are returned & they are the same as input
        assert len(doc_ids) == len(node_embeddings)
        for doc_id in doc_ids:
            assert doc_id in input_doc_ids
    def test_ann_search(self, node_embeddings: List[TextNode]) -> None:
        """ANN search returns the nearest node ("foo") with a similarity score."""
        # Add nodes to the couchbase vector store
        self.vector_store.add(node_embeddings)
        # Wait for the documents to be indexed
        time.sleep(SLEEP_DURATION)
        # ANN similarity search: the query embedding equals "foo"'s embedding,
        # so "foo" must be the top hit.
        q = VectorStoreQuery(
            query_embedding=text_to_embedding("foo"), similarity_top_k=1
        )
        result = self.vector_store.query(q)
        assert result.nodes is not None and len(result.nodes) == 1
        assert (
            result.nodes[0].get_content(metadata_mode=MetadataMode.NONE)
            == node_embeddings[0].text
        )
        assert result.similarities is not None
    def test_knn_search(self, node_embeddings: List[TextNode]) -> None:
        """Exact KNN search (L2) also returns the nearest node."""
        # Create a KNN vector store
        knn_vector_store = CouchbaseQueryVectorStore(
            cluster=self.cluster,
            bucket_name=BUCKET_NAME,
            scope_name=SCOPE_NAME,
            collection_name=COLLECTION_NAME,
            search_type=QueryVectorSearchType.KNN,
            similarity=QueryVectorSearchSimilarity.L2,
            nprobes=50,
        )
        # Add nodes to the couchbase vector store
        knn_vector_store.add(node_embeddings)
        # Wait for the documents to be indexed
        time.sleep(SLEEP_DURATION)
        # KNN similarity search
        q = VectorStoreQuery(
            query_embedding=text_to_embedding("foo"), similarity_top_k=1
        )
        result = knn_vector_store.query(q)
        assert result.nodes is not None and len(result.nodes) == 1
        assert (
            result.nodes[0].get_content(metadata_mode=MetadataMode.NONE)
            == node_embeddings[0].text
        )
        assert result.similarities is not None
    def test_search_with_filters(self, node_embeddings: List[TextNode]) -> None:
        """An EQ metadata filter narrows a top-3 search to the one match."""
        # Add nodes to the couchbase vector store
        self.vector_store.add(node_embeddings)
        # Wait for the documents to be indexed
        time.sleep(SLEEP_DURATION)
        # Test equality filter
        q = VectorStoreQuery(
            query_embedding=text_to_embedding("baz"),
            similarity_top_k=3,
            filters=MetadataFilters(
                filters=[
                    MetadataFilter(
                        key="genre", value="Thriller", operator=FilterOperator.EQ
                    ),
                ]
            ),
        )
        result = self.vector_store.query(q)
        # Only "baz" has genre == "Thriller".
        assert result.nodes is not None and len(result.nodes) == 1
        assert result.nodes[0].metadata.get("genre") == "Thriller"
    def test_search_with_numeric_filters(self, node_embeddings: List[TextNode]) -> None:
        """GT and LTE numeric filters select the expected subsets."""
        # Add nodes to the couchbase vector store
        self.vector_store.add(node_embeddings)
        # Wait for the documents to be indexed
        time.sleep(SLEEP_DURATION)
        # Test greater than filter: only "baz" has pages > 10.
        q = VectorStoreQuery(
            query_embedding=text_to_embedding("baz"),
            similarity_top_k=3,
            filters=MetadataFilters(
                filters=[
                    MetadataFilter(key="pages", value=10, operator=FilterOperator.GT),
                ]
            ),
        )
        result = self.vector_store.query(q)
        assert result.nodes is not None and len(result.nodes) == 1
        assert result.nodes[0].metadata.get("pages") == 20
        # Test less than or equal filter: "foo" (10) and "bar" (5) qualify.
        q = VectorStoreQuery(
            query_embedding=text_to_embedding("bar"),
            similarity_top_k=3,
            filters=MetadataFilters(
                filters=[
                    MetadataFilter(key="pages", value=10, operator=FilterOperator.LTE),
                ]
            ),
        )
        result = self.vector_store.query(q)
        assert result.nodes is not None and len(result.nodes) == 2
        for node in result.nodes:
            assert node.metadata.get("pages") <= 10
    def test_search_with_combined_filters(
        self, node_embeddings: List[TextNode]
    ) -> None:
        """AND-combined filters (string EQ + numeric GT) intersect correctly."""
        # Add nodes to the couchbase vector store
        self.vector_store.add(node_embeddings)
        # Wait for the documents to be indexed
        time.sleep(SLEEP_DURATION)
        # Test combined filters with AND condition
        q = VectorStoreQuery(
            query_embedding=text_to_embedding("baz"),
            similarity_top_k=3,
            filters=MetadataFilters(
                filters=[
                    MetadataFilter(
                        key="genre", value="Thriller", operator=FilterOperator.EQ
                    ),
                    MetadataFilter(key="rating", value=4.0, operator=FilterOperator.GT),
                ],
                condition="and",
            ),
        )
        result = self.vector_store.query(q)
        # Only "baz" satisfies both predicates.
        assert result.nodes is not None and len(result.nodes) == 1
        assert result.nodes[0].metadata.get("genre") == "Thriller"
        assert result.nodes[0].metadata.get("rating") > 4.0
    def test_delete_document(self) -> None:
        """Delete by ref_doc_id removes the document from search results.

        NOTE: requires OpenAI credentials — embeddings for insert and query
        come from OpenAIEmbedding.
        """
        storage_context = StorageContext.from_defaults(vector_store=self.vector_store)
        # Add a document to the vector store
        VectorStoreIndex.from_documents(
            [
                Document(
                    text="hello world",
                    metadata={"name": "John Doe", "age": 30, "city": "New York"},
                ),
            ],
            storage_context=storage_context,
        )
        # Wait for the documents to be indexed
        time.sleep(SLEEP_DURATION)
        # Search for the document
        search_embedding = OpenAIEmbedding().get_text_embedding("hello world")
        q = VectorStoreQuery(
            query_embedding=search_embedding,
            similarity_top_k=1,
        )
        result = self.vector_store.query(q)
        assert result.nodes is not None and len(result.nodes) == 1
        # Get the document ID to delete
        ref_doc_id_to_delete = result.nodes[0].ref_doc_id
        # Delete the document
        self.vector_store.delete(ref_doc_id=ref_doc_id_to_delete)
        # Wait for the deletion to be processed
        time.sleep(SLEEP_DURATION)
        # Ensure that no results are returned
        result = self.vector_store.query(q)
        assert len(result.nodes) == 0
    def test_empty_query_embedding_error(self) -> None:
        """A query without an embedding is rejected with ValueError."""
        q = VectorStoreQuery(
            query_embedding=None,
            similarity_top_k=1,
        )
        with pytest.raises(ValueError, match="Query embedding must not be empty"):
            self.vector_store.query(q)
def test_different_similarity_metrics(
    self, node_embeddings: List[TextNode]
) -> None:
    """Exercise ANN search under each supported similarity metric."""
    for similarity in (
        QueryVectorSearchSimilarity.COSINE,
        QueryVectorSearchSimilarity.EUCLIDEAN,
        QueryVectorSearchSimilarity.DOT,
    ):
        # Build a fresh store configured with this metric.
        store = CouchbaseQueryVectorStore(
            cluster=self.cluster,
            bucket_name=BUCKET_NAME,
            scope_name=SCOPE_NAME,
            collection_name=COLLECTION_NAME,
            similarity=similarity,
            search_type=QueryVectorSearchType.ANN,
            nprobes=50,
        )
        store.add(node_embeddings)
        time.sleep(SLEEP_DURATION)  # wait for indexing

        hits = store.query(
            VectorStoreQuery(
                query_embedding=text_to_embedding("foo"), similarity_top_k=1
            )
        )
        assert hits.nodes is not None and len(hits.nodes) == 1
        assert hits.similarities is not None
def test_custom_field_names(self) -> None:
    """Ensure custom text/embedding/metadata field names round-trip."""
    store = CouchbaseQueryVectorStore(
        cluster=self.cluster,
        bucket_name=BUCKET_NAME,
        scope_name=SCOPE_NAME,
        collection_name=COLLECTION_NAME,
        search_type=QueryVectorSearchType.ANN,
        similarity=QueryVectorSearchSimilarity.COSINE,
        nprobes=50,
        text_key="content",
        embedding_key="vector",
        metadata_key="meta",
    )

    # Insert one node that will be stored under the remapped field names.
    inserted = store.add(
        [
            TextNode(
                text="custom field test",
                id_="custom-test-id",
                metadata={"category": "test"},
                embedding=text_to_embedding("custom field test"),
            )
        ]
    )
    assert len(inserted) == 1
    time.sleep(SLEEP_DURATION)  # wait for indexing

    # The node must be retrievable despite the non-default field layout.
    hits = store.query(
        VectorStoreQuery(
            query_embedding=text_to_embedding("custom field test"),
            similarity_top_k=1,
        )
    )
    assert hits.nodes is not None and len(hits.nodes) == 1
    assert (
        hits.nodes[0].get_content(metadata_mode=MetadataMode.NONE)
        == "custom field test"
    )
def test_batch_insert(self, node_embeddings: List[TextNode]) -> None:
    """Insert nodes in small batches and confirm they are all searchable."""
    inserted_ids = self.vector_store.add(node_embeddings, batch_size=2)
    assert len(inserted_ids) == len(node_embeddings)

    time.sleep(SLEEP_DURATION)  # wait for indexing

    hits = self.vector_store.query(
        VectorStoreQuery(
            query_embedding=text_to_embedding("foo"), similarity_top_k=3
        )
    )
    assert hits.nodes is not None and len(hits.nodes) == 3
def test_vector_index_utilization(self, node_embeddings: List[TextNode]) -> None:
    """Smoke-test that vector search runs against the GSI vector index."""
    self.vector_store.add(node_embeddings)
    time.sleep(SLEEP_DURATION)  # wait for indexing

    # A successful ANN query implicitly exercises the vector index.
    hits = self.vector_store.query(
        VectorStoreQuery(
            query_embedding=text_to_embedding("foo"), similarity_top_k=2
        )
    )
    assert hits.nodes is not None and len(hits.nodes) == 2
    assert hits.similarities is not None and len(hits.similarities) == 2
def test_vector_search_relevance(self, node_embeddings: List[TextNode]) -> None:
    """Test that vector search returns relevant, correctly ordered results.

    Args:
        node_embeddings: Fixture nodes (including "foo") with embeddings.
    """
    # Add nodes to the vector store
    self.vector_store.add(node_embeddings)
    # Wait for GSI indexing
    time.sleep(SLEEP_DURATION)

    # Search for "foo" - the matching document should rank first.
    q = VectorStoreQuery(
        query_embedding=text_to_embedding("foo"),
        similarity_top_k=3,
    )
    result = self.vector_store.query(q)
    assert result.nodes is not None and len(result.nodes) == 3
    # The first result should be the most similar (lowest distance for
    # distance-based metrics).
    assert result.nodes[0].get_content(metadata_mode=MetadataMode.NONE) == "foo"
    # Scores must be ordered ascending (distance-based similarity).
    # Leftover debug print of the scores removed.
    scores = result.similarities
    assert scores[0] <= scores[1]
    assert scores[1] <= scores[2]
def test_large_batch_processing(self) -> None:
    """Ingest a large corpus in small batches and confirm search still works."""
    corpus = [
        TextNode(
            text=f"document_{i}",
            id_=f"large_batch_{i}",
            metadata={"batch_id": "large", "doc_num": i},
            embedding=text_to_embedding(f"document_{i}"),
        )
        for i in range(2000)
    ]

    inserted_ids = self.vector_store.add(corpus, batch_size=10)
    assert len(inserted_ids) == len(corpus)

    # A larger corpus needs extra indexing time.
    time.sleep(SLEEP_DURATION * 2)

    hits = self.vector_store.query(
        VectorStoreQuery(
            query_embedding=text_to_embedding("document_25"),
            similarity_top_k=5,
        )
    )
    assert hits.nodes is not None and len(hits.nodes) == 5
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-couchbase/tests/test_couchbase_query_vector_store.py",
"license": "MIT License",
"lines": 624,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/llms/test_predict_and_call.py | from llama_index.core.llms.mock import MockLLM
from llama_index.core.base.llms.types import ChatMessage, MessageRole, ChatResponse
from llama_index.core.program.function_program import FunctionTool
from llama_index.core.agent.react.formatter import ReActChatFormatter
from llama_index.core.agent.react.output_parser import ReActOutputParser
def tool(*args, **kwargs) -> str:
    """Trivial tool stub: ignores all arguments and returns a fixed greeting."""
    return "hello!!"
class _ReActDrivingLLM(MockLLM):
    """Mock LLM whose chat output is a fixed ReAct-format action string.

    The hard-coded response instructs the ReAct output parser to invoke the
    tool named ``tool`` with an empty argument dict.
    """

    async def achat(
        self, messages: list[ChatMessage], **kwargs: object
    ) -> ChatResponse:
        # Always emit the same "Thought/Action/Action Input" block so that
        # predict_and_call deterministically routes to the `tool` function.
        return ChatResponse(
            message=ChatMessage(
                role=MessageRole.ASSISTANT,
                content="Thought: do it\nAction: tool\nAction Input: {}\n",
            ),
            raw={"content": "react"},
        )
def test_predict_and_call_via_react_agent() -> None:
    """The tool should be invoked through ReAct-style action parsing."""
    response = _ReActDrivingLLM().predict_and_call(
        tools=[FunctionTool.from_defaults(fn=tool)],
        react_chat_formatter=ReActChatFormatter.from_defaults(),
        output_parser=ReActOutputParser(),
        user_msg=ChatMessage(role=MessageRole.USER, content="run tool"),
        chat_history=[],
    )

    # Both the final answer and the single tool-call source carry the
    # tool's return value.
    assert response.response == "hello!!"
    assert len(response.sources) == 1
    assert response.sources[0].content == "hello!!"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/llms/test_predict_and_call.py",
"license": "MIT License",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-serpex/examples/serpex_example.py | """Example usage of SERPEX tool with LlamaIndex."""
import os
from llama_index.tools.serpex import SerpexToolSpec
# Set your API key (or use environment variable SERPEX_API_KEY)
# NOTE: replace the placeholder before running; never commit a real key.
os.environ["SERPEX_API_KEY"] = "your_api_key_here"
def basic_search_example():
    """Basic search example."""
    print("=" * 60)
    print("Basic Search Example")
    print("=" * 60)

    # Run a plain web search and dump each returned document.
    spec = SerpexToolSpec()
    docs = spec.search(
        "latest developments in artificial intelligence", num_results=5
    )
    for doc in docs:
        print(doc.text)
        print()
def location_search_example():
    """Location-based search example.

    NOTE(review): ``SerpexToolSpec`` only exposes ``search`` (see base.py);
    there is no ``search_with_location`` method, so the original example
    would raise AttributeError. The location is folded into the query text
    instead.
    """
    print("=" * 60)
    print("Location-Based Search Example")
    print("=" * 60)

    # Initialize tool
    tool = SerpexToolSpec()

    # Include the location directly in the query string.
    results = tool.search(
        "best Italian restaurants in San Francisco, CA", num_results=5
    )
    for doc in results:
        print(doc.text)
        print()
def international_search_example():
    """International search example.

    NOTE(review): ``SerpexToolSpec.search`` accepts only ``num_results``,
    ``engine`` and ``time_range`` (see base.py); the original example passed
    unsupported ``gl``/``hl`` kwargs, which would raise TypeError. Language
    targeting here relies on the query text itself.
    """
    print("=" * 60)
    print("International Search Example")
    print("=" * 60)

    # Initialize tool
    tool = SerpexToolSpec()

    # A Spanish-language query.
    results = tool.search(query="noticias de tecnología", num_results=5)
    for doc in results:
        print(doc.text)
        print()
def agent_example():
    """Example with LlamaIndex agent."""
    print("=" * 60)
    print("Agent Example")
    print("=" * 60)

    try:
        from llama_index.agent.openai import OpenAIAgent
        from llama_index.llms.openai import OpenAI

        # Wire the SERPEX tool into an OpenAI-powered agent.
        search_spec = SerpexToolSpec()
        agent = OpenAIAgent.from_tools(
            search_spec.to_tool_list(), llm=OpenAI(model="gpt-4"), verbose=True
        )

        # Ask a question that forces the agent to search the web.
        answer = agent.chat(
            "What are the latest features announced for LlamaIndex? Search the web for recent news."
        )
        print(answer)
    except ImportError:
        print("OpenAI dependencies not installed. Install with:")
        print("pip install llama-index-agent-openai llama-index-llms-openai")

    print()
def main():
    """Run all examples."""
    # Abort early when no credentials are configured.
    if not os.environ.get("SERPEX_API_KEY"):
        print("Please set SERPEX_API_KEY environment variable")
        print("Get your API key at: https://serpex.dev/dashboard")
        return

    # Run every example in order.
    for example in (
        basic_search_example,
        location_search_example,
        international_search_example,
        agent_example,
    ):
        example()
if __name__ == "__main__":
main()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-serpex/examples/serpex_example.py",
"license": "MIT License",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-serpex/llama_index/tools/serpex/base.py | """SERPEX search tool specification."""
import os
from typing import Any, Dict, List, Optional
import requests
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class SerpexToolSpec(BaseToolSpec):
    """
    SERPEX tool spec for web search.

    This tool allows you to search the web using the SERPEX API and get
    real-time search results from multiple search engines including Google,
    Bing, DuckDuckGo, Brave, Yahoo, and Yandex.

    SERPEX provides fast, reliable search results via API, perfect for
    AI applications, RAG systems, and data analytics.

    Args:
        api_key (Optional[str]): SERPEX API key. If not provided, will look
            for SERPEX_API_KEY environment variable.
        engine (str): Default search engine to use. Options: 'auto' (default),
            'google', 'bing', 'duckduckgo', 'brave', 'yahoo', 'yandex'.

    Examples:
        >>> from llama_index.tools.serpex import SerpexToolSpec
        >>> tool = SerpexToolSpec(api_key="your_api_key")
        >>> results = tool.search("latest AI news")
        >>> for doc in results:
        ...     print(doc.text)
    """

    # Maximum number of results the SERPEX API accepts per request.
    _MAX_RESULTS = 100

    spec_functions = ["search"]

    def __init__(
        self,
        api_key: Optional[str] = None,
        engine: str = "auto",
    ) -> None:
        """
        Initialize SERPEX tool.

        Args:
            api_key: SERPEX API key. If not provided, reads from
                SERPEX_API_KEY environment variable.
            engine: Default search engine ('auto', 'google', 'bing', etc.).

        Raises:
            ValueError: If API key is not provided and not found in environment.
        """
        self.api_key = api_key or os.environ.get("SERPEX_API_KEY")
        if not self.api_key:
            raise ValueError(
                "SERPEX_API_KEY not found. Please set it as an environment "
                "variable or pass it as an argument. "
                "Get your API key at: https://serpex.dev/dashboard"
            )
        self.base_url = "https://api.serpex.dev/api/search"
        self.engine = engine

    def search(
        self,
        query: str,
        num_results: int = 10,
        engine: Optional[str] = None,
        time_range: Optional[str] = None,
    ) -> List[Document]:
        """
        Search the web using SERPEX API.

        This function queries the specified search engine and returns structured
        results containing titles, URLs, and snippets.

        Args:
            query: Search query string.
            num_results: Number of results to return (default: 10, max: 100).
            engine: Override default search engine. Options: 'auto', 'google',
                'bing', 'duckduckgo', 'brave', 'yahoo', 'yandex'.
            time_range: Filter results by time. Options: 'day', 'week',
                'month', 'year'.

        Returns:
            List of Document objects, one per search result. Each document
            contains the title, URL, and snippet in its text, with metadata
            including search details.

        Raises:
            requests.exceptions.RequestException: On network or HTTP errors
                (including non-2xx status codes).

        Examples:
            >>> tool = SerpexToolSpec(api_key="your_key")
            >>> results = tool.search("LlamaIndex tutorial", num_results=5)
            >>> for doc in results:
            ...     print(f"Title: {doc.metadata['title']}")
        """
        params: Dict[str, Any] = {
            "q": query,
            "engine": engine or self.engine,
            "category": "web",
        }
        if num_results:
            params["num"] = min(num_results, self._MAX_RESULTS)
        if time_range:
            params["time_range"] = time_range

        # No try/except wrapper here: the original `except RequestException
        # as e: raise e` added nothing, so request errors simply propagate.
        response = requests.get(
            self.base_url,
            params=params,
            headers={
                "Authorization": f"Bearer {self.api_key}",
            },
            timeout=30,
        )
        response.raise_for_status()
        data = response.json()

        # Extract results from the response
        results_list = data.get("results", [])
        if not results_list:
            return []

        # Request-level metadata attached to every returned document.
        api_metadata = data.get("metadata", {})
        num_results_total = api_metadata.get("number_of_results", 0)
        response_time = api_metadata.get("response_time", 0)

        # Create documents for each result
        documents = []
        for result in results_list[:num_results]:
            title = result.get("title", "No title")
            url = result.get("url", "")
            snippet = result.get("snippet", "No description available")

            text = f"{title}\nURL: {url}\n{snippet}"
            metadata = {
                "title": title,
                "url": url,
                "snippet": snippet,
                "number_of_results": num_results_total,
                "response_time": response_time,
                "query": query,
                "engine": engine or self.engine,
            }
            documents.append(Document(text=text, metadata=metadata))

        return documents
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-serpex/llama_index/tools/serpex/base.py",
"license": "MIT License",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-serpex/test_local.py | #!/usr/bin/env python3
"""Local test script for SERPEX tool integration."""
import os
from llama_index.tools.serpex import SerpexToolSpec
# Test 1: Check initialization
print("Test 1: Initializing SERPEX tool...")
try:
    # You need to set your SERPEX_API_KEY environment variable
    api_key = os.environ.get("SERPEX_API_KEY")
    if not api_key:
        print("⚠️ Warning: SERPEX_API_KEY not set. Please set it:")
        print(' export SERPEX_API_KEY="your_api_key"')
        print("\nTrying with dummy key for structure test...")
        tool = SerpexToolSpec(api_key="dummy_key_for_testing")
        print("✅ Tool initialization works!")
    else:
        tool = SerpexToolSpec(api_key=api_key)
        print("✅ Tool initialized with real API key!")

    # Test 2: Basic search
    print("\nTest 2: Testing basic search...")
    results = tool.search("LlamaIndex tutorial", num_results=3)
    print(f"✅ Search returned {len(results)} results (as Document objects):")
    for i, result in enumerate(results, 1):
        print(f"\nResult {i}:")
        print(result.text[:500])  # Print first 500 chars

    # Test 3: Check tool list conversion
    print("\n" + "=" * 60)
    # Fixed: this step was mislabeled "Test 4" in the printed output.
    print("Test 3: Testing tool list conversion...")
    tool_list = tool.to_tool_list()
    print(f"✅ Tool list has {len(tool_list)} tools:")
    for t in tool_list:
        print(f" - {t.metadata.name}: {t.metadata.description[:60]}...")

    print("\n" + "=" * 60)
    print("🎉 All tests passed! SERPEX integration is working!")

except Exception as e:
    print(f"❌ Test failed: {e}")
    import traceback

    traceback.print_exc()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-serpex/test_local.py",
"license": "MIT License",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-serpex/tests/test_serpex.py | """Tests for SERPEX tool."""
import os
from unittest.mock import MagicMock, patch
import pytest
import requests
from llama_index.tools.serpex import SerpexToolSpec
def test_serpex_init_with_key():
    """An explicit API key is stored and the engine defaults to 'auto'."""
    spec = SerpexToolSpec(api_key="test-key")
    assert spec.api_key == "test-key"
    assert spec.engine == "auto"
def test_serpex_init_with_custom_engine():
    """An engine override is honoured at construction time."""
    spec = SerpexToolSpec(api_key="test-key", engine="google")
    assert spec.api_key == "test-key"
    assert spec.engine == "google"
def test_serpex_init_without_key():
    """Constructing without any API key must raise ValueError."""
    # Clear the environment so SERPEX_API_KEY cannot be picked up.
    with patch.dict(os.environ, {}, clear=True), pytest.raises(
        ValueError, match="SERPEX_API_KEY not found"
    ):
        SerpexToolSpec()
@patch("requests.get")
def test_search(mock_get):
"""Test search functionality."""
mock_response = MagicMock()
mock_response.json.return_value = {
"results": [
{
"title": "Test Result",
"url": "https://example.com",
"snippet": "Test snippet",
}
],
"metadata": {
"number_of_results": 1,
"response_time": 100,
},
}
mock_response.raise_for_status = MagicMock()
mock_get.return_value = mock_response
tool = SerpexToolSpec(api_key="test-key")
results = tool.search("test query")
assert len(results) == 1
assert "Test Result" in results[0].text
assert "Test snippet" in results[0].text
assert results[0].metadata["title"] == "Test Result"
assert results[0].metadata["url"] == "https://example.com"
assert results[0].metadata["snippet"] == "Test snippet"
assert results[0].metadata["number_of_results"] == 1
assert results[0].metadata["response_time"] == 100
assert results[0].metadata["query"] == "test query"
@patch("requests.get")
def test_search_with_engine(mock_get):
"""Test search with specific engine."""
mock_response = MagicMock()
mock_response.json.return_value = {
"results": [
{
"title": "DuckDuckGo Result",
"url": "https://example.com",
"snippet": "Privacy focused result",
}
],
"metadata": {
"number_of_results": 1,
"response_time": 150,
},
}
mock_response.raise_for_status = MagicMock()
mock_get.return_value = mock_response
tool = SerpexToolSpec(api_key="test-key")
results = tool.search("test query", engine="duckduckgo")
assert len(results) == 1
assert "DuckDuckGo Result" in results[0].text
@patch("requests.get")
def test_search_with_time_range(mock_get):
"""Test search with time range filter."""
mock_response = MagicMock()
mock_response.json.return_value = {
"results": [
{
"title": "Recent Result",
"url": "https://example.com",
"snippet": "Recent news",
}
],
"metadata": {
"number_of_results": 1,
"response_time": 120,
},
}
mock_response.raise_for_status = MagicMock()
mock_get.return_value = mock_response
tool = SerpexToolSpec(api_key="test-key")
results = tool.search("news", time_range="day", num_results=5)
assert len(results) == 1
assert "Recent Result" in results[0].text
@patch("requests.get")
def test_search_no_results(mock_get):
"""Test search with no results."""
mock_response = MagicMock()
mock_response.json.return_value = {
"results": [],
"metadata": {
"number_of_results": 0,
"response_time": 50,
},
}
mock_response.raise_for_status = MagicMock()
mock_get.return_value = mock_response
tool = SerpexToolSpec(api_key="test-key")
results = tool.search("nonexistent query")
assert len(results) == 0
@patch("requests.get")
def test_search_api_error(mock_get):
"""Test search raises API errors."""
mock_get.side_effect = requests.exceptions.RequestException("API Error")
tool = SerpexToolSpec(api_key="test-key")
with pytest.raises(requests.exceptions.RequestException):
tool.search("test query")
# Integration test (requires real API key)
@pytest.mark.skipif(not os.environ.get("SERPEX_API_KEY"), reason="Requires real SERPEX API key")
def test_real_search():
    """Live search against the real API returns LlamaIndex-related hits."""
    spec = SerpexToolSpec(api_key=os.environ.get("SERPEX_API_KEY"))
    docs = spec.search("LlamaIndex", num_results=5)

    assert len(docs) > 0
    first_text = docs[0].text
    assert "LlamaIndex" in first_text or "llama" in first_text.lower()
@pytest.mark.skipif(not os.environ.get("SERPEX_API_KEY"), reason="Requires real SERPEX API key")
def test_real_search_with_filters():
    """Live search honours engine and time-range filters."""
    spec = SerpexToolSpec(
        api_key=os.environ.get("SERPEX_API_KEY"), engine="duckduckgo"
    )
    docs = spec.search("AI news", num_results=3, time_range="week")
    assert len(docs) > 0
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-serpex/tests/test_serpex.py",
"license": "MIT License",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-github/examples/github_app_example.py | """
Example: Using GitHub App Authentication with LlamaIndex GitHub Reader
This example demonstrates how to use GitHub App authentication instead of a Personal Access Token (PAT).
GitHub App authentication is more secure and provides better rate limits for enterprise use cases.
Prerequisites:
1. Create a GitHub App (see README.md for detailed setup guide)
2. Install the GitHub App in your organization/account
3. Download the private key (.pem file)
4. Note your App ID and Installation ID
Installation:
pip install llama-index-readers-github[github-app]
"""
import os
from pathlib import Path
from llama_index.readers.github import GithubRepositoryReader, GitHubAppAuth
def read_private_key_from_file(key_path: str) -> str:
    """Read a PEM private key file and return its contents.

    Args:
        key_path: Path to the ``.pem`` private key file.

    Returns:
        The file contents as a string.
    """
    # PEM files are ASCII, but be explicit about the encoding anyway so the
    # read does not depend on the platform's locale default.
    with open(key_path, "r", encoding="utf-8") as f:
        return f.read()
def example_with_github_app():
    """Example: Load repository using GitHub App authentication."""
    # Step 1: Set up GitHub App credentials
    # You can get these values from your GitHub App settings
    app_id = os.getenv("GITHUB_APP_ID", "123456")
    installation_id = os.getenv("GITHUB_INSTALLATION_ID", "789012")

    # Load private key from file or environment variable
    private_key_path = os.getenv("GITHUB_PRIVATE_KEY_PATH", "path/to/your-app.private-key.pem")

    if Path(private_key_path).exists():
        private_key = read_private_key_from_file(private_key_path)
    else:
        # Alternatively, you can store the key in an environment variable
        private_key = os.getenv("GITHUB_PRIVATE_KEY", "")

    if not private_key:
        print("Error: GitHub App private key not found!")
        print("Please set GITHUB_PRIVATE_KEY_PATH or GITHUB_PRIVATE_KEY environment variable")
        return

    # Step 2: Create GitHubAppAuth instance
    github_app_auth = GitHubAppAuth(
        app_id=app_id,
        private_key=private_key,
        installation_id=installation_id,
    )

    # Step 3: Create reader with GitHub App authentication
    reader = GithubRepositoryReader(
        owner="facebook",
        repo="react",
        github_app_auth=github_app_auth,  # Use GitHub App auth instead of github_token
        verbose=True,
    )

    # Step 4: Load documents
    # The reader will automatically fetch and refresh installation tokens as needed
    print("Loading repository...")
    documents = reader.load_data(branch="main")

    print(f"Loaded {len(documents)} documents from the repository")

    # Example: Print first document
    if documents:
        # Plain string here: the original used an f-string with no placeholders.
        print("\nFirst document preview:")
        print(f"File: {documents[0].metadata.get('file_path', 'N/A')}")
        print(f"Content length: {len(documents[0].text)} characters")
def example_with_filtering():
    """Example: Load specific files with GitHub App authentication."""
    # Set up credentials
    app_id = os.getenv("GITHUB_APP_ID")
    installation_id = os.getenv("GITHUB_INSTALLATION_ID")
    private_key = os.getenv("GITHUB_PRIVATE_KEY")

    if not all([app_id, installation_id, private_key]):
        print("Missing GitHub App credentials!")
        return

    # Create auth and reader
    github_app_auth = GitHubAppAuth(
        app_id=app_id,
        private_key=private_key,
        installation_id=installation_id,
    )

    reader = GithubRepositoryReader(
        owner="python",
        repo="cpython",
        github_app_auth=github_app_auth,
        verbose=True,
        # Only include Python files...
        filter_file_extensions=(
            [".py"],
            GithubRepositoryReader.FilterType.INCLUDE,
        ),
        # ...restricted to the Lib directory, matching the message printed
        # below (the original example claimed this but omitted the filter).
        filter_directories=(
            ["Lib"],
            GithubRepositoryReader.FilterType.INCLUDE,
        ),
    )

    print("Loading Python files from Lib directory...")
    documents = reader.load_data(branch="main")
    print(f"Loaded {len(documents)} Python files")
def example_token_management():
    """Example: Manual token management with GitHub App.

    ``GitHubAppAuth.get_installation_token`` is a coroutine (it is declared
    ``async def`` in github_app_auth.py), so every call here is driven
    through ``asyncio.run``. The original example invoked it synchronously,
    which would return unawaited coroutine objects instead of tokens.
    """
    import asyncio

    app_id = os.getenv("GITHUB_APP_ID")
    installation_id = os.getenv("GITHUB_INSTALLATION_ID")
    private_key = os.getenv("GITHUB_PRIVATE_KEY")

    if not all([app_id, installation_id, private_key]):
        print("Missing GitHub App credentials!")
        return

    github_app_auth = GitHubAppAuth(
        app_id=app_id,
        private_key=private_key,
        installation_id=installation_id,
    )

    # The token is automatically fetched and cached
    print("First token fetch (will make API call)...")
    token1 = asyncio.run(github_app_auth.get_installation_token())
    print(f"Token expires at: {github_app_auth._token_expires_at}")

    # Subsequent calls use cached token
    print("\nSecond fetch (uses cache)...")
    token2 = asyncio.run(github_app_auth.get_installation_token())
    assert token1 == token2, "Token should be cached"
    print("Token was retrieved from cache")

    # Force refresh the token
    print("\nForce refresh...")
    token3 = asyncio.run(github_app_auth.get_installation_token(force_refresh=True))
    print(f"New token fetched, expires at: {github_app_auth._token_expires_at}")

    # Manual invalidation (useful if you know the token was revoked)
    print("\nInvalidating token manually...")
    github_app_auth.invalidate_token()
    print("Token cache cleared")

    # Next call will fetch a fresh token
    token4 = asyncio.run(github_app_auth.get_installation_token())
    print("Fresh token fetched after invalidation")
if __name__ == "__main__":
print("=" * 60)
print("GitHub App Authentication Examples")
print("=" * 60)
# Choose which example to run
example = os.getenv("EXAMPLE", "basic")
if example == "basic":
print("\nRunning basic example...")
example_with_github_app()
elif example == "filtering":
print("\nRunning filtering example...")
example_with_filtering()
elif example == "token_mgmt":
print("\nRunning token management example...")
example_token_management()
else:
print(f"\nUnknown example: {example}")
print("Valid options: basic, filtering, token_mgmt")
print("Set EXAMPLE environment variable to choose")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-github/examples/github_app_example.py",
"license": "MIT License",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-github/llama_index/readers/github/github_app_auth.py | """
GitHub App authentication module.
This module provides GitHub App authentication support for the GitHub readers.
It handles JWT generation and installation access token management with automatic
token refresh and caching.
"""
import time
from datetime import datetime
from typing import Optional

try:
    import jwt
except ImportError:
    jwt = None  # type: ignore
class GitHubAppAuthenticationError(Exception):
    """Error raised when any step of GitHub App authentication fails."""
class GitHubAppAuth:
"""
GitHub App authentication handler.
This class manages authentication for GitHub Apps by generating JWTs and
obtaining/caching installation access tokens. Tokens are automatically
refreshed when they expire.
Attributes:
app_id (str): The GitHub App ID.
private_key (str): The private key for the GitHub App (PEM format).
installation_id (str): The installation ID for the GitHub App.
Examples:
>>> # Read private key from file
>>> with open("private-key.pem", "r") as f:
... private_key = f.read()
>>>
>>> # Create auth handler
>>> auth = GitHubAppAuth(
... app_id="123456",
... private_key=private_key,
... installation_id="789012"
... )
>>>
>>> # Get installation token (cached and auto-refreshed)
>>> import asyncio
>>> token = asyncio.run(auth.get_installation_token())
"""
# Token expiry buffer in seconds (refresh 5 minutes before expiry)
TOKEN_EXPIRY_BUFFER = 300
# JWT expiry time in seconds (10 minutes, max allowed by GitHub)
JWT_EXPIRY_SECONDS = 600
# Installation token expiry time in seconds (1 hour, GitHub default)
INSTALLATION_TOKEN_EXPIRY_SECONDS = 3600
def __init__(
self,
app_id: str,
private_key: str,
installation_id: str,
base_url: str = "https://api.github.com",
) -> None:
"""
Initialize GitHubAppAuth.
Args:
app_id: The GitHub App ID.
private_key: The private key for the GitHub App in PEM format.
installation_id: The installation ID for the GitHub App.
base_url: Base URL for GitHub API (default: "https://api.github.com").
Raises:
ImportError: If PyJWT is not installed.
GitHubAppAuthenticationError: If initialization fails.
"""
if jwt is None:
raise ImportError(
"PyJWT is required for GitHub App authentication. "
"Install it with: pip install 'PyJWT[crypto]>=2.8.0'"
)
if not app_id:
raise GitHubAppAuthenticationError("app_id is required")
if not private_key:
raise GitHubAppAuthenticationError("private_key is required")
if not installation_id:
raise GitHubAppAuthenticationError("installation_id is required")
self.app_id = app_id
self.private_key = private_key
self.installation_id = installation_id
self.base_url = base_url.rstrip("/")
# Token cache
self._token_cache: Optional[str] = None
self._token_expires_at: float = 0
def _generate_jwt(self) -> str:
"""
Generate JWT for GitHub App authentication.
The JWT is used to authenticate as the GitHub App itself, before
obtaining an installation access token.
Returns:
The generated JWT token.
Raises:
GitHubAppAuthenticationError: If JWT generation fails.
"""
try:
now = int(time.time())
payload = {
"iat": now - 60, # Issued at (with 60s buffer for clock skew)
"exp": now + self.JWT_EXPIRY_SECONDS, # Expires in 10 minutes
"iss": self.app_id, # Issuer is the app ID
}
return jwt.encode(payload, self.private_key, algorithm="RS256")
except Exception as e:
raise GitHubAppAuthenticationError(f"Failed to generate JWT: {e!s}") from e
async def get_installation_token(self, force_refresh: bool = False) -> str:
"""
Get or refresh installation access token.
This method returns a cached token if it's still valid, or requests
a new token from GitHub if the cached token is expired or about to expire.
Args:
force_refresh: If True, forces a token refresh even if cached token
is still valid.
Returns:
A valid installation access token.
Raises:
GitHubAppAuthenticationError: If token retrieval fails.
ImportError: If httpx is not installed.
"""
# Check if cached token is still valid (with buffer)
if not force_refresh and self._is_token_valid():
return self._token_cache # type: ignore
# Generate new token
try:
import httpx
except ImportError:
raise ImportError(
"httpx is required for GitHub App authentication. "
"Install it with: pip install httpx>=0.26.0"
)
jwt_token = self._generate_jwt()
headers = {
"Accept": "application/vnd.github+json",
"Authorization": f"Bearer {jwt_token}",
"X-GitHub-Api-Version": "2022-11-28",
}
url = f"{self.base_url}/app/installations/{self.installation_id}/access_tokens"
try:
async with httpx.AsyncClient() as client:
response = await client.post(url, headers=headers, timeout=10.0)
response.raise_for_status()
data = response.json()
self._token_cache = data["token"]
# Token typically expires in 1 hour
self._token_expires_at = (
time.time() + self.INSTALLATION_TOKEN_EXPIRY_SECONDS
)
return self._token_cache
except httpx.HTTPStatusError as e:
raise GitHubAppAuthenticationError(
f"Failed to get installation token: {e.response.status_code} - {e.response.text}"
) from e
except Exception as e:
raise GitHubAppAuthenticationError(
f"Failed to get installation token: {e!s}"
) from e
def _is_token_valid(self) -> bool:
"""
Check if the cached token is still valid.
Returns:
True if token exists and is not expired (accounting for buffer).
"""
if not self._token_cache:
return False
# Check if token will expire within the buffer period
time_until_expiry = self._token_expires_at - time.time()
return time_until_expiry > self.TOKEN_EXPIRY_BUFFER
def invalidate_token(self) -> None:
    """
    Drop the cached installation token.

    Forces the next ``get_installation_token()`` call to request a brand
    new token from GitHub — useful when the current token is known to be
    revoked or otherwise no longer valid.
    """
    # Clearing both fields keeps the cache and its expiry bookkeeping in sync.
    self._token_expires_at = 0
    self._token_cache = None
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-github/llama_index/readers/github/github_app_auth.py",
"license": "MIT License",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-github/tests/test_github_app_auth.py | """Tests for GitHub App authentication."""
import time
import os
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
# Detect whether optional PyJWT is installed; JWT-decoding tests skip without it.
try:
    import jwt
    HAS_JWT = True
except ImportError:
    HAS_JWT = False
    jwt = None
# Import the GitHub reader integration; all tests skip when it is unavailable.
try:
    from llama_index.readers.github.github_app_auth import (
        GitHubAppAuth,
        GitHubAppAuthenticationError,
    )
    from llama_index.readers.github.repository.github_client import GithubClient
    from llama_index.readers.github.issues.github_client import GitHubIssuesClient
    from llama_index.readers.github.collaborators.github_client import (
        GitHubCollaboratorsClient,
    )
    from llama_index.readers.github import GithubRepositoryReader
    HAS_GITHUB_APP_AUTH = True
except ImportError:
    HAS_GITHUB_APP_AUTH = False
# Private key for signing test JWTs, read from the environment. The placeholder
# default is not a usable key, so key-dependent tests skip themselves.
# pragma: allowlist secret
TEST_PRIVATE_KEY = os.getenv("TEST_PRIVATE_KEY", "not-a-private-key")
@pytest.mark.skipif(not HAS_JWT, reason="PyJWT not installed")
@pytest.mark.skipif(
    not HAS_GITHUB_APP_AUTH, reason="GitHub App auth module not available"
)
class TestGitHubAppAuth:
    """Test GitHub App authentication class: init validation, JWT generation,
    token caching/refresh, and error handling (HTTP mocked via httpx)."""
    def test_init_requires_app_id(self):
        """Test that app_id is required."""
        with pytest.raises(GitHubAppAuthenticationError, match="app_id is required"):
            GitHubAppAuth(
                app_id="", private_key=TEST_PRIVATE_KEY, installation_id="123"
            )
    def test_init_requires_private_key(self):
        """Test that private_key is required."""
        with pytest.raises(
            GitHubAppAuthenticationError, match="private_key is required"
        ):
            GitHubAppAuth(app_id="123", private_key="", installation_id="456")
    def test_init_requires_installation_id(self):
        """Test that installation_id is required."""
        with pytest.raises(
            GitHubAppAuthenticationError, match="installation_id is required"
        ):
            GitHubAppAuth(
                app_id="123", private_key=TEST_PRIVATE_KEY, installation_id=""
            )
    def test_init_success(self):
        """Test successful initialization."""
        auth = GitHubAppAuth(
            app_id="123456", private_key=TEST_PRIVATE_KEY, installation_id="789012"
        )
        assert auth.app_id == "123456"
        assert auth.private_key == TEST_PRIVATE_KEY
        assert auth.installation_id == "789012"
        assert auth.base_url == "https://api.github.com"
        # Token cache starts empty.
        assert auth._token_cache is None
        assert auth._token_expires_at == 0
    def test_init_custom_base_url(self):
        """Test initialization with custom base URL (GitHub Enterprise)."""
        auth = GitHubAppAuth(
            app_id="123",
            private_key=TEST_PRIVATE_KEY,
            installation_id="456",
            base_url="https://github.enterprise.com/api/v3",
        )
        assert auth.base_url == "https://github.enterprise.com/api/v3"
    @pytest.mark.skipif(
        condition=TEST_PRIVATE_KEY == "not-a-private-key",
        reason="An SSH private key is not available",
    )
    def test_generate_jwt(self):
        """Test JWT generation."""
        auth = GitHubAppAuth(
            app_id="123456", private_key=TEST_PRIVATE_KEY, installation_id="789012"
        )
        token = auth._generate_jwt()
        # Decode the JWT to verify its contents (signature not checked here)
        decoded = jwt.decode(token, options={"verify_signature": False})
        assert decoded["iss"] == "123456"
        assert "iat" in decoded
        assert "exp" in decoded
        # Check that expiry is approximately 10 minutes from issue time (allow 60s buffer for iat)
        time_diff = decoded["exp"] - decoded["iat"]
        assert 600 <= time_diff <= 660, (
            f"Expected JWT lifespan around 600-660s, got {time_diff}s"
        )
    @pytest.mark.asyncio
    @pytest.mark.skipif(
        condition=TEST_PRIVATE_KEY == "not-a-private-key",
        reason="An SSH private key is not available",
    )
    async def test_get_installation_token_success(self):
        """Test successful installation token retrieval."""
        auth = GitHubAppAuth(
            app_id="123456", private_key=TEST_PRIVATE_KEY, installation_id="789012"
        )
        mock_response = MagicMock()
        mock_response.json.return_value = {"token": "ghs_test_token_123"}
        mock_response.raise_for_status = MagicMock()
        # Patch httpx.AsyncClient so no real network request is made; the mock
        # acts as its own async context manager.
        with patch("httpx.AsyncClient") as mock_client_class:
            mock_client = AsyncMock()
            mock_client.__aenter__.return_value = mock_client
            mock_client.__aexit__.return_value = None
            mock_client.post = AsyncMock(return_value=mock_response)
            mock_client_class.return_value = mock_client
            token = await auth.get_installation_token()
            assert token == "ghs_test_token_123"
            assert auth._token_cache == "ghs_test_token_123"
            assert auth._token_expires_at > time.time()
            # Verify the API call was made correctly
            mock_client.post.assert_called_once()
            call_args = mock_client.post.call_args
            assert (
                call_args[0][0]
                == "https://api.github.com/app/installations/789012/access_tokens"
            )
    @pytest.mark.asyncio
    async def test_get_installation_token_uses_cache(self):
        """Test that cached token is returned when valid."""
        auth = GitHubAppAuth(
            app_id="123456", private_key=TEST_PRIVATE_KEY, installation_id="789012"
        )
        # Set up a cached token that won't expire soon
        auth._token_cache = "cached_token"
        auth._token_expires_at = time.time() + 1000  # Expires in ~16 minutes
        # Should return cached token without making API call
        token = await auth.get_installation_token()
        assert token == "cached_token"
    @pytest.mark.asyncio
    @pytest.mark.skipif(
        condition=TEST_PRIVATE_KEY == "not-a-private-key",
        reason="An SSH private key is not available",
    )
    async def test_get_installation_token_refreshes_expired(self):
        """Test that expired token is refreshed."""
        auth = GitHubAppAuth(
            app_id="123456", private_key=TEST_PRIVATE_KEY, installation_id="789012"
        )
        # Set up an expired cached token
        auth._token_cache = "expired_token"
        auth._token_expires_at = time.time() - 100  # Expired 100 seconds ago
        mock_response = MagicMock()
        mock_response.json.return_value = {"token": "ghs_new_token_456"}
        mock_response.raise_for_status = MagicMock()
        with patch("httpx.AsyncClient") as mock_client_class:
            mock_client = AsyncMock()
            mock_client.__aenter__.return_value = mock_client
            mock_client.__aexit__.return_value = None
            mock_client.post = AsyncMock(return_value=mock_response)
            mock_client_class.return_value = mock_client
            token = await auth.get_installation_token()
            assert token == "ghs_new_token_456"
            assert auth._token_cache == "ghs_new_token_456"
    @pytest.mark.asyncio
    @pytest.mark.skipif(
        condition=TEST_PRIVATE_KEY == "not-a-private-key",
        reason="An SSH private key is not available",
    )
    async def test_get_installation_token_refreshes_when_near_expiry(self):
        """Test that token is refreshed when near expiry (within buffer)."""
        auth = GitHubAppAuth(
            app_id="123456", private_key=TEST_PRIVATE_KEY, installation_id="789012"
        )
        # Set up a token that expires within the buffer period (5 minutes)
        auth._token_cache = "expiring_soon_token"
        auth._token_expires_at = time.time() + 200  # Expires in ~3 minutes
        mock_response = MagicMock()
        mock_response.json.return_value = {"token": "ghs_refreshed_token"}
        mock_response.raise_for_status = MagicMock()
        with patch("httpx.AsyncClient") as mock_client_class:
            mock_client = AsyncMock()
            mock_client.__aenter__.return_value = mock_client
            mock_client.__aexit__.return_value = None
            mock_client.post = AsyncMock(return_value=mock_response)
            mock_client_class.return_value = mock_client
            token = await auth.get_installation_token()
            assert token == "ghs_refreshed_token"
    @pytest.mark.asyncio
    @pytest.mark.skipif(
        condition=TEST_PRIVATE_KEY == "not-a-private-key",
        reason="An SSH private key is not available",
    )
    async def test_get_installation_token_force_refresh(self):
        """Test force refresh of token."""
        auth = GitHubAppAuth(
            app_id="123456", private_key=TEST_PRIVATE_KEY, installation_id="789012"
        )
        # Set up a valid cached token
        auth._token_cache = "valid_token"
        auth._token_expires_at = time.time() + 1000
        mock_response = MagicMock()
        mock_response.json.return_value = {"token": "ghs_forced_refresh_token"}
        mock_response.raise_for_status = MagicMock()
        with patch("httpx.AsyncClient") as mock_client_class:
            mock_client = AsyncMock()
            mock_client.__aenter__.return_value = mock_client
            mock_client.__aexit__.return_value = None
            mock_client.post = AsyncMock(return_value=mock_response)
            mock_client_class.return_value = mock_client
            token = await auth.get_installation_token(force_refresh=True)
            assert token == "ghs_forced_refresh_token"
            # force_refresh must bypass the (still valid) cache and hit the API.
            mock_client.post.assert_called_once()
    @pytest.mark.asyncio
    @pytest.mark.skipif(
        condition=TEST_PRIVATE_KEY == "not-a-private-key",
        reason="An SSH private key is not available",
    )
    async def test_get_installation_token_http_error(self):
        """Test handling of HTTP errors."""
        auth = GitHubAppAuth(
            app_id="123456", private_key=TEST_PRIVATE_KEY, installation_id="789012"
        )
        with patch("httpx.AsyncClient") as mock_client_class:
            mock_client = AsyncMock()
            mock_client.__aenter__.return_value = mock_client
            mock_client.__aexit__.return_value = None
            # Mock HTTP error
            import httpx
            mock_response = MagicMock()
            mock_response.status_code = 401
            mock_response.text = "Unauthorized"
            mock_client.post = AsyncMock(
                side_effect=httpx.HTTPStatusError(
                    "Unauthorized", request=MagicMock(), response=mock_response
                )
            )
            mock_client_class.return_value = mock_client
            with pytest.raises(
                GitHubAppAuthenticationError, match="Failed to get installation token"
            ):
                await auth.get_installation_token()
    def test_is_token_valid(self):
        """Test token validity checking."""
        auth = GitHubAppAuth(
            app_id="123456", private_key=TEST_PRIVATE_KEY, installation_id="789012"
        )
        # No token cached
        assert not auth._is_token_valid()
        # Token expires in ~6.7 minutes (outside the 5-minute buffer, so valid)
        auth._token_cache = "token"
        auth._token_expires_at = time.time() + 400
        assert auth._is_token_valid()  # 400 seconds > 300 seconds buffer
        # Token expires in 10 minutes (well outside buffer, should be valid)
        auth._token_expires_at = time.time() + 600
        assert auth._is_token_valid()
        # Token expires in 4 minutes (within buffer, should be invalid)
        auth._token_expires_at = time.time() + 240
        assert not auth._is_token_valid()
        # Expired token
        auth._token_expires_at = time.time() - 100
        assert not auth._is_token_valid()
    def test_invalidate_token(self):
        """Test token invalidation."""
        auth = GitHubAppAuth(
            app_id="123456", private_key=TEST_PRIVATE_KEY, installation_id="789012"
        )
        # Set up cached token
        auth._token_cache = "some_token"
        auth._token_expires_at = time.time() + 1000
        # Invalidate
        auth.invalidate_token()
        assert auth._token_cache is None
        assert auth._token_expires_at == 0
@pytest.mark.skipif(not HAS_GITHUB_APP_AUTH, reason="GitHub App auth not available")
class TestGithubClientWithAppAuth:
    """Test GithubClient with GitHub App authentication."""
    def test_init_with_pat(self):
        """Test initialization with PAT (backward compatibility)."""
        client = GithubClient(github_token="ghp_test_token")
        assert client._github_token == "ghp_test_token"
        assert not client._use_github_app
        assert client._github_app_auth is None
    def test_init_with_github_app(self):
        """Test initialization with GitHub App auth."""
        app_auth = GitHubAppAuth(
            app_id="123", private_key=TEST_PRIVATE_KEY, installation_id="456"
        )
        client = GithubClient(github_app_auth=app_auth)
        assert client._github_app_auth is app_auth
        assert client._use_github_app
        assert client._github_token is None
    def test_init_with_both_raises_error(self):
        """Test that providing both PAT and GitHub App auth raises error."""
        app_auth = GitHubAppAuth(
            app_id="123", private_key=TEST_PRIVATE_KEY, installation_id="456"
        )
        with pytest.raises(ValueError, match="Cannot provide both"):
            GithubClient(github_token="ghp_token", github_app_auth=app_auth)
    def test_init_with_neither_raises_error(self):
        """Test that providing neither PAT nor GitHub App auth raises error."""
        # Clear the environment so a GITHUB_TOKEN env var can't satisfy the client.
        with patch.dict("os.environ", {}, clear=True):
            with pytest.raises(ValueError, match="Please provide a Github token"):
                GithubClient()
    @pytest.mark.asyncio
    async def test_get_auth_headers_with_pat(self):
        """Test getting auth headers with PAT."""
        client = GithubClient(github_token="ghp_test_token")
        headers = await client._get_auth_headers()
        assert headers["Authorization"] == "Bearer ghp_test_token"
        assert "Accept" in headers
        assert "X-GitHub-Api-Version" in headers
    @pytest.mark.asyncio
    async def test_get_auth_headers_with_github_app(self):
        """Test getting auth headers with GitHub App."""
        app_auth = GitHubAppAuth(
            app_id="123", private_key=TEST_PRIVATE_KEY, installation_id="456"
        )
        # Mock the get_installation_token method so no JWT/network is needed.
        app_auth.get_installation_token = AsyncMock(return_value="ghs_app_token_123")
        client = GithubClient(github_app_auth=app_auth)
        headers = await client._get_auth_headers()
        assert headers["Authorization"] == "Bearer ghs_app_token_123"
        assert "Accept" in headers
        assert "X-GitHub-Api-Version" in headers
        app_auth.get_installation_token.assert_called_once()
@pytest.mark.skipif(not HAS_GITHUB_APP_AUTH, reason="GitHub App auth not available")
class TestIssuesClientWithAppAuth:
    """Test GitHubIssuesClient with GitHub App authentication."""
    @staticmethod
    def _app_auth():
        # Minimal GitHubAppAuth instance shared by the tests in this class.
        return GitHubAppAuth(
            app_id="123", private_key=TEST_PRIVATE_KEY, installation_id="456"
        )
    def test_init_with_github_app(self):
        """Test initialization with GitHub App auth."""
        auth = self._app_auth()
        issues_client = GitHubIssuesClient(github_app_auth=auth)
        assert issues_client._github_app_auth is auth
        assert issues_client._use_github_app
    def test_init_with_both_raises_error(self):
        """Test that providing both PAT and GitHub App auth raises error."""
        with pytest.raises(ValueError, match="Cannot provide both"):
            GitHubIssuesClient(
                github_token="ghp_token", github_app_auth=self._app_auth()
            )
@pytest.mark.skipif(not HAS_GITHUB_APP_AUTH, reason="GitHub App auth not available")
class TestCollaboratorsClientWithAppAuth:
    """Test GitHubCollaboratorsClient with GitHub App authentication."""
    @staticmethod
    def _app_auth():
        # Minimal GitHubAppAuth instance shared by the tests in this class.
        return GitHubAppAuth(
            app_id="123", private_key=TEST_PRIVATE_KEY, installation_id="456"
        )
    def test_init_with_github_app(self):
        """Test initialization with GitHub App auth."""
        auth = self._app_auth()
        collab_client = GitHubCollaboratorsClient(github_app_auth=auth)
        assert collab_client._github_app_auth is auth
        assert collab_client._use_github_app
    def test_init_with_both_raises_error(self):
        """Test that providing both PAT and GitHub App auth raises error."""
        with pytest.raises(ValueError, match="Cannot provide both"):
            GitHubCollaboratorsClient(
                github_token="ghp_token", github_app_auth=self._app_auth()
            )
@pytest.mark.skipif(not HAS_GITHUB_APP_AUTH, reason="GitHub App auth not available")
class TestRepositoryReaderWithAppAuth:
    """Test GithubRepositoryReader with GitHub App authentication."""
    def test_reader_with_github_app_client(self):
        """Test creating reader with GitHub App authenticated client."""
        auth = GitHubAppAuth(
            app_id="123", private_key=TEST_PRIVATE_KEY, installation_id="456"
        )
        app_client = GithubClient(github_app_auth=auth)
        reader = GithubRepositoryReader(
            github_client=app_client, owner="test-owner", repo="test-repo"
        )
        # The reader must hold the exact client instance it was given, with
        # App-based auth still enabled on it.
        assert reader._github_client is app_client
        assert reader._github_client._use_github_app
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-github/tests/test_github_app_auth.py",
"license": "MIT License",
"lines": 376,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/embeddings/llama-index-embeddings-voyageai/tests/test_embeddings_voyageai_integration.py | """
Integration tests for VoyageAI embeddings with batching.
These tests require VOYAGE_API_KEY environment variable to be set.
Run with: pytest tests/test_embeddings_voyageai_integration.py -v
"""
import os
import tempfile
import pytest
from llama_index.embeddings.voyageai import VoyageEmbedding
from llama_index.embeddings.voyageai.base import VIDEO_SUPPORT
# Skip all tests if VOYAGE_API_KEY is not set (these are live-API integration tests)
pytestmark = pytest.mark.skipif(
    "VOYAGE_API_KEY" not in os.environ, reason="VOYAGE_API_KEY not set"
)
# Model names exercised by the tests below.
MODEL = "voyage-3.5"  # default general-purpose model
CONTEXT_MODEL = "voyage-context-3"  # contextual embedding model
VOYAGE_4_MODELS = ["voyage-4", "voyage-4-lite", "voyage-4-large"]
MULTIMODAL_MODEL = "voyage-multimodal-3.5"  # used by the multimodal/video tests
@pytest.mark.parametrize("model", [MODEL, CONTEXT_MODEL, *VOYAGE_4_MODELS])
def test_embedding_single_document(model: str):
    """Test embedding single document."""
    emb = VoyageEmbedding(model_name=model)
    text = "This is a test document."
    result = emb._get_text_embedding(text)
    assert isinstance(result, list)
    assert len(result) > 0
    assert isinstance(result[0], float)
@pytest.mark.parametrize("model", [MODEL, CONTEXT_MODEL, *VOYAGE_4_MODELS])
def test_embedding_multiple_documents(model: str):
    """Test embedding multiple documents."""
    emb = VoyageEmbedding(model_name=model, embed_batch_size=2)
    texts = ["Document 1", "Document 2", "Document 3"]
    result = emb._get_text_embeddings(texts)
    assert len(result) == 3
    # "vec" avoids shadowing the `emb` embedder instance above.
    assert all(isinstance(vec, list) for vec in result)
    # Verify embeddings are different
    assert result[0] != result[1]
@pytest.mark.parametrize("model", [MODEL, CONTEXT_MODEL, *VOYAGE_4_MODELS])
@pytest.mark.asyncio
async def test_async_embedding_multiple_documents(model: str):
    """Test async embedding multiple documents."""
    emb = VoyageEmbedding(model_name=model, embed_batch_size=2)
    texts = ["Document 1", "Document 2", "Document 3"]
    result = await emb._aget_text_embeddings(texts)
    assert len(result) == 3
    assert all(isinstance(vec, list) for vec in result)
@pytest.mark.parametrize("model", [MODEL, CONTEXT_MODEL, *VOYAGE_4_MODELS])
def test_embedding_with_small_batch_size(model: str):
    """Test embedding with small batch size to verify batching works."""
    emb = VoyageEmbedding(model_name=model, embed_batch_size=2)
    texts = [f"Document {i}" for i in range(5)]
    result = emb._get_text_embeddings(texts)
    # Should successfully embed all documents despite small batch size
    assert len(result) == 5
    assert all(isinstance(vec, list) for vec in result)
    # Verify embeddings are unique
    assert result[0] != result[1]
@pytest.mark.parametrize("model", [MODEL, CONTEXT_MODEL, *VOYAGE_4_MODELS])
def test_embedding_empty_list(model: str):
    """Test embedding with empty list."""
    emb = VoyageEmbedding(model_name=model)
    texts = []
    result = emb._get_text_embeddings(texts)
    assert len(result) == 0
    assert isinstance(result, list)
@pytest.mark.parametrize("model", [MODEL, CONTEXT_MODEL, *VOYAGE_4_MODELS])
def test_embedding_consistency(model: str):
    """Test that same text produces same embedding."""
    emb = VoyageEmbedding(model_name=model)
    text = "consistency test text"
    result1 = emb._get_text_embedding(text)
    result2 = emb._get_text_embedding(text)
    # Same text should produce identical embeddings
    assert result1 == result2
def test_embedding_with_output_dimension():
    """Test embedding with custom output dimension."""
    emb = VoyageEmbedding(
        model_name="voyage-3-large", output_dimension=512, embed_batch_size=10
    )
    texts = ["Test document"]
    result = emb._get_text_embeddings(texts)
    assert len(result) == 1
    assert len(result[0]) == 512
def test_context_model_embedding():
    """Test contextual embedding model."""
    emb = VoyageEmbedding(
        model_name="voyage-context-3", output_dimension=512, embed_batch_size=2
    )
    texts = ["Document 1", "Document 2", "Document 3"]
    result = emb._get_text_embeddings(texts)
    assert len(result) == 3
    assert all(len(vec) == 512 for vec in result)
@pytest.mark.asyncio
async def test_context_model_async_embedding():
    """Test async contextual embedding model."""
    emb = VoyageEmbedding(
        model_name="voyage-context-3", output_dimension=512, embed_batch_size=2
    )
    texts = ["Document 1", "Document 2", "Document 3"]
    result = await emb._aget_text_embeddings(texts)
    assert len(result) == 3
    assert all(len(vec) == 512 for vec in result)
@pytest.mark.parametrize("model", [MODEL, CONTEXT_MODEL, *VOYAGE_4_MODELS])
def test_automatic_batching_with_many_documents(model: str):
    """Test automatic batching with many documents."""
    emb = VoyageEmbedding(model_name=model, embed_batch_size=10)
    # Create 25 documents to ensure multiple batches
    texts = [f"Document number {i} with some content." for i in range(25)]
    result = emb._get_text_embeddings(texts)
    assert len(result) == 25
    # "vec" avoids shadowing the `emb` embedder instance above.
    assert all(isinstance(vec, list) for vec in result)
@pytest.mark.parametrize("model", [MODEL, CONTEXT_MODEL, *VOYAGE_4_MODELS])
def test_batching_with_varying_text_lengths(model: str):
    """Test batching with texts of varying lengths."""
    emb = VoyageEmbedding(model_name=model, embed_batch_size=5)
    texts = [
        "Short.",
        "This is a medium length text with some more content.",
        "This is a much longer text that contains significantly more words and should consume more tokens than the previous texts. "
        * 3,
        "Another short one.",
        "Yet another long text with lots of repeated content. " * 5,
    ]
    result = emb._get_text_embeddings(texts)
    assert len(result) == 5
    assert all(isinstance(vec, list) for vec in result)
def test_query_vs_document_embeddings():
    """Test that query and document embeddings are different."""
    emb = VoyageEmbedding(model_name=MODEL)
    text = "test text"
    query_emb = emb._get_query_embedding(text)
    doc_emb = emb._get_text_embedding(text)
    # Query and document embeddings should be different
    assert query_emb != doc_emb
    assert len(query_emb) == len(doc_emb)
@pytest.mark.asyncio
async def test_async_query_vs_document_embeddings():
    """Test async query and document embeddings are different."""
    emb = VoyageEmbedding(model_name=MODEL)
    text = "test text"
    query_emb = await emb._aget_query_embedding(text)
    doc_emb = await emb._aget_text_embedding(text)
    # Query and document embeddings should be different
    assert query_emb != doc_emb
    assert len(query_emb) == len(doc_emb)
@pytest.mark.parametrize("model", [MODEL, CONTEXT_MODEL, *VOYAGE_4_MODELS])
def test_build_batches_with_real_tokenizer(model: str):
    """Test batch building with real tokenizer."""
    emb = VoyageEmbedding(model_name=model, embed_batch_size=10)
    texts = [
        "Short text.",
        "This is a much longer text with many more words.",
        "Another text.",
    ]
    batches = list(emb._build_batches(texts))
    # Verify all texts are included
    total_texts = sum(batch_size for _, batch_size in batches)
    assert total_texts == len(texts)
    # Verify each batch has texts
    for batch_texts, batch_size in batches:
        assert len(batch_texts) == batch_size
        assert batch_size > 0
@pytest.mark.parametrize("model", [MODEL, CONTEXT_MODEL, *VOYAGE_4_MODELS])
@pytest.mark.slow
def test_automatic_batching_with_long_texts(model: str):
    """
    Test automatic batching with many texts that exceed token limits.
    This test is marked as slow because it processes many texts.
    The key is to have MANY texts whose combined tokens exceed the limit,
    not necessarily very long individual texts.
    """
    emb = VoyageEmbedding(model_name=model)
    # Create longer text to ensure we exceed token limits
    # voyage-3.5 has 320k token limit, voyage-context-3 has 32k
    # We'll use different configurations for each model
    if model == CONTEXT_MODEL:
        # ~500 tokens per text × 100 texts = ~50k tokens > 32k limit
        text = "This is a document with some content for testing. " * 40
        num_texts = 100
    else:
        # For voyage-3.5 with 320k limit, we need much more tokens
        # Use longer text (~2000 tokens each) × 200 texts = ~400k tokens
        text = "This is a document with some content for testing purposes. " * 160
        num_texts = 200
    texts = [f"{text} Document {i}." for i in range(num_texts)]
    # Count batches that will be created
    batches = list(emb._build_batches(texts))
    batch_count = len(batches)
    print(f"\nModel: {model}, Texts: {num_texts}, Batches: {batch_count}")
    # Verify multiple batches were created due to token limits
    assert batch_count >= 2, (
        f"Expected at least 2 batches, got {batch_count}. Model: {model}"
    )
    # Now actually embed them (this will take a while)
    result = emb._get_text_embeddings(texts)
    assert len(result) == num_texts
    # "vec" avoids shadowing the `emb` embedder instance above.
    assert all(isinstance(vec, list) for vec in result)
# Integration tests for voyage-multimodal-3.5
def test_multimodal_text_embedding():
    """Test text embedding with multimodal model."""
    emb = VoyageEmbedding(model_name=MULTIMODAL_MODEL)
    text = "This is a test document for multimodal embedding."
    result = emb._get_text_embedding(text)
    assert isinstance(result, list)
    assert len(result) > 0
    assert isinstance(result[0], float)
def test_multimodal_multiple_text_embeddings():
    """Test multiple text embeddings with multimodal model."""
    emb = VoyageEmbedding(model_name=MULTIMODAL_MODEL, embed_batch_size=2)
    texts = ["Document 1", "Document 2", "Document 3"]
    result = emb._get_text_embeddings(texts)
    assert len(result) == 3
    assert all(isinstance(e, list) for e in result)
    # Verify embeddings are different
    assert result[0] != result[1]
def test_multimodal_query_vs_document_embeddings():
    """Test that query and document embeddings are different for multimodal model."""
    emb = VoyageEmbedding(model_name=MULTIMODAL_MODEL)
    text = "test text for multimodal"
    query_emb = emb._get_query_embedding(text)
    doc_emb = emb._get_text_embedding(text)
    # Query and document embeddings should be different
    assert query_emb != doc_emb
    assert len(query_emb) == len(doc_emb)
@pytest.mark.skipif(not VIDEO_SUPPORT, reason="Video support requires voyageai>=0.3.6")
def test_video_embedding():
    """
    Test video embedding with voyage-multimodal-3.5.
    This test downloads a small sample video for testing.
    """
    import urllib.request
    # Download a small sample video (Big Buck Bunny - first few seconds, ~1MB)
    sample_video_url = "https://test-videos.co.uk/vids/bigbuckbunny/mp4/h264/360/Big_Buck_Bunny_360_10s_1MB.mp4"
    # Create the temp file and close the handle immediately: re-opening (or
    # unlinking) the path while a NamedTemporaryFile is still open fails on
    # Windows, which is why delete=False plus manual cleanup is used here.
    tmp = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
    tmp_path = tmp.name
    tmp.close()
    try:
        urllib.request.urlretrieve(sample_video_url, tmp_path)
        emb = VoyageEmbedding(model_name=MULTIMODAL_MODEL)
        result = emb.get_video_embedding(tmp_path)
        assert isinstance(result, list)
        assert len(result) > 0
        assert isinstance(result[0], float)
    finally:
        # Clean up
        if os.path.exists(tmp_path):
            os.unlink(tmp_path)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/embeddings/llama-index-embeddings-voyageai/tests/test_embeddings_voyageai_integration.py",
"license": "MIT License",
"lines": 245,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/embeddings/llama-index-embeddings-isaacus/examples/async_usage.py | """Async usage example for Isaacus embeddings."""
import asyncio
import os
from llama_index.embeddings.isaacus import IsaacusEmbedding
from llama_index.core.base.embeddings.base import similarity
async def main():
    """
    Demonstrate async usage of Isaacus embeddings.

    Walks through single-text, batch, and query/document embedding calls
    using the async API, then prints query-to-document cosine similarities.
    Requires ISAACUS_API_KEY to be set in the environment.
    """
    # Initialize the embedding model. This assumes the presence of ISAACUS_API_KEY
    # in the host environment
    embedding_model = IsaacusEmbedding()
    # Example legal texts to embed
    texts = [
        "The parties hereby agree to the terms and conditions set forth in this contract.",
        "This agreement shall be governed by the laws of the State of California.",
        "Either party may terminate this contract with 30 days written notice.",
        "The confidentiality provisions shall survive termination of this agreement.",
    ]
    print("Generating embeddings asynchronously for legal texts...")
    print()
    # Get embeddings for individual texts asynchronously (sequential awaits)
    for i, text in enumerate(texts):
        embedding = await embedding_model.aget_text_embedding(text)
        print(f"Text {i+1}: {text[:60]}...")
        print(f"  Embedding dimension: {len(embedding)}")
        print(f"  First 5 values: {[f'{x:.4f}' for x in embedding[:5]]}")
        print()
    # Get embeddings for all texts at once asynchronously
    print("Getting batch embeddings asynchronously...")
    all_embeddings = await embedding_model.aget_text_embedding_batch(texts)
    print(f"Generated {len(all_embeddings)} embeddings")
    print()
    # Demonstrate query vs document embeddings with async
    print("Demonstrating async query vs document task optimization...")
    # Create a document embedder
    doc_embedder = IsaacusEmbedding(task="retrieval/document")
    doc_embedding = await doc_embedder.aget_text_embedding(texts[0])
    # Get a query embedding (uses retrieval/query task automatically)
    query = "What are the termination terms?"
    query_embedding = await embedding_model.aget_query_embedding(query)
    print(f"Document embedding dimension: {len(doc_embedding)}")
    print(f"Query embedding dimension: {len(query_embedding)}")
    print()
    # Demonstrate similarity (cosine similarity).
    # NOTE: each document is embedded again here with the document-task
    # embedder; the earlier batch used the default-task model, so those
    # results are intentionally not reused.
    print("Calculating similarities between query and documents...")
    for i, text in enumerate(texts):
        doc_emb = await doc_embedder.aget_text_embedding(text)
        sim = similarity(query_embedding, doc_emb)
        print(f"Similarity to document {i+1}: {sim:.4f}")
        print(f"  Document: {text[:60]}...")
        print()
if __name__ == "__main__":
asyncio.run(main())
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/embeddings/llama-index-embeddings-isaacus/examples/async_usage.py",
"license": "MIT License",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/embeddings/llama-index-embeddings-isaacus/examples/basic_usage.py | """Basic usage example for Isaacus embeddings."""
import os
from llama_index.embeddings.isaacus import IsaacusEmbedding
from llama_index.core.base.embeddings.base import similarity
def main():
    """
    Demonstrate basic usage of Isaacus embeddings.

    Walks through single-text, batch, and query/document embedding calls,
    then prints query-to-document cosine similarities.
    Requires ISAACUS_API_KEY to be set in the environment.
    """
    # Initialize the embedding model. This assumes the presence of ISAACUS_API_KEY
    # in the host environment
    embedding_model = IsaacusEmbedding()
    # Example legal texts to embed
    texts = [
        "The parties hereby agree to the terms and conditions set forth in this contract.",
        "This agreement shall be governed by the laws of the State of California.",
        "Either party may terminate this contract with 30 days written notice.",
        "The confidentiality provisions shall survive termination of this agreement.",
    ]
    print("Generating embeddings for legal texts...")
    print()
    # Get embeddings for individual texts
    for i, text in enumerate(texts):
        embedding = embedding_model.get_text_embedding(text)
        print(f"Text {i+1}: {text[:60]}...")
        print(f"  Embedding dimension: {len(embedding)}")
        print(f"  First 5 values: {[f'{x:.4f}' for x in embedding[:5]]}")
        print()
    # Get embeddings for all texts at once (batch processing)
    print("Getting batch embeddings...")
    all_embeddings = embedding_model.get_text_embedding_batch(texts)
    print(f"Generated {len(all_embeddings)} embeddings")
    print()
    # Demonstrate query vs document embeddings
    print("Demonstrating query vs document task optimization...")
    # Create a document embedder
    doc_embedder = IsaacusEmbedding(task="retrieval/document")
    doc_embedding = doc_embedder.get_text_embedding(texts[0])
    # Get a query embedding (uses retrieval/query task automatically)
    query = "What are the termination terms?"
    query_embedding = embedding_model.get_query_embedding(query)
    print(f"Document embedding dimension: {len(doc_embedding)}")
    print(f"Query embedding dimension: {len(query_embedding)}")
    print()
    # Demonstrate similarity (cosine similarity).
    # NOTE: each document is embedded again here with the document-task
    # embedder; the earlier batch used the default-task model, so those
    # results are intentionally not reused.
    print("Calculating similarities between query and documents...")
    for i, text in enumerate(texts):
        doc_emb = doc_embedder.get_text_embedding(text)
        sim = similarity(query_embedding, doc_emb)
        print(f"Similarity to document {i+1}: {sim:.4f}")
        print(f"  Document: {text[:60]}...")
        print()
if __name__ == "__main__":
main()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/embeddings/llama-index-embeddings-isaacus/examples/basic_usage.py",
"license": "MIT License",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/embeddings/llama-index-embeddings-isaacus/llama_index/embeddings/isaacus/base.py | """Isaacus embeddings file."""
import logging
from typing import Any, List, Literal, Optional
import isaacus
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
Embedding,
)
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
logger = logging.getLogger(__name__)
DEFAULT_ISAACUS_API_BASE = "https://api.isaacus.com/v1"
DEFAULT_ISAACUS_MODEL = "kanon-2-embedder"
class IsaacusEmbedding(BaseEmbedding):
    """
    Isaacus Embeddings Integration.

    This class provides an interface to Isaacus' embedding API, featuring the
    Kanon 2 Embedder - the world's most accurate legal embedding model on the
    Massive Legal Embedding Benchmark (MLEB).

    Args:
        model (str, optional): The model to use. Defaults to "kanon-2-embedder".
        api_key (str, optional): The API key for Isaacus. Defaults to ISAACUS_API_KEY.
        base_url (str, optional): The base URL for Isaacus API. Defaults to ISAACUS_BASE_URL.
        dimensions (int, optional): The desired embedding dimensionality.
        task (str, optional): Task type: "retrieval/query" or "retrieval/document".
        overflow_strategy (str, optional): Strategy for handling overflow. Defaults to "drop_end".
        timeout (float, optional): Timeout for requests in seconds. Defaults to 60.0.
        **kwargs: Additional keyword arguments.

    Environment Variables:
    - ISAACUS_API_KEY: The API key for Isaacus
    - ISAACUS_BASE_URL: The base URL for Isaacus API (optional)

    Raises:
        ValueError: If required environment variables are not set.
    """

    # Pydantic-managed configuration fields.
    model: str = Field(
        default=DEFAULT_ISAACUS_MODEL,
        description="The model to use for embeddings.",
    )
    api_key: Optional[str] = Field(default=None, description="The API key for Isaacus.")
    base_url: Optional[str] = Field(
        default=None, description="The base URL for Isaacus API."
    )
    dimensions: Optional[int] = Field(
        default=None, description="The desired embedding dimensionality."
    )
    task: Optional[Literal["retrieval/query", "retrieval/document"]] = Field(
        default=None,
        description="Task type: 'retrieval/query' or 'retrieval/document'.",
    )
    overflow_strategy: Optional[Literal["drop_end"]] = Field(
        default="drop_end", description="Strategy for handling overflow."
    )
    timeout: float = Field(default=60.0, description="Timeout for requests in seconds.")

    # Sync and async Isaacus SDK clients; private attrs are excluded from the
    # pydantic schema and set in __init__.
    _client: Any = PrivateAttr()
    _aclient: Any = PrivateAttr()

    def __init__(
        self,
        model: str = DEFAULT_ISAACUS_MODEL,
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        dimensions: Optional[int] = None,
        task: Optional[Literal["retrieval/query", "retrieval/document"]] = None,
        overflow_strategy: Optional[Literal["drop_end"]] = "drop_end",
        timeout: float = 60.0,
        embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
        callback_manager: Optional[CallbackManager] = None,
        **kwargs: Any,
    ) -> None:
        """
        Initialize an instance of the IsaacusEmbedding class.

        Args:
            model (str, optional): The model to use. Defaults to "kanon-2-embedder".
            api_key (str, optional): The API key for Isaacus. Defaults to ISAACUS_API_KEY.
            base_url (str, optional): The base URL for Isaacus API.
            dimensions (int, optional): The desired embedding dimensionality.
            task (str, optional): Task type: "retrieval/query" or "retrieval/document".
            overflow_strategy (str, optional): Strategy for handling overflow.
            timeout (float, optional): Timeout for requests in seconds. Defaults to 60.0.
            embed_batch_size (int, optional): Batch size for embedding calls. Defaults to DEFAULT_EMBED_BATCH_SIZE.
            callback_manager (Optional[CallbackManager], optional): Callback manager. Defaults to None.
            **kwargs: Additional keyword arguments.

        Raises:
            ValueError: If no API key is provided via parameter or environment.
        """
        # Get API key from parameter or environment; re-raise with a message
        # that names both configuration options.
        try:
            api_key = get_from_param_or_env(
                "api_key",
                api_key,
                "ISAACUS_API_KEY",
            )
        except ValueError:
            raise ValueError(
                "API key is required. Set ISAACUS_API_KEY environment variable or pass api_key parameter."
            )
        # Get base URL from parameter or environment (optional); fall back to
        # the public Isaacus API base when neither is set.
        if base_url is None:
            try:
                base_url = get_from_param_or_env(
                    "base_url",
                    base_url,
                    "ISAACUS_BASE_URL",
                )
            except ValueError:
                base_url = DEFAULT_ISAACUS_API_BASE
        # Populate pydantic fields (must run before touching private attrs).
        super().__init__(
            model_name=model,
            model=model,
            api_key=api_key,
            base_url=base_url,
            dimensions=dimensions,
            task=task,
            overflow_strategy=overflow_strategy,
            timeout=timeout,
            embed_batch_size=embed_batch_size,
            callback_manager=callback_manager,
            **kwargs,
        )
        # Initialize Isaacus clients (one sync, one async) sharing the same
        # credentials and timeout.
        self._client = isaacus.Isaacus(
            api_key=self.api_key,
            base_url=self.base_url,
            timeout=self.timeout,
        )
        self._aclient = isaacus.AsyncIsaacus(
            api_key=self.api_key,
            base_url=self.base_url,
            timeout=self.timeout,
        )

    @classmethod
    def class_name(cls) -> str:
        """Get class name."""
        return "IsaacusEmbedding"

    def _prepare_request_params(
        self, text: str, task_override: Optional[str] = None
    ) -> dict:
        """
        Prepare request parameters for the Isaacus API.

        NOTE(review): despite the ``str`` annotation, callers also pass a list
        of strings here (see ``_get_text_embeddings``); the value is forwarded
        as-is under the API's ``texts`` key — confirm the API accepts both.
        """
        params = {
            "model": self.model,
            "texts": text,
        }
        # Use task_override if provided, otherwise use instance task
        task_to_use = task_override if task_override is not None else self.task
        if task_to_use is not None:
            params["task"] = task_to_use
        # Optional knobs are only sent when explicitly configured.
        if self.dimensions is not None:
            params["dimensions"] = self.dimensions
        if self.overflow_strategy is not None:
            params["overflow_strategy"] = self.overflow_strategy
        return params

    def _get_query_embedding(self, query: str) -> Embedding:
        """
        Get query embedding.

        Queries always use the 'retrieval/query' task, overriding any
        instance-level ``task`` setting.
        """
        return self._get_text_embedding(query, task_override="retrieval/query")

    def _get_text_embedding(
        self, text: str, task_override: Optional[str] = None
    ) -> Embedding:
        """
        Get text embedding.

        Raises:
            ValueError: If the API call fails or returns no embeddings.
        """
        try:
            params = self._prepare_request_params(text, task_override)
            response = self._client.embeddings.create(**params)
            # Extract the (single) embedding vector from the response.
            if response.embeddings and len(response.embeddings) > 0:
                return response.embeddings[0].embedding
            else:
                raise ValueError("No embeddings returned from API")
        except Exception as e:
            # Normalize any SDK/transport failure into a ValueError for callers.
            logger.error(f"Error while embedding text: {e}")
            raise ValueError(f"Unable to embed text: {e}")

    def _get_text_embeddings(self, texts: List[str]) -> List[Embedding]:
        """
        Get embeddings for multiple texts.

        Note: The Isaacus API supports batch embedding, so we send all texts at once.
        """
        try:
            params = self._prepare_request_params(texts, task_override=self.task)
            response = self._client.embeddings.create(**params)
            # Extract embeddings from response; sort by index so output order
            # matches input order even if the API returns them shuffled.
            embeddings = []
            for emb_obj in sorted(response.embeddings, key=lambda x: x.index):
                embeddings.append(emb_obj.embedding)
            return embeddings
        except Exception as e:
            logger.error(f"Error while embedding texts: {e}")
            raise ValueError(f"Unable to embed texts: {e}")

    async def _aget_query_embedding(self, query: str) -> Embedding:
        """
        Get query embedding asynchronously.

        Queries always use the 'retrieval/query' task, overriding any
        instance-level ``task`` setting.
        """
        return await self._aget_text_embedding(query, task_override="retrieval/query")

    async def _aget_text_embedding(
        self, text: str, task_override: Optional[str] = None
    ) -> Embedding:
        """
        Get text embedding asynchronously.

        Raises:
            ValueError: If the API call fails or returns no embeddings.
        """
        try:
            params = self._prepare_request_params(text, task_override)
            response = await self._aclient.embeddings.create(**params)
            # Extract the (single) embedding vector from the response.
            if response.embeddings and len(response.embeddings) > 0:
                return response.embeddings[0].embedding
            else:
                raise ValueError("No embeddings returned from API")
        except Exception as e:
            logger.error(f"Error while embedding text: {e}")
            raise ValueError(f"Unable to embed text: {e}")

    async def _aget_text_embeddings(self, texts: List[str]) -> List[Embedding]:
        """
        Get embeddings for multiple texts asynchronously.

        Note: The Isaacus API supports batch embedding, so we send all texts at once.
        """
        try:
            params = self._prepare_request_params(texts, task_override=self.task)
            response = await self._aclient.embeddings.create(**params)
            # Extract embeddings from response; sort by index so output order
            # matches input order even if the API returns them shuffled.
            embeddings = []
            for emb_obj in sorted(response.embeddings, key=lambda x: x.index):
                embeddings.append(emb_obj.embedding)
            return embeddings
        except Exception as e:
            logger.error(f"Error while embedding texts: {e}")
            raise ValueError(f"Unable to embed texts: {e}")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/embeddings/llama-index-embeddings-isaacus/llama_index/embeddings/isaacus/base.py",
"license": "MIT License",
"lines": 224,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/embeddings/llama-index-embeddings-isaacus/tests/test_isaacus_embeddings.py | """Test Isaacus embeddings."""
import os
from unittest.mock import MagicMock, patch
import pytest
from llama_index.embeddings.isaacus.base import IsaacusEmbedding
STUB_MODEL = "kanon-2-embedder"
STUB_API_KEY = "test-api-key"
STUB_BASE_URL = "https://api.isaacus.com/v1"
@pytest.fixture(name="isaacus_embedding")
def fixture_isaacus_embedding() -> IsaacusEmbedding:
    """Provide a ready-to-use IsaacusEmbedding built from stub credentials."""
    return IsaacusEmbedding(
        model=STUB_MODEL, api_key=STUB_API_KEY, base_url=STUB_BASE_URL
    )
@pytest.fixture(name="mock_embedding_object")
def fixture_mock_embedding_object() -> MagicMock:
    """Provide a mock of a single API embedding entry (vector + index)."""
    entry = MagicMock()
    entry.embedding = [0.1, 0.2, 0.3, 0.4, 0.5]
    entry.index = 0
    return entry
@pytest.fixture(name="mock_response")
def fixture_mock_response(mock_embedding_object: MagicMock) -> MagicMock:
    """Provide a mock embeddings API response wrapping a single embedding."""
    response = MagicMock()
    response.embeddings = [mock_embedding_object]
    usage = MagicMock()
    usage.input_tokens = 5
    response.usage = usage
    return response
class TestIsaacusEmbedding:
    """Test IsaacusEmbedding class."""

    def test_class_name(self, isaacus_embedding: IsaacusEmbedding) -> None:
        """Test class name (both on the class and on an instance)."""
        assert IsaacusEmbedding.class_name() == "IsaacusEmbedding"
        assert isaacus_embedding.class_name() == "IsaacusEmbedding"

    def test_init_with_parameters(self) -> None:
        """Test initialization with parameters."""
        embedding = IsaacusEmbedding(
            model=STUB_MODEL,
            api_key=STUB_API_KEY,
            base_url=STUB_BASE_URL,
            dimensions=1024,
            task="retrieval/document",
            overflow_strategy="drop_end",
            timeout=30.0,
        )
        assert embedding.model == STUB_MODEL
        assert embedding.api_key == STUB_API_KEY
        assert embedding.base_url == STUB_BASE_URL
        assert embedding.dimensions == 1024
        assert embedding.task == "retrieval/document"
        assert embedding.overflow_strategy == "drop_end"
        assert embedding.timeout == 30.0

    def test_init_with_environment_variables(self) -> None:
        """Test initialization with environment variables."""
        # patch.dict restores os.environ when the context exits.
        with patch.dict(
            os.environ,
            {
                "ISAACUS_API_KEY": STUB_API_KEY,
                "ISAACUS_BASE_URL": STUB_BASE_URL,
            },
        ):
            embedding = IsaacusEmbedding()
            assert embedding.model == STUB_MODEL
            assert embedding.api_key == STUB_API_KEY
            assert embedding.base_url == STUB_BASE_URL

    def test_init_missing_api_key(self) -> None:
        """Test initialization with missing API key."""
        with pytest.raises(ValueError, match="API key is required"):
            IsaacusEmbedding(
                base_url=STUB_BASE_URL,
            )

    def test_get_text_embedding_success(
        self, isaacus_embedding: IsaacusEmbedding, mock_response: MagicMock
    ) -> None:
        """Test successful text embedding."""
        # Patch the sync client's create() so no network call is made.
        with patch.object(
            isaacus_embedding._client.embeddings, "create", return_value=mock_response
        ):
            embedding = isaacus_embedding.get_text_embedding("test text")
            assert embedding == [0.1, 0.2, 0.3, 0.4, 0.5]

    def test_get_text_embedding_with_task(
        self, isaacus_embedding: IsaacusEmbedding, mock_response: MagicMock
    ) -> None:
        """Test text embedding with task parameter."""
        isaacus_embedding.task = "retrieval/document"
        with patch.object(
            isaacus_embedding._client.embeddings, "create", return_value=mock_response
        ) as mock_create:
            embedding = isaacus_embedding.get_text_embedding("test text")
            assert embedding == [0.1, 0.2, 0.3, 0.4, 0.5]
            # Verify task was passed to API
            call_kwargs = mock_create.call_args.kwargs
            assert call_kwargs["task"] == "retrieval/document"

    def test_get_query_embedding_uses_retrieval_query_task(
        self, isaacus_embedding: IsaacusEmbedding, mock_response: MagicMock
    ) -> None:
        """Test that get_query_embedding uses retrieval/query task."""
        with patch.object(
            isaacus_embedding._client.embeddings, "create", return_value=mock_response
        ) as mock_create:
            embedding = isaacus_embedding.get_query_embedding("test query")
            assert embedding == [0.1, 0.2, 0.3, 0.4, 0.5]
            # Verify task was set to retrieval/query
            call_kwargs = mock_create.call_args.kwargs
            assert call_kwargs["task"] == "retrieval/query"

    def test_get_text_embedding_error(
        self, isaacus_embedding: IsaacusEmbedding
    ) -> None:
        """Test text embedding with error (API failure becomes ValueError)."""
        with patch.object(
            isaacus_embedding._client.embeddings,
            "create",
            side_effect=Exception("API error"),
        ):
            with pytest.raises(ValueError, match="Unable to embed text"):
                isaacus_embedding.get_text_embedding("test text")

    def test_get_text_embedding_no_embeddings_returned(
        self, isaacus_embedding: IsaacusEmbedding
    ) -> None:
        """Test text embedding when no embeddings are returned."""
        mock_response = MagicMock()
        mock_response.embeddings = []
        with patch.object(
            isaacus_embedding._client.embeddings, "create", return_value=mock_response
        ):
            # The empty-response ValueError is re-wrapped by the generic handler.
            with pytest.raises(ValueError, match="No embeddings returned from API"):
                isaacus_embedding.get_text_embedding("test text")

    def test_get_text_embeddings_batch(
        self, isaacus_embedding: IsaacusEmbedding
    ) -> None:
        """Test batch text embeddings."""
        # Create mock response with multiple embeddings
        mock_emb1 = MagicMock()
        mock_emb1.embedding = [0.1, 0.2, 0.3]
        mock_emb1.index = 0
        mock_emb2 = MagicMock()
        mock_emb2.embedding = [0.4, 0.5, 0.6]
        mock_emb2.index = 1
        mock_emb3 = MagicMock()
        mock_emb3.embedding = [0.7, 0.8, 0.9]
        mock_emb3.index = 2
        mock_response = MagicMock()
        mock_response.embeddings = [mock_emb1, mock_emb2, mock_emb3]
        texts = ["text1", "text2", "text3"]
        with patch.object(
            isaacus_embedding._client.embeddings, "create", return_value=mock_response
        ):
            embeddings = isaacus_embedding.get_text_embedding_batch(texts)
            assert len(embeddings) == 3
            assert embeddings[0] == [0.1, 0.2, 0.3]
            assert embeddings[1] == [0.4, 0.5, 0.6]
            assert embeddings[2] == [0.7, 0.8, 0.9]

    def test_get_text_embeddings_maintains_order(
        self, isaacus_embedding: IsaacusEmbedding
    ) -> None:
        """Test that batch embeddings maintain correct order."""
        # Create mock response with embeddings out of order
        mock_emb1 = MagicMock()
        mock_emb1.embedding = [0.1, 0.2, 0.3]
        mock_emb1.index = 0
        mock_emb2 = MagicMock()
        mock_emb2.embedding = [0.4, 0.5, 0.6]
        mock_emb2.index = 1
        mock_response = MagicMock()
        # Return embeddings out of order
        mock_response.embeddings = [mock_emb2, mock_emb1]
        texts = ["text1", "text2"]
        with patch.object(
            isaacus_embedding._client.embeddings, "create", return_value=mock_response
        ):
            embeddings = isaacus_embedding.get_text_embedding_batch(texts)
            # Should be sorted by index
            assert embeddings[0] == [0.1, 0.2, 0.3]
            assert embeddings[1] == [0.4, 0.5, 0.6]

    @pytest.mark.asyncio
    async def test_aget_text_embedding_success(
        self, isaacus_embedding: IsaacusEmbedding, mock_response: MagicMock
    ) -> None:
        """Test successful async text embedding."""
        # Patch the async client; MagicMock return values are awaited transparently
        # because aget_text_embedding awaits create() — TODO confirm this relies
        # on the patched attribute not being an AsyncMock.
        with patch.object(
            isaacus_embedding._aclient.embeddings, "create", return_value=mock_response
        ):
            embedding = await isaacus_embedding.aget_text_embedding("test text")
            assert embedding == [0.1, 0.2, 0.3, 0.4, 0.5]

    @pytest.mark.asyncio
    async def test_aget_query_embedding_uses_retrieval_query_task(
        self, isaacus_embedding: IsaacusEmbedding, mock_response: MagicMock
    ) -> None:
        """Test that aget_query_embedding uses retrieval/query task."""
        with patch.object(
            isaacus_embedding._aclient.embeddings, "create", return_value=mock_response
        ) as mock_create:
            embedding = await isaacus_embedding.aget_query_embedding("test query")
            assert embedding == [0.1, 0.2, 0.3, 0.4, 0.5]
            # Verify task was set to retrieval/query
            call_kwargs = mock_create.call_args.kwargs
            assert call_kwargs["task"] == "retrieval/query"

    @pytest.mark.asyncio
    async def test_aget_text_embedding_error(
        self, isaacus_embedding: IsaacusEmbedding
    ) -> None:
        """Test async text embedding with error."""
        with patch.object(
            isaacus_embedding._aclient.embeddings,
            "create",
            side_effect=Exception("API error"),
        ):
            with pytest.raises(ValueError, match="Unable to embed text"):
                await isaacus_embedding.aget_text_embedding("test text")

    @pytest.mark.asyncio
    async def test_aget_text_embeddings_batch(
        self, isaacus_embedding: IsaacusEmbedding
    ) -> None:
        """Test async batch text embeddings."""
        # Create mock response with multiple embeddings
        mock_emb1 = MagicMock()
        mock_emb1.embedding = [0.1, 0.2, 0.3]
        mock_emb1.index = 0
        mock_emb2 = MagicMock()
        mock_emb2.embedding = [0.4, 0.5, 0.6]
        mock_emb2.index = 1
        mock_response = MagicMock()
        mock_response.embeddings = [mock_emb1, mock_emb2]
        texts = ["text1", "text2"]
        with patch.object(
            isaacus_embedding._aclient.embeddings, "create", return_value=mock_response
        ):
            embeddings = await isaacus_embedding.aget_text_embedding_batch(texts)
            assert len(embeddings) == 2
            assert embeddings[0] == [0.1, 0.2, 0.3]
            assert embeddings[1] == [0.4, 0.5, 0.6]

    def test_prepare_request_params_basic(
        self, isaacus_embedding: IsaacusEmbedding
    ) -> None:
        """Test _prepare_request_params with basic parameters."""
        params = isaacus_embedding._prepare_request_params("test text")
        assert params["model"] == STUB_MODEL
        assert params["texts"] == "test text"
        assert "task" not in params  # No task set by default

    def test_prepare_request_params_with_all_options(self) -> None:
        """Test _prepare_request_params with all options set."""
        embedding = IsaacusEmbedding(
            model=STUB_MODEL,
            api_key=STUB_API_KEY,
            base_url=STUB_BASE_URL,
            dimensions=1024,
            task="retrieval/document",
            overflow_strategy="drop_end",
        )
        params = embedding._prepare_request_params("test text")
        assert params["model"] == STUB_MODEL
        assert params["texts"] == "test text"
        assert params["task"] == "retrieval/document"
        assert params["dimensions"] == 1024
        assert params["overflow_strategy"] == "drop_end"

    def test_prepare_request_params_task_override(
        self, isaacus_embedding: IsaacusEmbedding
    ) -> None:
        """Test _prepare_request_params with task override."""
        isaacus_embedding.task = "retrieval/document"
        params = isaacus_embedding._prepare_request_params(
            "test text", task_override="retrieval/query"
        )
        # Override should take precedence
        assert params["task"] == "retrieval/query"

    def test_embedding_dimensions(self, isaacus_embedding: IsaacusEmbedding) -> None:
        """Test that embeddings have the expected dimensions."""
        mock_emb = MagicMock()
        mock_emb.embedding = [0.1] * 1792  # Default Kanon 2 dimension
        mock_emb.index = 0
        mock_response = MagicMock()
        mock_response.embeddings = [mock_emb]
        with patch.object(
            isaacus_embedding._client.embeddings, "create", return_value=mock_response
        ):
            embedding = isaacus_embedding.get_text_embedding("test text")
            assert len(embedding) == 1792
            assert all(isinstance(x, float) for x in embedding)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/embeddings/llama-index-embeddings-isaacus/tests/test_isaacus_embeddings.py",
"license": "MIT License",
"lines": 281,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-helicone/llama_index/llms/helicone/base.py | from typing import Any, Dict, Optional
from llama_index.core.bridge.pydantic import Field
from llama_index.core.constants import (
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.llms.openai_like import OpenAILike
# Default Helicone AI Gateway base. Override with HELICONE_API_BASE if needed.
DEFAULT_API_BASE = "https://ai-gateway.helicone.ai/v1"
# Default model routed via gateway; users may override to any supported provider.
DEFAULT_MODEL = "gpt-4o-mini"
class Helicone(OpenAILike):
    """
    Helicone (OpenAI-compatible) LLM.

    Route OpenAI-compatible requests through Helicone for observability and control.

    Authentication:
    - Set your Helicone API key via the `api_key` parameter or `HELICONE_API_KEY`.
    No OpenAI/third-party provider keys are required when using the AI Gateway.

    Examples:
        `pip install llama-index-llms-helicone`

        ```python
        from llama_index.llms.helicone import Helicone
        from llama_index.llms.openai_like.base import ChatMessage

        llm = Helicone(
            api_key="<helicone-api-key>",
            model="gpt-4o-mini",  # works across providers
        )

        message: ChatMessage = ChatMessage(role="user", content="Hello world!")
        response = llm.chat(messages=[message])
        print(str(response))
        ```
    """

    model: str = Field(
        description=(
            "OpenAI-compatible model name routed via the Helicone AI Gateway. "
            "Learn more about [provider routing](https://docs.helicone.ai/gateway/provider-routing). "
            "All models are visible [here](https://www.helicone.ai/models)."
        )
    )
    api_base: Optional[str] = Field(
        default=DEFAULT_API_BASE,
        description=(
            "Base URL for the Helicone AI Gateway. Can also be set via the "
            "HELICONE_API_BASE environment variable. See the "
            "[Gateway overview](https://docs.helicone.ai/gateway/overview)."
        ),
    )
    api_key: Optional[str] = Field(
        description=(
            "Helicone API key used to authorize requests (Authorization: Bearer). "
            "Provide directly or set via HELICONE_API_KEY. Generate your API key "
            "in the [dashboard settings](https://us.helicone.ai/settings/api-keys). "
        ),
    )
    default_headers: Optional[Dict[str, str]] = Field(
        default=None,
        description=(
            "Additional HTTP headers to include with requests. The Helicone "
            "Authorization header is added automatically from api_key. See "
            "[custom properties](https://docs.helicone.ai/features/advanced-usage/custom-properties)/[headers](https://docs.helicone.ai/helicone-headers/header-directory)."
        ),
    )

    def __init__(
        self,
        model: str = DEFAULT_MODEL,
        temperature: float = DEFAULT_TEMPERATURE,
        max_tokens: int = DEFAULT_NUM_OUTPUTS,
        additional_kwargs: Optional[Dict[str, Any]] = None,
        max_retries: int = 5,
        api_base: Optional[str] = DEFAULT_API_BASE,
        api_key: Optional[str] = None,
        default_headers: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> None:
        """
        Initialize the Helicone LLM.

        Args:
            model: OpenAI-compatible model name routed via the gateway.
            temperature: Sampling temperature.
            max_tokens: Maximum number of tokens to generate.
            additional_kwargs: Extra kwargs forwarded to the underlying API calls.
            max_retries: Number of retries for failed requests.
            api_base: Gateway base URL (falls back to HELICONE_API_BASE).
            api_key: Helicone API key (falls back to HELICONE_API_KEY).
            default_headers: Extra HTTP headers; an Authorization header derived
                from ``api_key`` is always added.
            **kwargs: Additional keyword arguments passed to OpenAILike.

        Raises:
            ValueError: If api_base/api_key are not provided via parameter or
                environment variable.
        """
        additional_kwargs = additional_kwargs or {}
        api_base = get_from_param_or_env("api_base", api_base, "HELICONE_API_BASE")
        api_key = get_from_param_or_env("api_key", api_key, "HELICONE_API_KEY")

        # Build a fresh dict instead of mutating the caller's mapping in place;
        # the Authorization entry always wins over any user-supplied value.
        default_headers = {
            **(default_headers or {}),
            "Authorization": f"Bearer {api_key}",
        }

        super().__init__(
            model=model,
            temperature=temperature,
            max_tokens=max_tokens,
            api_base=api_base,
            default_headers=default_headers,
            additional_kwargs=additional_kwargs,
            max_retries=max_retries,
            **kwargs,
        )

    @classmethod
    def class_name(cls) -> str:
        """Return the serialized class identifier."""
        return "Helicone_LLM"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-helicone/llama_index/llms/helicone/base.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-helicone/tests/test_llms_helicone.py | from unittest.mock import MagicMock
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.llms.helicone import Helicone
from llama_index.llms.helicone.base import DEFAULT_API_BASE, DEFAULT_MODEL
def test_llm_class_inheritance():
    """Helicone must sit somewhere in the BaseLLM class hierarchy."""
    assert BaseLLM.__name__ in [cls.__name__ for cls in Helicone.__mro__]
def test_class_name():
    """class_name() identifies the integration for serialization."""
    expected = "Helicone_LLM"
    assert Helicone.class_name() == expected
def test_default_model_and_api_base(monkeypatch):
    """With no env overrides, defaults and the auth header are applied."""
    for var in ("HELICONE_API_BASE", "HELICONE_API_KEY"):
        monkeypatch.delenv(var, raising=False)
    llm = Helicone(api_key="test_key")
    assert llm.model == DEFAULT_MODEL
    assert llm.api_base == DEFAULT_API_BASE
    assert llm.default_headers == {"Authorization": "Bearer test_key"}
def test_env_override_api_base_and_key(monkeypatch):
    """Environment variables supply api_base/api_key when params are None."""
    monkeypatch.setenv("HELICONE_API_BASE", "https://example.com/v1")
    monkeypatch.setenv("HELICONE_API_KEY", "env_key")
    # Explicit None forces get_from_param_or_env to fall back to the env vars.
    llm = Helicone(api_base=None, api_key=None)
    assert llm.api_base == "https://example.com/v1"
    headers = llm.default_headers
    assert headers is not None
    assert headers.get("Authorization") == "Bearer env_key"
def test_user_headers_are_merged_with_auth():
    """User-supplied headers survive and gain the Authorization entry."""
    llm = Helicone(api_key="abc123", default_headers={"X-Existing": "1"})
    merged = llm.default_headers
    assert merged is not None
    assert merged.get("X-Existing") == "1"
    assert merged.get("Authorization") == "Bearer abc123"
    # Only merged content is validated; object identity is not guaranteed.
def test_explicit_api_base_param_overrides_env(monkeypatch):
    """An explicit api_base argument takes precedence over the env var."""
    monkeypatch.setenv("HELICONE_API_BASE", "https://env.example/v1")
    llm = Helicone(api_key="k", api_base="https://param.example/v1")
    assert llm.api_base == "https://param.example/v1"
def test_additional_kwargs_passthrough():
    """additional_kwargs are stored unchanged on the instance."""
    payload = {"foo": "bar"}
    assert Helicone(api_key="k", additional_kwargs=payload).additional_kwargs == payload
def test_temperature_and_max_tokens_initialization():
    """The constructor propagates sampling parameters."""
    llm = Helicone(api_key="test_key", temperature=0.5, max_tokens=100)
    assert (llm.temperature, llm.max_tokens) == (0.5, 100)
def test_max_retries_initialization():
    """The constructor propagates the retry budget."""
    assert Helicone(api_key="test_key", max_retries=10).max_retries == 10
# Mock-based tests for LLM methods
def _create_mock_completion_response(text: str):
"""Helper to create a mock OpenAI completion response."""
class FakeCompletionChoice:
def __init__(self, text: str):
self.text = text
self.logprobs = None
class FakeUsage:
def __init__(self):
self.prompt_tokens = 5
self.completion_tokens = 10
self.total_tokens = 15
class FakeCompletionResponse:
def __init__(self, text: str):
self.choices = [FakeCompletionChoice(text)]
self.usage = FakeUsage()
return FakeCompletionResponse(text)
def test_complete_method():
    """complete() returns text produced by the mocked completions endpoint."""
    client = MagicMock()
    client.completions.create.return_value = _create_mock_completion_response(
        "This is a test completion"
    )
    llm = Helicone(
        api_key="test_key",
        api_base="https://example.com/v1",
        openai_client=client,
    )
    result = llm.complete("Test prompt")
    assert hasattr(result, "text")
    assert "test completion" in result.text.lower()
    client.completions.create.assert_called_once()
def test_complete_with_custom_parameters():
    """complete() forwards temperature and max_tokens to the API call."""
    client = MagicMock()
    client.completions.create.return_value = _create_mock_completion_response(
        "Response"
    )
    llm = Helicone(
        api_key="test_key",
        temperature=0.7,
        max_tokens=50,
        openai_client=client,
    )
    llm.complete("Test")
    # Inspect the kwargs of the single create() call.
    sent = client.completions.create.call_args[1]
    assert sent["temperature"] == 0.7
    assert sent["max_tokens"] == 50
def test_chat_method():
    """chat() is served via the mocked completions endpoint.

    OpenAILike routes chat() through complete() internally, so the mock is
    installed on completions.create rather than a chat endpoint.
    """
    client = MagicMock()
    client.completions.create.return_value = _create_mock_completion_response(
        "This is a chat response"
    )
    llm = Helicone(
        api_key="test_key",
        api_base="https://example.com/v1",
        openai_client=client,
    )
    response = llm.chat([ChatMessage(role=MessageRole.USER, content="Hello!")])
    assert hasattr(response, "message")
    assert response.message.content is not None
    assert "chat response" in response.message.content.lower()
    client.completions.create.assert_called_once()
def test_stream_complete_method():
    """stream_complete() requests a streaming completion from the client."""
    client = MagicMock()
    llm = Helicone(
        api_key="test_key",
        api_base="https://example.com/v1",
        openai_client=client,
    )
    # Full streaming behavior is complex to mock and better covered by
    # integration tests; here we only verify the call shape, falling back to
    # an existence check if the mock interaction raises.
    try:
        llm.stream_complete("Test prompt")
        client.completions.create.assert_called_once()
        assert client.completions.create.call_args[1].get("stream") is True
    except Exception:
        assert hasattr(llm, "stream_complete")
def test_stream_chat_method():
    """stream_chat() requests a streaming completion from the client."""
    client = MagicMock()
    llm = Helicone(
        api_key="test_key",
        api_base="https://example.com/v1",
        openai_client=client,
    )
    # Full streaming behavior is complex to mock and better covered by
    # integration tests; here we only verify the call shape, falling back to
    # an existence check if the mock interaction raises.
    try:
        llm.stream_chat([ChatMessage(role=MessageRole.USER, content="Hello!")])
        client.completions.create.assert_called_once()
        assert client.completions.create.call_args[1].get("stream") is True
    except Exception:
        assert hasattr(llm, "stream_chat")
def test_model_name_property():
    """The configured model is exposed via the model attribute."""
    assert Helicone(api_key="test_key", model="gpt-4o").model == "gpt-4o"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-helicone/tests/test_llms_helicone.py",
"license": "MIT License",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-experimental/llama_index/experimental/query_engine/polars/output_parser.py | """Polars output parser."""
import ast
import logging
import sys
import traceback
from typing import Any, Dict, Optional
import numpy as np
import polars as pl
from llama_index.core.output_parsers import BaseOutputParser
from llama_index.core.output_parsers.utils import parse_code_markdown
from llama_index.experimental.exec_utils import safe_eval, safe_exec
logger = logging.getLogger(__name__)
def default_output_processor(
    output: str, df: pl.DataFrame, **output_kwargs: Any
) -> str:
    """
    Execute LLM-emitted Polars instructions against ``df`` and return the result.

    All statements except the last are executed for their side effects; the
    final expression is evaluated and its string form returned.

    Args:
        output: Raw LLM output, possibly wrapped in markdown code fences.
        df: The DataFrame the instructions operate on (bound as ``df``).
        **output_kwargs: Display options. ``max_rows`` truncates a DataFrame
            result longer than that to a head/tail preview.

    Returns:
        The string form of the evaluated final expression, or an error
        message string if parsing/execution failed.
    """
    # ast.unparse (used below) requires Python >= 3.9.
    if sys.version_info < (3, 9):
        logger.warning(
            "Python version must be >= 3.9 in order to use "
            "the default output processor, which executes "
            "the Python query. Instead, we will return the "
            "raw Python instructions as a string."
        )
        return output

    local_vars = {"df": df, "pl": pl}
    global_vars = {"np": np}

    output = parse_code_markdown(output, only_last=True)
    if not isinstance(output, str):
        output = output[0]

    # NOTE: inspired from langchain's tool
    # see langchain.tools.python.tool (PythonAstREPLTool)
    try:
        tree = ast.parse(output)
        # Execute everything except the final statement for side effects.
        module = ast.Module(tree.body[:-1], type_ignores=[])
        safe_exec(ast.unparse(module), {}, local_vars)  # type: ignore
        module_end = ast.Module(tree.body[-1:], type_ignores=[])
        module_end_str = ast.unparse(module_end)  # type: ignore
        if module_end_str.strip("'\"") != module_end_str:
            # if there's leading/trailing quotes, then we need to eval
            # string to get the actual expression
            module_end_str = safe_eval(module_end_str, global_vars, local_vars)
        result = safe_eval(module_end_str, global_vars, local_vars)

        # Polars has no global display options like pandas; truncate large
        # DataFrame results manually when max_rows is requested.
        if (
            isinstance(result, pl.DataFrame)
            and "max_rows" in output_kwargs
            and len(result) > output_kwargs["max_rows"]
        ):
            max_rows = output_kwargs["max_rows"]
            head_rows = max_rows // 2
            tail_rows = max_rows - head_rows
            return (
                str(result.head(head_rows)) + "\n...\n" + str(result.tail(tail_rows))
            )
        return str(result)
    except Exception as e:
        err_string = (
            f"There was an error running the output as Python code. Error message: {e}"
        )
        traceback.print_exc()
        return err_string
class PolarsInstructionParser(BaseOutputParser):
    """
    Output parser that executes Polars instructions.

    Takes LLM-generated Polars code (a Python string), runs it against the
    configured dataframe via ``default_output_processor``, and returns the
    stringified result.
    """

    def __init__(
        self, df: pl.DataFrame, output_kwargs: Optional[Dict[str, Any]] = None
    ) -> None:
        """Store the target dataframe and optional output-processor kwargs."""
        self.df = df
        self.output_kwargs = {} if not output_kwargs else output_kwargs

    def parse(self, output: str) -> Any:
        """Execute ``output`` against the stored dataframe and return the result."""
        kwargs = self.output_kwargs
        return default_output_processor(output, self.df, **kwargs)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-experimental/llama_index/experimental/query_engine/polars/output_parser.py",
"license": "MIT License",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-experimental/llama_index/experimental/query_engine/polars/polars_query_engine.py | """
Default query for PolarsIndex.
WARNING: This tool provides the LLM with access to the `eval` function.
Arbitrary code execution is possible on the machine running this tool.
This tool is not recommended to be used in a production setting, and would
require heavy sandboxing or virtual machines
"""
import logging
from typing import Any, Dict, Optional
import polars as pl
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.response.schema import Response
from llama_index.core.llms.llm import LLM
from llama_index.core.prompts import BasePromptTemplate, PromptTemplate
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from llama_index.core.schema import QueryBundle
from llama_index.core.settings import Settings
from llama_index.core.utils import print_text
from llama_index.experimental.query_engine.polars.prompts import DEFAULT_POLARS_PROMPT
from llama_index.experimental.query_engine.polars.output_parser import (
PolarsInstructionParser,
)
logger = logging.getLogger(__name__)

# Instructions injected into the Polars prompt: constrain the LLM to emit a
# single eval()-able Polars expression (see the output parser, which executes
# the final line with eval).
DEFAULT_INSTRUCTION_STR = (
    "1. Convert the query to executable Python code using Polars.\n"
    "2. The final line of code should be a Python expression that can be called with the `eval()` function.\n"
    "3. The code should represent a solution to the query.\n"
    "4. PRINT ONLY THE EXPRESSION.\n"
    "5. Do not quote the expression.\n"
)

# **NOTE**: newer version of sql query engine
# Template used to synthesize a natural-language answer from the raw Polars
# output when synthesize_response=True.
DEFAULT_RESPONSE_SYNTHESIS_PROMPT_TMPL = (
    "Given an input question, synthesize a response from the query results.\n"
    "Query: {query_str}\n\n"
    "Polars Instructions (optional):\n{polars_instructions}\n\n"
    "Polars Output: {polars_output}\n\n"
    "Response: "
)
DEFAULT_RESPONSE_SYNTHESIS_PROMPT = PromptTemplate(
    DEFAULT_RESPONSE_SYNTHESIS_PROMPT_TMPL,
)
class PolarsQueryEngine(BaseQueryEngine):
    """
    Polars query engine.

    Convert natural language to Polars python code.

    WARNING: This tool provides the Agent access to the `eval` function.
    Arbitrary code execution is possible on the machine running this tool.
    This tool is not recommended to be used in a production setting, and would
    require heavy sandboxing or virtual machines

    Args:
        df (pl.DataFrame): Polars dataframe to use.
        instruction_str (Optional[str]): Instruction string to use.
        instruction_parser (Optional[PolarsInstructionParser]): The output parser
            that takes the polars query output string and returns a string.
            It defaults to PolarsInstructionParser and takes polars DataFrame,
            and any output kwargs as parameters.
        polars_prompt (Optional[BasePromptTemplate]): Polars prompt to use.
        output_kwargs (dict): Additional output processor kwargs for the
            PolarsInstructionParser.
        head (int): Number of rows to show in the table context.
        verbose (bool): Whether to print verbose output.
        llm (Optional[LLM]): Language model to use.
        synthesize_response (bool): Whether to synthesize a response from the
            query results. Defaults to False.
        response_synthesis_prompt (Optional[BasePromptTemplate]): A
            Response Synthesis BasePromptTemplate to use for the query. Defaults to
            DEFAULT_RESPONSE_SYNTHESIS_PROMPT.

    Examples:
        `pip install llama-index-experimental polars`

        ```python
        import polars as pl
        from llama_index.experimental.query_engine.polars import PolarsQueryEngine

        df = pl.DataFrame(
            {
                "city": ["Toronto", "Tokyo", "Berlin"],
                "population": [2930000, 13960000, 3645000]
            }
        )

        query_engine = PolarsQueryEngine(df=df, verbose=True)

        response = query_engine.query("What is the population of Tokyo?")
        ```

    """

    def __init__(
        self,
        df: pl.DataFrame,
        instruction_str: Optional[str] = None,
        instruction_parser: Optional[PolarsInstructionParser] = None,
        polars_prompt: Optional[BasePromptTemplate] = None,
        output_kwargs: Optional[dict] = None,
        head: int = 5,
        verbose: bool = False,
        llm: Optional[LLM] = None,
        synthesize_response: bool = False,
        response_synthesis_prompt: Optional[BasePromptTemplate] = None,
        **kwargs: Any,
    ) -> None:
        """Initialize params."""
        self._df = df
        self._head = head
        self._polars_prompt = polars_prompt or DEFAULT_POLARS_PROMPT
        self._instruction_str = instruction_str or DEFAULT_INSTRUCTION_STR
        self._instruction_parser = instruction_parser or PolarsInstructionParser(
            df, output_kwargs or {}
        )
        self._verbose = verbose
        self._llm = llm or Settings.llm
        self._synthesize_response = synthesize_response
        self._response_synthesis_prompt = (
            response_synthesis_prompt or DEFAULT_RESPONSE_SYNTHESIS_PROMPT
        )
        super().__init__(callback_manager=Settings.callback_manager)

    def _get_prompt_modules(self) -> PromptMixinType:
        """Get prompt sub-modules."""
        return {}

    def _get_prompts(self) -> Dict[str, Any]:
        """Get prompts."""
        return {
            "polars_prompt": self._polars_prompt,
            "response_synthesis_prompt": self._response_synthesis_prompt,
        }

    def _update_prompts(self, prompts: PromptDictType) -> None:
        """Update prompts."""
        if "polars_prompt" in prompts:
            self._polars_prompt = prompts["polars_prompt"]
        if "response_synthesis_prompt" in prompts:
            self._response_synthesis_prompt = prompts["response_synthesis_prompt"]

    def _get_table_context(self) -> str:
        """Get table context (stringified preview of the first `head` rows)."""
        return str(self._df.head(self._head))

    def _execute_instructions(self, polars_response_str: str) -> Any:
        """
        Execute LLM-generated Polars instructions and return the raw output.

        Shared by the sync and async query paths (previously duplicated in
        `_query` and `_aquery`); handles verbose printing around parsing.
        """
        if self._verbose:
            print_text(f"> Polars Instructions:\n```\n{polars_response_str}\n```\n")
        polars_output = self._instruction_parser.parse(polars_response_str)
        if self._verbose:
            print_text(f"> Polars Output: {polars_output}\n")
        return polars_output

    def _query(self, query_bundle: QueryBundle) -> Response:
        """Answer a query."""
        context = self._get_table_context()

        # Ask the LLM to translate the query into Polars instructions.
        polars_response_str = self._llm.predict(
            self._polars_prompt,
            df_str=context,
            query_str=query_bundle.query_str,
            instruction_str=self._instruction_str,
        )
        polars_output = self._execute_instructions(polars_response_str)

        response_metadata = {
            "polars_instruction_str": polars_response_str,
            "raw_polars_output": polars_output,
        }
        if self._synthesize_response:
            # Second LLM pass: turn the raw output into a natural-language answer.
            response_str = str(
                self._llm.predict(
                    self._response_synthesis_prompt,
                    query_str=query_bundle.query_str,
                    polars_instructions=polars_response_str,
                    polars_output=polars_output,
                )
            )
        else:
            response_str = str(polars_output)

        return Response(response=response_str, metadata=response_metadata)

    async def _aquery(self, query_bundle: QueryBundle) -> Response:
        """Answer a query asynchronously."""
        context = self._get_table_context()

        polars_response_str = await self._llm.apredict(
            self._polars_prompt,
            df_str=context,
            query_str=query_bundle.query_str,
            instruction_str=self._instruction_str,
        )
        polars_output = self._execute_instructions(polars_response_str)

        response_metadata = {
            "polars_instruction_str": polars_response_str,
            "raw_polars_output": polars_output,
        }
        if self._synthesize_response:
            response_str = str(
                await self._llm.apredict(
                    self._response_synthesis_prompt,
                    query_str=query_bundle.query_str,
                    polars_instructions=polars_response_str,
                    polars_output=polars_output,
                )
            )
        else:
            response_str = str(polars_output)

        return Response(response=response_str, metadata=response_metadata)
# Legacy aliases kept for backward compatibility with older import paths.
NLPolarsQueryEngine = PolarsQueryEngine
GPTNLPolarsQueryEngine = PolarsQueryEngine
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-experimental/llama_index/experimental/query_engine/polars/polars_query_engine.py",
"license": "MIT License",
"lines": 192,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-experimental/llama_index/experimental/query_engine/polars/prompts.py | from llama_index.core.prompts import PromptTemplate, PromptType
############################################
# Polars
############################################
# Prompt asking the LLM to translate a natural-language query into a single
# executable Polars expression. Template variables: {df_str} (a df.head()
# preview), {instruction_str} (see DEFAULT_INSTRUCTION_STR), {query_str}.
DEFAULT_POLARS_TMPL = (
    "You are working with a polars dataframe in Python.\n"
    "The name of the dataframe is `df`.\n"
    "This is the result of `print(df.head())`:\n"
    "{df_str}\n\n"
    "Follow these instructions:\n"
    "{instruction_str}\n"
    "Query: {query_str}\n\n"
    "CRITICAL Polars syntax rules - follow these exactly:\n"
    "- ALWAYS start with 'df.' - NEVER start with pl.col() alone\n"
    "- pl.col() creates expressions that must be used INSIDE DataFrame methods\n"
    "- pl.col() expressions do NOT have .to_frame() method - this does not exist!\n"
    "- For selecting: df.select([pl.col('col1'), pl.col('col2')])\n"
    "- For filtering: df.filter(pl.col('col1') > 10)\n"
    "- For sorting: df.sort('column', descending=True) or df.sort(pl.col('column'), descending=True)\n"
    "- For grouping: df.group_by('col1').agg([pl.col('col2').sum()]) NOT .groupby()\n"
    "- For limiting: df.limit(5) or df.head(5)\n"
    "- For aliasing: pl.col('old_name').alias('new_name') (only inside select/agg)\n"
    "- WRONG: pl.col('company').to_frame() - this does NOT exist\n"
    "- CORRECT: df.select([pl.col('company'), pl.col('revenue')])\n"
    "- Always use complete, executable expressions that return a dataframe\n"
    "- Do NOT assign to variables - return the final dataframe expression\n"
    "- Keep expressions simple - avoid complex multi-line chaining\n"
    "- Example: df.select([pl.col('company'), pl.col('revenue')]).sort('revenue', descending=True).limit(5)\n\n"
    "Generate a single Polars expression that answers the query. Return ONLY the code, no explanations:\n"
)

DEFAULT_POLARS_PROMPT = PromptTemplate(
    DEFAULT_POLARS_TMPL,
    prompt_type=PromptType.PANDAS,  # Reusing PANDAS type as there's no POLARS type
)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-experimental/llama_index/experimental/query_engine/polars/prompts.py",
"license": "MIT License",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-experimental/tests/test_polars.py | """Test polars index."""
import sys
from pathlib import Path
from typing import Any, Dict, cast
import polars as pl
import pytest
from llama_index.core.base.response.schema import Response
from llama_index.core.indices.query.schema import QueryBundle
from llama_index.core.llms.mock import MockLLM
from llama_index.experimental.query_engine.polars.prompts import DEFAULT_POLARS_PROMPT
from llama_index.experimental.query_engine.polars.output_parser import (
PolarsInstructionParser,
)
from llama_index.experimental.query_engine.polars.polars_query_engine import (
PolarsQueryEngine,
)
def _mock_predict(*args: Any, **kwargs: Any) -> str:
"""Mock predict."""
query_str = kwargs["query_str"]
# Return Polars-style syntax for the mock
return f'df.select(pl.col("{query_str}"))'
def test_polars_query_engine(monkeypatch: pytest.MonkeyPatch) -> None:
    """Test polars query engine."""
    # Replace MockLLM.predict so the "LLM" deterministically returns a
    # Polars select over the column named by the query string.
    monkeypatch.setattr(MockLLM, "predict", _mock_predict)
    llm = MockLLM()

    # Test on some sample data
    df = pl.DataFrame(
        {
            "city": ["Toronto", "Tokyo", "Berlin"],
            "population": [2930000, 13960000, 3645000],
            "description": [
                """Toronto, Canada's largest city, is a vibrant and diverse metropolis situated in the province of Ontario.
Known for its iconic skyline featuring the CN Tower, Toronto is a cultural melting pot with a rich blend of communities, languages, and cuisines.
It boasts a thriving arts scene, world-class museums, and a strong economic hub.
Visitors can explore historic neighborhoods, such as Kensington Market and Distillery District, or enjoy beautiful natural surroundings on Toronto Islands.
With its welcoming atmosphere, top-notch education, and multicultural charm, Toronto is a global destination for both tourists and professionals alike.""",
                "A city",
                "Another City",
            ],
        }
    )
    # the mock prompt just takes the all items in the given column
    query_engine = PolarsQueryEngine(df, llm=llm, verbose=True)
    response = query_engine.query(QueryBundle("population"))
    assert isinstance(response, Response)
    if sys.version_info < (3, 9):
        # On < 3.9 the output processor returns the raw instructions unexecuted.
        assert str(response) == 'df.select(pl.col("population"))'
    else:
        expected_output = str(df.select(pl.col("population")))
        assert str(response) == expected_output
    metadata = cast(Dict[str, Any], response.metadata)
    assert metadata["polars_instruction_str"] == 'df.select(pl.col("population"))'

    # Exercise the max_rows output option (truncated display for large frames).
    query_engine = PolarsQueryEngine(
        df,
        llm=llm,
        verbose=True,
        output_kwargs={"max_rows": 10},
    )
    response = query_engine.query(QueryBundle("description"))
    if sys.version_info < (3, 9):
        assert str(response) == 'df.select(pl.col("description"))'
    else:
        expected_output = str(df.select(pl.col("description")))
        assert str(response) == expected_output

    # test get prompts
    prompts = query_engine.get_prompts()
    assert prompts["polars_prompt"] == DEFAULT_POLARS_PROMPT
def test_default_output_processor_rce(tmp_path: Path) -> None:
    """
    Test that output processor prevents RCE.
    https://github.com/run-llama/llama_index/issues/7054 .
    """
    frame = pl.DataFrame(
        {
            "city": ["Toronto", "Tokyo", "Berlin"],
            "population": [2930000, 13960000, 3645000],
        }
    )
    parser = PolarsInstructionParser(df=frame)

    # If the sandbox is broken, this payload would create the marker file.
    marker = tmp_path / "pwnnnnn"
    payload = f"__import__('os').system('touch {marker}')"
    parser.parse(payload)

    assert not marker.is_file(), "file has been created via RCE!"
@pytest.mark.skipif(sys.version_info < (3, 9), reason="Requires Python 3.9 or higher")
def test_default_output_processor_rce2() -> None:
    """
    Test that output processor prevents RCE.
    https://github.com/run-llama/llama_index/issues/7054#issuecomment-1829141330 .
    """
    df = pl.DataFrame(
        {
            "city": ["Toronto", "Tokyo", "Berlin"],
            "population": [2930000, 13960000, 3645000],
        }
    )

    # Test various RCE attempts
    parser = PolarsInstructionParser(df=df)

    # Test malicious code injection attempts
    malicious_codes = [
        "__import__('subprocess').call(['echo', 'pwned'])",
        "exec('import os; os.system(\"echo pwned\")')",
        'eval(\'__import__("os").system("echo pwned")\')',
        "open('/etc/passwd').read()",
        "__builtins__.__dict__['eval']('print(\"pwned\")')",
    ]

    for malicious_code in malicious_codes:
        try:
            result = parser.parse(malicious_code)
            # The result should contain an error message about forbidden access
            assert "error" in str(result).lower() or "forbidden" in str(result).lower()
        except Exception:
            # Any exception is fine as it means the code was blocked
            pass
@pytest.mark.skipif(sys.version_info < (3, 9), reason="Requires Python 3.9 or higher")
def test_default_output_processor_e2e(tmp_path: Path) -> None:
    """Test end-to-end functionality with real Polars operations."""
    # NOTE(review): the tmp_path fixture appears unused in this test.
    df = pl.DataFrame(
        {
            "city": ["Toronto", "Tokyo", "Berlin"],
            "population": [2930000, 13960000, 3645000],
            "country": ["Canada", "Japan", "Germany"],
        }
    )

    parser = PolarsInstructionParser(df=df)

    # Test valid Polars operations
    valid_operations = [
        "df.select(pl.col('city'))",
        "df.filter(pl.col('population') > 5000000)",
        "df.with_columns(pl.col('population').alias('pop'))",
        "df.group_by('country').agg(pl.col('population').sum())",
        "df.head(2)",
        "df.select([pl.col('city'), pl.col('population')])",
    ]

    for operation in valid_operations:
        try:
            result = parser.parse(operation)
            # Should not contain error messages
            assert "error" not in str(result).lower()
        except Exception as e:
            # If there's an exception, it should be a valid execution error, not security-related
            assert "forbidden" not in str(e).lower()
            assert "private" not in str(e).lower()
def test_polars_query_engine_complex_operations() -> None:
    """Test PolarsQueryEngine with more complex operations."""
    df = pl.DataFrame(
        {
            "name": ["Alice", "Bob", "Charlie", "Diana"],
            "age": [25, 30, 35, 28],
            "salary": [50000, 60000, 70000, 55000],
            "department": ["Engineering", "Sales", "Engineering", "Sales"],
        }
    )

    # Mock LLM that returns complex Polars operations
    class ComplexMockLLM(MockLLM):
        # Keyword-based dispatch: map phrases in the query to canned Polars code.
        def predict(self, *args, **kwargs):
            query_str = kwargs.get("query_str", "")
            if "average salary" in query_str.lower():
                return "df.select(pl.col('salary').mean())"
            elif "engineering" in query_str.lower():
                return "df.filter(pl.col('department') == 'Engineering')"
            elif "group by" in query_str.lower():
                return "df.group_by('department').agg(pl.col('salary').mean())"
            else:
                return "df.head()"

    llm = ComplexMockLLM()
    query_engine = PolarsQueryEngine(df, llm=llm, verbose=True)

    # Test average salary query
    response = query_engine.query(QueryBundle("What is the average salary?"))
    if sys.version_info >= (3, 9):
        expected = str(df.select(pl.col("salary").mean()))
        assert str(response) == expected

    # Test filtering query
    response = query_engine.query(QueryBundle("Show engineering employees"))
    if sys.version_info >= (3, 9):
        expected = str(df.filter(pl.col("department") == "Engineering"))
        assert str(response) == expected

    # Test groupby query
    response = query_engine.query(QueryBundle("Group by department"))
    if sys.version_info >= (3, 9):
        expected = str(df.group_by("department").agg(pl.col("salary").mean()))
        assert str(response) == expected
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-experimental/tests/test_polars.py",
"license": "MIT License",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-anthropic/tests/test_anthropic_utils.py | import pytest
from llama_index.llms.anthropic.utils import (
is_anthropic_prompt_caching_supported_model,
ANTHROPIC_PROMPT_CACHING_SUPPORTED_MODELS,
update_tool_calls,
is_anthropic_structured_output_supported,
STRUCTURED_OUTPUT_SUPPORT,
messages_to_anthropic_beta_messages,
)
from llama_index.core.base.llms.types import (
ToolCallBlock,
TextBlock,
ChatMessage,
ThinkingBlock,
ImageBlock,
DocumentBlock,
MessageRole,
)
from anthropic.types.beta import (
BetaTextBlockParam,
BetaThinkingBlockParam,
BetaToolUseBlockParam,
BetaToolResultBlockParam,
)
class TestAnthropicPromptCachingSupport:
    """Test suite for Anthropic prompt caching model validation."""

    def test_claude_4_5_opus_supported(self):
        """Test Claude 4.5 Opus models support prompt caching."""
        assert is_anthropic_prompt_caching_supported_model("claude-opus-4-5-20251101")
        assert is_anthropic_prompt_caching_supported_model("claude-opus-4-5")

    def test_claude_4_1_opus_supported(self):
        """Test Claude 4.1 Opus models support prompt caching."""
        assert is_anthropic_prompt_caching_supported_model("claude-opus-4-1-20250805")
        assert is_anthropic_prompt_caching_supported_model("claude-opus-4-1")

    def test_claude_4_opus_supported(self):
        """Test Claude 4 Opus models support prompt caching."""
        assert is_anthropic_prompt_caching_supported_model("claude-opus-4-20250514")
        assert is_anthropic_prompt_caching_supported_model("claude-opus-4-0")
        assert is_anthropic_prompt_caching_supported_model("claude-4-opus-20250514")

    def test_claude_4_5_sonnet_supported(self):
        """Test Claude 4.5 Sonnet models support prompt caching."""
        assert is_anthropic_prompt_caching_supported_model("claude-sonnet-4-5-20250929")
        assert is_anthropic_prompt_caching_supported_model("claude-sonnet-4-5")

    def test_claude_4_sonnet_supported(self):
        """Test Claude 4 Sonnet models support prompt caching."""
        assert is_anthropic_prompt_caching_supported_model("claude-sonnet-4-20250514")
        assert is_anthropic_prompt_caching_supported_model("claude-sonnet-4-0")
        assert is_anthropic_prompt_caching_supported_model("claude-4-sonnet-20250514")

    def test_claude_3_7_sonnet_supported(self):
        """Test Claude 3.7 Sonnet models support prompt caching."""
        assert is_anthropic_prompt_caching_supported_model("claude-3-7-sonnet-20250219")
        assert is_anthropic_prompt_caching_supported_model("claude-3-7-sonnet-latest")

    def test_claude_3_5_sonnet_supported(self):
        """Test Claude 3.5 Sonnet models support prompt caching."""
        assert is_anthropic_prompt_caching_supported_model("claude-3-5-sonnet-20241022")
        assert is_anthropic_prompt_caching_supported_model("claude-3-5-sonnet-20240620")
        assert is_anthropic_prompt_caching_supported_model("claude-3-5-sonnet-latest")

    def test_claude_4_5_haiku_supported(self):
        """Test Claude 4.5 Haiku models support prompt caching."""
        assert is_anthropic_prompt_caching_supported_model("claude-haiku-4-5-20251001")
        assert is_anthropic_prompt_caching_supported_model("claude-haiku-4-5")

    def test_claude_3_5_haiku_supported(self):
        """Test Claude 3.5 Haiku models support prompt caching."""
        assert is_anthropic_prompt_caching_supported_model("claude-3-5-haiku-20241022")
        assert is_anthropic_prompt_caching_supported_model("claude-3-5-haiku-latest")

    def test_claude_3_haiku_supported(self):
        """Test Claude 3 Haiku models support prompt caching."""
        assert is_anthropic_prompt_caching_supported_model("claude-3-haiku-20240307")
        assert is_anthropic_prompt_caching_supported_model("claude-3-haiku-latest")

    def test_claude_3_opus_deprecated_but_supported(self):
        """Test deprecated Claude 3 Opus models still support prompt caching."""
        assert is_anthropic_prompt_caching_supported_model("claude-3-opus-20240229")
        assert is_anthropic_prompt_caching_supported_model("claude-3-opus-latest")

    def test_claude_2_not_supported(self):
        """Test Claude 2.x models do not support prompt caching."""
        assert not is_anthropic_prompt_caching_supported_model("claude-2")
        assert not is_anthropic_prompt_caching_supported_model("claude-2.0")
        assert not is_anthropic_prompt_caching_supported_model("claude-2.1")

    def test_claude_instant_not_supported(self):
        """Test Claude Instant models do not support prompt caching."""
        assert not is_anthropic_prompt_caching_supported_model("claude-instant-1")
        assert not is_anthropic_prompt_caching_supported_model("claude-instant-1.2")

    def test_invalid_model_not_supported(self):
        """Test invalid or unknown model names return False."""
        assert not is_anthropic_prompt_caching_supported_model("invalid-model")
        assert not is_anthropic_prompt_caching_supported_model("")
        assert not is_anthropic_prompt_caching_supported_model("gpt-4")
        assert not is_anthropic_prompt_caching_supported_model("claude-nonexistent")

    def test_constant_contains_all_supported_models(self):
        """Test that the constant tuple contains expected model patterns."""
        assert len(ANTHROPIC_PROMPT_CACHING_SUPPORTED_MODELS) > 0

        # Every known family pattern must appear somewhere in the constant.
        expected_patterns = [
            "claude-opus-4-5",
            "claude-opus-4-1",
            "claude-opus-4-0",
            "claude-sonnet-4-5",
            "claude-sonnet-4-0",
            "claude-3-7-sonnet",
            "claude-3-5-sonnet",
            "claude-haiku-4-5",
            "claude-3-5-haiku",
            "claude-3-haiku",
            "claude-3-opus",
        ]
        for pattern in expected_patterns:
            has_pattern = any(
                pattern in model for model in ANTHROPIC_PROMPT_CACHING_SUPPORTED_MODELS
            )
            assert has_pattern, (
                f"Expected pattern '{pattern}' not found in supported models"
            )

    def test_case_sensitivity(self):
        """Test that model name matching is case-sensitive."""
        assert is_anthropic_prompt_caching_supported_model("claude-sonnet-4-5-20250929")
        assert not is_anthropic_prompt_caching_supported_model(
            "Claude-Sonnet-4-5-20250929"
        )
        assert not is_anthropic_prompt_caching_supported_model(
            "CLAUDE-SONNET-4-5-20250929"
        )
def test_update_tool_calls() -> None:
    """update_tool_calls: a new tool_call_id appends; an existing id updates in place."""
    blocks = [TextBlock(text="hello world")]

    # New id "1": a ToolCallBlock is appended.
    update_tool_calls(
        blocks, ToolCallBlock(tool_call_id="1", tool_name="hello", tool_kwargs={})
    )  # type: ignore
    assert len(blocks) == 2
    assert isinstance(blocks[1], ToolCallBlock)
    assert blocks[1].tool_call_id == "1"
    assert blocks[1].tool_name == "hello"
    assert blocks[1].tool_kwargs == {}

    # Same id "1" again: the existing block's kwargs are updated, not appended.
    update_tool_calls(
        blocks,
        ToolCallBlock(
            tool_call_id="1", tool_name="hello", tool_kwargs={"name": "John"}
        ),
    )  # type: ignore
    assert len(blocks) == 2
    assert isinstance(blocks[1], ToolCallBlock)
    assert blocks[1].tool_call_id == "1"
    assert blocks[1].tool_name == "hello"
    assert blocks[1].tool_kwargs == {"name": "John"}

    # New id "2": appended as a third block.
    update_tool_calls(
        blocks, ToolCallBlock(tool_call_id="2", tool_name="hello", tool_kwargs={})
    )  # type: ignore
    assert len(blocks) == 3
    assert isinstance(blocks[2], ToolCallBlock)
    assert blocks[2].tool_call_id == "2"
    assert blocks[2].tool_name == "hello"
    assert blocks[2].tool_kwargs == {}
def test_messages_to_anthropic_beta_messages() -> None:
    """
    Convert LlamaIndex ChatMessages into Anthropic beta message params.

    Covers: system-prompt extraction (thinking blocks excluded), thinking and
    tool-use block conversion, tool-result role mapping, base64 image
    handling, and rejection of unsupported mimetypes / block types.
    """
    messages_legit = [
        ChatMessage(
            role="system",
            blocks=[
                TextBlock(text="this is a"),
                TextBlock(text="system message"),
                ThinkingBlock(
                    content="this will be ignored when building the system prompt",
                    additional_information={"signature": "ignored"},
                ),
            ],
        ),
        ChatMessage(
            role="user",
            blocks=[
                TextBlock(text="this is a test: say hello to World."),
            ],
        ),
        ChatMessage(
            blocks=[
                ThinkingBlock(
                    content="user asks me to say hello to World",
                    additional_information={"signature": "hello"},
                ),
                ToolCallBlock(
                    tool_name="say_hello",
                    tool_kwargs={"name": "World"},
                    tool_call_id="1",
                ),
            ],
            role="assistant",
        ),
        ChatMessage(
            blocks=[TextBlock(text="Hello World!")],
            role=MessageRole.TOOL.value,
            additional_kwargs={"tool_call_id": "1"},
        ),
    ]
    ant_messages, system = messages_to_anthropic_beta_messages(messages_legit)
    assert (
        len(ant_messages) == len(messages_legit) - 1
    )  # system message is not captured
    assert system == "this is a\nsystem message"
    assert ant_messages[0]["role"] == "user"
    assert ant_messages[0]["content"] == [
        BetaTextBlockParam(type="text", text="this is a test: say hello to World.")
    ]
    assert ant_messages[1]["role"] == "assistant"
    assert ant_messages[1]["content"] == [
        BetaThinkingBlockParam(
            type="thinking",
            thinking="user asks me to say hello to World",
            signature="hello",
        ),
        BetaToolUseBlockParam(
            name="say_hello", input={"name": "World"}, id="1", type="tool_use"
        ),
    ]
    assert ant_messages[2]["role"] == "user"
    assert ant_messages[2]["content"] == [
        BetaToolResultBlockParam(
            tool_use_id="1",
            content=[BetaTextBlockParam(type="text", text="Hello World!")],
            type="tool_result",
        )
    ]

    # Supported image mimetypes are converted to base64 image params.
    messages_image = [
        ChatMessage(
            role="user",
            blocks=[
                ImageBlock(
                    block_type="image", image=b"helloworld", image_mimetype="image/png"
                ),
            ],
        ),
    ]
    ant_messages, system = messages_to_anthropic_beta_messages(messages_image)
    assert not system
    assert len(ant_messages) == len(messages_image)
    assert ant_messages[0]["role"] == "user"
    assert isinstance(ant_messages[0]["content"], list)
    assert isinstance(ant_messages[0]["content"][0], dict)
    assert ant_messages[0]["content"][0]["type"] == "image"
    assert ant_messages[0]["content"][0]["source"]["type"] == "base64"
    assert ant_messages[0]["content"][0]["source"]["media_type"] == "image/png"

    # Unsupported image mimetype raises.
    messages_image_unsupp = [
        ChatMessage(
            role="user",
            blocks=[
                ImageBlock(
                    block_type="image", image=b"helloworld", image_mimetype="image/tiff"
                ),
            ],
        ),
    ]
    with pytest.raises(ValueError, match="Image mimetype image/tiff not supported"):
        messages_to_anthropic_beta_messages(messages_image_unsupp)

    # Unsupported block type (document) raises.
    messages_document_unsupp = [
        ChatMessage(
            role="user",
            blocks=[
                DocumentBlock(data=b"document", document_mimetype="application/pdf")
            ],
        )
    ]
    # Fixed: `match` value was an f-string with no placeholders (ruff F541).
    with pytest.raises(ValueError, match="Block type not supported: document"):
        messages_to_anthropic_beta_messages(messages_document_unsupp)
def test_is_anthropic_structured_output_supported() -> None:
    """Every listed model supports structured output; an unlisted one does not."""
    assert all(
        is_anthropic_structured_output_supported(model)
        for model in STRUCTURED_OUTPUT_SUPPORT
    )
    assert not is_anthropic_structured_output_supported("claude-sonnet-4-0")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-anthropic/tests/test_anthropic_utils.py",
"license": "MIT License",
"lines": 261,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/node_parser/test_duplicate_text_positions.py | """Test that node parsers assign unique positions to duplicate text."""
from llama_index.core.node_parser import MarkdownNodeParser, SentenceSplitter
from llama_index.core.schema import Document, TextNode, MetadataMode
def _validate_nodes(nodes: list[TextNode], doc: Document) -> None:
    """Validate node positions: no duplicates, valid bounds, text matches."""
    seen: dict = {}
    prev_start = -1
    for i, node in enumerate(nodes):
        if not isinstance(node, TextNode):
            continue
        start = node.start_char_idx
        end = node.end_char_idx
        text = node.get_content(metadata_mode=MetadataMode.NONE)

        # Offsets must exist and lie within the source document.
        assert start is not None and end is not None
        assert 0 <= start <= end <= len(doc.text)

        # The node's text must equal exactly the document slice it claims.
        assert doc.text[start:end] == text, f"Node {i} text mismatch at [{start}:{end}]"

        # No two nodes may claim the same (start, end, text) triple.
        key = (start, end, text)
        assert key not in seen, (
            f"Nodes {seen[key]} and {i} have duplicate position [{start}:{end}]"
        )
        seen[key] = i

        # Nodes must appear in non-decreasing start order.
        assert start >= prev_start, f"Node {i} out of order"
        prev_start = start
def test_markdown_with_duplicate_headers():
    """Test MarkdownNodeParser with repeated section headers."""
    # Three sections share the header "## Introduction"; each resulting node
    # must still get a unique character position.
    doc = Document(
        text="""# Title
## Introduction
Content A.
## Methods
Content B.
## Introduction
Content C.
## Introduction
Content D.
""",
        doc_id="test",
    )
    nodes = MarkdownNodeParser().get_nodes_from_documents([doc])
    _validate_nodes(nodes, doc)
def test_sentence_splitter_with_duplicates():
    """Test SentenceSplitter with repeated sentences."""
    doc = Document(
        text=(
            "This is important. Other text. This is important. "
            "More text. This is important. Final text."
        ),
        doc_id="test",
    )
    # Small chunk_size forces multiple chunks containing the repeated sentence;
    # metadata disabled so character offsets refer to the raw text.
    parser = SentenceSplitter(chunk_size=50, chunk_overlap=0, include_metadata=False)
    nodes = parser.get_nodes_from_documents([doc])
    _validate_nodes(nodes, doc)
def test_multiple_documents():
    """Test position tracking is independent per document."""
    # Two documents with identical text: offsets must be tracked per document.
    text = "## Section A\nContent.\n\n## Section A\nMore content."
    docs = [Document(text=text, doc_id=f"doc{i}") for i in range(2)]
    parser = MarkdownNodeParser()
    nodes = parser.get_nodes_from_documents(docs)
    for doc in docs:
        # Validate each document's nodes separately against its own text.
        doc_nodes = [
            n for n in nodes if isinstance(n, TextNode) and n.ref_doc_id == doc.doc_id
        ]
        _validate_nodes(doc_nodes, doc)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/node_parser/test_duplicate_text_positions.py",
"license": "MIT License",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-signnow/examples/from_env.py | import asyncio
import os
from llama_index.tools.signnow import SignNowMCPToolSpec
from llama_index.core.agent.workflow import FunctionAgent
async def main():
    """Spawn the SignNow MCP server, list its tools, and run a sample agent query."""
    # Pass SignNow credentials directly via env_overrides (no .env required)
    spec = SignNowMCPToolSpec.from_env(
        env_overrides={
            # Option 1: token-based auth
            # "SIGNNOW_TOKEN": "your_signnow_token_here",
            # Option 2: credential-based auth
            "SIGNNOW_USER_EMAIL": "login",
            "SIGNNOW_PASSWORD": "password",
            "SIGNNOW_API_BASIC_TOKEN": "basic_token",
        }
    )

    # Fetch tools from MCP server
    tools = await spec.to_tool_list_async()
    print({"count": len(tools), "names": [t.metadata.name for t in tools]})

    # Create an agent and ask for templates list
    agent = FunctionAgent(
        name="SignNow Agent",
        description="Query SignNow via MCP tools",
        tools=tools,
        system_prompt="Be helpful.",
    )
    resp = await agent.run("Show me list of templates and their names")
    print(resp)


if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-signnow/examples/from_env.py",
"license": "MIT License",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-signnow/llama_index/tools/signnow/base.py | """SignNow MCP tool spec scaffold."""
import os
import shutil
from typing import Dict, Iterable, List, Mapping, Optional, Sequence, Tuple, Union, cast
from mcp.client.session import ClientSession
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.core.tools import FunctionTool
from llama_index.core.tools.types import ToolMetadata
from llama_index.tools.mcp.base import McpToolSpec
from llama_index.tools.mcp.client import BasicMCPClient
def _merge_env(overrides: Optional[Mapping[str, str]] = None) -> Dict[str, str]:
"""
Build environment for spawning the MCP server:
- Start from current process environment
- Overlay provided overrides (takes precedence)
"""
env = dict(os.environ)
if overrides:
env.update({k: v for k, v in overrides.items() if v is not None})
return env
# Whitelist of environment variables forwarded to the spawned `sn-mcp`
# process; all other keys in the merged environment are dropped.
EXPECTED_SIGNNOW_KEYS = {
    # Auth: either the bearer token, or the full credential triple.
    "SIGNNOW_TOKEN",
    "SIGNNOW_USER_EMAIL",
    "SIGNNOW_PASSWORD",
    "SIGNNOW_API_BASIC_TOKEN",
    # API endpoints (optional; defaults may be used)
    "SIGNNOW_APP_BASE",
    "SIGNNOW_API_BASE",
}
def _validate_auth(env: Mapping[str, str]) -> None:
"""Require either SIGNNOW_TOKEN or (SIGNNOW_USER_EMAIL + SIGNNOW_PASSWORD + SIGNNOW_API_BASIC_TOKEN)."""
have_token = bool(env.get("SIGNNOW_TOKEN"))
have_basic = all(
env.get(k)
for k in ("SIGNNOW_USER_EMAIL", "SIGNNOW_PASSWORD", "SIGNNOW_API_BASIC_TOKEN")
)
if not (have_token or have_basic):
raise ValueError(
"Provide SIGNNOW_TOKEN OR SIGNNOW_USER_EMAIL + SIGNNOW_PASSWORD + SIGNNOW_API_BASIC_TOKEN."
)
def _resolve_sn_mcp_bin(explicit: Optional[str], require_in_path: bool) -> str:
"""Resolve path to sn-mcp binary from explicit arg, SIGNNOW_MCP_BIN, or PATH."""
candidate = explicit or os.environ.get("SIGNNOW_MCP_BIN") or "sn-mcp"
path = shutil.which(candidate)
if path:
return path
if require_in_path:
raise FileNotFoundError(
"Cannot find 'sn-mcp' in PATH. Set SIGNNOW_MCP_BIN or install SignNow MCP server."
)
return candidate
class SignNowMCPToolSpec(BaseToolSpec):
    """
    Thin wrapper over McpToolSpec:
    - creates BasicMCPClient for STDIO spawn,
    - dynamically pulls tools from SignNow MCP server,
    - sugar factories: from_env.

    Tool discovery happens at call time via McpToolSpec.to_tool_list() /
    .to_tool_list_async().
    """

    # BaseToolSpec contract: tools are discovered dynamically over MCP,
    # so the static function list stays empty.
    spec_functions: List[Union[str, Tuple[str, str]]] = []

    def __init__(
        self,
        client: ClientSession,
        allowed_tools: Optional[List[str]] = None,
        include_resources: bool = False,
    ) -> None:
        # All real work is delegated to the generic MCP tool spec.
        self._mcp_spec = McpToolSpec(
            client=client,
            allowed_tools=allowed_tools,
            include_resources=include_resources,
        )

    @classmethod
    def from_env(
        cls,
        *,
        allowed_tools: Optional[Iterable[str]] = None,
        include_resources: bool = False,
        env_overrides: Optional[Mapping[str, str]] = None,
        bin: Optional[str] = None,
        cmd: str = "serve",
        args: Optional[Sequence[str]] = None,
        require_in_path: bool = True,
    ) -> "SignNowMCPToolSpec":
        """
        Spawn 'sn-mcp serve' over STDIO with ``env_overrides`` merged on top
        of the current process environment.

        Supported variables (see server README):
            SIGNNOW_TOKEN (token-based auth)
            OR
            SIGNNOW_USER_EMAIL, SIGNNOW_PASSWORD, SIGNNOW_API_BASIC_TOKEN (credential-based auth)
            SIGNNOW_APP_BASE, SIGNNOW_API_BASE (optional, defaults can be used)

        Parameters
        ----------
        - bin: binary/command to spawn (default None → uses SIGNNOW_MCP_BIN or 'sn-mcp')
        - cmd: subcommand (default 'serve')
        - args: additional arguments for the server
        - require_in_path: validate presence of binary in PATH if not absolute
        """
        merged = _merge_env(env_overrides)
        # Only documented SignNow variables are forwarded to the child process.
        server_env = {key: val for key, val in merged.items() if key in EXPECTED_SIGNNOW_KEYS}
        _validate_auth(server_env)

        # Prefer an absolute binary path when resolvable.
        binary = _resolve_sn_mcp_bin(bin, require_in_path=require_in_path)
        command_line: List[str] = [cmd, *(args or [])]

        client = BasicMCPClient(binary, args=command_line, env=server_env)
        return cls(
            client=client,
            allowed_tools=list(allowed_tools) if allowed_tools else None,
            include_resources=include_resources,
        )

    async def to_tool_list_async(self) -> List[FunctionTool]:
        """Fetch tools from the MCP server (async); delegates to `McpToolSpec`."""
        tools = await self._mcp_spec.to_tool_list_async()
        return cast(List[FunctionTool], tools)

    def to_tool_list(
        self,
        spec_functions: Optional[List[Union[str, Tuple[str, str]]]] = None,
        func_to_metadata_mapping: Optional[Dict[str, ToolMetadata]] = None,
    ) -> List[FunctionTool]:
        """
        Fetch tools from the MCP server (sync); delegates to `McpToolSpec`.

        Tools are discovered dynamically over MCP, so both parameters exist
        only for interface compatibility and are ignored.
        """
        return cast(List[FunctionTool], self._mcp_spec.to_tool_list())
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-signnow/llama_index/tools/signnow/base.py",
"license": "MIT License",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-signnow/tests/test_delegation.py | from unittest.mock import MagicMock
from typing import List
import pytest
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.tools.signnow.base import SignNowMCPToolSpec
def test_to_tool_list_delegates() -> None:
    """Sync tool listing should forward to the wrapped McpToolSpec."""
    spec = SignNowMCPToolSpec.__new__(SignNowMCPToolSpec)
    spec._mcp_spec = MagicMock()
    # The inner spec yields a real FunctionTool so types can be asserted.
    stub_tool = FunctionTool.from_defaults(fn=lambda: None, name="ok")
    spec._mcp_spec.to_tool_list.return_value = [stub_tool]

    tools = spec.to_tool_list()

    assert isinstance(tools, list)
    assert all(isinstance(tool, FunctionTool) for tool in tools)
@pytest.mark.asyncio
async def test_to_tool_list_async_delegates() -> None:
    """Async tool listing should forward to the wrapped McpToolSpec."""
    spec = SignNowMCPToolSpec.__new__(SignNowMCPToolSpec)
    spec._mcp_spec = MagicMock()
    stub_tool = FunctionTool.from_defaults(fn=lambda: None, name="ok")

    async def _fake_listing() -> List[FunctionTool]:
        return [stub_tool]

    spec._mcp_spec.to_tool_list_async = _fake_listing

    tools = await spec.to_tool_list_async()

    assert isinstance(tools, list)
    assert all(isinstance(tool, FunctionTool) for tool in tools)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-signnow/tests/test_delegation.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-signnow/tests/test_env_and_bin.py | import os
import pytest
from unittest.mock import patch, MagicMock
from typing import Mapping, cast
from llama_index.tools.signnow.base import SignNowMCPToolSpec, EXPECTED_SIGNNOW_KEYS
def test_from_env_requires_auth() -> None:
    """from_env must reject an environment with no SignNow auth configured."""
    with pytest.raises(ValueError):
        SignNowMCPToolSpec.from_env(env_overrides={}, require_in_path=False)
@patch("shutil.which", return_value=None)
def test_bin_not_found(mock_which: MagicMock) -> None:
    """When PATH lookup fails and require_in_path=True, from_env raises."""
    with pytest.raises(FileNotFoundError):
        SignNowMCPToolSpec.from_env(
            env_overrides={"SIGNNOW_TOKEN": "tok"}, require_in_path=True
        )
@patch("shutil.which", return_value="/usr/local/bin/sn-mcp")
@patch("llama_index.tools.signnow.base.BasicMCPClient")
def test_auth_token_and_env_filtering(
    mock_client: MagicMock, mock_which: MagicMock
) -> None:
    """Token auth is accepted and only whitelisted env vars reach the client."""
    overrides = cast(
        Mapping[str, str],
        {
            "SIGNNOW_TOKEN": "dummy",
            "IRRELEVANT": "skip",
            "SIGNNOW_API_BASE": None,  # None values must be filtered out
        },
    )
    spec = SignNowMCPToolSpec.from_env(env_overrides=overrides, require_in_path=True)

    forwarded = mock_client.call_args.kwargs["env"]
    assert set(forwarded).issubset(EXPECTED_SIGNNOW_KEYS)
    assert "SIGNNOW_TOKEN" in forwarded
    assert "IRRELEVANT" not in forwarded
    # The factory should hand back a usable spec instance.
    assert isinstance(spec, SignNowMCPToolSpec)
@patch("shutil.which", return_value="/usr/local/bin/sn-mcp")
@patch("llama_index.tools.signnow.base.BasicMCPClient")
def test_auth_basic_credentials(mock_client: MagicMock, mock_which: MagicMock) -> None:
    """The credential triple is a valid auth configuration on its own."""
    creds = {
        "SIGNNOW_USER_EMAIL": "u@example.com",
        "SIGNNOW_PASSWORD": "pass",
        "SIGNNOW_API_BASIC_TOKEN": "basic",
    }
    SignNowMCPToolSpec.from_env(env_overrides=creds, require_in_path=True)

    forwarded = mock_client.call_args.kwargs["env"]
    for key, expected in creds.items():
        assert forwarded.get(key) == expected
@patch("shutil.which", return_value="/usr/local/bin/sn-mcp")
@patch("llama_index.tools.signnow.base.BasicMCPClient")
def test_auth_both_token_and_basic(
    mock_client: MagicMock, mock_which: MagicMock
) -> None:
    """Supplying both auth styles at once is allowed; both get forwarded."""
    SignNowMCPToolSpec.from_env(
        env_overrides={
            "SIGNNOW_TOKEN": "tok",
            "SIGNNOW_USER_EMAIL": "u@example.com",
            "SIGNNOW_PASSWORD": "pass",
            "SIGNNOW_API_BASIC_TOKEN": "basic",
        },
        require_in_path=True,
    )

    forwarded = mock_client.call_args.kwargs["env"]
    assert forwarded.get("SIGNNOW_TOKEN") == "tok"
    assert forwarded.get("SIGNNOW_USER_EMAIL") == "u@example.com"
def test_bin_resolution_with_env_var() -> None:
    """SIGNNOW_MCP_BIN should drive binary resolution when no explicit bin is given."""
    with patch.dict(os.environ, {"SIGNNOW_MCP_BIN": "custom-bin"}, clear=False):
        with patch("shutil.which", return_value="/opt/custom-bin") as mock_which, patch(
            "llama_index.tools.signnow.base.BasicMCPClient"
        ) as mock_client:
            SignNowMCPToolSpec.from_env(
                env_overrides={"SIGNNOW_TOKEN": "tok"}, require_in_path=True
            )
            # Resolver consulted the env-var candidate and used its absolute path.
            mock_which.assert_called_with("custom-bin")
            assert mock_client.call_args.args[0] == "/opt/custom-bin"
@patch("llama_index.tools.signnow.base.BasicMCPClient")
def test_bin_resolution_with_explicit_param(mock_client: MagicMock) -> None:
    """An explicit ``bin`` argument participates in PATH resolution."""
    with patch("shutil.which", return_value="/bin/local-sn") as mock_which:
        SignNowMCPToolSpec.from_env(
            env_overrides={"SIGNNOW_TOKEN": "tok"}, bin="local-sn", require_in_path=True
        )
    mock_which.assert_called_with("local-sn")
    assert mock_client.call_args.args[0] == "/bin/local-sn"
@patch("llama_index.tools.signnow.base.BasicMCPClient")
def test_bin_candidate_when_not_required(mock_client: MagicMock) -> None:
    """A failed PATH lookup falls back to the candidate name when not required."""
    with patch("shutil.which", return_value=None):
        SignNowMCPToolSpec.from_env(
            env_overrides={"SIGNNOW_TOKEN": "tok"}, require_in_path=False
        )
    # The default candidate name is "sn-mcp".
    assert mock_client.call_args.args[0] == "sn-mcp"
@patch("shutil.which", return_value="/usr/local/bin/sn-mcp")
@patch("llama_index.tools.signnow.base.BasicMCPClient")
def test_cmd_and_args_passed_to_client(
    mock_client: MagicMock, mock_which: MagicMock
) -> None:
    """The subcommand and extra args are forwarded to the client in order."""
    SignNowMCPToolSpec.from_env(
        env_overrides={"SIGNNOW_TOKEN": "tok"},
        cmd="serve",
        args=["--flag", "v"],
        require_in_path=True,
    )
    expected = ["serve", "--flag", "v"]
    assert mock_client.call_args.kwargs["args"] == expected
@patch("shutil.which", return_value="/usr/local/bin/sn-mcp")
@patch("llama_index.tools.signnow.base.McpToolSpec")
@patch("llama_index.tools.signnow.base.BasicMCPClient")
def test_allowed_tools_and_include_resources_propagated(
    mock_client: MagicMock, mock_mcp_spec: MagicMock, mock_which: MagicMock
) -> None:
    """allowed_tools / include_resources must reach the McpToolSpec constructor."""
    SignNowMCPToolSpec.from_env(
        env_overrides={"SIGNNOW_TOKEN": "tok"},
        allowed_tools=["a", "b"],
        include_resources=True,
        require_in_path=True,
    )
    ctor_kwargs = mock_mcp_spec.call_args.kwargs
    assert ctor_kwargs["allowed_tools"] == ["a", "b"]
    assert ctor_kwargs["include_resources"] is True
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-signnow/tests/test_env_and_bin.py",
"license": "MIT License",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-signnow/tests/test_tools_signnow.py | from unittest.mock import patch, MagicMock
from llama_index.tools.signnow.base import SignNowMCPToolSpec
def test_class() -> None:
    """SignNowMCPToolSpec must inherit from BaseToolSpec."""
    assert "BaseToolSpec" in [base.__name__ for base in SignNowMCPToolSpec.__mro__]
@patch("shutil.which")
def test_from_env_returns_spec(mock_which: MagicMock) -> None:
    """A valid token plus a resolvable binary should yield a spec instance."""
    mock_which.return_value = "/usr/local/bin/sn-mcp"
    overrides = {
        "SIGNNOW_TOKEN": "dummy",
        "IRRELEVANT": "skip",
    }
    spec = SignNowMCPToolSpec.from_env(env_overrides=overrides, require_in_path=True)
    assert isinstance(spec, SignNowMCPToolSpec)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-signnow/tests/test_tools_signnow.py",
"license": "MIT License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-sglang/llama_index/llms/sglang/base.py | import json
from typing import Any, Callable, Dict, List, Optional, Sequence
from llama_index.core.base.llms.generic_utils import (
completion_response_to_chat_response,
)
from llama_index.core.base.llms.generic_utils import (
messages_to_prompt as generic_messages_to_prompt,
)
from llama_index.core.base.llms.generic_utils import (
stream_completion_response_to_chat_response,
)
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CallbackManager
from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback
from llama_index.core.llms.llm import LLM
from llama_index.core.types import BaseOutputParser, PydanticProgramMode
from llama_index.llms.sglang.utils import get_response, post_http_request
class SGLang(LLM):
    r"""
    SGLang LLM.

    This class connects to an SGLang server for high-performance LLM inference
    via its OpenAI-compatible ``/v1/completions`` endpoint.

    Examples:
        `pip install llama-index-llms-sglang`

        ```python
        from llama_index.llms.sglang import SGLang

        # specific functions to format for mistral instruct
        def messages_to_prompt(messages):
            prompt = "\n".join([str(x) for x in messages])
            return f"<s>[INST] {prompt} [/INST] </s>\n"

        def completion_to_prompt(completion):
            return f"<s>[INST] {completion} [/INST] </s>\n"

        llm = SGLang(
            model="mistralai/Mistral-7B-Instruct-v0.1",
            api_url="http://localhost:30000",
            temperature=0.7,
            max_new_tokens=256,
            messages_to_prompt=messages_to_prompt,
            completion_to_prompt=completion_to_prompt,
        )

        response = llm.complete("What is a black hole?")
        print(response)
        ```

    """

    model: Optional[str] = Field(
        default="default",
        description="The model name (for metadata purposes).",
    )
    api_url: str = Field(
        default="http://localhost:30000",
        description="The API URL for the SGLang server.",
    )
    api_key: Optional[str] = Field(
        default=None,
        description="API key for authentication (if required by server).",
    )
    temperature: float = Field(
        default=1.0,
        description="The temperature to use for sampling.",
    )
    max_new_tokens: int = Field(
        default=512,
        description="Maximum number of tokens to generate per output sequence.",
    )
    top_p: float = Field(
        default=1.0,
        description="Float that controls the cumulative probability of the top tokens to consider.",
    )
    top_k: int = Field(
        default=-1,
        description="Integer that controls the number of top tokens to consider.",
    )
    frequency_penalty: float = Field(
        default=0.0,
        description="Float that penalizes new tokens based on their frequency in the generated text so far.",
    )
    presence_penalty: float = Field(
        default=0.0,
        description="Float that penalizes new tokens based on whether they appear in the generated text so far.",
    )
    stop: Optional[List[str]] = Field(
        default=None,
        description="List of strings that stop the generation when they are generated.",
    )
    n: int = Field(
        default=1,
        description="Number of output sequences to return for the given prompt.",
    )
    skip_special_tokens: bool = Field(
        default=True,
        description="Whether to skip special tokens in the output.",
    )
    regex: Optional[str] = Field(
        default=None,
        description="Optional regex pattern for constrained generation.",
    )
    is_chat_model: bool = Field(
        default=False,
        description=LLMMetadata.model_fields["is_chat_model"].description,
    )
    additional_kwargs: Dict[str, Any] = Field(
        default_factory=dict,
        description="Additional keyword arguments for SGLang API.",
    )

    _client: Any = PrivateAttr()

    def __init__(
        self,
        model: str = "default",
        api_url: str = "http://localhost:30000",
        api_key: Optional[str] = None,
        temperature: float = 1.0,
        max_new_tokens: int = 512,
        top_p: float = 1.0,
        top_k: int = -1,
        frequency_penalty: float = 0.0,
        presence_penalty: float = 0.0,
        stop: Optional[List[str]] = None,
        n: int = 1,
        skip_special_tokens: bool = True,
        regex: Optional[str] = None,
        # BUG FIX: was a mutable default ({}) shared across instances.
        additional_kwargs: Optional[Dict[str, Any]] = None,
        callback_manager: Optional[CallbackManager] = None,
        system_prompt: Optional[str] = None,
        messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
        completion_to_prompt: Optional[Callable[[str], str]] = None,
        pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
        output_parser: Optional[BaseOutputParser] = None,
        is_chat_model: Optional[bool] = False,
    ) -> None:
        """Configure the client-side sampling defaults and prompt formatters."""
        messages_to_prompt = messages_to_prompt or generic_messages_to_prompt
        completion_to_prompt = completion_to_prompt or (lambda x: x)
        callback_manager = callback_manager or CallbackManager([])

        super().__init__(
            model=model,
            api_url=api_url,
            api_key=api_key,
            temperature=temperature,
            max_new_tokens=max_new_tokens,
            top_p=top_p,
            top_k=top_k,
            frequency_penalty=frequency_penalty,
            presence_penalty=presence_penalty,
            stop=stop,
            n=n,
            skip_special_tokens=skip_special_tokens,
            regex=regex,
            additional_kwargs=additional_kwargs or {},
            callback_manager=callback_manager,
            system_prompt=system_prompt,
            messages_to_prompt=messages_to_prompt,
            completion_to_prompt=completion_to_prompt,
            pydantic_program_mode=pydantic_program_mode,
            output_parser=output_parser,
            is_chat_model=is_chat_model,
        )
        # Reserved for a future persistent HTTP session; unused today.
        self._client = None

    @classmethod
    def class_name(cls) -> str:
        """Return the class name used by the llama-index registry."""
        return "SGLang"

    @property
    def metadata(self) -> LLMMetadata:
        """Expose the configured model name and chat-model flag."""
        return LLMMetadata(
            model_name=self.model,
            is_chat_model=self.is_chat_model,
        )

    @property
    def _model_kwargs(self) -> Dict[str, Any]:
        """Sampling parameters sent with every request (plus user extras)."""
        base_kwargs = {
            "temperature": self.temperature,
            # NOTE(review): the OpenAI completions spec names this `max_tokens`;
            # SGLang servers accept `max_new_tokens` as an extra field — confirm
            # it is honored by the target server version.
            "max_new_tokens": self.max_new_tokens,
            "top_p": self.top_p,
            "top_k": self.top_k,
            "frequency_penalty": self.frequency_penalty,
            "presence_penalty": self.presence_penalty,
            "stop": self.stop,
            "n": self.n,
            "skip_special_tokens": self.skip_special_tokens,
        }
        if self.regex:
            base_kwargs["regex"] = self.regex
        return {**base_kwargs, **self.additional_kwargs}

    def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
        """Merge per-call kwargs on top of the configured sampling defaults."""
        return {
            **self._model_kwargs,
            **kwargs,
        }

    @llm_chat_callback()
    def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        """Chat by flattening messages to one prompt via messages_to_prompt."""
        kwargs = kwargs if kwargs else {}
        prompt = self.messages_to_prompt(messages)
        # The prompt is already fully formatted; avoid re-applying
        # completion_to_prompt inside complete().
        completion_response = self.complete(prompt, formatted=True, **kwargs)
        return completion_response_to_chat_response(completion_response)

    @llm_completion_callback()
    def complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        """Generate a completion for ``prompt`` via /v1/completions."""
        kwargs = kwargs if kwargs else {}
        # BUG FIX: completion_to_prompt was accepted in __init__ but never
        # applied; honor it unless the caller says the prompt is preformatted.
        # (The default formatter is the identity, so behavior is unchanged
        # for callers that never set it.)
        if not formatted:
            prompt = self.completion_to_prompt(prompt)
        params = {**self._model_kwargs, **kwargs}

        # Build sampling parameters for SGLang
        sampling_params = dict(**params)
        # SGLang OpenAI-compatible API uses 'prompt' parameter
        sampling_params["prompt"] = prompt
        sampling_params["model"] = self.model

        # Use OpenAI-compatible endpoint
        endpoint = f"{self.api_url}/v1/completions"
        response = post_http_request(
            endpoint, sampling_params, stream=False, api_key=self.api_key
        )
        output = get_response(response)

        return CompletionResponse(text=output[0])

    @llm_chat_callback()
    def stream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseGen:
        """Streaming variant of :meth:`chat`."""
        prompt = self.messages_to_prompt(messages)
        completion_response = self.stream_complete(prompt, formatted=True, **kwargs)
        return stream_completion_response_to_chat_response(completion_response)

    @llm_completion_callback()
    def stream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseGen:
        """Stream a completion for ``prompt`` via /v1/completions (SSE)."""
        kwargs = kwargs if kwargs else {}
        if not formatted:
            prompt = self.completion_to_prompt(prompt)
        params = {**self._model_kwargs, **kwargs}

        sampling_params = dict(**params)
        # BUG FIX: previously sent under "text", which /v1/completions does
        # not read; use "prompt" (and include the model) for consistency
        # with complete().
        sampling_params["prompt"] = prompt
        sampling_params["model"] = self.model

        # SGLang uses OpenAI-compatible API, so use /v1/completions for streaming
        endpoint = f"{self.api_url}/v1/completions"
        response = post_http_request(
            endpoint, sampling_params, stream=True, api_key=self.api_key
        )

        def gen() -> CompletionResponseGen:
            response_str = ""
            for chunk in response.iter_lines(
                chunk_size=8192, decode_unicode=False, delimiter=b"\n"
            ):
                if chunk:
                    chunk_str = chunk.decode("utf-8")
                    # Handle SSE format
                    if chunk_str.startswith("data: "):
                        chunk_str = chunk_str[6:]
                    if chunk_str.strip() == "[DONE]":
                        break
                    try:
                        data = json.loads(chunk_str)
                        # OpenAI format has choices array
                        if "choices" in data and len(data["choices"]) > 0:
                            delta = data["choices"][0].get("text", "")
                            response_str += delta
                            yield CompletionResponse(text=response_str, delta=delta)
                    except json.JSONDecodeError:
                        continue

        return gen()

    @llm_chat_callback()
    async def achat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponse:
        """Async chat; delegates to the blocking implementation."""
        kwargs = kwargs if kwargs else {}
        return self.chat(messages, **kwargs)

    @llm_completion_callback()
    async def acomplete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        """Async completion; delegates to the blocking implementation."""
        kwargs = kwargs if kwargs else {}
        return self.complete(prompt, formatted=formatted, **kwargs)

    @llm_chat_callback()
    async def astream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseAsyncGen:
        """Async streaming chat; wraps the sync generator (still blocking I/O)."""

        async def gen() -> ChatResponseAsyncGen:
            for message in self.stream_chat(messages, **kwargs):
                yield message

        return gen()

    @llm_completion_callback()
    async def astream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseAsyncGen:
        """Async streaming completion; wraps the sync generator (still blocking I/O)."""

        async def gen() -> CompletionResponseAsyncGen:
            for message in self.stream_complete(prompt, formatted=formatted, **kwargs):
                yield message

        return gen()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-sglang/llama_index/llms/sglang/base.py",
"license": "MIT License",
"lines": 289,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-sglang/llama_index/llms/sglang/utils.py | import json
from typing import Iterable, List
import requests
def get_response(response: requests.Response) -> List[str]:
    """
    Pull the generated text(s) out of an SGLang API response body.

    Supports the OpenAI-compatible schema (``choices`` with either ``text``
    or ``message.content``) and falls back to the native schema (top-level
    ``text``). Returns an empty list when no recognizable payload is found.
    """
    payload = json.loads(response.content)
    if not isinstance(payload, dict):
        return []

    choices = payload.get("choices")
    if isinstance(choices, list) and len(choices) > 0:
        first = choices[0]
        # Completion format carries "text"; chat format nests under "message".
        if "text" in first:
            return [choice["text"] for choice in choices]
        if "message" in first and "content" in first["message"]:
            return [choice["message"]["content"] for choice in choices]

    # Fallback for native API format
    if "text" in payload:
        text = payload["text"]
        return [text] if isinstance(text, str) else text

    return []
def post_http_request(
    api_url: str,
    sampling_params: Optional[dict] = None,
    stream: bool = False,
    api_key: Optional[str] = None,
) -> requests.Response:
    """
    Post an HTTP JSON request to the SGLang server.

    Args:
        api_url: Full endpoint URL.
        sampling_params: JSON body for the request; the caller's dict is
            NOT mutated.
        stream: Whether to request a streaming (SSE) response.
        api_key: Optional bearer token for the Authorization header.

    Returns:
        The ``requests.Response`` (streaming when ``stream`` is True).
    """
    headers = {
        "User-Agent": "LlamaIndex SGLang Client",
        "Content-Type": "application/json",
    }
    # Add API key if provided
    if api_key:
        headers["Authorization"] = f"Bearer {api_key}"

    # BUG FIX: the previous implementation used a mutable default argument
    # ({}) and wrote "stream" into it, mutating both the shared default dict
    # and any dict the caller passed in. Work on a shallow copy instead.
    payload = dict(sampling_params) if sampling_params else {}
    payload["stream"] = stream

    return requests.post(
        api_url,
        headers=headers,
        json=payload,
        stream=stream,
    )
def get_streaming_response(response: requests.Response) -> Iterable[List[str]]:
    """
    Yield incremental text payloads from an SGLang SSE streaming response.

    Each yielded item is a list of strings mirroring the server's native
    streaming payload. Iteration stops at the ``[DONE]`` sentinel, and
    non-JSON lines are skipped silently.
    """
    for raw in response.iter_lines(
        chunk_size=8192, decode_unicode=False, delimiter=b"\n"
    ):
        if not raw:
            continue
        line = raw.decode("utf-8")
        # Strip the SSE "data: " framing when present.
        if line.startswith("data: "):
            line = line[6:]
        if line.strip() == "[DONE]":
            break
        try:
            event = json.loads(line)
        except json.JSONDecodeError:
            continue
        if "text" in event:
            text = event["text"]
            yield [text] if isinstance(text, str) else text
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-sglang/llama_index/llms/sglang/utils.py",
"license": "MIT License",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-sglang/tests/test_llms_sglang.py | from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.llms.sglang import SGLang
def test_llm_class():
    """SGLang must sit inside the BaseLLM class hierarchy."""
    assert BaseLLM.__name__ in [base.__name__ for base in SGLang.__mro__]
from unittest.mock import Mock, patch
def test_initialization():
    """Constructor arguments should land on the matching attributes."""
    llm = SGLang(
        model="test-model",
        api_url="http://test:8000",
        temperature=0.5,
        max_new_tokens=100,
    )
    expected = {
        "model": "test-model",
        "api_url": "http://test:8000",
        "temperature": 0.5,
        "max_new_tokens": 100,
    }
    for attr, value in expected.items():
        assert getattr(llm, attr) == value
def test_metadata():
    """metadata should surface the configured model name."""
    metadata = SGLang(model="test-model").metadata
    assert metadata.model_name == "test-model"
@patch("llama_index.llms.sglang.base.post_http_request")
@patch("llama_index.llms.sglang.base.get_response")
def test_complete(mock_get_response, mock_post_http_request):
    """complete() should return the text parsed from the HTTP response."""
    mock_post_http_request.return_value = Mock()
    mock_get_response.return_value = ["Test response."]

    result = SGLang(api_url="http://test:8000").complete("Test prompt")

    assert result.text == "Test response."
    mock_post_http_request.assert_called_once()
@patch("llama_index.llms.sglang.base.post_http_request")
@patch("llama_index.llms.sglang.base.get_response")
def test_chat(mock_get_response, mock_post_http_request):
    """chat() should wrap the completion text into a chat message."""
    mock_post_http_request.return_value = Mock()
    mock_get_response.return_value = ["Chat response."]

    history = [ChatMessage(role=MessageRole.USER, content="Hello")]
    result = SGLang(api_url="http://test:8000").chat(history)

    assert result.message.content == "Chat response."
@patch("llama_index.llms.sglang.base.post_http_request")
def test_stream_complete(mock_post_http_request):
    """stream_complete() should yield one response per SSE data line."""
    sse_lines = [
        b'data: {"choices": [{"text": "Hello"}]}\n',
        b'data: {"choices": [{"text": " world"}]}\n',
        b"data: [DONE]\n",
    ]
    streaming_response = Mock()
    streaming_response.iter_lines.return_value = iter(sse_lines)
    mock_post_http_request.return_value = streaming_response

    chunks = list(SGLang(api_url="http://test:8000").stream_complete("Test prompt"))

    assert len(chunks) == 2
    assert chunks[0].delta == "Hello"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-sglang/tests/test_llms_sglang.py",
"license": "MIT License",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-scrapegraph/examples/complete-scrapegraph-examples.py | """
Comprehensive example showcasing all ScrapeGraph tool functionalities with LlamaIndex.
This example demonstrates all available methods in the ScrapegraphToolSpec:
- SmartScraper for intelligent data extraction
- Markdownify for content conversion
- Search for web search functionality
- Basic Scrape for HTML extraction
- Agentic Scraper for complex navigation
"""
from typing import List
from pydantic import BaseModel, Field
from llama_index.tools.scrapegraph import ScrapegraphToolSpec
class NewsArticle(BaseModel):
    """Schema for news article information."""

    # All fields except title default to "N/A" so extraction does not fail
    # validation when a page is missing some metadata.
    title: str = Field(description="Article title")
    author: str = Field(description="Article author", default="N/A")
    date: str = Field(description="Publication date", default="N/A")
    summary: str = Field(description="Article summary", default="N/A")
def demonstrate_all_tools():
    """Demonstrate all ScrapeGraph tool functionalities.

    Walks through each ScrapegraphToolSpec method (smartscraper,
    markdownify, search, scrape, agentic scraper), printing the outcome of
    each call, then shows how the spec converts to LlamaIndex tools.
    Requires the SGAI_API_KEY environment variable.
    """
    # Initialize the tool spec (will use SGAI_API_KEY from environment)
    scrapegraph_tool = ScrapegraphToolSpec()

    print("🚀 Complete ScrapeGraph Tools Demonstration")
    print("=" * 47)

    # 1. SmartScraper Example
    print("\n🤖 1. SmartScraper - AI-Powered Data Extraction")
    print("-" * 50)
    # NOTE(review): smartscraper/scrape/agentic calls signal failure via an
    # "error" key, while markdownify/search return strings checked for
    # "failed" — hence the differing error checks in each section below.
    try:
        response = scrapegraph_tool.scrapegraph_smartscraper(
            prompt="Extract the main headline, key points, and any important information from this page",
            url="https://example.com/",
        )
        if "error" not in response:
            print("✅ SmartScraper extraction successful:")
            print(f"Result: {str(response)[:300]}...")
        else:
            print(f"❌ SmartScraper error: {response['error']}")
    except Exception as e:
        print(f"❌ SmartScraper exception: {str(e)}")

    # 2. Markdownify Example
    print("\n📄 2. Markdownify - Content to Markdown Conversion")
    print("-" * 54)
    try:
        response = scrapegraph_tool.scrapegraph_markdownify(
            url="https://example.com/",
        )
        if "failed" not in str(response).lower():
            print("✅ Markdownify conversion successful:")
            print(f"Markdown preview: {response[:200]}...")
            print(f"Total length: {len(response)} characters")
        else:
            print(f"❌ Markdownify error: {response}")
    except Exception as e:
        print(f"❌ Markdownify exception: {str(e)}")

    # 3. Search Example
    print("\n🔍 3. Search - Web Search Functionality")
    print("-" * 39)
    try:
        response = scrapegraph_tool.scrapegraph_search(
            query="ScrapeGraph AI web scraping tools",
            max_results=3
        )
        if "failed" not in str(response).lower():
            print("✅ Search successful:")
            print(f"Search results: {str(response)[:300]}...")
        else:
            print(f"❌ Search error: {response}")
    except Exception as e:
        print(f"❌ Search exception: {str(e)}")

    # 4. Basic Scrape Example
    print("\n🌐 4. Basic Scrape - HTML Content Extraction")
    print("-" * 46)
    try:
        response = scrapegraph_tool.scrapegraph_scrape(
            url="https://httpbin.org/html",
            render_heavy_js=False,
            headers={"User-Agent": "ScrapeGraph-Demo/1.0"}
        )
        if "error" not in response:
            html_content = response.get("html", "")
            print("✅ Basic scrape successful:")
            print(f"HTML length: {len(html_content):,} characters")
            print(f"Request ID: {response.get('request_id', 'N/A')}")
            # Extract title if present (naive substring scan, demo only)
            if "<title>" in html_content:
                title_start = html_content.find("<title>") + 7
                title_end = html_content.find("</title>", title_start)
                if title_end != -1:
                    title = html_content[title_start:title_end]
                    print(f"Page title: {title}")
        else:
            print(f"❌ Basic scrape error: {response['error']}")
    except Exception as e:
        print(f"❌ Basic scrape exception: {str(e)}")

    # 5. Agentic Scraper Example
    print("\n🤖 5. Agentic Scraper - Intelligent Navigation")
    print("-" * 47)
    try:
        response = scrapegraph_tool.scrapegraph_agentic_scraper(
            prompt="Navigate through this website and find any contact information, company details, or important announcements. Look in multiple sections if needed.",
            url="https://example.com/",
        )
        if "error" not in response:
            print("✅ Agentic scraper successful:")
            if isinstance(response, dict):
                for key, value in response.items():
                    print(f"  {key}: {str(value)[:100]}...")
            else:
                print(f"Navigation result: {str(response)[:300]}...")
        else:
            print(f"❌ Agentic scraper error: {response['error']}")
    except Exception as e:
        print(f"❌ Agentic scraper exception: {str(e)}")

    # 6. Integration with LlamaIndex Agent Example
    print("\n🔗 6. LlamaIndex Agent Integration")
    print("-" * 35)
    try:
        # Create tools list
        tools = scrapegraph_tool.to_tool_list()
        print(f"✅ Created {len(tools)} tools for LlamaIndex integration:")
        for tool in tools:
            print(f"  • {tool.metadata.name}: {tool.metadata.description[:60]}...")
        print("\n💡 These tools can be used with LlamaIndex agents:")
        print("  from llama_index.core.agent import ReActAgent")
        print("  agent = ReActAgent.from_tools(tools, llm=your_llm)")
    except Exception as e:
        print(f"❌ Integration setup error: {str(e)}")

    # Performance and Usage Summary
    print("\n📊 Tool Comparison Summary")
    print("-" * 28)
    print("SmartScraper: 🎯 Best for structured data extraction with AI")
    print("Markdownify: 📄 Best for content analysis and documentation")
    print("Search: 🔍 Best for finding information across the web")
    print("Basic Scrape: ⚡ Fastest for simple HTML content extraction")
    print("Agentic Scraper: 🧠 Most powerful for complex navigation tasks")
    print("\n🎯 Use Case Recommendations:")
    print("• Data Mining: SmartScraper + Agentic Scraper")
    print("• Content Analysis: Markdownify + SmartScraper")
    print("• Research: Search + SmartScraper")
    print("• Monitoring: Basic Scrape (fastest)")
    print("• Complex Sites: Agentic Scraper")
    print("\n📚 Next Steps:")
    print("• Set SGAI_API_KEY environment variable")
    print("• Choose the right tool for your use case")
    print("• Combine tools for comprehensive workflows")
    print("• Integrate with LlamaIndex agents for advanced automation")
def main():
    """Entry point: run every ScrapeGraph tool demonstration in sequence."""
    demonstrate_all_tools()


if __name__ == "__main__":
    main()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-scrapegraph/examples/complete-scrapegraph-examples.py",
"license": "MIT License",
"lines": 151,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-scrapegraph/examples/scrapegraph-agentic-scraper-llama-index.py | """
Example demonstrating ScrapeGraph Agentic Scraper integration with LlamaIndex.
This example shows how to use the ScrapegraphToolSpec for agentic scraping
that can navigate and interact with websites intelligently.
"""
from typing import List
from pydantic import BaseModel, Field
from llama_index.tools.scrapegraph import ScrapegraphToolSpec
class ProductInfo(BaseModel):
    """Schema for representing product information."""

    # Field descriptions are part of the extraction contract sent to the API.
    name: str = Field(description="Product name")
    price: str = Field(default="N/A", description="Product price")
    description: str = Field(default="N/A", description="Product description")
    features: List[str] = Field(
        default_factory=list, description="List of key features"
    )
class ProductsListSchema(BaseModel):
    """Schema for representing multiple products."""

    products: List[ProductInfo] = Field(
        description="List of products found",
    )
class ContactInfo(BaseModel):
    """Schema for contact information."""

    # Each field falls back to "N/A" when the scraper finds nothing.
    email: str = Field(default="N/A", description="Contact email")
    phone: str = Field(default="N/A", description="Contact phone")
    address: str = Field(default="N/A", description="Contact address")
def main() -> None:
    """Demonstrate agentic scraping functionality for complex navigation tasks."""
    # Initialize the tool spec (will use SGAI_API_KEY from environment)
    scrapegraph_tool = ScrapegraphToolSpec()
    print("🤖 ScrapeGraph Agentic Scraper Examples")
    print("=" * 43)
    # Example 1: Navigate and extract product information
    # Each example follows the same pattern: call the tool, branch on the
    # presence of an "error" key in the response dict, and print a summary.
    print("\n1. Extracting product information with navigation:")
    try:
        response = scrapegraph_tool.scrapegraph_agentic_scraper(
            prompt="Navigate to the products or services section and extract information about the main offerings, including names, features, and any pricing information available.",
            url="https://scrapegraphai.com/",
            schema=ProductsListSchema,
        )
        if "error" not in response:
            print("✅ Successfully extracted product data using agentic navigation:")
            if "products" in response:
                for product in response["products"]:
                    print(f" • Product: {product.get('name', 'N/A')}")
                    print(f" Price: {product.get('price', 'N/A')}")
                    # Truncate long descriptions to keep console output readable.
                    print(f" Description: {product.get('description', 'N/A')[:100]}...")
                    if product.get('features'):
                        print(f" Features: {', '.join(product['features'][:3])}...")
                    print()
            else:
                print(f"Response: {response}")
        else:
            print(f"❌ Error: {response['error']}")
    except Exception as e:
        print(f"❌ Exception: {str(e)}")
    # Example 2: Navigate to find contact information
    print("\n2. Finding contact information through navigation:")
    try:
        response = scrapegraph_tool.scrapegraph_agentic_scraper(
            prompt="Navigate through the website to find contact information, including email addresses, phone numbers, and physical addresses. Look in contact pages, footer, about sections, etc.",
            url="https://scrapegraphai.com/",
            schema=ContactInfo,
        )
        if "error" not in response:
            print("✅ Successfully found contact information:")
            print(f" Email: {response.get('email', 'Not found')}")
            print(f" Phone: {response.get('phone', 'Not found')}")
            print(f" Address: {response.get('address', 'Not found')}")
        else:
            print(f"❌ Error: {response['error']}")
    except Exception as e:
        print(f"❌ Exception: {str(e)}")
    # Example 3: Complex navigation for documentation (no schema: free-form result)
    print("\n3. Navigating documentation to find specific information:")
    try:
        response = scrapegraph_tool.scrapegraph_agentic_scraper(
            prompt="Navigate to the documentation or help section and find information about API usage, getting started guides, or tutorials. Extract the main steps and any code examples mentioned.",
            url="https://scrapegraphai.com/",
        )
        if "error" not in response:
            print("✅ Successfully navigated and extracted documentation:")
            if isinstance(response, dict):
                for key, value in response.items():
                    print(f" {key}: {str(value)[:200]}...")
            else:
                print(f"Response: {str(response)[:500]}...")
        else:
            print(f"❌ Error: {response['error']}")
    except Exception as e:
        print(f"❌ Exception: {str(e)}")
    # Example 4: Multi-step navigation for comprehensive data
    print("\n4. Multi-step navigation for comprehensive site analysis:")
    try:
        response = scrapegraph_tool.scrapegraph_agentic_scraper(
            prompt="Perform a comprehensive analysis of this website by navigating through different sections. Extract: 1) Main value proposition, 2) Key features or services, 3) Pricing information if available, 4) Company background, 5) Contact or support options. Navigate to multiple pages as needed.",
            url="https://scrapegraphai.com/",
        )
        if "error" not in response:
            print("✅ Successfully completed comprehensive site analysis:")
            if isinstance(response, dict):
                for key, value in response.items():
                    print(f" {key}: {str(value)[:150]}...")
            else:
                print(f"Analysis: {str(response)[:600]}...")
        else:
            print(f"❌ Error: {response['error']}")
    except Exception as e:
        print(f"❌ Exception: {str(e)}")
    # Example 5: E-commerce style navigation
    print("\n5. E-commerce style product discovery:")
    try:
        response = scrapegraph_tool.scrapegraph_agentic_scraper(
            prompt="Navigate this website as if it were an e-commerce site. Look for product catalogs, pricing pages, feature comparisons, or service offerings. If you find multiple items or services, list them with their characteristics.",
            url="https://example.com/",
        )
        if "error" not in response:
            print("✅ Successfully performed e-commerce style navigation:")
            print(f"Discovery results: {str(response)[:400]}...")
        else:
            print(f"❌ Error: {response['error']}")
    except Exception as e:
        print(f"❌ Exception: {str(e)}")
    print("\n📚 Tips:")
    print("• Set your SGAI_API_KEY environment variable")
    print("• Agentic scraper can navigate multiple pages and follow links")
    print("• Use detailed prompts to guide the navigation behavior")
    print("• Combine with schemas for structured data extraction")
    print("• Great for complex sites requiring multi-step interaction")
    print("• More powerful but slower than basic scraping methods")


if __name__ == "__main__":
    main()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-scrapegraph/examples/scrapegraph-agentic-scraper-llama-index.py",
"license": "MIT License",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-scrapegraph/examples/scrapegraph-scrape-llama-index.py | """
Example demonstrating ScrapeGraph basic scraping integration with LlamaIndex.
This example shows how to use the ScrapegraphToolSpec for basic HTML scraping
with various options like JavaScript rendering and custom headers.
"""
from llama_index.tools.scrapegraph import ScrapegraphToolSpec
def main() -> None:
    """Demonstrate basic scraping functionality with various options."""
    # Initialize the tool spec (will use SGAI_API_KEY from environment)
    scrapegraph_tool = ScrapegraphToolSpec()
    print("🌐 ScrapeGraph Basic Scraping Examples")
    print("=" * 42)
    # Example 1: Basic HTML scraping — the response dict carries "html" and
    # "request_id" on success, or an "error" key on failure.
    print("\n1. Basic HTML scraping:")
    try:
        response = scrapegraph_tool.scrapegraph_scrape(
            url="https://example.com/",
        )
        if "error" not in response:
            html_content = response.get("html", "")
            print(f"✅ Successfully scraped {len(html_content):,} characters of HTML")
            print(f"Request ID: {response.get('request_id', 'N/A')}")
            # Show a preview of the HTML
            if html_content:
                preview = html_content[:200].replace('\n', ' ').strip()
                print(f"HTML Preview: {preview}...")
        else:
            print(f"❌ Error: {response['error']}")
    except Exception as e:
        print(f"❌ Exception: {str(e)}")
    # Example 2: Scraping with JavaScript rendering
    print("\n2. Scraping with JavaScript rendering enabled:")
    try:
        response = scrapegraph_tool.scrapegraph_scrape(
            url="https://httpbin.org/html",
            render_heavy_js=True
        )
        if "error" not in response:
            html_content = response.get("html", "")
            print(f"✅ Successfully scraped with JS rendering: {len(html_content):,} characters")
            print(f"Request ID: {response.get('request_id', 'N/A')}")
        else:
            print(f"❌ Error: {response['error']}")
    except Exception as e:
        print(f"❌ Exception: {str(e)}")
    # Example 3: Scraping with custom headers
    print("\n3. Scraping with custom headers:")
    try:
        custom_headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Accept-Language": "en-US,en;q=0.5",
            "Connection": "keep-alive"
        }
        response = scrapegraph_tool.scrapegraph_scrape(
            url="https://httpbin.org/headers",
            headers=custom_headers
        )
        if "error" not in response:
            html_content = response.get("html", "")
            print(f"✅ Successfully scraped with custom headers: {len(html_content):,} characters")
            print(f"Request ID: {response.get('request_id', 'N/A')}")
            # Check if our headers were included (httpbin.org/headers shows sent headers)
            if "Mozilla/5.0" in html_content:
                print("✅ Custom User-Agent header was successfully used")
        else:
            print(f"❌ Error: {response['error']}")
    except Exception as e:
        print(f"❌ Exception: {str(e)}")
    # Example 4: Complex scraping with multiple options
    print("\n4. Complex scraping with multiple options:")
    try:
        response = scrapegraph_tool.scrapegraph_scrape(
            url="https://scrapegraphai.com/",
            render_heavy_js=False,
            headers={
                "User-Agent": "ScrapeGraph-LlamaIndex-Bot/1.0",
                "Accept": "text/html,application/xhtml+xml"
            }
        )
        if "error" not in response:
            html_content = response.get("html", "")
            print(f"✅ Successfully performed complex scraping: {len(html_content):,} characters")
            print(f"Request ID: {response.get('request_id', 'N/A')}")
            # Extract some basic info from the HTML
            # Naive <title> extraction: find the tag boundaries by string search.
            if "<title>" in html_content:
                title_start = html_content.find("<title>") + 7
                title_end = html_content.find("</title>", title_start)
                if title_end != -1:
                    title = html_content[title_start:title_end]
                    print(f"Page Title: {title}")
        else:
            print(f"❌ Error: {response['error']}")
    except Exception as e:
        print(f"❌ Exception: {str(e)}")
    print("\n📚 Tips:")
    print("• Set your SGAI_API_KEY environment variable")
    print("• Use render_heavy_js=True for dynamic content that requires JavaScript")
    print("• Custom headers help avoid blocking and improve compatibility")
    print("• Check response metadata like request_id for tracking")
    print("• Basic scraping is faster than SmartScraper for simple HTML extraction")


if __name__ == "__main__":
    main()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-scrapegraph/examples/scrapegraph-scrape-llama-index.py",
"license": "MIT License",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-scrapegraph/tests/test_integration.py | """
Integration tests for ScrapeGraph tool specification.
These tests verify that the tool integrates properly with LlamaIndex
and can be used in real-world scenarios.
"""
import os
from unittest.mock import Mock, patch
from typing import List
import pytest
from pydantic import BaseModel, Field
from llama_index.tools.scrapegraph import ScrapegraphToolSpec
class IntegrationTestSchema(BaseModel):
    """Test schema for integration testing."""

    title: str = Field(description="Page title")
    content: str = Field(description="Main content")
    links: List[str] = Field(default_factory=list, description="Important links")
class TestLlamaIndexIntegration:
    """Test integration with LlamaIndex core components."""

    @pytest.fixture
    def mock_tool_spec(self):
        """Create a mocked tool spec for integration testing."""
        # Patch the scrapegraph Client class so construction needs no real
        # API key or network access; hand back both spec and mock client.
        with patch("llama_index.tools.scrapegraph.base.Client") as mock_client_class:
            mock_client = Mock()
            mock_client_class.from_env.return_value = mock_client
            tool_spec = ScrapegraphToolSpec()
            tool_spec.client = mock_client
            return tool_spec, mock_client

    def test_tool_conversion_to_llamaindex_tools(self, mock_tool_spec):
        """Test that tools are properly converted to LlamaIndex format."""
        tool_spec, mock_client = mock_tool_spec
        tools = tool_spec.to_tool_list()
        # Verify all spec functions are converted
        assert len(tools) == len(ScrapegraphToolSpec.spec_functions)
        # Verify each tool has proper metadata
        for tool in tools:
            assert hasattr(tool, "metadata")
            assert hasattr(tool.metadata, "name")
            assert hasattr(tool.metadata, "description")
            assert tool.metadata.name in ScrapegraphToolSpec.spec_functions
        # Verify tools can be called
        for tool in tools:
            assert hasattr(tool, "call")
            assert callable(tool.call)

    def test_tool_metadata_and_descriptions(self, mock_tool_spec):
        """Test that tools have proper metadata and descriptions."""
        tool_spec, _ = mock_tool_spec
        tools = tool_spec.to_tool_list()
        # Expected keyword phrases per tool name; matched loosely below.
        expected_descriptions = {
            "scrapegraph_smartscraper": "Perform intelligent web scraping",
            "scrapegraph_markdownify": "Convert webpage content to markdown",
            "scrapegraph_search": "Perform a search query",
            "scrapegraph_scrape": "Perform basic HTML scraping",
            "scrapegraph_agentic_scraper": "Perform agentic web scraping",
        }
        for tool in tools:
            tool_name = tool.metadata.name
            assert tool_name in expected_descriptions
            # Check that description contains expected keywords
            # (any single keyword suffices — a deliberately loose match).
            description_lower = tool.metadata.description.lower()
            expected_keywords = expected_descriptions[tool_name].lower()
            assert any(
                keyword in description_lower for keyword in expected_keywords.split()
            )

    @patch.dict(os.environ, {"SGAI_API_KEY": "test-key"})
    def test_environment_variable_initialization(self):
        """Test initialization using environment variables."""
        with patch("llama_index.tools.scrapegraph.base.Client") as mock_client_class:
            mock_client = Mock()
            mock_client_class.from_env.return_value = mock_client
            # This should not raise an exception
            tool_spec = ScrapegraphToolSpec()
            # Verify that from_env was called
            mock_client_class.from_env.assert_called_once()

    def test_error_handling_in_tool_execution(self, mock_tool_spec):
        """Test that tools handle errors gracefully when integrated."""
        tool_spec, mock_client = mock_tool_spec
        # Mock all client methods to raise exceptions
        mock_client.smartscraper.side_effect = Exception("API Error")
        mock_client.markdownify.side_effect = Exception("Network Error")
        mock_client.search.side_effect = Exception("Search Error")
        mock_client.scrape.side_effect = Exception("Scrape Error")
        mock_client.agentic_scraper.side_effect = Exception("Navigation Error")
        # Test each method handles errors gracefully
        # Note: smartscraper/scrape/agentic_scraper report errors in a dict,
        # while markdownify/search return an error string directly.
        response1 = tool_spec.scrapegraph_smartscraper("test", "https://example.com")
        assert "error" in response1
        assert "SmartScraper failed" in response1["error"]
        response2 = tool_spec.scrapegraph_markdownify("https://example.com")
        assert "Markdownify failed" in response2
        response3 = tool_spec.scrapegraph_search("test query")
        assert "Search failed" in response3
        response4 = tool_spec.scrapegraph_scrape("https://example.com")
        assert "error" in response4
        assert "Scrape failed" in response4["error"]
        response5 = tool_spec.scrapegraph_agentic_scraper("test", "https://example.com")
        assert "error" in response5
        assert "Agentic scraper failed" in response5["error"]
class TestSchemaValidation:
    """Test Pydantic schema validation and integration."""

    @pytest.fixture
    def mock_tool_spec(self):
        """Create a mocked tool spec for schema testing."""
        with patch("llama_index.tools.scrapegraph.base.Client") as mock_client_class:
            mock_client = Mock()
            mock_client_class.from_env.return_value = mock_client
            tool_spec = ScrapegraphToolSpec()
            tool_spec.client = mock_client
            return tool_spec, mock_client

    def test_schema_integration_smartscraper(self, mock_tool_spec):
        """Test schema integration with SmartScraper."""
        tool_spec, mock_client = mock_tool_spec
        # Mock response that matches schema
        mock_response = {
            "title": "Test Page",
            "content": "Test content",
            "links": ["https://example.com/link1", "https://example.com/link2"],
        }
        mock_client.smartscraper.return_value = mock_response
        result = tool_spec.scrapegraph_smartscraper(
            prompt="Extract page info",
            url="https://example.com",
            schema=IntegrationTestSchema,
        )
        # Verify the schema was passed correctly
        # (positional args map onto website_url/user_prompt/output_schema kwargs).
        mock_client.smartscraper.assert_called_once_with(
            website_url="https://example.com",
            user_prompt="Extract page info",
            output_schema=IntegrationTestSchema,
        )
        assert result == mock_response

    def test_schema_integration_agentic_scraper(self, mock_tool_spec):
        """Test schema integration with Agentic Scraper."""
        tool_spec, mock_client = mock_tool_spec
        mock_response = {
            "title": "Navigation Result",
            "content": "Found content through navigation",
            "links": ["https://example.com/found"],
        }
        mock_client.agentic_scraper.return_value = mock_response
        result = tool_spec.scrapegraph_agentic_scraper(
            prompt="Navigate and extract",
            url="https://example.com",
            schema=IntegrationTestSchema,
        )
        mock_client.agentic_scraper.assert_called_once_with(
            website_url="https://example.com",
            user_prompt="Navigate and extract",
            output_schema=IntegrationTestSchema,
        )
        assert result == mock_response

    def test_multiple_schema_types(self, mock_tool_spec):
        """Test that different schema types are handled correctly."""
        tool_spec, mock_client = mock_tool_spec
        # Test with list of schemas
        schema_list = [IntegrationTestSchema]
        mock_client.smartscraper.return_value = {"result": "list schema test"}
        tool_spec.scrapegraph_smartscraper(
            prompt="test", url="https://example.com", schema=schema_list
        )
        mock_client.smartscraper.assert_called_with(
            website_url="https://example.com",
            user_prompt="test",
            output_schema=schema_list,
        )
        # Test with dict schema (raw JSON-schema dict rather than a model class)
        mock_client.smartscraper.reset_mock()
        dict_schema = {"type": "object", "properties": {"title": {"type": "string"}}}
        mock_client.smartscraper.return_value = {"result": "dict schema test"}
        tool_spec.scrapegraph_smartscraper(
            prompt="test", url="https://example.com", schema=dict_schema
        )
        mock_client.smartscraper.assert_called_with(
            website_url="https://example.com",
            user_prompt="test",
            output_schema=dict_schema,
        )
class TestParameterValidation:
    """Test parameter validation and handling."""

    @pytest.fixture
    def mock_tool_spec(self):
        """Create a mocked tool spec for parameter testing."""
        with patch("llama_index.tools.scrapegraph.base.Client") as mock_client_class:
            mock_client = Mock()
            mock_client_class.from_env.return_value = mock_client
            tool_spec = ScrapegraphToolSpec()
            tool_spec.client = mock_client
            return tool_spec, mock_client

    def test_url_parameter_validation(self, mock_tool_spec):
        """Test that URL parameters are handled correctly."""
        tool_spec, mock_client = mock_tool_spec
        # Test various URL formats
        test_urls = [
            "https://example.com",
            "http://example.com",
            "https://example.com/path",
            "https://example.com/path?param=value",
            "https://subdomain.example.com",
        ]
        mock_client.scrape.return_value = {"html": "test"}
        for url in test_urls:
            # Reset between iterations so assert_called_once_with holds per URL.
            mock_client.scrape.reset_mock()
            tool_spec.scrapegraph_scrape(url=url)
            mock_client.scrape.assert_called_once_with(
                website_url=url, render_heavy_js=False
            )

    def test_headers_parameter_handling(self, mock_tool_spec):
        """Test custom headers parameter handling."""
        tool_spec, mock_client = mock_tool_spec
        headers = {
            "User-Agent": "Test Agent",
            "Accept": "text/html",
            "Authorization": "Bearer token",
            "Custom-Header": "custom-value",
        }
        mock_client.scrape.return_value = {"html": "test"}
        tool_spec.scrapegraph_scrape(url="https://example.com", headers=headers)
        mock_client.scrape.assert_called_once_with(
            website_url="https://example.com", render_heavy_js=False, headers=headers
        )

    def test_boolean_parameter_handling(self, mock_tool_spec):
        """Test boolean parameter handling."""
        tool_spec, mock_client = mock_tool_spec
        mock_client.scrape.return_value = {"html": "test"}
        # Test with render_heavy_js=True
        tool_spec.scrapegraph_scrape(url="https://example.com", render_heavy_js=True)
        mock_client.scrape.assert_called_with(
            website_url="https://example.com", render_heavy_js=True
        )
        # Test with render_heavy_js=False
        mock_client.scrape.reset_mock()
        tool_spec.scrapegraph_scrape(url="https://example.com", render_heavy_js=False)
        mock_client.scrape.assert_called_with(
            website_url="https://example.com", render_heavy_js=False
        )

    def test_kwargs_parameter_passing(self, mock_tool_spec):
        """Test that kwargs are passed through correctly."""
        tool_spec, mock_client = mock_tool_spec
        mock_client.smartscraper.return_value = {"result": "test"}
        # Test kwargs with SmartScraper — arbitrary extras must be forwarded
        # verbatim alongside the standard arguments.
        tool_spec.scrapegraph_smartscraper(
            prompt="test",
            url="https://example.com",
            timeout=30,
            retries=3,
            custom_param="value",
        )
        mock_client.smartscraper.assert_called_once_with(
            website_url="https://example.com",
            user_prompt="test",
            output_schema=None,
            timeout=30,
            retries=3,
            custom_param="value",
        )
class TestRealWorldScenarios:
    """Test scenarios that simulate real-world usage."""

    @pytest.fixture
    def mock_tool_spec(self):
        """Create a mocked tool spec for real-world testing."""
        with patch("llama_index.tools.scrapegraph.base.Client") as mock_client_class:
            mock_client = Mock()
            mock_client_class.from_env.return_value = mock_client
            tool_spec = ScrapegraphToolSpec()
            tool_spec.client = mock_client
            return tool_spec, mock_client

    def test_e_commerce_product_extraction(self, mock_tool_spec):
        """Test extracting product information from e-commerce sites."""
        tool_spec, mock_client = mock_tool_spec
        # Mock e-commerce product data
        mock_response = {
            "products": [
                {"name": "Laptop", "price": "$999", "rating": "4.5/5"},
                {"name": "Mouse", "price": "$29", "rating": "4.2/5"},
            ]
        }
        mock_client.smartscraper.return_value = mock_response
        result = tool_spec.scrapegraph_smartscraper(
            prompt="Extract product names, prices, and ratings from this e-commerce page",
            url="https://shop.example.com/laptops",
        )
        assert result == mock_response
        mock_client.smartscraper.assert_called_once()

    def test_news_article_summarization(self, mock_tool_spec):
        """Test extracting and summarizing news articles."""
        tool_spec, mock_client = mock_tool_spec
        # Mock news article markdown (content is at column 0 on purpose:
        # it is the literal markdown payload, not indented source).
        mock_markdown = """# Breaking News: AI Advances
## Summary
Artificial Intelligence has made significant breakthroughs...
## Key Points
- New neural network architecture
- 30% improvement in efficiency
- Applications in healthcare
"""
        mock_client.markdownify.return_value = mock_markdown
        result = tool_spec.scrapegraph_markdownify(
            url="https://news.example.com/ai-breakthrough"
        )
        assert result == mock_markdown
        assert "# Breaking News" in result
        assert "Key Points" in result

    def test_complex_site_navigation(self, mock_tool_spec):
        """Test complex site navigation with agentic scraper."""
        tool_spec, mock_client = mock_tool_spec
        # Mock complex navigation result
        mock_response = {
            "contact_info": {
                "email": "contact@company.com",
                "phone": "+1-555-0123",
                "address": "123 Tech Street, Silicon Valley",
            },
            "navigation_path": ["Home", "About", "Contact", "Support"],
        }
        mock_client.agentic_scraper.return_value = mock_response
        result = tool_spec.scrapegraph_agentic_scraper(
            prompt="Navigate through the website to find comprehensive contact information",
            url="https://company.example.com",
        )
        assert result == mock_response
        assert "contact_info" in result
        assert "navigation_path" in result

    def test_multi_step_workflow(self, mock_tool_spec):
        """Test a multi-step workflow combining different tools."""
        tool_spec, mock_client = mock_tool_spec
        # Step 1: Search for relevant pages
        mock_client.search.return_value = "Found relevant pages about Python tutorials"
        search_result = tool_spec.scrapegraph_search(
            query="Python programming tutorials beginner", max_results=5
        )
        # Step 2: Scrape the found page
        mock_client.scrape.return_value = {
            "html": "<html><head><title>Python Tutorial</title></head><body>Learn Python...</body></html>",
            "request_id": "req-123",
        }
        scrape_result = tool_spec.scrapegraph_scrape(
            url="https://python-tutorial.example.com"
        )
        # Step 3: Convert to markdown for analysis
        mock_client.markdownify.return_value = (
            "# Python Tutorial\n\nLearn Python programming..."
        )
        markdown_result = tool_spec.scrapegraph_markdownify(
            url="https://python-tutorial.example.com"
        )
        # Verify all steps executed correctly
        assert "Python tutorials" in search_result
        assert "html" in scrape_result
        assert "# Python Tutorial" in markdown_result
        # Verify all client methods were called
        mock_client.search.assert_called_once()
        mock_client.scrape.assert_called_once()
        mock_client.markdownify.assert_called_once()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-scrapegraph/tests/test_integration.py",
"license": "MIT License",
"lines": 351,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/llama_index/memory/bedrock_agentcore/base.py | from datetime import datetime, timezone
from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel, Field, model_serializer
import boto3
from botocore.config import Config
from llama_index.core.async_utils import asyncio_run
from llama_index.core.memory.memory import InsertMethod
from llama_index.core.memory.types import BaseMemory
from llama_index.memory.bedrock_agentcore.utils import (
convert_messages_to_event_payload,
convert_messages_to_string,
convert_memory_to_system_message,
convert_events_to_messages,
convert_memory_to_user_message,
)
from llama_index.core.memory import BaseMemory
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.base.llms.types import ChatMessage, MessageRole
class BaseAgentCoreMemory(BaseMemory):
"""Base class for Bedrock Agent Core Memory."""
_config: Any = PrivateAttr()
_client: Any = PrivateAttr()
_boto_client_kwargs: Any = PrivateAttr()
def __init__(self, client: Any) -> None:
super().__init__()
if client is not None:
self._client = client
def create_event(
self,
memory_id: str,
actor_id: str,
messages: List[ChatMessage],
session_id: str,
) -> None:
"""
Create an event in Bedrock Agent Core memory.
Args:
memory_id (str): The memory ID.
actor_id (str): The actor ID.
messages (List[ChatMessage]): The list of chat messages to add as an event.
session_id (str): The session ID.
Returns:
None
"""
if self._client is None:
raise ValueError("Client is not initialized")
if len(messages) == 0:
raise ValueError("The messages field cannot be empty")
payload = convert_messages_to_event_payload(messages)
if payload:
response = self._client.create_event(
memoryId=memory_id,
actorId=actor_id,
sessionId=session_id,
payload=payload,
eventTimestamp=datetime.now(timezone.utc),
)
event_id = response["event"]["eventId"]
if not event_id:
raise RuntimeError("Bedrock AgentCore did not return an event ID")
def list_events(
self, memory_id: str, session_id: str, actor_id: str
) -> List[ChatMessage]:
"""
List events for a given memory ID, session ID, and actor ID.
Args:
memory_id (str): The memory ID.
session_id (str): The session ID.
actor_id (str): The actor ID.
Returns:
List[ChatMessage]: A list of chat messages representing the events.
"""
def fetch_messages(max_results: int, next_token: str = None) -> tuple:
response = self._client.list_events(
memoryId=memory_id,
sessionId=session_id,
actorId=actor_id,
includePayloads=True,
maxResults=max_results,
**({"nextToken": next_token} if next_token else {}),
)
messages = convert_events_to_messages(list(reversed(response["events"])))
return messages, response.get("nextToken")
def has_user_message(messages) -> bool:
return any(msg.role == MessageRole.USER for msg in messages)
initial_max_results = 20
# If user is not the first message, then we need to find the closest User message to construct the oldest conversation in the batch
iterative_max_results = 3
all_messages = []
found_user = False
next_token = None
# Initial fetch with larger batch
messages, next_token = fetch_messages(initial_max_results)
all_messages.extend(messages)
if len(messages) < 0:
return []
# Check if first message is a USER msg. If it's not, some LLMs will throw an exception.
elif messages[0].role == MessageRole.USER:
found_user = True
found_user = has_user_message(messages)
# Keep fetching until we find a user message
while not found_user and next_token:
messages, next_token = fetch_messages(iterative_max_results, next_token)
if has_user_message(messages):
found_user = True
all_messages[:0] = messages
# Remove leading non-user messages
while all_messages[0].role != MessageRole.USER:
all_messages.pop(0)
return all_messages
def list_raw_events(
self, memory_id: str, session_id: str, actor_id: str
) -> List[Dict[str, Any]]:
"""
List raw events for a given memory ID, session ID, and actor ID.
Args:
memory_id (str): The memory ID.
session_id (str): The session ID.
actor_id (str): The actor ID.
Returns:
List[Dict[str, Any]]: A list of raw event dictionaries.
"""
def fetch_raw_events(max_results: int, next_token: str = None) -> tuple:
response = self._client.list_events(
memoryId=memory_id,
sessionId=session_id,
actorId=actor_id,
includePayloads=True,
maxResults=max_results,
**({"nextToken": next_token} if next_token else {}),
)
events = response.get("events", [])
return events, response.get("nextToken")
initial_max_results = 20
iterative_max_results = 3
all_events, next_token = fetch_raw_events(initial_max_results)
while next_token:
events, next_token = fetch_raw_events(iterative_max_results, next_token)
all_events.extend(events)
return all_events
def list_memory_records(
self,
memory_id: str,
memory_strategy_id: str,
namespace: str = "/",
max_results: int = 20,
) -> List[Dict[str, Any]]:
"""
List memory records for a given memory ID and namespace.
Args:
memory_id (str): The memory ID.
memory_strategy_id (str): The memory strategy ID. Used for long-term memory strategies.
namespace (str): The namespace for memory records.
max_results (int): Maximum number of memory records to retrieve in each batch.
Returns:
List[Dict[str, Any]]: A list of memory record summaries.
"""
if self._client is None:
raise ValueError("Client is not initialized.")
def fetch_memory_records(max_results: int, next_token: str = None) -> tuple:
response = self._client.list_memory_records(
memoryId=memory_id,
namespace=namespace,
memoryStrategyId=memory_strategy_id,
maxResults=max_results,
**{"nextToken": next_token} if next_token else {},
)
messages = response["memoryRecordSummaries"]
return messages, response.get("nextToken")
all_memory_records = []
initial_max_results = max_results
iterative_max_results = 3
initial_messages, next_token = fetch_memory_records(initial_max_results)
all_memory_records.extend(initial_messages)
while next_token:
messages, next_token = fetch_memory_records(
iterative_max_results, next_token
)
all_memory_records.extend(messages)
return all_memory_records
def retrieve_memories(
self,
memory_id: str,
search_criteria: Dict[str, Any],
max_results: int = 20,
namespace: Optional[str] = "/",
) -> List[Dict[str, Any]]:
"""
Retrieve memory records based on search criteria.
Args:
memory_id (str): The memory ID.
search_criteria (Dict[str, Any]): The search criteria for retrieving memories.
max_results (int): Maximum number of memory records to retrieve.
namespace (Optional[str]): The namespace for memory records.
Returns:
List[Dict[str, Any]]: A list of memory record contents.
"""
response = self._client.retrieve_memory_records(
memoryId=memory_id,
namespace=namespace,
searchCriteria=search_criteria,
maxResults=max_results,
)
memmory_record_summaries = response["memoryRecordSummaries"]
memory_content = []
for summary in memmory_record_summaries:
memory_content.append(summary["content"])
return memory_content
def list_sessions(
    self,
    memory_id: str,
    actor_id: str,
    max_results: int = 20,
) -> List[str]:
    """
    List session IDs for a given memory ID and actor ID.

    Args:
        memory_id (str): The memory ID.
        actor_id (str): The actor ID.
        max_results (int): Maximum number of sessions to retrieve in the first batch.

    Returns:
        List[str]: A list of session IDs.
    """
    if self._client is None:
        raise ValueError("Client is not initialized.")

    def fetch_sessions(page_size: int, next_token: Optional[str] = None) -> tuple:
        """Fetch one page of sessions; return (session_ids, next_token).

        Note: the previous annotation claimed ``Dict`` but a tuple has
        always been returned.
        """
        response = self._client.list_sessions(
            memoryId=memory_id,
            actorId=actor_id,
            maxResults=page_size,
            **{"nextToken": next_token} if next_token else {},
        )
        summaries = response["sessionSummaries"]
        return [session["sessionId"] for session in summaries], response.get(
            "nextToken"
        )

    all_session_ids = []
    # First page honors the caller's max_results; follow-up pages use a small
    # fixed size (original behavior preserved; presumably to keep incremental
    # payloads small — TODO confirm).
    session_ids, next_token = fetch_sessions(max_results)
    all_session_ids.extend(session_ids)
    iterative_max_results = 3
    while next_token:
        ids, next_token = fetch_sessions(iterative_max_results, next_token)
        all_session_ids.extend(ids)
    return all_session_ids
def delete_events(
    self,
    memory_id: str,
    session_id: str,
    actor_id: str,
) -> Dict[str, Any]:
    """
    Delete all events for a given memory ID, session ID, and actor ID.

    Args:
        memory_id (str): The memory ID.
        session_id (str): The session ID.
        actor_id (str): The actor ID.

    Returns:
        Dict[str, Any]: A dictionary containing the IDs of deleted events.
    """
    if self._client is None:
        raise ValueError("Client is not initialized.")
    raw_events = self.list_raw_events(
        memory_id=memory_id,
        session_id=session_id,
        actor_id=actor_id,
    )
    removed_ids = []
    for raw_event in raw_events:
        identifier = raw_event.get("eventId")
        # Entries without an event ID cannot be addressed for deletion.
        if not identifier:
            continue
        self._client.delete_event(
            memoryId=memory_id,
            sessionId=session_id,
            actorId=actor_id,
            eventId=identifier,
        )
        removed_ids.append(identifier)
    return {"deletedEventIds": removed_ids}
def delete_memory_records(
    self,
    memory_id: str,
    memory_strategy_id: str,
    namespace: str,
) -> Dict[str, Any]:
    """
    Delete all memory records for a given memory ID and namespace.

    Args:
        memory_id (str): The memory ID.
        memory_strategy_id (str): The memory strategy ID. Used for long-term memory strategies.
        namespace (str): The namespace for memory records.

    Returns:
        Dict[str, Any]: A dictionary containing the IDs of deleted memory records.
    """
    if self._client is None:
        raise ValueError("Client is not initialized.")
    summaries = self.list_memory_records(
        memory_id=memory_id,
        namespace=namespace,
        memory_strategy_id=memory_strategy_id,
    )
    removed_ids = []
    # Delete one record at a time; see batch_delete_memory_records for the
    # chunked variant.
    for summary in summaries:
        record_id = summary["memoryRecordId"]
        self._client.delete_memory_record(
            memoryId=memory_id,
            memoryRecordId=record_id,
        )
        removed_ids.append(record_id)
    return {"deletedMemoryRecordIds": removed_ids}
def batch_delete_memory_records(
    self,
    memory_id: str,
    memory_strategy_id: str,
    namespace: str,
    batch_size: int = 25,
) -> Dict[str, Any]:
    """
    Batch delete memory records for a given memory ID and namespace.

    Args:
        memory_id (str): The memory ID.
        memory_strategy_id (str): The memory strategy ID. Used for long-term memory strategies.
        namespace (str): The namespace for memory records.
        batch_size (int): The batch size for deletion.

    Returns:
        Dict[str, Any]: A dictionary containing details of successful and failed deletions.
    """
    if self._client is None:
        raise ValueError("Client is not initialized.")
    summaries = self.list_memory_records(
        memory_id=memory_id,
        namespace=namespace,
        memory_strategy_id=memory_strategy_id,
    )
    pending = [{"memoryRecordId": s["memoryRecordId"]} for s in summaries]
    succeeded = []
    failed = []
    # One delete request per chunk of ``batch_size`` record IDs.
    for start in range(0, len(pending), batch_size):
        chunk = pending[start : start + batch_size]
        result = self._client.batch_delete_memory_records(
            memoryId=memory_id,
            records=chunk,
        )
        succeeded.extend(result.get("successfulRecords", []))
        failed.extend(result.get("failedRecords", []))
    return {
        "successfulRecords": succeeded,
        "failedRecords": failed,
    }
def delete_all_memory_for_session(
    self,
    memory_id: str,
    actor_id: str,
    session_id: str,
    namespace: str,
    memory_strategy_id: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Delete all memory (events and memory records) for a given session.

    Args:
        memory_id (str): The memory ID.
        actor_id (str): The actor ID.
        session_id (str): The session ID.
        namespace (str): The namespace for memory records.
        memory_strategy_id (Optional[str]): The memory strategy ID for memory records. Use if long-term memory strategy is used.

    Returns:
        Dict[str, Any]: A dictionary containing details of deleted events and memory records.
    """
    if self._client is None:
        raise ValueError("Client is not initialized.")
    # Events are always removed for the session.
    event_result = self.delete_events(
        memory_id=memory_id,
        session_id=session_id,
        actor_id=actor_id,
    )
    summary: Dict[str, Any] = {
        "deletedEvents": event_result.get("deletedEventIds", []),
    }
    # Long-term memory records are only removed when a strategy is supplied.
    if memory_strategy_id:
        record_result = self.batch_delete_memory_records(
            memory_id=memory_id,
            memory_strategy_id=memory_strategy_id,
            namespace=namespace,
        )
        summary["successfulDeletedMemoryRecords"] = record_result.get(
            "successfulRecords", []
        )
        summary["failedDeletedMemoryRecords"] = record_result.get(
            "failedRecords", []
        )
    return summary
class AgentCoreMemoryContext(BaseModel):
    """Identifies which actor/memory/session/namespace a memory instance targets."""

    actor_id: str
    memory_id: str
    session_id: str
    namespace: str = "/"
    memory_strategy_id: Optional[str] = None

    def get_context(self) -> Dict[str, Optional[str]]:
        """Return the non-None context fields as a plain dictionary."""
        return {
            name: value for name, value in vars(self).items() if value is not None
        }
class AgentCoreMemory(BaseAgentCoreMemory):
    """Chat memory backed by Amazon Bedrock AgentCore Memory.

    Conversation turns are stored as AgentCore events; retrieved chat history
    is augmented with long-term memory records injected into either the
    system message or the latest user message.
    """

    search_msg_limit: int = Field(
        default=5,
        description="Limit of chat history messages to use for context in search API",
    )
    insert_method: InsertMethod = Field(
        default=InsertMethod.SYSTEM,
        description="Whether to inject memory blocks into a system message or into the latest user message.",
    )
    _context: AgentCoreMemoryContext = PrivateAttr()

    def __init__(
        self,
        context: AgentCoreMemoryContext,
        # TODO: add support for InsertMethod.USER. for now default to InsertMethod.SYSTEM
        # insert_method: InsertMethod = InsertMethod.SYSTEM,
        profile_name: Optional[str] = None,
        aws_access_key_id: Optional[str] = None,
        aws_secret_access_key: Optional[str] = None,
        aws_session_token: Optional[str] = None,
        region_name: Optional[str] = None,
        api_version: Optional[str] = None,
        use_ssl: bool = True,
        verify: Optional[Union[bool, str]] = None,
        endpoint_url: Optional[str] = None,
        botocore_session: Optional[Any] = None,
        client: Optional[Any] = None,
        timeout: Optional[float] = 60.0,
        max_retries: Optional[int] = 10,
        botocore_config: Optional[Any] = None,
    ) -> None:
        """Create a memory instance, building a ``bedrock-agentcore`` client
        unless a pre-built one is supplied via ``client``.

        Args:
            context: Actor/memory/session identifiers this instance operates on.
            client: Optional pre-built boto3 ``bedrock-agentcore`` client. When
                provided, no boto3 session is created and the other AWS
                connection arguments are ignored.
            (remaining arguments are forwarded to ``boto3.Session`` /
            ``botocore.config.Config``)
        """
        boto3_user_agent_identifier = "x-client-framework:llama_index"
        session_kwargs = {
            "profile_name": profile_name,
            "region_name": region_name,
            "aws_access_key_id": aws_access_key_id,
            "aws_secret_access_key": aws_secret_access_key,
            "aws_session_token": aws_session_token,
            "botocore_session": botocore_session,
        }
        # Build the botocore config exactly once. (The previous version
        # computed this twice and wrapped the second copy in a try/except
        # ImportError that could never fire, since boto3/Config are already
        # imported at module level.)
        self._config = (
            Config(
                retries={"max_attempts": max_retries, "mode": "standard"},
                connect_timeout=timeout,
                read_timeout=timeout,
                user_agent_extra=boto3_user_agent_identifier,
            )
            if botocore_config is None
            else botocore_config
        )
        self._boto_client_kwargs = {
            "api_version": api_version,
            "use_ssl": use_ssl,
            "verify": verify,
            "endpoint_url": endpoint_url,
        }
        if client is not None:
            self._client = client
        else:
            # Only create a session when we actually need to build a client
            # (the previous version created it twice, even when a client was
            # passed in).
            session = boto3.Session(**session_kwargs)
            self._client = session.client(
                "bedrock-agentcore",
                config=self._config,
                **self._boto_client_kwargs,
            )
        # Patch the serializer *instance* so event timestamps keep
        # microsecond precision when sent to AgentCore.
        self._client._serializer._serializer._serialize_type_timestamp = (
            self._serialize_timestamp_with_microseconds
        )
        super().__init__(self._client)
        self._context = context

    @model_serializer
    def serialize_memory(self) -> Dict[str, Any]:
        """Serialize only plain fields.

        The client and context are deliberately omitted because they cause
        serialization/deserialization problems.
        """
        return {
            "search_msg_limit": self.search_msg_limit,
        }

    @classmethod
    def class_name(cls) -> str:
        """Class name."""
        return "AgentCoreMemory"

    @classmethod
    def from_defaults(cls, **kwargs: Any) -> "AgentCoreMemory":
        """Not supported; construct directly or via from_client/from_config."""
        raise NotImplementedError("Use either from_client or from_config")

    def _serialize_timestamp_with_microseconds(self, serialized, value, shape, name):
        """Serialize datetimes as epoch floats so microseconds survive.

        Non-datetime values fall back to botocore's own timestamp serializer.
        The previous implementation read the (already patched) *instance*
        attribute back as the "original" serializer, which recursed forever
        on the fallback path; the unpatched function is looked up on the
        serializer's class instead.
        """
        if isinstance(value, datetime):
            serialized[name] = value.timestamp()  # Float with microseconds
        else:
            serializer = self._client._serializer._serializer
            # The instance-level patch does not touch the class attribute.
            type(serializer)._serialize_type_timestamp(
                serializer, serialized, value, shape, name
            )

    def _add_msgs_to_client_memory(self, messages: List[ChatMessage]) -> None:
        """Add new user and assistant messages to client memory."""
        self.create_event(
            messages=messages,
            memory_id=self._context.memory_id,
            actor_id=self._context.actor_id,
            session_id=self._context.session_id,
        )

    async def aget(self, input: Optional[str] = None) -> List[ChatMessage]:
        """Return chat history with relevant long-term memories injected."""
        # The event history doubles as the retrieval query; the optional
        # *input* is appended to it before searching.
        messages = self.list_events(
            memory_id=self._context.memory_id,
            session_id=self._context.session_id,
            actor_id=self._context.actor_id,
        )
        input = convert_messages_to_string(messages, input)
        # Truncate defensively before sending the query to AgentCore.
        search_criteria = {"searchQuery": input[:10000]}
        if self._context.memory_strategy_id is not None:
            search_criteria["memoryStrategyId"] = self._context.memory_strategy_id
        memory_records = self.retrieve_memories(
            memory_id=self._context.memory_id,
            namespace=self._context.namespace,
            search_criteria=search_criteria,
        )
        if self.insert_method == InsertMethod.SYSTEM:
            if len(messages) > 0 and messages[0].role == MessageRole.SYSTEM:
                assert messages[0].content is not None
                # Merge memories into the existing system message and replace
                # it in place. (The previous version *inserted* the merged
                # copy at index 0, leaving the stale system message duplicated
                # at index 1, and passed a non-existent ``response=`` keyword.)
                messages[0] = convert_memory_to_system_message(
                    memory_records,
                    existing_system_message=messages[0],
                )
            else:
                messages.insert(0, convert_memory_to_system_message(memory_records))
        elif self.insert_method == InsertMethod.USER:
            # Find the latest user message (enumerate over the reversed list,
            # so the index counts from the end).
            session_idx = next(
                (
                    i
                    for i, msg in enumerate(reversed(messages))
                    if msg.role == MessageRole.USER
                ),
                None,
            )
            memory_content = convert_memory_to_user_message(memory_records)
            if session_idx is not None:
                # Get actual index (since we enumerated in reverse)
                actual_idx = len(messages) - 1 - session_idx
                # Update existing user message since many LLMs have issues
                # with consecutive user msgs
                final_user_content = (
                    memory_content.content + messages[actual_idx].content
                )
                messages[actual_idx] = ChatMessage(
                    content=final_user_content, role=MessageRole.USER
                )
                messages[actual_idx].blocks = [
                    *memory_content.blocks,
                    *messages[actual_idx].blocks,
                ]
            else:
                messages.append(
                    ChatMessage(content=memory_content, role=MessageRole.USER)
                )
        return messages

    async def aget_all(self) -> List[ChatMessage]:
        """Return the full event history for the configured session."""
        return self.list_events(
            memory_id=self._context.memory_id,
            session_id=self._context.session_id,
            actor_id=self._context.actor_id,
        )

    async def aput(self, message: ChatMessage) -> None:
        """Add a message to the chat store and process waterfall logic if needed."""
        # Add the message to the chat store
        self._add_msgs_to_client_memory([message])

    async def aput_messages(self, messages: List[ChatMessage]) -> None:
        """Add a list of messages to the chat store and process waterfall logic if needed."""
        # Add the messages to the chat store
        self._add_msgs_to_client_memory(messages)

    async def aset(self, messages: List[ChatMessage]) -> None:
        """Set chat history; only messages beyond the current length are written."""
        initial_chat_len = len(self.get_all())
        # Insert only new chat messages
        self._add_msgs_to_client_memory(messages[initial_chat_len:])

    # ---- Sync method wrappers ----
    def get(self, input: Optional[str] = None) -> List[ChatMessage]:
        """Get chat history."""
        return asyncio_run(self.aget(input=input))

    def get_all(self) -> List[ChatMessage]:
        """Returns all chat history."""
        return asyncio_run(self.aget_all())

    def put(self, message: ChatMessage) -> None:
        """Add message to chat history and client memory."""
        return asyncio_run(self.aput(message))

    def put_messages(self, messages: List[ChatMessage]) -> None:
        """Add messages to chat history and client memory."""
        return asyncio_run(self.aput_messages(messages))

    def set(self, messages: List[ChatMessage]) -> None:
        """Set chat history and add new messages to client memory."""
        return asyncio_run(self.aset(messages))

    def reset(self) -> None:
        """Only reset chat history."""
        # Our guidance has been to not delete memory resources in AgentCore on
        # behalf of the customer. If this changes in the future, then we can
        # implement this method.

    def get_context(self) -> Dict[str, Optional[str]]:
        """Return the context fields as a dict.

        Note: the previous annotation claimed ``AgentCoreMemoryContext`` but
        ``AgentCoreMemoryContext.get_context()`` returns a dict.
        """
        return self._context.get_context()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/llama_index/memory/bedrock_agentcore/base.py",
"license": "MIT License",
"lines": 611,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/llama_index/memory/bedrock_agentcore/utils.py | import logging
import json
from typing import Any, Dict, List, Optional
from llama_index.core.base.llms.types import ChatMessage, MessageRole
# Module-level logger for the conversion helpers below.
logger = logging.getLogger(__name__)
# Framing text placed around retrieved memory records when they are injected
# into a system or user message.
DEFAULT_INTRO_PREFERENCES = "Below are a set of relevant preferences retrieved from potentially several memory sources:"
DEFAULT_OUTRO_PREFERENCES = "This is the end of the retrieved preferences."
# Appended after the memories when they are injected into a *user* message.
DISCLAIMER_FOR_LLM = "IMPORTANT: Ignore preferences unless they conflict. Proceed to answer the following user query directly and use tools if appropriate."
# For tool calls, there is a corresponding Assistant message that has an empty text. It's needed to reconstruct the entire conversation; however,
# CreateEvent doesn't accept empty text payloads, so this placeholder is needed. We will strip it out during ListEvents so that we don't influence the Agent with random text.
EMPTY_PAYLOAD_PLACEHOLDER_TEXT = "PLACEHOLDER FOR EMPTY ASSISTANT"
def convert_memory_to_system_message(
    memory_records: List[Dict[str, Any]],
    existing_system_message: Optional[ChatMessage] = None,
) -> ChatMessage:
    """Format retrieved memory records as a SYSTEM message.

    Args:
        memory_records: Memory record payloads; each record's ``text`` field is
            rendered between the intro/outro markers.
        existing_system_message: When provided, its content up to any
            previously injected intro marker is preserved and the freshly
            formatted memories are appended after it. (The parameter was
            previously annotated as plain ``ChatMessage`` with a ``None``
            default — an implicit Optional.)

    Returns:
        ChatMessage: A SYSTEM-role message containing the formatted memories.
    """
    memories = [memory_json.get("text", "") for memory_json in memory_records]
    formatted_messages = "\n\n" + DEFAULT_INTRO_PREFERENCES + "\n"
    for memory in memories:
        formatted_messages += f"\n {memory} \n\n"
    formatted_messages += DEFAULT_OUTRO_PREFERENCES
    system_message = formatted_messages
    # If an existing system message is available, drop any previously injected
    # memory block before re-appending so repeated calls don't accumulate.
    if existing_system_message is not None:
        system_message = existing_system_message.content.split(
            DEFAULT_INTRO_PREFERENCES
        )[0]
        system_message = system_message + formatted_messages
    return ChatMessage(content=system_message, role=MessageRole.SYSTEM)
def convert_memory_to_user_message(
    memory_records: List[Dict[str, Any]],
) -> ChatMessage:
    """Format retrieved memory records as a USER message.

    Note: the previous return annotation said ``str``, but a ``ChatMessage``
    has always been returned.

    Args:
        memory_records: Memory record payloads; each record's ``text`` field is
            rendered between the intro/outro markers, followed by a disclaimer
            telling the LLM to answer the actual query directly.

    Returns:
        ChatMessage: A USER-role message containing the formatted memories.
    """
    memories = [memory_json.get("text", "") for memory_json in memory_records]
    formatted_messages = "\n\n" + DEFAULT_INTRO_PREFERENCES + "\n"
    for memory in memories:
        formatted_messages += f"\n {memory} \n\n"
    formatted_messages += f"{DEFAULT_OUTRO_PREFERENCES}\n"
    formatted_messages += f"{DISCLAIMER_FOR_LLM}\n"
    return ChatMessage(content=formatted_messages, role=MessageRole.USER)
def convert_messages_to_event_payload(
    messages: List[ChatMessage],
) -> Optional[List[Dict[str, Any]]]:
    """Convert chat messages into an AgentCore CreateEvent payload list.

    Each message becomes two payload entries: a ``blob`` carrying the
    message's ``additional_kwargs`` as JSON (tool-call metadata), followed by
    a ``conversational`` entry with the mapped role and text.

    Note: the previous return annotation said ``Dict[str, Any]``, but a list
    (or ``None``) has always been returned.

    Returns:
        The payload list, or ``None`` when a message with an unsupported role
        is encountered (previous behavior, preserved).
    """
    payload = []
    for message in messages:
        # CreateEvent rejects empty text, so substitute a placeholder that is
        # stripped back out when events are read. ``content`` may be None,
        # which the previous version crashed on.
        text = message.content or ""
        if not text.strip():
            text = EMPTY_PAYLOAD_PLACEHOLDER_TEXT
        # Map LlamaIndex roles to Bedrock Agent Core roles
        if message.role == MessageRole.USER:
            role = "USER"
        elif message.role == MessageRole.ASSISTANT:
            role = "ASSISTANT"
        elif message.role == MessageRole.TOOL:
            role = "TOOL"
        elif message.role == MessageRole.SYSTEM:
            role = "OTHER"
        else:
            logger.warning(f"Skipping unsupported message type: {message.role}")
            return None
        payload.append({"blob": json.dumps(message.additional_kwargs)})
        payload.append(
            {
                "conversational": {"content": {"text": text}, "role": role},
            }
        )
    return payload
def convert_events_to_messages(events):
    """
    Reconstruct chat messages from event payloads.

    Each message consists of:
      1. a 'blob' entry containing tool call kwargs (JSON),
      2. followed by a 'conversational' entry containing role + text.

    Malformed payloads — a trailing unpaired entry, or a pair whose second
    element has no 'conversational' key — are skipped with a warning instead
    of raising (the previous version raised IndexError/TypeError on them).
    """
    messages = []
    for event in events:
        payload = event.get("payload")
        if not payload:
            continue
        # Walk the payload in (blob, conversational) pairs; stop before a
        # trailing unpaired entry to avoid indexing past the end.
        for i in range(0, len(payload) - 1, 2):
            blob_entry = payload[i].get("blob")
            conv_entry = payload[i + 1].get("conversational")
            if conv_entry is None:
                logger.warning("Skipping payload pair without conversational entry")
                continue
            tool_call_kwargs = json.loads(blob_entry) if blob_entry else {}
            event_role = conv_entry["role"]
            block_content = conv_entry["content"]["text"]
            # Placeholder inserted at write time for empty assistant turns.
            if block_content == EMPTY_PAYLOAD_PLACEHOLDER_TEXT:
                block_content = ""
            if event_role == "USER":
                role = MessageRole.USER
            elif event_role == "ASSISTANT":
                role = MessageRole.ASSISTANT
            elif event_role == "TOOL":
                role = MessageRole.TOOL
            elif event_role == "OTHER":
                role = MessageRole.SYSTEM
            else:
                logger.warning(f"Skipping unsupported event role type: {event_role}")
                continue
            messages.append(
                ChatMessage(
                    role=role,
                    content=block_content,
                    additional_kwargs=tool_call_kwargs,
                )
            )
    return messages
def convert_messages_to_string(
    messages: List[ChatMessage], input: Optional[str] = None
) -> str:
    """Render chat messages as newline-separated ``role: content`` lines.

    When *input* is provided, it is appended as a trailing ``user:`` line.
    """
    rendered = "\n".join(
        f"{message.role.value}: {message.content}" for message in messages
    )
    if input:
        rendered = f"{rendered}\nuser: {input}"
    return rendered
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/llama_index/memory/bedrock_agentcore/utils.py",
"license": "MIT License",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/tests/test_agentcore_memory.py | import pytest
from unittest.mock import MagicMock, patch
import json
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.memory.memory import InsertMethod
from llama_index.memory.bedrock_agentcore.base import (
AgentCoreMemory,
AgentCoreMemoryContext,
)
@pytest.fixture()
def mock_client():
    """Build a MagicMock that mimics the bedrock-agentcore boto3 client."""
    stub = MagicMock()
    # Canned empty responses so memory methods succeed without AWS access.
    stub.configure_mock(
        **{
            "create_event.return_value": {"event": {"eventId": "test-event-id"}},
            "list_events.return_value": {"events": [], "nextToken": None},
            "retrieve_memory_records.return_value": {"memoryRecordSummaries": []},
            "delete_event.return_value": {},
            "list_memory_records.return_value": {
                "memoryRecordSummaries": [],
                "nextToken": None,
            },
        }
    )
    return stub
@pytest.fixture()
def memory_context():
    """Provide an AgentCoreMemoryContext preconfigured for the test suite."""
    params = {
        "actor_id": "test-actor",
        "memory_id": "test-memory-store",
        "session_id": "test-session-id",
        "memory_strategy_id": "test-semantic-memory-strategy",
    }
    return AgentCoreMemoryContext(**params)
@pytest.fixture()
def memory(mock_client, memory_context):
    """Create a basic AgentCore Memory instance for testing."""
    # Passing a pre-built client skips boto3 session/client construction.
    return AgentCoreMemory(context=memory_context, client=mock_client)
class TestAgentCoreMemoryContext:
    """Test AgentCoreMemoryContext class."""

    def test_context_creation(self):
        """Test creating a memory context."""
        context = AgentCoreMemoryContext(
            actor_id="test-actor",
            memory_id="test-memory",
            session_id="test-session",
        )
        assert context.actor_id == "test-actor"
        assert context.memory_id == "test-memory"
        assert context.session_id == "test-session"
        # Optional fields fall back to their declared defaults.
        assert context.namespace == "/"
        assert context.memory_strategy_id is None

    def test_context_with_optional_fields(self):
        """Test creating a memory context with optional fields."""
        context = AgentCoreMemoryContext(
            actor_id="test-actor",
            memory_id="test-memory",
            session_id="test-session",
            namespace="/custom",
            memory_strategy_id="custom-strategy",
        )
        assert context.namespace == "/custom"
        assert context.memory_strategy_id == "custom-strategy"

    def test_get_context(self):
        """Test getting context as dictionary."""
        context = AgentCoreMemoryContext(
            actor_id="test-actor",
            memory_id="test-memory",
            session_id="test-session",
            memory_strategy_id="test-strategy",
        )
        context_dict = context.get_context()
        # get_context returns every non-None field, including defaults.
        expected = {
            "actor_id": "test-actor",
            "memory_id": "test-memory",
            "session_id": "test-session",
            "namespace": "/",
            "memory_strategy_id": "test-strategy",
        }
        assert context_dict == expected
class TestBaseAgentCoreMemoryMethods:
    """Test BaseAgentCoreMemory methods using AgentCoreMemory instance."""

    def test_create_event_success(self, memory):
        """Test successful event creation."""
        messages = [ChatMessage(role=MessageRole.USER, content="Hello")]
        memory.create_event(
            memory_id="test-memory",
            actor_id="test-actor",
            messages=messages,
            session_id="test-session",
        )
        assert memory._client.create_event.called
        # call_args[1] is the kwargs dict passed through to the boto client.
        call_args = memory._client.create_event.call_args
        assert call_args[1]["memoryId"] == "test-memory"
        assert call_args[1]["actorId"] == "test-actor"
        assert call_args[1]["sessionId"] == "test-session"

    def test_create_event_no_client(self, memory_context):
        """Test create_event raises error when client is None."""
        # Patch boto3.Session so no real AWS client is built during __init__.
        with patch("boto3.Session"):
            memory = AgentCoreMemory(context=memory_context)
            memory._client = None  # Set client to None after initialization
            messages = [ChatMessage(role=MessageRole.USER, content="Hello")]
            with pytest.raises(ValueError, match="Client is not initialized"):
                memory.create_event(
                    memory_id="test-memory",
                    actor_id="test-actor",
                    messages=messages,
                    session_id="test-session",
                )

    def test_create_event_empty_messages(self, memory):
        """Test create_event raises error when messages is empty."""
        with pytest.raises(ValueError, match="The messages field cannot be empty"):
            memory.create_event(
                memory_id="test-memory",
                actor_id="test-actor",
                messages=[],
                session_id="test-session",
            )

    def test_create_event_no_event_id(self, memory):
        """Test create_event raises error when no event ID is returned."""
        memory._client.create_event.return_value = {"event": {"eventId": None}}
        messages = [ChatMessage(role=MessageRole.USER, content="Hello")]
        with pytest.raises(
            RuntimeError, match="Bedrock AgentCore did not return an event ID"
        ):
            memory.create_event(
                memory_id="test-memory",
                actor_id="test-actor",
                messages=messages,
                session_id="test-session",
            )

    def test_list_events_simple(self, memory):
        """Test listing events with simple user message first."""
        # Mock response with a user message first
        mock_events = [
            {
                "payload": [
                    {"blob": json.dumps({})},
                    {
                        "conversational": {
                            "role": "USER",
                            "content": {"text": "Hello"},
                        }
                    },
                ]
            }
        ]
        memory._client.list_events.return_value = {
            "events": mock_events,
            "nextToken": None,
        }
        messages = memory.list_events(
            memory_id="test-memory",
            session_id="test-session",
            actor_id="test-actor",
        )
        assert len(messages) == 1
        assert messages[0].role == MessageRole.USER
        assert messages[0].content == "Hello"

    def test_list_events_with_pagination(self, memory):
        """Test listing events with pagination to find user message."""
        # First call returns assistant message
        mock_events_1 = [
            {
                "payload": [
                    {"blob": json.dumps({})},
                    {
                        "conversational": {
                            "role": "ASSISTANT",
                            "content": {"text": "Hi there"},
                        }
                    },
                ]
            }
        ]
        # Second call returns user message
        mock_events_2 = [
            {
                "payload": [
                    {"blob": json.dumps({})},
                    {
                        "conversational": {
                            "role": "USER",
                            "content": {"text": "Hello"},
                        }
                    },
                ]
            }
        ]
        memory._client.list_events.side_effect = [
            {"events": mock_events_1, "nextToken": "token1"},
            {"events": mock_events_2, "nextToken": None},
        ]
        messages = memory.list_events(
            memory_id="test-memory",
            session_id="test-session",
            actor_id="test-actor",
        )
        # The user message fetched on the later page is ordered first
        # (chronological order), per the assertions below.
        assert len(messages) == 2
        assert messages[0].role == MessageRole.USER
        assert messages[0].content == "Hello"
        assert messages[1].role == MessageRole.ASSISTANT
        assert messages[1].content == "Hi there"

    def test_retrieve_memories(self, memory):
        """Test retrieving memory records."""
        memory._client.retrieve_memory_records.return_value = {
            "memoryRecordSummaries": [
                {"content": "Memory 1"},
                {"content": "Memory 2"},
            ]
        }
        memories = memory.retrieve_memories(
            memory_id="test-memory",
            search_criteria={"searchQuery": "test query"},
        )
        # Only the "content" field of each summary is surfaced.
        assert memories == ["Memory 1", "Memory 2"]
        memory._client.retrieve_memory_records.assert_called_once_with(
            memoryId="test-memory",
            namespace="/",
            searchCriteria={"searchQuery": "test query"},
            maxResults=20,
        )

    def test_list_raw_events_pagination(self, memory):
        """Raw events are accumulated across nextToken pages."""
        memory._client.list_events.side_effect = [
            {"events": [{"eventId": "e1"}], "nextToken": "t1"},
            {"events": [{"eventId": "e2"}], "nextToken": None},
        ]
        events = memory.list_raw_events("mid", "sid", "aid")
        assert [e["eventId"] for e in events] == ["e1", "e2"]
        assert memory._client.list_events.call_count == 2

    def test_list_memory_records_pagination(self, memory):
        """Memory record summaries are accumulated across nextToken pages."""
        memory._client.list_memory_records.side_effect = [
            {
                "memoryRecordSummaries": [{"memoryRecordId": "m1"}],
                "nextToken": "t1",
            },
            {
                "memoryRecordSummaries": [{"memoryRecordId": "m2"}],
                "nextToken": None,
            },
        ]
        records = memory.list_memory_records(
            memory_id="mid",
            memory_strategy_id="strategy",
            namespace="/",
            max_results=20,
        )
        assert [r["memoryRecordId"] for r in records] == ["m1", "m2"]
        assert memory._client.list_memory_records.call_count == 2
        # Inspect the kwargs forwarded on the first page request.
        first_call = memory._client.list_memory_records.call_args_list[0].kwargs
        assert first_call["memoryId"] == "mid"
        assert first_call["namespace"] == "/"
        assert first_call["memoryStrategyId"] == "strategy"
        assert first_call["maxResults"] == 20

    def test_list_sessions_pagination(self, memory):
        """Session IDs are accumulated across nextToken pages."""
        memory._client.list_sessions.side_effect = [
            {"sessionSummaries": [{"sessionId": "s1"}], "nextToken": "t1"},
            {"sessionSummaries": [{"sessionId": "s2"}], "nextToken": None},
        ]
        sessions = memory.list_sessions(memory_id="mid", actor_id="aid", max_results=20)
        assert sessions == ["s1", "s2"]
        assert memory._client.list_sessions.call_count == 2

    def test_delete_events_skips_missing_event_id(self, memory):
        """Events lacking an eventId are skipped, not deleted."""
        with patch.object(
            AgentCoreMemory,
            "list_raw_events",
            return_value=[{"eventId": "e1"}, {"nope": "x"}, {"eventId": "e2"}],
        ):
            out = memory.delete_events("mid", "sid", "aid")
        assert out["deletedEventIds"] == ["e1", "e2"]
        assert memory._client.delete_event.call_count == 2

    def test_delete_memory_records(self, memory):
        """Each listed record is deleted individually."""
        with patch.object(
            AgentCoreMemory,
            "list_memory_records",
            return_value=[{"memoryRecordId": "r1"}, {"memoryRecordId": "r2"}],
        ):
            out = memory.delete_memory_records(
                memory_id="mid",
                memory_strategy_id="strategy",
                namespace="/",
            )
        assert out["deletedMemoryRecordIds"] == ["r1", "r2"]
        assert memory._client.delete_memory_record.call_count == 2

    def test_batch_delete_memory_records_chunks(self, memory):
        """Five records with batch_size=2 produce three delete requests."""
        with patch.object(
            AgentCoreMemory,
            "list_memory_records",
            return_value=[{"memoryRecordId": f"r{i}"} for i in range(1, 6)],
        ):
            memory._client.batch_delete_memory_records.side_effect = [
                {
                    "successfulRecords": [
                        {"memoryRecordId": "r1"},
                        {"memoryRecordId": "r2"},
                    ],
                    "failedRecords": [],
                },
                {
                    "successfulRecords": [
                        {"memoryRecordId": "r3"},
                        {"memoryRecordId": "r4"},
                    ],
                    "failedRecords": [],
                },
                {
                    "successfulRecords": [{"memoryRecordId": "r5"}],
                    "failedRecords": [{"memoryRecordId": "rX"}],
                },
            ]
            out = memory.batch_delete_memory_records(
                memory_id="mid",
                memory_strategy_id="strategy",
                namespace="/",
                batch_size=2,
            )
        assert len(out["successfulRecords"]) == 5
        assert len(out["failedRecords"]) == 1
        assert memory._client.batch_delete_memory_records.call_count == 3

    def test_delete_all_memory_for_session_events_only(self, memory):
        """Without a strategy ID only events are deleted, not records."""
        with (
            patch.object(
                AgentCoreMemory,
                "delete_events",
                return_value={"deletedEventIds": ["e1"]},
            ) as p_del_events,
            patch.object(
                AgentCoreMemory,
                "batch_delete_memory_records",
            ) as p_batch,
        ):
            out = memory.delete_all_memory_for_session(
                memory_id="mid",
                actor_id="aid",
                session_id="sid",
                namespace="/",
                memory_strategy_id=None,
            )
            assert out["deletedEvents"] == ["e1"]
            p_del_events.assert_called_once()
            p_batch.assert_not_called()

    def test_delete_all_memory_for_session_events_and_records(self, memory):
        """With a strategy ID both events and memory records are deleted."""
        with (
            patch.object(
                AgentCoreMemory,
                "delete_events",
                return_value={"deletedEventIds": ["e1"]},
            ) as p_del_events,
            patch.object(
                AgentCoreMemory,
                "batch_delete_memory_records",
                return_value={
                    "successfulRecords": [{"memoryRecordId": "r1"}],
                    "failedRecords": [{"memoryRecordId": "r2"}],
                },
            ) as p_batch,
        ):
            out = memory.delete_all_memory_for_session(
                memory_id="mid",
                actor_id="aid",
                session_id="sid",
                namespace="/",
                memory_strategy_id="strategy",
            )
            assert out["deletedEvents"] == ["e1"]
            assert out["successfulDeletedMemoryRecords"] == [{"memoryRecordId": "r1"}]
            assert out["failedDeletedMemoryRecords"] == [{"memoryRecordId": "r2"}]
            p_del_events.assert_called_once()
            p_batch.assert_called_once()
class TestAgentCoreMemory:
"""Test AgentCoreMemory class."""
def test_initialization(self, memory_context):
"""Test AgentCoreMemory initialization."""
with patch("boto3.Session") as mock_session:
mock_client = MagicMock()
mock_session.return_value.client.return_value = mock_client
memory = AgentCoreMemory(context=memory_context)
assert memory._context == memory_context
assert memory._client == mock_client
assert memory.search_msg_limit == 5
assert memory.insert_method == InsertMethod.SYSTEM
def test_initialization_with_custom_client(self, memory_context, mock_client):
"""Test initialization with custom client."""
memory = AgentCoreMemory(context=memory_context, client=mock_client)
assert memory._client == mock_client
def test_class_name(self):
"""Test class name method."""
assert AgentCoreMemory.class_name() == "AgentCoreMemory"
def test_from_defaults_not_implemented(self):
"""Test that from_defaults raises NotImplementedError."""
with pytest.raises(
NotImplementedError, match="Use either from_client or from_config"
):
AgentCoreMemory.from_defaults()
def test_serialize_memory(self, memory):
"""Test memory serialization."""
serialized = memory.serialize_memory()
assert "search_msg_limit" in serialized
assert serialized["search_msg_limit"] == 5
# primary_memory is no longer included in serialization
assert "primary_memory" not in serialized
def test_get_context(self, memory, memory_context):
"""Test getting context."""
context = memory.get_context()
assert context == memory_context.get_context()
def test_get_with_system_insert(self, memory):
"""Test get method with SYSTEM insert method."""
# Mock the underlying methods that get() calls
mock_events = [ChatMessage(role=MessageRole.USER, content="Hello")]
mock_memories = ["Memory 1", "Memory 2"]
# Mock the client methods that are actually called
memory._client.list_events.return_value = {
"events": [
{
"payload": [
{"blob": json.dumps({})},
{
"conversational": {
"role": "USER",
"content": {"text": "Hello"},
}
},
]
}
],
"nextToken": None,
}
memory._client.retrieve_memory_records.return_value = {
"memoryRecordSummaries": [
{"content": {"text": "Memory 1"}},
{"content": {"text": "Memory 2"}},
]
}
# Test the get method
result = memory.get(input="test input")
# Should have system message + user message
assert len(result) >= 1
assert memory._client.list_events.called
assert memory._client.retrieve_memory_records.called
def test_get_with_user_insert(self, memory):
"""Test get method with USER insert method."""
# Setup
memory.insert_method = InsertMethod.USER
# Mock the client methods
memory._client.list_events.return_value = {
"events": [
{
"payload": [
{"blob": json.dumps({})},
{
"conversational": {
"role": "USER",
"content": {"text": "Hello"},
}
},
]
}
],
"nextToken": None,
}
memory._client.retrieve_memory_records.return_value = {
"memoryRecordSummaries": [{"content": {"text": "Memory 1"}}]
}
# Test
result = memory.get()
# Should have at least one message
assert len(result) >= 1
assert memory._client.list_events.called
assert memory._client.retrieve_memory_records.called
def test_get_all(self, memory):
"""Test get_all method."""
mock_messages = [ChatMessage(role=MessageRole.USER, content="Test")]
# Mock the client's list_events method since get_all calls self.list_events which uses the client
memory._client.list_events.return_value = {
"events": [
{
"payload": [
{"blob": json.dumps({})},
{
"conversational": {
"role": "USER",
"content": {"text": "Test"},
}
},
]
}
],
"nextToken": None,
}
result = memory.get_all()
assert len(result) == 1
assert result[0].role == MessageRole.USER
assert result[0].content == "Test"
memory._client.list_events.assert_called()
def test_put(self, memory):
"""Test put method."""
message = ChatMessage(role=MessageRole.USER, content="Hello")
# Mock the _add_msgs_to_client_memory method
with patch.object(memory, "_add_msgs_to_client_memory") as mock_add_msgs:
memory.put(message)
mock_add_msgs.assert_called_once_with([message])
@pytest.mark.asyncio
async def test_aput(self, memory):
"""Test async put method."""
message = ChatMessage(role=MessageRole.USER, content="Hello")
with patch.object(memory, "_add_msgs_to_client_memory") as mock_add_msgs:
await memory.aput(message)
mock_add_msgs.assert_called_once_with([message])
@pytest.mark.asyncio
async def test_aput_messages(self, memory):
"""Test async put messages method."""
messages = [
ChatMessage(role=MessageRole.USER, content="Hello"),
ChatMessage(role=MessageRole.ASSISTANT, content="Hi"),
]
with patch.object(memory, "_add_msgs_to_client_memory") as mock_add_msgs:
await memory.aput_messages(messages)
mock_add_msgs.assert_called_once_with(messages)
def test_set(self, memory):
"""Test set method."""
existing_messages = [ChatMessage(role=MessageRole.USER, content="Old")]
new_messages = [
ChatMessage(role=MessageRole.USER, content="Old"),
ChatMessage(role=MessageRole.ASSISTANT, content="New"),
]
# Mock the client's list_events method since set() calls get_all() which calls list_events()
memory._client.list_events.return_value = {
"events": [
{
"payload": [
{"blob": json.dumps({})},
{
"conversational": {
"role": "USER",
"content": {"text": "Old"},
}
},
]
}
],
"nextToken": None,
}
# Mock the _add_msgs_to_client_memory method
with patch.object(memory, "_add_msgs_to_client_memory") as mock_add_msgs:
memory.set(new_messages)
# Should only add the new message (since existing has 1 message, new has 2)
mock_add_msgs.assert_called_once_with([new_messages[1]])
def test_reset(self, memory):
"""Test reset method."""
# The reset method now just passes (no-op) as per the implementation
# This test verifies that reset can be called without errors
memory.reset()
# No assertions needed since reset() is now a no-op
def test_add_msgs_to_client_memory(self, memory):
"""Test adding messages to client memory."""
messages = [ChatMessage(role=MessageRole.USER, content="Hello")]
# Test the actual implementation - it should call create_event
memory._add_msgs_to_client_memory(messages)
# Verify create_event was called with correct parameters
memory._client.create_event.assert_called()
class TestIntegration:
    """Integration tests for AgentCoreMemory."""

    @pytest.mark.asyncio
    async def test_full_workflow(self, memory_context, mock_client):
        """Test a complete workflow with AgentCoreMemory: write then read."""
        # Setup mock responses; nextToken=None marks the end of pagination.
        mock_client.list_events.return_value = {
            "events": [
                {
                    "payload": [
                        {"blob": json.dumps({})},
                        {
                            "conversational": {
                                "role": "USER",
                                "content": {"text": "Hello"},
                            }
                        },
                    ]
                }
            ],
            "nextToken": None,
        }
        mock_client.retrieve_memory_records.return_value = {
            "memoryRecordSummaries": [{"content": {"text": "User likes greetings"}}]
        }
        # Create memory instance backed entirely by the mocked client
        memory = AgentCoreMemory(context=memory_context, client=mock_client)
        # Add a message
        message = ChatMessage(role=MessageRole.USER, content="New message")
        await memory.aput(message)
        # Verify create_event was called
        assert mock_client.create_event.called
        # Get messages (this will call list_events and retrieve_memories)
        messages = memory.get()
        # Should have system message + user message
        assert len(messages) >= 1
        assert mock_client.list_events.called
        assert mock_client.retrieve_memory_records.called
class TestErrorHandling:
    """Test error handling scenarios."""

    def test_boto3_import_error(self, memory_context):
        """Test handling of boto3 import error."""
        # Constructing the memory should surface a clear ImportError when
        # the boto3 session cannot be created.
        with patch("boto3.Session", side_effect=ImportError("boto3 not found")):
            with pytest.raises(ImportError, match="boto3 package not found"):
                AgentCoreMemory(context=memory_context)

    def test_client_initialization_error(self, memory_context):
        """Test handling of client initialization errors."""
        with patch("boto3.Session", side_effect=Exception("AWS credentials not found")):
            with pytest.raises(Exception, match="AWS credentials not found"):
                AgentCoreMemory(context=memory_context)
# Integration test with existing tests
@pytest.mark.asyncio
async def test_aput(memory):
    """Test adding a message."""
    await memory.aput(ChatMessage(role="user", content="New message"))
    # The underlying client should have received a create_event call.
    assert memory._client.create_event.called
@pytest.mark.asyncio
async def test_aput_messages(memory):
    """Test adding multiple messages."""
    messages = [
        ChatMessage(role="user", content="Message 1"),
        ChatMessage(role="assistant", content="Response 1"),
    ]
    await memory.aput_messages(messages)
    # Both messages are batched into a single create_event call.
    # (The earlier comment claimed one call per message, which the
    # assertion below contradicts.)
    assert memory._client.create_event.call_count == 1
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/tests/test_agentcore_memory.py",
"license": "MIT License",
"lines": 626,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-openai-like/tests/test_openai_like_grok.py | import os
from typing import Any
from pydantic import BaseModel
from llama_index.llms.openai_like import OpenAILike
import pytest
# Skip the live-API tests unless a Grok key is available in the environment.
SKIP_GROK = os.environ.get("GROK_API_KEY") is None


class Answer(BaseModel):
    """A simple answer."""

    # Short textual answer extracted via structured output.
    answer: str
# Define the models to test against.
# Each entry pairs a Grok model name with OpenAILike constructor kwargs; the
# same two models are listed twice, once with and once without
# `should_use_structured_outputs`. The list is empty when no API key is set,
# so the parametrized `llm` fixture produces no test cases.
GROK: list[dict[str, Any]] = (
    [
        {
            "model": "grok-4-fast-non-reasoning",
            "config": {
                "api_base": "https://api.x.ai/v1",
                "is_chat_model": True,
                "is_function_calling_model": True,
            },
        },
        {
            "model": "grok-4-fast-reasoning",
            "config": {
                "api_base": "https://api.x.ai/v1",
                "is_chat_model": True,
                "is_function_calling_model": True,
            },
        },
        {
            "model": "grok-4-fast-non-reasoning",
            "config": {
                "api_base": "https://api.x.ai/v1",
                "is_chat_model": True,
                "is_function_calling_model": True,
                "should_use_structured_outputs": True,
            },
        },
        {
            "model": "grok-4-fast-reasoning",
            "config": {
                "api_base": "https://api.x.ai/v1",
                "is_chat_model": True,
                "is_function_calling_model": True,
                "should_use_structured_outputs": True,
            },
        },
    ]
    if not SKIP_GROK
    else []
)
@pytest.fixture(params=GROK)
def llm(request) -> OpenAILike:
    """Build an OpenAILike client for each parametrized Grok model entry."""
    entry = request.param
    kwargs = dict(entry.get("config", {}))
    return OpenAILike(
        model=entry["model"],
        api_key=os.environ["GROK_API_KEY"],
        **kwargs,
    )
@pytest.mark.skipif(SKIP_GROK, reason="GROK_API_KEY not set")
def test_complete(llm: OpenAILike) -> None:
    """Test the synchronous complete method against a live Grok model."""
    prompt = "What is the capital of Switzerland?"
    sync_response = llm.complete(prompt)
    assert sync_response is not None
    assert len(sync_response.text) > 0
    # Bern is the expected factual answer; compare case-insensitively.
    assert "bern" in sync_response.text.lower()
@pytest.mark.skipif(SKIP_GROK, reason="GROK_API_KEY not set")
def test_complete_structured(llm: OpenAILike) -> None:
    """Test synchronous structured-output completion into the Answer model."""
    prompt = "What is the capital of Switzerland?"
    # `.raw` carries the parsed pydantic object from the structured LLM.
    sync_response: Answer | None = llm.as_structured_llm(Answer).complete(prompt).raw
    assert sync_response is not None
    assert "bern" in sync_response.answer.lower()
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-openai-like/tests/test_openai_like_grok.py",
"license": "MIT License",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-dev/llama_dev/release/prepare.py | from pathlib import Path
import click
from llama_dev.utils import (
BumpType,
bump_version,
load_pyproject,
update_pyproject_version,
)
def _replace_core_dependency(project_path: Path, old_dep: str, new_dep: str):
pyproject_path = project_path / "pyproject.toml"
# Read the file content
with open(pyproject_path, "r") as f:
content = f.read()
# Replace the old dependency string
new_content = content.replace(old_dep, new_dep)
# Write the updated content back
with open(pyproject_path, "w") as f:
f.write(new_content)
@click.command(
    short_help="Bump the versions to begin a llama_index umbrella package release"
)
@click.option(
    "--version-type",
    type=click.Choice([t.value for t in BumpType], case_sensitive=False),
    default=BumpType.PATCH.value,
    help="Type of version bump to perform (default: patch)",
)
@click.option(
    "--dry-run",
    is_flag=True,
    help="Show what would be done without making changes",
)
@click.pass_obj
def prepare(
    obj: dict,
    version_type: str,
    dry_run: bool,
):
    """Bump the version numbers to initiate the llama_index umbrella package release."""
    console = obj["console"]
    repo_root = obj["repo_root"]

    # Compute the next version from the umbrella package's current one.
    pyproject = load_pyproject(repo_root)
    old_version = pyproject["project"]["version"]
    next_version = bump_version(old_version, BumpType(version_type))
    upper_bound = bump_version(next_version, BumpType.MINOR)
    core_dep = f"llama-index-core>={next_version},<{upper_bound}"

    if dry_run:
        console.print(f"Would bump llama_index from {old_version} to {next_version}")
        console.print(f"llama_index will depend on '{core_dep}'")
        return

    # Bump llama-index and llama-index-core in lockstep.
    update_pyproject_version(repo_root, next_version)
    update_pyproject_version(repo_root / "llama-index-core", next_version)

    # Point the umbrella package at the freshly bumped core release.
    for dep in pyproject["project"]["dependencies"]:
        if dep.startswith("llama-index-core"):
            _replace_core_dependency(repo_root, dep, core_dep)
            break
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-dev/llama_dev/release/prepare.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/base.py | """Apache Solr vector store."""
import asyncio
import logging
import time
from collections.abc import Sequence
from typing import Annotated, Any, ClassVar, Optional, Union
from annotated_types import MinLen
from pydantic import (
ConfigDict,
Field,
SkipValidation,
field_validator,
)
from llama_index.core.schema import BaseNode, TextNode
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.vector_stores.solr.constants import (
ESCAPE_RULES_NESTED_LUCENE_DISMAX,
)
from llama_index.vector_stores.solr.query_utils import (
escape_query_characters,
recursively_unpack_filters,
)
from llama_index.vector_stores.solr.types import BoostedTextField, SolrQueryDict
logger = logging.getLogger(__name__)
class ApacheSolrVectorStore(BasePydanticVectorStore):
"""
A LlamaIndex vector store implementation for Apache Solr.
This vector store provides integration with Apache Solr, supporting
both dense vector similarity search (KNN) and sparse text search (BM25).
Key Features:
* Dense vector embeddings with KNN similarity search
* Sparse text search with BM25 scoring and field boosting
* Metadata filtering with various operators
* Async/sync operations
* Automatic query escaping and field preprocessing
Field Mapping: the vector store maps LlamaIndex node attributes
to Solr fields:
* ``nodeid_field``: Maps to ``node.id_`` (required)
* ``content_field``: Maps to ``node.get_content()`` (optional)
* ``embedding_field``: Maps to ``node.get_embedding()`` (optional)
* ``docid_field``: Maps to ``node.ref_doc_id`` (optional)
* ``metadata fields``: Mapped via ``metadata_to_solr_field_mapping``
Query Modes:
* ``DEFAULT``: Dense vector KNN search using embeddings
* ``TEXT_SEARCH``: Sparse BM25 text search with field boosting
"""
# Core client properties
sync_client: SkipValidation[Any] = Field(
...,
exclude=True,
description="Synchronous Solr client instance for blocking operations.",
)
async_client: SkipValidation[Any] = Field(
...,
exclude=True,
description="Asynchronous Solr client instance for non-blocking operations.",
)
# Essential field mappings
nodeid_field: str = Field(
...,
description=(
"Solr field name that uniquely identifies a node (required). Must be unique across all documents and maps to the LlamaIndex `node.id_`."
),
)
docid_field: Optional[str] = Field(
default=None,
description=(
"Solr field name for the document ID (optional). Maps to `node.ref_doc_id` and is required for document-level operations like deletion."
),
)
content_field: Optional[str] = Field(
default=None,
description=(
"Solr field name for storing the node's text content (optional). Maps to `node.get_content()`; required for BM25 / text search."
),
)
embedding_field: Optional[str] = Field(
default=None,
description=(
"Solr field name for storing embedding vectors (optional). Maps to `node.get_embedding()`; required for vector similarity (KNN) search."
),
)
metadata_to_solr_field_mapping: Optional[list[tuple[str, str]]] = Field(
default=None,
description=(
"Mapping from node metadata keys to Solr field names (optional). Each tuple is (metadata_key, solr_field). Enables structured metadata filtering."
),
)
# Configuration options
text_search_fields: Optional[Annotated[Sequence[BoostedTextField], MinLen(1)]] = (
Field(
default=None,
description=(
"Fields used for BM25 text search with optional boosting. Sequence of BoostedTextField; required for TEXT_SEARCH mode."
),
)
)
output_fields: Annotated[Sequence[str], MinLen(1)] = Field(
default=["*", "score"],
description=(
"Default fields to return in query results. Include 'score' automatically for relevance; use '*' for all stored fields or list specific ones."
),
)
# Serialization configuration
model_config: ClassVar[ConfigDict] = ConfigDict(
arbitrary_types_allowed=True, frozen=True
)
# Required for LlamaIndex API compatibility
stores_text: bool = True
stores_node: bool = True
flat_metadata: bool = False
@field_validator("output_fields")
@classmethod
def _validate_output_fields(cls, value: Sequence[str]) -> list[str]:
"""
Ensure 'score' field is always included in output_fields during initialization.
Args:
value (Sequence[str]): The original output fields
Returns:
Modified output fields with 'score' always included
"""
result = list(value)
if "score" not in result:
result.append("score")
return result
@field_validator("text_search_fields", mode="before")
def _validate_text_search_fields(
cls, v: Optional[list[Union[str, BoostedTextField]]]
) -> Optional[list[BoostedTextField]]:
"""Validate and convert text search fields to BoostedTextField instances."""
if v is None:
return None
def to_boosted(item: Union[str, BoostedTextField]) -> BoostedTextField:
if isinstance(item, str):
return BoostedTextField(field=item)
return item
return [to_boosted(item) for item in v]
    @property
    def client(self) -> Any:
        """Return the synchronous Solr client (used by blocking operations)."""
        return self.sync_client

    @property
    def aclient(self) -> Any:
        """Return the asynchronous Solr client (used by non-blocking operations)."""
        return self.async_client
def _build_dense_query(
self, query: VectorStoreQuery, solr_query: SolrQueryDict
) -> SolrQueryDict:
"""
Build a dense vector KNN query for Solr.
Args:
query: The vector store query containing embedding and parameters
solr_query: The base Solr query dictionary to build upon
Returns:
Updated Solr query dictionary with dense vector search parameters
Raises:
ValueError: If no embedding field is specified in either query or vector store
"""
if query.embedding_field is not None:
embedding_field = query.embedding_field
logger.debug("Using embedding field from query: %s", embedding_field)
elif self.embedding_field is not None:
embedding_field = self.embedding_field
logger.debug("Using embedding field from vector store: %s", embedding_field)
else:
raise ValueError(
"No embedding field name specified in query or vector store. "
"Either set 'embedding_field' on the VectorStoreQuery or configure "
"'embedding_field' when initializing ApacheSolrVectorStore"
)
if query.query_embedding is None:
logger.warning(
"`query.query_embedding` is None, retrieval results will not be meaningful."
)
solr_query["q"] = (
f"{{!knn f={embedding_field} topK={query.similarity_top_k}}}{query.query_embedding}"
)
rows_value = None or query.similarity_top_k
solr_query["rows"] = str(rows_value)
return solr_query
def _build_bm25_query(
self, query: VectorStoreQuery, solr_query: SolrQueryDict
) -> SolrQueryDict:
"""
Build a BM25 text search query for Solr.
Args:
query: The vector store query containing the query string and parameters
solr_query: The base Solr query dictionary to build upon
Returns:
Updated Solr query dictionary with BM25 search parameters
Raises:
ValueError: If no text search fields are available or query string is None
"""
if query.query_str is None:
raise ValueError("Query string cannot be None for BM25 search")
# Use text_search_fields from the vector store
if self.text_search_fields is None:
raise ValueError(
"text_search_fields must be specified in the vector store config for BM25 search"
)
user_query = escape_query_characters(
query.query_str, translation_table=ESCAPE_RULES_NESTED_LUCENE_DISMAX
)
# Join the search fields with spaces for the Solr qf parameter
search_fields_str = " ".join(
[
text_search_field.get_query_str()
for text_search_field in self.text_search_fields
]
)
solr_query["q"] = (
f"{{!dismax deftype=lucene, qf='{search_fields_str}' v='{user_query}'}}"
)
# Use rows from query if provided, otherwise fall back to similarity_top_k
rows_value = None or query.sparse_top_k
solr_query["rows"] = str(rows_value)
return solr_query
def _to_solr_query(self, query: VectorStoreQuery) -> SolrQueryDict:
"""Generate a KNN Solr query."""
solr_query: SolrQueryDict = {"q": "*:*", "fq": []}
if (
query.mode == VectorStoreQueryMode.DEFAULT
and query.query_embedding is not None
):
solr_query = self._build_dense_query(query, solr_query)
elif query.mode == VectorStoreQueryMode.TEXT_SEARCH:
solr_query = self._build_bm25_query(query, solr_query)
if query.doc_ids is not None:
if self.docid_field is None:
raise ValueError(
"`docid_field` must be passed during initialization to filter on docid"
)
solr_query["fq"].append(
f"{self.docid_field}:({' OR '.join(query.doc_ids)})"
)
if query.node_ids is not None and len(query.node_ids) > 0:
solr_query["fq"].append(
f"{self.nodeid_field}:({' OR '.join(query.node_ids)})"
)
if query.output_fields is not None:
# Use output fields from query, ensuring score is always included
output_fields = self._validate_output_fields(query.output_fields)
solr_query["fl"] = ",".join(output_fields)
logger.info("Using output fields from query: %s", output_fields)
else:
# Use default output fields from vector store, ensuring score is always included
solr_query["fl"] = ",".join(self.output_fields)
logger.info(
"Using default output fields from vector store: %s", self.output_fields
)
if query.filters:
filter_queries = recursively_unpack_filters(query.filters)
solr_query["fq"].extend(filter_queries)
logger.debug(
"Converted input query into Solr query dictionary, input=%s, output=%s",
query,
solr_query,
)
return solr_query
def _process_query_results(
self, results: list[dict[str, Any]]
) -> VectorStoreQueryResult:
"""
Convert Solr search results to LlamaIndex VectorStoreQueryResult format.
This method transforms raw Solr documents into LlamaIndex TextNode objects
and packages them with similarity scores and metadata into a structured
query result. It handles field mapping, metadata extraction.
Args:
results: List of Solr document dictionaries from search response.
Each dictionary contains field values as returned by Solr.
Returns:
A :py:class:`VectorStoreQueryResult` containing:
* ``nodes``: List of :py:class:`TextNode` objects with content and metadata
* ``ids``: List of node IDs corresponding to each node
* ``similarities``: List of similarity scores (if available)
Raises:
ValueError: If the number of similarity scores doesn't match the
number of nodes (partial scoring is not supported).
Note:
* Metadata fields are automatically identified by excluding known
system fields (``nodeid_field``, ``content_field``, etc.)
* The 'score' field from Solr is extracted as similarity scores
* Missing optional fields (``content``, ``embedding``) are handled gracefully
"""
ids, nodes, similarities = [], [], []
for result in results:
metadata_fields = result.keys() - {
self.nodeid_field,
self.content_field,
self.embedding_field,
self.docid_field,
"score",
}
ids.append(result[self.nodeid_field])
node = TextNode(
id_=result[self.nodeid_field],
# input must be a string, if missing use empty string
text=result[self.content_field] if self.content_field else "",
embedding=(
result[self.embedding_field] if self.embedding_field else None
),
metadata={f: result[f] for f in metadata_fields},
)
nodes.append(node)
if "score" in result:
similarities.append(result["score"])
if len(similarities) == 0:
return VectorStoreQueryResult(nodes=nodes, ids=ids)
elif 0 < len(similarities) < len(nodes):
raise ValueError(
"The number of similarities (scores) does not match the number of nodes"
)
else:
return VectorStoreQueryResult(
nodes=nodes, ids=ids, similarities=similarities
)
def _validate_query_mode(self, query: VectorStoreQuery) -> None:
"""
Validate that the query mode is supported by this vector store.
This method ensures that the requested query mode is compatible with
the current Solr vector store implementation.
Supported Modes:
* ``DEFAULT``: Dense vector similarity search using KNN with embeddings
* ``TEXT_SEARCH``: Sparse text search using BM25 with field boosting
Args:
query:
The vector store query containing the mode to validate. The mode is
checked against supported :py:class:`VectorStoreQueryMode` values.
Raises:
ValueError: If the query mode is not supported. Unsupported modes
include any future modes not yet implemented in the Solr backend.
Note:
This validation occurs before query execution to provide clear
error messages for unsupported operations. Future versions may
support additional query modes like hybrid search.
"""
if (
query.mode == VectorStoreQueryMode.DEFAULT
or query.mode == VectorStoreQueryMode.TEXT_SEARCH
):
return
else:
raise ValueError(
f"ApacheSolrVectorStore does not support {query.mode} yet."
)
def query(
self, query: VectorStoreQuery, **search_kwargs: Any
) -> VectorStoreQueryResult:
"""
Execute a synchronous search query against the Solr vector store.
This method supports both dense vector similarity search (KNN) and sparse
text search (BM25) depending on the query mode and parameters. It handles
query validation, Solr query construction, execution, and result processing.
Query Types:
* Dense Vector Search: Uses ``query_embedding`` for KNN similarity search
* Text Search: Uses ``query_str`` for BM25 text search with field boosting
* Filtered Search: Combines vector/text search with metadata filters
Supported Filter Operations:
* ``EQ``, ``NE``: Equality and inequality comparisons
* ``GT``, ``GTE``, ``LT``, ``LTE``: Numeric range comparisons
* ``IN``, ``NIN``: List membership tests
* ``TEXT_MATCH``: Exact text matching
Unsupported Filter Operations:
* ``ANY``, ``ALL``: Complex logical operations
* ``TEXT_MATCH_INSENSITIVE``: Case-insensitive text matching
* ``CONTAINS``: Substring matching
Args:
query:
The vector store query containing search parameters:
* ``query_embedding``: Dense vector for similarity search (DEFAULT mode)
* ``query_str``: Text string for BM25 search (TEXT_SEARCH mode)
* ``mode``: ``VectorStoreQueryMode`` (DEFAULT or TEXT_SEARCH)
* ``similarity_top_k``: Number of results for vector search
* ``sparse_top_k``: Number of results for text search
* ``filters``: Optional metadata filters for constraining results
* ``doc_ids``: Optional list of document IDs to filter by
* ``node_ids``: Optional list of node IDs to filter by
* ``output_fields``: Optional list of fields to return
**search_kwargs: Extra keyword arguments (ignored for compatibility)
Returns:
VectorStoreQueryResult containing:
* nodes: List of TextNode objects with content and metadata
* ids: List of corresponding node IDs
* similarities: List of similarity scores (when available)
Raises:
ValueError: If the query mode is unsupported, or if required fields
are missing (e.g., ``embedding_field`` for vector search, ``docid_field``
for document filtering)
Note:
This method performs synchronous I/O operations. For better performance
in async contexts, use the :py:meth:`aquery` method instead.
"""
del search_kwargs # unused
self._validate_query_mode(query)
solr_query = self._to_solr_query(query)
results = self.sync_client.search(solr_query)
return self._process_query_results(results.response.docs)
async def aquery(
self, query: VectorStoreQuery, **search_kwargs: Any
) -> VectorStoreQueryResult:
"""
Execute an asynchronous search query against the Solr vector store.
This method supports both dense vector similarity search (KNN) and sparse
text search (BM25) depending on the query mode and parameters. It handles
query validation, Solr query construction, execution, and result processing.
Query Types:
* Dense Vector Search: Uses ``query_embedding`` for KNN similarity search
* Text Search: Uses ``query_str`` for BM25 text search with field boosting
* Filtered Search: Combines vector/text search with metadata filters
Supported Filter Operations:
* ``EQ``, ``NE``: Equality and inequality comparisons
* ``GT``, ``GTE``, ``LT``, ``LTE``: Numeric range comparisons
* ``IN``, ``NIN``: List membership tests
* ``TEXT_MATCH``: Exact text matching
Unsupported Filter Operations:
* ``ANY``, ``ALL``: Complex logical operations
* ``TEXT_MATCH_INSENSITIVE``: Case-insensitive text matching
* ``CONTAINS``: Substring matching
Args:
query:
The vector store query containing search parameters:
* ``query_embedding``: Dense vector for similarity search (DEFAULT mode)
* ``query_str``: Text string for BM25 search (TEXT_SEARCH mode)
* ``mode``: ``VectorStoreQueryMode`` (DEFAULT or TEXT_SEARCH)
* ``similarity_top_k``: Number of results for vector search
* ``sparse_top_k``: Number of results for text search
* ``filters``: Optional metadata filters for constraining results
* ``doc_ids``: Optional list of document IDs to filter by
* ``node_ids``: Optional list of node IDs to filter by
* ``output_fields``: Optional list of fields to return
**search_kwargs: Extra keyword arguments (ignored for compatibility)
Returns:
VectorStoreQueryResult containing:
* nodes: List of TextNode objects with content and metadata
* ids: List of corresponding node IDs
* similarities: List of similarity scores (when available)
Raises:
ValueError: If the query mode is unsupported, or if required fields
are missing (e.g., ``embedding_field`` for vector search, ``docid_field``
for document filtering)
"""
del search_kwargs # unused
self._validate_query_mode(query)
solr_query = self._to_solr_query(query)
results = await self.async_client.search(solr_query)
return self._process_query_results(results.response.docs)
def _get_data_from_node(self, node: BaseNode) -> dict[str, Any]:
"""
Transform a LlamaIndex node into a Solr document dictionary.
This method maps LlamaIndex node attributes to Solr fields based on the
vector store configuration. It handles content extraction, embedding
mapping, metadata processing.
Args:
node: LlamaIndex BaseNode containing content, metadata,
to be stored in Solr.
Returns:
Dictionary representing a Solr document with mapped fields:
- id: Always maps to node.node_id (required)
- content_field: Maps to node.get_content() (if configured)
- embedding_field: Maps to node.get_embedding() (if configured)
- docid_field: Maps to node.ref_doc_id (if configured)
- metadata fields: Mapped via metadata_to_solr_field_mapping
Field Mapping Process:
1. Always includes node ID as 'id' field
2. Extracts content if content_field is configured
3. Extracts embedding if embedding_field is configured
4. Includes document ID if docid_field is configured
5. Maps metadata using configured field mappings with preprocessing
Note:
This is an internal method used by add() and async_add() operations.
The returned dictionary must be compatible with the Solr schema.
"""
data: dict[str, Any] = {self.nodeid_field: node.node_id}
if self.content_field is not None:
data[self.content_field] = node.get_content()
if self.embedding_field is not None:
data[self.embedding_field] = node.get_embedding()
if self.docid_field is not None:
data[self.docid_field] = node.ref_doc_id
if self.metadata_to_solr_field_mapping is not None:
for metadata_key, solr_key in self.metadata_to_solr_field_mapping:
if metadata_key in node.metadata:
data[solr_key] = node.metadata[metadata_key]
return data
def _get_data_from_nodes(
self, nodes: Sequence[BaseNode]
) -> tuple[list[str], list[dict[str, Any]]]:
# helper to avoid double iteration, it gets expensive at large batch sizes
logger.debug("Extracting data from %d nodes", len(nodes))
data: list[dict[str, Any]] = []
node_ids: list[str] = []
for node in nodes:
node_ids.append(node.id_)
data.append(self._get_data_from_node(node))
return node_ids, data
def add(self, nodes: Sequence[BaseNode], **add_kwargs: Any) -> list[str]:
"""
Synchronously add nodes (documents) to a Solr collection.
Mapping from Solr fields to :py:class:`llama_index.core.schema.BaseNode` attributes
should be as follows:
* ``nodeid_field`` -> ``node_id``
* ``content_field`` -> ``content``
* ``embedding_field`` -> ``embedding``
* ``docid_field`` -> ``ref_doc_id``
All other fields corresponding to the Solr collection should be packed as a single
``dict`` in the ``metadata`` field.
Args:
nodes: The nodes (documents) to be added to the Solr collection.
**add_kwargs:
Extra keyword arguments.
Returns:
A list of node IDs for each node added to the store.
"""
del add_kwargs # unused
if not nodes:
raise ValueError("Call to 'add' with no contents")
start = time.perf_counter()
node_ids, data = self._get_data_from_nodes(nodes)
self.sync_client.add(data)
logger.info(
"Added %d documents to Solr in %0.2f seconds",
len(data),
time.perf_counter() - start,
)
return node_ids
async def async_add(
self,
nodes: Sequence[BaseNode],
**add_kwargs: Any,
) -> list[str]:
"""
Asynchronously add nodes (documents) to a Solr collection.
Mapping from Solr fields to :py:class:`llama_index.core.schema.BaseNode` attributes
should be as follows:
* ``nodeid_field`` -> ``node_id``
* ``content_field`` -> ``content``
* ``embedding_field`` -> ``embedding``
* ``docid_field`` -> ``ref_doc_id``
All other fields corresponding to the Solr collection should be packed as a single
``dict`` in the ``metadata`` field.
Args:
nodes: The nodes (documents) to be added to the Solr collection.
**add_kwargs:
Extra keyword arguments.
Returns:
A list of node IDs for each node added to the store.
Raises:
ValueError: If called with an empty list of nodes.
"""
del add_kwargs # unused
if not nodes:
raise ValueError("Call to 'async_add' with no contents")
start = time.perf_counter()
node_ids, data = self._get_data_from_nodes(nodes)
await self.async_client.add(data)
logger.info(
"Added %d documents to Solr in %0.2f seconds",
len(data),
time.perf_counter() - start,
)
return node_ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
    """
    Synchronously delete a node from the collection using its reference document ID.

    The node is removed directly by ID via the synchronous client; no query is
    constructed.

    Args:
        ref_doc_id: The reference document ID of the node to be deleted.
        **delete_kwargs:
            Extra keyword arguments, ignored by this implementation. These are added
            solely for interface compatibility.

    Raises:
        ValueError: Propagated from the underlying client if the delete request fails.
    """
    del delete_kwargs  # unused
    # NOTE(review): the log text says "query" but the value is a document ID
    logger.debug("Deleting documents from Solr using query: %s", ref_doc_id)
    self.sync_client.delete_by_id([ref_doc_id])
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
    """
    Asynchronously delete a node from the collection using its reference document ID.

    The node is removed directly by ID via the asynchronous client; no query is
    constructed.

    Args:
        ref_doc_id: The reference document ID of the node to be deleted.
        **delete_kwargs:
            Extra keyword arguments, ignored by this implementation. These are added
            solely for interface compatibility.

    Raises:
        ValueError: Propagated from the underlying client if the delete request fails.
    """
    del delete_kwargs  # unused
    # NOTE(review): the log text says "query" but the value is a document ID
    logger.debug("Deleting documents from Solr using query: %s", ref_doc_id)
    await self.async_client.delete_by_id([ref_doc_id])
def _build_delete_nodes_query(
    self,
    node_ids: Optional[list[str]] = None,
    filters: Optional[MetadataFilters] = None,
) -> str:
    """
    Build a Solr query string selecting the nodes to delete.

    Node IDs are OR-ed together on ``nodeid_field``; unpacked metadata filter
    clauses are then AND-ed with that ID clause.

    Args:
        node_ids: IDs of the nodes to match, if any.
        filters: Metadata filters to match, if any.

    Returns:
        A Solr query string combining the ID and filter clauses.

    Raises:
        ValueError: If neither ``node_ids`` nor usable ``filters`` are provided.
    """
    if not node_ids and not filters:
        raise ValueError(
            "At least one of `node_ids` or `filters` must be passed to `delete_nodes`"
        )
    queries: list[str] = []
    if node_ids:
        # NOTE(review): IDs are interpolated unescaped — assumes node IDs contain
        # no Solr query syntax characters; confirm upstream sanitization
        queries.append(f"{self.nodeid_field}:({' OR '.join(node_ids)})")
    if filters is not None:
        queries.extend(recursively_unpack_filters(filters))
    # filters may unpack to nothing, leaving no usable clause
    if not queries:
        raise ValueError(
            "Neither `node_ids` nor non-empty `filters` were passed to `delete_nodes`"
        )
    elif len(queries) == 1:
        return queries[0]
    return f"({' AND '.join(q for q in queries if q)})"
def delete_nodes(
    self,
    node_ids: Optional[list[str]] = None,
    filters: Optional[MetadataFilters] = None,
    **delete_kwargs: Any,
) -> None:
    """
    Synchronously remove nodes from the store, selected by ID and/or metadata filters.

    When only IDs are given, the deletion happens directly by ID; otherwise a
    Solr query combining IDs and filters is built and executed.

    Args:
        node_ids: The node IDs to delete.
        filters: Metadata filters selecting the nodes to delete.
        **delete_kwargs: Extra keyword arguments, ignored by this implementation.
    """
    del delete_kwargs  # unused
    no_filters = filters is None or len(filters.filters) == 0
    if node_ids and no_filters:
        # fast path: a plain ID deletion avoids building a query string
        logger.debug("Deleting %d nodes from Solr by ID", len(node_ids))
        self.sync_client.delete_by_id(node_ids)
        return
    query_string = self._build_delete_nodes_query(node_ids, filters)
    logger.debug(
        "Deleting nodes from Solr using query: %s", query_string
    )  # pragma: no cover
    self.sync_client.delete_by_query(query_string)
async def adelete_nodes(
    self,
    node_ids: Optional[list[str]] = None,
    filters: Optional[MetadataFilters] = None,
    **delete_kwargs: Any,
) -> None:
    """
    Asynchronously remove nodes from the store, selected by ID and/or metadata filters.

    When only IDs are given, the deletion happens directly by ID; otherwise a
    Solr query combining IDs and filters is built and executed.

    Args:
        node_ids: The node IDs to delete.
        filters: Metadata filters selecting the nodes to delete.
        **delete_kwargs: Extra keyword arguments, ignored by this implementation.
    """
    del delete_kwargs  # unused
    no_filters = filters is None or len(filters.filters) == 0
    if node_ids and no_filters:
        # fast path: a plain ID deletion avoids building a query string
        logger.debug("Deleting %d nodes from Solr by ID", len(node_ids))
        await self.async_client.delete_by_id(node_ids)
        return
    query_string = self._build_delete_nodes_query(node_ids, filters)
    logger.debug("Deleting nodes from Solr using query: %s", query_string)
    await self.async_client.delete_by_query(query_string)
def clear(self) -> None:
    """
    Synchronously remove every document in the Solr collection.

    This operation cannot be undone.
    """
    self.sync_client.clear_collection()
async def aclear(self) -> None:
    """
    Asynchronously remove every document in the Solr collection.

    This operation cannot be undone.
    """
    await self.async_client.clear_collection()
def close(self) -> None:
    """
    Close the Solr client synchronously.

    The synchronous client is closed immediately. The asynchronous client's
    ``close`` coroutine is run to completion when no event loop is running;
    inside a running loop it is only scheduled as a task and not awaited, so
    it may still be pending when this method returns.
    """
    self.sync_client.close()
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        # No running loop: create a temporary loop and close cleanly
        asyncio.run(self.async_client.close())
    else:
        # Running loop: schedule async close (not awaited)
        loop.create_task(self.async_client.close())  # noqa: RUF006
async def aclose(self) -> None:
    """Close both underlying clients; for callers already inside an event loop."""
    self.sync_client.close()
    await self.async_client.close()
def __del__(self) -> None:
    """
    Clean up the client for shutdown.

    This action is not reversible, and should only be called one time.
    Errors are logged rather than raised, since exceptions escaping
    ``__del__`` are unreliable during interpreter shutdown.
    """
    try:
        self.close()
    except RuntimeError as exc:
        # close() probes for a running event loop; nothing to do if absent
        logger.debug(
            "No running event loop, nothing to close, type=%s err='%s'",
            type(exc),
            exc,
        )
    except Exception as exc:
        # best-effort cleanup: never let __del__ propagate
        logger.warning(
            "Failed to close the async Solr client, type=%s err='%s'",
            type(exc),
            exc,
        )
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/base.py",
"license": "MIT License",
"lines": 720,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/client/_base.py | """Sync/async clients for interacting with Apache Solr."""
from typing import Any, Optional
from llama_index.vector_stores.solr.constants import SolrConstants
class _BaseSolrClient:
    """Common configuration, validation, and representation shared by Solr clients."""

    def __init__(
        self,
        base_url: str,
        request_timeout_sec: int = SolrConstants.DEFAULT_TIMEOUT_SEC,
        headers: Optional[dict[str, str]] = None,
        **client_kwargs: Any,
    ) -> None:
        """
        Validate and store the client configuration.

        Args:
            base_url:
                The base URL of the target Solr collection or core.
            request_timeout_sec: The timeout for requests to Solr.
            headers: Additional headers to include in all requests.
            **client_kwargs:
                Additional keyword arguments to pass to the internal client
                constructor.

        Raises:
            ValueError: If ``base_url`` is blank or the timeout is negative.
        """
        if not base_url.strip():
            raise ValueError(
                f"Parameter 'base_url' cannot be empty, input='{base_url}'"
            )
        if request_timeout_sec < 0:
            raise ValueError(
                f"Parameter 'request_timeout_sec' cannot be negative, "
                f"input='{request_timeout_sec}'"
            )
        # normalize away any trailing slash so URL building is uniform
        self._base_url = base_url.rstrip("/")
        self._request_timeout_sec = request_timeout_sec
        self._headers = headers if headers is not None else {}
        self._client_kwargs = client_kwargs
        # concrete subclasses construct the real client lazily
        self._client: Any = None

    def __str__(self) -> str:
        """Human-readable description of the client."""
        return f"{self.__class__.__name__}(base_url='{self.base_url}')"

    def __repr__(self) -> str:
        """Mirror :py:meth:`__str__`."""
        return str(self)

    @property
    def base_url(self) -> str:
        """The base URL of the target Solr collection."""
        return self._base_url
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/client/_base.py",
"license": "MIT License",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/client/async_.py | """An asynchronous Solr client implementation using ``aiosolr`` under the hood."""
import asyncio
import logging
import sys
import time
from asyncio import Task
from collections.abc import Mapping, Sequence
from typing import Any, Union, cast
from urllib.parse import urlparse
import aiosolr
from pydantic import ValidationError
from llama_index.vector_stores.solr.client._base import _BaseSolrClient
from llama_index.vector_stores.solr.client.responses import (
SolrSelectResponse,
SolrUpdateResponse,
)
from llama_index.vector_stores.solr.client.utils import prepare_document_for_solr
from llama_index.vector_stores.solr.constants import SolrConstants
logger = logging.getLogger(__name__)
class AsyncSolrClient(_BaseSolrClient):
    """
    A Solr client that wraps :py:class:`aiosolr.Client`.

    See `aiosolr <https://github.com/youversion/aiosolr>`_ for implementation details.
    """

    async def _build_client(self) -> aiosolr.Client:
        """Construct and set up the underlying ``aiosolr.Client`` for this base URL."""
        try:
            logger.info("Initializing aiosolr client for URL: %s", self.base_url)
            # aiosolr.Client builds URLs for various actions in a hardcoded manner; for
            # URLs with ports (such as localhost URLs), we need to pass the parsed version
            # for external URLs, we need to pass the connection URL directly
            parsed_url = urlparse(self.base_url)
            # the last path segment of the base URL is the collection name
            *_, collection = parsed_url.path.split("/")
            if parsed_url.port is not None:
                args = {
                    "host": parsed_url.hostname,
                    "port": parsed_url.port,
                    "scheme": parsed_url.scheme,
                    "collection": collection,
                    **self._client_kwargs,
                }
            else:
                args = {
                    "connection_url": self._base_url,
                    **self._client_kwargs,
                }
            # NOTE(review): presumably aiosolr accepts different timeout kwargs
            # depending on the Python version — confirm against aiosolr releases
            if sys.version_info < (3, 10):
                args["timeout"] = self._request_timeout_sec
            else:
                args["read_timeout"] = self._request_timeout_sec
                args["write_timeout"] = self._request_timeout_sec
            logger.debug("Initializing AIOSolr client with args: %s", self._base_url)
            client = aiosolr.Client(**args)
            await client.setup()
            # should not happen
            if client.session is None:  # pragma: no cover
                raise ValueError("AIOSolr client session was not created after setup")
            if self._headers:
                client.session.headers.update(self._headers)
                logger.debug(
                    "Updated AIOSolr client default headers with keys: %s",
                    list(self._headers.keys()),
                )
            return client
        except RuntimeError as exc:  # pragma: no cover
            raise ValueError(
                f"AIOSolr client cannot be initialized (likely due to running in "
                f"non-async context), type={type(exc)} err={exc}"
            ) from exc

    async def _get_client(self) -> aiosolr.Client:
        """Return the cached ``aiosolr`` client, creating it on first use."""
        # defer session creation until actually required
        if not self._client:
            self._client = await self._build_client()
        return self._client

    async def search(
        self, query_params: Mapping[str, Any], **kwargs: Any
    ) -> SolrSelectResponse:
        """
        Asynchronously search Solr with the input query, returning any matching documents.

        No validation is done on the input query dictionary.

        Args:
            query_params: A query dictionary to be sent to Solr.
            **kwargs:
                Additional keyword arguments to pass to :py:meth:`aiosolr.Client.query`.

        Returns:
            The deserialized response from Solr.

        Raises:
            ValueError: If the Solr call fails or the response cannot be parsed.
        """
        try:
            logger.info("Searching Solr with query='%s'", query_params)
            client = await self._get_client()
            results = await client.query(**query_params, **kwargs)
            response = SolrSelectResponse.from_aiosolr_response(results)
            logger.info(
                "Solr response received (path=select): status=%s qtime=%s hits=%s",
                response.response_header.status,
                response.response_header.q_time,
                response.response.num_found,
            )
            return response
        except aiosolr.SolrError as err:
            raise ValueError(
                f"Error during Aiosolr call, type={type(err)} err={err}"
            ) from err
        except ValidationError as err:
            raise ValueError(
                f"Unexpected response format from Solr: err={err.json()}"
            ) from err

    async def add(
        self, documents: Sequence[Mapping[str, Any]], **kwargs: Any
    ) -> SolrUpdateResponse:
        """
        Asynchronously add documents to the Solr collection.

        No validation is done on the input documents.

        Args:
            documents:
                The documents to be added to the Solr collection. These documents should
                be serializable to JSON.
            **kwargs:
                Additional keyword arguments to be passed to :py:meth:`aiosolr.Client.add`.

        Returns:
            The deserialized update response from Solr.

        Raises:
            ValueError: If the Solr call fails or the response cannot be parsed.
        """
        logger.debug("Preparing documents for insertion into Solr collection")
        start = time.perf_counter()
        # convert datetimes/numpy/bytes into Solr-compatible JSON values
        updated_docs = [prepare_document_for_solr(doc) for doc in documents]
        logger.debug(
            "Prepared %d documents, took %.2g seconds",
            len(documents),
            time.perf_counter() - start,
        )
        try:
            logger.info("Adding %d documents to the Solr collection", len(documents))
            client = await self._get_client()
            results = await client.update(data=updated_docs, **kwargs)
            response = SolrUpdateResponse.from_aiosolr_response(results)
            logger.info(
                "Solr response received (path=update): status=%s",
                response.response_header.status,
            )
            return response
        except aiosolr.SolrError as err:
            raise ValueError(
                f"Error during Aiosolr call, type={type(err)} err={err}"
            ) from err
        except ValidationError as err:
            raise ValueError(
                f"Unexpected response format from Solr: err={err.json()}"
            ) from err

    async def _delete(
        self, delete_command: Union[list[str], dict[str, Any]], **kwargs: Any
    ) -> SolrUpdateResponse:
        """
        Send a Solr ``delete`` update command (either a list of IDs or a query dict).

        Raises:
            ValueError: If the Solr call fails or the response cannot be parsed.
        """
        try:
            client = await self._get_client()
            delete_query = {"delete": delete_command}
            results = await client.update(data=delete_query, **kwargs)
            response = SolrUpdateResponse.from_aiosolr_response(results)
            logger.info(
                "Solr response received (path=update): status=%s qtime=%s",
                response.response_header.status,
                response.response_header.q_time,
            )
            return response
        except aiosolr.SolrError as err:
            raise ValueError(
                f"Error during Aiosolr call, type={type(err)} err={err}"
            ) from err
        except ValidationError as err:
            raise ValueError(
                f"Unexpected response format from Solr: err={err.json()}"
            ) from err

    async def delete_by_query(
        self, query_string: str, **kwargs: Any
    ) -> SolrUpdateResponse:
        """
        Asynchronously delete documents from the Solr collection using a query string.

        No validation is done on the input query string.

        Args:
            query_string: A query string matching the documents that should be deleted.
            **kwargs:
                Additional keyword arguments to be passed to
                :py:meth:`aiosolr.Client.update`.

        Returns:
            The deserialized response from Solr.
        """
        logger.info(
            "Deleting documents from Solr matching query '%s', collection url=%s",
            query_string,
            self._base_url,
        )
        return await self._delete({"query": query_string}, **kwargs)

    async def delete_by_id(
        self, ids: Sequence[str], **kwargs: Any
    ) -> SolrUpdateResponse:
        """
        Asynchronously delete documents from the Solr collection using their IDs.

        If the set of IDs is known, this is generally more efficient than using
        :py:meth:`.delete_by_query`.

        Args:
            ids: A sequence of document IDs to be deleted.
            **kwargs:
                Additional keyword arguments to be passed to
                :py:meth:`aiosolr.Client.update`.

        Returns:
            The deserialized response from Solr.

        Raises:
            ValueError: If the list of IDs is empty.
        """
        if not ids:
            raise ValueError("The list of IDs to delete cannot be empty")
        logger.info(
            "Deleting %d documents from the Solr collection by ID, collection url=%s",
            len(ids),
            self._base_url,
        )
        return await self._delete(list(ids), **kwargs)

    async def clear_collection(self, **kwargs: Any) -> SolrUpdateResponse:
        """
        Asynchronously delete all documents from the Solr collection.

        Args:
            **kwargs:
                Optional keyword arguments to be passed to
                :py:meth:`aiosolr.Client.update`.

        Returns:
            The deserialized response from Solr.
        """
        return await self.delete_by_query(SolrConstants.QUERY_ALL, **kwargs)

    async def close(self) -> None:
        """Close the ``aiosolr`` client, if it exists."""
        if self._client is not None:
            await cast(aiosolr.Client, self._client).close()

    def __del__(self) -> None:
        """Destroy the client, ensuring the session gets closed if it's not already."""
        # hold strong references so scheduled close tasks aren't garbage-collected
        # before they run
        tasks: set[Task] = set()
        try:
            loop = asyncio.get_running_loop()
            if loop.is_running():
                task = loop.create_task(self.close())
                tasks.add(task)
                task.add_done_callback(tasks.discard)
            else:  # pragma: no cover
                loop.run_until_complete(self.close())
            return
        except RuntimeError as exc:
            logger.debug(
                "No running event loop, nothing to close, type=%s err='%s'",
                type(exc),
                exc,
            )
        # last resort catch for interpreter shutdown, not reasonably testable
        except Exception as exc:  # pragma: no cover
            logger.warning(
                "Failed to close the async Solr client, type=%s err='%s'",
                type(exc),
                exc,
            )
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/client/async_.py",
"license": "MIT License",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/client/responses.py | """
Pydantic models for Solr responses.
This includes utilities for bridging between responses from ``pysolr`` and ``aiosolr``.
"""
from typing import Any, ClassVar, Optional, Union
from xml.etree import ElementTree as ET
import aiosolr
import pysolr
from pydantic import BaseModel, ConfigDict, Field, alias_generators
from typing_extensions import Self
class SolrResponseHeader(BaseModel):
    """
    Solr response headers.

    The list of fields is not exhaustive, but covers fields present in usage history or
    commonly cited in Solr documentation.

    See `Solr documentation
    <https://solr.apache.org/guide/solr/latest/query-guide/response-writers.html#json-response-writer>`_
    for details.
    """

    status: Optional[int] = Field(default=None)
    """Response status returned by Solr."""

    q_time: Union[float, int, None] = Field(default=None, alias="QTime")
    """Elapsed time (ms) taken by the Solr request handler to complete the request."""

    zk_connected: Optional[bool] = Field(default=None, alias="zkConnected")
    """Optional field indicating whether the request handler was connected to a Zookeeper instance."""

    rf: Optional[int] = Field(default=None)
    """Optional field indicating the number of shards that successfully responded to the request."""

    params: Optional[dict[str, Any]] = Field(default=None)
    """Echoes the request parameters corresponding to the response."""

    model_config: ClassVar[ConfigDict] = ConfigDict(
        extra="allow",  # allow extra fields, for forward-compatibility
        populate_by_name=True,  # allow both name and alias forms when building
    )
class SolrSelectResponseBody(BaseModel):
    """
    Solr response body.

    See `Solr documentation
    <https://solr.apache.org/guide/solr/latest/query-guide/response-writers.html#json-response-writer>`_
    for details.
    """

    docs: list[dict[str, Any]]
    """Documents returned by Solr for the query.

    Each document is a dictionary containing all of the fields specified in the
    ``fl`` parameter of the request (or a default if not provided).
    """

    num_found: int
    """The number of documents returned by Solr."""

    num_found_exact: bool
    """Whether the ``num_found`` value was approximated or not.

    If ``True``, the real number of hits is guaranteed to be greater than or
    equal to :py:attr:`.num_found`.
    """

    start: int
    """The offset into the query's result set (for paginated queries)."""

    model_config: ClassVar[ConfigDict] = ConfigDict(
        alias_generator=alias_generators.to_camel,  # generate camelCase aliases
        extra="allow",  # allow extra fields, for forward-compatibility
        populate_by_name=True,  # allow both name and alias forms when building
    )
class SolrSelectResponse(BaseModel):
    """
    Solr search response.

    See `Solr documentation
    <https://solr.apache.org/guide/solr/latest/query-guide/response-writers.html#json-response-writer>`_
    for details.
    """

    response: SolrSelectResponseBody
    """The response contents for the input query, containing documents when applicable."""

    response_header: SolrResponseHeader = Field(default_factory=SolrResponseHeader)
    """The header information for the response."""

    debug: Optional[dict[str, Any]] = None
    """Debugging information for the response.

    This will not be present unless indicated in the request.
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(
        alias_generator=alias_generators.to_camel,  # generate camelCase aliases
        extra="allow",  # allow extra fields, for forward-compatibility
        populate_by_name=True,  # allow both name and alias forms when building
    )

    @classmethod
    def from_pysolr_results(cls, results: pysolr.Results) -> Self:
        """
        Build a response from a :py:class:`pysolr.Results`.

        This uses the underlying raw response contained in the ``pysolr`` results.
        """
        raw_response: dict[str, Any] = results.raw_response.get("response", {})
        return cls(
            response=SolrSelectResponseBody(
                docs=results.docs,
                num_found=results.hits,
                # fields pysolr does not surface directly come from the raw payload
                num_found_exact=raw_response.get("numFoundExact", True),
                start=raw_response.get("start", 0),
            ),
            # raw header dict is coerced into SolrResponseHeader by pydantic
            response_header=results.raw_response.get("responseHeader", {}),
            debug=results.debug,
        )

    @classmethod
    def from_aiosolr_response(cls, results: aiosolr.Response) -> Self:
        """Build a response from a :py:class:`aiosolr.Response`."""
        raw_response: dict[str, Any] = results.data.get("response", {})
        return cls(
            response=SolrSelectResponseBody(
                docs=results.docs,
                num_found=raw_response.get("numFound", 0),
                num_found_exact=raw_response.get("numFoundExact", True),
                start=raw_response.get("start", 0),
            ),
            # aiosolr only exposes the HTTP-level status here
            response_header=SolrResponseHeader(status=results.status),
            debug=results.data.get("debug", {}),
        )
class SolrUpdateResponse(BaseModel):
    """
    Solr update response (add and delete requests).

    See `Solr documentation
    <https://solr.apache.org/guide/solr/latest/query-guide/response-writers.html#json-response-writer>`_
    for details.
    """

    response_header: SolrResponseHeader
    """The header information for the response."""

    debug: Optional[dict[str, Any]] = None
    """Debugging information for the response.

    This will not be present unless indicated in the request.
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(
        alias_generator=alias_generators.to_camel,  # generate camelCase aliases
        extra="allow",  # allow extra fields, for forward-compatibility
        populate_by_name=True,  # allow both name and alias forms when building
    )

    @classmethod
    def from_aiosolr_response(cls, results: aiosolr.Response) -> Self:
        """Build an update response from a :py:class:`aiosolr.Response`."""
        return cls(
            response_header=SolrResponseHeader(status=results.status),
            debug=results.data.get("debug", {}),
        )

    @classmethod
    def from_xml(cls, xml: str) -> Self:
        """
        Parse an update response from Solr's XML response body.

        Args:
            xml: The raw XML text returned by a Solr update request.

        Returns:
            The deserialized update response.

        Raises:
            xml.etree.ElementTree.ParseError: If the input is not well-formed XML.
        """
        # NOTE: ET.fromstring should only be fed trusted (server-provided) XML
        root = ET.fromstring(xml)
        header_data = {}
        header_elem = root.find("./lst[@name='responseHeader']")
        if header_elem is not None:
            for child in header_elem:
                name = child.attrib.get("name")
                if name is None:
                    # skip malformed entries carrying no field name, rather than
                    # inserting a None dict key that pydantic cannot validate
                    continue
                header_data[name] = child.text.strip() if child.text else None
        # use cls(...) (not the class name) so subclasses build instances of their
        # own type, consistent with from_aiosolr_response
        return cls(response_header=SolrResponseHeader.model_validate(header_data))
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/client/responses.py",
"license": "MIT License",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/client/sync.py | """A synchronous Solr client implementation using ``pysolr`` under the hood."""
import logging
import time
from collections.abc import Mapping, Sequence
from typing import Any, Optional
from xml.etree.ElementTree import ParseError
import pysolr
from pydantic import ValidationError
from llama_index.vector_stores.solr.client._base import _BaseSolrClient
from llama_index.vector_stores.solr.client.responses import (
SolrSelectResponse,
SolrUpdateResponse,
)
from llama_index.vector_stores.solr.client.utils import prepare_document_for_solr
from llama_index.vector_stores.solr.constants import SolrConstants
logger = logging.getLogger(__name__)
class SyncSolrClient(_BaseSolrClient):
    """
    A synchronous Solr client that wraps :py:class:`pysolr.Solr`.

    See `pysolr <https://github.com/django-haystack/pysolr/blob/master/pysolr.py>`_ for
    implementation details.
    """

    def _get_client(self) -> pysolr.Solr:
        """Return the cached ``pysolr`` client, creating it lazily on first use."""
        if self._client is None:
            self._client = self._build_client()
        return self._client

    def _build_client(self) -> pysolr.Solr:
        """Construct a new ``pysolr.Solr`` client and apply any default headers."""
        logger.info("Initializing pysolr client for URL: %s", self.base_url)
        client = pysolr.Solr(
            url=self.base_url, timeout=self._request_timeout_sec, **self._client_kwargs
        )
        if self._headers:
            session = client.get_session()
            session.headers.update(self._headers)
            logger.debug(
                "Updated pysolr client default headers with keys: %s",
                list(self._headers.keys()),
            )
        return client

    def close(self) -> None:
        """Close the underlying Solr client session."""
        if self._client:
            logger.debug("Closing the Solr client session")
            # pysolr doesn't expose a close method, so we directly close the underlying session
            self._client.get_session().close()
            self._client = None

    def search(
        self, query_params: Mapping[str, Any], **kwargs: Any
    ) -> SolrSelectResponse:
        """
        Search Solr with the input query, returning any matching documents.

        No validation is done on the input query dictionary.

        Args:
            query_params: A query dictionary to be sent to Solr.
            **kwargs:
                Additional keyword arguments to pass to :py:meth:`pysolr.Solr.search`.

        Returns:
            The deserialized response from Solr.

        Raises:
            ValueError: If the Solr call fails or the response cannot be parsed.
        """
        try:
            logger.info("Searching Solr with query='%s'", query_params)
            results = self._get_client().search(**query_params, **kwargs)
            response = SolrSelectResponse.from_pysolr_results(results)
            logger.info(
                "Solr response received (path=select): status=%s qtime=%s hits=%s",
                response.response_header.status,
                response.response_header.q_time,
                response.response.num_found,
            )
            return response
        except pysolr.SolrError as err:
            raise ValueError(
                f"Error during Pysolr call, type={type(err)} err={err}"
            ) from err
        except ValidationError as err:
            raise ValueError(
                f"Unexpected response format from Solr: err={err.json()}"
            ) from err

    def add(
        self, documents: Sequence[Mapping[str, Any]], **kwargs: Any
    ) -> SolrUpdateResponse:
        """
        Add documents to the Solr collection.

        No validation is done on the input documents.

        Args:
            documents:
                The documents to be added to the Solr collection. These documents should
                be serializable to JSON.
            **kwargs:
                Additional keyword arguments to pass to :py:meth:`pysolr.Solr.add`.

        Returns:
            The deserialized update response from Solr.

        Raises:
            ValueError: If the Solr call fails or the response cannot be parsed.
        """
        logger.debug("Preparing documents for insertion into Solr collection")
        start = time.perf_counter()
        # convert datetimes/numpy/bytes into Solr-compatible JSON values
        updated_docs = [prepare_document_for_solr(doc) for doc in documents]
        logger.debug(
            "Prepared %d documents, took %.2g seconds",
            len(documents),
            time.perf_counter() - start,
        )
        try:
            logger.info("Adding %d documents to the Solr collection", len(documents))
            # pysolr.Solr.add is not typed, but in code tracing it will always be this
            res_text = str(self._get_client().add(updated_docs, **kwargs))
            # update responses in pysolr are always in XML format
            # response = SolrUpdateResponse.from_xml(res_text)
            # NOTE(review): the comment above claims XML, yet the response is parsed
            # as JSON here (the from_xml call is commented out) — presumably the
            # server is configured with a JSON response writer; confirm which format
            # pysolr actually returns in this deployment
            response = SolrUpdateResponse.model_validate_json(res_text)
            logger.info(
                "Solr response received (path=update): status=%s qtime=%s",
                response.response_header.status,
                response.response_header.q_time,
            )
            return response
        except pysolr.SolrError as err:
            raise ValueError(
                f"Error during Pysolr call, type={type(err)} err={err}"
            ) from err
        except ValidationError as err:
            raise ValueError(
                f"Unexpected response format from Solr: err={err.json()}"
            ) from err

    def _delete(
        self, query_string: Optional[str], ids: Optional[list[str]], **kwargs: Any
    ) -> SolrUpdateResponse:
        """
        Send a Solr delete request by query string and/or ID list.

        Raises:
            ValueError: If the Solr call fails or the XML response cannot be parsed.
        """
        try:
            res_text = self._get_client().delete(q=query_string, id=ids, **kwargs)
            # update responses in pysolr are always in XML format
            response = SolrUpdateResponse.from_xml(res_text)
            logger.info(
                "Solr response received (path=update): status=%s qtime=%s",
                response.response_header.status,
                response.response_header.q_time,
            )
            return response
        except pysolr.SolrError as err:
            raise ValueError(
                f"Error during Pysolr call, type={type(err)} err={err}"
            ) from err
        except ParseError as err:
            raise ValueError(
                f"Error parsing XML response from Solr: err={err}"
            ) from err
        except ValidationError as err:
            raise ValueError(
                f"Unexpected response format from Solr: err={err.json()}"
            ) from err

    def delete_by_query(self, query_string: str, **kwargs: Any) -> SolrUpdateResponse:
        """
        Delete documents from the Solr collection using a query string.

        Args:
            query_string: A query string matching the documents that should be deleted.
            **kwargs:
                Additional keyword arguments to pass to :py:meth:`pysolr.Solr.delete`.

        Returns:
            The deserialized response from Solr.
        """
        logger.info(
            "Deleting documents from Solr matching query '%s', collection url=%s",
            query_string,
            self._base_url,
        )
        return self._delete(query_string=query_string, ids=None, **kwargs)

    def delete_by_id(self, ids: Sequence[str], **kwargs: Any) -> SolrUpdateResponse:
        """
        Delete documents from the Solr collection using their IDs.

        If the set of IDs is known, this is generally more efficient than using
        :py:meth:`.delete_by_query`.

        Args:
            ids: A sequence of document IDs to be deleted.
            **kwargs:
                Additional keyword arguments to pass to :py:meth:`pysolr.Solr.delete`.

        Returns:
            The deserialized response from Solr.

        Raises:
            ValueError: If the list of IDs is empty.
        """
        if not ids:
            raise ValueError("The list of IDs to delete cannot be empty")
        logger.info(
            "Deleting %d documents from the Solr collection by ID, collection url=%s",
            len(ids),
            self._base_url,
        )
        return self._delete(query_string=None, ids=list(ids), **kwargs)

    def clear_collection(self, **kwargs: Any) -> SolrUpdateResponse:
        """
        Delete all documents from the Solr collection.

        Args:
            **kwargs:
                Optional keyword arguments to be passed to
                :py:meth:`pysolr.Solr.delete`.

        Returns:
            The deserialized response from Solr.
        """
        logger.warning("The Solr collection is being cleared")
        return self.delete_by_query(SolrConstants.QUERY_ALL, **kwargs)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/client/sync.py",
"license": "MIT License",
"lines": 195,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/client/utils.py | """Utilities for use with Solr clients, particularly for preparing data for ingestion."""
from datetime import date, datetime, timezone
try:
from datetime import UTC
except ImportError:
UTC = timezone.utc
from collections.abc import Mapping
from typing import Any, Union, cast
import numpy as np
from llama_index.vector_stores.solr.constants import SolrConstants
def format_datetime_for_solr(dt: Union[datetime, date]) -> str:
    """
    Render a :py:class:`datetime.datetime` or :py:class:`datetime.date` as a Solr date string.

    Timezone-aware datetimes are converted to UTC; naive ones are assumed to
    already be UTC. Plain dates carry no timezone and are formatted as-is.

    See `Solr documentation <https://solr.apache.org/guide/solr/latest/indexing-guide/date-formatting-math.html>`_
    for more information on how Solr treats date fields.

    Examples:
        >>> from datetime import datetime
        >>> from zoneinfo import ZoneInfo
        >>> val = datetime(2025, 2, 18, 1, 2, 3, tzinfo=ZoneInfo("America/New_York"))
        >>> format_datetime_for_solr(val)
        '2025-02-18T06:02:03Z'

    Args:
        dt:
            The input :py:class:`datetime.datetime` or :py:class:`datetime.date`

    Returns:
        A Solr-compatible date string.
    """
    if isinstance(dt, datetime):
        # normalize to UTC: convert aware values, stamp naive ones as UTC
        dt = dt.astimezone(UTC) if dt.tzinfo is not None else dt.replace(tzinfo=UTC)
    return dt.strftime(SolrConstants.SOLR_ISO8601_DATE_FORMAT)
def prepare_document_for_solr(document: Mapping[str, Any]) -> dict[str, Any]:
    """
    Prepare a document dictionary for insertion into Solr, converting datatypes when necessary.

    The underlying Solr clients used do not always prepare certain datatypes
    appropriately for calls to Solr, which can lead to surprising errors. This
    function normalizes the problematic types, with explicit support for:

    * :py:class:`bytes` is decoded into a :py:class:`str`
    * :py:class:`datetime.datetime` is formatted into a Solr-compatible date string
    * :py:class:`datetime.date` is formatted into a Solr-compatible date string
    * :py:class:`numpy.ndarray` and its contents are converted into a
      :py:class:`list` of Python primitive types using :py:mod:`numpy` defaults

    Args:
        document: The document dictionary to prepare.

    Returns:
        A document dictionary prepared for insertion into Solr.
    """

    def _to_solr_value(value: Any) -> Any:
        # Per-field normalization; values of any other type pass through untouched.
        if isinstance(value, (datetime, date)):
            return format_datetime_for_solr(value)
        if isinstance(value, np.ndarray):
            return cast(list, value.tolist())
        if isinstance(value, np.generic):
            # Covers all numpy scalar types; .item() yields the Python primitive.
            return value.item()
        if isinstance(value, bytes):
            return value.decode()
        return value

    return {field: _to_solr_value(raw) for field, raw in document.items()}
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/client/utils.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/constants.py | """
Constants for Solr vector store.
This module contains configuration constants, escape rules, and field definitions
used throughout the Solr vector store implementation. These constants ensure
consistent behavior across different components and provide centralized
configuration for Solr-specific operations.
The constants are organized into the following categories:
- Tokenization and delimiter constants
- Sparse encoding field definitions
- Query escaping rules for different Solr query parsers
- Default configuration values
"""
from types import MappingProxyType, SimpleNamespace
from typing import Final
# =============================================================================
# Configuration Defaults
# =============================================================================
# =============================================================================
# Configuration Defaults
# =============================================================================

SOLR_DEFAULT_MINIMUM_CHUNK_SIZE: Final[int] = 50
"""Default minimum size (in characters) for text chunks during document processing."""

# =============================================================================
# Query Escaping Rules
# =============================================================================

# Characters carrying special meaning in Solr query syntax, mapped to their
# escaped forms. Private: consumers should use the translation tables below.
_ESCAPE_RULES_MAPPING = {
    "/": r"\/",
    "'": r"\'",
    "\\": r"\\\\",
    "+": r"\+",
    "-": r"\-",
    "&": r"\&",
    "|": r"\|",
    "!": r"\!",
    "(": r"\(",
    ")": r"\)",
    "{": r"\{",
    "}": r"\}",
    "[": r"\[",
    "]": r"\]",
    "^": r"\^",
    "~": r"\~",
    "*": r"\*",
    "?": r"\?",
    ":": r"\:",
    '"': r"\"",
    ";": r"\;",
    " ": r"\ ",
}

ESCAPE_RULES_GENERIC = MappingProxyType[int, str](str.maketrans(_ESCAPE_RULES_MAPPING))
"""Translation table escaping Solr-special characters in standard queries.

Intended for use with :py:meth:`str.translate`:

    escaped_query = user_input.translate(ESCAPE_RULES_GENERIC)
"""

ESCAPE_RULES_NESTED_LUCENE_DISMAX = MappingProxyType[int, str](
    # ``+`` and ``-`` need an extra escaping level because the query passes
    # through two parsers (``lucene`` then ``dismax``), each of which consumes
    # one layer of escapes.
    str.maketrans(_ESCAPE_RULES_MAPPING | {"+": r"\\+", "-": r"\\-"})
)
"""Translation table escaping characters for nested Lucene+DisMax queries.

Use this when the query text will be parsed by the Lucene query parser and
then the DisMax query parser in sequence:

    escaped_query = user_input.translate(ESCAPE_RULES_NESTED_LUCENE_DISMAX)
"""
class SolrConstants(SimpleNamespace):
    """Namespace of constant values shared by the Solr client implementations."""

    QUERY_ALL: Final[str] = "*:*"
    """Match-all Solr query string."""

    DEFAULT_TIMEOUT_SEC: Final[int] = 60
    """Default timeout for requests to Solr, in seconds."""

    SOLR_ISO8601_DATE_FORMAT: Final[str] = "%Y-%m-%dT%H:%M:%SZ"
    """:py:meth:`datetime.datetime.strftime` pattern yielding Solr-compatible datetimes.

    See `Solr documentation
    <https://solr.apache.org/guide/solr/latest/indexing-guide/date-formatting-math.html>`_
    for more information.
    """
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/constants.py",
"license": "MIT License",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.