index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/writer.py | """Writer chat wrapper."""
from __future__ import annotations
import json
import logging
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Literal,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
BaseChatModel,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
ToolMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable
from langchain_core.utils import get_from_dict_or_env
from langchain_core.utils.function_calling import convert_to_openai_tool
from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
logger = logging.getLogger(__name__)
class ChatWriter(BaseChatModel):
    """Writer chat model.

    To use, you should have the ``writer-sdk`` Python package installed, and the
    environment variable ``WRITER_API_KEY`` set with your API key or pass 'api_key'
    init param.

    Example:
        .. code-block:: python

            from langchain_community.chat_models import ChatWriter

            chat = ChatWriter(
                api_key="your key",
                model="palmyra-x-004",
            )
    """

    client: Any = Field(default=None, exclude=True)  #: :meta private:
    async_client: Any = Field(default=None, exclude=True)  #: :meta private:
    api_key: Optional[SecretStr] = Field(default=None)
    """Writer API key."""
    model_name: str = Field(default="palmyra-x-004", alias="model")
    """Model name to use."""
    temperature: float = 0.7
    """What sampling temperature to use."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""
    n: int = 1
    """Number of chat completions to generate for each prompt."""
    max_tokens: Optional[int] = None
    """Maximum number of tokens to generate."""

    model_config = ConfigDict(populate_by_name=True)

    @property
    def _llm_type(self) -> str:
        """Return type of chat model."""
        return "writer-chat"

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {
            "model_name": self.model_name,
            "temperature": self.temperature,
            **self.model_kwargs,
        }

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling Writer API."""
        return {
            "model": self.model_name,
            "temperature": self.temperature,
            "n": self.n,
            "max_tokens": self.max_tokens,
            **self.model_kwargs,
        }

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validates that api key is passed and creates Writer clients.

        Builds a sync ``Client`` and an ``AsyncClient`` from the ``api_key``
        init param or the ``WRITER_API_KEY`` environment variable unless the
        caller supplied clients explicitly.

        Raises:
            ImportError: If the ``writerai`` package is not installed.
            ValueError: If the supplied clients are not writerai client
                instances.
        """
        try:
            from writerai import AsyncClient, Client
        except ImportError as e:
            raise ImportError(
                "Could not import writerai python package. "
                "Please install it with `pip install writerai`."
            ) from e

        if not values.get("client"):
            values.update(
                {
                    "client": Client(
                        api_key=get_from_dict_or_env(
                            values, "api_key", "WRITER_API_KEY"
                        )
                    )
                }
            )
        if not values.get("async_client"):
            values.update(
                {
                    "async_client": AsyncClient(
                        api_key=get_from_dict_or_env(
                            values, "api_key", "WRITER_API_KEY"
                        )
                    )
                }
            )
        # Use isinstance() rather than `type(...) is ...` so subclasses of the
        # SDK clients (e.g. instrumented clients or test doubles) are accepted.
        if not (
            isinstance(values.get("client"), Client)
            and isinstance(values.get("async_client"), AsyncClient)
        ):
            raise ValueError(
                "'client' attribute must be with type 'Client' and "
                "'async_client' must be with type 'AsyncClient' from 'writerai' package"
            )
        return values

    def _create_chat_result(self, response: Any) -> ChatResult:
        """Build a ChatResult from a non-streaming Writer chat response."""
        generations = []
        for choice in response.choices:
            message = self._convert_writer_to_langchain(choice.message)
            gen = ChatGeneration(
                message=message,
                generation_info=dict(finish_reason=choice.finish_reason),
            )
            generations.append(gen)

        token_usage = {}
        if response.usage:
            token_usage = response.usage.__dict__
        llm_output = {
            "token_usage": token_usage,
            "model_name": self.model_name,
            "system_fingerprint": response.system_fingerprint,
        }
        return ChatResult(generations=generations, llm_output=llm_output)

    @staticmethod
    def _convert_langchain_to_writer(message: BaseMessage) -> dict:
        """Convert a LangChain message to a Writer message dict.

        Raises:
            ValueError: For message types with no Writer role mapping.
        """
        message_dict = {"role": "", "content": message.content}

        if isinstance(message, ChatMessage):
            message_dict["role"] = message.role
        elif isinstance(message, HumanMessage):
            message_dict["role"] = "user"
        elif isinstance(message, AIMessage):
            message_dict["role"] = "assistant"
            if message.tool_calls:
                message_dict["tool_calls"] = [
                    {
                        "id": tool["id"],
                        "type": "function",
                        "function": {"name": tool["name"], "arguments": tool["args"]},
                    }
                    for tool in message.tool_calls
                ]
        elif isinstance(message, SystemMessage):
            message_dict["role"] = "system"
        elif isinstance(message, ToolMessage):
            message_dict["role"] = "tool"
            message_dict["tool_call_id"] = message.tool_call_id
        else:
            raise ValueError(f"Got unknown message type: {type(message)}")

        if message.name:
            message_dict["name"] = message.name
        return message_dict

    @staticmethod
    def _convert_writer_to_langchain(response_message: Any) -> BaseMessage:
        """Convert a Writer message to a LangChain message."""
        # Normalize SDK objects to plain dicts via a JSON round-trip.
        if not isinstance(response_message, dict):
            response_message = json.loads(
                json.dumps(response_message, default=lambda o: o.__dict__)
            )

        role = response_message.get("role", "")
        content = response_message.get("content")
        if not content:
            content = ""

        if role == "user":
            return HumanMessage(content=content)
        elif role == "assistant":
            additional_kwargs = {}
            if tool_calls := response_message.get("tool_calls", []):
                additional_kwargs["tool_calls"] = tool_calls
            # NOTE: an AIMessageChunk (not AIMessage) is returned so the same
            # converter can serve the streaming paths, where chunks must be
            # concatenable.
            return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
        elif role == "system":
            return SystemMessage(content=content)
        elif role == "tool":
            return ToolMessage(
                content=content,
                tool_call_id=response_message.get("tool_call_id", ""),
                name=response_message.get("name", ""),
            )
        else:
            return ChatMessage(content=content, role=role)

    def _convert_messages_to_writer(
        self, messages: List[BaseMessage], stop: Optional[List[str]] = None
    ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
        """Convert a list of LangChain messages to List of Writer dicts.

        Returns:
            A tuple of (message dicts, request params for the Writer API).
        """
        params = {
            "model": self.model_name,
            "temperature": self.temperature,
            "n": self.n,
            **self.model_kwargs,
        }
        if stop:
            params["stop"] = stop
        if self.max_tokens is not None:
            params["max_tokens"] = self.max_tokens

        message_dicts = [self._convert_langchain_to_writer(m) for m in messages]
        return message_dicts, params

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        """Stream chat completion chunks synchronously."""
        message_dicts, params = self._convert_messages_to_writer(messages, stop)
        params = {**params, **kwargs, "stream": True}

        response = self.client.chat.chat(messages=message_dicts, **params)
        for chunk in response:
            delta = chunk.choices[0].delta
            # Skip keep-alive/empty deltas.
            if not delta or not delta.content:
                continue
            chunk = self._convert_writer_to_langchain(
                {"role": "assistant", "content": delta.content}
            )
            chunk = ChatGenerationChunk(message=chunk)
            if run_manager:
                run_manager.on_llm_new_token(chunk.text)
            yield chunk

    async def _astream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        """Stream chat completion chunks asynchronously."""
        message_dicts, params = self._convert_messages_to_writer(messages, stop)
        params = {**params, **kwargs, "stream": True}

        response = await self.async_client.chat.chat(messages=message_dicts, **params)
        async for chunk in response:
            delta = chunk.choices[0].delta
            # Skip keep-alive/empty deltas.
            if not delta or not delta.content:
                continue
            chunk = self._convert_writer_to_langchain(
                {"role": "assistant", "content": delta.content}
            )
            chunk = ChatGenerationChunk(message=chunk)
            if run_manager:
                await run_manager.on_llm_new_token(chunk.text)
            yield chunk

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Synchronously generate a chat completion."""
        message_dicts, params = self._convert_messages_to_writer(messages, stop)
        params = {**params, **kwargs}
        response = self.client.chat.chat(messages=message_dicts, **params)
        return self._create_chat_result(response)

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Asynchronously generate a chat completion."""
        message_dicts, params = self._convert_messages_to_writer(messages, stop)
        params = {**params, **kwargs}
        response = await self.async_client.chat.chat(messages=message_dicts, **params)
        return self._create_chat_result(response)

    def bind_tools(
        self,
        tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]],
        *,
        tool_choice: Optional[Union[str, Literal["auto", "none"]]] = None,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, BaseMessage]:
        """Bind tools to the chat model.

        Args:
            tools: Tools to bind to the model
            tool_choice: Which tool to require ('auto', 'none', or specific tool name)
            **kwargs: Additional parameters to pass to the chat model

        Returns:
            A runnable that will use the tools
        """
        formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
        if tool_choice:
            kwargs["tool_choice"] = (
                (tool_choice)
                if tool_choice in ("auto", "none")
                else {"type": "function", "function": {"name": tool_choice}}
            )
        return super().bind(tools=formatted_tools, **kwargs)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/solar.py | """Wrapper around Solar chat models."""
from typing import Dict
from langchain_core._api import deprecated
from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import ConfigDict, Field
from langchain_community.chat_models import ChatOpenAI
from langchain_community.llms.solar import SOLAR_SERVICE_URL_BASE, SolarCommon
@deprecated(  # type: ignore[arg-type]
    since="0.0.34", removal="1.0", alternative_import="langchain_upstage.ChatUpstage"
)
class SolarChat(SolarCommon, ChatOpenAI):  # type: ignore[override, override]
    """Wrapper around Solar large language models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``SOLAR_API_KEY`` set with your API key.
    (Solar's chat API is compatible with OpenAI's SDK.)

    Referenced from https://console.upstage.ai/services/solar

    Example:
        .. code-block:: python

            from langchain_community.chat_models.solar import SolarChat

            solar = SolarChat(model="solar-1-mini-chat")
    """

    max_tokens: int = Field(default=1024)

    # this is needed to match ChatOpenAI superclass
    model_config = ConfigDict(
        populate_by_name=True,
        arbitrary_types_allowed=True,
        extra="ignore",
    )

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the environment is set up correctly.

        Resolves the API key, then constructs the OpenAI-compatible sync/async
        clients pointed at the Solar service unless already supplied.

        Raises:
            ImportError: If the ``openai`` package is not installed.
        """
        values["solar_api_key"] = get_from_dict_or_env(
            values, "solar_api_key", "SOLAR_API_KEY"
        )

        try:
            import openai
        except ImportError as e:
            # Chain the original ImportError for a clearer traceback.
            raise ImportError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            ) from e

        client_params = {
            "api_key": values["solar_api_key"],
            # Fall back to the public Solar endpoint when no base_url is given.
            "base_url": values.get("base_url", SOLAR_SERVICE_URL_BASE),
        }

        if not values.get("client"):
            values["client"] = openai.OpenAI(**client_params).chat.completions
        if not values.get("async_client"):
            values["async_client"] = openai.AsyncOpenAI(
                **client_params
            ).chat.completions

        return values
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/kinetica.py | ##
# Copyright (c) 2024, Chad Juliano, Kinetica DB Inc.
##
"""Kinetica SQL generation LLM API."""
import json
import logging
import os
import re
from importlib.metadata import version
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Pattern, cast
from langchain_core.utils import pre_init
if TYPE_CHECKING:
import gpudb
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
BaseMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.output_parsers.transform import BaseOutputParser
from langchain_core.outputs import ChatGeneration, ChatResult, Generation
from pydantic import BaseModel, ConfigDict, Field
LOG = logging.getLogger(__name__)
# Kinetica pydantic API datatypes
class _KdtSuggestContext(BaseModel):
    """pydantic API request type"""

    table: Optional[str] = Field(default=None, title="Name of table")
    description: Optional[str] = Field(default=None, title="Table description")
    columns: List[str] = Field(default=[], title="Table columns list")
    rules: Optional[List[str]] = Field(
        default=None, title="Rules that apply to the table."
    )
    samples: Optional[Dict] = Field(
        default=None, title="Samples that apply to the entire context."
    )

    def to_system_str(self) -> str:
        """Render this table context as a CREATE TABLE DDL snippet.

        Returns:
            DDL text with the column list, plus optional COMMENT and rule
            comment lines.

        Raises:
            ValueError: If the columns list is empty.
        """
        lines = []
        lines.append(f"CREATE TABLE {self.table} AS")
        lines.append("(")

        if not self.columns or len(self.columns) == 0:
            # Bug fix: the ValueError was previously constructed but never
            # raised, silently emitting DDL with no columns.
            raise ValueError("columns list can't be null.")

        columns = []
        for column in self.columns:
            # Strip quoting/whitespace so the DDL stays uniform.
            column = column.replace('"', "").strip()
            columns.append(f" {column}")
        lines.append(",\n".join(columns))
        lines.append(");")

        if self.description:
            lines.append(f"COMMENT ON TABLE {self.table} IS '{self.description}';")

        if self.rules and len(self.rules) > 0:
            lines.append(
                f"-- When querying table {self.table} the following rules apply:"
            )
            for rule in self.rules:
                lines.append(f"-- * {rule}")

        result = "\n".join(lines)
        return result
class _KdtSuggestPayload(BaseModel):
    """pydantic API request type"""

    question: Optional[str] = None
    context: List[_KdtSuggestContext]

    def get_system_str(self) -> str:
        """Concatenate the DDL of every context entry that names a table."""
        rendered = [
            table_context.to_system_str()
            for table_context in self.context
            if table_context.table is not None
        ]
        return "\n\n".join(rendered)

    def get_messages(self) -> List[Dict]:
        """Flatten per-context samples into alternating user/assistant dicts."""
        messages: List[Dict] = []
        for table_context in self.context:
            if table_context.samples is None:
                continue
            for question, answer in table_context.samples.items():
                # Samples arrive with SQL-style escaped quotes; unescape them.
                messages.append(dict(role="user", content=question or ""))
                messages.append(
                    dict(role="assistant", content=answer.replace("''", "'"))
                )
        return messages

    def to_completion(self) -> Dict:
        """Assemble the full /chat/completions message payload."""
        messages = [dict(role="system", content=self.get_system_str())]
        messages.extend(self.get_messages())
        messages.append(dict(role="user", content=self.question or ""))
        return dict(messages=messages)
class _KdtoSuggestRequest(BaseModel):
    """pydantic API request type"""

    # Envelope matching the {"payload": ...} shape produced by GENERATE PROMPT.
    payload: _KdtSuggestPayload
class _KdtMessage(BaseModel):
    """pydantic API response type"""

    # Chat role; per the field title, one of [user|assistant|system].
    role: str = Field(default="", title="One of [user|assistant|system]")
    # Message text (for assistant messages, the generated SQL).
    content: str
class _KdtChoice(BaseModel):
    """pydantic API response type"""

    # Position of this choice in the response's choices list.
    index: int
    # The generated message, if any (title: "The generated SQL").
    message: Optional[_KdtMessage] = Field(default=None, title="The generated SQL")
    finish_reason: str
class _KdtUsage(BaseModel):
    """pydantic API response type"""

    # Token accounting mirroring the OpenAI-style usage object.
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int
class _KdtSqlResponse(BaseModel):
    """pydantic API response type"""

    # OpenAI-style completion envelope fields.
    id: str
    object: str
    created: int
    model: str
    choices: List[_KdtChoice]
    usage: _KdtUsage
    # Echo of the input question (title: "The input question").
    prompt: str = Field(default="", title="The input question")
class _KdtCompletionResponse(BaseModel):
    """pydantic API response type"""

    # "OK" on success; checked by ChatKinetica._submit_completion.
    status: str
    data: _KdtSqlResponse
class _KineticaLlmFileContextParser:
"""Parser for Kinetica LLM context datafiles."""
# parse line into a dict containing role and content
PARSER: Pattern = re.compile(r"^<\|(?P<role>\w+)\|>\W*(?P<content>.*)$", re.DOTALL)
@classmethod
def _removesuffix(cls, text: str, suffix: str) -> str:
if suffix and text.endswith(suffix):
return text[: -len(suffix)]
return text
@classmethod
def parse_dialogue_file(cls, input_file: os.PathLike) -> Dict:
path = Path(input_file)
# schema = path.name.removesuffix(".txt") python 3.9
schema = cls._removesuffix(path.name, ".txt")
lines = open(input_file).read()
return cls.parse_dialogue(lines, schema)
@classmethod
def parse_dialogue(cls, text: str, schema: str) -> Dict:
messages = []
system = None
lines = text.split("<|end|>")
user_message = None
for idx, line in enumerate(lines):
line = line.strip()
if len(line) == 0:
continue
match = cls.PARSER.match(line)
if match is None:
raise ValueError(f"Could not find starting token in: {line}")
groupdict = match.groupdict()
role = groupdict["role"]
if role == "system":
if system is not None:
raise ValueError(f"Only one system token allowed in: {line}")
system = groupdict["content"]
elif role == "user":
if user_message is not None:
raise ValueError(
f"Found user token without assistant token: {line}"
)
user_message = groupdict
elif role == "assistant":
if user_message is None:
raise Exception(f"Found assistant token without user token: {line}")
messages.append(user_message)
messages.append(groupdict)
user_message = None
else:
raise ValueError(f"Unknown token: {role}")
return {"schema": schema, "system": system, "messages": messages}
class KineticaUtil:
    """Kinetica utility functions."""

    @classmethod
    def create_kdbc(
        cls,
        url: Optional[str] = None,
        user: Optional[str] = None,
        passwd: Optional[str] = None,
    ) -> "gpudb.GPUdb":
        """Create a connectica connection object and verify connectivity.

        If None is passed for one or more of the parameters then an attempt will be made
        to retrieve the value from the related environment variable.

        Args:
            url: The Kinetica URL or ``KINETICA_URL`` if None.
            user: The Kinetica user or ``KINETICA_USER`` if None.
            passwd: The Kinetica password or ``KINETICA_PASSWD`` if None.

        Returns:
            The Kinetica connection object.
        """
        try:
            import gpudb
        except ModuleNotFoundError:
            raise ImportError(
                "Could not import Kinetica python package. "
                "Please install it with `pip install gpudb`."
            )

        # Resolve each credential, falling back to the environment.
        host_url = cls._get_env("KINETICA_URL", url)
        username = cls._get_env("KINETICA_USER", user)
        password = cls._get_env("KINETICA_PASSWD", passwd)

        options = gpudb.GPUdb.Options()
        options.username = username
        options.password = password
        options.skip_ssl_cert_verification = True
        options.disable_failover = True
        options.logging_level = "INFO"

        connection = gpudb.GPUdb(host=host_url, options=options)
        LOG.info(
            "Connected to Kinetica: {}. (api={}, server={})".format(
                connection.get_url(), version("gpudb"), connection.server_version
            )
        )
        return connection

    @classmethod
    def _get_env(cls, name: str, default: Optional[str]) -> str:
        """Return ``default`` when provided, else the environment variable."""
        if default is not None:
            return default
        value = os.getenv(name)
        if value is None:
            raise ValueError(
                f"Parameter was not passed and not found in the environment: {name}"
            )
        return value
class ChatKinetica(BaseChatModel):
    """Kinetica LLM Chat Model API.

    Prerequisites for using this API:

    * The ``gpudb`` and ``typeguard`` packages installed.
    * A Kinetica DB instance.
    * Kinetica host specified in ``KINETICA_URL``
    * Kinetica login specified ``KINETICA_USER``, and ``KINETICA_PASSWD``.
    * An LLM context that specifies the tables and samples to use for inferencing.

    This API is intended to interact with the Kinetica SqlAssist LLM that supports
    generation of SQL from natural language.

    In the Kinetica LLM workflow you create an LLM context in the database that
    provides information needed for inferencing that includes tables, annotations,
    rules, and samples. Invoking ``load_messages_from_context()`` will retrieve the
    context information from the database so that it can be used to create a chat
    prompt.

    The chat prompt consists of a ``SystemMessage`` and pairs of
    ``HumanMessage``/``AIMessage`` that contain the samples which are question/SQL
    pairs. You can append sample pairs to this list but it is not intended to
    facilitate a typical natural language conversation.

    When you create a chain from the chat prompt and execute it, the Kinetica LLM
    will generate SQL from the input. Optionally you can use
    ``KineticaSqlOutputParser`` to execute the SQL and return the result as a
    dataframe.

    The following example creates an LLM using the environment variables for the
    Kinetica connection. This will fail if the API is unable to connect to the
    database.

    Example:
        .. code-block:: python

            from langchain_community.chat_models.kinetica import ChatKinetica
            kinetica_llm = ChatKinetica()

    If you prefer to pass connection information directly then you can create a
    connection using ``KineticaUtil.create_kdbc()``.

    Example:
        .. code-block:: python

            from langchain_community.chat_models.kinetica import (
                ChatKinetica, KineticaUtil)
            kdbc = KineticaUtil.create_kdbc(url=url, user=user, passwd=passwd)
            kinetica_llm = ChatKinetica(kdbc=kdbc)
    """

    kdbc: Any = Field(exclude=True)
    """ Kinetica DB connection. """

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Pydantic object validator.

        Creates a default connection from the environment when no ``kdbc``
        connection is supplied explicitly.
        """
        kdbc = values.get("kdbc", None)
        if kdbc is None:
            kdbc = KineticaUtil.create_kdbc()
            values["kdbc"] = kdbc
        return values

    @property
    def _llm_type(self) -> str:
        # LangChain model-type identifier.
        return "kinetica-sqlassist"

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        # Server and client API versions uniquely identify the backend.
        return dict(
            kinetica_version=str(self.kdbc.server_version), api_version=version("gpudb")
        )

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Generate SQL for the prompt via the SqlAssist completions endpoint.

        Raises:
            ValueError: If ``stop`` sequences are passed (not supported).
        """
        if stop is not None:
            raise ValueError("stop kwargs are not permitted.")

        dict_messages = [self._convert_message_to_dict(m) for m in messages]
        sql_response = self._submit_completion(dict_messages)

        response_message = cast(_KdtMessage, sql_response.choices[0].message)
        generated_dict = response_message.model_dump()

        generated_message = self._convert_message_from_dict(generated_dict)

        llm_output = dict(
            input_tokens=sql_response.usage.prompt_tokens,
            output_tokens=sql_response.usage.completion_tokens,
            model_name=sql_response.model,
        )
        return ChatResult(
            generations=[ChatGeneration(message=generated_message)],
            llm_output=llm_output,
        )

    def load_messages_from_context(self, context_name: str) -> List:
        """Load a langchain prompt from a Kinetica context.

        A Kinetica Context is an object created with the Kinetica Workbench UI or with
        SQL syntax. This function will convert the data in the context to a list of
        messages that can be used as a prompt. The messages will contain a
        ``SystemMessage`` followed by pairs of ``HumanMessage``/``AIMessage`` that
        contain the samples.

        Args:
            context_name: The name of an LLM context in the database.

        Returns:
            A list of messages containing the information from the context.
        """
        # query kinetica for the prompt
        sql = f"GENERATE PROMPT WITH OPTIONS (CONTEXT_NAMES = '{context_name}')"
        result = self._execute_sql(sql)
        prompt = result["Prompt"]
        prompt_json = json.loads(prompt)

        # convert the prompt to messages
        # request = SuggestRequest.model_validate(prompt_json) # pydantic v2
        request = _KdtoSuggestRequest.model_validate(prompt_json)
        payload = request.payload

        dict_messages = []
        dict_messages.append(dict(role="system", content=payload.get_system_str()))
        dict_messages.extend(payload.get_messages())
        messages = [self._convert_message_from_dict(m) for m in dict_messages]
        return messages

    def _submit_completion(self, messages: List[Dict]) -> _KdtSqlResponse:
        """Submit a /chat/completions request to Kinetica.

        Raises:
            ValueError: If the server reports an error status or SQL
                generation fails.
        """
        request = dict(messages=messages)
        request_json = json.dumps(request)
        # NOTE(review): relies on GPUdb's name-mangled private request method;
        # may break if the gpudb SDK changes its internals — confirm with SDK.
        response_raw = self.kdbc._GPUdb__submit_request_json(
            "/chat/completions", request_json
        )
        response_json = json.loads(response_raw)

        status = response_json["status"]
        if status != "OK":
            message = response_json["message"]
            # The error message may embed a JSON response; if so, surface the
            # inner message instead of the wrapper text.
            match_resp = re.compile(r"response:({.*})")
            result = match_resp.search(message)
            if result is not None:
                response = result.group(1)
                response_json = json.loads(response)
                message = response_json["message"]
            raise ValueError(message)

        data = response_json["data"]
        # response = CompletionResponse.model_validate(data) # pydantic v2
        response = _KdtCompletionResponse.model_validate(data)
        if response.status != "OK":
            raise ValueError("SQL Generation failed")
        return response.data

    def _execute_sql(self, sql: str) -> Dict:
        """Execute an SQL query and return the result.

        Returns:
            The single returned record as a column-name -> value dict.

        Raises:
            ValueError: On a non-OK status or when exactly one record is not
                returned.
        """
        response = self.kdbc.execute_sql_and_decode(
            sql, limit=1, get_column_major=False
        )

        status_info = response["status_info"]
        if status_info["status"] != "OK":
            message = status_info["message"]
            raise ValueError(message)

        records = response["records"]
        if len(records) != 1:
            raise ValueError("No records returned.")

        record = records[0]
        response_dict = {}
        for col, val in record.items():
            response_dict[col] = val
        return response_dict

    @classmethod
    def load_messages_from_datafile(cls, sa_datafile: Path) -> List[BaseMessage]:
        """Load a langchain prompt from a Kinetica context datafile."""
        datafile_dict = _KineticaLlmFileContextParser.parse_dialogue_file(sa_datafile)
        messages = cls._convert_dict_to_messages(datafile_dict)
        return messages

    @classmethod
    def _convert_message_to_dict(cls, message: BaseMessage) -> Dict:
        """Convert a single BaseMessage to a role/content dict."""
        content = cast(str, message.content)
        if isinstance(message, HumanMessage):
            role = "user"
        elif isinstance(message, AIMessage):
            role = "assistant"
        elif isinstance(message, SystemMessage):
            role = "system"
        else:
            raise ValueError(f"Got unsupported message type: {message}")

        result_message = dict(role=role, content=content)
        return result_message

    @classmethod
    def _convert_message_from_dict(cls, message: Dict) -> BaseMessage:
        """Convert a single role/content dict to a BaseMessage."""
        role = message["role"]
        content = message["content"]
        if role == "user":
            return HumanMessage(content=content)
        elif role == "assistant":
            return AIMessage(content=content)
        elif role == "system":
            return SystemMessage(content=content)
        else:
            raise ValueError(f"Got unsupported role: {role}")

    @classmethod
    def _convert_dict_to_messages(cls, sa_data: Dict) -> List[BaseMessage]:
        """Convert a parsed datafile dict to a list of BaseMessages."""
        schema = sa_data["schema"]
        system = sa_data["system"]
        messages = sa_data["messages"]
        LOG.info(f"Importing prompt for schema: {schema}")

        result_list: List[BaseMessage] = []
        result_list.append(SystemMessage(content=system))
        result_list.extend([cls._convert_message_from_dict(m) for m in messages])
        return result_list
class KineticaSqlResponse(BaseModel):
    """Response containing SQL and the fetched data.

    This object is returned by a chain with ``KineticaSqlOutputParser`` and it contains
    the generated SQL and related Pandas Dataframe fetched from the database.
    """

    sql: str = Field(default="")
    """The generated SQL."""

    # Typed as Any (rather than pd.DataFrame) to avoid a hard pandas import.
    # dataframe: "pd.DataFrame" = Field(default=None)
    dataframe: Any = Field(default=None)
    """The Pandas dataframe containing the fetched data."""

    # arbitrary_types_allowed lets pydantic hold the non-pydantic DataFrame.
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )
class KineticaSqlOutputParser(BaseOutputParser[KineticaSqlResponse]):
    """Fetch and return data from the Kinetica LLM.

    This object is used as the last element of a chain to execute generated SQL and it
    will output a ``KineticaSqlResponse`` containing the SQL and a pandas dataframe with
    the fetched data.

    Example:
        .. code-block:: python

            from langchain_community.chat_models.kinetica import (
                ChatKinetica, KineticaSqlOutputParser)
            kinetica_llm = ChatKinetica()

            # create chain
            ctx_messages = kinetica_llm.load_messages_from_context(self.context_name)
            ctx_messages.append(("human", "{input}"))
            prompt_template = ChatPromptTemplate.from_messages(ctx_messages)
            chain = (
                prompt_template
                | kinetica_llm
                | KineticaSqlOutputParser(kdbc=kinetica_llm.kdbc)
            )
            sql_response: KineticaSqlResponse = chain.invoke(
                {"input": "What are the female users ordered by username?"}
            )

            assert isinstance(sql_response, KineticaSqlResponse)
            LOG.info(f"SQL Response: {sql_response.sql}")
            assert isinstance(sql_response.dataframe, pd.DataFrame)
    """

    kdbc: Any = Field(exclude=True)
    """ Kinetica DB connection. """

    # arbitrary_types_allowed lets pydantic hold the raw DB connection object.
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )

    def parse(self, text: str) -> KineticaSqlResponse:
        """Execute the generated SQL and wrap it with the fetched dataframe."""
        df = self.kdbc.to_df(text)
        return KineticaSqlResponse(sql=text, dataframe=df)

    def parse_result(
        self, result: List[Generation], *, partial: bool = False
    ) -> KineticaSqlResponse:
        """Parse the first generation's text as SQL and execute it."""
        return self.parse(result[0].text)

    @property
    def _type(self) -> str:
        # LangChain output-parser type identifier.
        return "kinetica_sql_output_parser"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_models/anthropic.py | from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, cast
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
agenerate_from_stream,
generate_from_stream,
)
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.prompt_values import PromptValue
from pydantic import ConfigDict
from langchain_community.llms.anthropic import _AnthropicCommon
def _convert_one_message_to_text(
    message: BaseMessage,
    human_prompt: str,
    ai_prompt: str,
) -> str:
    """Render one message with the role tag Anthropic's text API expects."""
    content = cast(str, message.content)
    if isinstance(message, ChatMessage):
        return f"\n\n{message.role.capitalize()}: {content}"
    if isinstance(message, HumanMessage):
        return f"{human_prompt} {content}"
    if isinstance(message, AIMessage):
        return f"{ai_prompt} {content}"
    if isinstance(message, SystemMessage):
        # System messages carry no role tag in the legacy prompt format.
        return content
    raise ValueError(f"Got unknown type {message}")
def convert_messages_to_prompt_anthropic(
    messages: List[BaseMessage],
    *,
    human_prompt: str = "\n\nHuman:",
    ai_prompt: str = "\n\nAssistant:",
) -> str:
    """Format a list of messages into a full prompt for the Anthropic model

    Args:
        messages (List[BaseMessage]): List of BaseMessage to combine.
        human_prompt (str, optional): Human prompt tag. Defaults to "\n\nHuman:".
        ai_prompt (str, optional): AI prompt tag. Defaults to "\n\nAssistant:".

    Returns:
        str: Combined string with necessary human_prompt and ai_prompt tags.
    """
    messages = messages.copy()  # don't mutate the original list

    # The prompt must end with the assistant tag. Robustness fix: also guard
    # the empty-list case, which previously raised IndexError on messages[-1].
    if not messages or not isinstance(messages[-1], AIMessage):
        messages.append(AIMessage(content=""))

    text = "".join(
        _convert_one_message_to_text(message, human_prompt, ai_prompt)
        for message in messages
    )

    # trim off the trailing ' ' that might come from the "Assistant: "
    return text.rstrip()
@deprecated(
    since="0.0.28",
    removal="1.0",
    alternative_import="langchain_anthropic.ChatAnthropic",
)
class ChatAnthropic(BaseChatModel, _AnthropicCommon):
    """`Anthropic` chat large language models.
    To use, you should have the ``anthropic`` python package installed, and the
    environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
    it as a named parameter to the constructor.
    Example:
        .. code-block:: python
            import anthropic
            from langchain_community.chat_models import ChatAnthropic
            model = ChatAnthropic(model="<model_name>", anthropic_api_key="my-api-key")
    """
    # Accept fields by alias or name, and allow non-pydantic values
    # (the anthropic SDK client objects) as field types.
    model_config = ConfigDict(
        populate_by_name=True,
        arbitrary_types_allowed=True,
    )
    @property
    def lc_secrets(self) -> Dict[str, str]:
        """Map secret constructor args to the env vars used when serializing."""
        return {"anthropic_api_key": "ANTHROPIC_API_KEY"}
    @property
    def _llm_type(self) -> str:
        """Return type of chat model."""
        return "anthropic-chat"
    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this model can be serialized by Langchain."""
        return True
    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "chat_models", "anthropic"]
    def _convert_messages_to_prompt(self, messages: List[BaseMessage]) -> str:
        """Format a list of messages into a full prompt for the Anthropic model
        Args:
            messages (List[BaseMessage]): List of BaseMessage to combine.
        Returns:
            str: Combined string with necessary HUMAN_PROMPT and AI_PROMPT tags.
        """
        # Only forward the tags that _AnthropicCommon actually provided so the
        # helper's own defaults apply otherwise.
        prompt_params: Dict[str, str] = {}
        if self.HUMAN_PROMPT:
            prompt_params["human_prompt"] = self.HUMAN_PROMPT
        if self.AI_PROMPT:
            prompt_params["ai_prompt"] = self.AI_PROMPT
        return convert_messages_to_prompt_anthropic(messages=messages, **prompt_params)
    def convert_prompt(self, prompt: PromptValue) -> str:
        """Convert a LangChain ``PromptValue`` to an Anthropic prompt string."""
        return self._convert_messages_to_prompt(prompt.to_messages())
    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        """Stream completion deltas from the Anthropic completions API.
        Args:
            messages: The conversation so far.
            stop: Optional stop sequences (forwarded as ``stop_sequences``).
            run_manager: Callback manager notified once per streamed token.
        Yields:
            ChatGenerationChunk: One chunk per completion delta.
        """
        prompt = self._convert_messages_to_prompt(messages)
        # kwargs merged last so per-call options override the model defaults.
        params: Dict[str, Any] = {"prompt": prompt, **self._default_params, **kwargs}
        if stop:
            params["stop_sequences"] = stop
        stream_resp = self.client.completions.create(**params, stream=True)
        for data in stream_resp:
            delta = data.completion
            chunk = ChatGenerationChunk(message=AIMessageChunk(content=delta))
            if run_manager:
                run_manager.on_llm_new_token(delta, chunk=chunk)
            yield chunk
    async def _astream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        """Async counterpart of :meth:`_stream`."""
        prompt = self._convert_messages_to_prompt(messages)
        # kwargs merged last so per-call options override the model defaults.
        params: Dict[str, Any] = {"prompt": prompt, **self._default_params, **kwargs}
        if stop:
            params["stop_sequences"] = stop
        stream_resp = await self.async_client.completions.create(**params, stream=True)
        async for data in stream_resp:
            delta = data.completion
            chunk = ChatGenerationChunk(message=AIMessageChunk(content=delta))
            if run_manager:
                await run_manager.on_llm_new_token(delta, chunk=chunk)
            yield chunk
    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Generate a single chat completion.
        When ``self.streaming`` is set, delegates to :meth:`_stream` and
        aggregates the chunks into a single result.
        """
        if self.streaming:
            stream_iter = self._stream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return generate_from_stream(stream_iter)
        prompt = self._convert_messages_to_prompt(
            messages,
        )
        # kwargs merged last so per-call options override the model defaults.
        params: Dict[str, Any] = {
            "prompt": prompt,
            **self._default_params,
            **kwargs,
        }
        if stop:
            params["stop_sequences"] = stop
        response = self.client.completions.create(**params)
        completion = response.completion
        message = AIMessage(content=completion)
        return ChatResult(generations=[ChatGeneration(message=message)])
    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Async counterpart of :meth:`_generate`."""
        if self.streaming:
            stream_iter = self._astream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return await agenerate_from_stream(stream_iter)
        prompt = self._convert_messages_to_prompt(
            messages,
        )
        params: Dict[str, Any] = {
            "prompt": prompt,
            **self._default_params,
            **kwargs,
        }
        if stop:
            params["stop_sequences"] = stop
        response = await self.async_client.completions.create(**params)
        completion = response.completion
        message = AIMessage(content=completion)
        return ChatResult(generations=[ChatGeneration(message=message)])
    def get_num_tokens(self, text: str) -> int:
        """Calculate number of tokens.
        Raises:
            NameError: If the token counter (supplied by the anthropic package
                via ``_AnthropicCommon``) is unavailable.
        """
        if not self.count_tokens:
            raise NameError("Please ensure the anthropic package is loaded")
        return self.count_tokens(text)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_loaders/imessage.py | from __future__ import annotations
from datetime import datetime
from pathlib import Path
from typing import TYPE_CHECKING, Iterator, List, Optional, Union
from langchain_core.chat_loaders import BaseChatLoader
from langchain_core.chat_sessions import ChatSession
from langchain_core.messages import HumanMessage
if TYPE_CHECKING:
import sqlite3
def nanoseconds_from_2001_to_datetime(nanoseconds: int) -> datetime:
    """Convert nanoseconds since 2001 to a datetime object.

    Apple stores iMessage timestamps as nanoseconds since the Apple epoch
    (January 1, 2001) rather than the Unix epoch.

    Args:
        nanoseconds (int): Nanoseconds since January 1, 2001.

    Returns:
        datetime: Datetime object (naive, local time).
    """
    # Unix timestamp of the Apple epoch (2001-01-01, local time).
    apple_epoch_seconds = datetime(2001, 1, 1).timestamp()
    # 1 second == 1e9 nanoseconds; shift onto the Unix epoch and convert back.
    return datetime.fromtimestamp(apple_epoch_seconds + nanoseconds / 1e9)
class IMessageChatLoader(BaseChatLoader):
    """Load chat sessions from the `iMessage` chat.db SQLite file.
    It only works on macOS when you have iMessage enabled and have the chat.db file.
    The chat.db file is likely located at ~/Library/Messages/chat.db. However, your
    terminal may not have permission to access this file. To resolve this, you can
    copy the file to a different location, change the permissions of the file, or
    grant full disk access for your terminal emulator
    in System Settings > Security and Privacy > Full Disk Access.
    """
    def __init__(self, path: Optional[Union[str, Path]] = None):
        """
        Initialize the IMessageChatLoader.
        Args:
            path (str or Path, optional): Path to the chat.db SQLite file.
                Defaults to None, in which case the default path
                ~/Library/Messages/chat.db will be used.
        """
        if path is None:
            # Default macOS location of the iMessage database.
            path = Path.home() / "Library" / "Messages" / "chat.db"
        self.db_path = path if isinstance(path, Path) else Path(path)
        if not self.db_path.exists():
            raise FileNotFoundError(f"File {self.db_path} not found")
        try:
            # Fail fast at construction time instead of on first load.
            # NOTE(review): sqlite3 is standard library; this guard only fires
            # on Python builds compiled without SQLite support.
            import sqlite3  # noqa: F401
        except ImportError as e:
            raise ImportError(
                "The sqlite3 module is required to load iMessage chats.\n"
                "Please install it with `pip install pysqlite3`"
            ) from e
    @staticmethod
    def _parse_attributed_body(attributed_body: bytes) -> str:
        """
        Parse the attributedBody field of the message table
        for the text content of the message.
        The attributedBody field is a binary blob that contains
        the message content after the byte string b"NSString":
        5 bytes 1-3 bytes `len` bytes
        ... | b"NSString" | preamble | `len` | contents | ...
        The 5 preamble bytes are always b"\x01\x94\x84\x01+"
        The size of `len` is either 1 byte or 3 bytes:
        - If the first byte in `len` is b"\x81" then `len` is 3 bytes long.
        So the message length is the 2 bytes after, in little Endian.
        - Otherwise, the size of `len` is 1 byte, and the message length is
        that byte.
        Args:
            attributed_body (bytes): attributedBody field of the message table.
        Return:
            str: Text content of the message.
        """
        # Everything after the marker, minus the 5-byte preamble.
        content = attributed_body.split(b"NSString")[1][5:]
        # Assume a 1-byte length field by default.
        length, start = content[0], 1
        if content[0] == 129:  # 0x81: length is in the following 2 bytes.
            length, start = int.from_bytes(content[1:3], "little"), 3
        return content[start : start + length].decode("utf-8", errors="ignore")
    @staticmethod
    def _get_session_query(use_chat_handle_table: bool) -> str:
        # Build the parameterized SQL (one "?" placeholder: the chat id) that
        # fetches all messages for one chat in chronological order.
        # Messages sent pre OSX 12 require a join through the chat_handle_join table
        # However, the table doesn't exist if database created with OSX 12 or above.
        joins_w_chat_handle = """
        JOIN chat_handle_join ON
        chat_message_join.chat_id = chat_handle_join.chat_id
        JOIN handle ON
        handle.ROWID = chat_handle_join.handle_id"""
        joins_no_chat_handle = """
        JOIN handle ON message.handle_id = handle.ROWID
        """
        joins = joins_w_chat_handle if use_chat_handle_table else joins_no_chat_handle
        return f"""
        SELECT message.date,
        handle.id,
        message.text,
        message.is_from_me,
        message.attributedBody
        FROM message
        JOIN chat_message_join ON
        message.ROWID = chat_message_join.message_id
        {joins}
        WHERE chat_message_join.chat_id = ?
        ORDER BY message.date ASC;
        """
    def _load_single_chat_session(
        self, cursor: "sqlite3.Cursor", use_chat_handle_table: bool, chat_id: int
    ) -> ChatSession:
        """
        Load a single chat session from the iMessage chat.db.
        Args:
            cursor: SQLite cursor object.
            use_chat_handle_table: Whether the legacy chat_handle_join table
                must be joined (pre OSX 12 databases).
            chat_id (int): ID of the chat session to load.
        Returns:
            ChatSession: Loaded chat session.
        """
        results: List[HumanMessage] = []
        query = self._get_session_query(use_chat_handle_table)
        cursor.execute(query, (chat_id,))
        messages = cursor.fetchall()
        for date, sender, text, is_from_me, attributedBody in messages:
            # Prefer the plain-text column; fall back to decoding the binary
            # attributedBody blob when text is NULL.
            if text:
                content = text
            elif attributedBody:
                content = self._parse_attributed_body(attributedBody)
            else:  # Skip messages with no content
                continue
            results.append(
                HumanMessage(  # type: ignore[call-arg]
                    role=sender,
                    content=content,
                    additional_kwargs={
                        # Raw Apple-epoch timestamp (ns since 2001-01-01) plus
                        # a converted datetime for convenience.
                        "message_time": date,
                        "message_time_as_datetime": nanoseconds_from_2001_to_datetime(
                            date
                        ),
                        "sender": sender,
                        "is_from_me": bool(is_from_me),
                    },
                )
            )
        return ChatSession(messages=results)
    def lazy_load(self) -> Iterator[ChatSession]:
        """
        Lazy load the chat sessions from the iMessage chat.db
        and yield them in the required format.
        Yields:
            ChatSession: Loaded chat session.
        """
        import sqlite3
        try:
            conn = sqlite3.connect(self.db_path)
        except sqlite3.OperationalError as e:
            raise ValueError(
                f"Could not open iMessage DB file {self.db_path}.\n"
                "Make sure your terminal emulator has disk access to this file.\n"
                " You can either copy the DB file to an accessible location"
                " or grant full disk access for your terminal emulator."
                " You can grant full disk access for your terminal emulator"
                " in System Settings > Security and Privacy > Full Disk Access."
            ) from e
        cursor = conn.cursor()
        # See if chat_handle_join table exists:
        query = """SELECT name FROM sqlite_master
        WHERE type='table' AND name='chat_handle_join';"""
        cursor.execute(query)
        is_chat_handle_join_exists = cursor.fetchone()
        # Fetch the list of chat IDs sorted by time (most recent first)
        query = """SELECT chat_id
        FROM message
        JOIN chat_message_join ON message.ROWID = chat_message_join.message_id
        GROUP BY chat_id
        ORDER BY MAX(date) DESC;"""
        cursor.execute(query)
        chat_ids = [row[0] for row in cursor.fetchall()]
        for chat_id in chat_ids:
            yield self._load_single_chat_session(
                cursor, is_chat_handle_join_exists, chat_id
            )
        conn.close()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_loaders/whatsapp.py | import logging
import os
import re
import zipfile
from typing import Iterator, List, Union
from langchain_core.chat_loaders import BaseChatLoader
from langchain_core.chat_sessions import ChatSession
from langchain_core.messages import AIMessage, HumanMessage
logger = logging.getLogger(__name__)
class WhatsAppChatLoader(BaseChatLoader):
    """Load `WhatsApp` conversations from a dump zip file or directory."""

    def __init__(self, path: str):
        """Initialize the WhatsAppChatLoader.

        Args:
            path (str): Path to the exported WhatsApp chat
                zip directory, folder, or file.

        To generate the dump, open the chat, click the three dots in the top
        right corner, and select "More". Then select "Export chat" and
        choose "Without media".
        """
        self.path = path
        # Boilerplate/system lines that carry no conversational content.
        ignore_lines = [
            "This message was deleted",
            "<Media omitted>",
            "image omitted",
            "Messages and calls are end-to-end encrypted. No one outside of this chat,"
            " not even WhatsApp, can read or listen to them.",
        ]
        # \u200E is a left-to-right mark that WhatsApp sprinkles into exports.
        self._ignore_lines = re.compile(
            r"(" + "|".join([r"\u200E*" + line for line in ignore_lines]) + r")",
            flags=re.IGNORECASE,
        )
        # Captures (timestamp, sender, text) from lines like
        # "[1/23/23, 3:04:05 PM] Alice: hello".
        self._message_line_regex = re.compile(
            r"\u200E*\[?(\d{1,2}/\d{1,2}/\d{2,4}, \d{1,2}:\d{2}:\d{2} (?:AM|PM))\]?[ \u200E]*([^:]+): (.+)",  # noqa
            flags=re.IGNORECASE,
        )

    def _load_single_chat_session(self, file_path: str) -> ChatSession:
        """Load a single chat session from a file.

        Args:
            file_path (str): Path to the chat file.

        Returns:
            ChatSession: The loaded chat session.
        """
        with open(file_path, "r", encoding="utf-8") as file:
            txt = file.read()
        # Split messages by newlines, but keep multi-line messages grouped
        chat_lines: List[str] = []
        current_message = ""
        for line in txt.split("\n"):
            if self._message_line_regex.match(line):
                if current_message:
                    chat_lines.append(current_message)
                current_message = line
            else:
                # Continuation line of the previous message.
                current_message += " " + line.strip()
        if current_message:
            chat_lines.append(current_message)
        results: List[Union[HumanMessage, AIMessage]] = []
        for line in chat_lines:
            result = self._message_line_regex.match(line.strip())
            if result:
                timestamp, sender, text = result.groups()
                if not self._ignore_lines.match(text.strip()):
                    results.append(
                        HumanMessage(  # type: ignore[call-arg]
                            role=sender,
                            content=text,
                            additional_kwargs={
                                "sender": sender,
                                "events": [{"message_time": timestamp}],
                            },
                        )
                    )
            else:
                logger.debug(f"Could not parse line: {line}")
        return ChatSession(messages=results)

    @staticmethod
    def _iterate_files(path: str) -> Iterator[str]:
        """Iterate over the chat files under ``path``.

        Args:
            path (str): Path to a single file, a directory, or a zip archive.

        Yields:
            str: The path to each chat ``.txt`` file (a single-file path is
                yielded as-is, regardless of extension).
        """
        if os.path.isfile(path):
            yield path
        elif os.path.isdir(path):
            for root, _, files in os.walk(path):
                for file in files:
                    if file.endswith(".txt"):
                        yield os.path.join(root, file)
        elif zipfile.is_zipfile(path):
            with zipfile.ZipFile(path) as zip_file:
                for file in zip_file.namelist():
                    if file.endswith(".txt"):
                        # NOTE(review): extracts into the current working
                        # directory — confirm this is acceptable for callers.
                        yield zip_file.extract(file)

    def lazy_load(self) -> Iterator[ChatSession]:
        """Lazy load the messages from the chat file and yield
        them as chat sessions.

        Yields:
            Iterator[ChatSession]: The loaded chat sessions.
        """
        # Bug fix: previously ``self.path`` was opened directly, which failed
        # for directories and zip archives even though ``_iterate_files``
        # already supported them (it was dead code). Iterating here keeps the
        # single-file behavior unchanged while honoring the documented
        # zip/folder support.
        for file_path in self._iterate_files(self.path):
            yield self._load_single_chat_session(file_path)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_loaders/slack.py | import json
import logging
import re
import zipfile
from pathlib import Path
from typing import Dict, Iterator, List, Union
from langchain_core.chat_loaders import BaseChatLoader
from langchain_core.chat_sessions import ChatSession
from langchain_core.messages import AIMessage, HumanMessage
logger = logging.getLogger(__name__)
class SlackChatLoader(BaseChatLoader):
    """Load `Slack` conversations from a dump zip file."""

    def __init__(
        self,
        path: Union[str, Path],
    ):
        """
        Initialize the chat loader with the path to the exported Slack dump zip file.

        :param path: Path to the exported Slack dump zip file.
        :raises FileNotFoundError: If the file does not exist.
        """
        self.zip_path = path if isinstance(path, Path) else Path(path)
        if not self.zip_path.exists():
            raise FileNotFoundError(f"File {self.zip_path} not found")

    @staticmethod
    def _load_single_chat_session(messages: List[Dict]) -> ChatSession:
        """Convert one channel's raw Slack message dicts into a ChatSession.

        Consecutive messages from the same sender are merged into a single
        message, with each original timestamp recorded under
        ``additional_kwargs["events"]``.

        :param messages: Raw Slack message dicts in chronological order.
        :return: The assembled chat session.
        """
        results: List[Union[AIMessage, HumanMessage]] = []
        previous_sender = None
        # Hoisted out of the loop: the pattern is constant, so compile it once
        # instead of once per message.
        skip_pattern = re.compile(
            r"<@U\d+> has joined the channel", flags=re.IGNORECASE
        )
        for message in messages:
            if not isinstance(message, dict):
                continue
            text = message.get("text", "")
            timestamp = message.get("ts", "")
            sender = message.get("user", "")
            if not sender:
                continue
            # Drop channel-join system notices.
            if skip_pattern.match(text):
                continue
            if sender == previous_sender:
                # Same sender as previous message: append to the current run.
                results[-1].content += "\n\n" + text
                results[-1].additional_kwargs["events"].append(
                    {"message_time": timestamp}
                )
            else:
                results.append(
                    HumanMessage(  # type: ignore[call-arg]
                        role=sender,
                        content=text,
                        additional_kwargs={
                            "sender": sender,
                            "events": [{"message_time": timestamp}],
                        },
                    )
                )
            previous_sender = sender
        return ChatSession(messages=results)

    @staticmethod
    def _read_json(zip_file: zipfile.ZipFile, file_path: str) -> List[dict]:
        """Read JSON data from a zip subfile.

        :raises ValueError: If the file does not contain a JSON list.
        """
        with zip_file.open(file_path, "r") as f:
            data = json.load(f)
        if not isinstance(data, list):
            raise ValueError(f"Expected list of dictionaries, got {type(data)}")
        return data

    def lazy_load(self) -> Iterator[ChatSession]:
        """
        Lazy load the chat sessions from the Slack dump file and yield them
        in the required format.

        :return: Iterator of chat sessions containing messages.
        """
        with zipfile.ZipFile(str(self.zip_path), "r") as zip_file:
            for file_path in zip_file.namelist():
                if file_path.endswith(".json"):
                    messages = self._read_json(zip_file, file_path)
                    yield self._load_single_chat_session(messages)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_loaders/gmail.py | import base64
import re
from typing import Any, Iterator
from langchain_core._api.deprecation import deprecated
from langchain_core.chat_loaders import BaseChatLoader
from langchain_core.chat_sessions import ChatSession
from langchain_core.messages import HumanMessage
def _extract_email_content(msg: Any) -> HumanMessage:
    """Extract the newest reply text from a Gmail API message resource.

    Args:
        msg: A Gmail API message dict (``users.messages.get`` response).

    Returns:
        HumanMessage whose content is the newest portion of the plain-text
        body and whose ``additional_kwargs["sender"]`` is the ``From`` header.

    Raises:
        ValueError: If the message has no ``From`` header or no
            ``text/plain`` part.
    """
    from_email = None
    for values in msg["payload"]["headers"]:
        name = values["name"]
        if name == "From":
            from_email = values["value"]
    if from_email is None:
        # Previously a bare ``raise ValueError`` — give callers a reason.
        raise ValueError("Could not find 'From' header in email message.")
    for part in msg["payload"]["parts"]:
        if part["mimeType"] == "text/plain":
            data = part["body"]["data"]
            data = base64.urlsafe_b64decode(data).decode("utf-8")
            # Regular expression to split the email body at the first
            # occurrence of a line that starts with "On ... wrote:" (the
            # quoted text of the email being replied to).
            pattern = re.compile(r"\r\nOn .+(\r\n)*wrote:\r\n")
            # Split the email body and extract the first part
            newest_response = re.split(pattern, data)[0]
            message = HumanMessage(
                content=newest_response, additional_kwargs={"sender": from_email}
            )
            return message
    raise ValueError("Could not find 'text/plain' part in email message.")
def _get_message_data(service: Any, message: Any) -> ChatSession:
    """Build a two-message ChatSession from a sent email and the email it
    replies to.

    Args:
        service: An authenticated Gmail API service object.
        message: A message stub (with at least an ``id``) for a sent email.

    Returns:
        ChatSession of [email being replied to, the reply itself].

    Raises:
        ValueError: If the email is not a reply, or the email it replies to
            cannot be found in the thread.
    """
    msg = service.users().messages().get(userId="me", id=message["id"]).execute()
    message_content = _extract_email_content(msg)
    in_reply_to = None
    email_data = msg["payload"]["headers"]
    for values in email_data:
        name = values["name"]
        if name == "In-Reply-To":
            in_reply_to = values["value"]
    if in_reply_to is None:
        # Previously a bare ``raise ValueError`` — give callers a reason.
        raise ValueError("Message is not a reply (no 'In-Reply-To' header).")
    thread_id = msg["threadId"]
    thread = service.users().threads().get(userId="me", id=thread_id).execute()
    messages = thread["messages"]
    response_email = None
    # Find the thread message whose Message-ID matches the In-Reply-To header.
    # Loop variable renamed from ``message`` so it no longer shadows the
    # function parameter.
    for thread_message in messages:
        email_data = thread_message["payload"]["headers"]
        for values in email_data:
            if values["name"] == "Message-ID":
                message_id = values["value"]
                if message_id == in_reply_to:
                    response_email = thread_message
    if response_email is None:
        raise ValueError("Could not find the replied-to email in the thread.")
    starter_content = _extract_email_content(response_email)
    return ChatSession(messages=[starter_content, message_content])
@deprecated(
    since="0.0.32",
    removal="1.0",
    alternative_import="langchain_google_community.GMailLoader",
)
class GMailLoader(BaseChatLoader):
    """Load data from `GMail`.
    There are many ways you could want to load data from GMail.
    This loader is currently fairly opinionated in how to do so.
    The way it does it is it first looks for all messages that you have sent.
    It then looks for messages where you are responding to a previous email.
    It then fetches that previous email, and creates a training example
    of that email, followed by your email.
    Note that there are clear limitations here. For example,
    all examples created are only looking at the previous email for context.
    To use:
    - Set up a Google Developer Account:
    Go to the Google Developer Console, create a project,
    and enable the Gmail API for that project.
    This will give you a credentials.json file that you'll need later.
    """
    def __init__(self, creds: Any, n: int = 100, raise_error: bool = False) -> None:
        """Initialize the loader.
        Args:
            creds: Google API credentials object.
            n: Maximum number of sent messages to examine.
            raise_error: If True, re-raise per-message errors instead of
                silently skipping the message.
        """
        super().__init__()
        self.creds = creds
        self.n = n
        self.raise_error = raise_error
    def lazy_load(self) -> Iterator[ChatSession]:
        """Yield one ChatSession per sent email that is a reply.
        Messages that are not replies, or that fail to parse, are skipped
        unless ``raise_error`` was set.
        """
        from googleapiclient.discovery import build
        service = build("gmail", "v1", credentials=self.creds)
        # Only consider messages the user sent.
        results = (
            service.users()
            .messages()
            .list(userId="me", labelIds=["SENT"], maxResults=self.n)
            .execute()
        )
        messages = results.get("messages", [])
        for message in messages:
            try:
                yield _get_message_data(service, message)
            except Exception as e:
                # TODO: handle errors better
                # Deliberate best-effort behavior, gated on raise_error;
                # most failures are "not a reply" ValueErrors from
                # _get_message_data.
                if self.raise_error:
                    raise e
                else:
                    pass
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_loaders/base.py | from langchain_core.chat_loaders import BaseChatLoader
__all__ = ["BaseChatLoader"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_loaders/langsmith.py | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Dict, Iterable, Iterator, List, Optional, Union, cast
from langchain_core.chat_loaders import BaseChatLoader
from langchain_core.chat_sessions import ChatSession
from langchain_core.load.load import load
if TYPE_CHECKING:
from langsmith.client import Client
from langsmith.schemas import Run
logger = logging.getLogger(__name__)
class LangSmithRunChatLoader(BaseChatLoader):
    """
    Load chat sessions from a list of LangSmith "llm" runs.

    Attributes:
        runs (Iterable[Union[str, Run]]): The list of LLM run IDs or run objects.
        client (Client): Instance of LangSmith client for fetching data.
    """

    def __init__(
        self, runs: Iterable[Union[str, Run]], client: Optional["Client"] = None
    ):
        """
        Initialize a new LangSmithRunChatLoader instance.

        :param runs: List of LLM run IDs or run objects.
        :param client: An instance of LangSmith client, if not provided,
            a new client instance will be created.
        :raises ImportError: If the ``langsmith`` package is not installed.
        """
        # Import guarded for a clear error message, consistent with
        # LangSmithDatasetChatLoader.__init__ (previously a bare import that
        # surfaced as an unexplained ModuleNotFoundError).
        try:
            from langsmith.client import Client
        except ImportError as e:
            raise ImportError(
                "The LangSmith client is required to load LangSmith runs.\n"
                "Please install it with `pip install langsmith`"
            ) from e
        self.runs = runs
        self.client = client or Client()

    @staticmethod
    def _load_single_chat_session(llm_run: "Run") -> ChatSession:
        """
        Convert an individual LangSmith LLM run to a ChatSession.

        :param llm_run: The LLM run object.
        :return: A chat session representing the run's data.
        """
        chat_session = LangSmithRunChatLoader._get_messages_from_llm_run(llm_run)
        functions = LangSmithRunChatLoader._get_functions_from_llm_run(llm_run)
        if functions:
            chat_session["functions"] = functions
        return chat_session

    @staticmethod
    def _get_messages_from_llm_run(llm_run: "Run") -> ChatSession:
        """
        Extract messages from a LangSmith LLM run.

        :param llm_run: The LLM run object.
        :return: ChatSession with the extracted messages.
        :raises ValueError: If the run is not an "llm" run, has no message
            inputs, or has not finished (no outputs).
        """
        if llm_run.run_type != "llm":
            raise ValueError(f"Expected run of type llm. Got: {llm_run.run_type}")
        if "messages" not in llm_run.inputs:
            raise ValueError(f"Run has no 'messages' inputs. Got {llm_run.inputs}")
        if not llm_run.outputs:
            raise ValueError("Cannot convert pending run")
        # ``load`` revives serialized LangChain objects (messages/chunks).
        messages = load(llm_run.inputs)["messages"]
        message_chunk = load(llm_run.outputs)["generations"][0]["message"]
        return ChatSession(messages=messages + [message_chunk])

    @staticmethod
    def _get_functions_from_llm_run(llm_run: "Run") -> Optional[List[Dict]]:
        """
        Extract functions from a LangSmith LLM run if they exist.

        :param llm_run: The LLM run object.
        :return: Functions from the run or None.
        :raises ValueError: If the run is not an "llm" run.
        """
        if llm_run.run_type != "llm":
            raise ValueError(f"Expected run of type llm. Got: {llm_run.run_type}")
        return (llm_run.extra or {}).get("invocation_params", {}).get("functions")

    def lazy_load(self) -> Iterator[ChatSession]:
        """
        Lazy load the chat sessions from the iterable of run IDs.

        This method fetches the runs and converts them to chat sessions on-the-fly,
        yielding one session at a time.

        :return: Iterator of chat sessions containing messages.
        """
        from langsmith.schemas import Run

        for run_obj in self.runs:
            try:
                # Accept either Run objects or plain run IDs.
                if hasattr(run_obj, "id"):
                    run = run_obj
                else:
                    run = self.client.read_run(run_obj)
                session = self._load_single_chat_session(cast(Run, run))
                yield session
            except ValueError as e:
                # Skip malformed or pending runs rather than abort the stream.
                logger.warning(f"Could not load run {run_obj}: {repr(e)}")
                continue
class LangSmithDatasetChatLoader(BaseChatLoader):
    """
    Load chat sessions from a LangSmith dataset with the "chat" data type.
    Attributes:
        dataset_name (str): The name of the LangSmith dataset.
        client (Client): Instance of LangSmith client for fetching data.
    """
    def __init__(self, *, dataset_name: str, client: Optional["Client"] = None):
        """
        Initialize a new LangSmithChatDatasetLoader instance.
        :param dataset_name: The name of the LangSmith dataset.
        :param client: An instance of LangSmith client; if not provided,
            a new client instance will be created.
        :raises ImportError: If the ``langsmith`` package is not installed.
        """
        try:
            from langsmith.client import Client
        except ImportError as e:
            raise ImportError(
                "The LangSmith client is required to load LangSmith datasets.\n"
                "Please install it with `pip install langsmith`"
            ) from e
        self.dataset_name = dataset_name
        self.client = client or Client()
    def lazy_load(self) -> Iterator[ChatSession]:
        """
        Lazy load the chat sessions from the specified LangSmith dataset.
        This method fetches the chat data from the dataset and
        converts each data point to chat sessions on-the-fly,
        yielding one session at a time.
        :return: Iterator of chat sessions containing messages.
        """
        # NOTE(review): imported inside the method, presumably to avoid a
        # circular import at module load time — confirm before moving it.
        from langchain_community.adapters import openai as oai_adapter
        data = self.client.read_dataset_openai_finetuning(
            dataset_name=self.dataset_name
        )
        for data_point in data:
            # Each data point is an OpenAI fine-tuning record; convert its
            # message dicts back into LangChain message objects.
            yield ChatSession(
                messages=[
                    oai_adapter.convert_dict_to_message(m)
                    for m in data_point.get("messages", [])
                ],
                functions=data_point.get("functions"),
            )
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_loaders/telegram.py | import json
import logging
import os
import tempfile
import zipfile
from pathlib import Path
from typing import Iterator, List, Union
from langchain_core.chat_loaders import BaseChatLoader
from langchain_core.chat_sessions import ChatSession
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
logger = logging.getLogger(__name__)
class TelegramChatLoader(BaseChatLoader):
    """Load `telegram` conversations to LangChain chat messages.
    To export, use the Telegram Desktop app from
    https://desktop.telegram.org/, select a conversation, click the three dots
    in the top right corner, and select "Export chat history". Then select
    "Machine-readable JSON" (preferred) to export. Note: the 'lite' versions of
    the desktop app (like "Telegram for MacOS") do not support exporting chat
    history.
    """
    def __init__(
        self,
        path: Union[str, Path],
    ):
        """Initialize the TelegramChatLoader.
        Args:
            path (Union[str, Path]): Path to the exported Telegram chat zip,
                directory, json, or HTML file.
        """
        # Normalize to str so the os.path / zipfile helpers can consume it.
        self.path = path if isinstance(path, str) else str(path)
    @staticmethod
    def _load_single_chat_session_html(file_path: str) -> ChatSession:
        """Load a single chat session from an HTML file.
        Args:
            file_path (str): Path to the HTML file.
        Returns:
            ChatSession: The loaded chat session.
        Raises:
            ImportError: If beautifulsoup4 is not installed.
        """
        try:
            from bs4 import BeautifulSoup
        except ImportError:
            raise ImportError(
                "Please install the 'beautifulsoup4' package to load"
                " Telegram HTML files. You can do this by running"
                "'pip install beautifulsoup4' in your terminal."
            )
        with open(file_path, "r", encoding="utf-8") as file:
            soup = BeautifulSoup(file, "html.parser")
        results: List[Union[HumanMessage, AIMessage]] = []
        previous_sender = None
        for message in soup.select(".message.default"):
            timestamp = message.select_one(".pull_right.date.details")["title"]
            from_name_element = message.select_one(".from_name")
            # Telegram's HTML export omits from_name on consecutive messages
            # by the same sender, so fall back to the previous sender.
            if from_name_element is None and previous_sender is None:
                logger.debug("from_name not found in message")
                continue
            elif from_name_element is None:
                from_name = previous_sender
            else:
                from_name = from_name_element.text.strip()
            text = message.select_one(".text").text.strip()
            results.append(
                HumanMessage(
                    content=text,
                    additional_kwargs={
                        "sender": from_name,
                        "events": [{"message_time": timestamp}],
                    },
                )
            )
            previous_sender = from_name
        return ChatSession(messages=results)
    @staticmethod
    def _load_single_chat_session_json(file_path: str) -> ChatSession:
        """Load a single chat session from a JSON file.
        Args:
            file_path (str): Path to the JSON file.
        Returns:
            ChatSession: The loaded chat session.
        """
        with open(file_path, "r", encoding="utf-8") as file:
            data = json.load(file)
        messages = data.get("messages", [])
        results: List[BaseMessage] = []
        for message in messages:
            text = message.get("text", "")
            timestamp = message.get("date", "")
            from_name = message.get("from", "")
            results.append(
                HumanMessage(
                    content=text,
                    additional_kwargs={
                        "sender": from_name,
                        "events": [{"message_time": timestamp}],
                    },
                )
            )
        return ChatSession(messages=results)
    @staticmethod
    def _iterate_files(path: str) -> Iterator[str]:
        """Iterate over files in a directory or zip file.
        Args:
            path (str): Path to the directory or zip file.
        Yields:
            str: Path to each file.
        """
        if os.path.isfile(path) and path.endswith((".html", ".json")):
            yield path
        elif os.path.isdir(path):
            for root, _, files in os.walk(path):
                for file in files:
                    if file.endswith((".html", ".json")):
                        yield os.path.join(root, file)
        elif zipfile.is_zipfile(path):
            with zipfile.ZipFile(path) as zip_file:
                for file in zip_file.namelist():
                    if file.endswith((".html", ".json")):
                        # NOTE(review): the temp dir is removed as soon as the
                        # generator resumes, so the yielded path is only valid
                        # until the next item is requested — confirm callers
                        # never hold paths across iterations.
                        with tempfile.TemporaryDirectory() as temp_dir:
                            yield zip_file.extract(file, path=temp_dir)
    def lazy_load(self) -> Iterator[ChatSession]:
        """Lazy load the messages from the chat file and yield them
        in as chat sessions.
        Yields:
            ChatSession: The loaded chat session.
        """
        # Dispatch on file extension; anything else is ignored.
        for file_path in self._iterate_files(self.path):
            if file_path.endswith(".html"):
                yield self._load_single_chat_session_html(file_path)
            elif file_path.endswith(".json"):
                yield self._load_single_chat_session_json(file_path)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_loaders/facebook_messenger.py | import json
import logging
from pathlib import Path
from typing import Iterator, Union
from langchain_core.chat_loaders import BaseChatLoader
from langchain_core.chat_sessions import ChatSession
from langchain_core.messages import HumanMessage
logger = logging.getLogger(__file__)
class SingleFileFacebookMessengerChatLoader(BaseChatLoader):
    """Load `Facebook Messenger` chat data from a single file.

    Args:
        path (Union[Path, str]): The path to the chat file.
    """

    def __init__(self, path: Union[Path, str]) -> None:
        super().__init__()
        # Normalize to Path for consistent handling downstream.
        self.file_path = path if isinstance(path, Path) else Path(path)

    def lazy_load(self) -> Iterator[ChatSession]:
        """Lazy loads the chat data from the file.

        Yields:
            ChatSession: A chat session containing the loaded messages.
        """
        # Messenger exports are JSON; read as UTF-8 explicitly rather than
        # relying on the platform default encoding.
        with open(self.file_path, encoding="utf-8") as f:
            data = json.load(f)
        # Sort messages chronologically by their millisecond timestamps.
        sorted_data = sorted(data["messages"], key=lambda x: x["timestamp_ms"])
        messages = []
        for index, m in enumerate(sorted_data):
            if "content" not in m:
                # Items without a "content" key (e.g. non-text entries) are
                # skipped. Fixed: the original triple-quoted f-string log
                # message embedded source-code indentation into the logged
                # text and formatted eagerly; use a single-line, lazily
                # formatted log call instead.
                logger.info(
                    "Skipping message no. %d as no content is present in the message",
                    index + 1,
                )
                continue
            messages.append(
                HumanMessage(
                    content=m["content"], additional_kwargs={"sender": m["sender_name"]}
                )
            )
        yield ChatSession(messages=messages)
class FolderFacebookMessengerChatLoader(BaseChatLoader):
    """Load `Facebook Messenger` chat data from a folder.

    Args:
        path (Union[str, Path]): The path to the directory
            containing the chat files.
    """

    def __init__(self, path: Union[str, Path]) -> None:
        super().__init__()
        self.directory_path = Path(path) if isinstance(path, str) else path

    def lazy_load(self) -> Iterator[ChatSession]:
        """Lazy loads the chat data from the folder.

        Walks ``<directory>/inbox/<conversation>/*.json`` and delegates each
        JSON file to :class:`SingleFileFacebookMessengerChatLoader`.

        Yields:
            ChatSession: A chat session containing the loaded messages.
        """
        inbox_path = self.directory_path / "inbox"
        for conversation_dir in inbox_path.iterdir():
            if not conversation_dir.is_dir():
                continue
            for candidate in conversation_dir.iterdir():
                if candidate.suffix.lower() != ".json":
                    continue
                file_loader = SingleFileFacebookMessengerChatLoader(path=candidate)
                yield from file_loader.lazy_load()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_loaders/utils.py | """Utilities for chat loaders."""
from copy import deepcopy
from typing import Iterable, Iterator, List
from langchain_core.chat_sessions import ChatSession
from langchain_core.messages import AIMessage, BaseMessage
def merge_chat_runs_in_session(
    chat_session: ChatSession, delimiter: str = "\n\n"
) -> ChatSession:
    """Merge chat runs together in a chat session.

    A chat run is a sequence of messages from the same sender.

    Args:
        chat_session: A chat session.
        delimiter: String inserted between merged message contents.

    Returns:
        A chat session with merged chat runs.

    Raises:
        ValueError: If any message content is not a string.
    """
    messages: List[BaseMessage] = []
    for message in chat_session["messages"]:
        if not isinstance(message.content, str):
            raise ValueError(
                "Chat Loaders only support messages with content type string, "
                f"got {message.content}"
            )
        if not messages:
            messages.append(deepcopy(message))
        elif (
            isinstance(message, type(messages[-1]))
            and messages[-1].additional_kwargs.get("sender") is not None
            and messages[-1].additional_kwargs["sender"]
            == message.additional_kwargs.get("sender")
        ):
            # Same message type and same sender: extend the current run.
            if not isinstance(messages[-1].content, str):
                raise ValueError(
                    "Chat Loaders only support messages with content type string, "
                    f"got {messages[-1].content}"
                )
            messages[-1].content = (
                messages[-1].content + delimiter + message.content
            ).strip()
            # Bug fix: ``.get("events", [])`` returned a throwaway list when
            # the key was missing, so the merged message's events were
            # silently dropped. ``setdefault`` stores the list in the dict
            # before extending it.
            messages[-1].additional_kwargs.setdefault("events", []).extend(
                message.additional_kwargs.get("events") or []
            )
        else:
            messages.append(deepcopy(message))
    return ChatSession(messages=messages)
def merge_chat_runs(chat_sessions: Iterable[ChatSession]) -> Iterator[ChatSession]:
    """Merge chat runs together.

    A chat run is a sequence of messages from the same sender.

    Args:
        chat_sessions: A list of chat sessions.

    Returns:
        A list of chat sessions with merged chat runs.
    """
    yield from (merge_chat_runs_in_session(session) for session in chat_sessions)
def map_ai_messages_in_session(chat_sessions: ChatSession, sender: str) -> ChatSession:
    """Convert messages from the specified 'sender' to AI messages.

    This is useful for fine-tuning the AI to adapt to your voice.

    Args:
        chat_sessions: A single chat session.
        sender: Messages whose ``additional_kwargs["sender"]`` equals this
            value are converted to ``AIMessage``.

    Returns:
        A new chat session with the sender's messages converted.
    """
    # Note: the original implementation kept a `num_converted` counter that
    # was incremented but never read; it has been removed as dead code.
    messages = []
    for message in chat_sessions["messages"]:
        if message.additional_kwargs.get("sender") == sender:
            # Rebuild as an AIMessage, preserving content and metadata.
            message = AIMessage(
                content=message.content,
                additional_kwargs=message.additional_kwargs.copy(),
                example=getattr(message, "example", None),  # type: ignore[arg-type]
            )
        messages.append(message)
    return ChatSession(messages=messages)
def map_ai_messages(
    chat_sessions: Iterable[ChatSession], sender: str
) -> Iterator[ChatSession]:
    """Convert messages from the specified 'sender' to AI messages.

    This is useful for fine-tuning the AI to adapt to your voice.
    """
    yield from (
        map_ai_messages_in_session(session, sender) for session in chat_sessions
    )
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/chat_loaders/__init__.py | """**Chat Loaders** load chat messages from common communications platforms.
Load chat messages from various
communications platforms such as Facebook Messenger, Telegram, and
WhatsApp. The loaded chat messages can be used for fine-tuning models.
**Class hierarchy:**
.. code-block::
BaseChatLoader --> <name>ChatLoader # Examples: WhatsAppChatLoader, IMessageChatLoader
**Main helpers:**
.. code-block::
ChatSession
""" # noqa: E501
import importlib
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from langchain_community.chat_loaders.base import (
BaseChatLoader,
)
from langchain_community.chat_loaders.facebook_messenger import (
FolderFacebookMessengerChatLoader,
SingleFileFacebookMessengerChatLoader,
)
from langchain_community.chat_loaders.gmail import (
GMailLoader,
)
from langchain_community.chat_loaders.imessage import (
IMessageChatLoader,
)
from langchain_community.chat_loaders.langsmith import (
LangSmithDatasetChatLoader,
LangSmithRunChatLoader,
)
from langchain_community.chat_loaders.slack import (
SlackChatLoader,
)
from langchain_community.chat_loaders.telegram import (
TelegramChatLoader,
)
from langchain_community.chat_loaders.whatsapp import (
WhatsAppChatLoader,
)
# Public API of the chat_loaders package; kept in sync with _module_lookup.
__all__ = [
    "BaseChatLoader",
    "FolderFacebookMessengerChatLoader",
    "GMailLoader",
    "IMessageChatLoader",
    "LangSmithDatasetChatLoader",
    "LangSmithRunChatLoader",
    "SingleFileFacebookMessengerChatLoader",
    "SlackChatLoader",
    "TelegramChatLoader",
    "WhatsAppChatLoader",
]

# Maps each public name to the module that defines it, enabling the lazy
# imports performed by __getattr__ below (PEP 562 module __getattr__).
_module_lookup = {
    "BaseChatLoader": "langchain_core.chat_loaders",
    "FolderFacebookMessengerChatLoader": "langchain_community.chat_loaders.facebook_messenger",  # noqa: E501
    "GMailLoader": "langchain_community.chat_loaders.gmail",
    "IMessageChatLoader": "langchain_community.chat_loaders.imessage",
    "LangSmithDatasetChatLoader": "langchain_community.chat_loaders.langsmith",
    "LangSmithRunChatLoader": "langchain_community.chat_loaders.langsmith",
    "SingleFileFacebookMessengerChatLoader": "langchain_community.chat_loaders.facebook_messenger",  # noqa: E501
    "SlackChatLoader": "langchain_community.chat_loaders.slack",
    "TelegramChatLoader": "langchain_community.chat_loaders.telegram",
    "WhatsAppChatLoader": "langchain_community.chat_loaders.whatsapp",
}


def __getattr__(name: str) -> Any:
    """Lazily import a chat loader class on first attribute access."""
    if name in _module_lookup:
        module = importlib.import_module(_module_lookup[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__} has no attribute {name}")
|
0 | lc_public_repos/langchain/libs/community | lc_public_repos/langchain/libs/community/tests/data.py | """Module defines common test data."""
from pathlib import Path
# Directory containing this module, and the shared examples fixture dir.
_THIS_DIR = Path(__file__).parent
_EXAMPLES_DIR = _THIS_DIR / "examples"
# Paths to data files
MLB_TEAMS_2012_CSV = _EXAMPLES_DIR / "mlb_teams_2012.csv"
MLB_TEAMS_2012_SQL = _EXAMPLES_DIR / "mlb_teams_2012.sql"
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/integration_tests/test_dalle.py | """Integration test for DallE API Wrapper."""
from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper
def test_call() -> None:
    """Test that call returns a URL in the output."""
    wrapper = DallEAPIWrapper()  # type: ignore[call-arg]
    result = wrapper.run("volcano island")
    assert "https://oaidalleapi" in result
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/integration_tests/test_document_transformers.py | """Integration test for embedding-based redundant doc filtering."""
from langchain_core.documents import Document
from langchain_community.document_transformers.embeddings_redundant_filter import (
EmbeddingsClusteringFilter,
EmbeddingsRedundantFilter,
_DocumentWithState,
)
from langchain_community.embeddings import OpenAIEmbeddings
def test_embeddings_redundant_filter() -> None:
    """Near-duplicate documents should be collapsed to a single entry."""
    texts = [
        "What happened to all of my cookies?",
        "Where did all of my cookies go?",
        "I wish there were better Italian restaurants in my neighborhood.",
    ]
    documents = [Document(page_content=text) for text in texts]
    filtered = EmbeddingsRedundantFilter(
        embeddings=OpenAIEmbeddings()
    ).transform_documents(documents)
    # One of the two cookie duplicates is dropped; the unrelated text stays.
    assert len(filtered) == 2
    assert set(texts[:2]).intersection([doc.page_content for doc in filtered])
def test_embeddings_redundant_filter_with_state() -> None:
    """Documents sharing a precomputed embedding state are treated as duplicates."""
    texts = ["What happened to all of my cookies?", "foo bar baz"]
    shared_state = {"embedded_doc": [0.5] * 10}
    documents = [
        _DocumentWithState(page_content=text, state=shared_state) for text in texts
    ]
    filtered = EmbeddingsRedundantFilter(
        embeddings=OpenAIEmbeddings()
    ).transform_documents(documents)
    # Identical cached embeddings make the two documents redundant.
    assert len(filtered) == 1
def test_embeddings_clustering_filter() -> None:
    """Cluster the corpus and keep only the document closest to each centroid."""
    # Three rough topics: cookies, pizza, robots/movies.
    texts = [
        "What happened to all of my cookies?",
        "A cookie is a small, baked sweet treat and you can find it in the cookie",
        "monsters' jar.",
        "Cookies are good.",
        "I have nightmares about the cookie monster.",
        "The most popular pizza styles are: Neapolitan, New York-style and",
        "Chicago-style. You can find them on iconic restaurants in major cities.",
        "Neapolitan pizza: This is the original pizza style,hailing from Naples,",
        "Italy.",
        "I wish there were better Italian Pizza restaurants in my neighborhood.",
        "New York-style pizza: This is characterized by its large, thin crust, and",
        "generous toppings.",
        "The first movie to feature a robot was 'A Trip to the Moon' (1902).",
        "The first movie to feature a robot that could pass for a human was",
        "'Blade Runner' (1982)",
        "The first movie to feature a robot that could fall in love with a human",
        "was 'Her' (2013)",
        "A robot is a machine capable of carrying out complex actions automatically.",
        "There are certainly hundreds, if not thousands movies about robots like:",
        "'Blade Runner', 'Her' and 'A Trip to the Moon'",
    ]
    docs = [Document(page_content=t) for t in texts]
    embeddings = OpenAIEmbeddings()
    redundant_filter = EmbeddingsClusteringFilter(
        embeddings=embeddings,
        num_clusters=3,
        num_closest=1,
        sorted=True,
    )
    actual = redundant_filter.transform_documents(docs)
    # num_clusters=3, num_closest=1 -> exactly one representative per cluster.
    assert len(actual) == 3
    assert texts[1] in [d.page_content for d in actual]
    assert texts[4] in [d.page_content for d in actual]
    assert texts[11] in [d.page_content for d in actual]
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/integration_tests/test_pdf_pagesplitter.py | """Test splitting with page numbers included."""
import os
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
def test_pdf_pagesplitter() -> None:
    """Test splitting with page numbers included."""
    here = os.path.dirname(__file__)
    pdf_docs = PyPDFLoader(os.path.join(here, "examples/hello.pdf")).load()
    # Each page document carries page number and source path metadata.
    assert "page" in pdf_docs[0].metadata
    assert "source" in pdf_docs[0].metadata
    index = FAISS.from_documents(pdf_docs, OpenAIEmbeddings())
    hits = index.similarity_search("Complete this sentence: Hello", k=1)
    assert "Hello world" in hits[0].page_content
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/integration_tests/test_compile.py | import pytest
@pytest.mark.compile
def test_placeholder() -> None:
    """Used for compiling integration tests without running any real tests."""
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/integration_tests/.env.example | # openai
# your api key from https://platform.openai.com/account/api-keys
OPENAI_API_KEY=your_openai_api_key_here
# searchapi
# your api key from https://www.searchapi.io/
SEARCHAPI_API_KEY=your_searchapi_api_key_here
# astra db
ASTRA_DB_API_ENDPOINT=https://your_astra_db_id-your_region.apps.astra.datastax.com
ASTRA_DB_APPLICATION_TOKEN=AstraCS:your_astra_db_application_token
# ASTRA_DB_KEYSPACE=your_astra_db_namespace
# cassandra
CASSANDRA_CONTACT_POINTS=127.0.0.1
# CASSANDRA_USERNAME=your_cassandra_username
# CASSANDRA_PASSWORD=your_cassandra_password
# CASSANDRA_KEYSPACE=your_cassandra_keyspace
# pinecone
# your api key from left menu "API Keys" in https://app.pinecone.io
PINECONE_API_KEY=your_pinecone_api_key_here
# your pinecone environment from left menu "API Keys" in https://app.pinecone.io
PINECONE_ENVIRONMENT=us-west4-gcp
# jira
# your api token from https://id.atlassian.com/manage-profile/security/api-tokens
# more details here: https://confluence.atlassian.com/enterprise/using-personal-access-tokens-1026032365.html
# JIRA_API_TOKEN=your_jira_api_token_here
# JIRA_USERNAME=your_jira_username_here
# JIRA_INSTANCE_URL=your_jira_instance_url_here
# JIRA_CLOUD=True
# clickup
CLICKUP_ACCESS_TOKEN=your_clickup_access_token
# power bi
# sign in to azure in order to authenticate with DefaultAzureCredentials
# details here https://learn.microsoft.com/en-us/dotnet/api/azure.identity.defaultazurecredential?view=azure-dotnet
POWERBI_DATASET_ID=_powerbi_dataset_id_here
POWERBI_TABLE_NAME=_test_table_name_here
POWERBI_NUMROWS=_num_rows_in_your_test_table
# MongoDB Atlas Vector Search
MONGODB_ATLAS_URI=your_mongodb_atlas_connection_string
# Kinetica
KINETICA_URL = _db_connection_url_here
KINETICA_USER = _login_user_here
KINETICA_PASSWD = _password_here
# Upstash Vector
# Create two Upstash Vector instances. First one should have dimensionality of 10.
# Second one should be created with the embedding model BAAI/bge-small-en-v1.5
UPSTASH_VECTOR_URL=your_upstash_vector_url
UPSTASH_VECTOR_TOKEN=your_upstash_vector_token
UPSTASH_VECTOR_URL_EMBEDDING=your_upstash_vector_embedding_url
UPSTASH_VECTOR_TOKEN_EMBEDDING=your_upstash_vector_embedding_token
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/integration_tests/test_long_context_reorder.py | """Integration test for doc reordering."""
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_community.document_transformers.long_context_reorder import (
LongContextReorder,
)
from langchain_community.embeddings import OpenAIEmbeddings
def test_long_context_reorder() -> None:
    """Test Lost in the middle reordering get_relevant_docs."""
    texts = [
        "Basquetball is a great sport.",
        "Fly me to the moon is one of my favourite songs.",
        "The Celtics are my favourite team.",
        "This is a document about the Boston Celtics",
        "I simply love going to the movies",
        "The Boston Celtics won the game by 20 points",
        "This is just a random text.",
        "Elden Ring is one of the best games in the last 15 years.",
        "L. Kornet is one of the best Celtics players.",
        "Larry Bird was an iconic NBA player.",
    ]
    embeddings = OpenAIEmbeddings()
    retriever = InMemoryVectorStore.from_texts(
        texts, embedding=embeddings
    ).as_retriever(search_kwargs={"k": 10})
    reordering = LongContextReorder()
    docs = retriever.invoke("Tell me about the Celtics")
    # Lost-in-the-middle: the transformer moves the most relevant documents
    # to the beginning and end of the list.
    actual = reordering.transform_documents(docs)
    # First 2 and Last 2 elements must contain the most relevant
    first_and_last = list(actual[:2]) + list(actual[-2:])
    assert len(actual) == 10
    assert texts[2] in [d.page_content for d in first_and_last]
    assert texts[3] in [d.page_content for d in first_and_last]
    assert texts[5] in [d.page_content for d in first_and_last]
    assert texts[8] in [d.page_content for d in first_and_last]
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/integration_tests/conftest.py | # Getting the absolute path of the current file's directory
import os
# Absolute path of the directory containing this conftest.
ABS_PATH = os.path.dirname(os.path.abspath(__file__))

# Absolute path of the project's root directory (two levels up).
PROJECT_DIR = os.path.abspath(os.path.join(ABS_PATH, os.pardir, os.pardir))


# Loading the .env file if it exists
def _load_env() -> None:
    """Load tests/integration_tests/.env into os.environ when present."""
    dotenv_path = os.path.join(PROJECT_DIR, "tests", "integration_tests", ".env")
    if os.path.exists(dotenv_path):
        # Imported lazily so python-dotenv is only needed when a .env exists.
        from dotenv import load_dotenv

        load_dotenv(dotenv_path)


_load_env()
|
0 | lc_public_repos/langchain/libs/community/tests | lc_public_repos/langchain/libs/community/tests/integration_tests/test_nuclia_transformer.py | import asyncio
import json
from typing import Any
from unittest import mock
from langchain_core.documents import Document
from langchain_community.document_transformers.nuclia_text_transform import (
NucliaTextTransformer,
)
from langchain_community.tools.nuclia.tool import NucliaUnderstandingAPI
def fakerun(**args: Any) -> Any:
    """Build a coroutine function returning canned Nuclia extraction JSON.

    Suitable as ``new_callable`` for ``mock.patch`` of the async ``_arun``.
    """

    async def run(self: Any, **args: Any) -> str:
        # Simulate a short network delay before answering.
        await asyncio.sleep(0.1)
        payload = {
            "extracted_text": [{"body": {"text": "Hello World"}}],
            "file_extracted_data": [{"language": "en"}],
            "field_metadata": [
                {
                    "metadata": {
                        "metadata": {
                            "paragraphs": [
                                {"end": 66, "sentences": [{"start": 1, "end": 67}]}
                            ]
                        }
                    }
                }
            ],
        }
        return json.dumps(payload)

    return run
async def test_nuclia_loader() -> None:
    """Transform documents through NucliaTextTransformer with a mocked API."""
    # Replace the async API call with the canned payload from fakerun.
    with mock.patch(
        "langchain_community.tools.nuclia.tool.NucliaUnderstandingAPI._arun",
        new_callable=fakerun,
    ):
        # Provide a dummy API key so NucliaUnderstandingAPI can initialize.
        with mock.patch("os.environ.get", return_value="_a_key_"):
            nua = NucliaUnderstandingAPI(enable_ml=False)
            documents = [
                Document(page_content="Hello, my name is Alice", metadata={}),
                Document(page_content="Hello, my name is Bob", metadata={}),
            ]
            nuclia_transformer = NucliaTextTransformer(nua)
            transformed_documents = await nuclia_transformer.atransform_documents(
                documents
            )
            assert len(transformed_documents) == 2
            # Mocked payload reports English and a single paragraph.
            assert (
                transformed_documents[0].metadata["nuclia"]["file"]["language"] == "en"
            )
            assert (
                len(
                    transformed_documents[1].metadata["nuclia"]["metadata"]["metadata"][
                        "metadata"
                    ]["paragraphs"]
                )
                == 1
            )
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs/test_ontotext_graphdb_graph.py | from pathlib import Path
import pytest
from langchain_community.graphs import OntotextGraphDBGraph
"""
cd libs/community/tests/integration_tests/graphs/docker-compose-ontotext-graphdb
./start.sh
"""
def test_query_method_with_valid_query() -> None:
    """A valid SPARQL SELECT against the Star Wars repo returns one binding."""
    graph = OntotextGraphDBGraph(
        query_endpoint="http://localhost:7200/repositories/langchain",
        query_ontology="CONSTRUCT {?s ?p ?o}"
        "FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
    )
    query_results = graph.query(
        "PREFIX voc: <https://swapi.co/vocabulary/> "
        "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
        "SELECT ?eyeColor "
        "WHERE {"
        ' ?besalisk rdfs:label "Dexter Jettster" ; '
        " voc:eyeColor ?eyeColor ."
        "}"
    )
    # One row with one column: the eye color literal.
    assert len(query_results) == 1
    assert len(query_results[0]) == 1
    assert str(query_results[0][0]) == "yellow"
def test_query_method_with_invalid_query() -> None:
    """A malformed SPARQL query surfaces as a ValueError from the wrapper."""
    graph = OntotextGraphDBGraph(
        query_endpoint="http://localhost:7200/repositories/langchain",
        query_ontology="CONSTRUCT {?s ?p ?o}"
        "FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
    )
    # The endpoint rejects this query and the wrapper re-raises as ValueError.
    with pytest.raises(ValueError) as e:
        graph.query(
            "PREFIX : <https://swapi.co/vocabulary/> "
            "PREFIX owl: <http://www.w3.org/2002/07/owl#> "
            "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> "
            "PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> "
            "SELECT ?character (MAX(?lifespan) AS ?maxLifespan) "
            "WHERE {"
            " ?species a :Species ;"
            " :character ?character ;"
            " :averageLifespan ?lifespan ."
            " FILTER(xsd:integer(?lifespan))"
            "} "
            "ORDER BY DESC(?maxLifespan) "
            "LIMIT 1"
        )
    assert (
        str(e.value)
        == "You did something wrong formulating either the URI or your SPARQL query"
    )
def test_get_schema_with_query() -> None:
    """Schema loaded via the ontology query parses to the expected statements."""
    graph = OntotextGraphDBGraph(
        query_endpoint="http://localhost:7200/repositories/langchain",
        query_ontology="CONSTRUCT {?s ?p ?o}"
        "FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
    )
    from rdflib import Graph

    parsed_schema = Graph().parse(data=graph.get_schema, format="turtle")
    assert len(parsed_schema) == 19
@pytest.mark.parametrize(
    "rdf_format, file_extension",
    [
        ("json-ld", "json"),
        ("json-ld", "jsonld"),
        ("json-ld", "json-ld"),
        ("xml", "rdf"),
        ("xml", "xml"),
        ("xml", "owl"),
        ("pretty-xml", "xml"),
        ("n3", "n3"),
        ("turtle", "ttl"),
        ("nt", "nt"),
        ("trig", "trig"),
        ("nquads", "nq"),
        ("nquads", "nquads"),
        ("trix", "trix"),
    ],
)
def test_get_schema_from_file(
    tmp_path: Path, rdf_format: str, file_extension: str
) -> None:
    """Round-trip the ontology schema through a local file in each RDF format.

    The file extension in each case matches the serialization format, so no
    explicit ``local_file_format`` is passed — presumably the loader infers
    the format from the extension (confirmed by the wrong-extension test
    below raising ValueError).
    """
    expected_number_of_ontology_statements = 19
    graph = OntotextGraphDBGraph(
        query_endpoint="http://localhost:7200/repositories/langchain",
        query_ontology="CONSTRUCT {?s ?p ?o}"
        "FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
    )
    from rdflib import ConjunctiveGraph, Graph

    assert (
        len(Graph().parse(data=graph.get_schema, format="turtle"))
        == expected_number_of_ontology_statements
    )
    # serialize the ontology schema loaded with the query in a local file
    # in various rdf formats and check that this results
    # in the same number of statements
    conjunctive_graph = ConjunctiveGraph()
    ontology_context = conjunctive_graph.get_context("https://swapi.co/ontology/")
    ontology_context.parse(data=graph.get_schema, format="turtle")
    assert len(ontology_context) == expected_number_of_ontology_statements
    assert len(conjunctive_graph) == expected_number_of_ontology_statements
    local_file = tmp_path / ("starwars-ontology." + file_extension)
    conjunctive_graph.serialize(local_file, format=rdf_format)
    graph = OntotextGraphDBGraph(
        query_endpoint="http://localhost:7200/repositories/langchain",
        local_file=str(local_file),
    )
    assert (
        len(Graph().parse(data=graph.get_schema, format="turtle"))
        == expected_number_of_ontology_statements
    )
@pytest.mark.parametrize(
    "rdf_format", ["json-ld", "xml", "n3", "turtle", "nt", "trig", "nquads", "trix"]
)
def test_get_schema_from_file_with_explicit_rdf_format(
    tmp_path: Path, rdf_format: str
) -> None:
    """Load the schema from a ``.txt`` file with an explicit RDF format.

    The generic extension carries no format information, so
    ``local_file_format`` is passed explicitly.
    """
    expected_number_of_ontology_statements = 19
    graph = OntotextGraphDBGraph(
        query_endpoint="http://localhost:7200/repositories/langchain",
        query_ontology="CONSTRUCT {?s ?p ?o}"
        "FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
    )
    from rdflib import ConjunctiveGraph, Graph

    assert (
        len(Graph().parse(data=graph.get_schema, format="turtle"))
        == expected_number_of_ontology_statements
    )
    # serialize the ontology schema loaded with the query in a local file
    # in various rdf formats and check that this results
    # in the same number of statements
    conjunctive_graph = ConjunctiveGraph()
    ontology_context = conjunctive_graph.get_context("https://swapi.co/ontology/")
    ontology_context.parse(data=graph.get_schema, format="turtle")
    assert len(ontology_context) == expected_number_of_ontology_statements
    assert len(conjunctive_graph) == expected_number_of_ontology_statements
    local_file = tmp_path / "starwars-ontology.txt"
    conjunctive_graph.serialize(local_file, format=rdf_format)
    graph = OntotextGraphDBGraph(
        query_endpoint="http://localhost:7200/repositories/langchain",
        local_file=str(local_file),
        local_file_format=rdf_format,
    )
    assert (
        len(Graph().parse(data=graph.get_schema, format="turtle"))
        == expected_number_of_ontology_statements
    )
def test_get_schema_from_file_with_wrong_extension(tmp_path: Path) -> None:
    """A file whose extension does not match its contents raises ValueError."""
    expected_number_of_ontology_statements = 19
    graph = OntotextGraphDBGraph(
        query_endpoint="http://localhost:7200/repositories/langchain",
        query_ontology="CONSTRUCT {?s ?p ?o}"
        "FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}",
    )
    from rdflib import ConjunctiveGraph, Graph

    assert (
        len(Graph().parse(data=graph.get_schema, format="turtle"))
        == expected_number_of_ontology_statements
    )
    conjunctive_graph = ConjunctiveGraph()
    ontology_context = conjunctive_graph.get_context("https://swapi.co/ontology/")
    ontology_context.parse(data=graph.get_schema, format="turtle")
    assert len(ontology_context) == expected_number_of_ontology_statements
    assert len(conjunctive_graph) == expected_number_of_ontology_statements
    # Serialize as nquads but name the file .trig: parsing by extension fails.
    local_file = tmp_path / "starwars-ontology.trig"
    conjunctive_graph.serialize(local_file, format="nquads")
    with pytest.raises(ValueError):
        OntotextGraphDBGraph(
            query_endpoint="http://localhost:7200/repositories/langchain",
            local_file=str(local_file),
        )
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs/test_kuzu.py | import shutil
import tempfile
import unittest
from langchain_community.graphs import KuzuGraph
# Expected value of KuzuGraph.get_schema after test_refresh_schema adds the
# Person node table and the ActedIn relationship table.
EXPECTED_SCHEMA = """Node properties: [{'properties': [('name', 'STRING')], 'label': 'Movie'}, {'properties': [('name', 'STRING'), ('birthDate', 'STRING')], 'label': 'Person'}]
Relationships properties: [{'properties': [], 'label': 'ActedIn'}]
Relationships: ['(:Person)-[:ActedIn]->(:Movie)']
"""  # noqa: E501
class TestKuzu(unittest.TestCase):
    """Integration tests for the KuzuGraph wrapper over a temporary database."""

    def setUp(self) -> None:
        """Create a temp Kuzu database seeded with three Movie nodes."""
        try:
            import kuzu
        except ImportError as e:
            raise ImportError(
                "Cannot import Python package kuzu. Please install it by running "
                "`pip install kuzu`."
            ) from e
        self.tmpdir = tempfile.mkdtemp()
        self.kuzu_database = kuzu.Database(self.tmpdir)
        self.conn = kuzu.Connection(self.kuzu_database)
        self.conn.execute("CREATE NODE TABLE Movie (name STRING, PRIMARY KEY(name))")
        self.conn.execute("CREATE (:Movie {name: 'The Godfather'})")
        self.conn.execute("CREATE (:Movie {name: 'The Godfather: Part II'})")
        self.conn.execute(
            "CREATE (:Movie {name: 'The Godfather Coda: The Death of Michael "
            "Corleone'})"
        )
        self.kuzu_graph = KuzuGraph(self.kuzu_database)

    def tearDown(self) -> None:
        """Remove the temporary database directory."""
        shutil.rmtree(self.tmpdir, ignore_errors=True)

    def test_query_no_params(self) -> None:
        """A plain query returns all movie names in order."""
        result = self.kuzu_graph.query("MATCH (n:Movie) RETURN n.name ORDER BY n.name")
        excepted_result = [
            {"n.name": "The Godfather"},
            {"n.name": "The Godfather Coda: The Death of Michael Corleone"},
            {"n.name": "The Godfather: Part II"},
        ]
        self.assertEqual(result, excepted_result)

    def test_query_params(self) -> None:
        """A parameterized query binds $name and returns the matching row."""
        result = self.kuzu_graph.query(
            query="MATCH (n:Movie) WHERE n.name = $name RETURN n.name",
            params={"name": "The Godfather"},
        )
        excepted_result = [
            {"n.name": "The Godfather"},
        ]
        self.assertEqual(result, excepted_result)

    def test_refresh_schema(self) -> None:
        """refresh_schema picks up newly created tables (see EXPECTED_SCHEMA)."""
        self.conn.execute(
            "CREATE NODE TABLE Person (name STRING, birthDate STRING, PRIMARY "
            "KEY(name))"
        )
        self.conn.execute("CREATE REL TABLE ActedIn (FROM Person TO Movie)")
        self.kuzu_graph.refresh_schema()
        schema = self.kuzu_graph.get_schema
        self.assertEqual(schema, EXPECTED_SCHEMA)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs/test_hugegraph.py | import unittest
from typing import Any
from unittest.mock import MagicMock, patch
from langchain_community.graphs import HugeGraph
class TestHugeGraph(unittest.TestCase):
    """Unit tests for the HugeGraph wrapper with a mocked client."""

    def setUp(self) -> None:
        self.username = "test_user"
        self.password = "test_password"
        self.address = "test_address"
        self.graph = "test_hugegraph"
        self.port = 1234
        self.session_pool_size = 10

    def _make_graph(self) -> Any:
        """Build a HugeGraph instance from the fixture attributes."""
        return HugeGraph(
            self.username, self.password, self.address, self.port, self.graph
        )

    @patch("hugegraph.connection.PyHugeGraph")
    def test_init(self, mock_client: Any) -> None:
        mock_client.return_value = MagicMock()
        graph = self._make_graph()
        self.assertEqual(graph.username, self.username)
        self.assertEqual(graph.password, self.password)
        self.assertEqual(graph.address, self.address)
        self.assertEqual(graph.port, self.port)
        self.assertEqual(graph.graph, self.graph)

    @patch("hugegraph.connection.PyHugeGraph")
    def test_execute(self, mock_client: Any) -> None:
        mock_client.return_value = MagicMock()
        graph = self._make_graph()
        query = "g.V().limit(10)"
        self.assertIsInstance(graph.query(query), MagicMock)

    @patch("hugegraph.connection.PyHugeGraph")
    def test_refresh_schema(self, mock_client: Any) -> None:
        mock_client.return_value = MagicMock()
        graph = self._make_graph()
        graph.refresh_schema()
        self.assertNotEqual(graph.get_schema, "")
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs/test_nebulagraph.py | import unittest
from typing import Any
from unittest.mock import MagicMock, patch
from langchain_community.graphs import NebulaGraph
class TestNebulaGraph(unittest.TestCase):
    """Unit tests for the NebulaGraph wrapper with a mocked session pool."""

    def setUp(self) -> None:
        self.space = "test_space"
        self.username = "test_user"
        self.password = "test_password"
        self.address = "test_address"
        self.port = 1234
        self.session_pool_size = 10

    def _make_graph(self) -> Any:
        """Build a NebulaGraph instance from the fixture attributes."""
        return NebulaGraph(
            self.space,
            self.username,
            self.password,
            self.address,
            self.port,
            self.session_pool_size,
        )

    @patch("nebula3.gclient.net.SessionPool.SessionPool")
    def test_init(self, mock_session_pool: Any) -> None:
        mock_session_pool.return_value = MagicMock()
        graph = self._make_graph()
        self.assertEqual(graph.space, self.space)
        self.assertEqual(graph.username, self.username)
        self.assertEqual(graph.password, self.password)
        self.assertEqual(graph.address, self.address)
        self.assertEqual(graph.port, self.port)
        self.assertEqual(graph.session_pool_size, self.session_pool_size)

    @patch("nebula3.gclient.net.SessionPool.SessionPool")
    def test_get_session_pool(self, mock_session_pool: Any) -> None:
        mock_session_pool.return_value = MagicMock()
        session_pool = self._make_graph()._get_session_pool()
        self.assertIsInstance(session_pool, MagicMock)

    @patch("nebula3.gclient.net.SessionPool.SessionPool")
    def test_del(self, mock_session_pool: Any) -> None:
        mock_session_pool.return_value = MagicMock()
        self._make_graph().__del__()
        mock_session_pool.return_value.close.assert_called_once()

    @patch("nebula3.gclient.net.SessionPool.SessionPool")
    def test_execute(self, mock_session_pool: Any) -> None:
        mock_session_pool.return_value = MagicMock()
        query = "SELECT * FROM test_table"
        result = self._make_graph().execute(query)
        self.assertIsInstance(result, MagicMock)

    @patch("nebula3.gclient.net.SessionPool.SessionPool")
    def test_refresh_schema(self, mock_session_pool: Any) -> None:
        mock_session_pool.return_value = MagicMock()
        graph = self._make_graph()
        graph.refresh_schema()
        self.assertNotEqual(graph.get_schema, "")
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs/test_memgraph.py | import os
from langchain_community.graphs import MemgraphGraph
def test_cypher_return_correct_schema() -> None:
    """Test that chain returns direct results."""
    # Connection settings fall back to a local default Memgraph instance.
    url = os.environ.get("MEMGRAPH_URI", "bolt://localhost:7687")
    username = os.environ.get("MEMGRAPH_USERNAME", "")
    password = os.environ.get("MEMGRAPH_PASSWORD", "")
    assert url is not None
    assert username is not None
    assert password is not None
    graph = MemgraphGraph(
        url=url,
        username=username,
        password=password,
    )
    # Delete all nodes in the graph
    graph.query("MATCH (n) DETACH DELETE n")
    # Create two nodes and a relationship
    graph.query(
        """
        CREATE (la:LabelA {property_a: 'a'})
        CREATE (lb:LabelB)
        CREATE (lc:LabelC)
        MERGE (la)-[:REL_TYPE]-> (lb)
        MERGE (la)-[:REL_TYPE {rel_prop: 'abc'}]-> (lc)
        """
    )
    # Refresh schema information
    graph.refresh_schema()
    # Read the schema back via Memgraph's llm_util procedure and compare.
    relationships = graph.query(
        "CALL llm_util.schema('raw') YIELD schema "
        "WITH schema.relationships AS relationships "
        "UNWIND relationships AS relationship "
        "RETURN relationship['start'] AS start, "
        "relationship['type'] AS type, "
        "relationship['end'] AS end "
        "ORDER BY start, type, end;"
    )
    node_props = graph.query(
        "CALL llm_util.schema('raw') YIELD schema "
        "WITH schema.node_props AS nodes "
        "WITH nodes['LabelA'] AS properties "
        "UNWIND properties AS property "
        "RETURN property['property'] AS prop, "
        "property['type'] AS type "
        "ORDER BY prop ASC;"
    )
    expected_relationships = [
        {"start": "LabelA", "type": "REL_TYPE", "end": "LabelB"},
        {"start": "LabelA", "type": "REL_TYPE", "end": "LabelC"},
    ]
    expected_node_props = [{"prop": "property_a", "type": "str"}]
    assert relationships == expected_relationships
    assert node_props == expected_node_props
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs/test_age_graph.py | import os
import re
import unittest
from typing import Any, Dict
from langchain_core.documents import Document
from langchain_community.graphs.age_graph import AGEGraph
from langchain_community.graphs.graph_document import GraphDocument, Node, Relationship
# Minimal graph-document fixture: two nodes joined by one REL relationship,
# attributed to a single source document.
test_data = [
    GraphDocument(
        nodes=[Node(id="foo", type="foo"), Node(id="bar", type="bar")],
        relationships=[
            Relationship(
                source=Node(id="foo", type="foo"),
                target=Node(id="bar", type="bar"),
                type="REL",
            )
        ],
        source=Document(page_content="source document"),
    )
]
class TestAGEGraph(unittest.TestCase):
def test_node_properties(self) -> None:
    """Node labels and their properties are introspected correctly."""
    # Connection settings come from AGE_PGSQL_* environment variables.
    conf = {
        "database": os.getenv("AGE_PGSQL_DB"),
        "user": os.getenv("AGE_PGSQL_USER"),
        "password": os.getenv("AGE_PGSQL_PASSWORD"),
        "host": os.getenv("AGE_PGSQL_HOST", "localhost"),
        "port": int(os.getenv("AGE_PGSQL_PORT", 5432)),
    }
    self.assertIsNotNone(conf["database"])
    self.assertIsNotNone(conf["user"])
    self.assertIsNotNone(conf["password"])
    graph_name = os.getenv("AGE_GRAPH_NAME", "age_test")
    graph = AGEGraph(graph_name, conf)
    graph.query("MATCH (n) DETACH DELETE n")
    # Create two nodes and a relationship
    graph.query(
        """
        CREATE (la:LabelA {property_a: 'a'})
        CREATE (lb:LabelB)
        CREATE (lc:LabelC)
        MERGE (la)-[:REL_TYPE]-> (lb)
        MERGE (la)-[:REL_TYPE {rel_prop: 'abc'}]-> (lc)
        """
    )
    # Refresh schema information
    # graph.refresh_schema()
    n_labels, e_labels = graph._get_labels()
    node_properties = graph._get_node_properties(n_labels)
    expected_node_properties = [
        {
            "properties": [{"property": "property_a", "type": "STRING"}],
            "labels": "LabelA",
        },
        {
            "properties": [],
            "labels": "LabelB",
        },
        {
            "properties": [],
            "labels": "LabelC",
        },
    ]
    # Sort by label so the comparison is order-independent.
    self.assertEqual(
        sorted(node_properties, key=lambda x: x["labels"]), expected_node_properties
    )
def test_edge_properties(self) -> None:
    """Relationship types and their properties are introspected correctly."""
    # Connection settings come from AGE_PGSQL_* environment variables.
    conf = {
        "database": os.getenv("AGE_PGSQL_DB"),
        "user": os.getenv("AGE_PGSQL_USER"),
        "password": os.getenv("AGE_PGSQL_PASSWORD"),
        "host": os.getenv("AGE_PGSQL_HOST", "localhost"),
        "port": int(os.getenv("AGE_PGSQL_PORT", 5432)),
    }
    self.assertIsNotNone(conf["database"])
    self.assertIsNotNone(conf["user"])
    self.assertIsNotNone(conf["password"])
    graph_name = os.getenv("AGE_GRAPH_NAME", "age_test")
    graph = AGEGraph(graph_name, conf)
    graph.query("MATCH (n) DETACH DELETE n")
    # Create two nodes and a relationship
    graph.query(
        """
        CREATE (la:LabelA {property_a: 'a'})
        CREATE (lb:LabelB)
        CREATE (lc:LabelC)
        MERGE (la)-[:REL_TYPE]-> (lb)
        MERGE (la)-[:REL_TYPE {rel_prop: 'abc'}]-> (lc)
        """
    )
    # Refresh schema information
    # graph.refresh_schema()
    n_labels, e_labels = graph._get_labels()
    relationships_properties = graph._get_edge_properties(e_labels)
    expected_relationships_properties = [
        {
            "type": "REL_TYPE",
            "properties": [{"property": "rel_prop", "type": "STRING"}],
        }
    ]
    self.assertEqual(relationships_properties, expected_relationships_properties)
def test_relationships(self) -> None:
    """AGEGraph._get_triples should report (start, type, end) triples."""
    # Connection settings come from the environment; host/port have defaults.
    settings = {
        "database": os.getenv("AGE_PGSQL_DB"),
        "user": os.getenv("AGE_PGSQL_USER"),
        "password": os.getenv("AGE_PGSQL_PASSWORD"),
        "host": os.getenv("AGE_PGSQL_HOST", "localhost"),
        "port": int(os.getenv("AGE_PGSQL_PORT", 5432)),
    }
    for required in ("database", "user", "password"):
        self.assertIsNotNone(settings[required])
    graph = AGEGraph(os.getenv("AGE_GRAPH_NAME", "age_test"), settings)
    # Start from an empty graph, then create a small fixture graph.
    graph.query("MATCH (n) DETACH DELETE n")
    graph.query(
        """
        CREATE (la:LabelA {property_a: 'a'})
        CREATE (lb:LabelB)
        CREATE (lc:LabelC)
        MERGE (la)-[:REL_TYPE]-> (lb)
        MERGE (la)-[:REL_TYPE {rel_prop: 'abc'}]-> (lc)
        """
    )
    _, edge_labels = graph._get_labels()
    triples = graph._get_triples(edge_labels)
    expected = [
        {"start": "LabelA", "type": "REL_TYPE", "end": "LabelB"},
        {"start": "LabelA", "type": "REL_TYPE", "end": "LabelC"},
    ]
    # Return order is not guaranteed, so sort by the end label first.
    self.assertEqual(sorted(triples, key=lambda t: t["end"]), expected)
def test_add_documents(self) -> None:
    """add_graph_documents should create one node per GraphDocument node."""
    settings = {
        "database": os.getenv("AGE_PGSQL_DB"),
        "user": os.getenv("AGE_PGSQL_USER"),
        "password": os.getenv("AGE_PGSQL_PASSWORD"),
        "host": os.getenv("AGE_PGSQL_HOST", "localhost"),
        "port": int(os.getenv("AGE_PGSQL_PORT", 5432)),
    }
    for required in ("database", "user", "password"):
        self.assertIsNotNone(settings[required])
    graph = AGEGraph(os.getenv("AGE_GRAPH_NAME", "age_test"), settings)
    # Wipe the graph, then import the shared GraphDocument fixture.
    graph.query("MATCH (n) DETACH DELETE n")
    graph.add_graph_documents(test_data)
    rows = graph.query(
        "MATCH (n) RETURN labels(n) AS label, count(*) AS count ORDER BY labels(n)"
    )
    self.assertEqual(
        rows, [{"label": ["bar"], "count": 1}, {"label": ["foo"], "count": 1}]
    )
def test_add_documents_source(self) -> None:
    """With include_source=True an extra Document node is created and linked."""
    settings = {
        "database": os.getenv("AGE_PGSQL_DB"),
        "user": os.getenv("AGE_PGSQL_USER"),
        "password": os.getenv("AGE_PGSQL_PASSWORD"),
        "host": os.getenv("AGE_PGSQL_HOST", "localhost"),
        "port": int(os.getenv("AGE_PGSQL_PORT", 5432)),
    }
    for required in ("database", "user", "password"):
        self.assertIsNotNone(settings[required])
    graph = AGEGraph(os.getenv("AGE_GRAPH_NAME", "age_test"), settings)
    # Wipe the graph, then import the fixture together with its source doc.
    graph.query("MATCH (n) DETACH DELETE n")
    graph.add_graph_documents(test_data, include_source=True)
    rows = graph.query(
        "MATCH (n) RETURN labels(n) AS label, count(*) AS count ORDER BY labels(n)"
    )
    expected = [
        {"label": ["bar"], "count": 1},
        {"label": ["Document"], "count": 1},
        {"label": ["foo"], "count": 1},
    ]
    self.assertEqual(rows, expected)
def test_get_schema(self) -> None:
    """get_schema / get_structured_schema: empty graph, staleness, refresh.

    Verifies three things in sequence: the schema strings for an empty
    graph, that cached schema is NOT updated by writes until
    refresh_schema() is called, and the refreshed schema after a write.
    """
    # Connection settings come from the environment; host/port have defaults.
    conf = {
        "database": os.getenv("AGE_PGSQL_DB"),
        "user": os.getenv("AGE_PGSQL_USER"),
        "password": os.getenv("AGE_PGSQL_PASSWORD"),
        "host": os.getenv("AGE_PGSQL_HOST", "localhost"),
        "port": int(os.getenv("AGE_PGSQL_PORT", 5432)),
    }
    self.assertIsNotNone(conf["database"])
    self.assertIsNotNone(conf["user"])
    self.assertIsNotNone(conf["password"])
    graph_name = os.getenv("AGE_GRAPH_NAME", "age_test")
    graph = AGEGraph(graph_name, conf)
    graph.query("MATCH (n) DETACH DELETE n")
    graph.refresh_schema()
    expected = """
    Node properties are the following:
    []
    Relationship properties are the following:
    []
    The relationships are the following:
    []
    """
    # check that works on empty schema (whitespace-insensitive comparison)
    self.assertEqual(
        re.sub(r"\s", "", graph.get_schema), re.sub(r"\s", "", expected)
    )
    expected_structured: Dict[str, Any] = {
        "node_props": {},
        "rel_props": {},
        "relationships": [],
        "metadata": {},
    }
    self.assertEqual(graph.get_structured_schema, expected_structured)
    # Create two nodes and a relationship
    graph.query(
        """
        MERGE (a:a {id: 1})-[b:b {id: 2}]-> (c:c {id: 3})
        """
    )
    # check that schema doesn't update without refresh
    self.assertEqual(
        re.sub(r"\s", "", graph.get_schema), re.sub(r"\s", "", expected)
    )
    self.assertEqual(graph.get_structured_schema, expected_structured)
    # two possible orderings of node props (label discovery order is not fixed)
    expected_possibilities = [
        """
        Node properties are the following:
        [
        {'properties': [{'property': 'id', 'type': 'INTEGER'}], 'labels': 'a'},
        {'properties': [{'property': 'id', 'type': 'INTEGER'}], 'labels': 'c'}
        ]
        Relationship properties are the following:
        [
        {'properties': [{'property': 'id', 'type': 'INTEGER'}], 'type': 'b'}
        ]
        The relationships are the following:
        [
        '(:`a`)-[:`b`]->(:`c`)'
        ]
        """,
        """
        Node properties are the following:
        [
        {'properties': [{'property': 'id', 'type': 'INTEGER'}], 'labels': 'c'},
        {'properties': [{'property': 'id', 'type': 'INTEGER'}], 'labels': 'a'}
        ]
        Relationship properties are the following:
        [
        {'properties': [{'property': 'id', 'type': 'INTEGER'}], 'type': 'b'}
        ]
        The relationships are the following:
        [
        '(:`a`)-[:`b`]->(:`c`)'
        ]
        """,
    ]
    expected_structured2 = {
        "node_props": {
            "a": [{"property": "id", "type": "INTEGER"}],
            "c": [{"property": "id", "type": "INTEGER"}],
        },
        "rel_props": {"b": [{"property": "id", "type": "INTEGER"}]},
        "relationships": [{"start": "a", "type": "b", "end": "c"}],
        "metadata": {},
    }
    graph.refresh_schema()
    # check that schema is refreshed
    self.assertIn(
        re.sub(r"\s", "", graph.get_schema),
        [re.sub(r"\s", "", x) for x in expected_possibilities],
    )
    self.assertEqual(graph.get_structured_schema, expected_structured2)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs/test_falkordb.py | import unittest
from typing import Any
from unittest.mock import MagicMock, patch
from langchain_community.graphs import FalkorDBGraph
class TestFalkorDB(unittest.TestCase):
    """Smoke tests for FalkorDBGraph with the redis client patched out."""

    def setUp(self) -> None:
        # Default local FalkorDB (Redis protocol) connection details.
        self.host = "localhost"
        self.graph = "test_falkordb"
        self.port = 6379

    def _connect(self) -> Any:
        """Build a graph wrapper from the connection details set in setUp."""
        return FalkorDBGraph(database=self.graph, host=self.host, port=self.port)

    @patch("redis.Redis")
    def test_init(self, redis_mock: Any) -> None:
        """Constructing the wrapper should not raise."""
        redis_mock.return_value = MagicMock()
        self._connect()

    @patch("redis.Redis")
    def test_execute(self, redis_mock: Any) -> None:
        """query() should pass through to the (mocked) client."""
        redis_mock.return_value = MagicMock()
        result = self._connect().query("RETURN 1")
        self.assertIsInstance(result, MagicMock)

    @patch("redis.Redis")
    def test_refresh_schema(self, redis_mock: Any) -> None:
        """refresh_schema() should leave a non-empty schema string."""
        redis_mock.return_value = MagicMock()
        graph = self._connect()
        graph.refresh_schema()
        self.assertNotEqual(graph.get_schema, "")
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs/test_neo4j.py | import os
from langchain_core.documents import Document
from langchain_community.graphs import Neo4jGraph
from langchain_community.graphs.graph_document import GraphDocument, Node, Relationship
from langchain_community.graphs.neo4j_graph import (
BASE_ENTITY_LABEL,
node_properties_query,
rel_properties_query,
rel_query,
)
# Shared fixture: one GraphDocument with two nodes (foo, bar) joined by a
# REL relationship, attributed to a source Document.
test_data = [
    GraphDocument(
        nodes=[Node(id="foo", type="foo"), Node(id="bar", type="bar")],
        relationships=[
            Relationship(
                source=Node(id="foo", type="foo"),
                target=Node(id="bar", type="bar"),
                type="REL",
            )
        ],
        source=Document(page_content="source document"),
    )
]

# Variant whose node/relationship types contain backticks in various
# positions; used to verify backticks are stripped on import (test_backticks).
test_data_backticks = [
    GraphDocument(
        nodes=[Node(id="foo", type="foo`"), Node(id="bar", type="`bar")],
        relationships=[
            Relationship(
                source=Node(id="foo", type="f`oo"),
                target=Node(id="bar", type="ba`r"),
                type="`REL`",
            )
        ],
        source=Document(page_content="source document"),
    )
]
def test_cypher_return_correct_schema() -> None:
    """The raw schema queries should describe the fixture graph exactly."""
    # Connection details are required from the environment.
    url = os.environ.get("NEO4J_URI")
    username = os.environ.get("NEO4J_USERNAME")
    password = os.environ.get("NEO4J_PASSWORD")
    assert url is not None
    assert username is not None
    assert password is not None
    graph = Neo4jGraph(
        url=url,
        username=username,
        password=password,
    )
    # Delete all nodes in the graph
    graph.query("MATCH (n) DETACH DELETE n")
    # Create three nodes and two relationships, one edge carrying a property
    graph.query(
        """
        CREATE (la:LabelA {property_a: 'a'})
        CREATE (lb:LabelB)
        CREATE (lc:LabelC)
        MERGE (la)-[:REL_TYPE]-> (lb)
        MERGE (la)-[:REL_TYPE {rel_prop: 'abc'}]-> (lc)
        """
    )
    # Refresh schema information
    graph.refresh_schema()
    # Run the module's raw schema queries directly, excluding the internal
    # BASE_ENTITY_LABEL used by baseEntityLabel imports.
    node_properties = graph.query(
        node_properties_query, params={"EXCLUDED_LABELS": [BASE_ENTITY_LABEL]}
    )
    relationships_properties = graph.query(
        rel_properties_query, params={"EXCLUDED_LABELS": [BASE_ENTITY_LABEL]}
    )
    relationships = graph.query(
        rel_query, params={"EXCLUDED_LABELS": [BASE_ENTITY_LABEL]}
    )
    expected_node_properties = [
        {
            "output": {
                "properties": [{"property": "property_a", "type": "STRING"}],
                "labels": "LabelA",
            }
        }
    ]
    expected_relationships_properties = [
        {
            "output": {
                "type": "REL_TYPE",
                "properties": [{"property": "rel_prop", "type": "STRING"}],
            }
        }
    ]
    expected_relationships = [
        {"output": {"start": "LabelA", "type": "REL_TYPE", "end": "LabelB"}},
        {"output": {"start": "LabelA", "type": "REL_TYPE", "end": "LabelC"}},
    ]
    assert node_properties == expected_node_properties
    assert relationships_properties == expected_relationships_properties
    # Order is not guaranteed with Neo4j returns
    assert (
        sorted(relationships, key=lambda x: x["output"]["end"])
        == expected_relationships
    )
def test_neo4j_timeout() -> None:
    """A query exceeding the configured timeout should raise the timeout error."""
    creds = {
        name: os.environ.get(name)
        for name in ("NEO4J_URI", "NEO4J_USERNAME", "NEO4J_PASSWORD")
    }
    for value in creds.values():
        assert value is not None
    graph = Neo4jGraph(
        url=creds["NEO4J_URI"],
        username=creds["NEO4J_USERNAME"],
        password=creds["NEO4J_PASSWORD"],
        timeout=0.1,
    )
    try:
        # Deliberately heavy write: should blow past the 0.1s timeout.
        graph.query("UNWIND range(0,100000,1) AS i MERGE (:Foo {id:i})")
    except Exception as e:
        assert (
            e.code  # type: ignore[attr-defined]
            == "Neo.ClientError.Transaction.TransactionTimedOutClientConfiguration"
        )
def test_neo4j_sanitize_values() -> None:
    """Test that sanitize=True strips oversized list values from results.

    NOTE: the original docstring said "uses the timeout correctly" — that was
    a copy-paste error; this test exercises the ``sanitize`` flag.
    """
    url = os.environ.get("NEO4J_URI")
    username = os.environ.get("NEO4J_USERNAME")
    password = os.environ.get("NEO4J_PASSWORD")
    assert url is not None
    assert username is not None
    assert password is not None
    graph = Neo4jGraph(url=url, username=username, password=password, sanitize=True)
    # Delete all nodes in the graph
    graph.query("MATCH (n) DETACH DELETE n")
    # Create two nodes and a relationship
    graph.query(
        """
        CREATE (la:LabelA {property_a: 'a'})
        CREATE (lb:LabelB)
        CREATE (lc:LabelC)
        MERGE (la)-[:REL_TYPE]-> (lb)
        MERGE (la)-[:REL_TYPE {rel_prop: 'abc'}]-> (lc)
        """
    )
    graph.refresh_schema()
    # The 131-element list is sanitized away, leaving an empty row.
    output = graph.query("RETURN range(0,130,1) AS result")
    assert output == [{}]
def test_neo4j_add_data() -> None:
    """Importing a graph document should create its nodes, with no constraints."""
    creds = {
        name: os.environ.get(name)
        for name in ("NEO4J_URI", "NEO4J_USERNAME", "NEO4J_PASSWORD")
    }
    for value in creds.values():
        assert value is not None
    graph = Neo4jGraph(
        url=creds["NEO4J_URI"],
        username=creds["NEO4J_USERNAME"],
        password=creds["NEO4J_PASSWORD"],
        sanitize=True,
    )
    # Start from a clean database with no schema constraints.
    graph.query("MATCH (n) DETACH DELETE n")
    graph.query("CALL apoc.schema.assert({}, {})")
    graph.refresh_schema()
    graph.add_graph_documents(test_data)
    rows = graph.query(
        "MATCH (n) RETURN labels(n) AS label, count(*) AS count ORDER BY label"
    )
    assert rows == [{"label": ["bar"], "count": 1}, {"label": ["foo"], "count": 1}]
    assert graph.structured_schema["metadata"]["constraint"] == []
def test_neo4j_add_data_source() -> None:
    """include_source=True should add a Document node alongside the data nodes."""
    creds = {
        name: os.environ.get(name)
        for name in ("NEO4J_URI", "NEO4J_USERNAME", "NEO4J_PASSWORD")
    }
    for value in creds.values():
        assert value is not None
    graph = Neo4jGraph(
        url=creds["NEO4J_URI"],
        username=creds["NEO4J_USERNAME"],
        password=creds["NEO4J_PASSWORD"],
        sanitize=True,
    )
    # Start from a clean database with no schema constraints.
    graph.query("MATCH (n) DETACH DELETE n")
    graph.query("CALL apoc.schema.assert({}, {})")
    graph.refresh_schema()
    graph.add_graph_documents(test_data, include_source=True)
    rows = graph.query(
        "MATCH (n) RETURN labels(n) AS label, count(*) AS count ORDER BY label"
    )
    assert rows == [
        {"label": ["Document"], "count": 1},
        {"label": ["bar"], "count": 1},
        {"label": ["foo"], "count": 1},
    ]
    assert graph.structured_schema["metadata"]["constraint"] == []
def test_neo4j_add_data_base() -> None:
    """baseEntityLabel=True should add the base label and create a constraint."""
    creds = {
        name: os.environ.get(name)
        for name in ("NEO4J_URI", "NEO4J_USERNAME", "NEO4J_PASSWORD")
    }
    for value in creds.values():
        assert value is not None
    graph = Neo4jGraph(
        url=creds["NEO4J_URI"],
        username=creds["NEO4J_USERNAME"],
        password=creds["NEO4J_PASSWORD"],
        sanitize=True,
    )
    # Start from a clean database with no schema constraints.
    graph.query("MATCH (n) DETACH DELETE n")
    graph.query("CALL apoc.schema.assert({}, {})")
    graph.refresh_schema()
    graph.add_graph_documents(test_data, baseEntityLabel=True)
    rows = graph.query(
        "MATCH (n) RETURN apoc.coll.sort(labels(n)) AS label, "
        "count(*) AS count ORDER BY label"
    )
    # Every data node carries both the base entity label and its own type.
    assert rows == [
        {"label": [BASE_ENTITY_LABEL, "bar"], "count": 1},
        {"label": [BASE_ENTITY_LABEL, "foo"], "count": 1},
    ]
    assert graph.structured_schema["metadata"]["constraint"] != []
def test_neo4j_add_data_base_source() -> None:
    """baseEntityLabel + include_source: Document node plus base-labelled nodes."""
    creds = {
        name: os.environ.get(name)
        for name in ("NEO4J_URI", "NEO4J_USERNAME", "NEO4J_PASSWORD")
    }
    for value in creds.values():
        assert value is not None
    graph = Neo4jGraph(
        url=creds["NEO4J_URI"],
        username=creds["NEO4J_USERNAME"],
        password=creds["NEO4J_PASSWORD"],
        sanitize=True,
    )
    # Start from a clean database with no schema constraints.
    graph.query("MATCH (n) DETACH DELETE n")
    graph.query("CALL apoc.schema.assert({}, {})")
    graph.refresh_schema()
    graph.add_graph_documents(test_data, baseEntityLabel=True, include_source=True)
    rows = graph.query(
        "MATCH (n) RETURN apoc.coll.sort(labels(n)) AS label, "
        "count(*) AS count ORDER BY label"
    )
    # The source Document node does NOT get the base entity label.
    assert rows == [
        {"label": ["Document"], "count": 1},
        {"label": [BASE_ENTITY_LABEL, "bar"], "count": 1},
        {"label": [BASE_ENTITY_LABEL, "foo"], "count": 1},
    ]
    assert graph.structured_schema["metadata"]["constraint"] != []
def test_neo4j_filtering_labels() -> None:
    """Excluded (e.g. Bloom-internal) labels must not appear in the schema."""
    creds = {
        name: os.environ.get(name)
        for name in ("NEO4J_URI", "NEO4J_USERNAME", "NEO4J_PASSWORD")
    }
    for value in creds.values():
        assert value is not None
    graph = Neo4jGraph(
        url=creds["NEO4J_URI"],
        username=creds["NEO4J_USERNAME"],
        password=creds["NEO4J_PASSWORD"],
        sanitize=True,
    )
    # Clean database and constraints, then create only excluded-label data.
    graph.query("MATCH (n) DETACH DELETE n")
    graph.query("CALL apoc.schema.assert({}, {})")
    graph.query(
        """
        CREATE (:_Bloom_Scene_ {property_a: 'a'})
        -[:_Bloom_HAS_SCENE_ {property_b: 'b'}]
        ->(:_Bloom_Perspective_)
        """
    )
    graph.refresh_schema()
    # The schema should be empty: everything created above is filtered out.
    schema = graph.structured_schema
    assert schema["node_props"] == {}
    assert schema["rel_props"] == {}
    assert schema["relationships"] == []
def test_driver_config() -> None:
    """Extra driver_config options should be accepted and still allow queries."""
    creds = {
        name: os.environ.get(name)
        for name in ("NEO4J_URI", "NEO4J_USERNAME", "NEO4J_PASSWORD")
    }
    for value in creds.values():
        assert value is not None
    graph = Neo4jGraph(
        url=creds["NEO4J_URI"],
        username=creds["NEO4J_USERNAME"],
        password=creds["NEO4J_PASSWORD"],
        driver_config={"max_connection_pool_size": 1},
    )
    # A trivial query proves the driver was constructed successfully.
    graph.query("RETURN 'foo'")
def test_enhanced_schema() -> None:
    """Test that enhanced_schema captures example values and distinct counts.

    NOTE: the original docstring said "works with driver config" — that was a
    copy-paste error; this test exercises the ``enhanced_schema`` flag.
    """
    url = os.environ.get("NEO4J_URI")
    username = os.environ.get("NEO4J_USERNAME")
    password = os.environ.get("NEO4J_PASSWORD")
    assert url is not None
    assert username is not None
    assert password is not None
    graph = Neo4jGraph(
        url=url, username=username, password=password, enhanced_schema=True
    )
    graph.query("MATCH (n) DETACH DELETE n")
    graph.add_graph_documents(test_data)
    graph.refresh_schema()
    # Enhanced schema augments each property with observed values and counts.
    expected_output = {
        "node_props": {
            "foo": [
                {
                    "property": "id",
                    "type": "STRING",
                    "values": ["foo"],
                    "distinct_count": 1,
                }
            ],
            "bar": [
                {
                    "property": "id",
                    "type": "STRING",
                    "values": ["bar"],
                    "distinct_count": 1,
                }
            ],
        },
        "rel_props": {},
        "relationships": [{"start": "foo", "type": "REL", "end": "bar"}],
    }
    # remove metadata portion of schema
    del graph.structured_schema["metadata"]
    assert graph.structured_schema == expected_output
def test_enhanced_schema_exception() -> None:
    """Mixed property types (str/int/list) must not break enhanced schema."""
    creds = {
        name: os.environ.get(name)
        for name in ("NEO4J_URI", "NEO4J_USERNAME", "NEO4J_PASSWORD")
    }
    for value in creds.values():
        assert value is not None
    graph = Neo4jGraph(
        url=creds["NEO4J_URI"],
        username=creds["NEO4J_USERNAME"],
        password=creds["NEO4J_PASSWORD"],
        enhanced_schema=True,
    )
    graph.query("MATCH (n) DETACH DELETE n")
    # The same property key holds three different types across three nodes.
    graph.query("CREATE (:Node {foo:'bar'})," "(:Node {foo: 1}), (:Node {foo: [1,2]})")
    graph.refresh_schema()
    # remove metadata portion of schema before comparing
    del graph.structured_schema["metadata"]
    assert graph.structured_schema == {
        "node_props": {"Node": [{"property": "foo", "type": "STRING"}]},
        "rel_props": {},
        "relationships": [],
    }
def test_backticks() -> None:
    """Backticks in node/relationship types should be stripped on import."""
    creds = {
        name: os.environ.get(name)
        for name in ("NEO4J_URI", "NEO4J_USERNAME", "NEO4J_PASSWORD")
    }
    for value in creds.values():
        assert value is not None
    graph = Neo4jGraph(
        url=creds["NEO4J_URI"],
        username=creds["NEO4J_USERNAME"],
        password=creds["NEO4J_PASSWORD"],
    )
    graph.query("MATCH (n) DETACH DELETE n")
    graph.add_graph_documents(test_data_backticks)
    node_rows = graph.query("MATCH (n) RETURN labels(n) AS labels ORDER BY n.id")
    rel_rows = graph.query("MATCH ()-[r]->() RETURN type(r) AS type")
    # All backticks removed: plain "foo"/"bar" labels and a plain "REL" type.
    assert node_rows == [{"labels": ["bar"]}, {"labels": ["foo"]}]
    assert rel_rows == [{"type": "REL"}]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs/docker-compose-ontotext-graphdb/graphdb_create.sh | #! /bin/bash
REPOSITORY_ID="langchain"
GRAPHDB_URI="http://localhost:7200/"

echo -e "\nUsing GraphDB: ${GRAPHDB_URI}"

# Run the GraphDB server in the foreground of a background job (started
# with `&` below); `exec` replaces the subshell with the server process.
function startGraphDB {
    echo -e "\nStarting GraphDB..."
    exec /opt/graphdb/dist/bin/graphdb
}

# Poll the REST repositories endpoint until it answers 200, up to 5 tries,
# 30s apart. NOTE(review): if the server never comes up, the script falls
# through and attempts the data load anyway.
function waitGraphDBStart {
    echo -e "\nWaiting GraphDB to start..."
    for _ in $(seq 1 5); do
        CHECK_RES=$(curl --silent --write-out '%{http_code}' --output /dev/null ${GRAPHDB_URI}/rest/repositories)
        if [ "${CHECK_RES}" = '200' ]; then
            echo -e "\nUp and running"
            break
        fi
        sleep 30s
        echo "CHECK_RES: ${CHECK_RES}"
    done
}

# Import the seed dataset into the repository via the RDF4J statements API.
function loadData {
    echo -e "\nImporting starwars-data.trig"
    curl -X POST -H "Content-Type: application/x-trig" -T /starwars-data.trig ${GRAPHDB_URI}/repositories/${REPOSITORY_ID}/statements
}

# Start the server in the background, wait for readiness, load data, then
# keep the container alive on the server job.
startGraphDB &
waitGraphDBStart
loadData
wait
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs/docker-compose-ontotext-graphdb/Dockerfile | FROM ontotext/graphdb:10.5.1
# Pre-create the repository directory and bake the repository config,
# seed data, and bootstrap script into the image.
RUN mkdir -p /opt/graphdb/dist/data/repositories/langchain
COPY config.ttl /opt/graphdb/dist/data/repositories/langchain/
COPY starwars-data.trig /
COPY graphdb_create.sh /run.sh

# The script starts GraphDB and loads the seed data (see graphdb_create.sh).
ENTRYPOINT bash /run.sh
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs/docker-compose-ontotext-graphdb/starwars-data.trig | @base <https://swapi.co/resource/>.
@prefix voc: <https://swapi.co/vocabulary/> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .

# Default graph: one character instance (Dexter Jettster) with its data
# properties.
{
    <besalisk/71>
        a voc:Besalisk , voc:Character ;
        rdfs:label "Dexter Jettster" ;
        voc:eyeColor "yellow" ;
        voc:gender "male" ;
        voc:height 198.0 ;
        voc:mass 102.0 ;
        voc:skinColor "brown" .
}

# Named graph holding the ontology: classes, the Besalisk species record,
# and the property declarations used above.
<https://swapi.co/ontology/> {
    voc:Character a owl:Class .
    voc:Species a owl:Class .
    voc:Besalisk a voc:Species;
        rdfs:label "Besalisk";
        voc:averageHeight 178.0;
        voc:averageLifespan "75";
        voc:character <https://swapi.co/resource/besalisk/71>;
        voc:language "besalisk";
        voc:skinColor "brown";
        voc:eyeColor "yellow" .
    voc:averageHeight a owl:DatatypeProperty .
    voc:averageLifespan a owl:DatatypeProperty .
    voc:character a owl:ObjectProperty .
    voc:language a owl:DatatypeProperty .
    voc:skinColor a owl:DatatypeProperty .
    voc:eyeColor a owl:DatatypeProperty .
    voc:gender a owl:DatatypeProperty .
    voc:height a owl:DatatypeProperty .
    voc:mass a owl:DatatypeProperty .
}
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs/docker-compose-ontotext-graphdb/config.ttl | @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>.
@prefix rep: <http://www.openrdf.org/config/repository#>.
@prefix sr: <http://www.openrdf.org/config/repository/sail#>.
@prefix sail: <http://www.openrdf.org/config/sail#>.
@prefix graphdb: <http://www.ontotext.com/config/graphdb#>.

# GraphDB repository configuration for the "langchain" test repository,
# copied into the image by the Dockerfile.
[] a rep:Repository ;
    rep:repositoryID "langchain" ;
    rdfs:label "" ;
    rep:repositoryImpl [
        rep:repositoryType "graphdb:SailRepository" ;
        sr:sailImpl [
            sail:sailType "graphdb:Sail" ;
            graphdb:read-only "false" ;
            # Inference and Validation
            graphdb:ruleset "empty" ;
            graphdb:disable-sameAs "true" ;
            graphdb:check-for-inconsistencies "false" ;
            # Indexing
            graphdb:entity-id-size "32" ;
            graphdb:enable-context-index "false" ;
            graphdb:enablePredicateList "true" ;
            graphdb:enable-fts-index "false" ;
            graphdb:fts-indexes ("default" "iri") ;
            graphdb:fts-string-literals-index "default" ;
            graphdb:fts-iris-index "none" ;
            # Queries and Updates
            graphdb:query-timeout "0" ;
            graphdb:throw-QueryEvaluationException-on-timeout "false" ;
            graphdb:query-limit-results "0" ;
            # Settable in the file but otherwise hidden in the UI and in the RDF4J console
            graphdb:base-URL "http://example.org/owlim#" ;
            graphdb:defaultNS "" ;
            graphdb:imports "" ;
            graphdb:repository-type "file-repository" ;
            graphdb:storage-folder "storage" ;
            graphdb:entity-index-size "10000000" ;
            graphdb:in-memory-literal-properties "true" ;
            graphdb:enable-literal-index "true" ;
        ]
    ].
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs/docker-compose-ontotext-graphdb/docker-compose.yaml | version: '3.7'
services:
  graphdb:
    # Image is built locally by start.sh (`docker build --tag graphdb .`).
    image: graphdb
    container_name: graphdb
    ports:
      # Expose the GraphDB workbench/REST API on the default port.
      - "7200:7200"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs | lc_public_repos/langchain/libs/community/tests/integration_tests/graphs/docker-compose-ontotext-graphdb/start.sh | set -ex
# Tear down any previous stack (including volumes), rebuild the GraphDB
# image from the local Dockerfile, and start the service detached.
docker compose down -v --remove-orphans
docker build --tag graphdb .
docker compose up -d graphdb
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_aperturedb.py | """Test ApertureDB functionality."""
import uuid
import pytest
from langchain_tests.integration_tests.vectorstores import (
AsyncReadWriteTestSuite,
ReadWriteTestSuite,
)
from langchain_community.vectorstores import ApertureDB
class TestApertureDBReadWriteTestSuite(ReadWriteTestSuite):
    """Run the standard sync vectorstore read/write suite against ApertureDB."""

    @pytest.fixture
    def vectorstore(self) -> ApertureDB:
        descriptor_set = uuid.uuid4().hex  # Fresh descriptor set for each test
        return ApertureDB(
            embeddings=self.get_embeddings(), descriptor_set=descriptor_set
        )
class TestAsyncApertureDBReadWriteTestSuite(AsyncReadWriteTestSuite):
    """Run the standard async vectorstore read/write suite against ApertureDB."""

    @pytest.fixture
    async def vectorstore(self) -> ApertureDB:
        descriptor_set = uuid.uuid4().hex  # Fresh descriptor set for each test
        return ApertureDB(
            embeddings=self.get_embeddings(), descriptor_set=descriptor_set
        )
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_deeplake.py | """Test Deep Lake functionality."""
import pytest
from langchain_core.documents import Document
from pytest import FixtureRequest
from langchain_community.vectorstores import DeepLake
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
@pytest.fixture
def deeplake_datastore() -> DeepLake:  # type: ignore[misc]
    """Yield a three-document on-disk DeepLake store; drop the dataset after."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = DeepLake.from_texts(
        dataset_path="./test_path",
        texts=texts,
        metadatas=metadatas,
        embedding_function=FakeEmbeddings(),
        overwrite=True,
    )
    yield docsearch
    # Teardown: remove the dataset created on disk.
    docsearch.delete_dataset()
@pytest.fixture(params=["L1", "L2", "max", "cos"])
def distance_metric(request: FixtureRequest) -> str:
    """Parametrize tests over each supported distance metric."""
    return request.param
def test_deeplake() -> None:
    """End-to-end: build an in-memory store and run a similarity search."""
    store = DeepLake.from_texts(
        dataset_path="mem://test_path",
        texts=["foo", "bar", "baz"],
        embedding=FakeEmbeddings(),
    )
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo")]
def test_deeplake_with_metadatas() -> None:
    """Metadata attached at construction should come back with search hits."""
    corpus = ["foo", "bar", "baz"]
    store = DeepLake.from_texts(
        dataset_path="mem://test_path",
        texts=corpus,
        embedding=FakeEmbeddings(),
        metadatas=[{"page": str(i)} for i in range(len(corpus))],
    )
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo", metadata={"page": "0"})]
def test_deeplake_with_persistence(deeplake_datastore: DeepLake) -> None:
    """Test end to end construction and search, with persistence."""
    output = deeplake_datastore.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo", metadata={"page": "0"})]

    # Get a new VectorStore from the persisted directory
    docsearch = DeepLake(
        dataset_path=deeplake_datastore.vectorstore.dataset_handler.path,
        embedding_function=FakeEmbeddings(),
    )
    output = docsearch.similarity_search("foo", k=1)

    # Clean up
    docsearch.delete_dataset()

    # Persist doesn't need to be called again
    # Data will be automatically persisted on object deletion
    # Or on program exit
def test_deeplake_overwrite_flag(deeplake_datastore: DeepLake) -> None:
    """Re-opening without overwrite keeps data; overwrite=True wipes it."""
    dataset_path = deeplake_datastore.vectorstore.dataset_handler.path
    output = deeplake_datastore.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo", metadata={"page": "0"})]

    # Get a new VectorStore from the persisted directory, with no overwrite (implicit)
    docsearch = DeepLake(
        dataset_path=dataset_path,
        embedding_function=FakeEmbeddings(),
    )
    output = docsearch.similarity_search("foo", k=1)
    # assert page still present
    assert output == [Document(page_content="foo", metadata={"page": "0"})]

    # Get a new VectorStore from the persisted directory, with no overwrite (explicit)
    docsearch = DeepLake(
        dataset_path=dataset_path,
        embedding_function=FakeEmbeddings(),
        overwrite=False,
    )
    output = docsearch.similarity_search("foo", k=1)
    # assert page still present
    assert output == [Document(page_content="foo", metadata={"page": "0"})]

    # Get a new VectorStore from the persisted directory, with overwrite
    docsearch = DeepLake(
        dataset_path=dataset_path,
        embedding_function=FakeEmbeddings(),
        overwrite=True,
    )
    # Searching an emptied (overwritten) dataset raises.
    with pytest.raises(ValueError):
        output = docsearch.similarity_search("foo", k=1)
def test_similarity_search(deeplake_datastore: DeepLake) -> None:
    """Similarity search, with and without an explicit TQL query."""
    distance_metric = "cos"
    output = deeplake_datastore.similarity_search(
        "foo", k=1, distance_metric=distance_metric
    )
    assert output == [Document(page_content="foo", metadata={"page": "0"})]

    # Restrict the search to a single row by id via a raw TQL query.
    tql_query = (
        f"SELECT * WHERE "
        f"id=='{deeplake_datastore.vectorstore.dataset.id[0].numpy()[0]}'"
    )
    output = deeplake_datastore.similarity_search(
        query="foo", tql_query=tql_query, k=1, distance_metric=distance_metric
    )
    assert len(output) == 1
def test_similarity_search_by_vector(
    deeplake_datastore: DeepLake, distance_metric: str
) -> None:
    """Searching by a raw embedding vector should return the matching doc."""
    vectors = FakeEmbeddings().embed_documents(["foo", "bar", "baz"])
    # The second vector corresponds to "bar".
    result = deeplake_datastore.similarity_search_by_vector(
        vectors[1], k=1, distance_metric=distance_metric
    )
    assert result == [Document(page_content="bar", metadata={"page": "1"})]
    deeplake_datastore.delete_dataset()
def test_similarity_search_with_score(
    deeplake_datastore: DeepLake, distance_metric: str
) -> None:
    """Test similarity search with score."""
    deeplake_datastore.vectorstore.summary()
    output, score = deeplake_datastore.similarity_search_with_score(
        "foo", k=1, distance_metric=distance_metric
    )[0]
    assert output == Document(page_content="foo", metadata={"page": "0"})
    # Exact match: cosine similarity is 1.0; all distance metrics give 0.0.
    if distance_metric == "cos":
        assert score == 1.0
    else:
        assert score == 0.0
    deeplake_datastore.delete_dataset()
def test_similarity_search_with_filter(
    deeplake_datastore: DeepLake, distance_metric: str
) -> None:
    """A metadata filter should constrain results regardless of the query."""
    # Query "foo" but filter to page "1" — the filter must win.
    result = deeplake_datastore.similarity_search(
        "foo",
        k=1,
        distance_metric=distance_metric,
        filter={"metadata": {"page": "1"}},
    )
    assert result == [Document(page_content="bar", metadata={"page": "1"})]
    deeplake_datastore.delete_dataset()
def test_max_marginal_relevance_search(deeplake_datastore: DeepLake) -> None:
    """MMR search by query text and by raw vector both return the best match."""
    by_text = deeplake_datastore.max_marginal_relevance_search("foo", k=1, fetch_k=2)
    assert by_text == [Document(page_content="foo", metadata={"page": "0"})]

    vectors = FakeEmbeddings().embed_documents(["foo", "bar", "baz"])
    by_vector = deeplake_datastore.max_marginal_relevance_search_by_vector(
        vectors[0], k=1, fetch_k=2
    )
    assert by_vector == [Document(page_content="foo", metadata={"page": "0"})]
    deeplake_datastore.delete_dataset()
def test_delete_dataset_by_ids(deeplake_datastore: DeepLake) -> None:
    """Deleting by id removes exactly that row and leaves the rest intact."""
    id = deeplake_datastore.vectorstore.dataset.id.data()["value"][0]
    deeplake_datastore.delete(ids=[id])
    # The deleted row (page "0") is no longer findable...
    assert (
        deeplake_datastore.similarity_search(
            "foo", k=1, filter={"metadata": {"page": "0"}}
        )
        == []
    )
    # ...and the other two rows remain.
    assert len(deeplake_datastore.vectorstore) == 2

    deeplake_datastore.delete_dataset()
def test_delete_dataset_by_filter(deeplake_datastore: DeepLake) -> None:
    """Deleting by metadata filter removes the matching row only."""
    deeplake_datastore.delete(filter={"metadata": {"page": "1"}})
    # The filtered-out row (page "1", "bar") is gone...
    assert (
        deeplake_datastore.similarity_search(
            "bar", k=1, filter={"metadata": {"page": "1"}}
        )
        == []
    )
    # ...and the other two rows remain.
    assert len(deeplake_datastore.vectorstore.dataset) == 2

    deeplake_datastore.delete_dataset()
def test_delete_by_path(deeplake_datastore: DeepLake) -> None:
    """force_delete_by_path should remove the dataset from storage entirely."""
    import deeplake

    target = deeplake_datastore.dataset_path
    DeepLake.force_delete_by_path(target)
    assert not deeplake.exists(target)
def test_add_texts(deeplake_datastore: DeepLake) -> None:
    """add_texts accepts texts+metadatas and rejects unknown keyword args."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    deeplake_datastore.add_texts(
        texts=texts,
        metadatas=metadatas,
    )
    with pytest.raises(TypeError):
        # "metada" is an intentional typo: unknown kwargs must raise TypeError.
        deeplake_datastore.add_texts(
            texts=texts,
            metada=metadatas,
        )
def test_ids_backwards_compatibility() -> None:
    """Test that ids are backwards compatible.

    Older datasets used an "ids" tensor (plural); searching a store built
    with that layout must still work.
    """
    db = DeepLake(
        dataset_path="mem://test_path",
        embedding_function=FakeEmbeddings(),
        tensor_params=[
            {"name": "ids", "htype": "text"},
            {"name": "text", "htype": "text"},
            {"name": "embedding", "htype": "embedding"},
            {"name": "metadata", "htype": "json"},
        ],
    )
    db.vectorstore.add(
        ids=["1", "2", "3"],
        text=["foo", "bar", "baz"],
        embedding=FakeEmbeddings().embed_documents(["foo", "bar", "baz"]),
        metadata=[{"page": str(i)} for i in range(3)],
    )
    output = db.similarity_search("foo", k=1)
    assert len(output) == 1
def test_similarity_search_should_error_out_when_not_supported_kwargs_are_provided(
    deeplake_datastore: DeepLake,
) -> None:
    """Unsupported keyword arguments to similarity_search must raise TypeError.

    NOTE: the original docstring said "ids are backwards compatible" — that
    was a copy-paste error; this test checks kwarg validation.
    """
    with pytest.raises(TypeError):
        deeplake_datastore.similarity_search("foo", k=1, not_supported_kwarg=True)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_nucliadb.py | from typing import Any
from unittest import mock
from langchain_community.vectorstores.nucliadb import NucliaDB
class attrdict(dict):
    """Dict whose items are also reachable as attributes, recursively."""

    def __getitem__(self, key: str) -> Any:
        item = dict.__getitem__(self, key)
        # Wrap nested dicts so attribute access keeps working at any depth.
        if isinstance(item, dict):
            return attrdict(item)
        return item

    __getattr__ = __getitem__
def FakeCreate(**args: Any) -> Any:
    """Factory for a ``NucliaResource.create`` stand-in returning a fixed id."""

    def fn(self: Any, **kwargs: Any) -> str:
        # Every "created" resource gets the same placeholder uuid.
        return "fake_uuid"

    return fn
def FakeDelete(**args: Any) -> Any:
    """Factory for a ``NucliaResource.delete`` stand-in that does nothing."""

    def fn(self: Any, **kwargs: Any) -> None:
        # Deletion is a no-op in the mock.
        return None

    return fn
def FakeFind(**args: Any) -> Any:
    """Factory for a ``NucliaSearch.find`` stand-in with one canned hit."""

    def fn(self: Any, **kwargs: Any) -> Any:
        # Shape mirrors a Nuclia find response: resource "123" contains field
        # "456" holding a single paragraph, plus the raw text body and "extra"
        # metadata that test_search below reads off the returned Document.
        return attrdict(
            {
                "resources": {
                    "123": attrdict(
                        {
                            "fields": {
                                "456": attrdict(
                                    {
                                        "paragraphs": {
                                            "123/t/text/0-14": attrdict(
                                                {
                                                    "text": "This is a test",
                                                    "order": 0,
                                                }
                                            ),
                                        }
                                    }
                                )
                            },
                            "data": {
                                "texts": {
                                    "text": {
                                        "body": "This is a test",
                                    }
                                }
                            },
                            "extra": attrdict({"metadata": {"some": "metadata"}}),
                        }
                    )
                }
            }
        )

    return fn
def test_add_texts() -> None:
    """Adding two texts returns one resource id per text."""
    with mock.patch(
        "nuclia.sdk.resource.NucliaResource.create",
        new_callable=FakeCreate,
    ):
        ndb = NucliaDB(knowledge_box="YOUR_KB_ID", local=False, api_key="YOUR_API_KEY")
        assert ndb.is_local is False
        created = ndb.add_texts(["This is a new test", "This is a second test"])
        assert len(created) == 2
def test_delete() -> None:
    """delete reports success for a list of resource ids."""
    with mock.patch(
        "nuclia.sdk.resource.NucliaResource.delete",
        new_callable=FakeDelete,
    ):
        ndb = NucliaDB(knowledge_box="YOUR_KB_ID", local=False, api_key="YOUR_API_KEY")
        assert ndb.delete(["123", "456"])
def test_search() -> None:
    """similarity_search maps the mocked find response onto Documents."""
    with mock.patch(
        "nuclia.sdk.search.NucliaSearch.find",
        new_callable=FakeFind,
    ):
        ndb = NucliaDB(knowledge_box="YOUR_KB_ID", local=False, api_key="YOUR_API_KEY")
        found = ndb.similarity_search("Who was inspired by Ada Lovelace?")
        assert len(found) == 1
        top = found[0]
        assert top.page_content == "This is a test"
        assert top.metadata["extra"]["some"] == "metadata"
        assert top.metadata["value"]["body"] == "This is a test"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_vald.py | """Test Vald functionality."""
import time
from typing import List, Optional
from langchain_core.documents import Document
from langchain_community.vectorstores import Vald
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
"""
To run, you should have a Vald cluster.
https://github.com/vdaas/vald/blob/main/docs/tutorial/get-started.md
"""
WAIT_TIME = 90
def _vald_from_texts(
    metadatas: Optional[List[dict]] = None,
    host: str = "localhost",
    port: int = 8080,
    skip_strict_exist_check: bool = True,
) -> Vald:
    """Construct a Vald store pre-loaded with the shared fake corpus."""
    connection_kwargs = {
        "metadatas": metadatas,
        "host": host,
        "port": port,
        "skip_strict_exist_check": skip_strict_exist_check,
    }
    return Vald.from_texts(fake_texts, FakeEmbeddings(), **connection_kwargs)
def test_vald_add_texts() -> None:
    """Documents added after construction become searchable."""
    seed = ["foo", "bar", "baz"]
    docsearch = _vald_from_texts(metadatas=[{"page": i} for i in range(len(seed))])
    time.sleep(WAIT_TIME)  # Wait for CreateIndex
    assert len(docsearch.similarity_search("foo", k=10)) == 3

    extra = ["a", "b", "c"]
    docsearch.add_texts(extra, [{"page": i} for i in range(len(extra))])
    time.sleep(WAIT_TIME)  # Wait for CreateIndex after the second batch
    assert len(docsearch.similarity_search("foo", k=10)) == 6
def test_vald_delete() -> None:
    """Deleting a document shrinks the result set accordingly."""
    seed = ["foo", "bar", "baz"]
    docsearch = _vald_from_texts(metadatas=[{"page": i} for i in range(len(seed))])
    time.sleep(WAIT_TIME)
    assert len(docsearch.similarity_search("foo", k=10)) == 3

    docsearch.delete(["foo"])
    time.sleep(WAIT_TIME)
    assert len(docsearch.similarity_search("foo", k=10)) == 2
def test_vald_search() -> None:
    """End-to-end construction and nearest-neighbour search."""
    docsearch = _vald_from_texts()
    time.sleep(WAIT_TIME)
    expected = [Document(page_content=t) for t in ("foo", "bar", "baz")]
    assert docsearch.similarity_search("foo", k=3) == expected
def test_vald_search_with_score() -> None:
    """Search with scores returns documents in ascending score order."""
    seed = ["foo", "bar", "baz"]
    docsearch = _vald_from_texts(metadatas=[{"page": i} for i in range(len(seed))])
    time.sleep(WAIT_TIME)
    pairs = docsearch.similarity_search_with_score("foo", k=3)
    assert [doc for doc, _ in pairs] == [
        Document(page_content="foo"),
        Document(page_content="bar"),
        Document(page_content="baz"),
    ]
    scores = [score for _, score in pairs]
    # Smaller score = closer match, so scores must be strictly increasing.
    assert scores[0] < scores[1] < scores[2]
def test_vald_search_by_vector() -> None:
    """End-to-end construction and search by a raw embedding vector."""
    seed = ["foo", "bar", "baz"]
    docsearch = _vald_from_texts(metadatas=[{"page": i} for i in range(len(seed))])
    time.sleep(WAIT_TIME)
    query_vector = FakeEmbeddings().embed_query("foo")
    expected = [Document(page_content=t) for t in ("foo", "bar", "baz")]
    assert docsearch.similarity_search_by_vector(query_vector, k=3) == expected
def test_vald_search_with_score_by_vector() -> None:
    """Vector search with scores returns ascending-score results."""
    seed = ["foo", "bar", "baz"]
    docsearch = _vald_from_texts(metadatas=[{"page": i} for i in range(len(seed))])
    time.sleep(WAIT_TIME)
    query_vector = FakeEmbeddings().embed_query("foo")
    pairs = docsearch.similarity_search_with_score_by_vector(query_vector, k=3)
    assert [doc for doc, _ in pairs] == [
        Document(page_content="foo"),
        Document(page_content="bar"),
        Document(page_content="baz"),
    ]
    scores = [score for _, score in pairs]
    assert scores[0] < scores[1] < scores[2]
def test_vald_max_marginal_relevance_search() -> None:
    """End-to-end construction and MMR search."""
    seed = ["foo", "bar", "baz"]
    docsearch = _vald_from_texts(metadatas=[{"page": i} for i in range(len(seed))])
    time.sleep(WAIT_TIME)
    results = docsearch.max_marginal_relevance_search("foo", k=2, fetch_k=3)
    assert results == [
        Document(page_content="foo"),
        Document(page_content="bar"),
    ]
def test_vald_max_marginal_relevance_search_by_vector() -> None:
    """End-to-end construction and MMR search by a raw embedding vector."""
    seed = ["foo", "bar", "baz"]
    docsearch = _vald_from_texts(metadatas=[{"page": i} for i in range(len(seed))])
    time.sleep(WAIT_TIME)
    query_vector = FakeEmbeddings().embed_query("foo")
    results = docsearch.max_marginal_relevance_search_by_vector(
        query_vector, k=2, fetch_k=3
    )
    assert results == [
        Document(page_content="foo"),
        Document(page_content="bar"),
    ]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_opensearch.py | """Test OpenSearch functionality."""
import pytest
from langchain_core.documents import Document
from langchain_community.vectorstores.opensearch_vector_search import (
PAINLESS_SCRIPTING_SEARCH,
SCRIPT_SCORING_SEARCH,
OpenSearchVectorSearch,
)
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
FakeEmbeddings,
)
DEFAULT_OPENSEARCH_URL = "http://localhost:9200"
texts = ["foo", "bar", "baz"]
ids = ["id_foo", "id_bar", "id_baz"]
def test_opensearch() -> None:
    """End-to-end indexing and Approximate Search round trip."""
    store = OpenSearchVectorSearch.from_texts(
        texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL, ids=ids
    )
    assert store.similarity_search("foo", k=1) == [
        Document(page_content="foo", id="id_foo")
    ]
def test_similarity_search_with_score() -> None:
    """Approximate Search returns (document, score) pairs, best first."""
    page_meta = [{"page": i} for i in range(len(texts))]
    store = OpenSearchVectorSearch.from_texts(
        texts,
        FakeEmbeddings(),
        metadatas=page_meta,
        opensearch_url=DEFAULT_OPENSEARCH_URL,
        ids=ids,
    )
    pairs = store.similarity_search_with_score("foo", k=2)
    assert pairs == [
        (Document(page_content="foo", metadata={"page": 0}, id="id_foo"), 1.0),
        (Document(page_content="bar", metadata={"page": 1}, id="id_bar"), 0.5),
    ]
def test_opensearch_with_custom_field_name() -> None:
    """Custom vector and text field names are honoured on index and query."""
    store = OpenSearchVectorSearch.from_texts(
        texts,
        FakeEmbeddings(),
        opensearch_url=DEFAULT_OPENSEARCH_URL,
        vector_field="my_vector",
        text_field="custom_text",
        ids=ids,
    )
    hits = store.similarity_search(
        "foo", k=1, vector_field="my_vector", text_field="custom_text"
    )
    assert hits == [Document(page_content="foo", id="id_foo")]

    OpenSearchVectorSearch.add_texts(
        store,
        ["test", "add", "text", "method"],
        vector_field="my_vector",
        text_field="custom_text",
    )
    hits = store.similarity_search(
        "add", k=1, vector_field="my_vector", text_field="custom_text"
    )
    # NOTE(review): "foo" (not "add") is expected here — presumably because
    # FakeEmbeddings gives every query the same vector, so the first indexed
    # document stays the nearest neighbour. Confirm against the fixture.
    assert hits == [Document(page_content="foo", id="id_foo")]
def test_opensearch_with_metadatas() -> None:
    """Metadata supplied at indexing time comes back on results."""
    page_meta = [{"page": i} for i in range(len(texts))]
    store = OpenSearchVectorSearch.from_texts(
        texts,
        FakeEmbeddings(),
        metadatas=page_meta,
        opensearch_url=DEFAULT_OPENSEARCH_URL,
        ids=ids,
    )
    assert store.similarity_search("foo", k=1) == [
        Document(page_content="foo", metadata={"page": 0}, id="id_foo")
    ]
def test_max_marginal_relevance_search() -> None:
    """End-to-end indexing and MMR search."""
    store = OpenSearchVectorSearch.from_texts(
        texts,
        FakeEmbeddings(),
        opensearch_url=DEFAULT_OPENSEARCH_URL,
        ids=ids,
    )
    assert store.max_marginal_relevance_search("foo", k=1) == [
        Document(page_content="foo", id="id_foo")
    ]
def test_add_text() -> None:
    """add_texts returns one document id per newly indexed text."""
    new_texts = ["test", "add", "text", "method"]
    new_meta = [{"page": i} for i in range(len(new_texts))]
    store = OpenSearchVectorSearch.from_texts(
        texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL
    )
    returned_ids = OpenSearchVectorSearch.add_texts(store, new_texts, new_meta)
    assert len(returned_ids) == len(new_texts)
def test_add_embeddings() -> None:
    """
    add_embeddings indexes pre-computed vectors decoupled from page_content,
    so a query can match on the embedding text (e.g. a question) while
    returning different page_content (e.g. the answer).
    """
    embeddings = ConsistentFakeEmbeddings()
    contents = ["foo1", "foo2", "foo3"]
    page_meta = [{"page": i} for i in range(len(contents))]
    # Embedding inputs are deliberately rotated relative to the contents.
    embedding_vectors = embeddings.embed_documents(["foo2", "foo3", "foo1"])
    store = OpenSearchVectorSearch.from_texts(
        ["filler"], embeddings, opensearch_url=DEFAULT_OPENSEARCH_URL
    )
    store.add_embeddings(list(zip(contents, embedding_vectors)), page_meta)
    top = store.similarity_search("foo1", k=1)
    # "foo3" is stored with the vector of "foo1", so it wins the query.
    assert top[0].page_content == "foo3"
    assert top[0].metadata == {"page": 2}
def test_opensearch_script_scoring() -> None:
    """Script Scoring search honours a pre_filter clause."""
    store = OpenSearchVectorSearch.from_texts(
        texts,
        FakeEmbeddings(),
        opensearch_url=DEFAULT_OPENSEARCH_URL,
        is_appx_search=False,
    )
    hits = store.similarity_search(
        "foo",
        k=1,
        search_type=SCRIPT_SCORING_SEARCH,
        pre_filter={"bool": {"filter": {"term": {"text": "bar"}}}},
    )
    # The pre-filter restricts candidates to the "bar" document.
    assert hits[0].page_content == "bar"
    assert hits[0].id is not None
def test_add_text_script_scoring() -> None:
    """Texts added after construction are reachable via Script Scoring."""
    seed = ["test", "add", "text", "method"]
    store = OpenSearchVectorSearch.from_texts(
        seed,
        FakeEmbeddings(),
        opensearch_url=DEFAULT_OPENSEARCH_URL,
        is_appx_search=False,
    )
    OpenSearchVectorSearch.add_texts(
        store, texts, [{"page": i} for i in range(len(seed))]
    )
    hits = store.similarity_search(
        "add", k=1, search_type=SCRIPT_SCORING_SEARCH, space_type="innerproduct"
    )
    assert hits[0].page_content == "test"
    assert hits[0].id is not None
def test_opensearch_painless_scripting() -> None:
    """Painless Scripting search honours a pre_filter clause."""
    store = OpenSearchVectorSearch.from_texts(
        texts,
        FakeEmbeddings(),
        opensearch_url=DEFAULT_OPENSEARCH_URL,
        is_appx_search=False,
    )
    hits = store.similarity_search(
        "foo",
        k=1,
        search_type=PAINLESS_SCRIPTING_SEARCH,
        pre_filter={"bool": {"filter": {"term": {"text": "baz"}}}},
    )
    # The pre-filter restricts candidates to the "baz" document.
    assert hits[0].page_content == "baz"
    assert hits[0].id is not None
def test_add_text_painless_scripting() -> None:
    """Texts added after construction are reachable via Painless Scripting."""
    seed = ["test", "add", "text", "method"]
    store = OpenSearchVectorSearch.from_texts(
        seed,
        FakeEmbeddings(),
        opensearch_url=DEFAULT_OPENSEARCH_URL,
        is_appx_search=False,
    )
    OpenSearchVectorSearch.add_texts(
        store, texts, [{"page": i} for i in range(len(seed))]
    )
    hits = store.similarity_search(
        "add", k=1, search_type=PAINLESS_SCRIPTING_SEARCH, space_type="cosineSimilarity"
    )
    assert hits[0].page_content == "test"
    assert hits[0].id is not None
def test_opensearch_invalid_search_type() -> None:
    """An unknown search_type must raise ValueError."""
    store = OpenSearchVectorSearch.from_texts(
        texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL
    )
    with pytest.raises(ValueError):
        store.similarity_search("foo", k=1, search_type="invalid_search_type")
def test_opensearch_embedding_size_zero() -> None:
    """Indexing an empty corpus must fail with RuntimeError."""
    with pytest.raises(RuntimeError):
        OpenSearchVectorSearch.from_texts(
            [], FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL
        )
def test_appx_search_with_boolean_filter() -> None:
    """Approximate Search applies a boolean filter with a should subquery."""
    store = OpenSearchVectorSearch.from_texts(
        texts,
        FakeEmbeddings(),
        opensearch_url=DEFAULT_OPENSEARCH_URL,
    )
    hits = store.similarity_search(
        "foo",
        k=3,
        boolean_filter={"bool": {"must": [{"term": {"text": "bar"}}]}},
        subquery_clause="should",
    )
    assert hits[0].page_content == "bar"
    assert hits[0].id is not None
def test_appx_search_with_lucene_filter() -> None:
    """Approximate Search applies a Lucene filter on a lucene-engine index."""
    store = OpenSearchVectorSearch.from_texts(
        texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL, engine="lucene"
    )
    hits = store.similarity_search(
        "foo", k=3, lucene_filter={"bool": {"must": [{"term": {"text": "bar"}}]}}
    )
    assert hits[0].page_content == "bar"
    assert hits[0].id is not None
def test_opensearch_with_custom_field_name_appx_true() -> None:
    """Explicit is_appx_search=True indexes and searches correctly."""
    store = OpenSearchVectorSearch.from_texts(
        ["add", "test", "text", "method"],
        FakeEmbeddings(),
        opensearch_url=DEFAULT_OPENSEARCH_URL,
        is_appx_search=True,
    )
    hits = store.similarity_search("add", k=1)
    assert hits[0].page_content == "add"
    assert hits[0].id is not None
def test_opensearch_with_custom_field_name_appx_false() -> None:
    """Test search with default (non-explicit) appx setting."""
    # The original docstring said "appx true" — copy-pasted from the
    # sibling test above; this variant omits is_appx_search entirely.
    text_input = ["add", "test", "text", "method"]
    docsearch = OpenSearchVectorSearch.from_texts(
        text_input, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL
    )
    output = docsearch.similarity_search("add", k=1)
    assert output[0].page_content == "add"
    assert output[0].id is not None
def test_opensearch_serverless_with_scripting_search_indexing_throws_error() -> None:
    """Serverless (aoss) auth combined with non-approximate search must fail."""
    import boto3
    from opensearchpy import AWSV4SignerAuth

    auth = AWSV4SignerAuth(
        boto3.Session().get_credentials(), "test-region", "aoss"
    )
    with pytest.raises(ValueError):
        OpenSearchVectorSearch.from_texts(
            texts,
            FakeEmbeddings(),
            opensearch_url=DEFAULT_OPENSEARCH_URL,
            is_appx_search=False,
            http_auth=auth,
        )
def test_opensearch_serverless_with_lucene_engine_throws_error() -> None:
    """Serverless (aoss) auth combined with the lucene engine must fail."""
    import boto3
    from opensearchpy import AWSV4SignerAuth

    auth = AWSV4SignerAuth(
        boto3.Session().get_credentials(), "test-region", "aoss"
    )
    with pytest.raises(ValueError):
        OpenSearchVectorSearch.from_texts(
            texts,
            FakeEmbeddings(),
            opensearch_url=DEFAULT_OPENSEARCH_URL,
            engine="lucene",
            http_auth=auth,
        )
def test_appx_search_with_efficient_and_bool_filter_throws_error() -> None:
    """Passing both efficient_filter and boolean_filter must raise."""
    store = OpenSearchVectorSearch.from_texts(
        texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL, engine="lucene"
    )
    with pytest.raises(ValueError):
        store.similarity_search(
            "foo",
            k=3,
            efficient_filter={"bool": {"must": [{"term": {"text": "baz"}}]}},
            boolean_filter={"bool": {"must": [{"term": {"text": "bar"}}]}},
        )
def test_appx_search_with_efficient_and_lucene_filter_throws_error() -> None:
    """Passing both efficient_filter and lucene_filter must raise."""
    store = OpenSearchVectorSearch.from_texts(
        texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL, engine="lucene"
    )
    with pytest.raises(ValueError):
        store.similarity_search(
            "foo",
            k=3,
            efficient_filter={"bool": {"must": [{"term": {"text": "baz"}}]}},
            lucene_filter={"bool": {"must": [{"term": {"text": "bar"}}]}},
        )
def test_appx_search_with_boolean_and_lucene_filter_throws_error() -> None:
    """Passing both boolean_filter and lucene_filter must raise."""
    store = OpenSearchVectorSearch.from_texts(
        texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL, engine="lucene"
    )
    with pytest.raises(ValueError):
        store.similarity_search(
            "foo",
            k=3,
            boolean_filter={"bool": {"must": [{"term": {"text": "baz"}}]}},
            lucene_filter={"bool": {"must": [{"term": {"text": "bar"}}]}},
        )
def test_appx_search_with_faiss_efficient_filter() -> None:
    """Approximate Search applies a Faiss efficient_filter."""
    store = OpenSearchVectorSearch.from_texts(
        texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL, engine="faiss"
    )
    hits = store.similarity_search(
        "foo", k=3, efficient_filter={"bool": {"must": [{"term": {"text": "bar"}}]}}
    )
    assert hits[0].page_content == "bar"
    assert hits[0].id is not None
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_alibabacloud_opensearch.py | import time
from typing import List
from langchain_core.documents import Document
from langchain_community.vectorstores.alibabacloud_opensearch import (
AlibabaCloudOpenSearch,
AlibabaCloudOpenSearchSettings,
)
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
OS_TOKEN_COUNT = 1536
texts = ["foo", "bar", "baz"]
class FakeEmbeddingsWithOsDimension(FakeEmbeddings):
    """Fake embeddings padded out to the OpenSearch vector dimension."""

    def embed_documents(self, embedding_texts: List[str]) -> List[List[float]]:
        """Embed each text as OS_TOKEN_COUNT-1 ones plus its positional index."""
        vectors = []
        for index in range(len(embedding_texts)):
            vectors.append([float(1.0)] * (OS_TOKEN_COUNT - 1) + [float(index)])
        return vectors

    def embed_query(self, text: str) -> List[float]:
        """Embed a query so it matches the corpus document with the same text."""
        # ``texts`` is the module-level corpus; unknown queries raise ValueError.
        return [float(1.0)] * (OS_TOKEN_COUNT - 1) + [float(texts.index(text))]
"""
settings = AlibabaCloudOpenSearchSettings(
endpoint="The endpoint of opensearch instance, If you want to access through
the public network, you need to enable public network access in the network
information of the instance details. If you want to access within
the Alibaba Cloud VPC, you can directly use the API domain name.",
    instance_id="The identifier of the opensearch instance",
protocol (str): "Communication Protocol between SDK and Server, default is http.",
username="The username specified when purchasing the instance.",
password="The password specified when purchasing the instance.",
namespace (str) : "The instance data will be partitioned based on the
namespace field, If the namespace is enabled, you need to specify the
namespace field name during initialization. Otherwise, the queries cannot
be executed correctly, default is empty.",
table_name="The table name is specified when adding a table after completing
the instance configuration.",
field_name_mapping={
# insert data into opensearch based on the mapping name of the field.
"id": "The id field name map of index document.",
"document": "The text field name map of index document.",
"embedding": "The embedding field name map of index document,"
"the values must be in float16 multivalue type "
"and separated by commas.",
"metadata_x": "The metadata field name map of index document, "
"could specify multiple, The value field contains "
"mapping name and operator, the operator would be "
"used when executing metadata filter query",
},
)
"""
settings = AlibabaCloudOpenSearchSettings(
endpoint="ha-cn-5yd3fhdm102.public.ha.aliyuncs.com",
instance_id="ha-cn-5yd3fhdm102",
username="instance user name",
password="instance password",
table_name="instance table name",
field_name_mapping={
# insert data into opensearch based on the mapping name of the field.
"id": "id",
"document": "document",
"embedding": "embedding",
"string_field": "string_filed,=",
"int_field": "int_filed,=",
"float_field": "float_field,=",
"double_field": "double_field,=",
},
)
embeddings = FakeEmbeddingsWithOsDimension()
def test_create_alibabacloud_opensearch() -> None:
    """Seeding the store makes all three documents retrievable."""
    opensearch = create_alibabacloud_opensearch()
    time.sleep(1)  # give the instance a moment to index
    assert len(opensearch.similarity_search("foo", k=10)) == 3
def test_alibabacloud_opensearch_with_text_query() -> None:
    """Text queries return the matching document with its full metadata."""
    opensearch = create_alibabacloud_opensearch()

    def expect(content: str, s: str, i: int, f: float, d: float) -> List[Document]:
        # Build the single-document result expected for one corpus entry.
        return [
            Document(
                page_content=content,
                metadata={
                    "string_field": s,
                    "int_field": i,
                    "float_field": f,
                    "double_field": d,
                },
            )
        ]

    assert opensearch.similarity_search(query="foo", k=1) == expect(
        "foo", "value1", 1, 1.0, 2.0
    )
    assert opensearch.similarity_search(query="bar", k=1) == expect(
        "bar", "value2", 2, 3.0, 4.0
    )
    assert opensearch.similarity_search(query="baz", k=1) == expect(
        "baz", "value3", 3, 5.0, 6.0
    )
def test_alibabacloud_opensearch_with_vector_query() -> None:
    """Vector queries return the matching document with its full metadata."""
    opensearch = create_alibabacloud_opensearch()

    def expect(content: str, s: str, i: int, f: float, d: float) -> List[Document]:
        # Build the single-document result expected for one corpus entry.
        return [
            Document(
                page_content=content,
                metadata={
                    "string_field": s,
                    "int_field": i,
                    "float_field": f,
                    "double_field": d,
                },
            )
        ]

    assert opensearch.similarity_search_by_vector(
        embeddings.embed_query("foo"), k=1
    ) == expect("foo", "value1", 1, 1.0, 2.0)
    assert opensearch.similarity_search_by_vector(
        embeddings.embed_query("bar"), k=1
    ) == expect("bar", "value2", 2, 3.0, 4.0)
    assert opensearch.similarity_search_by_vector(
        embeddings.embed_query("baz"), k=1
    ) == expect("baz", "value3", 3, 5.0, 6.0)
def test_alibabacloud_opensearch_with_text_and_meta_query() -> None:
    """Metadata filters constrain text search; unmatched filters yield nothing."""
    opensearch = create_alibabacloud_opensearch()

    def expect(content: str, s: str, i: int, f: float, d: float) -> List[Document]:
        # Build the single-document result expected for one corpus entry.
        return [
            Document(
                page_content=content,
                metadata={
                    "string_field": s,
                    "int_field": i,
                    "float_field": f,
                    "double_field": d,
                },
            )
        ]

    assert opensearch.similarity_search(
        query="foo", search_filter={"string_field": "value1"}, k=1
    ) == expect("foo", "value1", 1, 1.0, 2.0)
    assert opensearch.similarity_search(
        query="bar", search_filter={"int_field": 2}, k=1
    ) == expect("bar", "value2", 2, 3.0, 4.0)
    assert opensearch.similarity_search(
        query="baz", search_filter={"float_field": 5.0}, k=1
    ) == expect("baz", "value3", 3, 5.0, 6.0)
    # A filter that matches no document returns an empty result set.
    unmatched = opensearch.similarity_search(
        query="baz", search_filter={"float_field": 6.0}, k=1
    )
    assert len(unmatched) == 0
def test_alibabacloud_opensearch_with_text_and_meta_score_query() -> None:
    """A fully filtered exact match comes back with relevance score 0.0."""
    opensearch = create_alibabacloud_opensearch()
    full_filter = {
        "string_field": "value1",
        "int_field": 1,
        "float_field": 1.0,
        "double_field": 2.0,
    }
    output = opensearch.similarity_search_with_relevance_scores(
        query="foo",
        search_filter=full_filter,
        k=1,
    )
    assert output == [
        (
            Document(
                page_content="foo",
                metadata={
                    "string_field": "value1",
                    "int_field": 1,
                    "float_field": 1.0,
                    "double_field": 2.0,
                },
            ),
            0.0,
        )
    ]
def test_alibabacloud_opensearch_delete_doc() -> None:
    """Deleting by text removes the document from subsequent searches."""
    opensearch = create_alibabacloud_opensearch()
    assert opensearch.delete_documents_with_texts(["bar"])
    time.sleep(1)  # let the deletion propagate before re-querying
    remaining = opensearch.similarity_search(
        query="bar", search_filter={"int_field": 2}, k=1
    )
    assert len(remaining) == 0
def create_alibabacloud_opensearch() -> AlibabaCloudOpenSearch:
    """Build a store seeded with the module corpus and per-document metadata."""
    per_doc_metadata = [
        {
            "string_field": "value1",
            "int_field": 1,
            "float_field": 1.0,
            "double_field": 2.0,
        },
        {
            "string_field": "value2",
            "int_field": 2,
            "float_field": 3.0,
            "double_field": 4.0,
        },
        {
            "string_field": "value3",
            "int_field": 3,
            "float_field": 5.0,
            "double_field": 6.0,
        },
    ]
    return AlibabaCloudOpenSearch.from_texts(
        texts=texts,
        embedding=FakeEmbeddingsWithOsDimension(),
        metadatas=per_doc_metadata,
        config=settings,
    )
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_zep.py | # mypy: disable-error-code=attr-defined
import copy
from random import random
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
from uuid import uuid4
import pytest
from langchain_core.documents import Document
from pytest_mock import MockerFixture
from langchain_community.vectorstores import ZepVectorStore
from langchain_community.vectorstores.zep import CollectionConfig
if TYPE_CHECKING:
from zep_python.document import Document as ZepDocument
from zep_python.document import DocumentCollection
VECTOR_DIMS = 5
def gen_vector() -> List[float]:
    """Return a random embedding vector with VECTOR_DIMS components."""
    return [random() for _ in range(VECTOR_DIMS)]
def gen_mock_zep_document(
    collection_name: str,
    embedding_dimensions: Optional[int] = None,
) -> "ZepDocument":
    """Create a fake ZepDocument, optionally carrying a random embedding."""
    from zep_python.document import Document as ZepDocument

    if embedding_dimensions:
        embedding = [random() for _ in range(embedding_dimensions)]
    else:
        embedding = None
    return ZepDocument(
        uuid=str(uuid4()),
        collection_name=collection_name,
        content="Test Document",
        embedding=embedding,
        metadata={"key": "value"},
    )
@pytest.fixture
def texts_metadatas() -> Dict[str, Any]:
    """kwargs payload of two identical texts with matching metadata."""
    return {
        "texts": ["Test Document", "Test Document"],
        "metadatas": [{"key": "value"}, {"key": "value"}],
    }
@pytest.fixture
def mock_documents() -> List[Document]:
    """Two LangChain Documents mirroring the texts_metadatas fixture."""
    return [
        Document(page_content="Test Document", metadata={"key": "value"}),
        Document(page_content="Test Document", metadata={"key": "value"}),
    ]
@pytest.fixture
def texts_metadatas_as_zep_documents() -> List["ZepDocument"]:
    """The texts_metadatas payload expressed as ZepDocument objects."""
    from zep_python.document import Document as ZepDocument

    return [
        ZepDocument(content="Test Document", metadata={"key": "value"}),
        ZepDocument(content="Test Document", metadata={"key": "value"}),
    ]
@pytest.fixture
def search_results() -> List["ZepDocument"]:
    """Two mock documents as returned by a Zep collection search."""
    return [
        gen_mock_zep_document(
            collection_name="test_collection", embedding_dimensions=VECTOR_DIMS
        ),
        gen_mock_zep_document(
            collection_name="test_collection", embedding_dimensions=VECTOR_DIMS
        ),
    ]
@pytest.fixture
def search_results_with_query_embedding() -> Tuple[List["ZepDocument"], List[float]]:
    """Two mock documents paired with a mock query embedding vector."""
    docs = [
        gen_mock_zep_document(
            collection_name="test_collection", embedding_dimensions=VECTOR_DIMS
        )
        for _ in range(2)
    ]
    return docs, gen_vector()
@pytest.fixture
def mock_collection_config() -> CollectionConfig:
    """Collection config matching the mocked Zep collection's attributes."""
    return CollectionConfig(
        name="test_collection",
        description="Test Collection",
        metadata={"key": "value"},
        embedding_dimensions=VECTOR_DIMS,
        is_auto_embedded=True,
    )
@pytest.fixture
@pytest.mark.requires("zep_python")
def mock_collection(
    mocker: MockerFixture,
    mock_collection_config: CollectionConfig,
    search_results: List[Document],
    search_results_with_query_embedding: Tuple[List[Document], List[float]],
) -> "DocumentCollection":
    """Autospecced DocumentCollection whose search methods return canned data."""
    from zep_python.document import DocumentCollection
    mock_collection: DocumentCollection = mocker.patch(
        "zep_python.document.collections.DocumentCollection", autospec=True
    )
    # Deep copies keep the fixture data pristine if a caller mutates results.
    mock_collection.search.return_value = copy.deepcopy(search_results)
    mock_collection.asearch.return_value = copy.deepcopy(search_results)
    temp_value = copy.deepcopy(search_results_with_query_embedding)
    mock_collection.search_return_query_vector.return_value = copy.deepcopy(temp_value)
    mock_collection.asearch_return_query_vector.return_value = copy.deepcopy(temp_value)
    # Mirror the config values onto the mock, which the vector store reads
    # off the collection object (presumably for validation — confirm).
    mock_collection.name = mock_collection_config.name
    mock_collection.is_auto_embedded = mock_collection_config.is_auto_embedded
    mock_collection.embedding_dimensions = mock_collection_config.embedding_dimensions
    return mock_collection
@pytest.fixture
@pytest.mark.requires("zep_python")
def zep_vectorstore(
    mocker: MockerFixture,
    mock_collection: "DocumentCollection",
    mock_collection_config: CollectionConfig,
) -> ZepVectorStore:
    """ZepVectorStore wired to a fully mocked client and collection."""
    # Patch the document client so get_collection hands back our mock.
    mock_document_client = mocker.patch(
        "zep_python.document.client.DocumentClient", autospec=True
    )
    mock_document_client.get_collection.return_value = mock_collection
    mock_client = mocker.patch("zep_python.ZepClient", autospec=True)
    mock_client.return_value.document = mock_document_client
    vs = ZepVectorStore(
        mock_collection_config.name,
        "http://localhost:8080",
        api_key="test",
        config=mock_collection_config,
    )
    return vs
@pytest.mark.requires("zep_python")
def test_from_texts(
    zep_vectorstore: ZepVectorStore,
    mock_collection_config: CollectionConfig,
    mock_collection: "DocumentCollection",
    texts_metadatas: Dict[str, Any],
    texts_metadatas_as_zep_documents: List["ZepDocument"],
) -> None:
    """from_texts forwards texts and metadata to the collection as ZepDocuments."""
    store = zep_vectorstore.from_texts(
        **texts_metadatas,
        collection_name=mock_collection_config.name,
        api_url="http://localhost:8000",
    )
    store._collection.add_documents.assert_called_once_with(  # type: ignore
        texts_metadatas_as_zep_documents
    )
@pytest.mark.requires("zep_python")
def test_add_documents(
    zep_vectorstore: ZepVectorStore,
    mock_collection: "DocumentCollection",
    mock_documents: List[Document],
    texts_metadatas_as_zep_documents: List["ZepDocument"],
) -> None:
    """add_documents converts LangChain Documents to ZepDocuments."""
    zep_vectorstore.add_documents(mock_documents)
    mock_collection.add_documents.assert_called_once_with(  # type: ignore
        texts_metadatas_as_zep_documents
    )
@pytest.mark.requires("zep_python")
async def test_asearch_similarity(
zep_vectorstore: ZepVectorStore,
) -> None:
r = await zep_vectorstore.asearch(
query="Test Document", search_type="similarity", k=2
)
assert len(r) == 2
assert r[0].page_content == "Test Document"
assert r[0].metadata == {"key": "value"}
@pytest.mark.requires("zep_python")
async def test_asearch_mmr(
zep_vectorstore: ZepVectorStore,
) -> None:
r = await zep_vectorstore.asearch(query="Test Document", search_type="mmr", k=1)
assert len(r) == 1
assert r[0].page_content == "Test Document"
assert r[0].metadata == {"key": "value"}
@pytest.mark.requires("zep_python")
def test_search_similarity(
    zep_vectorstore: ZepVectorStore,
) -> None:
    """Sync similarity search surfaces the mocked documents."""
    results = zep_vectorstore.search(
        query="Test Document", search_type="similarity", k=2
    )
    assert len(results) == 2
    first = results[0]
    assert first.page_content == "Test Document"
    assert first.metadata == {"key": "value"}
@pytest.mark.requires("zep_python")
def test_search_mmr(
    zep_vectorstore: ZepVectorStore,
) -> None:
    """Sync MMR search surfaces one mocked document."""
    results = zep_vectorstore.search(query="Test Document", search_type="mmr", k=1)
    assert len(results) == 1
    first = results[0]
    assert first.page_content == "Test Document"
    assert first.metadata == {"key": "value"}
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_duckdb.py | from typing import Dict, Iterator, List
from uuid import uuid4
import duckdb
import pytest
from langchain_community.vectorstores import DuckDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
@pytest.fixture
def duckdb_connection() -> Iterator[duckdb.DuckDBPyConnection]:
    """Yield an in-memory DuckDB connection, closed after the test."""
    # Setup a temporary DuckDB database
    conn = duckdb.connect(":memory:")
    yield conn
    conn.close()
@pytest.fixture
def embeddings() -> FakeEmbeddings:
    """Fake embeddings instance shared by the DuckDB tests (no real model)."""
    return FakeEmbeddings()
@pytest.fixture
def texts() -> List[str]:
    """Sample corpus: two similar "text" entries plus one dissimilar "item"."""
    return ["text 1", "text 2", "item 3"]
@pytest.fixture
def metadatas() -> List[Dict[str, str]]:
    """One metadata dict per sample text, each tagged with a source label."""
    return [{"source": f"Document {n}"} for n in (1, 2, 3)]
@pytest.mark.requires("duckdb")
def test_duckdb_with_connection(
    duckdb_connection: duckdb.DuckDBPyConnection,
    embeddings: FakeEmbeddings,
    texts: List[str],
) -> None:
    """A store built on an explicit connection can index and retrieve texts."""
    store = DuckDB(
        connection=duckdb_connection, embedding=embeddings, table_name="test_table"
    )
    store.add_texts(texts)
    matches = store.similarity_search("text 1")
    assert "text 1" in [doc.page_content for doc in matches]
@pytest.mark.requires("duckdb")
def test_duckdb_without_connection(
    embeddings: FakeEmbeddings, texts: List[str]
) -> None:
    """DuckDB creates its own connection when none is supplied."""
    store = DuckDB(embedding=embeddings, table_name="test_table")
    store.add_texts(texts)
    matches = store.similarity_search("text 1")
    assert "text 1" in [doc.page_content for doc in matches]
@pytest.mark.requires("duckdb")
def test_duckdb_add_texts(embeddings: FakeEmbeddings) -> None:
    """Texts added after construction become searchable."""
    store = DuckDB(embedding=embeddings, table_name="test_table")
    store.add_texts(["text 2"])
    hits = [doc.page_content for doc in store.similarity_search("text 2")]
    assert "text 2" in hits
@pytest.mark.requires("duckdb")
def test_duckdb_add_texts_with_metadata(
    duckdb_connection: duckdb.DuckDBPyConnection, embeddings: FakeEmbeddings
) -> None:
    """Metadata passed to add_texts is stored and returned with each hit."""
    store = DuckDB(
        connection=duckdb_connection,
        embedding=embeddings,
        table_name="test_table_with_metadata",
    )
    texts = ["text with metadata 1", "text with metadata 2"]
    metadatas = [
        {"author": "Author 1", "date": "2021-01-01"},
        {"author": "Author 2", "date": "2021-02-01"},
    ]
    # Add texts along with their metadata
    store.add_texts(texts, metadatas=metadatas)
    # Perform a similarity search to retrieve the documents
    result = store.similarity_search("text with metadata", k=2)
    # Check if the metadata is correctly associated with the texts.
    # NOTE(review): the result-order assumption (text 1 before text 2) relies
    # on FakeEmbeddings producing that ranking — confirm if embeddings change.
    assert len(result) == 2, "Should return two results"
    assert (
        result[0].metadata.get("author") == "Author 1"
    ), "Metadata for Author 1 should be correctly retrieved"
    assert (
        result[0].metadata.get("date") == "2021-01-01"
    ), "Date for Author 1 should be correctly retrieved"
    assert (
        result[1].metadata.get("author") == "Author 2"
    ), "Metadata for Author 2 should be correctly retrieved"
    assert (
        result[1].metadata.get("date") == "2021-02-01"
    ), "Date for Author 2 should be correctly retrieved"
@pytest.mark.requires("duckdb")
def test_duckdb_add_texts_with_predefined_ids(
    duckdb_connection: duckdb.DuckDBPyConnection, embeddings: FakeEmbeddings
) -> None:
    """Texts inserted with caller-supplied IDs remain retrievable by search."""
    store = DuckDB(
        connection=duckdb_connection,
        embedding=embeddings,
        table_name="test_table_predefined_ids",
    )
    texts = ["unique text 1", "unique text 2"]
    predefined_ids = [str(uuid4()), str(uuid4())]  # Generate unique IDs
    # Add texts with the predefined IDs
    store.add_texts(texts, ids=predefined_ids)
    # Perform a similarity search for each text and check if it's found
    for text in texts:
        result = store.similarity_search(text)
        found_texts = [doc.page_content for doc in result]
        assert (
            text in found_texts
        ), f"Text '{text}' was not found in the search results."
@pytest.mark.requires("duckdb")
def test_duckdb_from_texts(
    duckdb_connection: duckdb.DuckDBPyConnection,
    embeddings: FakeEmbeddings,
    texts: List[str],
    metadatas: List[Dict[str, str]],
) -> None:
    """The from_texts constructor populates the store and keeps metadata."""
    # Initialize DuckDB from texts using the from_texts class method
    store = DuckDB.from_texts(
        texts=texts,
        embedding=embeddings,
        metadatas=metadatas,
        connection=duckdb_connection,
        table_name="test_from_texts_table",
    )
    # Perform a similarity search to retrieve the documents
    query_text = "sample text"
    result = store.similarity_search(query_text, k=2)
    # Verify that the vector store was populated and can return results
    assert len(result) > 0, "Should return at least one result"
    # Optionally, check that metadata is correctly associated with the texts
    for doc in result:
        assert "source" in doc.metadata, "Document metadata should include 'source' key"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_mongodb_atlas.py | """Test MongoDB Atlas Vector Search functionality."""
from __future__ import annotations
import os
from time import sleep
from typing import Any
import pytest
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_community.vectorstores.mongodb_atlas import MongoDBAtlasVectorSearch
INDEX_NAME = "langchain-test-index"
NAMESPACE = "langchain_test_db.langchain_test_collection"
CONNECTION_STRING = os.environ.get("MONGODB_ATLAS_URI")
DB_NAME, COLLECTION_NAME = NAMESPACE.split(".")
def get_collection() -> Any:
    """Return the shared test collection via a fresh MongoClient each call."""
    from pymongo import MongoClient

    test_client: MongoClient = MongoClient(CONNECTION_STRING)
    return test_client[DB_NAME][COLLECTION_NAME]
@pytest.fixture()
def collection() -> Any:
    """Pytest fixture wrapper around get_collection()."""
    return get_collection()
class TestMongoDBAtlasVectorSearch:
    """Integration tests for MongoDBAtlasVectorSearch against a live Atlas
    cluster (connection string from MONGODB_ATLAS_URI)."""

    @classmethod
    def setup_class(cls) -> None:
        """Verify the test collection starts empty before the suite runs."""
        # ensure the test collection is empty
        collection = get_collection()
        assert collection.count_documents({}) == 0  # type: ignore[index]

    @classmethod
    def teardown_class(cls) -> None:
        """Remove all documents once the suite finishes."""
        collection = get_collection()
        # delete all the documents in the collection
        collection.delete_many({})  # type: ignore[index]

    @pytest.fixture(autouse=True)
    def setup(self) -> None:
        """Empty the collection before every test for isolation."""
        collection = get_collection()
        # delete all the documents in the collection
        collection.delete_many({})  # type: ignore[index]

    def test_from_documents(
        self, embedding_openai: Embeddings, collection: Any
    ) -> None:
        """Test end to end construction and search."""
        documents = [
            Document(page_content="Dogs are tough.", metadata={"a": 1}),
            Document(page_content="Cats have fluff.", metadata={"b": 1}),
            Document(page_content="What is a sandwich?", metadata={"c": 1}),
            Document(page_content="That fence is purple.", metadata={"d": 1, "e": 2}),
        ]
        vectorstore = MongoDBAtlasVectorSearch.from_documents(
            documents,
            embedding_openai,
            collection=collection,
            index_name=INDEX_NAME,
        )
        sleep(1)  # waits for mongot to update Lucene's index
        output = vectorstore.similarity_search("Sandwich", k=1)
        assert output[0].page_content == "What is a sandwich?"
        assert output[0].metadata["c"] == 1

    def test_from_texts(self, embedding_openai: Embeddings, collection: Any) -> None:
        """from_texts without metadata still supports similarity search."""
        texts = [
            "Dogs are tough.",
            "Cats have fluff.",
            "What is a sandwich?",
            "That fence is purple.",
        ]
        vectorstore = MongoDBAtlasVectorSearch.from_texts(
            texts,
            embedding_openai,
            collection=collection,
            index_name=INDEX_NAME,
        )
        sleep(1)  # waits for mongot to update Lucene's index
        output = vectorstore.similarity_search("Sandwich", k=1)
        assert output[0].page_content == "What is a sandwich?"

    def test_from_texts_with_metadatas(
        self, embedding_openai: Embeddings, collection: Any
    ) -> None:
        """Per-text metadata is stored and returned with the matching hit."""
        texts = [
            "Dogs are tough.",
            "Cats have fluff.",
            "What is a sandwich?",
            "The fence is purple.",
        ]
        metadatas = [{"a": 1}, {"b": 1}, {"c": 1}, {"d": 1, "e": 2}]
        vectorstore = MongoDBAtlasVectorSearch.from_texts(
            texts,
            embedding_openai,
            metadatas=metadatas,
            collection=collection,
            index_name=INDEX_NAME,
        )
        sleep(1)  # waits for mongot to update Lucene's index
        output = vectorstore.similarity_search("Sandwich", k=1)
        assert output[0].page_content == "What is a sandwich?"
        assert output[0].metadata["c"] == 1

    def test_from_texts_with_metadatas_and_pre_filter(
        self, embedding_openai: Embeddings, collection: Any
    ) -> None:
        """A pre_filter excluding the best match yields an empty result."""
        texts = [
            "Dogs are tough.",
            "Cats have fluff.",
            "What is a sandwich?",
            "The fence is purple.",
        ]
        metadatas = [{"a": 1}, {"b": 1}, {"c": 1}, {"d": 1, "e": 2}]
        vectorstore = MongoDBAtlasVectorSearch.from_texts(
            texts,
            embedding_openai,
            metadatas=metadatas,
            collection=collection,
            index_name=INDEX_NAME,
        )
        sleep(1)  # waits for mongot to update Lucene's index
        # filter requires c <= 0, but the sandwich doc has c == 1
        output = vectorstore.similarity_search(
            "Sandwich", k=1, pre_filter={"range": {"lte": 0, "path": "c"}}
        )
        assert output == []

    def test_mmr(self, embedding_openai: Embeddings, collection: Any) -> None:
        """MMR with a low lambda_mult promotes diversity after the top hit."""
        texts = ["foo", "foo", "fou", "foy"]
        vectorstore = MongoDBAtlasVectorSearch.from_texts(
            texts,
            embedding_openai,
            collection=collection,
            index_name=INDEX_NAME,
        )
        sleep(1)  # waits for mongot to update Lucene's index
        query = "foo"
        output = vectorstore.max_marginal_relevance_search(query, k=10, lambda_mult=0.1)
        assert len(output) == len(texts)
        assert output[0].page_content == "foo"
        assert output[1].page_content != "foo"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_meilisearch.py | """Test Meilisearch functionality."""
from typing import TYPE_CHECKING, Any, Dict, Generator
import pytest
import requests
from langchain_core.documents import Document
from langchain_community.vectorstores import Meilisearch
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
if TYPE_CHECKING:
import meilisearch
INDEX_NAME = "test-langchain-demo"
TEST_MEILI_HTTP_ADDR = "http://localhost:7700"
TEST_MEILI_MASTER_KEY = "masterKey"
class TestMeilisearchVectorSearch:
    """Integration tests for the Meilisearch vector store against a local
    instance at TEST_MEILI_HTTP_ADDR."""

    @pytest.fixture(scope="class", autouse=True)
    def enable_vector_search(self) -> Generator[str, None, None]:
        """Enable Meilisearch's experimental vectorStore feature for the
        class, and disable it again afterwards."""
        requests.patch(
            f"{TEST_MEILI_HTTP_ADDR}/experimental-features",
            headers={"Authorization": f"Bearer {TEST_MEILI_MASTER_KEY}"},
            json={"vectorStore": True},
            timeout=10,
        )
        yield "done"
        requests.patch(
            f"{TEST_MEILI_HTTP_ADDR}/experimental-features",
            headers={"Authorization": f"Bearer {TEST_MEILI_MASTER_KEY}"},
            json={"vectorStore": False},
            timeout=10,
        )

    @pytest.fixture
    def new_embedders(self) -> Dict[str, Dict[str, Any]]:
        """Embedder config whose dimension matches FakeEmbeddings output."""
        return {
            "default": {
                "source": "userProvided",
                # Dimension defined in FakeEmbeddings as [float(1.0)] * 9 + [float(0.0)]
                "dimensions": 10,
            }
        }

    @pytest.fixture(autouse=True)
    def setup(self) -> None:
        """Start every test from a clean instance (no indexes)."""
        self.delete_all_indexes()

    @pytest.fixture(scope="class", autouse=True)
    def teardown_test(self) -> Generator[str, None, None]:
        # Yields back to the test function.
        yield "done"
        self.delete_all_indexes()

    def delete_all_indexes(self) -> None:
        """Delete every index on the instance, waiting for each deletion."""
        client = self.client()
        # Deletes all the indexes in the Meilisearch instance.
        indexes = client.get_indexes()
        for index in indexes["results"]:
            task = client.index(index.uid).delete()
            client.wait_for_task(task.task_uid)

    def client(self) -> "meilisearch.Client":
        """Build a Meilisearch client for the local test instance."""
        import meilisearch

        return meilisearch.Client(TEST_MEILI_HTTP_ADDR, TEST_MEILI_MASTER_KEY)

    def _wait_last_task(self) -> None:
        """Block until the most recently enqueued task has completed."""
        client = self.client()
        # Get the last task
        tasks = client.get_tasks()
        # Wait for the last task to be completed
        client.wait_for_task(tasks.results[0].uid)

    def test_meilisearch(self, new_embedders: Dict[str, Any]) -> None:
        """Test end to end construction and search."""
        texts = ["foo", "bar", "baz"]
        vectorstore = Meilisearch.from_texts(
            texts=texts,
            embedding=FakeEmbeddings(),
            embedders=new_embedders,
            embedder_name=list(new_embedders)[0],
            url=TEST_MEILI_HTTP_ADDR,
            api_key=TEST_MEILI_MASTER_KEY,
            index_name=INDEX_NAME,
        )
        self._wait_last_task()
        output = vectorstore.similarity_search("foo", k=1)
        assert output == [Document(page_content="foo")]

    def test_meilisearch_with_client(self, new_embedders: Dict[str, Any]) -> None:
        """Test end to end construction and search."""
        texts = ["foo", "bar", "baz"]
        vectorstore = Meilisearch.from_texts(
            texts=texts,
            embedding=FakeEmbeddings(),
            embedders=new_embedders,
            embedder_name=list(new_embedders)[0],
            client=self.client(),
            index_name=INDEX_NAME,
        )
        self._wait_last_task()
        output = vectorstore.similarity_search("foo", k=1)
        assert output == [Document(page_content="foo")]

    def test_meilisearch_with_metadatas(self, new_embedders: Dict[str, Any]) -> None:
        """Test end to end construction and search."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = Meilisearch.from_texts(
            texts=texts,
            embedding=FakeEmbeddings(),
            embedders=new_embedders,
            embedder_name=list(new_embedders)[0],
            url=TEST_MEILI_HTTP_ADDR,
            api_key=TEST_MEILI_MASTER_KEY,
            index_name=INDEX_NAME,
            metadatas=metadatas,
        )
        self._wait_last_task()
        output = docsearch.similarity_search("foo", k=1)
        assert len(output) == 1
        assert output[0].page_content == "foo"
        assert output[0].metadata["page"] == 0
        assert output == [Document(page_content="foo", metadata={"page": 0})]

    def test_meilisearch_with_metadatas_with_scores(
        self, new_embedders: Dict[str, Any]
    ) -> None:
        """Test end to end construction and scored search."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": str(i)} for i in range(len(texts))]
        docsearch = Meilisearch.from_texts(
            texts=texts,
            embedding=FakeEmbeddings(),
            embedders=new_embedders,
            embedder_name=list(new_embedders)[0],
            url=TEST_MEILI_HTTP_ADDR,
            api_key=TEST_MEILI_MASTER_KEY,
            index_name=INDEX_NAME,
            metadatas=metadatas,
        )
        self._wait_last_task()
        output = docsearch.similarity_search_with_score("foo", k=1)
        assert output == [(Document(page_content="foo", metadata={"page": "0"}), 1.0)]

    def test_meilisearch_with_metadatas_with_scores_using_vector(
        self, new_embedders: Dict[str, Any]
    ) -> None:
        """Test end to end construction and scored search, using embedding vector."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": str(i)} for i in range(len(texts))]
        embeddings = FakeEmbeddings()
        docsearch = Meilisearch.from_texts(
            texts=texts,
            embedding=FakeEmbeddings(),
            embedders=new_embedders,
            embedder_name=list(new_embedders)[0],
            url=TEST_MEILI_HTTP_ADDR,
            api_key=TEST_MEILI_MASTER_KEY,
            index_name=INDEX_NAME,
            metadatas=metadatas,
        )
        embedded_query = embeddings.embed_query("foo")
        self._wait_last_task()
        output = docsearch.similarity_search_by_vector_with_scores(
            embedding=embedded_query, k=1
        )
        assert output == [(Document(page_content="foo", metadata={"page": "0"}), 1.0)]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_sqlitevec.py | from typing import List, Optional
import pytest
from langchain_core.documents import Document
from langchain_community.vectorstores import SQLiteVec
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
def _sqlite_vec_from_texts(
    metadatas: Optional[List[dict]] = None, drop: bool = True
) -> SQLiteVec:
    """Build an in-memory SQLiteVec store over the shared fake texts.

    NOTE(review): the ``drop`` parameter is accepted but never used below —
    confirm whether it should be forwarded to ``from_texts`` or removed.
    """
    return SQLiteVec.from_texts(
        fake_texts,
        FakeEmbeddings(),
        metadatas=metadatas,
        table="test",
        db_file=":memory:",
    )
@pytest.mark.requires("sqlite-vec")
def test_sqlitevec() -> None:
    """End-to-end: build the store and retrieve the closest document."""
    store = _sqlite_vec_from_texts()
    top = store.similarity_search("foo", k=1)
    assert top == [Document(page_content="foo", metadata={})]
@pytest.mark.requires("sqlite-vec")
def test_sqlitevec_with_score() -> None:
    """Scored search returns documents with their metadata, ordered by
    strictly increasing distance."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = _sqlite_vec_from_texts(metadatas=metadatas)
    output = docsearch.similarity_search_with_score("foo", k=3)
    docs = [o[0] for o in output]
    distances = [o[1] for o in output]
    assert docs == [
        Document(page_content="foo", metadata={"page": 0}),
        Document(page_content="bar", metadata={"page": 1}),
        Document(page_content="baz", metadata={"page": 2}),
    ]
    # distances are sorted ascending: the best match is closest
    assert distances[0] < distances[1] < distances[2]
@pytest.mark.requires("sqlite-vec")
def test_sqlitevec_add_extra() -> None:
    """Adding the same three texts a second time doubles the row count
    (duplicates are kept, not deduplicated)."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = _sqlite_vec_from_texts(metadatas=metadatas)
    docsearch.add_texts(texts, metadatas)
    output = docsearch.similarity_search("foo", k=10)
    assert len(output) == 6
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_singlestoredb.py | """Test SingleStoreDB functionality."""
import math
import os
import tempfile
from typing import List
import numpy as np
import pytest
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_community.vectorstores.singlestoredb import SingleStoreDB
from langchain_community.vectorstores.utils import DistanceStrategy
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
TEST_SINGLESTOREDB_URL = "root:pass@localhost:3306/db"
TEST_SINGLE_RESULT = [Document(page_content="foo")]
TEST_SINGLE_WITH_METADATA_RESULT = [Document(page_content="foo", metadata={"a": "b"})]
TEST_RESULT = [Document(page_content="foo"), Document(page_content="foo")]
TEST_IMAGES_DIR = ""
try:
import singlestoredb as s2
singlestoredb_installed = True
except ImportError:
singlestoredb_installed = False
try:
from langchain_experimental.open_clip import OpenCLIPEmbeddings
langchain_experimental_installed = True
except ImportError:
langchain_experimental_installed = False
def drop(table_name: str) -> None:
    """Drop ``table_name`` from the test database if it exists."""
    with s2.connect(TEST_SINGLESTOREDB_URL) as conn:
        conn.autocommit(True)
        with conn.cursor() as cursor:
            cursor.execute(f"DROP TABLE IF EXISTS {table_name};")
class NormilizedFakeEmbeddings(FakeEmbeddings):
    """Fake embeddings with normalization. For testing purposes.

    NOTE(review): the class name misspells "Normalized"; kept as-is because
    many tests in this module reference it.
    """

    def normalize(self, vector: List[float]) -> List[float]:
        """Normalize vector."""
        return [float(v / np.linalg.norm(vector)) for v in vector]

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed texts via the parent class, then unit-normalize each vector."""
        return [self.normalize(v) for v in super().embed_documents(texts)]

    def embed_query(self, text: str) -> List[float]:
        """Embed the query via the parent class, then unit-normalize it."""
        return self.normalize(super().embed_query(text))
class RandomEmbeddings(Embeddings):
    """Fake embeddings with random vectors. For testing purposes.

    Vectors are 100-dimensional and non-deterministic (np.random is not
    seeded), so tests using this class cannot assert exact rankings.
    """

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Return one random 100-d vector per input text."""
        return [np.random.rand(100).tolist() for _ in texts]

    def embed_query(self, text: str) -> List[float]:
        """Return a random 100-d query vector."""
        return np.random.rand(100).tolist()

    def embed_image(self, uris: List[str]) -> List[List[float]]:
        """Return one random 100-d vector per image URI."""
        return [np.random.rand(100).tolist() for _ in uris]
class IncrementalEmbeddings(Embeddings):
    """Fake embeddings with incremental vectors. For testing purposes.

    Each call to embed_query advances an internal counter and returns the
    2-d point (cos(k*pi/10), sin(k*pi/10)), i.e. successive calls walk
    around the unit circle — the class is stateful and NOT idempotent.
    """

    def __init__(self) -> None:
        self.counter = 0

    def set_counter(self, counter: int) -> None:
        """Reset the internal counter to a known position."""
        self.counter = counter

    def embed_query(self, text: str) -> List[float]:
        # Side effect: advances the counter before computing the vector.
        self.counter += 1
        return [
            math.cos(self.counter * math.pi / 10),
            math.sin(self.counter * math.pi / 10),
        ]

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed each text in order, advancing the counter once per text."""
        return [self.embed_query(text) for text in texts]
@pytest.fixture
def texts() -> List[str]:
    """Minimal three-document corpus shared by the SingleStoreDB tests."""
    return ["foo", "bar", "baz"]
@pytest.fixture
def snow_rain_docs() -> List[Document]:
    """Six documents (three about rain, three about snow), each tagged with
    count/category/group metadata for filter and full-text-search tests."""
    return [
        Document(
            page_content="""In the parched desert, a sudden rainstorm brought relief,
            as the droplets danced upon the thirsty earth, rejuvenating the landscape
            with the sweet scent of petrichor.""",
            metadata={"count": "1", "category": "rain", "group": "a"},
        ),
        Document(
            page_content="""Amidst the bustling cityscape, the rain fell relentlessly,
            creating a symphony of pitter-patter on the pavement, while umbrellas
            bloomed like colorful flowers in a sea of gray.""",
            metadata={"count": "2", "category": "rain", "group": "a"},
        ),
        Document(
            page_content="""High in the mountains, the rain transformed into a delicate
            mist, enveloping the peaks in a mystical veil, where each droplet seemed to
            whisper secrets to the ancient rocks below.""",
            metadata={"count": "3", "category": "rain", "group": "b"},
        ),
        Document(
            page_content="""Blanketing the countryside in a soft, pristine layer, the
            snowfall painted a serene tableau, muffling the world in a tranquil hush
            as delicate flakes settled upon the branches of trees like nature's own
            lacework.""",
            metadata={"count": "1", "category": "snow", "group": "b"},
        ),
        Document(
            page_content="""In the urban landscape, snow descended, transforming
            bustling streets into a winter wonderland, where the laughter of
            children echoed amidst the flurry of snowballs and the twinkle of
            holiday lights.""",
            metadata={"count": "2", "category": "snow", "group": "a"},
        ),
        Document(
            page_content="""Atop the rugged peaks, snow fell with an unyielding
            intensity, sculpting the landscape into a pristine alpine paradise,
            where the frozen crystals shimmered under the moonlight, casting a
            spell of enchantment over the wilderness below.""",
            metadata={"count": "3", "category": "snow", "group": "a"},
        ),
    ]
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb(texts: List[str]) -> None:
    """Test end to end construction and search."""
    table_name = "test_singlestoredb"
    drop(table_name)
    store = SingleStoreDB.from_texts(
        texts,
        NormilizedFakeEmbeddings(),
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    assert store.similarity_search("foo", k=1) == TEST_SINGLE_RESULT
    store.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_new_vector(texts: List[str]) -> None:
    """Adding a duplicate text makes it appear twice in search results."""
    table_name = "test_singlestoredb_new_vector"
    drop(table_name)
    docsearch = SingleStoreDB.from_texts(
        texts,
        NormilizedFakeEmbeddings(),
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    docsearch.add_texts(["foo"])
    output = docsearch.similarity_search("foo", k=2)
    assert output == TEST_RESULT
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_euclidean_distance(texts: List[str]) -> None:
    """Search works with the EUCLIDEAN_DISTANCE strategy."""
    table_name = "test_singlestoredb_euclidean_distance"
    drop(table_name)
    docsearch = SingleStoreDB.from_texts(
        texts,
        FakeEmbeddings(),
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    docsearch.add_texts(["foo"])
    output = docsearch.similarity_search("foo", k=2)
    assert output == TEST_RESULT
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_vector_index_1(texts: List[str]) -> None:
    """Search works with a vector index enabled (default index options)."""
    table_name = "test_singlestoredb_vector_index_1"
    drop(table_name)
    docsearch = SingleStoreDB.from_texts(
        texts,
        FakeEmbeddings(),
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
        table_name=table_name,
        use_vector_index=True,
        vector_size=10,
        host=TEST_SINGLESTOREDB_URL,
    )
    docsearch.add_texts(["foo"])
    output = docsearch.similarity_search("foo", k=2)
    assert output == TEST_RESULT
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_vector_index_2(texts: List[str]) -> None:
    """Search works with an IVF_PQ vector index configured via options."""
    table_name = "test_singlestoredb_vector_index_2"
    drop(table_name)
    docsearch = SingleStoreDB.from_texts(
        texts,
        FakeEmbeddings(),
        table_name=table_name,
        use_vector_index=True,
        vector_index_options={"index_type": "IVF_PQ", "nlist": 256},
        vector_size=10,
        host=TEST_SINGLESTOREDB_URL,
    )
    docsearch.add_texts(["foo"])
    output = docsearch.similarity_search("foo", k=1)
    # Bug fix: this line was previously a bare comparison expression with no
    # `assert`, so the check never actually ran.
    assert output[0].page_content == "foo"
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_vector_index_large() -> None:
    """A larger corpus (30 rows) works with a named vector index."""
    table_name = "test_singlestoredb_vector_index_large"
    drop(table_name)
    docsearch = SingleStoreDB.from_texts(
        ["foo"] * 30,
        RandomEmbeddings(),
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
        table_name=table_name,
        use_vector_index=True,
        vector_size=100,
        vector_index_name="vector_index_large",
        host=TEST_SINGLESTOREDB_URL,
    )
    output = docsearch.similarity_search("foo", k=1)
    assert output[0].page_content == "foo"
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_from_existing(texts: List[str]) -> None:
    """A second store constructed over an existing table can search it."""
    table_name = "test_singlestoredb_from_existing"
    drop(table_name)
    SingleStoreDB.from_texts(
        texts,
        NormilizedFakeEmbeddings(),
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    # Test creating from an existing table. Consistency fix: reuse the
    # table_name variable instead of repeating the literal, so the two
    # names cannot drift apart.
    docsearch2 = SingleStoreDB(
        NormilizedFakeEmbeddings(),
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    output = docsearch2.similarity_search("foo", k=1)
    assert output == TEST_SINGLE_RESULT
    docsearch2.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_from_documents(texts: List[str]) -> None:
    """Test from_documents constructor."""
    table_name = "test_singlestoredb_from_documents"
    drop(table_name)
    docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts]
    docsearch = SingleStoreDB.from_documents(
        docs,
        NormilizedFakeEmbeddings(),
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    output = docsearch.similarity_search("foo", k=1)
    assert output == TEST_SINGLE_WITH_METADATA_RESULT
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_add_texts_to_existing(texts: List[str]) -> None:
    """A store opened over an existing table can append new texts."""
    table_name = "test_singlestoredb_add_texts_to_existing"
    drop(table_name)
    # Test creating from an existing
    SingleStoreDB.from_texts(
        texts,
        NormilizedFakeEmbeddings(),
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    docsearch = SingleStoreDB(
        NormilizedFakeEmbeddings(),
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    docsearch.add_texts(["foo"])
    output = docsearch.similarity_search("foo", k=2)
    assert output == TEST_RESULT
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_filter_metadata(texts: List[str]) -> None:
    """Test filtering by metadata"""
    table_name = "test_singlestoredb_filter_metadata"
    drop(table_name)
    docs = [
        Document(page_content=t, metadata={"index": i}) for i, t in enumerate(texts)
    ]
    docsearch = SingleStoreDB.from_documents(
        docs,
        FakeEmbeddings(),
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    # the filter overrides similarity: "baz" (index 2) wins despite query "foo"
    output = docsearch.similarity_search("foo", k=1, filter={"index": 2})
    assert output == [Document(page_content="baz", metadata={"index": 2})]
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_filter_metadata_2(texts: List[str]) -> None:
    """Test filtering by metadata field that is similar for each document"""
    table_name = "test_singlestoredb_filter_metadata_2"
    drop(table_name)
    docs = [
        Document(page_content=t, metadata={"index": i, "category": "budget"})
        for i, t in enumerate(texts)
    ]
    docsearch = SingleStoreDB.from_documents(
        docs,
        FakeEmbeddings(),
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    # every doc matches the filter, so similarity alone picks "foo"
    output = docsearch.similarity_search("foo", k=1, filter={"category": "budget"})
    assert output == [
        Document(page_content="foo", metadata={"index": 0, "category": "budget"})
    ]
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_filter_metadata_3(texts: List[str]) -> None:
    """Test filtering by two metadata fields"""
    table_name = "test_singlestoredb_filter_metadata_3"
    drop(table_name)
    docs = [
        Document(page_content=t, metadata={"index": i, "category": "budget"})
        for i, t in enumerate(texts)
    ]
    docsearch = SingleStoreDB.from_documents(
        docs,
        FakeEmbeddings(),
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    # both filter fields must match; only "bar" has index == 1
    output = docsearch.similarity_search(
        "foo", k=1, filter={"category": "budget", "index": 1}
    )
    assert output == [
        Document(page_content="bar", metadata={"index": 1, "category": "budget"})
    ]
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_filter_metadata_4(texts: List[str]) -> None:
    """Test no matches"""
    table_name = "test_singlestoredb_filter_metadata_4"
    drop(table_name)
    docs = [
        Document(page_content=t, metadata={"index": i, "category": "budget"})
        for i, t in enumerate(texts)
    ]
    docsearch = SingleStoreDB.from_documents(
        docs,
        FakeEmbeddings(),
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    # no document has category "vacation", so the search returns nothing
    output = docsearch.similarity_search("foo", k=1, filter={"category": "vacation"})
    assert output == []
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_filter_metadata_5(texts: List[str]) -> None:
    """Test complex metadata path"""
    table_name = "test_singlestoredb_filter_metadata_5"
    drop(table_name)
    docs = [
        Document(
            page_content=t,
            metadata={
                "index": i,
                "category": "budget",
                "subfield": {"subfield": {"idx": i, "other_idx": i + 1}},
            },
        )
        for i, t in enumerate(texts)
    ]
    docsearch = SingleStoreDB.from_documents(
        docs,
        FakeEmbeddings(),
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    # filters can match nested dict paths (subfield.subfield.idx == 2)
    output = docsearch.similarity_search(
        "foo", k=1, filter={"category": "budget", "subfield": {"subfield": {"idx": 2}}}
    )
    assert output == [
        Document(
            page_content="baz",
            metadata={
                "index": 2,
                "category": "budget",
                "subfield": {"subfield": {"idx": 2, "other_idx": 3}},
            },
        )
    ]
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_filter_metadata_6(texts: List[str]) -> None:
    """Test filtering by a boolean metadata value."""
    table_name = "test_singlestoredb_filter_metadata_6"
    drop(table_name)
    docs = [
        Document(
            page_content=t,
            metadata={"index": i, "category": "budget", "is_good": i == 1},
        )
        for i, t in enumerate(texts)
    ]
    docsearch = SingleStoreDB.from_documents(
        docs,
        FakeEmbeddings(),
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    # only "bar" (index 1) was tagged is_good=True
    output = docsearch.similarity_search(
        "foo", k=1, filter={"category": "budget", "is_good": True}
    )
    assert output == [
        Document(
            page_content="bar",
            metadata={"index": 1, "category": "budget", "is_good": True},
        )
    ]
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_filter_metadata_7(texts: List[str]) -> None:
    """Test filtering by float"""
    table_name = "test_singlestoredb_filter_metadata_7"
    drop(table_name)
    docs = [
        Document(
            page_content=t,
            metadata={"index": i, "category": "budget", "score": i + 0.5},
        )
        for i, t in enumerate(texts)
    ]
    docsearch = SingleStoreDB.from_documents(
        docs,
        FakeEmbeddings(),
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    # only "baz" (index 2) has score == 2.5
    output = docsearch.similarity_search(
        "bar", k=1, filter={"category": "budget", "score": 2.5}
    )
    assert output == [
        Document(
            page_content="baz",
            metadata={"index": 2, "category": "budget", "score": 2.5},
        )
    ]
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_as_retriever(texts: List[str]) -> None:
    """The as_retriever wrapper honors search_kwargs (k=2) and returns docs
    in similarity order."""
    table_name = "test_singlestoredb_8"
    drop(table_name)
    docsearch = SingleStoreDB.from_texts(
        texts,
        FakeEmbeddings(),
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    retriever = docsearch.as_retriever(search_kwargs={"k": 2})
    output = retriever.invoke("foo")
    assert output == [
        Document(
            page_content="foo",
        ),
        Document(
            page_content="bar",
        ),
    ]
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_add_image(texts: List[str]) -> None:
    """add_images stores each image's path as the document page_content."""
    table_name = "test_singlestoredb_add_image"
    drop(table_name)
    docsearch = SingleStoreDB(
        RandomEmbeddings(),
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    temp_files = []
    try:
        for _ in range(3):
            temp_file = tempfile.NamedTemporaryFile(delete=False)
            temp_file.write(b"foo")
            temp_file.close()
            temp_files.append(temp_file.name)
        docsearch.add_images(temp_files)
        output = docsearch.similarity_search("foo", k=1)
        assert output[0].page_content in temp_files
    finally:
        # Fix: delete=False files were previously leaked on disk; remove them
        # even when an assertion or the store call fails.
        for path in temp_files:
            try:
                os.remove(path)
            except OSError:
                pass
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
@pytest.mark.skipif(
    not langchain_experimental_installed, reason="langchain_experimental not installed"
)
# NOTE(review): function name has a typo ("singestoredb"); kept because pytest
# discovers tests by name and renaming would change the reported test id.
def test_singestoredb_add_image2() -> None:
    """Test image search with real CLIP embeddings over the test image set."""
    table_name = "test_singlestoredb_add_images"
    drop(table_name)
    docsearch = SingleStoreDB(
        OpenCLIPEmbeddings(),  # type: ignore[call-arg, call-arg, call-arg]
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    image_uris = sorted(
        [
            os.path.join(TEST_IMAGES_DIR, image_name)
            for image_name in os.listdir(TEST_IMAGES_DIR)
            if image_name.endswith(".jpg")
        ]
    )
    docsearch.add_images(image_uris)
    # CLIP should rank an image whose filename contains "horse" first.
    output = docsearch.similarity_search("horse", k=1)
    assert "horse" in output[0].page_content
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_text_only_search(snow_rain_docs: List[Document]) -> None:
    """Test the TEXT_ONLY strategy: full-text ranking, no vector distance."""
    table_name = "test_singlestoredb_text_only_search"
    drop(table_name)
    docsearch = SingleStoreDB(
        RandomEmbeddings(),
        table_name=table_name,
        use_full_text_search=True,
        host=TEST_SINGLESTOREDB_URL,
    )
    docsearch.add_documents(snow_rain_docs)
    # Metadata filter still applies in text-only mode.
    output = docsearch.similarity_search(
        "rainstorm in parched desert",
        k=3,
        filter={"count": "1"},
        search_strategy=SingleStoreDB.SearchStrategy.TEXT_ONLY,
    )
    assert len(output) == 2
    assert (
        "In the parched desert, a sudden rainstorm brought relief,"
        in output[0].page_content
    )
    assert (
        "Blanketing the countryside in a soft, pristine layer" in output[1].page_content
    )
    # Without a filter, all three matching documents are returned.
    output = docsearch.similarity_search(
        "snowfall in countryside",
        k=3,
        search_strategy=SingleStoreDB.SearchStrategy.TEXT_ONLY,
    )
    assert len(output) == 3
    assert (
        "Blanketing the countryside in a soft, pristine layer,"
        in output[0].page_content
    )
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_filter_by_text_search(snow_rain_docs: List[Document]) -> None:
    """Test FILTER_BY_TEXT: pre-filter by full-text score, rank by vector."""
    table_name = "test_singlestoredb_filter_by_text_search"
    drop(table_name)
    embeddings = IncrementalEmbeddings()
    docsearch = SingleStoreDB.from_documents(
        snow_rain_docs,
        embeddings,
        table_name=table_name,
        use_full_text_search=True,
        use_vector_index=True,
        vector_size=2,
        host=TEST_SINGLESTOREDB_URL,
    )
    # filter_threshold=0 admits every document with a non-negative text score.
    output = docsearch.similarity_search(
        "rainstorm in parched desert",
        k=1,
        search_strategy=SingleStoreDB.SearchStrategy.FILTER_BY_TEXT,
        filter_threshold=0,
    )
    assert len(output) == 1
    assert (
        "In the parched desert, a sudden rainstorm brought relief"
        in output[0].page_content
    )
    drop(table_name)
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_filter_by_vector_search1(snow_rain_docs: List[Document]) -> None:
    """Test FILTER_BY_VECTOR with the default (dot-product) distance."""
    table_name = "test_singlestoredb_filter_by_vector_search1"
    drop(table_name)
    embeddings = IncrementalEmbeddings()
    docsearch = SingleStoreDB.from_documents(
        snow_rain_docs,
        embeddings,
        table_name=table_name,
        use_full_text_search=True,
        use_vector_index=True,
        vector_size=2,
        host=TEST_SINGLESTOREDB_URL,
    )
    # Vector similarity is thresholded first, then a metadata filter applies.
    output = docsearch.similarity_search(
        "rainstorm in parched desert, rain",
        k=1,
        filter={"category": "rain"},
        search_strategy=SingleStoreDB.SearchStrategy.FILTER_BY_VECTOR,
        filter_threshold=-0.2,
    )
    assert len(output) == 1
    assert (
        "High in the mountains, the rain transformed into a delicate"
        in output[0].page_content
    )
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_filter_by_vector_search2(snow_rain_docs: List[Document]) -> None:
    """Test FILTER_BY_VECTOR with Euclidean distance and an upper threshold."""
    table_name = "test_singlestoredb_filter_by_vector_search2"
    drop(table_name)
    embeddings = IncrementalEmbeddings()
    docsearch = SingleStoreDB.from_documents(
        snow_rain_docs,
        embeddings,
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
        table_name=table_name,
        use_full_text_search=True,
        use_vector_index=True,
        vector_size=2,
        host=TEST_SINGLESTOREDB_URL,
    )
    # For Euclidean distance, filter_threshold is a max-distance cutoff.
    output = docsearch.similarity_search(
        "rainstorm in parched desert, rain",
        k=1,
        filter={"group": "a"},
        search_strategy=SingleStoreDB.SearchStrategy.FILTER_BY_VECTOR,
        filter_threshold=1.5,
    )
    assert len(output) == 1
    assert (
        "Amidst the bustling cityscape, the rain fell relentlessly"
        in output[0].page_content
    )
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_weighted_sum_search_unsupported_strategy(
    snow_rain_docs: List[Document],
) -> None:
    """WEIGHTED_SUM must be rejected when the table uses Euclidean distance.

    The weighted-sum hybrid strategy is only defined for dot-product
    similarity, so requesting it on a EUCLIDEAN_DISTANCE store should raise
    ``ValueError``.
    """
    table_name = "test_singlestoredb_waighted_sum_search_unsupported_strategy"
    drop(table_name)
    embeddings = IncrementalEmbeddings()
    docsearch = SingleStoreDB.from_documents(
        snow_rain_docs,
        embeddings,
        table_name=table_name,
        use_full_text_search=True,
        use_vector_index=True,
        vector_size=2,
        host=TEST_SINGLESTOREDB_URL,
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
    )
    # The original try/except passed silently when NO exception was raised;
    # pytest.raises fails the test in that case and also checks the message.
    with pytest.raises(ValueError, match="Search strategy WEIGHTED_SUM is not"):
        docsearch.similarity_search(
            "rainstorm in parched desert, rain",
            k=1,
            search_strategy=SingleStoreDB.SearchStrategy.WEIGHTED_SUM,
        )
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_singlestoredb_weighted_sum_search(snow_rain_docs: List[Document]) -> None:
    """Test WEIGHTED_SUM hybrid search combined with a metadata filter."""
    table_name = "test_singlestoredb_waighted_sum_search"
    drop(table_name)
    embeddings = IncrementalEmbeddings()
    docsearch = SingleStoreDB.from_documents(
        snow_rain_docs,
        embeddings,
        table_name=table_name,
        use_full_text_search=True,
        use_vector_index=True,
        vector_size=2,
        host=TEST_SINGLESTOREDB_URL,
    )
    # Default (dot-product) distance supports the weighted-sum strategy.
    output = docsearch.similarity_search(
        "rainstorm in parched desert, rain",
        k=1,
        search_strategy=SingleStoreDB.SearchStrategy.WEIGHTED_SUM,
        filter={"category": "snow"},
    )
    assert len(output) == 1
    assert (
        "Atop the rugged peaks, snow fell with an unyielding" in output[0].page_content
    )
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_insert(snow_rain_docs: List[Document]) -> None:
    """add_documents(return_ids=True) yields one unique id per document."""
    table_name = "test_singlestoredb_insert"
    drop(table_name)
    embeddings = IncrementalEmbeddings()
    docsearch = SingleStoreDB(
        embeddings,
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    ids = docsearch.add_documents(snow_rain_docs, return_ids=True)
    assert len(ids) == len(snow_rain_docs)
    # A set collapses duplicates, so equal sizes means all ids are distinct
    # (replaces the original O(n^2) pairwise comparison loop).
    assert len(set(ids)) == len(ids)
    docsearch.drop()
@pytest.mark.skipif(not singlestoredb_installed, reason="singlestoredb not installed")
def test_delete(snow_rain_docs: List[Document]) -> None:
    """Test that delete(ids) removes all matching rows from the store."""
    table_name = "test_singlestoredb_delete"
    drop(table_name)
    embeddings = IncrementalEmbeddings()
    docsearch = SingleStoreDB(
        embeddings,
        table_name=table_name,
        host=TEST_SINGLESTOREDB_URL,
    )
    ids = docsearch.add_documents(snow_rain_docs, return_ids=True)
    # Sanity check: documents are searchable before deletion.
    output = docsearch.similarity_search(
        "rainstorm in parched desert",
        k=3,
        filter={"count": "1"},
    )
    assert len(output) == 2
    docsearch.delete(ids)
    # After deleting every id, the same query must return nothing.
    output = docsearch.similarity_search(
        "rainstorm in parched desert",
        k=3,
    )
    assert len(output) == 0
    docsearch.drop()
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_epsilla.py | """Test Epsilla functionality."""
from langchain_community.vectorstores import Epsilla
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
def _test_from_texts() -> Epsilla:
    """Build an Epsilla store over the shared fake texts."""
    from pyepsilla import vectordb

    fake_embeddings = FakeEmbeddings()
    db_client = vectordb.Client()
    return Epsilla.from_texts(fake_texts, fake_embeddings, db_client)
def test_epsilla() -> None:
    """A freshly built store returns the query text for a nearest search."""
    store = _test_from_texts()
    hits = store.similarity_search(query="bar", k=1)
    assert "bar" in [hit.page_content for hit in hits]
def test_epsilla_add_texts() -> None:
    """Texts added via add_texts are retrievable by similarity search."""
    from pyepsilla import vectordb

    fake_embeddings = FakeEmbeddings()
    db_client = vectordb.Client()
    store = Epsilla(db_client, fake_embeddings)
    store.add_texts(fake_texts)
    hits = store.similarity_search(query="foo", k=1)
    assert "foo" in [hit.page_content for hit in hits]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_kinetica.py | import os
from typing import List
import pytest
from langchain_core.documents import Document
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.vectorstores import (
DistanceStrategy,
Kinetica,
KineticaSettings,
)
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
DIMENSIONS = 3
HOST = os.getenv("KINETICA_HOST", "http://127.0.0.1:9191")
USERNAME = os.getenv("KINETICA_USERNAME", "")
PASSWORD = os.getenv("KINETICA_PASSWORD", "")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
class FakeEmbeddingsWithAdaDimension(FakeEmbeddings):
    """Deterministic test embeddings of dimension ``DIMENSIONS``."""

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed the i-th text as [1.0, ..., 1.0, float(i)]."""
        vectors: List[List[float]] = []
        for index in range(len(texts)):
            vectors.append([1.0] * (DIMENSIONS - 1) + [float(index)])
        return vectors

    def embed_query(self, text: str) -> List[float]:
        """Embed every query as [1.0, ..., 1.0, 0.0] (matches document 0)."""
        return [1.0] * (DIMENSIONS - 1) + [0.0]
@pytest.fixture
def create_config() -> KineticaSettings:
    """Build Kinetica connection settings from environment-derived constants."""
    return KineticaSettings(host=HOST, username=USERNAME, password=PASSWORD)
@pytest.mark.requires("gpudb")
def test_kinetica(create_config: KineticaSettings) -> None:
    """Test basic construction from texts and a nearest-neighbor search."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"text": text} for text in texts]
    docsearch = Kinetica.from_texts(
        config=create_config,
        texts=texts,
        metadatas=metadatas,
        embedding=FakeEmbeddingsWithAdaDimension(),
        collection_name="1test_kinetica",
        schema_name="1test",
        pre_delete_collection=True,
    )
    output = docsearch.similarity_search("foo", k=1)
    assert output[0].page_content == "foo"
@pytest.mark.requires("gpudb")
def test_kinetica_embeddings(create_config: KineticaSettings) -> None:
    """Test construction from precomputed (text, embedding) pairs."""
    texts = ["foo", "bar", "baz"]
    text_embeddings = FakeEmbeddingsWithAdaDimension().embed_documents(texts)
    text_embedding_pairs = list(zip(texts, text_embeddings))
    docsearch = Kinetica.from_embeddings(
        config=create_config,
        text_embeddings=text_embedding_pairs,
        embedding=FakeEmbeddingsWithAdaDimension(),
        collection_name="test_kinetica_embeddings",
        pre_delete_collection=False,
    )
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo")]
@pytest.mark.requires("gpudb")
def test_kinetica_with_metadatas(create_config: KineticaSettings) -> None:
    """Test that per-text metadata round-trips through search results."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = Kinetica.from_texts(
        config=create_config,
        texts=texts,
        metadatas=metadatas,
        embedding=FakeEmbeddingsWithAdaDimension(),
        collection_name="test_kinetica_with_metadatas",
        pre_delete_collection=False,
    )
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo", metadata={"page": "0"})]
@pytest.mark.requires("gpudb")
def test_kinetica_with_metadatas_with_scores(create_config: KineticaSettings) -> None:
    """Test search with scores: an exact match has distance 0.0."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = Kinetica.from_texts(
        config=create_config,
        texts=texts,
        metadatas=metadatas,
        embedding=FakeEmbeddingsWithAdaDimension(),
        collection_name="test_kinetica_with_metadatas_with_scores",
        pre_delete_collection=False,
    )
    output = docsearch.similarity_search_with_score("foo", k=1)
    assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
@pytest.mark.requires("gpudb")
def test_kinetica_with_filter_match(create_config: KineticaSettings) -> None:
    """Test a metadata filter that selects the nearest document itself."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = Kinetica.from_texts(
        config=create_config,
        texts=texts,
        metadatas=metadatas,
        embedding=FakeEmbeddingsWithAdaDimension(),
        collection_name="test_kinetica_with_filter_match",
        pre_delete_collection=False,
    )
    output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "0"})
    assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
@pytest.mark.requires("gpudb")
def test_kinetica_with_filter_distant_match(create_config: KineticaSettings) -> None:
    """Test a metadata filter that forces a non-nearest document to match."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = Kinetica.from_texts(
        config=create_config,
        texts=texts,
        metadatas=metadatas,
        embedding=FakeEmbeddingsWithAdaDimension(),
        collection_name="test_kinetica_with_filter_distant_match",
        pre_delete_collection=False,
    )
    # "baz" embeds as [1, 1, 2] vs the query [1, 1, 0] -> squared distance 4,
    # but the score reported here is 2.0 (presumably non-squared) — see assert.
    output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "2"})
    assert output == [(Document(page_content="baz", metadata={"page": "2"}), 2.0)]
# Skipped: Kinetica's filter translation does not yet support IN clauses.
@pytest.mark.skip(reason="Filter condition has IN clause")
@pytest.mark.requires("gpudb")
def test_kinetica_with_filter_in_set(create_config: KineticaSettings) -> None:
    """Test (currently skipped) set-membership filtering via an IN clause."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = Kinetica.from_texts(
        config=create_config,
        texts=texts,
        metadatas=metadatas,
        embedding=FakeEmbeddingsWithAdaDimension(),
        collection_name="test_kinetica_with_filter_in_set",
        pre_delete_collection=False,
    )
    output = docsearch.similarity_search_with_score(
        "foo", k=2, filter={"page": {"IN": ["0", "2"]}}
    )
    assert output == [
        (Document(page_content="foo", metadata={"page": "0"}), 0.0),
        (Document(page_content="baz", metadata={"page": "2"}), 0.0013003906671379406),
    ]
@pytest.mark.requires("gpudb")
def test_kinetica_relevance_score(create_config: KineticaSettings) -> None:
    """Test relevance scores: 1.0 for an exact match, decreasing with distance."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = Kinetica.from_texts(
        config=create_config,
        texts=texts,
        metadatas=metadatas,
        embedding=FakeEmbeddingsWithAdaDimension(),
        collection_name="test_kinetica_relevance_score",
        pre_delete_collection=False,
    )
    output = docsearch.similarity_search_with_relevance_scores("foo", k=3)
    # NOTE(review): relevance can go negative here, suggesting the default
    # normalization is 1 - distance/sqrt(2) — confirm against the Kinetica
    # vectorstore implementation.
    assert output == [
        (Document(page_content="foo", metadata={"page": "0"}), 1.0),
        (Document(page_content="bar", metadata={"page": "1"}), 0.29289321881345254),
        (Document(page_content="baz", metadata={"page": "2"}), -0.4142135623730949),
    ]
@pytest.mark.requires("openai", "gpudb")
def test_kinetica_max_marginal_relevance_search(
    create_config: KineticaSettings,
) -> None:
    """Test MMR search using real OpenAI embeddings (needs OPENAI_API_KEY)."""
    openai = OpenAIEmbeddings(api_key=OPENAI_API_KEY)
    texts = ["foo", "bar", "baz"]
    docsearch = Kinetica.from_texts(
        config=create_config,
        texts=texts,
        embedding=openai,
        distance_strategy=DistanceStrategy.COSINE,
        collection_name="test_kinetica_max_marginal_relevance_search",
        pre_delete_collection=False,
    )
    output = docsearch.max_marginal_relevance_search("foo", k=1, fetch_k=3)
    assert output == [Document(page_content="foo")]
@pytest.mark.requires("gpudb")
def test_kinetica_max_marginal_relevance_search_with_score(
    create_config: KineticaSettings,
) -> None:
    """Test MMR search with scores: the exact match scores 0.0 (Euclidean)."""
    texts = ["foo", "bar", "baz"]
    docsearch = Kinetica.from_texts(
        config=create_config,
        texts=texts,
        embedding=FakeEmbeddingsWithAdaDimension(),
        distance_strategy=DistanceStrategy.EUCLIDEAN,
        collection_name="test_kinetica_max_marginal_relevance_search_with_score",
        pre_delete_collection=False,
    )
    output = docsearch.max_marginal_relevance_search_with_score("foo", k=1, fetch_k=3)
    assert output == [(Document(page_content="foo"), 0.0)]
@pytest.mark.requires("openai", "gpudb")
def test_kinetica_with_openai_embeddings(create_config: KineticaSettings) -> None:
    """Test end-to-end construction and search with real OpenAI embeddings."""
    if not OPENAI_API_KEY:
        # Fail with an actionable message instead of a bare `assert False`
        # (which carries no explanation and is stripped under `python -O`).
        pytest.fail("OPENAI_API_KEY environment variable is not set")
    openai = OpenAIEmbeddings(api_key=OPENAI_API_KEY)
    texts = ["foo", "bar", "baz"]
    metadatas = [{"text": text} for text in texts]
    docsearch = Kinetica.from_texts(
        config=create_config,
        texts=texts,
        metadatas=metadatas,
        embedding=openai,
        collection_name="kinetica_openai_test",
        pre_delete_collection=False,
    )
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo", metadata={"text": "foo"})]
@pytest.mark.requires("gpudb")
def test_kinetica_retriever_search_threshold(create_config: KineticaSettings) -> None:
    """Test using retriever for searching with threshold."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = Kinetica.from_texts(
        config=create_config,
        texts=texts,
        metadatas=metadatas,
        embedding=FakeEmbeddingsWithAdaDimension(),
        distance_strategy=DistanceStrategy.EUCLIDEAN,
        collection_name="test_kinetica_retriever_search_threshold",
        pre_delete_collection=False,
    )
    # With a 0.999 relevance cutoff, only the (near-)exact match survives.
    retriever = docsearch.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"k": 3, "score_threshold": 0.999},
    )
    output = retriever.invoke("summer")
    assert output == [
        Document(page_content="foo", metadata={"page": "0"}),
    ]
@pytest.mark.requires("gpudb")
def test_kinetica_retriever_search_threshold_custom_normalization_fn(
    create_config: KineticaSettings,
) -> None:
    """Test searching with threshold and custom normalization function."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = Kinetica.from_texts(
        config=create_config,
        texts=texts,
        metadatas=metadatas,
        embedding=FakeEmbeddingsWithAdaDimension(),
        distance_strategy=DistanceStrategy.EUCLIDEAN,
        collection_name="test_kinetica_retriever_search_threshold_custom_normalization_fn",
        pre_delete_collection=False,
        # Every score is forced to 0, so no document can pass the threshold.
        relevance_score_fn=lambda d: d * 0,
    )
    retriever = docsearch.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"k": 3, "score_threshold": 0.5},
    )
    output = retriever.invoke("foo")
    assert output == []
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_milvus.py | """Test Milvus functionality."""
from typing import Any, List, Optional
from langchain_core.documents import Document
from langchain_community.vectorstores import Milvus
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
def _milvus_from_texts(
    metadatas: Optional[List[dict]] = None,
    ids: Optional[List[str]] = None,
    drop: bool = True,
) -> Milvus:
    """Create a Milvus store over the shared fake texts.

    ``drop`` controls whether a pre-existing collection is dropped first.
    """
    connection = {"host": "127.0.0.1", "port": "19530"}
    return Milvus.from_texts(
        fake_texts,
        FakeEmbeddings(),
        metadatas=metadatas,
        ids=ids,
        connection_args=connection,
        drop_old=drop,
    )
def _get_pks(expr: str, docsearch: Milvus) -> List[Any]:
    """Return the primary keys of the entities matching ``expr``."""
    pks = docsearch.get_pks(expr)
    return pks  # type: ignore[return-value]
def test_milvus() -> None:
    """End to end: build a store and retrieve the nearest document."""
    store = _milvus_from_texts()
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo")]
def test_milvus_with_metadata() -> None:
    """Metadata attached at construction time round-trips through search."""
    labels = [{"label": "test"} for _ in fake_texts]
    store = _milvus_from_texts(metadatas=labels)
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo", metadata={"label": "test"})]
def test_milvus_with_id() -> None:
    """Test explicit ids: search, delete-by-id, and duplicate-id rejection."""
    ids = ["id_" + str(i) for i in range(len(fake_texts))]
    docsearch = _milvus_from_texts(ids=ids)
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo")]
    output = docsearch.delete(ids=ids)
    assert output.delete_count == len(fake_texts)  # type: ignore[attr-defined]
    # Duplicate ids must be rejected. The original try/except passed silently
    # when NO exception was raised at all; track that one actually fires.
    raised = False
    try:
        _milvus_from_texts(ids=["dup_id"] * len(fake_texts))
    except AssertionError:
        raised = True
    assert raised, "expected duplicate ids to raise AssertionError"
def test_milvus_with_score() -> None:
    """Scored search returns docs nearest-first with increasing distances."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    store = _milvus_from_texts(metadatas=metadatas)
    results = store.similarity_search_with_score("foo", k=3)
    docs = [doc for doc, _ in results]
    scores = [score for _, score in results]
    assert docs == [
        Document(page_content="foo", metadata={"page": 0}),
        Document(page_content="bar", metadata={"page": 1}),
        Document(page_content="baz", metadata={"page": 2}),
    ]
    assert scores[0] < scores[1] < scores[2]
def test_milvus_max_marginal_relevance_search() -> None:
    """Test MMR search: results trade off relevance against diversity."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = _milvus_from_texts(metadatas=metadatas)
    # MMR picks "foo" (most relevant) then "baz" (most diverse), skipping "bar".
    output = docsearch.max_marginal_relevance_search("foo", k=2, fetch_k=3)
    assert output == [
        Document(page_content="foo", metadata={"page": 0}),
        Document(page_content="baz", metadata={"page": 2}),
    ]
def test_milvus_add_extra() -> None:
    """Test that add_texts appends to (not replaces) the existing collection."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = _milvus_from_texts(metadatas=metadatas)
    docsearch.add_texts(texts, metadatas)
    # 3 initial + 3 added = 6 documents total.
    output = docsearch.similarity_search("foo", k=10)
    assert len(output) == 6
def test_milvus_no_drop() -> None:
    """Test that drop_old=False preserves data across store instances."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = _milvus_from_texts(metadatas=metadatas)
    del docsearch
    # Re-creating without dropping keeps the first 3 docs and adds 3 more.
    docsearch = _milvus_from_texts(metadatas=metadatas, drop=False)
    output = docsearch.similarity_search("foo", k=10)
    assert len(output) == 6
def test_milvus_get_pks() -> None:
    """Test end to end construction and get pks with expr"""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"id": i} for i in range(len(texts))]
    docsearch = _milvus_from_texts(metadatas=metadatas)
    # Milvus boolean expression over the metadata "id" field.
    expr = "id in [1,2]"
    output = _get_pks(expr, docsearch)
    assert len(output) == 2
def test_milvus_delete_entities() -> None:
    """Test deleting entities selected by a primary-key lookup expression."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"id": i} for i in range(len(texts))]
    docsearch = _milvus_from_texts(metadatas=metadatas)
    expr = "id in [1,2]"
    pks = _get_pks(expr, docsearch)
    result = docsearch.delete(pks)
    assert result is True
def test_milvus_upsert_entities() -> None:
    """Test upserting new documents over existing primary keys."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"id": i} for i in range(len(texts))]
    docsearch = _milvus_from_texts(metadatas=metadatas)
    expr = "id in [1,2]"
    pks = _get_pks(expr, docsearch)
    documents = [
        Document(page_content="test_1", metadata={"id": 1}),
        Document(page_content="test_2", metadata={"id": 3}),
    ]
    # Upsert replaces the two selected entities and returns their new ids.
    ids = docsearch.upsert(pks, documents)
    assert len(ids) == 2  # type: ignore[arg-type]
# if __name__ == "__main__":
# test_milvus()
# test_milvus_with_metadata()
# test_milvus_with_score()
# test_milvus_max_marginal_relevance_search()
# test_milvus_add_extra()
# test_milvus_no_drop()
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_elastic_vector_search.py | """Test ElasticSearch functionality."""
import logging
import os
import uuid
from typing import Generator, List, Union
import pytest
from langchain_core.documents import Document
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores.elastic_vector_search import ElasticVectorSearch
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
logging.basicConfig(level=logging.DEBUG)
"""
cd tests/integration_tests/vectorstores/docker-compose
docker-compose -f elasticsearch.yml up
"""
class TestElasticsearch:
    """Integration tests for ElasticVectorSearch against a local ES instance.

    Requires OPENAI_API_KEY (checked in ``setup_class``) and an Elasticsearch
    server on localhost:9200; all indexes are wiped after the class runs.
    """

    @classmethod
    def setup_class(cls) -> None:
        # Fail fast: several tests below construct OpenAIEmbeddings.
        if not os.getenv("OPENAI_API_KEY"):
            raise ValueError("OPENAI_API_KEY environment variable is not set")
    @pytest.fixture(scope="class", autouse=True)
    def elasticsearch_url(self) -> Union[str, Generator[str, None, None]]:  # type: ignore[return]
        """Return the elasticsearch url."""
        from elasticsearch import Elasticsearch

        url = "http://localhost:9200"
        yield url
        # Teardown (runs once after the whole class): delete every index so
        # successive test runs start from a clean server.
        es = Elasticsearch(hosts=url)
        # Clear all indexes
        index_names = es.indices.get(index="_all").keys()
        for index_name in index_names:
            # print(index_name)
            es.indices.delete(index=index_name)
    def test_similarity_search_without_metadata(self, elasticsearch_url: str) -> None:
        """Test end to end construction and search without metadata."""
        texts = ["foo", "bar", "baz"]
        docsearch = ElasticVectorSearch.from_texts(
            texts, FakeEmbeddings(), elasticsearch_url=elasticsearch_url
        )
        output = docsearch.similarity_search("foo", k=1)
        assert output == [Document(page_content="foo")]
    @pytest.mark.skip(
        reason="Docker build has no ssl certs. Enable this test when testing with ssl."
    )
    def test_similarity_search_with_ssl_verify(self, elasticsearch_url: str) -> None:
        """Test end to end construction and search with ssl verify."""
        # Placeholder credential/cert values; replace when enabling the test.
        ssl_verify = {
            "verify_certs": True,
            "basic_auth": ("ES_USER", "ES_PASSWORD"),
            "ca_certs": "ES_CA_CERTS_PATH",
        }
        texts = ["foo", "bar", "baz"]
        docsearch = ElasticVectorSearch.from_texts(
            texts,
            FakeEmbeddings(),
            elasticsearch_url="http://localhost:9200",
            ssl_verify=ssl_verify,
        )
        output = docsearch.similarity_search("foo", k=1)
        assert output == [Document(page_content="foo")]
    def test_similarity_search_with_metadata(self, elasticsearch_url: str) -> None:
        """Test end to end construction and search with metadata."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = ElasticVectorSearch.from_texts(
            texts,
            FakeEmbeddings(),
            metadatas=metadatas,
            elasticsearch_url=elasticsearch_url,
        )
        output = docsearch.similarity_search("foo", k=1)
        assert output == [Document(page_content="foo", metadata={"page": 0})]
    # vcr replays recorded OpenAI HTTP responses; localhost (ES) is live.
    @pytest.mark.vcr(ignore_localhost=True)
    def test_default_index_from_documents(
        self,
        documents: List[Document],
        embedding_openai: OpenAIEmbeddings,
        elasticsearch_url: str,
    ) -> None:
        """This test checks the construction of a default
        ElasticSearch index using the 'from_documents'."""
        elastic_vector_search = ElasticVectorSearch.from_documents(
            documents=documents,
            embedding=embedding_openai,
            elasticsearch_url=elasticsearch_url,
        )
        search_result = elastic_vector_search.similarity_search("sharks")
        assert len(search_result) != 0
    @pytest.mark.vcr(ignore_localhost=True)
    def test_custom_index_from_documents(
        self,
        documents: List[Document],
        embedding_openai: OpenAIEmbeddings,
        elasticsearch_url: str,
    ) -> None:
        """This test checks the construction of a custom
        ElasticSearch index using the 'from_documents'."""
        from elasticsearch import Elasticsearch

        # Random suffix avoids collisions with indexes from earlier tests.
        index_name = f"custom_index_{uuid.uuid4().hex}"
        elastic_vector_search = ElasticVectorSearch.from_documents(
            documents=documents,
            embedding=embedding_openai,
            elasticsearch_url=elasticsearch_url,
            index_name=index_name,
        )
        es = Elasticsearch(hosts=elasticsearch_url)
        index_names = es.indices.get(index="_all").keys()
        assert index_name in index_names
        search_result = elastic_vector_search.similarity_search("sharks")
        assert len(search_result) != 0
    @pytest.mark.vcr(ignore_localhost=True)
    def test_custom_index_add_documents(
        self,
        documents: List[Document],
        embedding_openai: OpenAIEmbeddings,
        elasticsearch_url: str,
    ) -> None:
        """This test checks the construction of a custom
        ElasticSearch index using the 'add_documents'."""
        from elasticsearch import Elasticsearch

        index_name = f"custom_index_{uuid.uuid4().hex}"
        elastic_vector_search = ElasticVectorSearch(
            embedding=embedding_openai,
            elasticsearch_url=elasticsearch_url,
            index_name=index_name,
        )
        es = Elasticsearch(hosts=elasticsearch_url)
        elastic_vector_search.add_documents(documents)
        index_names = es.indices.get(index="_all").keys()
        assert index_name in index_names
        search_result = elastic_vector_search.similarity_search("sharks")
        assert len(search_result) != 0
    def test_custom_index_add_documents_to_exists_store(self) -> None:
        # TODO: implement it
        pass
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_scann.py | """Test ScaNN functionality."""
import datetime
import tempfile
import numpy as np
import pytest
from langchain_core.documents import Document
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain_community.vectorstores.scann import (
ScaNN,
dependable_scann_import,
normalize,
)
from langchain_community.vectorstores.utils import DistanceStrategy
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
FakeEmbeddings,
)
def test_scann() -> None:
    """Test construction: docstore contents and a basic nearest search."""
    texts = ["foo", "bar", "baz"]
    docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
    index_to_id = docsearch.index_to_docstore_id
    # Every indexed vector must map to its original document in the docstore.
    expected_docstore = InMemoryDocstore(
        {
            index_to_id[0]: Document(page_content="foo"),
            index_to_id[1]: Document(page_content="bar"),
            index_to_id[2]: Document(page_content="baz"),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo")]
def test_scann_vector_mips_l2() -> None:
    """Test vector similarity with MIPS and L2.

    An exact match scores 0.0 under L2 distance and 1.0 under normalized
    max-inner-product.
    """
    texts = ["foo", "bar", "baz"]
    euclidean_search = ScaNN.from_texts(texts, FakeEmbeddings())
    output = euclidean_search.similarity_search_with_score("foo", k=1)
    expected_euclidean = [(Document(page_content="foo", metadata={}), 0.0)]
    assert output == expected_euclidean
    mips_search = ScaNN.from_texts(
        texts,
        FakeEmbeddings(),
        distance_strategy=DistanceStrategy.MAX_INNER_PRODUCT,
        normalize_L2=True,
    )
    output = mips_search.similarity_search_with_score("foo", k=1)
    expected_mips = [(Document(page_content="foo", metadata={}), 1.0)]
    assert output == expected_mips
def test_scann_with_config() -> None:
    """Test ScaNN with an explicit approximate-search config."""
    texts = [str(i) for i in range(10000)]
    # Create a config with dimension = 10, k = 10.
    # Tree: search 10 leaves in a search tree of 100 leaves.
    # Quantization: uses 16-centroid quantizer every 2 dimension.
    # Reordering: reorder top 100 results.
    scann_config = (
        dependable_scann_import()
        .scann_ops_pybind.builder(np.zeros(shape=(0, 10)), 10, "squared_l2")
        .tree(num_leaves=100, num_leaves_to_search=10)
        .score_ah(2)
        .reorder(100)
        .create_config()
    )
    mips_search = ScaNN.from_texts(
        texts,
        ConsistentFakeEmbeddings(),
        scann_config=scann_config,
        distance_strategy=DistanceStrategy.MAX_INNER_PRODUCT,
        normalize_L2=True,
    )
    # ConsistentFakeEmbeddings makes query "42" embed identically to text "42".
    output = mips_search.similarity_search_with_score("42", k=1)
    expected = [(Document(page_content="42", metadata={}), 0.0)]
    assert output == expected
def test_scann_vector_sim() -> None:
    """Test searching by a raw query vector (similarity_search_by_vector)."""
    texts = ["foo", "bar", "baz"]
    docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
    index_to_id = docsearch.index_to_docstore_id
    expected_docstore = InMemoryDocstore(
        {
            index_to_id[0]: Document(page_content="foo"),
            index_to_id[1]: Document(page_content="bar"),
            index_to_id[2]: Document(page_content="baz"),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    query_vec = FakeEmbeddings().embed_query(text="foo")
    output = docsearch.similarity_search_by_vector(query_vec, k=1)
    assert output == [Document(page_content="foo")]
def test_scann_vector_sim_with_score_threshold() -> None:
    """Test that a score threshold prunes results below k from vector search."""
    texts = ["foo", "bar", "baz"]
    docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
    index_to_id = docsearch.index_to_docstore_id
    expected_docstore = InMemoryDocstore(
        {
            index_to_id[0]: Document(page_content="foo"),
            index_to_id[1]: Document(page_content="bar"),
            index_to_id[2]: Document(page_content="baz"),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    query_vec = FakeEmbeddings().embed_query(text="foo")
    # k=2 requested, but only the exact match passes the 0.2 threshold.
    output = docsearch.similarity_search_by_vector(query_vec, k=2, score_threshold=0.2)
    assert output == [Document(page_content="foo")]
def test_similarity_search_with_score_by_vector() -> None:
    """Scored vector search returns the matching document first."""
    texts = ["foo", "bar", "baz"]
    docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
    ids = docsearch.index_to_docstore_id
    expected = InMemoryDocstore(
        {ids[i]: Document(page_content=t) for i, t in enumerate(texts)}
    )
    assert docsearch.docstore.__dict__ == expected.__dict__
    vec = FakeEmbeddings().embed_query(text="foo")
    results = docsearch.similarity_search_with_score_by_vector(vec, k=1)
    assert len(results) == 1
    assert results[0][0] == Document(page_content="foo")
def test_similarity_search_with_score_by_vector_with_score_threshold() -> None:
    """Scored vector search drops results whose score exceeds the threshold."""
    texts = ["foo", "bar", "baz"]
    docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
    ids = docsearch.index_to_docstore_id
    expected = InMemoryDocstore(
        {ids[i]: Document(page_content=t) for i, t in enumerate(texts)}
    )
    assert docsearch.docstore.__dict__ == expected.__dict__
    vec = FakeEmbeddings().embed_query(text="foo")
    results = docsearch.similarity_search_with_score_by_vector(
        vec,
        k=2,
        score_threshold=0.2,
    )
    # Only the exact match passes the threshold.
    assert len(results) == 1
    assert results[0][0] == Document(page_content="foo")
    assert results[0][1] < 0.2
def test_scann_with_metadatas() -> None:
    """End-to-end construction and search with per-document metadata."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = ScaNN.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    ids = docsearch.index_to_docstore_id
    expected = InMemoryDocstore(
        {
            ids[i]: Document(page_content=t, metadata={"page": i})
            for i, t in enumerate(texts)
        }
    )
    assert docsearch.docstore.__dict__ == expected.__dict__
    assert docsearch.similarity_search("foo", k=1) == [
        Document(page_content="foo", metadata={"page": 0})
    ]
def test_scann_with_metadatas_and_filter() -> None:
    """A metadata filter overrides pure similarity ranking."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = ScaNN.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    ids = docsearch.index_to_docstore_id
    expected = InMemoryDocstore(
        {
            ids[i]: Document(page_content=t, metadata={"page": i})
            for i, t in enumerate(texts)
        }
    )
    assert docsearch.docstore.__dict__ == expected.__dict__
    # "foo" is the closest text, but the filter forces the page=1 document.
    assert docsearch.similarity_search("foo", k=1, filter={"page": 1}) == [
        Document(page_content="bar", metadata={"page": 1})
    ]
def test_scann_with_metadatas_and_list_filter() -> None:
    """A list-valued filter matches any of the listed metadata values."""
    texts = ["foo", "bar", "baz", "foo", "qux"]
    pages = [0, 1, 2, 3, 3]  # same values the original inline conditional produced
    metadatas = [{"page": p} for p in pages]
    docsearch = ScaNN.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    ids = docsearch.index_to_docstore_id
    expected = InMemoryDocstore(
        {
            ids[i]: Document(page_content=t, metadata={"page": p})
            for i, (t, p) in enumerate(zip(texts, pages))
        }
    )
    assert docsearch.docstore.__dict__ == expected.__dict__
    output = docsearch.similarity_search("foor", k=1, filter={"page": [0, 1, 2]})
    assert output == [Document(page_content="foo", metadata={"page": 0})]
def test_scann_search_not_found() -> None:
    """Searching after emptying the docstore raises ValueError."""
    docsearch = ScaNN.from_texts(["foo", "bar", "baz"], FakeEmbeddings())
    # Break the index -> document mapping on purpose.
    docsearch.docstore = InMemoryDocstore({})
    with pytest.raises(ValueError):
        docsearch.similarity_search("foo")
def test_scann_local_save_load() -> None:
    """Test end-to-end serialization: save the index to disk and reload it."""
    texts = ["foo", "bar", "baz"]
    docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
    # datetime.utcnow() is deprecated (Python 3.12+); use an aware UTC time.
    temp_timestamp = datetime.datetime.now(datetime.timezone.utc).strftime(
        "%Y%m%d-%H%M%S"
    )
    with tempfile.TemporaryDirectory(suffix="_" + temp_timestamp + "/") as temp_folder:
        docsearch.save_local(temp_folder)
        new_docsearch = ScaNN.load_local(
            temp_folder, FakeEmbeddings(), allow_dangerous_deserialization=True
        )
        assert new_docsearch.index is not None
def test_scann_normalize_l2() -> None:
    """Normalized embeddings have unit L2 norm and contain no NaNs."""
    emb = np.array(FakeEmbeddings().embed_documents(["foo", "bar", "baz"]))
    np.testing.assert_allclose(1, np.linalg.norm(normalize(emb), axis=-1))
    # Normalizing an all-zero vector must not produce NaN.
    np.testing.assert_array_equal(False, np.isnan(normalize(np.zeros(10))))
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_thirdai_neuraldb.py | import os
import shutil
import pytest
from langchain_community.vectorstores import NeuralDBVectorStore
@pytest.fixture(scope="session")
def test_csv():  # type: ignore[no-untyped-def]
    """Yield the path of a tiny two-column CSV file, removed afterwards."""
    path = "thirdai-test.csv"
    with open(path, "w") as handle:
        handle.write("column_1,column_2\n")
        handle.write("column one,column two\n")
    yield path
    os.remove(path)
def assert_result_correctness(documents):  # type: ignore[no-untyped-def]
    """Check that exactly one document with the expected CSV content came back."""
    assert len(documents) == 1
    first = documents[0]
    assert first.page_content == "column_1: column one\n\ncolumn_2: column two"
@pytest.mark.requires("thirdai[neural_db]")
def test_neuraldb_retriever_from_scratch(test_csv):  # type: ignore[no-untyped-def]
    """A store built from scratch indexes a CSV and finds its row."""
    store = NeuralDBVectorStore.from_scratch()
    store.insert([test_csv])
    assert_result_correctness(store.similarity_search("column"))
@pytest.mark.requires("thirdai[neural_db]")
def test_neuraldb_retriever_from_checkpoint(test_csv):  # type: ignore[no-untyped-def]
    """Saving and reloading a checkpoint preserves search results."""
    checkpoint = "thirdai-test-save.ndb"
    if os.path.exists(checkpoint):
        shutil.rmtree(checkpoint)
    try:
        store = NeuralDBVectorStore.from_scratch()
        store.insert([test_csv])
        store.save(checkpoint)
        reloaded = NeuralDBVectorStore.from_checkpoint(checkpoint)
        assert_result_correctness(reloaded.similarity_search("column"))
    finally:
        # Always remove the on-disk checkpoint directory.
        if os.path.exists(checkpoint):
            shutil.rmtree(checkpoint)
@pytest.mark.requires("thirdai[neural_db]")
def test_neuraldb_retriever_other_methods(test_csv):  # type: ignore[no-untyped-def]
    """Feedback APIs (associate/upvote and batch forms) run without error."""
    store = NeuralDBVectorStore.from_scratch()
    store.insert([test_csv])
    # These calls only need to complete without raising.
    store.associate("A", "B")
    store.associate_batch([("A", "B"), ("C", "D")])
    store.upvote("A", 0)
    store.upvote_batch([("A", 0), ("B", 0)])
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_myscale.py | """Test MyScale functionality."""
from langchain_core.documents import Document
from langchain_community.vectorstores import MyScale, MyScaleSettings
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_myscale() -> None:
    """Test end-to-end construction and search."""
    config = MyScaleSettings()
    config.table = "test_myscale"
    store = MyScale.from_texts(["foo", "bar", "baz"], FakeEmbeddings(), config=config)
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo", metadata={"_dummy": 0})]
    store.drop()
async def test_myscale_async() -> None:
    """Test end-to-end construction and asynchronous search."""
    config = MyScaleSettings()
    config.table = "test_myscale_async"
    store = MyScale.from_texts(
        texts=["foo", "bar", "baz"], embedding=FakeEmbeddings(), config=config
    )
    hits = await store.asimilarity_search("foo", k=1)
    assert hits == [Document(page_content="foo", metadata={"_dummy": 0})]
    store.drop()
def test_myscale_with_metadatas() -> None:
    """Metadata supplied at construction is returned with search hits."""
    texts = ["foo", "bar", "baz"]
    config = MyScaleSettings()
    config.table = "test_myscale_with_metadatas"
    store = MyScale.from_texts(
        texts=texts,
        embedding=FakeEmbeddings(),
        config=config,
        metadatas=[{"page": str(i)} for i in range(len(texts))],
    )
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo", metadata={"page": "0"})]
    store.drop()
def test_myscale_with_metadatas_with_relevance_scores() -> None:
    """Scored search returns the best document together with its metadata."""
    texts = ["foo", "bar", "baz"]
    config = MyScaleSettings()
    config.table = "test_myscale_with_metadatas_with_relevance_scores"
    store = MyScale.from_texts(
        texts=texts,
        embedding=FakeEmbeddings(),
        metadatas=[{"page": str(i)} for i in range(len(texts))],
        config=config,
    )
    scored = store.similarity_search_with_relevance_scores("foo", k=1)
    assert scored[0][0] == Document(page_content="foo", metadata={"page": "0"})
    store.drop()
def test_myscale_search_filter() -> None:
    """where_str filtering restricts results by metadata."""
    texts = ["far", "bar", "baz"]
    metadatas = [{"first_letter": text[0]} for text in texts]
    config = MyScaleSettings()
    config.table = "test_myscale_search_filter"
    store = MyScale.from_texts(
        texts=texts, embedding=FakeEmbeddings(), metadatas=metadatas, config=config
    )
    # Each query, constrained to its first letter, must hit its own document.
    for query, letter in (("far", "f"), ("bar", "b")):
        hits = store.similarity_search(
            query, k=1, where_str=f"{store.metadata_column}.first_letter='{letter}'"
        )
        assert hits == [
            Document(page_content=query, metadata={"first_letter": letter})
        ]
    store.drop()
def test_myscale_with_persistence() -> None:
    """Test end-to-end construction and search, with persistence."""
    config = MyScaleSettings()
    config.table = "test_myscale_with_persistence"
    texts = ["foo", "bar", "baz"]
    store = MyScale.from_texts(texts=texts, embedding=FakeEmbeddings(), config=config)
    assert store.similarity_search("foo", k=1) == [
        Document(page_content="foo", metadata={"_dummy": 0})
    ]
    # A fresh store with the same config spontaneously reattaches to the
    # existing table (unless it was dropped).
    store = MyScale(embedding=FakeEmbeddings(), config=config)
    store.similarity_search("foo", k=1)
    # Clean up
    store.drop()
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_hanavector.py | """Test HANA vectorstore functionality."""
import os
import random
from typing import Any, Dict, List
import numpy as np
import pytest
from langchain_community.vectorstores import HanaDB
from langchain_community.vectorstores.utils import DistanceStrategy
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
)
from tests.integration_tests.vectorstores.fixtures.filtering_test_cases import (
DOCUMENTS,
TYPE_1_FILTERING_TEST_CASES,
TYPE_2_FILTERING_TEST_CASES,
TYPE_3_FILTERING_TEST_CASES,
TYPE_4_FILTERING_TEST_CASES,
TYPE_5_FILTERING_TEST_CASES,
)
# Extra filtering cases covering the ``$nin`` operator; each entry pairs a
# filter dict with the expected matching document indices.
TYPE_4B_FILTERING_TEST_CASES = [
    # Test $nin, which is missing in TYPE_4_FILTERING_TEST_CASES
    (
        {"name": {"$nin": ["adam", "bob"]}},
        [3],
    ),
]
# Probe for the optional HANA client; the flag gates every test below via
# ``pytest.mark.skipif``.
try:
    from hdbcli import dbapi

    hanadb_installed = True
except ImportError:
    hanadb_installed = False
class NormalizedFakeEmbeddings(ConsistentFakeEmbeddings):
    """Fake embeddings whose vectors are scaled to unit L2 norm. For tests."""

    def normalize(self, vector: List[float]) -> List[float]:
        """Scale ``vector`` so that its Euclidean norm is 1."""
        norm = np.linalg.norm(vector)
        return [float(component / norm) for component in vector]

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        raw_vectors = super().embed_documents(texts)
        return [self.normalize(vector) for vector in raw_vectors]

    def embed_query(self, text: str) -> List[float]:
        return self.normalize(super().embed_query(text))
# Shared embedding instance used by all HanaDB tests below.
embedding = NormalizedFakeEmbeddings()
class ConfigData:
    """Mutable holder for the shared DB connection and test schema name."""

    def __init__(self):  # type: ignore[no-untyped-def]
        self.conn = None  # set by setup_module
        self.schema_name = ""  # set by setup_module
# Module-level state populated by setup_module and used by every test.
test_setup = ConfigData()
def generateSchemaName(cursor):  # type: ignore[no-untyped-def]
    """Build a unique schema name, preferring a server-generated UUID."""
    cursor.execute(
        "SELECT REPLACE(CURRENT_UTCDATE, '-', '') || '_' || BINTOHEX(SYSUUID) FROM "
        "DUMMY;"
    )
    if cursor.has_result_set():
        uid = cursor.fetchall()[0][0]
    else:
        # Fall back to a random id if the statement produced no result set.
        uid = random.randint(1, 100000000)
    return f"VEC_{uid}"
def setup_module(module):  # type: ignore[no-untyped-def]
    """Connect to HANA and create a fresh, uniquely named test schema."""
    test_setup.conn = dbapi.connect(
        address=os.environ.get("HANA_DB_ADDRESS"),
        port=os.environ.get("HANA_DB_PORT"),
        user=os.environ.get("HANA_DB_USER"),
        password=os.environ.get("HANA_DB_PASSWORD"),
        autocommit=True,
        sslValidateCertificate=False,
        # encrypt=True
    )
    # Acquire the cursor before the try block: previously ``cur`` was bound
    # inside ``try``, so a failure in ``cursor()`` made the ``finally`` clause
    # raise UnboundLocalError instead of the real error.
    cur = test_setup.conn.cursor()
    try:
        test_setup.schema_name = generateSchemaName(cur)
        sql_str = f"CREATE SCHEMA {test_setup.schema_name}"
        cur.execute(sql_str)
        sql_str = f"SET SCHEMA {test_setup.schema_name}"
        cur.execute(sql_str)
    except dbapi.ProgrammingError:
        pass
    finally:
        cur.close()
def teardown_module(module):  # type: ignore[no-untyped-def]
    """Drop the test schema created by setup_module, ignoring SQL errors."""
    # Acquire the cursor before the try block so the ``finally`` clause can
    # never reference an unbound local if ``cursor()`` itself fails.
    cur = test_setup.conn.cursor()
    try:
        sql_str = f"DROP SCHEMA {test_setup.schema_name} CASCADE"
        cur.execute(sql_str)
    except dbapi.ProgrammingError:
        pass
    finally:
        cur.close()
@pytest.fixture
def texts() -> List[str]:
    """Five sample documents shared by the HanaDB tests."""
    return ["foo", "bar", "baz", "bak", "cat"]
@pytest.fixture
def metadatas() -> List[Dict[str, Any]]:
    """Metadata dicts aligned with the ``texts`` fixture.

    The return annotation used to be ``List[str]``, which forced a
    ``type: ignore`` on every element; it now reflects the actual type.
    """
    return [
        {"start": 0, "end": 100, "quality": "good", "ready": True},
        {"start": 100, "end": 200, "quality": "bad", "ready": False},
        {"start": 200, "end": 300, "quality": "ugly", "ready": True},
        {"start": 200, "quality": "ugly", "ready": True, "Owner": "Steve"},
        {"start": 300, "quality": "ugly", "Owner": "Steve"},
    ]
def drop_table(connection, table_name):  # type: ignore[no-untyped-def]
    """Drop ``table_name`` if it exists, ignoring "table not found" errors."""
    # Acquire the cursor before the try block: previously ``cur`` was bound
    # inside ``try``, so a failure in ``cursor()`` made the ``finally`` clause
    # raise UnboundLocalError instead of the real error.
    cur = connection.cursor()
    try:
        sql_str = f"DROP TABLE {table_name}"
        cur.execute(sql_str)
    except dbapi.ProgrammingError:
        pass
    finally:
        cur.close()
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_non_existing_table() -> None:
    """Constructing HanaDB on a missing table creates that table."""
    table_name = "NON_EXISTING"
    drop_table(test_setup.conn, table_name)  # make sure it does not exist yet
    store = HanaDB(
        connection=test_setup.conn,
        embedding=embedding,
        distance_strategy=DistanceStrategy.COSINE,
        table_name=table_name,
    )
    # The constructor is expected to have created the backing table.
    assert store._table_exists(table_name)
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_table_with_missing_columns() -> None:
    """Constructing on a table lacking the expected columns must fail."""
    table_name = "EXISTING_MISSING_COLS"
    drop_table(test_setup.conn, table_name)
    # Cursor acquired outside try: avoids an unbound ``cur`` in ``finally``
    # when ``cursor()`` itself raises.
    cur = test_setup.conn.cursor()
    try:
        sql_str = f"CREATE TABLE {table_name}(WRONG_COL NVARCHAR(500));"
        cur.execute(sql_str)
    finally:
        cur.close()
    # Schema validation in the constructor should reject the table; use
    # pytest.raises instead of the manual exception-flag pattern.
    with pytest.raises(AttributeError):
        HanaDB(
            connection=test_setup.conn,
            embedding=embedding,
            distance_strategy=DistanceStrategy.COSINE,
            table_name=table_name,
        )
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_table_with_nvarchar_content(texts: List[str]) -> None:
    """HanaDB works with a pre-created table using custom column names."""
    table_name = "EXISTING_NVARCHAR"
    content_column = "TEST_TEXT"
    metadata_column = "TEST_META"
    vector_column = "TEST_VECTOR"
    drop_table(test_setup.conn, table_name)
    # Cursor acquired outside try: avoids an unbound ``cur`` in ``finally``
    # when ``cursor()`` itself raises.
    cur = test_setup.conn.cursor()
    try:
        sql_str = (
            f"CREATE TABLE {table_name}({content_column} NVARCHAR(2048), "
            f"{metadata_column} NVARCHAR(2048), {vector_column} REAL_VECTOR);"
        )
        cur.execute(sql_str)
    finally:
        cur.close()
    vectordb = HanaDB(
        connection=test_setup.conn,
        embedding=embedding,
        distance_strategy=DistanceStrategy.COSINE,
        table_name=table_name,
        content_column=content_column,
        metadata_column=metadata_column,
        vector_column=vector_column,
    )
    vectordb.add_texts(texts=texts)
    # One row per input text should have been inserted.
    number_of_rows = -1
    cur = test_setup.conn.cursor()
    cur.execute(f"SELECT COUNT(*) FROM {table_name}")
    if cur.has_result_set():
        rows = cur.fetchall()
        number_of_rows = rows[0][0]
    assert number_of_rows == len(texts)
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_table_with_wrong_typed_columns() -> None:
    """Constructing on a table with wrongly typed columns must fail."""
    table_name = "EXISTING_WRONG_TYPES"
    content_column = "DOC_TEXT"
    metadata_column = "DOC_META"
    vector_column = "DOC_VECTOR"
    drop_table(test_setup.conn, table_name)
    # Cursor acquired outside try: avoids an unbound ``cur`` in ``finally``
    # when ``cursor()`` itself raises.
    cur = test_setup.conn.cursor()
    try:
        sql_str = (
            f"CREATE TABLE {table_name}({content_column} INTEGER, "
            f"{metadata_column} INTEGER, {vector_column} INTEGER);"
        )
        cur.execute(sql_str)
    finally:
        cur.close()
    # Schema validation in the constructor should reject the table; use
    # pytest.raises instead of the manual exception-flag pattern.
    with pytest.raises(AttributeError):
        HanaDB(
            connection=test_setup.conn,
            embedding=embedding,
            distance_strategy=DistanceStrategy.COSINE,
            table_name=table_name,
        )
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_non_existing_table_fixed_vector_length() -> None:
    """Creating a missing table honors a fixed vector column length."""
    table_name = "NON_EXISTING"
    vector_column = "MY_VECTOR"
    vector_column_length = 42
    drop_table(test_setup.conn, table_name)  # start without the table
    store = HanaDB(
        connection=test_setup.conn,
        embedding=embedding,
        distance_strategy=DistanceStrategy.COSINE,
        table_name=table_name,
        vector_column=vector_column,
        vector_column_length=vector_column_length,
    )
    assert store._table_exists(table_name)
    store._check_column(
        table_name, vector_column, "REAL_VECTOR", vector_column_length
    )
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_add_texts(texts: List[str]) -> None:
    """add_texts should create one table row per input text."""
    table_name = "TEST_TABLE_ADD_TEXTS"
    drop_table(test_setup.conn, table_name)  # start from a clean slate
    store = HanaDB(
        connection=test_setup.conn, embedding=embedding, table_name=table_name
    )
    store.add_texts(texts=texts)
    # Count rows directly via SQL and compare with the number of inputs.
    cur = test_setup.conn.cursor()
    cur.execute(f"SELECT COUNT(*) FROM {table_name}")
    row_count = cur.fetchall()[0][0] if cur.has_result_set() else -1
    assert row_count == len(texts)
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_from_texts(texts: List[str]) -> None:
    """from_texts returns a HanaDB instance and persists every text."""
    table_name = "TEST_TABLE_FROM_TEXTS"
    drop_table(test_setup.conn, table_name)  # start from a clean slate
    store = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        embedding=embedding,
        table_name=table_name,
    )
    assert isinstance(store, HanaDB)
    # Count rows directly via SQL and compare with the number of inputs.
    cur = test_setup.conn.cursor()
    cur.execute(f"SELECT COUNT(*) FROM {table_name}")
    row_count = cur.fetchall()[0][0] if cur.has_result_set() else -1
    assert row_count == len(texts)
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_similarity_search_simple(texts: List[str]) -> None:
    """The nearest neighbor of a stored text is that text itself."""
    table_name = "TEST_TABLE_SEARCH_SIMPLE"
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        embedding=embedding,
        table_name=table_name,
    )
    top_hit = store.similarity_search(texts[0], 1)[0].page_content
    assert top_hit == texts[0]
    assert top_hit != texts[1]
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_similarity_search_by_vector_simple(texts: List[str]) -> None:
    """Searching by a pre-computed query vector finds the matching text."""
    table_name = "TEST_TABLE_SEARCH_SIMPLE_VECTOR"
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        embedding=embedding,
        table_name=table_name,
    )
    query_vector = embedding.embed_query(texts[0])
    top_hit = store.similarity_search_by_vector(query_vector, 1)[0].page_content
    assert top_hit == texts[0]
    assert top_hit != texts[1]
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_similarity_search_simple_euclidean_distance(
    texts: List[str],
) -> None:
    """Nearest-neighbor search also works under Euclidean distance."""
    table_name = "TEST_TABLE_SEARCH_EUCLIDIAN"
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        embedding=embedding,
        table_name=table_name,
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
    )
    top_hit = store.similarity_search(texts[0], 1)[0].page_content
    assert top_hit == texts[0]
    assert top_hit != texts[1]
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_similarity_search_with_metadata(
    texts: List[str], metadatas: List[dict]
) -> None:
    """Search results carry the metadata stored with each text."""
    table_name = "TEST_TABLE_METADATA"
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        metadatas=metadatas,
        embedding=embedding,
        table_name=table_name,
    )
    best = store.similarity_search(texts[0], 3)[0]
    assert best.page_content == texts[0]
    assert best.metadata["start"] == metadatas[0]["start"]
    assert best.metadata["end"] == metadatas[0]["end"]
    # The top hit must not match the second document's content or metadata.
    assert best.page_content != texts[1]
    assert best.metadata["start"] != metadatas[1]["start"]
    assert best.metadata["end"] != metadatas[1]["end"]
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_similarity_search_with_metadata_filter(
    texts: List[str], metadatas: List[dict]
) -> None:
    """Exact-match metadata filters narrow the result set."""
    table_name = "TEST_TABLE_FILTER"
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        metadatas=metadatas,
        embedding=embedding,
        table_name=table_name,
    )
    # A single-key filter matches exactly the second document.
    results = store.similarity_search(texts[0], 3, filter={"start": 100})
    assert len(results) == 1
    assert results[0].page_content == texts[1]
    assert results[0].metadata["start"] == metadatas[1]["start"]
    assert results[0].metadata["end"] == metadatas[1]["end"]
    # A conjunction with a non-matching value yields nothing.
    results = store.similarity_search(texts[0], 3, filter={"start": 100, "end": 150})
    assert len(results) == 0
    # A conjunction where both keys match yields the same single document.
    results = store.similarity_search(texts[0], 3, filter={"start": 100, "end": 200})
    assert len(results) == 1
    assert results[0].page_content == texts[1]
    assert results[0].metadata["start"] == metadatas[1]["start"]
    assert results[0].metadata["end"] == metadatas[1]["end"]
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_similarity_search_with_metadata_filter_string(
    texts: List[str], metadatas: List[dict]
) -> None:
    """String-valued metadata filters select the matching document."""
    table_name = "TEST_TABLE_FILTER_STRING"
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        metadatas=metadatas,
        embedding=embedding,
        table_name=table_name,
    )
    results = store.similarity_search(texts[0], 3, filter={"quality": "bad"})
    assert len(results) == 1
    assert results[0].page_content == texts[1]
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_similarity_search_with_metadata_filter_bool(
    texts: List[str], metadatas: List[dict]
) -> None:
    """Boolean-valued metadata filters select the matching document."""
    table_name = "TEST_TABLE_FILTER_BOOL"
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        metadatas=metadatas,
        embedding=embedding,
        table_name=table_name,
    )
    results = store.similarity_search(texts[0], 3, filter={"ready": False})
    assert len(results) == 1
    assert results[0].page_content == texts[1]
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_similarity_search_with_metadata_filter_invalid_type(
    texts: List[str], metadatas: List[dict]
) -> None:
    """A filter value of an unsupported type (float) raises ValueError."""
    table_name = "TEST_TABLE_FILTER_INVALID_TYPE"
    drop_table(test_setup.conn, table_name)
    vectorDB = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        metadatas=metadatas,
        embedding=embedding,
        table_name=table_name,
    )
    # pytest.raises replaces the manual exception-flag pattern used before,
    # matching the style of the other raise-checks in this suite.
    with pytest.raises(ValueError):
        vectorDB.similarity_search(texts[0], 3, filter={"wrong_type": 0.1})
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_similarity_search_with_score(
    texts: List[str], metadatas: List[dict]
) -> None:
    """Cosine scores start at 1.0 and never increase down the ranking."""
    table_name = "TEST_TABLE_SCORE"
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        embedding=embedding,
        table_name=table_name,
    )
    results = store.similarity_search_with_score(texts[0], 3)
    scores = [score for _doc, score in results]
    assert results[0][0].page_content == texts[0]
    assert scores[0] == 1.0
    assert scores[1] <= scores[0]
    assert scores[2] <= scores[1]
    assert scores[2] >= 0.0
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_similarity_search_with_relevance_score(
    texts: List[str], metadatas: List[dict]
) -> None:
    """Relevance scores start at 1.0 and never increase down the ranking."""
    table_name = "TEST_TABLE_REL_SCORE"
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        embedding=embedding,
        table_name=table_name,
    )
    results = store.similarity_search_with_relevance_scores(texts[0], 3)
    scores = [score for _doc, score in results]
    assert results[0][0].page_content == texts[0]
    assert scores[0] == 1.0
    assert scores[1] <= scores[0]
    assert scores[2] <= scores[1]
    assert scores[2] >= 0.0
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_similarity_search_with_relevance_score_with_euclidian_distance(
    texts: List[str], metadatas: List[dict]
) -> None:
    """Relevance scores also behave correctly under Euclidean distance."""
    table_name = "TEST_TABLE_REL_SCORE_EUCLIDIAN"
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        embedding=embedding,
        table_name=table_name,
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
    )
    results = store.similarity_search_with_relevance_scores(texts[0], 3)
    scores = [score for _doc, score in results]
    assert results[0][0].page_content == texts[0]
    assert scores[0] == 1.0
    assert scores[1] <= scores[0]
    assert scores[2] <= scores[1]
    assert scores[2] >= 0.0
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_similarity_search_with_score_with_euclidian_distance(
    texts: List[str], metadatas: List[dict]
) -> None:
    """Euclidean raw scores start at 0.0 and never decrease down the ranking."""
    table_name = "TEST_TABLE_SCORE_DISTANCE"
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        embedding=embedding,
        table_name=table_name,
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
    )
    results = store.similarity_search_with_score(texts[0], 3)
    scores = [score for _doc, score in results]
    assert results[0][0].page_content == texts[0]
    assert scores[0] == 0.0
    assert scores[1] >= scores[0]
    assert scores[2] >= scores[1]
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_delete_with_filter(texts: List[str], metadatas: List[dict]) -> None:
    """delete() with a filter removes only the matching row."""
    table_name = "TEST_TABLE_DELETE_FILTER"
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        metadatas=metadatas,
        embedding=embedding,
        table_name=table_name,
    )
    assert len(store.similarity_search(texts[0], 10)) == 5
    # Exactly one document matches start=100/end=200; removing it leaves four.
    assert store.delete(filter={"start": 100, "end": 200})
    assert len(store.similarity_search(texts[0], 10)) == 4
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
async def test_hanavector_delete_with_filter_async(
    texts: List[str], metadatas: List[dict]
) -> None:
    """adelete() with a filter removes only the matching row."""
    table_name = "TEST_TABLE_DELETE_FILTER_ASYNC"
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        metadatas=metadatas,
        embedding=embedding,
        table_name=table_name,
    )
    assert len(store.similarity_search(texts[0], 10)) == 5
    # Exactly one document matches start=100/end=200; removing it leaves four.
    assert await store.adelete(filter={"start": 100, "end": 200})
    assert len(store.similarity_search(texts[0], 10)) == 4
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_delete_all_with_empty_filter(
    texts: List[str], metadatas: List[dict]
) -> None:
    """delete() with an empty filter removes every row."""
    table_name = "TEST_TABLE_DELETE_ALL"
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        metadatas=metadatas,
        embedding=embedding,
        table_name=table_name,
    )
    assert len(store.similarity_search(texts[0], 3)) == 3
    # An empty filter matches everything.
    assert store.delete(filter={})
    assert len(store.similarity_search(texts[0], 3)) == 0
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_delete_called_wrong(
    texts: List[str], metadatas: List[dict]
) -> None:
    """delete() rejects a missing filter and the unsupported ids parameter."""
    table_name = "TEST_TABLE_DELETE_FILTER_WRONG"
    drop_table(test_setup.conn, table_name)
    vectorDB = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        metadatas=metadatas,
        embedding=embedding,
        table_name=table_name,
    )
    # pytest.raises replaces the two manual exception-flag patterns used
    # before, matching the style of the other raise-checks in this suite.
    # Calling delete without a filter must fail.
    with pytest.raises(ValueError):
        vectorDB.delete()
    # Deletion by ids is not supported and must fail as well.
    with pytest.raises(ValueError):
        vectorDB.delete(ids=["id1", "id"], filter={"start": 100, "end": 200})
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_max_marginal_relevance_search(texts: List[str]) -> None:
    """MMR search returns k diverse results led by the best match."""
    table_name = "TEST_TABLE_MAX_RELEVANCE"
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        embedding=embedding,
        table_name=table_name,
    )
    results = store.max_marginal_relevance_search(texts[0], k=2, fetch_k=20)
    assert len(results) == 2
    assert results[0].page_content == texts[0]
    assert results[1].page_content != texts[0]
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_max_marginal_relevance_search_vector(texts: List[str]) -> None:
    """MMR search by a raw query vector mirrors the text-based behavior."""
    table_name = "TEST_TABLE_MAX_RELEVANCE_VECTOR"
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        embedding=embedding,
        table_name=table_name,
    )
    results = store.max_marginal_relevance_search_by_vector(
        embedding.embed_query(texts[0]), k=2, fetch_k=20
    )
    assert len(results) == 2
    assert results[0].page_content == texts[0]
    assert results[1].page_content != texts[0]
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
async def test_hanavector_max_marginal_relevance_search_async(texts: List[str]) -> None:
    """Async MMR search mirrors the synchronous variant's results."""
    table_name = "TEST_TABLE_MAX_RELEVANCE_ASYNC"
    # Start from a clean table populated with the fixture texts.
    drop_table(test_setup.conn, table_name)
    db = HanaDB.from_texts(
        texts=texts,
        embedding=embedding,
        connection=test_setup.conn,
        table_name=table_name,
    )
    results = await db.amax_marginal_relevance_search(texts[0], k=2, fetch_k=20)
    assert len(results) == 2
    # Best hit is the query text itself; the second must be a different doc.
    assert results[0].page_content == texts[0]
    assert results[1].page_content != texts[0]
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_filter_prepared_statement_params(
    texts: List[str], metadatas: List[dict]
) -> None:
    """Metadata JSON values are reachable via literal and prepared SQL filters."""
    table_name = "TEST_TABLE_FILTER_PARAM"
    # Delete table if it exists
    drop_table(test_setup.conn, table_name)
    # Create and fill the table
    HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        metadatas=metadatas,
        embedding=embedding,
        table_name=table_name,
    )
    cur = test_setup.conn.cursor()
    try:
        # Literal integer comparison on the JSON metadata column.
        sql_str = (
            f"SELECT * FROM {table_name} "
            f"WHERE JSON_VALUE(VEC_META, '$.start') = '100'"
        )
        cur.execute(sql_str)
        assert len(cur.fetchall()) == 1
        # Same query as a prepared statement. Note the one-element tuple:
        # the DB-API expects a *sequence* of parameters, not a bare scalar
        # (the original `(query_value)` was missing the trailing comma).
        sql_str = (
            f"SELECT * FROM {table_name} WHERE JSON_VALUE(VEC_META, '$.start') = ?"
        )
        cur.execute(sql_str, (100,))
        assert len(cur.fetchall()) == 1
        # String-valued metadata, literal and prepared.
        sql_str = (
            f"SELECT * FROM {table_name} "
            f"WHERE JSON_VALUE(VEC_META, '$.quality') = 'good'"
        )
        cur.execute(sql_str)
        assert len(cur.fetchall()) == 1
        sql_str = (
            f"SELECT * FROM {table_name} WHERE JSON_VALUE(VEC_META, '$.quality') = ?"
        )
        cur.execute(sql_str, ("good",))
        assert len(cur.fetchall()) == 1
        # Boolean metadata is stored as JSON true/false literals.
        sql_str = (
            f"SELECT * FROM {table_name} "
            f"WHERE JSON_VALUE(VEC_META, '$.ready') = false"
        )
        cur.execute(sql_str)
        assert len(cur.fetchall()) == 1
        sql_str = (
            f"SELECT * FROM {table_name} WHERE JSON_VALUE(VEC_META, '$.ready') = ?"
        )
        cur.execute(sql_str, ("true",))
        assert len(cur.fetchall()) == 3
        cur.execute(sql_str, ("false",))
        assert len(cur.fetchall()) == 1
    finally:
        # Always release the cursor, even if an assertion fails.
        cur.close()
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_invalid_metadata_keys(texts: List[str], metadatas: List[dict]) -> None:
    """Metadata keys containing spaces or special characters are rejected.

    Note: the skipif marker was missing here although every sibling test in
    this module carries it; without hanadb installed this test cannot run.
    """
    table_name = "TEST_TABLE_INVALID_METADATA"
    # Delete table if it exists
    drop_table(test_setup.conn, table_name)
    # A key containing a space is invalid.
    invalid_metadatas = [
        {"sta rt": 0, "end": 100, "quality": "good", "ready": True},
    ]
    with pytest.raises(ValueError):
        HanaDB.from_texts(
            connection=test_setup.conn,
            texts=texts,
            metadatas=invalid_metadatas,
            embedding=embedding,
            table_name=table_name,
        )
    # A key containing a slash is invalid as well.
    invalid_metadatas = [
        {"sta/nrt": 0, "end": 100, "quality": "good", "ready": True},
    ]
    with pytest.raises(ValueError):
        HanaDB.from_texts(
            connection=test_setup.conn,
            texts=texts,
            metadatas=invalid_metadatas,
            embedding=embedding,
            table_name=table_name,
        )
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_table_mixed_case_names(texts: List[str]) -> None:
    """Mixed-case table and column names are quoted correctly end to end."""
    table_name = "MyTableName"
    store = HanaDB(
        connection=test_setup.conn,
        embedding=embedding,
        distance_strategy=DistanceStrategy.COSINE,
        table_name=table_name,
        content_column="TextColumn",
        metadata_column="MetaColumn",
        vector_column="VectorColumn",
    )
    store.add_texts(texts=texts)
    # Every input text must have produced exactly one row in the quoted table.
    row_count = -1
    cur = test_setup.conn.cursor()
    cur.execute(f'SELECT COUNT(*) FROM "{table_name}"')
    if cur.has_result_set():
        row_count = cur.fetchall()[0][0]
    assert row_count == len(texts)
    # Similarity search must still find the exact text.
    assert store.similarity_search(texts[0], 1)[0].page_content == texts[0]
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_hanavector_enhanced_filter_1() -> None:
    """The shared fixture documents load into a freshly created filter table."""
    table_name = "TEST_TABLE_ENHANCED_FILTER_1"
    # Recreate the table from scratch before loading the shared documents.
    drop_table(test_setup.conn, table_name)
    store = HanaDB(
        embedding=embedding,
        connection=test_setup.conn,
        table_name=table_name,
    )
    store.add_documents(DOCUMENTS)
@pytest.mark.parametrize("test_filter, expected_ids", TYPE_1_FILTERING_TEST_CASES)
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_pgvector_with_with_metadata_filters_1(
    test_filter: Dict[str, Any],
    expected_ids: List[int],
) -> None:
    """Type-1 metadata filters return exactly the expected document ids."""
    table_name = "TEST_TABLE_ENHANCED_FILTER_1"
    # Rebuild the table with the shared fixture documents for every case.
    drop_table(test_setup.conn, table_name)
    store = HanaDB(
        embedding=embedding,
        connection=test_setup.conn,
        table_name=table_name,
    )
    store.add_documents(DOCUMENTS)
    found = [
        doc.metadata["id"]
        for doc in store.similarity_search("meow", k=5, filter=test_filter)
    ]
    # Same cardinality and a subset relation together force an exact match.
    assert len(found) == len(expected_ids), test_filter
    assert set(found).issubset(expected_ids), test_filter
@pytest.mark.parametrize("test_filter, expected_ids", TYPE_2_FILTERING_TEST_CASES)
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_pgvector_with_with_metadata_filters_2(
    test_filter: Dict[str, Any],
    expected_ids: List[int],
) -> None:
    """Type-2 metadata filters return exactly the expected document ids."""
    table_name = "TEST_TABLE_ENHANCED_FILTER_2"
    # Rebuild the table with the shared fixture documents for every case.
    drop_table(test_setup.conn, table_name)
    store = HanaDB(
        embedding=embedding,
        connection=test_setup.conn,
        table_name=table_name,
    )
    store.add_documents(DOCUMENTS)
    found = [
        doc.metadata["id"]
        for doc in store.similarity_search("meow", k=5, filter=test_filter)
    ]
    # Same cardinality and a subset relation together force an exact match.
    assert len(found) == len(expected_ids), test_filter
    assert set(found).issubset(expected_ids), test_filter
@pytest.mark.parametrize("test_filter, expected_ids", TYPE_3_FILTERING_TEST_CASES)
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_pgvector_with_with_metadata_filters_3(
    test_filter: Dict[str, Any],
    expected_ids: List[int],
) -> None:
    """Type-3 metadata filters return exactly the expected document ids."""
    table_name = "TEST_TABLE_ENHANCED_FILTER_3"
    # Rebuild the table with the shared fixture documents for every case.
    drop_table(test_setup.conn, table_name)
    store = HanaDB(
        embedding=embedding,
        connection=test_setup.conn,
        table_name=table_name,
    )
    store.add_documents(DOCUMENTS)
    found = [
        doc.metadata["id"]
        for doc in store.similarity_search("meow", k=5, filter=test_filter)
    ]
    # Same cardinality and a subset relation together force an exact match.
    assert len(found) == len(expected_ids), test_filter
    assert set(found).issubset(expected_ids), test_filter
@pytest.mark.parametrize("test_filter, expected_ids", TYPE_4_FILTERING_TEST_CASES)
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_pgvector_with_with_metadata_filters_4(
    test_filter: Dict[str, Any],
    expected_ids: List[int],
) -> None:
    """Type-4 metadata filters return exactly the expected document ids."""
    table_name = "TEST_TABLE_ENHANCED_FILTER_4"
    # Rebuild the table with the shared fixture documents for every case.
    drop_table(test_setup.conn, table_name)
    store = HanaDB(
        embedding=embedding,
        connection=test_setup.conn,
        table_name=table_name,
    )
    store.add_documents(DOCUMENTS)
    found = [
        doc.metadata["id"]
        for doc in store.similarity_search("meow", k=5, filter=test_filter)
    ]
    # Same cardinality and a subset relation together force an exact match.
    assert len(found) == len(expected_ids), test_filter
    assert set(found).issubset(expected_ids), test_filter
@pytest.mark.parametrize("test_filter, expected_ids", TYPE_4B_FILTERING_TEST_CASES)
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_pgvector_with_with_metadata_filters_4b(
    test_filter: Dict[str, Any],
    expected_ids: List[int],
) -> None:
    """Type-4b metadata filters return exactly the expected document ids."""
    table_name = "TEST_TABLE_ENHANCED_FILTER_4B"
    # Rebuild the table with the shared fixture documents for every case.
    drop_table(test_setup.conn, table_name)
    store = HanaDB(
        embedding=embedding,
        connection=test_setup.conn,
        table_name=table_name,
    )
    store.add_documents(DOCUMENTS)
    found = [
        doc.metadata["id"]
        for doc in store.similarity_search("meow", k=5, filter=test_filter)
    ]
    # Same cardinality and a subset relation together force an exact match.
    assert len(found) == len(expected_ids), test_filter
    assert set(found).issubset(expected_ids), test_filter
@pytest.mark.parametrize("test_filter, expected_ids", TYPE_5_FILTERING_TEST_CASES)
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_pgvector_with_with_metadata_filters_5(
    test_filter: Dict[str, Any],
    expected_ids: List[int],
) -> None:
    """Type-5 metadata filters return exactly the expected document ids."""
    table_name = "TEST_TABLE_ENHANCED_FILTER_5"
    # Rebuild the table with the shared fixture documents for every case.
    drop_table(test_setup.conn, table_name)
    store = HanaDB(
        embedding=embedding,
        connection=test_setup.conn,
        table_name=table_name,
    )
    store.add_documents(DOCUMENTS)
    found = [
        doc.metadata["id"]
        for doc in store.similarity_search("meow", k=5, filter=test_filter)
    ]
    # Same cardinality and a subset relation together force an exact match.
    assert len(found) == len(expected_ids), test_filter
    assert set(found).issubset(expected_ids), test_filter
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_preexisting_specific_columns_for_metadata_fill(
    texts: List[str], metadatas: List[dict]
) -> None:
    """Metadata values are projected into pre-existing specific columns."""
    table_name = "PREEXISTING_FILTER_COLUMNS"
    # drop_table(test_setup.conn, table_name)
    sql_str = (
        f'CREATE TABLE "{table_name}" ('
        f'"VEC_TEXT" NCLOB, '
        f'"VEC_META" NCLOB, '
        f'"VEC_VECTOR" REAL_VECTOR, '
        f'"Owner" NVARCHAR(100), '
        f'"quality" NVARCHAR(100));'
    )
    # Create the cursor *before* the try block so the finally clause can
    # never reference an unbound name if cursor creation itself fails.
    cur = test_setup.conn.cursor()
    try:
        cur.execute(sql_str)
    finally:
        cur.close()
    vectorDB = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        metadatas=metadatas,
        embedding=embedding,
        table_name=table_name,
        specific_metadata_columns=["Owner", "quality"],
    )
    # Verify the "quality" values were written into the dedicated column.
    c = 0
    cur = test_setup.conn.cursor()
    try:
        cur.execute(f"SELECT COUNT(*) FROM {table_name} WHERE \"quality\"='ugly'")
        if cur.has_result_set():
            rows = cur.fetchall()
            c = rows[0][0]
    finally:
        cur.close()
    assert c == 3
    # Filters on specific columns and on JSON metadata both keep working.
    docs = vectorDB.similarity_search("hello", k=5, filter={"quality": "good"})
    assert len(docs) == 1
    assert docs[0].page_content == "foo"
    docs = vectorDB.similarity_search("hello", k=5, filter={"start": 100})
    assert len(docs) == 1
    assert docs[0].page_content == "bar"
    docs = vectorDB.similarity_search(
        "hello", k=5, filter={"start": 100, "quality": "good"}
    )
    assert len(docs) == 0
    docs = vectorDB.similarity_search(
        "hello", k=5, filter={"start": 0, "quality": "good"}
    )
    assert len(docs) == 1
    assert docs[0].page_content == "foo"
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_preexisting_specific_columns_for_metadata_via_array(
    texts: List[str], metadatas: List[dict]
) -> None:
    """Only columns listed in specific_metadata_columns get populated."""
    table_name = "PREEXISTING_FILTER_COLUMNS_VIA_ARRAY"
    # drop_table(test_setup.conn, table_name)
    sql_str = (
        f'CREATE TABLE "{table_name}" ('
        f'"VEC_TEXT" NCLOB, '
        f'"VEC_META" NCLOB, '
        f'"VEC_VECTOR" REAL_VECTOR, '
        f'"Owner" NVARCHAR(100), '
        f'"quality" NVARCHAR(100));'
    )
    # Create the cursor *before* the try block so the finally clause can
    # never reference an unbound name if cursor creation itself fails.
    cur = test_setup.conn.cursor()
    try:
        cur.execute(sql_str)
    finally:
        cur.close()
    vectorDB = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        metadatas=metadatas,
        embedding=embedding,
        table_name=table_name,
        specific_metadata_columns=["quality"],
    )
    # "quality" was listed, so its values land in the dedicated column.
    c = 0
    cur = test_setup.conn.cursor()
    try:
        cur.execute(f"SELECT COUNT(*) FROM {table_name} WHERE \"quality\"='ugly'")
        if cur.has_result_set():
            rows = cur.fetchall()
            c = rows[0][0]
    finally:
        cur.close()
    assert c == 3
    # "Owner" was NOT listed, so its column stays empty.
    cur = test_setup.conn.cursor()
    try:
        cur.execute(f"SELECT COUNT(*) FROM {table_name} WHERE \"Owner\"='Steve'")
        if cur.has_result_set():
            rows = cur.fetchall()
            c = rows[0][0]
    finally:
        cur.close()
    assert c == 0
    # Filtering works on both the specific column and JSON metadata.
    docs = vectorDB.similarity_search("hello", k=5, filter={"quality": "good"})
    assert len(docs) == 1
    assert docs[0].page_content == "foo"
    docs = vectorDB.similarity_search("hello", k=5, filter={"start": 100})
    assert len(docs) == 1
    assert docs[0].page_content == "bar"
    docs = vectorDB.similarity_search(
        "hello", k=5, filter={"start": 100, "quality": "good"}
    )
    assert len(docs) == 0
    docs = vectorDB.similarity_search(
        "hello", k=5, filter={"start": 0, "quality": "good"}
    )
    assert len(docs) == 1
    assert docs[0].page_content == "foo"
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_preexisting_specific_columns_for_metadata_multiple_columns(
    texts: List[str], metadatas: List[dict]
) -> None:
    """Multiple specific metadata columns (string and integer) are filled."""
    table_name = "PREEXISTING_FILTER_MULTIPLE_COLUMNS"
    # drop_table(test_setup.conn, table_name)
    sql_str = (
        f'CREATE TABLE "{table_name}" ('
        f'"VEC_TEXT" NCLOB, '
        f'"VEC_META" NCLOB, '
        f'"VEC_VECTOR" REAL_VECTOR, '
        f'"quality" NVARCHAR(100), '
        f'"start" INTEGER);'
    )
    # Create the cursor *before* the try block so the finally clause can
    # never reference an unbound name if cursor creation itself fails.
    cur = test_setup.conn.cursor()
    try:
        cur.execute(sql_str)
    finally:
        cur.close()
    vectorDB = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        metadatas=metadatas,
        embedding=embedding,
        table_name=table_name,
        specific_metadata_columns=["quality", "start"],
    )
    # Filters against both specific columns behave like JSON-metadata filters.
    docs = vectorDB.similarity_search("hello", k=5, filter={"quality": "good"})
    assert len(docs) == 1
    assert docs[0].page_content == "foo"
    docs = vectorDB.similarity_search("hello", k=5, filter={"start": 100})
    assert len(docs) == 1
    assert docs[0].page_content == "bar"
    docs = vectorDB.similarity_search(
        "hello", k=5, filter={"start": 100, "quality": "good"}
    )
    assert len(docs) == 0
    docs = vectorDB.similarity_search(
        "hello", k=5, filter={"start": 0, "quality": "good"}
    )
    assert len(docs) == 1
    assert docs[0].page_content == "foo"
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_preexisting_specific_columns_for_metadata_empty_columns(
    texts: List[str], metadatas: List[dict]
) -> None:
    """Specific columns still work when some rows leave a column value unset."""
    table_name = "PREEXISTING_FILTER_MULTIPLE_COLUMNS_EMPTY"
    # drop_table(test_setup.conn, table_name)
    sql_str = (
        f'CREATE TABLE "{table_name}" ('
        f'"VEC_TEXT" NCLOB, '
        f'"VEC_META" NCLOB, '
        f'"VEC_VECTOR" REAL_VECTOR, '
        f'"quality" NVARCHAR(100), '
        f'"ready" BOOLEAN, '
        f'"start" INTEGER);'
    )
    # Create the cursor *before* the try block so the finally clause can
    # never reference an unbound name if cursor creation itself fails.
    cur = test_setup.conn.cursor()
    try:
        cur.execute(sql_str)
    finally:
        cur.close()
    vectorDB = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        metadatas=metadatas,
        embedding=embedding,
        table_name=table_name,
        specific_metadata_columns=["quality", "ready", "start"],
    )
    docs = vectorDB.similarity_search("hello", k=5, filter={"quality": "good"})
    assert len(docs) == 1
    assert docs[0].page_content == "foo"
    docs = vectorDB.similarity_search("hello", k=5, filter={"start": 100})
    assert len(docs) == 1
    assert docs[0].page_content == "bar"
    docs = vectorDB.similarity_search(
        "hello", k=5, filter={"start": 100, "quality": "good"}
    )
    assert len(docs) == 0
    docs = vectorDB.similarity_search(
        "hello", k=5, filter={"start": 0, "quality": "good"}
    )
    assert len(docs) == 1
    assert docs[0].page_content == "foo"
    # Boolean specific-column filter matches the three "ready" rows.
    docs = vectorDB.similarity_search("hello", k=5, filter={"ready": True})
    assert len(docs) == 3
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_preexisting_specific_columns_for_metadata_wrong_type_or_non_existing(
    texts: List[str], metadatas: List[dict]
) -> None:
    """Bad specific metadata columns fail: wrong SQL type or missing column."""
    table_name = "PREEXISTING_FILTER_COLUMNS_WRONG_TYPE"
    # drop_table(test_setup.conn, table_name)
    sql_str = (
        f'CREATE TABLE "{table_name}" ('
        f'"VEC_TEXT" NCLOB, '
        f'"VEC_META" NCLOB, '
        f'"VEC_VECTOR" REAL_VECTOR, '
        f'"quality" INTEGER); '
    )
    # Create the cursor *before* the try block so the finally clause can
    # never reference an unbound name if cursor creation itself fails.
    cur = test_setup.conn.cursor()
    try:
        cur.execute(sql_str)
    finally:
        cur.close()
    # Writing the string metadata values into the INTEGER "quality" column
    # must raise a database error from hdbcli.
    with pytest.raises(dbapi.Error):
        HanaDB.from_texts(
            connection=test_setup.conn,
            texts=texts,
            metadatas=metadatas,
            embedding=embedding,
            table_name=table_name,
            specific_metadata_columns=["quality"],
        )
    # Referencing a column that does not exist in the table must also fail.
    with pytest.raises(AttributeError):
        HanaDB.from_texts(
            connection=test_setup.conn,
            texts=texts,
            metadatas=metadatas,
            embedding=embedding,
            table_name=table_name,
            specific_metadata_columns=["NonExistingColumn"],
        )
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_preexisting_specific_columns_for_returned_metadata_completeness(
    texts: List[str], metadatas: List[dict]
) -> None:
    """Returned metadata is complete; unfilled specific columns are omitted."""
    table_name = "PREEXISTING_FILTER_COLUMNS_METADATA_COMPLETENESS"
    # drop_table(test_setup.conn, table_name)
    sql_str = (
        f'CREATE TABLE "{table_name}" ('
        f'"VEC_TEXT" NCLOB, '
        f'"VEC_META" NCLOB, '
        f'"VEC_VECTOR" REAL_VECTOR, '
        f'"quality" NVARCHAR(100), '
        f'"NonExisting" NVARCHAR(100), '
        f'"ready" BOOLEAN, '
        f'"start" INTEGER);'
    )
    # Create the cursor *before* the try block so the finally clause can
    # never reference an unbound name if cursor creation itself fails.
    cur = test_setup.conn.cursor()
    try:
        cur.execute(sql_str)
    finally:
        cur.close()
    vectorDB = HanaDB.from_texts(
        connection=test_setup.conn,
        texts=texts,
        metadatas=metadatas,
        embedding=embedding,
        table_name=table_name,
        specific_metadata_columns=["quality", "ready", "start", "NonExisting"],
    )
    docs = vectorDB.similarity_search("hello", k=5, filter={"quality": "good"})
    assert len(docs) == 1
    assert docs[0].page_content == "foo"
    # All original metadata keys come back on the returned document ...
    assert docs[0].metadata["end"] == 100
    assert docs[0].metadata["start"] == 0
    assert docs[0].metadata["quality"] == "good"
    assert docs[0].metadata["ready"]
    # ... but a specific column with no corresponding metadata is left out.
    assert "NonExisting" not in docs[0].metadata.keys()
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_create_hnsw_index_with_default_values(texts: List[str]) -> None:
    """HNSW index creation with defaults succeeds and search still works."""
    table_name = "TEST_TABLE_HNSW_INDEX_DEFAULT"
    # Start from a clean, freshly populated table.
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        texts=texts,
        embedding=embedding,
        connection=test_setup.conn,
        table_name=table_name,
    )
    # Index creation with all-default parameters must not raise.
    try:
        store.create_hnsw_index()
    except Exception as err:
        pytest.fail(f"Failed to create HNSW index: {err}")
    # A search through the index still returns sensible results.
    results = store.max_marginal_relevance_search(texts[0], k=2, fetch_k=20)
    assert len(results) == 2
    assert results[0].page_content == texts[0]
    assert results[1].page_content != texts[0]
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_create_hnsw_index_with_defined_values(texts: List[str]) -> None:
    """HNSW index creation with explicit parameters succeeds (L2 distance)."""
    table_name = "TEST_TABLE_HNSW_INDEX_DEFINED"
    # Start from a clean table configured for Euclidean distance.
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        texts=texts,
        embedding=embedding,
        connection=test_setup.conn,
        table_name=table_name,
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
    )
    # An explicit index name and tuning parameters must be accepted.
    try:
        store.create_hnsw_index(
            index_name="my_L2_index", ef_search=500, m=100, ef_construction=200
        )
    except Exception as err:
        pytest.fail(f"Failed to create HNSW index with defined values: {err}")
    # The index still serves MMR queries correctly.
    results = store.max_marginal_relevance_search(texts[0], k=2, fetch_k=20)
    assert len(results) == 2
    assert results[0].page_content == texts[0]
    assert results[1].page_content != texts[0]
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_create_hnsw_index_after_initialization(texts: List[str]) -> None:
    """An HNSW index created before any inserts still serves later searches."""
    table_name = "TEST_TABLE_HNSW_INDEX_AFTER_INIT"
    drop_table(test_setup.conn, table_name)
    # Create the (still empty) vector store first ...
    store = HanaDB(
        embedding=embedding,
        connection=test_setup.conn,
        table_name=table_name,
    )
    # ... then the index, and only afterwards insert the documents.
    store.create_hnsw_index(
        index_name="index_pre_add", ef_search=400, m=50, ef_construction=150
    )
    store.add_texts(texts=texts)
    results = store.similarity_search(texts[0], k=3)
    assert len(results) == 3
    assert results[0].page_content == texts[0]
    assert results[1].page_content != texts[0]
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_duplicate_hnsw_index_creation(texts: List[str]) -> None:
    """Creating a second HNSW index on the same vector column must fail."""
    table_name = "TEST_TABLE_HNSW_DUPLICATE_INDEX"
    # Start from a clean, freshly populated table.
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        texts=texts,
        embedding=embedding,
        connection=test_setup.conn,
        table_name=table_name,
    )
    # The first index creation succeeds.
    store.create_hnsw_index(
        index_name="index_cosine",
        ef_search=300,
        m=80,
        ef_construction=100,
    )
    # A second creation attempt with the same settings must raise.
    with pytest.raises(Exception):
        store.create_hnsw_index(ef_search=300, m=80, ef_construction=100)
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_create_hnsw_index_invalid_m_value(texts: List[str]) -> None:
    """Too-small and too-large `m` values are rejected with ValueError."""
    table_name = "TEST_TABLE_HNSW_INVALID_M"
    # Start from a clean, freshly populated table.
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        texts=texts,
        embedding=embedding,
        connection=test_setup.conn,
        table_name=table_name,
    )
    # Below the accepted minimum.
    with pytest.raises(ValueError):
        store.create_hnsw_index(m=3)
    # Above the accepted maximum.
    with pytest.raises(ValueError):
        store.create_hnsw_index(m=1001)
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_create_hnsw_index_invalid_ef_construction(texts: List[str]) -> None:
    """Too-small and too-large `ef_construction` values raise ValueError."""
    table_name = "TEST_TABLE_HNSW_INVALID_EF_CONSTRUCTION"
    # Start from a clean, freshly populated table.
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        texts=texts,
        embedding=embedding,
        connection=test_setup.conn,
        table_name=table_name,
    )
    # Below the accepted minimum.
    with pytest.raises(ValueError):
        store.create_hnsw_index(ef_construction=0)
    # Above the accepted maximum.
    with pytest.raises(ValueError):
        store.create_hnsw_index(ef_construction=100001)
@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
def test_create_hnsw_index_invalid_ef_search(texts: List[str]) -> None:
    """Too-small and too-large `ef_search` values raise ValueError."""
    table_name = "TEST_TABLE_HNSW_INVALID_EF_SEARCH"
    # Start from a clean, freshly populated table.
    drop_table(test_setup.conn, table_name)
    store = HanaDB.from_texts(
        texts=texts,
        embedding=embedding,
        connection=test_setup.conn,
        table_name=table_name,
    )
    # Below the accepted minimum.
    with pytest.raises(ValueError):
        store.create_hnsw_index(ef_search=0)
    # Above the accepted maximum.
    with pytest.raises(ValueError):
        store.create_hnsw_index(ef_search=100001)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_infinispanvs.py | """Test Infinispan functionality."""
import warnings
from typing import Any, List, Optional
import httpx
import pytest
from langchain_core.documents import Document
from langchain_community.vectorstores.infinispanvs import InfinispanVS
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
"""
cd tests/integration_tests/vectorstores/docker-compose
./infinispan.sh
The current Infinispan implementation relies on httpx: run
`pip install "httpx[http2]"` if it is not installed. HTTP/2 is enabled
by default; if it is not wanted, use `pip install "httpx"`.
"""
def _infinispan_setup_noautoconf(**kwargs: Any) -> None:
    """Reset the Infinispan cache/schema and provision the test schema by hand.

    Used by tests that run with ``auto_config=False``, where the protobuf
    schema and the cache must exist before documents are inserted.
    """
    ispnvs = InfinispanVS(http2=_hasHttp2(), auto_config=False, **kwargs)
    # Drop any state left over from a previous run.
    ispnvs.cache_delete()
    ispnvs.schema_delete()
    # Protobuf schema for the indexed test entity: a 10-dimensional vector
    # plus the text/label/page metadata fields the tests assert on.
    proto = """
    /**
     * @Indexed
     */
    message vector {
    /**
     * @Vector(dimension=10)
     */
    repeated float vector = 1;
    optional string text = 2;
    optional string label = 3;
    optional int32 page = 4;
    }
    """
    ispnvs.schema_create(proto)
    ispnvs.cache_create()
    # Clear the index so earlier runs cannot leak search results.
    ispnvs.cache_index_clear()
def _infinispanvs_from_texts(
    metadatas: Optional[List[dict]] = None,
    ids: Optional[List[str]] = None,
    clear_old: Optional[bool] = True,
    auto_config: Optional[bool] = False,
    **kwargs: Any,
) -> InfinispanVS:
    """Build an InfinispanVS store preloaded with the shared fake texts.

    Each caller-provided metadata dict is augmented with the corresponding
    ``{"text": ...}`` entry so stored metadata always carries the source text.
    """
    texts = [{"text": t} for t in fake_texts]
    if metadatas is None:
        metadatas = texts
    else:
        # Merge the text payload into each metadata dict. A plain loop:
        # dict.update is called for its side effect, so a list comprehension
        # would build a throwaway list of Nones.
        for meta, text_entry in zip(metadatas, texts):
            meta.update(text_entry)
    return InfinispanVS.from_texts(
        fake_texts,
        FakeEmbeddings(),
        metadatas=metadatas,
        ids=ids,
        clear_old=clear_old,
        auto_config=auto_config,
        http2=_hasHttp2(),
        **kwargs,
    )
def _hasHttp2() -> bool:
    """Return True if httpx can create an HTTP/2 client (h2 extra installed)."""
    try:
        # Constructing the client fails when the optional http2 support is
        # missing; that is exactly what we probe for here.
        client = httpx.Client(http2=True)
    except Exception:
        return False
    # Close the probe client so it does not leak its connection pool.
    client.close()
    return True
@pytest.mark.parametrize("autoconfig", [False, True])
@pytest.mark.parametrize(
    "conn_opts",
    [
        # Anonymous connection with default host.
        {},
        # Authenticated plain-HTTP connection.
        {
            "user": "user",
            "password": "password",
            "hosts": ["localhost:11232"],
            "schema": "http",
        },
        # Authenticated HTTPS connection without certificate verification.
        {
            "user": "user",
            "password": "password",
            "hosts": ["localhost:11242"],
            "schema": "https",
            "verify": False,
        },
    ],
)
class TestBasic:
    """End-to-end InfinispanVS tests.

    Every test runs for each combination of auto-configuration on/off and
    the three connection setups parametrized above.
    """

    def test_infinispan(self, autoconfig: bool, conn_opts: dict) -> None:
        """Test end to end construction and search."""
        # Manual schema/cache provisioning is only needed without auto_config.
        if not autoconfig:
            _infinispan_setup_noautoconf(**conn_opts)
        docsearch = _infinispanvs_from_texts(auto_config=autoconfig, **conn_opts)
        output = docsearch.similarity_search("foo", k=1)
        assert output == [Document(page_content="foo")]

    def test_infinispan_with_auth(self, autoconfig: bool, conn_opts: dict) -> None:
        """Test end to end construction and search."""
        if not autoconfig:
            _infinispan_setup_noautoconf(**conn_opts)
        docsearch = _infinispanvs_from_texts(auto_config=autoconfig, **conn_opts)
        output = docsearch.similarity_search("foo", k=1)
        assert output == [Document(page_content="foo")]

    def test_infinispan_with_metadata(self, autoconfig: bool, conn_opts: dict) -> None:
        """Test with metadata"""
        if not autoconfig:
            _infinispan_setup_noautoconf(**conn_opts)
        # One identical metadata dict per fake text.
        meta = []
        for _ in range(len(fake_texts)):
            meta.append({"label": "test"})
        docsearch = _infinispanvs_from_texts(
            metadatas=meta, auto_config=autoconfig, **conn_opts
        )
        output = docsearch.similarity_search("foo", k=1)
        assert output == [Document(page_content="foo", metadata={"label": "test"})]

    def test_infinispan_with_metadata_with_output_fields(
        self, autoconfig: bool, conn_opts: dict
    ) -> None:
        """Test with metadata"""
        if not autoconfig:
            _infinispan_setup_noautoconf(**conn_opts)
        metadatas = [
            {"page": i, "label": "label" + str(i)} for i in range(len(fake_texts))
        ]
        # Restrict the returned fields to label/page/text via configuration.
        c = {"output_fields": ["label", "page", "text"]}
        docsearch = _infinispanvs_from_texts(
            metadatas=metadatas, configuration=c, auto_config=autoconfig, **conn_opts
        )
        output = docsearch.similarity_search("foo", k=1)
        assert output == [
            Document(page_content="foo", metadata={"label": "label0", "page": 0})
        ]

    def test_infinispanvs_with_id(self, autoconfig: bool, conn_opts: dict) -> None:
        """Test with ids"""
        # NOTE(review): unlike the sibling tests, this one does not call
        # _infinispan_setup_noautoconf when autoconfig is off — confirm
        # whether it intentionally relies on state left by earlier tests.
        ids = ["id_" + str(i) for i in range(len(fake_texts))]
        docsearch = _infinispanvs_from_texts(
            ids=ids, auto_config=autoconfig, **conn_opts
        )
        output = docsearch.similarity_search("foo", k=1)
        assert output == [Document(page_content="foo")]

    def test_infinispan_with_score(self, autoconfig: bool, conn_opts: dict) -> None:
        """Test end to end construction and search with scores and IDs."""
        if not autoconfig:
            _infinispan_setup_noautoconf(**conn_opts)
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = _infinispanvs_from_texts(
            metadatas=metadatas, auto_config=autoconfig, **conn_opts
        )
        output = docsearch.similarity_search_with_score("foo", k=3)
        docs = [o[0] for o in output]
        scores = [o[1] for o in output]
        assert docs == [
            Document(page_content="foo", metadata={"page": 0}),
            Document(page_content="bar", metadata={"page": 1}),
            Document(page_content="baz", metadata={"page": 2}),
        ]
        # Scores must be monotonically non-increasing (best match first).
        assert scores[0] >= scores[1] >= scores[2]

    def test_infinispan_add_texts(self, autoconfig: bool, conn_opts: dict) -> None:
        """Test end to end construction and MRR search."""
        if not autoconfig:
            _infinispan_setup_noautoconf(**conn_opts)
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = _infinispanvs_from_texts(
            metadatas=metadatas, auto_config=autoconfig, **conn_opts
        )
        # Adding the three texts on top of the three fixture texts → six docs.
        docsearch.add_texts(texts, metadatas)
        output = docsearch.similarity_search("foo", k=10)
        assert len(output) == 6

    def test_infinispan_no_clear_old(self, autoconfig: bool, conn_opts: dict) -> None:
        """Test end to end construction and MRR search."""
        if not autoconfig:
            _infinispan_setup_noautoconf(**conn_opts)
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = _infinispanvs_from_texts(
            metadatas=metadatas, auto_config=autoconfig, **conn_opts
        )
        del docsearch
        # Re-create without clearing: old entries must survive, doubling the
        # document count. With auto_config an AssertionError is acceptable.
        try:
            docsearch = _infinispanvs_from_texts(
                metadatas=metadatas,
                clear_old=False,
                auto_config=autoconfig,
                **conn_opts,
            )
        except AssertionError:
            if autoconfig:
                return
            else:
                raise
        output = docsearch.similarity_search("foo", k=10)
        assert len(output) == 6
class TestHttp2:
    """Diagnostics: warn once when the httpx http2 extra is unavailable."""

    def test_http2(self) -> None:
        """Emit an installation hint if HTTP/2 support is missing; never fails."""
        try:
            # Close the probe client immediately so it does not leak.
            httpx.Client(http2=True).close()
        except Exception:
            # Missing optional "h2" dependency: the suite falls back to
            # HTTP/1.1, so only warn instead of failing.
            warnings.warn('pip install "httpx[http2]" if you need HTTP/2')
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_bigquery_vector_search.py | """Test BigQuery Vector Search.
In order to run this test, you need to install Google Cloud BigQuery SDK
pip install google-cloud-bigquery
Your end-user credentials would be used to make the calls (make sure you've run
`gcloud auth login` first).
"""
import os
import uuid
import pytest
from langchain_community.vectorstores.bigquery_vector_search import BigQueryVectorSearch
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
TEST_TABLE_NAME = "langchain_test_table"
@pytest.fixture(scope="class")
def store(request: pytest.FixtureRequest) -> BigQueryVectorSearch:
    """BigQueryVectorStore tests context.
    In order to run this test, you define PROJECT environment variable
    with GCP project id.
    Example:
    export PROJECT=...
    """
    from google.cloud import bigquery
    # The dataset name is a per-run UUID (see TestBigQueryVectorStore), so
    # concurrent runs do not collide.
    bigquery.Client(location="US").create_dataset(
        TestBigQueryVectorStore.dataset_name, exists_ok=True
    )
    # The store is stashed on the test class so every test in the class
    # shares one instance (fixture scope is "class").
    TestBigQueryVectorStore.store = BigQueryVectorSearch(
        project_id=os.environ.get("PROJECT", None),  # type: ignore[arg-type]
        embedding=FakeEmbeddings(),
        dataset_name=TestBigQueryVectorStore.dataset_name,
        table_name=TEST_TABLE_NAME,
    )
    TestBigQueryVectorStore.store.add_texts(
        TestBigQueryVectorStore.texts, TestBigQueryVectorStore.metadatas
    )
    def teardown() -> None:
        # Remove the dataset (including its tables) created for this class.
        bigquery.Client(location="US").delete_dataset(
            TestBigQueryVectorStore.dataset_name,
            delete_contents=True,
            not_found_ok=True,
        )
    request.addfinalizer(teardown)
    return TestBigQueryVectorStore.store
class TestBigQueryVectorStore:
    """BigQueryVectorStore tests class."""
    # Unique dataset per run so parallel CI executions do not collide.
    dataset_name = uuid.uuid4().hex
    # Populated by the `store` fixture before the tests run.
    store: BigQueryVectorSearch
    # Fixture corpus: each text is tagged with a coarse "kind" used as the
    # metadata filter in the tests below.
    texts = ["apple", "ice cream", "Saturn", "candy", "banana"]
    metadatas = [
        {
            "kind": "fruit",
        },
        {
            "kind": "treat",
        },
        {
            "kind": "planet",
        },
        {
            "kind": "treat",
        },
        {
            "kind": "fruit",
        },
    ]
    def test_semantic_search(self, store: BigQueryVectorSearch) -> None:
        """Test on semantic similarity."""
        docs = store.similarity_search("food", k=4)
        # Debug output, deliberately kept (noqa suppresses the print lint).
        print(docs)  # noqa: T201
        kinds = [d.metadata["kind"] for d in docs]
        # "food" should surface fruits and treats, but never the planet.
        assert "fruit" in kinds
        assert "treat" in kinds
        assert "planet" not in kinds
    def test_semantic_search_filter_fruits(self, store: BigQueryVectorSearch) -> None:
        """Test on semantic similarity with metadata filter."""
        docs = store.similarity_search("food", filter={"kind": "fruit"})
        kinds = [d.metadata["kind"] for d in docs]
        # The filter must restrict results to fruits only.
        assert "fruit" in kinds
        assert "treat" not in kinds
        assert "planet" not in kinds
    def test_get_doc_by_filter(self, store: BigQueryVectorSearch) -> None:
        """Test on document retrieval with metadata filter."""
        docs = store.get_documents(filter={"kind": "fruit"})
        kinds = [d.metadata["kind"] for d in docs]
        # Pure metadata retrieval (no similarity) must honor the same filter.
        assert "fruit" in kinds
        assert "treat" not in kinds
        assert "planet" not in kinds
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_tencentvectordb.py | """Test TencentVectorDB functionality."""
import time
from typing import List, Optional
from langchain_core.documents import Document
from langchain_community.vectorstores import TencentVectorDB
from langchain_community.vectorstores.tencentvectordb import ConnectionParams
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
def _tencent_vector_db_from_texts(
    metadatas: Optional[List[dict]] = None, drop: bool = True
) -> TencentVectorDB:
    """Build a TencentVectorDB store pre-loaded with the fake corpus.

    NOTE(review): the URL and key below are redacted placeholders; real
    credentials must be supplied for these integration tests to run.
    """
    params = ConnectionParams(
        url="http://10.0.X.X",
        key="eC4bLRy2va******************************",
        username="root",
        timeout=20,
    )
    store = TencentVectorDB.from_texts(
        fake_texts,
        FakeEmbeddings(),
        metadatas=metadatas,
        connection_params=params,
        drop_old=drop,
    )
    return store
def test_tencent_vector_db() -> None:
    """Test end to end construction and search."""
    store = _tencent_vector_db_from_texts()
    results = store.similarity_search("foo", k=1)
    assert results == [Document(page_content="foo")]
def test_tencent_vector_db_with_score() -> None:
    """Test end to end construction and search with scores and IDs."""
    corpus = ["foo", "bar", "baz"]
    metas = [{"page": idx} for idx in range(len(corpus))]
    store = _tencent_vector_db_from_texts(metadatas=metas)
    scored = store.similarity_search_with_score("foo", k=3)
    # Only the documents are compared; scores vary with the backend.
    assert [doc for doc, _ in scored] == [
        Document(page_content="foo", metadata={"page": 0}),
        Document(page_content="bar", metadata={"page": 1}),
        Document(page_content="baz", metadata={"page": 2}),
    ]
def test_tencent_vector_db_max_marginal_relevance_search() -> None:
    """Test end to end construction and MRR search."""
    corpus = ["foo", "bar", "baz"]
    metas = [{"page": idx} for idx in range(len(corpus))]
    store = _tencent_vector_db_from_texts(metadatas=metas)
    results = store.max_marginal_relevance_search("foo", k=2, fetch_k=3)
    assert results == [
        Document(page_content="foo", metadata={"page": 0}),
        Document(page_content="bar", metadata={"page": 1}),
    ]
def test_tencent_vector_db_add_extra() -> None:
    """Test adding extra texts on top of an already populated collection."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = _tencent_vector_db_from_texts(metadatas=metadatas)
    docsearch.add_texts(texts, metadatas)
    # Give the remote index a moment to absorb the new rows before searching.
    time.sleep(3)
    output = docsearch.similarity_search("foo", k=10)
    # Helper-ingested corpus plus the batch added above — six docs in total.
    assert len(output) == 6
def test_tencent_vector_db_no_drop() -> None:
    """Test that drop_old=False preserves previously ingested documents."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = _tencent_vector_db_from_texts(metadatas=metadatas)
    del docsearch
    # Re-connect without dropping; the first ingest must still be present.
    docsearch = _tencent_vector_db_from_texts(metadatas=metadatas, drop=False)
    time.sleep(3)
    output = docsearch.similarity_search("foo", k=10)
    # Two ingests of the corpus -> six documents.
    assert len(output) == 6
def test_tencent_vector_db_add_texts_and_search_with_score() -> None:
    """Test add texts to a new-created db and search with score."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    # NOTE(review): placeholder credentials; real values required to run.
    conn_params = ConnectionParams(
        url="http://10.0.X.X",
        key="eC4bLRy2va******************************",
        username="root",
        timeout=20,
    )
    docsearch = TencentVectorDB(
        embedding=FakeEmbeddings(),
        connection_params=conn_params,
    )
    docsearch.add_texts(texts, metadatas)
    # NOTE(review): unlike the other tests there is no sleep before searching;
    # confirm the index is immediately consistent or this may be flaky.
    output = docsearch.similarity_search_with_score("foo", k=3)
    docs = [o[0] for o in output]
    assert docs == [
        Document(page_content="foo", metadata={"page": 0}),
        Document(page_content="bar", metadata={"page": 1}),
        Document(page_content="baz", metadata={"page": 2}),
    ]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_neo4jvector.py | """Test Neo4jVector functionality."""
import os
from math import isclose
from typing import Any, Dict, List, cast
from langchain_core.documents import Document
from yaml import safe_load
from langchain_community.graphs import Neo4jGraph
from langchain_community.vectorstores.neo4j_vector import (
Neo4jVector,
SearchType,
_get_search_index_query,
)
from langchain_community.vectorstores.utils import DistanceStrategy
from tests.integration_tests.vectorstores.fake_embeddings import (
AngularTwoDimensionalEmbeddings,
FakeEmbeddings,
)
from tests.integration_tests.vectorstores.fixtures.filtering_test_cases import (
DOCUMENTS,
TYPE_1_FILTERING_TEST_CASES,
TYPE_2_FILTERING_TEST_CASES,
TYPE_3_FILTERING_TEST_CASES,
TYPE_4_FILTERING_TEST_CASES,
)
# Connection settings for the Neo4j test instance; environment variables
# override the docker-compose defaults below.
url = os.environ.get("NEO4J_URL", "bolt://localhost:7687")
username = os.environ.get("NEO4J_USERNAME", "neo4j")
password = os.environ.get("NEO4J_PASSWORD", "pleaseletmein")
# Dimensionality used by the fake embeddings in this module.
OS_TOKEN_COUNT = 1536
# Shared corpus; FakeEmbeddingsWithOsDimension.embed_query looks the query
# up in this list, so searches must use one of these strings.
texts = ["foo", "bar", "baz", "It is the end of the world. Take shelter!"]
"""
cd tests/integration_tests/vectorstores/docker-compose
docker-compose -f neo4j.yml up
"""
def drop_vector_indexes(store: Neo4jVector) -> None:
    """Cleanup all vector indexes"""
    rows = store.query(
        """
          SHOW INDEXES YIELD name, type
          WHERE type IN ["VECTOR", "FULLTEXT"]
          RETURN name
          """
    )
    for row in rows:
        store.query(f"DROP INDEX `{row['name']}`")
    # Also wipe any data nodes left behind by the tests.
    store.query("MATCH (n) DETACH DELETE n;")
class FakeEmbeddingsWithOsDimension(FakeEmbeddings):
    """Fake embeddings functionality for testing."""

    def embed_documents(self, embedding_texts: List[str]) -> List[List[float]]:
        """Return simple embeddings."""
        # i-th document gets a vector ending in i + 1; all other dims are 1.0.
        prefix = [1.0] * (OS_TOKEN_COUNT - 1)
        return [prefix + [float(idx + 1)] for idx in range(len(embedding_texts))]

    def embed_query(self, text: str) -> List[float]:
        """Return simple embeddings."""
        # Query vector matches the document at the query's position in `texts`.
        return [1.0] * (OS_TOKEN_COUNT - 1) + [float(texts.index(text) + 1)]
def test_neo4jvector() -> None:
    """Test end to end construction and search."""
    store = Neo4jVector.from_texts(
        texts=texts,
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
    )
    # "foo" is in the corpus, so its fake embedding matches the query exactly.
    results = store.similarity_search("foo", k=1)
    assert results == [Document(page_content="foo")]
    drop_vector_indexes(store)
def test_neo4jvector_euclidean() -> None:
    """Test euclidean distance"""
    store = Neo4jVector.from_texts(
        texts=texts,
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
    )
    # Under the euclidean metric, "foo" is still its own nearest neighbour.
    results = store.similarity_search("foo", k=1)
    assert results == [Document(page_content="foo")]
    drop_vector_indexes(store)
def test_neo4jvector_embeddings() -> None:
    """Test end to end construction with embeddings and search."""
    embedder = FakeEmbeddingsWithOsDimension()
    pairs = list(zip(texts, embedder.embed_documents(texts)))
    store = Neo4jVector.from_embeddings(
        text_embeddings=pairs,
        embedding=embedder,
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
    )
    results = store.similarity_search("foo", k=1)
    assert results == [Document(page_content="foo")]
    drop_vector_indexes(store)
def test_neo4jvector_catch_wrong_index_name() -> None:
    """Test if index name is misspelled, but node label and property are correct."""
    text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
    text_embedding_pairs = list(zip(texts, text_embeddings))
    # Populate the store under the default index name.
    Neo4jVector.from_embeddings(
        text_embeddings=text_embedding_pairs,
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
    )
    # "test" is not the real index name; the search below still succeeds,
    # presumably because the store falls back to the default node
    # label/property — TODO confirm the fallback semantics.
    existing = Neo4jVector.from_existing_index(
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        index_name="test",
    )
    output = existing.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo")]
    drop_vector_indexes(existing)
def test_neo4jvector_catch_wrong_node_label() -> None:
    """Test if node label is misspelled, but index name is correct."""
    text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
    text_embedding_pairs = list(zip(texts, text_embeddings))
    # Populate the store under the default index name ("vector").
    Neo4jVector.from_embeddings(
        text_embeddings=text_embedding_pairs,
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
    )
    # Correct index name, wrong node label ("test"); the assertion below
    # shows the index name wins when resolving the store.
    existing = Neo4jVector.from_existing_index(
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        index_name="vector",
        node_label="test",
    )
    output = existing.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo")]
    drop_vector_indexes(existing)
def test_neo4jvector_with_metadatas() -> None:
    """Test end to end construction and search."""
    metas = [{"page": str(i)} for i in range(len(texts))]
    store = Neo4jVector.from_texts(
        texts=texts,
        embedding=FakeEmbeddingsWithOsDimension(),
        metadatas=metas,
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
    )
    # The matching document must carry its metadata through the round-trip.
    results = store.similarity_search("foo", k=1)
    assert results == [Document(page_content="foo", metadata={"page": "0"})]
    drop_vector_indexes(store)
def test_neo4jvector_with_metadatas_with_scores() -> None:
    """Test end to end construction and search."""
    metas = [{"page": str(i)} for i in range(len(texts))]
    store = Neo4jVector.from_texts(
        texts=texts,
        embedding=FakeEmbeddingsWithOsDimension(),
        metadatas=metas,
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
    )
    scored = store.similarity_search_with_score("foo", k=1)
    # Round scores so the comparison is robust to float noise.
    results = [(doc, round(score, 1)) for doc, score in scored]
    assert results == [(Document(page_content="foo", metadata={"page": "0"}), 1.0)]
    drop_vector_indexes(store)
def test_neo4jvector_relevance_score() -> None:
    """Test to make sure the relevance score is scaled to 0-1."""
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = Neo4jVector.from_texts(
        texts=texts,
        embedding=FakeEmbeddingsWithOsDimension(),
        metadatas=metadatas,
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
    )
    output = docsearch.similarity_search_with_relevance_scores("foo", k=3)
    # Expected scores are empirical values produced by the fake embeddings;
    # they are compared with a relative tolerance below.
    expected_output = [
        (Document(page_content="foo", metadata={"page": "0"}), 1.0),
        (Document(page_content="bar", metadata={"page": "1"}), 0.9998376369476318),
        (Document(page_content="baz", metadata={"page": "2"}), 0.9993523359298706),
    ]
    # Check if the length of the outputs matches
    assert len(output) == len(expected_output)
    # Check if each document and its relevance score is close to the expected value
    for (doc, score), (expected_doc, expected_score) in zip(output, expected_output):
        assert doc.page_content == expected_doc.page_content
        assert doc.metadata == expected_doc.metadata
        assert isclose(score, expected_score, rel_tol=1e-5)
    drop_vector_indexes(docsearch)
def test_neo4jvector_retriever_search_threshold() -> None:
    """Test using retriever for searching with threshold."""
    metas = [{"page": str(i)} for i in range(len(texts))]
    store = Neo4jVector.from_texts(
        texts=texts,
        embedding=FakeEmbeddingsWithOsDimension(),
        metadatas=metas,
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
    )
    retriever = store.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"k": 3, "score_threshold": 0.9999},
    )
    # Only the exact match clears the 0.9999 relevance threshold.
    results = retriever.invoke("foo")
    assert results == [
        Document(page_content="foo", metadata={"page": "0"}),
    ]
    drop_vector_indexes(store)
def test_custom_return_neo4jvector() -> None:
    """Test end to end construction and search."""
    store = Neo4jVector.from_texts(
        texts=["test"],
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
        retrieval_query="RETURN 'foo' AS text, score, {test: 'test'} AS metadata",
    )
    # The custom retrieval query overrides the stored text and metadata.
    results = store.similarity_search("foo", k=1)
    assert results == [Document(page_content="foo", metadata={"test": "test"})]
    drop_vector_indexes(store)
def test_neo4jvector_prefer_indexname() -> None:
    """Test using when two indexes are found, prefer by index_name."""
    # Default index holding "foo".
    Neo4jVector.from_texts(
        texts=["foo"],
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
    )
    # Custom index named "foo" holding "bar" under its own label/properties.
    Neo4jVector.from_texts(
        texts=["bar"],
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        index_name="foo",
        node_label="Test",
        embedding_node_property="vector",
        text_node_property="info",
        pre_delete_collection=True,
    )
    existing_index = Neo4jVector.from_existing_index(
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        index_name="foo",
        text_node_property="info",
    )
    # Must resolve to the custom "foo" index, hence "bar" is returned.
    output = existing_index.similarity_search("bar", k=1)
    assert output == [Document(page_content="bar", metadata={})]
    drop_vector_indexes(existing_index)
def test_neo4jvector_prefer_indexname_insert() -> None:
    """Test using when two indexes are found, prefer by index_name."""
    # Default index holding "baz".
    Neo4jVector.from_texts(
        texts=["baz"],
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
    )
    # Custom index named "foo" holding "foo" under its own label/properties.
    Neo4jVector.from_texts(
        texts=["foo"],
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        index_name="foo",
        node_label="Test",
        embedding_node_property="vector",
        text_node_property="info",
        pre_delete_collection=True,
    )
    existing_index = Neo4jVector.from_existing_index(
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        index_name="foo",
        text_node_property="info",
    )
    # Inserting through the handle must land in the custom index, so the
    # search over that index sees both "bar" and "foo" (not "baz").
    existing_index.add_documents([Document(page_content="bar", metadata={})])
    output = existing_index.similarity_search("bar", k=2)
    assert output == [
        Document(page_content="bar", metadata={}),
        Document(page_content="foo", metadata={}),
    ]
    drop_vector_indexes(existing_index)
def test_neo4jvector_hybrid() -> None:
    """Test end to end construction with hybrid search."""
    embedder = FakeEmbeddingsWithOsDimension()
    pairs = list(zip(texts, embedder.embed_documents(texts)))
    store = Neo4jVector.from_embeddings(
        text_embeddings=pairs,
        embedding=embedder,
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
        search_type=SearchType.HYBRID,
    )
    results = store.similarity_search("foo", k=1)
    assert results == [Document(page_content="foo")]
    drop_vector_indexes(store)
def test_neo4jvector_hybrid_deduplicate() -> None:
    """Test result deduplication with hybrid search."""
    embedder = FakeEmbeddingsWithOsDimension()
    pairs = list(zip(texts, embedder.embed_documents(texts)))
    store = Neo4jVector.from_embeddings(
        text_embeddings=pairs,
        embedding=embedder,
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
        search_type=SearchType.HYBRID,
    )
    # Vector and keyword branches both match "foo"; results must be deduped.
    results = store.similarity_search("foo", k=3)
    assert results == [
        Document(page_content="foo"),
        Document(page_content="bar"),
        Document(page_content="baz"),
    ]
    drop_vector_indexes(store)
def test_neo4jvector_hybrid_retrieval_query() -> None:
    """Test custom retrieval_query with hybrid search."""
    embedder = FakeEmbeddingsWithOsDimension()
    pairs = list(zip(texts, embedder.embed_documents(texts)))
    store = Neo4jVector.from_embeddings(
        text_embeddings=pairs,
        embedding=embedder,
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
        search_type=SearchType.HYBRID,
        retrieval_query="RETURN 'moo' AS text, score, {test: 'test'} AS metadata",
    )
    # The constant retrieval query replaces the stored text with 'moo'.
    results = store.similarity_search("foo", k=1)
    assert results == [Document(page_content="moo", metadata={"test": "test"})]
    drop_vector_indexes(store)
def test_neo4jvector_hybrid_retrieval_query2() -> None:
    """Test custom retrieval_query with hybrid search."""
    embedder = FakeEmbeddingsWithOsDimension()
    pairs = list(zip(texts, embedder.embed_documents(texts)))
    store = Neo4jVector.from_embeddings(
        text_embeddings=pairs,
        embedding=embedder,
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
        search_type=SearchType.HYBRID,
        retrieval_query="RETURN node.text AS text, score, {test: 'test'} AS metadata",
    )
    # Here the retrieval query keeps the node's text but injects metadata.
    results = store.similarity_search("foo", k=1)
    assert results == [Document(page_content="foo", metadata={"test": "test"})]
    drop_vector_indexes(store)
def test_neo4jvector_missing_keyword() -> None:
    """Test hybrid search with missing keyword_index_search.

    Requesting hybrid search without a keyword index name must raise a
    ValueError with the documented message.
    """
    text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
    text_embedding_pairs = list(zip(texts, text_embeddings))
    docsearch = Neo4jVector.from_embeddings(
        text_embeddings=text_embedding_pairs,
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
    )
    try:
        Neo4jVector.from_existing_index(
            embedding=FakeEmbeddingsWithOsDimension(),
            url=url,
            username=username,
            password=password,
            index_name="vector",
            search_type=SearchType.HYBRID,
        )
    except ValueError as e:
        assert str(e) == (
            "keyword_index name has to be specified when " "using hybrid search option"
        )
    else:
        # Previously the test passed silently when no exception was raised;
        # the missing-keyword-index error must actually occur.
        raise AssertionError("Expected ValueError for missing keyword index")
    drop_vector_indexes(docsearch)
def test_neo4jvector_hybrid_from_existing() -> None:
    """Test loading an existing index for hybrid search with both index names."""
    text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
    text_embedding_pairs = list(zip(texts, text_embeddings))
    Neo4jVector.from_embeddings(
        text_embeddings=text_embedding_pairs,
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
        search_type=SearchType.HYBRID,
    )
    # Hybrid requires both the vector and the keyword index names.
    existing = Neo4jVector.from_existing_index(
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        index_name="vector",
        keyword_index_name="keyword",
        search_type=SearchType.HYBRID,
    )
    output = existing.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo")]
    drop_vector_indexes(existing)
def test_neo4jvector_from_existing_graph() -> None:
    """Test from_existing_graph with a single property."""
    graph = Neo4jVector.from_texts(
        texts=["test"],
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        index_name="foo",
        node_label="Foo",
        embedding_node_property="vector",
        text_node_property="info",
        pre_delete_collection=True,
    )
    # Replace the seeded data with plain, un-embedded nodes.
    graph.query("MATCH (n) DETACH DELETE n")
    graph.query("CREATE (:Test {name:'Foo'})," "(:Test {name:'Bar'})")
    existing = Neo4jVector.from_existing_graph(
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        index_name="vector",
        node_label="Test",
        text_node_properties=["name"],
        embedding_node_property="embedding",
    )
    output = existing.similarity_search("foo", k=1)
    # Page content is rendered as "\n<property>: <value>".
    assert output == [Document(page_content="\nname: Foo")]
    drop_vector_indexes(existing)
def test_neo4jvector_from_existing_graph_hybrid() -> None:
    """Test from_existing_graph hybrid with a single property."""
    graph = Neo4jVector.from_texts(
        texts=["test"],
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        index_name="foo",
        node_label="Foo",
        embedding_node_property="vector",
        text_node_property="info",
        pre_delete_collection=True,
    )
    # Replace the seeded data with plain, un-embedded nodes.
    graph.query("MATCH (n) DETACH DELETE n")
    graph.query("CREATE (:Test {name:'foo'})," "(:Test {name:'Bar'})")
    existing = Neo4jVector.from_existing_graph(
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        index_name="vector",
        node_label="Test",
        text_node_properties=["name"],
        embedding_node_property="embedding",
        search_type=SearchType.HYBRID,
    )
    output = existing.similarity_search("foo", k=1)
    # Page content is rendered as "\n<property>: <value>".
    assert output == [Document(page_content="\nname: foo")]
    drop_vector_indexes(existing)
def test_neo4jvector_from_existing_graph_multiple_properties() -> None:
    """Test from_existing_graph with two properties."""
    graph = Neo4jVector.from_texts(
        texts=["test"],
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        index_name="foo",
        node_label="Foo",
        embedding_node_property="vector",
        text_node_property="info",
        pre_delete_collection=True,
    )
    # Replace the seeded data with plain, un-embedded nodes.
    graph.query("MATCH (n) DETACH DELETE n")
    graph.query("CREATE (:Test {name:'Foo', name2: 'Fooz'})," "(:Test {name:'Bar'})")
    existing = Neo4jVector.from_existing_graph(
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        index_name="vector",
        node_label="Test",
        text_node_properties=["name", "name2"],
        embedding_node_property="embedding",
    )
    output = existing.similarity_search("foo", k=1)
    # Both requested properties are concatenated into the page content.
    assert output == [Document(page_content="\nname: Foo\nname2: Fooz")]
    drop_vector_indexes(existing)
def test_neo4jvector_from_existing_graph_multiple_properties_hybrid() -> None:
    """Test from_existing_graph hybrid with two properties."""
    graph = Neo4jVector.from_texts(
        texts=["test"],
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        index_name="foo",
        node_label="Foo",
        embedding_node_property="vector",
        text_node_property="info",
        pre_delete_collection=True,
    )
    # Replace the seeded data with plain, un-embedded nodes.
    graph.query("MATCH (n) DETACH DELETE n")
    graph.query("CREATE (:Test {name:'Foo', name2: 'Fooz'})," "(:Test {name:'Bar'})")
    existing = Neo4jVector.from_existing_graph(
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        index_name="vector",
        node_label="Test",
        text_node_properties=["name", "name2"],
        embedding_node_property="embedding",
        search_type=SearchType.HYBRID,
    )
    output = existing.similarity_search("foo", k=1)
    # Both requested properties are concatenated into the page content.
    assert output == [Document(page_content="\nname: Foo\nname2: Fooz")]
    drop_vector_indexes(existing)
def test_neo4jvector_special_character() -> None:
    """Test removing lucene."""
    embedder = FakeEmbeddingsWithOsDimension()
    pairs = list(zip(texts, embedder.embed_documents(texts)))
    store = Neo4jVector.from_embeddings(
        text_embeddings=pairs,
        embedding=embedder,
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
        search_type=SearchType.HYBRID,
    )
    # Query contains characters that are special in Lucene full-text syntax.
    results = store.similarity_search(
        "It is the end of the world. Take shelter!", k=1
    )
    assert results == [
        Document(page_content="It is the end of the world. Take shelter!", metadata={})
    ]
    drop_vector_indexes(store)
def test_hybrid_score_normalization() -> None:
    """Test if we can get two 1.0 documents with RRF"""
    text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
    text_embedding_pairs = list(zip(["foo"], text_embeddings))
    docsearch = Neo4jVector.from_embeddings(
        text_embeddings=text_embedding_pairs,
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
        search_type=SearchType.HYBRID,
    )
    # Remove deduplication part of the query
    # NOTE(review): str.rstrip strips a *character set*, not a suffix — this
    # only behaves as intended while the query's trailing characters all fall
    # inside the given set; str.removesuffix would state the intent exactly.
    # Verify against _get_search_index_query's output before changing.
    rrf_query = (
        _get_search_index_query(SearchType.HYBRID)
        .rstrip("WITH node, max(score) AS score ORDER BY score DESC LIMIT $k")
        .replace("UNION", "UNION ALL")
        + "RETURN node.text AS text, score LIMIT 2"
    )
    output = docsearch.query(
        rrf_query,
        params={
            "index": "vector",
            "k": 1,
            "embedding": FakeEmbeddingsWithOsDimension().embed_query("foo"),
            "query": "foo",
            "keyword_index": "keyword",
        },
    )
    # Both FT and Vector must return 1.0 score
    assert output == [{"text": "foo", "score": 1.0}, {"text": "foo", "score": 1.0}]
    drop_vector_indexes(docsearch)
def test_index_fetching() -> None:
    """testing correct index creation and fetching"""
    embeddings = FakeEmbeddings()
    def create_store(
        node_label: str, index: str, text_properties: List[str]
    ) -> Neo4jVector:
        # Build a vector index over the given node label/properties.
        return Neo4jVector.from_existing_graph(
            embedding=embeddings,
            url=url,
            username=username,
            password=password,
            index_name=index,
            node_label=node_label,
            text_node_properties=text_properties,
            embedding_node_property="embedding",
        )
    def fetch_store(index_name: str) -> Neo4jVector:
        # Re-attach to an index purely by its name.
        store = Neo4jVector.from_existing_index(
            embedding=embeddings,
            url=url,
            username=username,
            password=password,
            index_name=index_name,
        )
        return store
    # create index 0
    index_0_str = "index0"
    create_store("label0", index_0_str, ["text"])
    # create index 1
    index_1_str = "index1"
    create_store("label1", index_1_str, ["text"])
    # Fetching must return the store for the requested index, not the
    # most recently created one.
    index_1_store = fetch_store(index_1_str)
    assert index_1_store.index_name == index_1_str
    index_0_store = fetch_store(index_0_str)
    assert index_0_store.index_name == index_0_str
    drop_vector_indexes(index_1_store)
    drop_vector_indexes(index_0_store)
def test_retrieval_params() -> None:
    """Test if we use parameters in retrieval query"""
    docsearch = Neo4jVector.from_texts(
        texts=texts,
        embedding=FakeEmbeddings(),
        pre_delete_collection=True,
        retrieval_query="""
        RETURN $test as text, score, {test: $test1} AS metadata
        """,
    )
    # Entries in `params` become Cypher parameters ($test, $test1).
    output = docsearch.similarity_search(
        "Foo", k=2, params={"test": "test", "test1": "test1"}
    )
    assert output == [
        Document(page_content="test", metadata={"test": "test1"}),
        Document(page_content="test", metadata={"test": "test1"}),
    ]
    drop_vector_indexes(docsearch)
def test_retrieval_dictionary() -> None:
    """Test that a map returned as text is serialized into page_content."""
    docsearch = Neo4jVector.from_texts(
        texts=texts,
        embedding=FakeEmbeddings(),
        pre_delete_collection=True,
        retrieval_query="""
        RETURN {
            name:'John',
            age: 30,
            skills: ["Python", "Data Analysis", "Machine Learning"]} as text,
            score, {} AS metadata
        """,
    )
    expected_output = [
        Document(
            page_content=(
                "skills:\n- Python\n- Data Analysis\n- "
                "Machine Learning\nage: 30\nname: John\n"
            )
        )
    ]
    output = docsearch.similarity_search("Foo", k=1)
    # Compare as parsed YAML so key ordering differences don't fail the test.
    def parse_document(doc: Document) -> Any:
        return safe_load(doc.page_content)
    parsed_expected = [parse_document(doc) for doc in expected_output]
    parsed_output = [parse_document(doc) for doc in output]
    assert parsed_output == parsed_expected
    drop_vector_indexes(docsearch)
def test_metadata_filters_type1() -> None:
    """Test metadata filters.

    Despite the name, this exercises filtering test-case types 1 through 4.
    """
    docsearch = Neo4jVector.from_documents(
        DOCUMENTS,
        embedding=FakeEmbeddings(),
        pre_delete_collection=True,
    )
    # We don't test type 5, because LIKE has very SQL specific examples
    for example in (
        TYPE_1_FILTERING_TEST_CASES
        + TYPE_2_FILTERING_TEST_CASES
        + TYPE_3_FILTERING_TEST_CASES
        + TYPE_4_FILTERING_TEST_CASES
    ):
        filter_dict = cast(Dict[str, Any], example[0])
        output = docsearch.similarity_search("Foo", filter=filter_dict)
        # Test cases index documents starting at 1; shift to 0-based.
        indices = cast(List[int], example[1])
        adjusted_indices = [index - 1 for index in indices]
        expected_output = [DOCUMENTS[index] for index in adjusted_indices]
        # We don't return id properties from similarity search by default
        # Also remove any key where the value is None
        for doc in expected_output:
            if "id" in doc.metadata:
                del doc.metadata["id"]
            keys_with_none = [
                key for key, value in doc.metadata.items() if value is None
            ]
            for key in keys_with_none:
                del doc.metadata[key]
        assert output == expected_output
    drop_vector_indexes(docsearch)
def test_neo4jvector_relationship_index() -> None:
    """Test end to end construction and search."""
    embeddings = FakeEmbeddingsWithOsDimension()
    docsearch = Neo4jVector.from_texts(
        texts=texts,
        embedding=embeddings,
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
    )
    # Ingest data
    # NOTE(review): the second relationship stores text 'far' but the
    # embedding of "bar" — looks like a typo; harmless here because only
    # the "foo" match is asserted.
    docsearch.query(
        (
            "CREATE ()-[:REL {text: 'foo', embedding: $e1}]->()"
            ", ()-[:REL {text: 'far', embedding: $e2}]->()"
        ),
        params={
            "e1": embeddings.embed_query("foo"),
            "e2": embeddings.embed_query("bar"),
        },
    )
    # Create relationship index
    docsearch.query(
        """CREATE VECTOR INDEX `relationship`
FOR ()-[r:REL]-() ON (r.embedding)
OPTIONS {indexConfig: {
 `vector.dimensions`: 1536,
 `vector.similarity_function`: 'cosine'
}}
"""
    )
    relationship_index = Neo4jVector.from_existing_relationship_index(
        embeddings, index_name="relationship"
    )
    output = relationship_index.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo")]
    drop_vector_indexes(docsearch)
def test_neo4jvector_relationship_index_retrieval() -> None:
    """Test end to end construction and search."""
    embeddings = FakeEmbeddingsWithOsDimension()
    docsearch = Neo4jVector.from_texts(
        texts=texts,
        embedding=embeddings,
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
    )
    # Ingest data
    # NOTE(review): the second relationship stores text 'far' but the
    # embedding of "bar" — looks like a typo; harmless here because only
    # the "foo" match is asserted.
    docsearch.query(
        (
            "CREATE ({node:'text'})-[:REL {text: 'foo', embedding: $e1}]->()"
            ", ({node:'text'})-[:REL {text: 'far', embedding: $e2}]->()"
        ),
        params={
            "e1": embeddings.embed_query("foo"),
            "e2": embeddings.embed_query("bar"),
        },
    )
    # Create relationship index
    docsearch.query(
        """CREATE VECTOR INDEX `relationship`
FOR ()-[r:REL]-() ON (r.embedding)
OPTIONS {indexConfig: {
 `vector.dimensions`: 1536,
 `vector.similarity_function`: 'cosine'
}}
"""
    )
    # Custom query combines the relationship text with its start node.
    retrieval_query = (
        "RETURN relationship.text + '-' + startNode(relationship).node "
        "AS text, score, {foo:'bar'} AS metadata"
    )
    relationship_index = Neo4jVector.from_existing_relationship_index(
        embeddings, index_name="relationship", retrieval_query=retrieval_query
    )
    output = relationship_index.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo-text", metadata={"foo": "bar"})]
    drop_vector_indexes(docsearch)
def test_neo4j_max_marginal_relevance_search() -> None:
    """
    Test end to end construction and MMR search.
    The embedding function used here ensures `texts` become
    the following vectors on a circle (numbered v0 through v3):
           ______ v2
          /      \
         /        |  v1
    v3  |     .   | query
         |        /  v0
          |______/                 (N.B. very crude drawing)
    With fetch_k==3 and k==2, when query is at (1, ),
    one expects that v2 and v0 are returned (in some order).
    """
    texts = ["-0.124", "+0.127", "+0.25", "+1.0"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = Neo4jVector.from_texts(
        texts,
        metadatas=metadatas,
        embedding=AngularTwoDimensionalEmbeddings(),
        pre_delete_collection=True,
    )
    expected_set = {
        ("+0.25", 2),
        ("-0.124", 0),
    }
    # MMR with fetch_k=3, k=2 should pick the two non-redundant vectors.
    output = docsearch.max_marginal_relevance_search("0.0", k=2, fetch_k=3)
    # Compare as a set since MMR may return the pair in either order.
    output_set = {
        (mmr_doc.page_content, mmr_doc.metadata["page"]) for mmr_doc in output
    }
    assert output_set == expected_set
    drop_vector_indexes(docsearch)
def test_neo4jvector_passing_graph_object() -> None:
    """Test end to end construction and search with passing graph object."""
    graph = Neo4jGraph()
    # Rewrite env vars to make sure it fails if env is used.  Restore the
    # original value afterwards so later tests are not polluted by the
    # bogus URI (previously it leaked for the rest of the process).
    original_uri = os.environ.get("NEO4J_URI")
    os.environ["NEO4J_URI"] = "foo"
    try:
        docsearch = Neo4jVector.from_texts(
            texts=texts,
            embedding=FakeEmbeddingsWithOsDimension(),
            graph=graph,
            pre_delete_collection=True,
        )
        output = docsearch.similarity_search("foo", k=1)
        assert output == [Document(page_content="foo")]
        drop_vector_indexes(docsearch)
    finally:
        if original_uri is None:
            os.environ.pop("NEO4J_URI", None)
        else:
            os.environ["NEO4J_URI"] = original_uri
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_weaviate.py | """Test Weaviate functionality."""
import logging
import os
import uuid
from typing import Generator, Union
import pytest
from langchain_core.documents import Document
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.vectorstores.weaviate import Weaviate
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
# Emit verbose logs so failed integration runs are easier to diagnose.
logging.basicConfig(level=logging.DEBUG)
"""
cd tests/integration_tests/vectorstores/docker-compose
docker compose -f weaviate.yml up
"""
class TestWeaviate:
    """Integration tests for the Weaviate vector store wrapper.

    Requires a local Weaviate instance (see the module-level docker-compose
    note) and the ``OPENAI_API_KEY`` environment variable. The
    ``embedding_openai`` fixture is not defined in this file — presumably
    supplied by a shared conftest; verify against the test harness.
    Most tests are recorded/replayed through pytest-vcr cassettes.
    """
    @classmethod
    def setup_class(cls) -> None:
        # Fail fast if the key required by the OpenAI embeddings is absent.
        if not os.getenv("OPENAI_API_KEY"):
            raise ValueError("OPENAI_API_KEY environment variable is not set")
    @pytest.fixture(scope="class", autouse=True)
    def weaviate_url(self) -> Union[str, Generator[str, None, None]]: # type: ignore[return]
        """Return the weaviate url."""
        from weaviate import Client
        url = "http://localhost:8080"
        yield url
        # Clear the test index
        client = Client(url)
        client.schema.delete_all()
    @pytest.mark.vcr(ignore_localhost=True)
    def test_similarity_search_without_metadata(
        self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
    ) -> None:
        """Test end to end construction and search without metadata."""
        texts = ["foo", "bar", "baz"]
        docsearch = Weaviate.from_texts(
            texts,
            embedding_openai,
            weaviate_url=weaviate_url,
        )
        output = docsearch.similarity_search("foo", k=1)
        assert output == [Document(page_content="foo")]
    @pytest.mark.vcr(ignore_localhost=True)
    def test_similarity_search_with_metadata(
        self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
    ) -> None:
        """Test end to end construction and search with metadata."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = Weaviate.from_texts(
            texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url
        )
        output = docsearch.similarity_search("foo", k=1)
        assert output == [Document(page_content="foo", metadata={"page": 0})]
    @pytest.mark.vcr(ignore_localhost=True)
    def test_similarity_search_with_metadata_and_filter(
        self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
    ) -> None:
        """Test end to end construction and search with metadata."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = Weaviate.from_texts(
            texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url
        )
        # Even with k=2, the where_filter restricts the result to page 0 only.
        output = docsearch.similarity_search(
            "foo",
            k=2,
            where_filter={"path": ["page"], "operator": "Equal", "valueNumber": 0},
        )
        assert output == [Document(page_content="foo", metadata={"page": 0})]
    @pytest.mark.vcr(ignore_localhost=True)
    def test_similarity_search_with_metadata_and_additional(
        self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
    ) -> None:
        """Test end to end construction and search with metadata and additional."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = Weaviate.from_texts(
            texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url
        )
        # Requesting "certainty" makes Weaviate attach an _additional payload
        # to each hit's metadata.
        output = docsearch.similarity_search(
            "foo",
            k=1,
            additional=["certainty"],
        )
        assert output == [
            Document(
                page_content="foo",
                metadata={"page": 0, "_additional": {"certainty": 1}},
            )
        ]
    @pytest.mark.vcr(ignore_localhost=True)
    def test_similarity_search_with_uuids(
        self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
    ) -> None:
        """Test end to end construction and search with uuids."""
        texts = ["foo", "bar", "baz"]
        # Weaviate replaces the object if the UUID already exists
        uuids = [uuid.uuid5(uuid.NAMESPACE_DNS, "same-name") for text in texts]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = Weaviate.from_texts(
            texts,
            embedding_openai,
            metadatas=metadatas,
            weaviate_url=weaviate_url,
        )
        # All three texts shared one UUID, so only a single object survives.
        output = docsearch.similarity_search("foo", k=2)
        assert len(output) == 1
    @pytest.mark.vcr(ignore_localhost=True)
    def test_max_marginal_relevance_search(
        self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
    ) -> None:
        """Test end to end construction and MRR search."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = Weaviate.from_texts(
            texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url
        )
        # if lambda=1 the algorithm should be equivalent to standard ranking
        standard_ranking = docsearch.similarity_search("foo", k=2)
        output = docsearch.max_marginal_relevance_search(
            "foo", k=2, fetch_k=3, lambda_mult=1.0
        )
        assert output == standard_ranking
        # if lambda=0 the algorithm should favour maximal diversity
        output = docsearch.max_marginal_relevance_search(
            "foo", k=2, fetch_k=3, lambda_mult=0.0
        )
        assert output == [
            Document(page_content="foo", metadata={"page": 0}),
            Document(page_content="bar", metadata={"page": 1}),
        ]
    @pytest.mark.vcr(ignore_localhost=True)
    def test_max_marginal_relevance_search_by_vector(
        self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
    ) -> None:
        """Test end to end construction and MRR search by vector."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = Weaviate.from_texts(
            texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url
        )
        foo_embedding = embedding_openai.embed_query("foo")
        # if lambda=1 the algorithm should be equivalent to standard ranking
        standard_ranking = docsearch.similarity_search("foo", k=2)
        output = docsearch.max_marginal_relevance_search_by_vector(
            foo_embedding, k=2, fetch_k=3, lambda_mult=1.0
        )
        assert output == standard_ranking
        # if lambda=0 the algorithm should favour maximal diversity
        output = docsearch.max_marginal_relevance_search_by_vector(
            foo_embedding, k=2, fetch_k=3, lambda_mult=0.0
        )
        assert output == [
            Document(page_content="foo", metadata={"page": 0}),
            Document(page_content="bar", metadata={"page": 1}),
        ]
    @pytest.mark.vcr(ignore_localhost=True)
    def test_max_marginal_relevance_search_with_filter(
        self, weaviate_url: str, embedding_openai: OpenAIEmbeddings
    ) -> None:
        """Test end to end construction and MRR search."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = Weaviate.from_texts(
            texts, embedding_openai, metadatas=metadatas, weaviate_url=weaviate_url
        )
        where_filter = {"path": ["page"], "operator": "Equal", "valueNumber": 0}
        # if lambda=1 the algorithm should be equivalent to standard ranking
        standard_ranking = docsearch.similarity_search(
            "foo", k=2, where_filter=where_filter
        )
        output = docsearch.max_marginal_relevance_search(
            "foo", k=2, fetch_k=3, lambda_mult=1.0, where_filter=where_filter
        )
        assert output == standard_ranking
        # if lambda=0 the algorithm should favour maximal diversity
        output = docsearch.max_marginal_relevance_search(
            "foo", k=2, fetch_k=3, lambda_mult=0.0, where_filter=where_filter
        )
        # The filter leaves only one candidate, regardless of diversity.
        assert output == [
            Document(page_content="foo", metadata={"page": 0}),
        ]
    def test_add_texts_with_given_embedding(self, weaviate_url: str) -> None:
        """add_texts must reuse the embedding the store was constructed with."""
        texts = ["foo", "bar", "baz"]
        embedding = FakeEmbeddings()
        docsearch = Weaviate.from_texts(
            texts, embedding=embedding, weaviate_url=weaviate_url
        )
        docsearch.add_texts(["foo"])
        # "foo" was inserted twice with distinct auto-generated IDs.
        output = docsearch.similarity_search_by_vector(
            embedding.embed_query("foo"), k=2
        )
        assert output == [
            Document(page_content="foo"),
            Document(page_content="foo"),
        ]
    def test_add_texts_with_given_uuids(self, weaviate_url: str) -> None:
        """add_texts with an existing UUID must overwrite, not duplicate."""
        texts = ["foo", "bar", "baz"]
        embedding = FakeEmbeddings()
        uuids = [uuid.uuid5(uuid.NAMESPACE_DNS, text) for text in texts]
        docsearch = Weaviate.from_texts(
            texts,
            embedding=embedding,
            weaviate_url=weaviate_url,
            uuids=uuids,
        )
        # Weaviate replaces the object if the UUID already exists
        docsearch.add_texts(["foo"], uuids=[uuids[0]])
        output = docsearch.similarity_search_by_vector(
            embedding.embed_query("foo"), k=2
        )
        assert output[0] == Document(page_content="foo")
        assert output[1] != Document(page_content="foo")
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_astradb.py | """
Test of Astra DB vector store class `AstraDB`
Required to run this test:
- a recent `astrapy` Python package available
- an Astra DB instance;
- the two environment variables set:
export ASTRA_DB_API_ENDPOINT="https://<DB-ID>-us-east1.apps.astra.datastax.com"
export ASTRA_DB_APPLICATION_TOKEN="AstraCS:........."
- optionally this as well (otherwise defaults are used):
export ASTRA_DB_KEYSPACE="my_keyspace"
"""
import json
import math
import os
from typing import Iterable, List
import pytest
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_community.vectorstores import AstraDB
# Ad-hoc embedding classes:
class SomeEmbeddings(Embeddings):
    """Deterministic toy embeddings derived from character codes.

    The exact mapping is irrelevant for the tests; being deterministic
    is the only property that matters.
    """

    def __init__(self, dimension: int) -> None:
        self.dimension = dimension

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        embedded = []
        for sentence in texts:
            embedded.append(self.embed_query(sentence))
        return embedded

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        return self.embed_documents(texts)

    def embed_query(self, text: str) -> List[float]:
        # Character codes of the leading characters, padded with a single 1
        # followed by zeros, then clipped to exactly `dimension` entries.
        codes = [ord(ch) for ch in text[: self.dimension]]
        padded = codes + [1] + [0] * (self.dimension - 1 - len(codes))
        clipped = padded[: self.dimension]
        # L2-normalize so every embedding lies on the unit sphere.
        norm = sum(component * component for component in clipped) ** 0.5
        return [component / norm for component in clipped]

    async def aembed_query(self, text: str) -> List[float]:
        return self.embed_query(text)
class ParserEmbeddings(Embeddings):
    """Embeddings that parse the input text as a JSON vector.

    A text that is valid JSON for a list of `dimension` floats embeds to
    exactly that vector; anything else maps to the all-zero vector.
    """

    def __init__(self, dimension: int) -> None:
        self.dimension = dimension

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [self.embed_query(entry) for entry in texts]

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        return self.embed_documents(texts)

    def embed_query(self, text: str) -> List[float]:
        try:
            parsed = json.loads(text)
            assert len(parsed) == self.dimension
            return parsed
        except Exception:
            # Non-JSON (or wrong-length) input degrades to a zero vector.
            print(f'[ParserEmbeddings] Returning a moot vector for "{text}"')  # noqa: T201
            return [0.0] * self.dimension

    async def aembed_query(self, text: str) -> List[float]:
        return self.embed_query(text)
def _has_env_vars() -> bool:
    """Report whether both required Astra DB environment variables are set."""
    required = ("ASTRA_DB_APPLICATION_TOKEN", "ASTRA_DB_API_ENDPOINT")
    return all(name in os.environ for name in required)
@pytest.fixture(scope="function")
def store_someemb() -> Iterable[AstraDB]:
    """Yield a fresh AstraDB store backed by SomeEmbeddings; drop it after use."""
    embeddings = SomeEmbeddings(dimension=2)
    store = AstraDB(
        embedding=embeddings,
        collection_name="lc_test_s",
        token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
        api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
        namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
    )
    yield store
    # Teardown: remove the collection from the database.
    store.delete_collection()
@pytest.fixture(scope="function")
def store_parseremb() -> Iterable[AstraDB]:
    """Yield a fresh AstraDB store backed by ParserEmbeddings; drop it after use."""
    embeddings = ParserEmbeddings(dimension=2)
    store = AstraDB(
        embedding=embeddings,
        collection_name="lc_test_p",
        token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
        api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
        namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
    )
    yield store
    # Teardown: remove the collection from the database.
    store.delete_collection()
@pytest.mark.requires("astrapy")
@pytest.mark.skipif(not _has_env_vars(), reason="Missing Astra DB env. vars")
class TestAstraDB:
    """Integration tests for the AstraDB vector store.

    Every test talks to a live Astra DB instance identified by the
    ASTRA_DB_* environment variables (see module docstring); collections
    are created per test / per fixture and dropped afterwards.
    """
    def test_astradb_vectorstore_create_delete(self) -> None:
        """Create and delete."""
        emb = SomeEmbeddings(dimension=2)
        # creation by passing the connection secrets
        v_store = AstraDB(
            embedding=emb,
            collection_name="lc_test_1",
            token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
            api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
            namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
        )
        v_store.delete_collection()
        # Creation by passing a ready-made astrapy client:
        from astrapy.db import AstraDB as LibAstraDB
        astra_db_client = LibAstraDB(
            token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
            api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
            namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
        )
        v_store_2 = AstraDB(
            embedding=emb,
            collection_name="lc_test_2",
            astra_db_client=astra_db_client,
        )
        v_store_2.delete_collection()
    async def test_astradb_vectorstore_create_delete_async(self) -> None:
        """Create and delete."""
        emb = SomeEmbeddings(dimension=2)
        # creation by passing the connection secrets
        v_store = AstraDB(
            embedding=emb,
            collection_name="lc_test_1_async",
            token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
            api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
            namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
        )
        await v_store.adelete_collection()
        # Creation by passing a ready-made astrapy client:
        from astrapy.db import AsyncAstraDB
        astra_db_client = AsyncAstraDB(
            token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
            api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
            namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
        )
        v_store_2 = AstraDB(
            embedding=emb,
            collection_name="lc_test_2_async",
            async_astra_db_client=astra_db_client,
        )
        await v_store_2.adelete_collection()
    def test_astradb_vectorstore_pre_delete_collection(self) -> None:
        """Create and delete."""
        emb = SomeEmbeddings(dimension=2)
        # creation by passing the connection secrets
        v_store = AstraDB(
            embedding=emb,
            collection_name="lc_test_pre_del",
            token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
            api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
            namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
        )
        try:
            v_store.add_texts(
                texts=["aa"],
                metadatas=[
                    {"k": "a", "ord": 0},
                ],
                ids=["a"],
            )
            res1 = v_store.similarity_search("aa", k=5)
            assert len(res1) == 1
            # Re-opening the same collection with pre_delete_collection=True
            # must wipe the previously inserted document.
            v_store = AstraDB(
                embedding=emb,
                pre_delete_collection=True,
                collection_name="lc_test_pre_del",
                token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
                api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
                namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
            )
            res1 = v_store.similarity_search("aa", k=5)
            assert len(res1) == 0
        finally:
            v_store.delete_collection()
    async def test_astradb_vectorstore_pre_delete_collection_async(self) -> None:
        """Create and delete."""
        emb = SomeEmbeddings(dimension=2)
        # creation by passing the connection secrets
        v_store = AstraDB(
            embedding=emb,
            collection_name="lc_test_pre_del_async",
            token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
            api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
            namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
        )
        try:
            await v_store.aadd_texts(
                texts=["aa"],
                metadatas=[
                    {"k": "a", "ord": 0},
                ],
                ids=["a"],
            )
            res1 = await v_store.asimilarity_search("aa", k=5)
            assert len(res1) == 1
            # Async variant of the pre_delete_collection wipe check.
            v_store = AstraDB(
                embedding=emb,
                pre_delete_collection=True,
                collection_name="lc_test_pre_del_async",
                token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
                api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
                namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
            )
            res1 = await v_store.asimilarity_search("aa", k=5)
            assert len(res1) == 0
        finally:
            await v_store.adelete_collection()
    def test_astradb_vectorstore_from_x(self) -> None:
        """from_texts and from_documents methods."""
        emb = SomeEmbeddings(dimension=2)
        # from_texts
        v_store = AstraDB.from_texts(
            texts=["Hi", "Ho"],
            embedding=emb,
            collection_name="lc_test_ft",
            token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
            api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
            namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
        )
        try:
            assert v_store.similarity_search("Ho", k=1)[0].page_content == "Ho"
        finally:
            v_store.delete_collection()
        # from_documents
        v_store_2 = AstraDB.from_documents(
            [
                Document(page_content="Hee"),
                Document(page_content="Hoi"),
            ],
            embedding=emb,
            collection_name="lc_test_fd",
            token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
            api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
            namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
        )
        try:
            assert v_store_2.similarity_search("Hoi", k=1)[0].page_content == "Hoi"
        finally:
            v_store_2.delete_collection()
    async def test_astradb_vectorstore_from_x_async(self) -> None:
        """from_texts and from_documents methods."""
        emb = SomeEmbeddings(dimension=2)
        # from_texts
        v_store = await AstraDB.afrom_texts(
            texts=["Hi", "Ho"],
            embedding=emb,
            collection_name="lc_test_ft_async",
            token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
            api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
            namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
        )
        try:
            assert (await v_store.asimilarity_search("Ho", k=1))[0].page_content == "Ho"
        finally:
            await v_store.adelete_collection()
        # from_documents
        v_store_2 = await AstraDB.afrom_documents(
            [
                Document(page_content="Hee"),
                Document(page_content="Hoi"),
            ],
            embedding=emb,
            collection_name="lc_test_fd_async",
            token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
            api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
            namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
        )
        try:
            assert (await v_store_2.asimilarity_search("Hoi", k=1))[
                0
            ].page_content == "Hoi"
        finally:
            await v_store_2.adelete_collection()
    def test_astradb_vectorstore_crud(self, store_someemb: AstraDB) -> None:
        """Basic add/delete/update behaviour."""
        res0 = store_someemb.similarity_search("Abc", k=2)
        assert res0 == []
        # write and check again
        store_someemb.add_texts(
            texts=["aa", "bb", "cc"],
            metadatas=[
                {"k": "a", "ord": 0},
                {"k": "b", "ord": 1},
                {"k": "c", "ord": 2},
            ],
            ids=["a", "b", "c"],
        )
        res1 = store_someemb.similarity_search("Abc", k=5)
        assert {doc.page_content for doc in res1} == {"aa", "bb", "cc"}
        # partial overwrite and count total entries
        store_someemb.add_texts(
            texts=["cc", "dd"],
            metadatas=[
                {"k": "c_new", "ord": 102},
                {"k": "d_new", "ord": 103},
            ],
            ids=["c", "d"],
        )
        res2 = store_someemb.similarity_search("Abc", k=10)
        assert len(res2) == 4
        # pick one that was just updated and check its metadata
        res3 = store_someemb.similarity_search_with_score_id(
            query="cc", k=1, filter={"k": "c_new"}
        )
        print(str(res3)) # noqa: T201
        doc3, score3, id3 = res3[0]
        assert doc3.page_content == "cc"
        assert doc3.metadata == {"k": "c_new", "ord": 102}
        assert score3 > 0.999 # leaving some leeway for approximations...
        assert id3 == "c"
        # delete and count again
        del1_res = store_someemb.delete(["b"])
        assert del1_res is True
        del2_res = store_someemb.delete(["a", "c", "Z!"])
        # NOTE(review): the async twin of this test expects `is False` for the
        # same scenario (non-existing ID in the batch) — confirm which return
        # value the implementation actually guarantees.
        assert del2_res is True # a non-existing ID was supplied
        assert len(store_someemb.similarity_search("xy", k=10)) == 1
        # clear store
        store_someemb.clear()
        assert store_someemb.similarity_search("Abc", k=2) == []
        # add_documents with "ids" arg passthrough
        store_someemb.add_documents(
            [
                Document(page_content="vv", metadata={"k": "v", "ord": 204}),
                Document(page_content="ww", metadata={"k": "w", "ord": 205}),
            ],
            ids=["v", "w"],
        )
        assert len(store_someemb.similarity_search("xy", k=10)) == 2
        res4 = store_someemb.similarity_search("ww", k=1, filter={"k": "w"})
        assert res4[0].metadata["ord"] == 205
    async def test_astradb_vectorstore_crud_async(self, store_someemb: AstraDB) -> None:
        """Basic add/delete/update behaviour."""
        res0 = await store_someemb.asimilarity_search("Abc", k=2)
        assert res0 == []
        # write and check again
        await store_someemb.aadd_texts(
            texts=["aa", "bb", "cc"],
            metadatas=[
                {"k": "a", "ord": 0},
                {"k": "b", "ord": 1},
                {"k": "c", "ord": 2},
            ],
            ids=["a", "b", "c"],
        )
        res1 = await store_someemb.asimilarity_search("Abc", k=5)
        assert {doc.page_content for doc in res1} == {"aa", "bb", "cc"}
        # partial overwrite and count total entries
        await store_someemb.aadd_texts(
            texts=["cc", "dd"],
            metadatas=[
                {"k": "c_new", "ord": 102},
                {"k": "d_new", "ord": 103},
            ],
            ids=["c", "d"],
        )
        res2 = await store_someemb.asimilarity_search("Abc", k=10)
        assert len(res2) == 4
        # pick one that was just updated and check its metadata
        res3 = await store_someemb.asimilarity_search_with_score_id(
            query="cc", k=1, filter={"k": "c_new"}
        )
        print(str(res3)) # noqa: T201
        doc3, score3, id3 = res3[0]
        assert doc3.page_content == "cc"
        assert doc3.metadata == {"k": "c_new", "ord": 102}
        assert score3 > 0.999 # leaving some leeway for approximations...
        assert id3 == "c"
        # delete and count again
        del1_res = await store_someemb.adelete(["b"])
        assert del1_res is True
        del2_res = await store_someemb.adelete(["a", "c", "Z!"])
        # NOTE(review): the sync twin of this test expects `is True` here —
        # see test_astradb_vectorstore_crud; confirm the intended contract.
        assert del2_res is False # a non-existing ID was supplied
        assert len(await store_someemb.asimilarity_search("xy", k=10)) == 1
        # clear store
        await store_someemb.aclear()
        assert await store_someemb.asimilarity_search("Abc", k=2) == []
        # add_documents with "ids" arg passthrough
        await store_someemb.aadd_documents(
            [
                Document(page_content="vv", metadata={"k": "v", "ord": 204}),
                Document(page_content="ww", metadata={"k": "w", "ord": 205}),
            ],
            ids=["v", "w"],
        )
        assert len(await store_someemb.asimilarity_search("xy", k=10)) == 2
        res4 = await store_someemb.asimilarity_search("ww", k=1, filter={"k": "w"})
        assert res4[0].metadata["ord"] == 205
    @staticmethod
    def _v_from_i(i: int, N: int) -> str:
        # JSON-encode the unit vector at angle 2*pi*i/N, for ParserEmbeddings.
        angle = 2 * math.pi * i / N
        vector = [math.cos(angle), math.sin(angle)]
        return json.dumps(vector)
    def test_astradb_vectorstore_mmr(self, store_parseremb: AstraDB) -> None:
        """
        MMR testing. We work on the unit circle with angle multiples
        of 2*pi/20 and prepare a store with known vectors for a controlled
        MMR outcome.
        """
        i_vals = [0, 4, 5, 13]
        N_val = 20
        store_parseremb.add_texts(
            [self._v_from_i(i, N_val) for i in i_vals],
            metadatas=[{"i": i} for i in i_vals],
        )
        res1 = store_parseremb.max_marginal_relevance_search(
            self._v_from_i(3, N_val),
            k=2,
            fetch_k=3,
        )
        res_i_vals = {doc.metadata["i"] for doc in res1}
        assert res_i_vals == {0, 4}
    async def test_astradb_vectorstore_mmr_async(
        self, store_parseremb: AstraDB
    ) -> None:
        """
        MMR testing. We work on the unit circle with angle multiples
        of 2*pi/20 and prepare a store with known vectors for a controlled
        MMR outcome.
        """
        i_vals = [0, 4, 5, 13]
        N_val = 20
        await store_parseremb.aadd_texts(
            [self._v_from_i(i, N_val) for i in i_vals],
            metadatas=[{"i": i} for i in i_vals],
        )
        res1 = await store_parseremb.amax_marginal_relevance_search(
            self._v_from_i(3, N_val),
            k=2,
            fetch_k=3,
        )
        res_i_vals = {doc.metadata["i"] for doc in res1}
        assert res_i_vals == {0, 4}
    def test_astradb_vectorstore_metadata(self, store_someemb: AstraDB) -> None:
        """Metadata filtering."""
        store_someemb.add_documents(
            [
                Document(
                    page_content="q",
                    metadata={"ord": ord("q"), "group": "consonant"},
                ),
                Document(
                    page_content="w",
                    metadata={"ord": ord("w"), "group": "consonant"},
                ),
                Document(
                    page_content="r",
                    metadata={"ord": ord("r"), "group": "consonant"},
                ),
                Document(
                    page_content="e",
                    metadata={"ord": ord("e"), "group": "vowel"},
                ),
                Document(
                    page_content="i",
                    metadata={"ord": ord("i"), "group": "vowel"},
                ),
                Document(
                    page_content="o",
                    metadata={"ord": ord("o"), "group": "vowel"},
                ),
            ]
        )
        # no filters
        res0 = store_someemb.similarity_search("x", k=10)
        assert {doc.page_content for doc in res0} == set("qwreio")
        # single filter
        res1 = store_someemb.similarity_search(
            "x",
            k=10,
            filter={"group": "vowel"},
        )
        assert {doc.page_content for doc in res1} == set("eio")
        # multiple filters
        res2 = store_someemb.similarity_search(
            "x",
            k=10,
            filter={"group": "consonant", "ord": ord("q")},
        )
        assert {doc.page_content for doc in res2} == set("q")
        # excessive filters
        res3 = store_someemb.similarity_search(
            "x",
            k=10,
            filter={"group": "consonant", "ord": ord("q"), "case": "upper"},
        )
        assert res3 == []
        # filter with logical operator
        res4 = store_someemb.similarity_search(
            "x",
            k=10,
            filter={"$or": [{"ord": ord("q")}, {"ord": ord("r")}]},
        )
        assert {doc.page_content for doc in res4} == {"q", "r"}
    def test_astradb_vectorstore_similarity_scale(
        self, store_parseremb: AstraDB
    ) -> None:
        """Scale of the similarity scores."""
        store_parseremb.add_texts(
            texts=[
                json.dumps([1, 1]),
                json.dumps([-1, -1]),
            ],
            ids=["near", "far"],
        )
        res1 = store_parseremb.similarity_search_with_score(
            json.dumps([0.5, 0.5]),
            k=2,
        )
        scores = [sco for _, sco in res1]
        # Parallel vector scores ~1, antiparallel ~0: scores live in [0, 1].
        sco_near, sco_far = scores
        assert abs(1 - sco_near) < 0.001 and abs(sco_far) < 0.001
    async def test_astradb_vectorstore_similarity_scale_async(
        self, store_parseremb: AstraDB
    ) -> None:
        """Scale of the similarity scores."""
        await store_parseremb.aadd_texts(
            texts=[
                json.dumps([1, 1]),
                json.dumps([-1, -1]),
            ],
            ids=["near", "far"],
        )
        res1 = await store_parseremb.asimilarity_search_with_score(
            json.dumps([0.5, 0.5]),
            k=2,
        )
        scores = [sco for _, sco in res1]
        sco_near, sco_far = scores
        assert abs(1 - sco_near) < 0.001 and abs(sco_far) < 0.001
    def test_astradb_vectorstore_massive_delete(self, store_someemb: AstraDB) -> None:
        """Larger-scale bulk deletes."""
        M = 50
        texts = [str(i + 1 / 7.0) for i in range(2 * M)]
        ids0 = ["doc_%i" % i for i in range(M)]
        ids1 = ["doc_%i" % (i + M) for i in range(M)]
        ids = ids0 + ids1
        store_someemb.add_texts(texts=texts, ids=ids)
        # deleting a bunch of these
        del_res0 = store_someemb.delete(ids0)
        assert del_res0 is True
        # deleting the rest plus a fake one
        del_res1 = store_someemb.delete(ids1 + ["ghost!"])
        assert del_res1 is True # ensure no error
        # nothing left
        assert store_someemb.similarity_search("x", k=2 * M) == []
    def test_astradb_vectorstore_drop(self) -> None:
        """behaviour of 'delete_collection'."""
        collection_name = "lc_test_d"
        emb = SomeEmbeddings(dimension=2)
        v_store = AstraDB(
            embedding=emb,
            collection_name=collection_name,
            token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
            api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
            namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
        )
        v_store.add_texts(["huh"])
        assert len(v_store.similarity_search("hah", k=10)) == 1
        # another instance pointing to the same collection on DB
        v_store_kenny = AstraDB(
            embedding=emb,
            collection_name=collection_name,
            token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
            api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
            namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
        )
        v_store_kenny.delete_collection()
        # dropped on DB, but 'v_store' should have no clue:
        with pytest.raises(ValueError):
            _ = v_store.similarity_search("hah", k=10)
    def test_astradb_vectorstore_custom_params(self) -> None:
        """Custom batch size and concurrency params."""
        emb = SomeEmbeddings(dimension=2)
        # Constructor-level defaults for batching/concurrency...
        v_store = AstraDB(
            embedding=emb,
            collection_name="lc_test_c",
            token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
            api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
            namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
            batch_size=17,
            bulk_insert_batch_concurrency=13,
            bulk_insert_overwrite_concurrency=7,
            bulk_delete_concurrency=19,
        )
        try:
            # add_texts
            N = 50
            texts = [str(i + 1 / 7.0) for i in range(N)]
            ids = ["doc_%i" % i for i in range(N)]
            v_store.add_texts(texts=texts, ids=ids)
            # ...and per-call overrides of the same knobs.
            v_store.add_texts(
                texts=texts,
                ids=ids,
                batch_size=19,
                batch_concurrency=7,
                overwrite_concurrency=13,
            )
            #
            _ = v_store.delete(ids[: N // 2])
            _ = v_store.delete(ids[N // 2 :], concurrency=23)
            #
        finally:
            v_store.delete_collection()
    async def test_astradb_vectorstore_custom_params_async(self) -> None:
        """Custom batch size and concurrency params."""
        emb = SomeEmbeddings(dimension=2)
        v_store = AstraDB(
            embedding=emb,
            collection_name="lc_test_c_async",
            token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
            api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
            namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
            batch_size=17,
            bulk_insert_batch_concurrency=13,
            bulk_insert_overwrite_concurrency=7,
            bulk_delete_concurrency=19,
        )
        try:
            # add_texts
            N = 50
            texts = [str(i + 1 / 7.0) for i in range(N)]
            ids = ["doc_%i" % i for i in range(N)]
            await v_store.aadd_texts(texts=texts, ids=ids)
            await v_store.aadd_texts(
                texts=texts,
                ids=ids,
                batch_size=19,
                batch_concurrency=7,
                overwrite_concurrency=13,
            )
            #
            await v_store.adelete(ids[: N // 2])
            await v_store.adelete(ids[N // 2 :], concurrency=23)
            #
        finally:
            await v_store.adelete_collection()
    def test_astradb_vectorstore_metrics(self) -> None:
        """
        Different choices of similarity metric.
        Both stores (with "cosine" and "euclidean" metrics) contain these two:
            - a vector slightly rotated w.r.t query vector
            - a vector which is a long multiple of query vector
        so, which one is "the closest one" depends on the metric.
        """
        emb = ParserEmbeddings(dimension=2)
        isq2 = 0.5**0.5
        isa = 0.7
        isb = (1.0 - isa * isa) ** 0.5
        texts = [
            json.dumps([isa, isb]),
            json.dumps([10 * isq2, 10 * isq2]),
        ]
        ids = [
            "rotated",
            "scaled",
        ]
        query_text = json.dumps([isq2, isq2])
        # creation, population, query - cosine
        vstore_cos = AstraDB(
            embedding=emb,
            collection_name="lc_test_m_c",
            token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
            api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
            namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
            metric="cosine",
        )
        try:
            vstore_cos.add_texts(
                texts=texts,
                ids=ids,
            )
            _, _, id_from_cos = vstore_cos.similarity_search_with_score_id(
                query_text,
                k=1,
            )[0]
            assert id_from_cos == "scaled"
        finally:
            vstore_cos.delete_collection()
        # creation, population, query - euclidean
        vstore_euc = AstraDB(
            embedding=emb,
            collection_name="lc_test_m_e",
            token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
            api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
            namespace=os.environ.get("ASTRA_DB_KEYSPACE"),
            metric="euclidean",
        )
        try:
            vstore_euc.add_texts(
                texts=texts,
                ids=ids,
            )
            _, _, id_from_euc = vstore_euc.similarity_search_with_score_id(
                query_text,
                k=1,
            )[0]
            assert id_from_euc == "rotated"
        finally:
            vstore_euc.delete_collection()
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/fake_embeddings.py | """Fake Embedding class for testing purposes."""
import math
from typing import List
from langchain_core.embeddings import Embeddings
fake_texts = ["foo", "bar", "baz"]
class FakeEmbeddings(Embeddings):
    """Deterministic stand-in embeddings for tests."""

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Return simple embeddings: nine ones followed by the text's index."""
        vectors = []
        for position in range(len(texts)):
            vectors.append([1.0] * 9 + [float(position)])
        return vectors

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        return self.embed_documents(texts)

    def embed_query(self, text: str) -> List[float]:
        """Return a constant query embedding, identical to
        embed_documents(texts)[0], so distance to each stored text equals
        that text's insertion index."""
        return [1.0] * 9 + [0.0]

    async def aembed_query(self, text: str) -> List[float]:
        return self.embed_query(text)
class ConsistentFakeEmbeddings(FakeEmbeddings):
    """Fake embeddings that remember every text seen, so the same text
    always maps to the same vector."""

    def __init__(self, dimensionality: int = 10) -> None:
        self.known_texts: List[str] = []
        self.dimensionality = dimensionality

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Return consistent embeddings for each text seen so far."""
        vectors = []
        for text in texts:
            # First occurrence registers the text; its list position becomes
            # the final vector component forever after.
            if text not in self.known_texts:
                self.known_texts.append(text)
            index_component = float(self.known_texts.index(text))
            vectors.append([1.0] * (self.dimensionality - 1) + [index_component])
        return vectors

    def embed_query(self, text: str) -> List[float]:
        """Return the vector embed_documents would produce for this text."""
        return self.embed_documents([text])[0]
class AngularTwoDimensionalEmbeddings(Embeddings):
    """Map angle strings (in units of pi) to unit vectors on the circle."""

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed every text in the list."""
        return [self.embed_query(entry) for entry in texts]

    def embed_query(self, text: str) -> List[float]:
        """Convert the text into a 2-D 'vector'.

        Numeric input is interpreted as an angle in units of pi; any
        other input collapses to the singular result [0, 0].
        """
        try:
            turns = float(text)
            return [math.cos(turns * math.pi), math.sin(turns * math.pi)]
        except ValueError:
            # Assume: just test string, no attention is paid to values.
            return [0.0, 0.0]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_pinecone.py | import importlib
import os
import time
import uuid
from typing import TYPE_CHECKING, List
import numpy as np
import pytest
from langchain_core.documents import Document
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores.pinecone import Pinecone
if TYPE_CHECKING:
import pinecone
index_name = "langchain-test-index" # name of the index
namespace_name = "langchain-test-namespace" # name of the namespace
dimension = 1536 # dimension of the embeddings
def reset_pinecone() -> None:
    """Reload the pinecone module and re-initialise it from the environment."""
    api_key = os.environ.get("PINECONE_API_KEY")
    environment = os.environ.get("PINECONE_ENVIRONMENT")
    assert api_key is not None
    assert environment is not None
    import pinecone
    # A fresh module state guards against leftover client configuration.
    importlib.reload(pinecone)
    pinecone.init(api_key=api_key, environment=environment)
class TestPinecone:
    """Integration tests for the community Pinecone vector store.

    Talks to a live Pinecone index named by the module-level ``index_name``;
    requires PINECONE_API_KEY and PINECONE_ENVIRONMENT in the environment
    (enforced by ``reset_pinecone``).
    """
    # Shared handle to the live index; populated once in setup_class.
    index: "pinecone.Index"
    @classmethod
    def setup_class(cls) -> None:
        """Create (or wipe) the test index so the class starts from empty."""
        import pinecone
        reset_pinecone()
        cls.index = pinecone.Index(index_name)
        if index_name in pinecone.list_indexes():
            index_stats = cls.index.describe_index_stats()
            if index_stats["dimension"] == dimension:
                # delete all the vectors in the index if the dimension is the same
                # from all namespaces
                index_stats = cls.index.describe_index_stats()
                for _namespace_name in index_stats["namespaces"].keys():
                    cls.index.delete(delete_all=True, namespace=_namespace_name)
            else:
                # Dimension mismatch: the index must be rebuilt from scratch.
                pinecone.delete_index(index_name)
                pinecone.create_index(name=index_name, dimension=dimension)
        else:
            pinecone.create_index(name=index_name, dimension=dimension)
        # insure the index is empty
        index_stats = cls.index.describe_index_stats()
        assert index_stats["dimension"] == dimension
        if index_stats["namespaces"].get(namespace_name) is not None:
            assert index_stats["namespaces"][namespace_name]["vector_count"] == 0
    @classmethod
    def teardown_class(cls) -> None:
        """Delete every vector in every namespace once the class finishes."""
        index_stats = cls.index.describe_index_stats()
        for _namespace_name in index_stats["namespaces"].keys():
            cls.index.delete(delete_all=True, namespace=_namespace_name)
        reset_pinecone()
    @pytest.fixture(autouse=True)
    def setup(self) -> None:
        """Per-test fixture: empty the index so tests never see stale vectors."""
        # delete all the vectors in the index
        index_stats = self.index.describe_index_stats()
        for _namespace_name in index_stats["namespaces"].keys():
            self.index.delete(delete_all=True, namespace=_namespace_name)
        reset_pinecone()
    @pytest.mark.vcr()
    def test_from_texts(
        self, texts: List[str], embedding_openai: OpenAIEmbeddings
    ) -> None:
        """Test end to end construction and search."""
        # A unique marker string guarantees the top search hit is ours.
        unique_id = uuid.uuid4().hex
        needs = f"foobuu {unique_id} booo"
        texts.insert(0, needs)
        docsearch = Pinecone.from_texts(
            texts=texts,
            embedding=embedding_openai,
            index_name=index_name,
            namespace=namespace_name,
        )
        output = docsearch.similarity_search(unique_id, k=1, namespace=namespace_name)
        assert output == [Document(page_content=needs)]
    @pytest.mark.vcr()
    def test_from_texts_with_metadatas(
        self, texts: List[str], embedding_openai: OpenAIEmbeddings
    ) -> None:
        """Test end to end construction and search."""
        unique_id = uuid.uuid4().hex
        needs = f"foobuu {unique_id} booo"
        texts.insert(0, needs)
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = Pinecone.from_texts(
            texts,
            embedding_openai,
            index_name=index_name,
            metadatas=metadatas,
            namespace=namespace_name,
        )
        output = docsearch.similarity_search(needs, k=1, namespace=namespace_name)
        # TODO: why metadata={"page": 0.0}) instead of {"page": 0}?
        # NOTE(review): Pinecone appears to coerce numeric metadata to float —
        # confirm against the client docs.
        assert output == [Document(page_content=needs, metadata={"page": 0.0})]
    @pytest.mark.vcr()
    def test_from_texts_with_scores(self, embedding_openai: OpenAIEmbeddings) -> None:
        """Test end to end construction and search with scores and IDs."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = Pinecone.from_texts(
            texts,
            embedding_openai,
            index_name=index_name,
            metadatas=metadatas,
            namespace=namespace_name,
        )
        output = docsearch.similarity_search_with_score(
            "foo", k=3, namespace=namespace_name
        )
        docs = [o[0] for o in output]
        scores = [o[1] for o in output]
        sorted_documents = sorted(docs, key=lambda x: x.metadata["page"])
        # TODO: why metadata={"page": 0.0}) instead of {"page": 0}, etc???
        assert sorted_documents == [
            Document(page_content="foo", metadata={"page": 0.0}),
            Document(page_content="bar", metadata={"page": 1.0}),
            Document(page_content="baz", metadata={"page": 2.0}),
        ]
        # The exact match should score strictly higher than the other texts.
        assert scores[0] > scores[1] > scores[2]
    def test_from_existing_index_with_namespaces(
        self, embedding_openai: OpenAIEmbeddings
    ) -> None:
        """Test that namespaces are properly handled."""
        # Create two indexes with the same name but different namespaces
        texts_1 = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts_1))]
        Pinecone.from_texts(
            texts_1,
            embedding_openai,
            index_name=index_name,
            metadatas=metadatas,
            namespace=f"{index_name}-1",
        )
        texts_2 = ["foo2", "bar2", "baz2"]
        metadatas = [{"page": i} for i in range(len(texts_2))]
        Pinecone.from_texts(
            texts_2,
            embedding_openai,
            index_name=index_name,
            metadatas=metadatas,
            namespace=f"{index_name}-2",
        )
        # Search with namespace
        docsearch = Pinecone.from_existing_index(
            index_name=index_name,
            embedding=embedding_openai,
            namespace=f"{index_name}-1",
        )
        output = docsearch.similarity_search("foo", k=20, namespace=f"{index_name}-1")
        # check that we don't get results from the other namespace
        page_contents = sorted(set([o.page_content for o in output]))
        assert all(content in ["foo", "bar", "baz"] for content in page_contents)
        assert all(content not in ["foo2", "bar2", "baz2"] for content in page_contents)
    def test_add_documents_with_ids(
        self, texts: List[str], embedding_openai: OpenAIEmbeddings
    ) -> None:
        """Adding with fresh IDs twice should double the vector count."""
        ids = [uuid.uuid4().hex for _ in range(len(texts))]
        Pinecone.from_texts(
            texts=texts,
            ids=ids,
            embedding=embedding_openai,
            index_name=index_name,
            namespace=index_name,
        )
        index_stats = self.index.describe_index_stats()
        assert index_stats["namespaces"][index_name]["vector_count"] == len(texts)
        ids_1 = [uuid.uuid4().hex for _ in range(len(texts))]
        Pinecone.from_texts(
            texts=texts,
            ids=ids_1,
            embedding=embedding_openai,
            index_name=index_name,
            namespace=index_name,
        )
        index_stats = self.index.describe_index_stats()
        # Distinct IDs mean the same texts are stored again, not upserted over.
        assert index_stats["namespaces"][index_name]["vector_count"] == len(texts) * 2
        assert index_stats["total_vector_count"] == len(texts) * 2
    @pytest.mark.vcr()
    def test_relevance_score_bound(self, embedding_openai: OpenAIEmbeddings) -> None:
        """Ensures all relevance scores are between 0 and 1."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = Pinecone.from_texts(
            texts,
            embedding_openai,
            index_name=index_name,
            metadatas=metadatas,
        )
        # wait for the index to be ready
        time.sleep(20)
        output = docsearch.similarity_search_with_relevance_scores("foo", k=3)
        # np.isclose tolerates float rounding just above 1.0.
        assert all(
            (1 >= score or np.isclose(score, 1)) and score >= 0 for _, score in output
        )
    # NOTE(review): skipif() without a condition argument never skips; this was
    # most likely meant to be pytest.mark.skip(reason=...) — confirm.
    @pytest.mark.skipif(reason="slow to run for benchmark")
    @pytest.mark.parametrize(
        "pool_threads,batch_size,embeddings_chunk_size,data_multiplier",
        [
            (
                1,
                32,
                32,
                1000,
            ),  # simulate single threaded with embeddings_chunk_size = batch_size = 32
            (
                1,
                32,
                1000,
                1000,
            ),  # simulate single threaded with embeddings_chunk_size = 1000
            (
                4,
                32,
                1000,
                1000,
            ),  # simulate 4 threaded with embeddings_chunk_size = 1000
            (20, 64, 5000, 1000),
        ],  # simulate 20 threaded with embeddings_chunk_size = 5000
    )
    def test_from_texts_with_metadatas_benchmark(
        self,
        pool_threads: int,
        batch_size: int,
        embeddings_chunk_size: int,
        data_multiplier: int,
        documents: List[Document],
        embedding_openai: OpenAIEmbeddings,
    ) -> None:
        """Test end to end construction and search."""
        # Multiply the corpus to get a realistically sized benchmark load.
        texts = [document.page_content for document in documents] * data_multiplier
        uuids = [uuid.uuid4().hex for _ in range(len(texts))]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = Pinecone.from_texts(
            texts,
            embedding_openai,
            ids=uuids,
            metadatas=metadatas,
            index_name=index_name,
            namespace=namespace_name,
            pool_threads=pool_threads,
            batch_size=batch_size,
            embeddings_chunk_size=embeddings_chunk_size,
        )
        query = "What did the president say about Ketanji Brown Jackson"
        _ = docsearch.similarity_search(query, k=1, namespace=namespace_name)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py | """Test ElasticSearch functionality."""
import logging
import os
import re
import uuid
from typing import Any, Dict, Generator, List, Union
import pytest
from langchain_core.documents import Document
from langchain_community.vectorstores.elasticsearch import ElasticsearchStore
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
FakeEmbeddings,
)
logging.basicConfig(level=logging.DEBUG)  # verbose logs aid debugging against a live cluster
"""
cd tests/integration_tests/vectorstores/docker-compose
docker-compose -f elasticsearch.yml up
By default runs against local docker instance of Elasticsearch.
To run against Elastic Cloud, set the following environment variables:
- ES_CLOUD_ID
- ES_USERNAME
- ES_PASSWORD
Some of the tests require the following models to be deployed in the ML Node:
- elser (can be downloaded and deployed through Kibana and trained models UI)
- sentence-transformers__all-minilm-l6-v2 (can be deployed
through API, loaded via eland)
These tests that require the models to be deployed are skipped by default.
Enable them by adding the model name to the modelsDeployed list below.
"""
# Models deployed in the test cluster's ML node; model-gated tests below are
# skipped unless the model's name is uncommented here.
modelsDeployed: List[str] = [
    # "elser",
    # "sentence-transformers__all-minilm-l6-v2",
]
class TestElasticsearch:
    """Integration tests for ElasticsearchStore against a live cluster.

    Runs against a local docker Elasticsearch by default; set ES_CLOUD_ID /
    ES_USERNAME / ES_PASSWORD to target Elastic Cloud instead (see the
    module-level notes above).
    """
    @classmethod
    def setup_class(cls) -> None:
        # Fail fast if the key some tests rely on is absent.
        if not os.getenv("OPENAI_API_KEY"):
            raise ValueError("OPENAI_API_KEY environment variable is not set")
    @pytest.fixture(scope="class", autouse=True)
    def elasticsearch_connection(self) -> Union[dict, Generator[dict, None, None]]:  # type: ignore[return]
        """Yield connection kwargs for ElasticsearchStore, then clean up test state."""
        # Running this integration test with Elastic Cloud
        # Required for in-stack inference testing (ELSER + model_id)
        from elasticsearch import Elasticsearch
        es_url = os.environ.get("ES_URL", "http://localhost:9200")
        cloud_id = os.environ.get("ES_CLOUD_ID")
        es_username = os.environ.get("ES_USERNAME", "elastic")
        es_password = os.environ.get("ES_PASSWORD", "changeme")
        if cloud_id:
            es = Elasticsearch(
                cloud_id=cloud_id,
                basic_auth=(es_username, es_password),
            )
            yield {
                "es_cloud_id": cloud_id,
                "es_user": es_username,
                "es_password": es_password,
            }
        else:
            # Running this integration test with local docker instance
            es = Elasticsearch(hosts=es_url)
            yield {"es_url": es_url}
        # Clear all indexes
        index_names = es.indices.get(index="_all").keys()
        for index_name in index_names:
            if index_name.startswith("test_"):
                es.indices.delete(index=index_name)
        es.indices.refresh(index="_all")
        # clear all test pipelines
        try:
            response = es.ingest.get_pipeline(id="test_*,*_sparse_embedding")
            for pipeline_id, _ in response.items():
                try:
                    es.ingest.delete_pipeline(id=pipeline_id)
                    print(f"Deleted pipeline: {pipeline_id}")  # noqa: T201
                except Exception as e:
                    print(f"Pipeline error: {e}")  # noqa: T201
        except Exception:
            # Best-effort cleanup: missing pipelines are fine to ignore.
            pass
    @pytest.fixture(scope="function")
    def es_client(self) -> Any:
        """Return an Elasticsearch client whose transport records every request."""
        # Running this integration test with Elastic Cloud
        # Required for in-stack inference testing (ELSER + model_id)
        from elastic_transport import Transport
        from elasticsearch import Elasticsearch
        class CustomTransport(Transport):
            # Captured perform_request kwargs, so tests can inspect headers
            # (e.g. User-Agent) and request counts. The class is redefined on
            # each fixture call, so the list starts empty per test.
            requests = []
            def perform_request(self, *args, **kwargs):  # type: ignore
                self.requests.append(kwargs)
                return super().perform_request(*args, **kwargs)
        es_url = os.environ.get("ES_URL", "http://localhost:9200")
        cloud_id = os.environ.get("ES_CLOUD_ID")
        es_username = os.environ.get("ES_USERNAME", "elastic")
        es_password = os.environ.get("ES_PASSWORD", "changeme")
        if cloud_id:
            es = Elasticsearch(
                cloud_id=cloud_id,
                basic_auth=(es_username, es_password),
                transport_class=CustomTransport,
            )
            return es
        else:
            # Running this integration test with local docker instance
            es = Elasticsearch(hosts=es_url, transport_class=CustomTransport)
            return es
    @pytest.fixture(scope="function")
    def index_name(self) -> str:
        """Return the index name."""
        # Unique per test so tests cannot interfere with each other.
        return f"test_{uuid.uuid4().hex}"
    def test_similarity_search_without_metadata(
        self, elasticsearch_connection: dict, index_name: str
    ) -> None:
        """Test end to end construction and search without metadata."""
        def assert_query(query_body: dict, query: str) -> dict:
            # Intercepts the generated knn query and pins its exact shape.
            assert query_body == {
                "knn": {
                    "field": "vector",
                    "filter": [],
                    "k": 1,
                    "num_candidates": 50,
                    "query_vector": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
                }
            }
            return query_body
        texts = ["foo", "bar", "baz"]
        docsearch = ElasticsearchStore.from_texts(
            texts,
            FakeEmbeddings(),
            **elasticsearch_connection,
            index_name=index_name,
        )
        output = docsearch.similarity_search("foo", k=1, custom_query=assert_query)
        assert output == [Document(page_content="foo")]
    async def test_similarity_search_without_metadata_async(
        self, elasticsearch_connection: dict, index_name: str
    ) -> None:
        """Test end to end construction and search without metadata."""
        texts = ["foo", "bar", "baz"]
        docsearch = ElasticsearchStore.from_texts(
            texts,
            FakeEmbeddings(),
            **elasticsearch_connection,
            index_name=index_name,
        )
        output = await docsearch.asimilarity_search("foo", k=1)
        assert output == [Document(page_content="foo")]
    def test_add_embeddings(
        self, elasticsearch_connection: dict, index_name: str
    ) -> None:
        """
        Test add_embeddings, which accepts pre-built embeddings instead of
        using inference for the texts.
        This allows you to separate the embeddings text and the page_content
        for better proximity between user's question and embedded text.
        For example, your embedding text can be a question, whereas page_content
        is the answer.
        """
        embeddings = ConsistentFakeEmbeddings()
        text_input = ["foo1", "foo2", "foo3"]
        metadatas = [{"page": i} for i in range(len(text_input))]
        """In real use case, embedding_input can be questions for each text"""
        embedding_input = ["foo2", "foo3", "foo1"]
        embedding_vectors = embeddings.embed_documents(embedding_input)
        docsearch = ElasticsearchStore._create_cls_from_kwargs(
            embeddings,
            **elasticsearch_connection,
            index_name=index_name,
        )
        docsearch.add_embeddings(list(zip(text_input, embedding_vectors)), metadatas)
        # "foo3" is returned because its stored vector is the one built from
        # the "foo1" embedding input (texts and vectors are zipped in order).
        output = docsearch.similarity_search("foo1", k=1)
        assert output == [Document(page_content="foo3", metadata={"page": 2})]
    def test_similarity_search_with_metadata(
        self, elasticsearch_connection: dict, index_name: str
    ) -> None:
        """Test end to end construction and search with metadata."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = ElasticsearchStore.from_texts(
            texts,
            ConsistentFakeEmbeddings(),
            metadatas=metadatas,
            **elasticsearch_connection,
            index_name=index_name,
        )
        output = docsearch.similarity_search("foo", k=1)
        assert output == [Document(page_content="foo", metadata={"page": 0})]
        output = docsearch.similarity_search("bar", k=1)
        assert output == [Document(page_content="bar", metadata={"page": 1})]
    def test_similarity_search_with_filter(
        self, elasticsearch_connection: dict, index_name: str
    ) -> None:
        """Test end to end construction and search with metadata."""
        # Identical texts force the metadata filter to decide the result.
        texts = ["foo", "foo", "foo"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = ElasticsearchStore.from_texts(
            texts,
            FakeEmbeddings(),
            metadatas=metadatas,
            **elasticsearch_connection,
            index_name=index_name,
        )
        def assert_query(query_body: dict, query: str) -> dict:
            assert query_body == {
                "knn": {
                    "field": "vector",
                    "filter": [{"term": {"metadata.page": "1"}}],
                    "k": 3,
                    "num_candidates": 50,
                    "query_vector": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
                }
            }
            return query_body
        output = docsearch.similarity_search(
            query="foo",
            k=3,
            filter=[{"term": {"metadata.page": "1"}}],
            custom_query=assert_query,
        )
        assert output == [Document(page_content="foo", metadata={"page": 1})]
    def test_similarity_search_with_doc_builder(
        self, elasticsearch_connection: dict, index_name: str
    ) -> None:
        """A custom doc_builder should fully control the returned Documents."""
        texts = ["foo", "foo", "foo"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = ElasticsearchStore.from_texts(
            texts,
            FakeEmbeddings(),
            metadatas=metadatas,
            **elasticsearch_connection,
            index_name=index_name,
        )
        def custom_document_builder(_: Dict) -> Document:
            # Ignores the hit entirely and returns a fixed Document.
            return Document(
                page_content="Mock content!",
                metadata={
                    "page_number": -1,
                    "original_filename": "Mock filename!",
                },
            )
        output = docsearch.similarity_search(
            query="foo", k=1, doc_builder=custom_document_builder
        )
        assert output[0].page_content == "Mock content!"
        assert output[0].metadata["page_number"] == -1
        assert output[0].metadata["original_filename"] == "Mock filename!"
    def test_similarity_search_exact_search(
        self, elasticsearch_connection: dict, index_name: str
    ) -> None:
        """Test end to end construction and search with metadata."""
        texts = ["foo", "bar", "baz"]
        docsearch = ElasticsearchStore.from_texts(
            texts,
            FakeEmbeddings(),
            **elasticsearch_connection,
            index_name=index_name,
            strategy=ElasticsearchStore.ExactRetrievalStrategy(),
        )
        # Exact retrieval uses a script_score query rather than knn.
        expected_query = {
            "query": {
                "script_score": {
                    "query": {"match_all": {}},
                    "script": {
                        "source": "cosineSimilarity(params.query_vector, 'vector') + 1.0",  # noqa: E501
                        "params": {
                            "query_vector": [
                                1.0,
                                1.0,
                                1.0,
                                1.0,
                                1.0,
                                1.0,
                                1.0,
                                1.0,
                                1.0,
                                0.0,
                            ]
                        },
                    },
                }
            }
        }
        def assert_query(query_body: dict, query: str) -> dict:
            assert query_body == expected_query
            return query_body
        output = docsearch.similarity_search("foo", k=1, custom_query=assert_query)
        assert output == [Document(page_content="foo")]
    def test_similarity_search_exact_search_with_filter(
        self, elasticsearch_connection: dict, index_name: str
    ) -> None:
        """Test end to end construction and search with metadata."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = ElasticsearchStore.from_texts(
            texts,
            FakeEmbeddings(),
            **elasticsearch_connection,
            index_name=index_name,
            metadatas=metadatas,
            strategy=ElasticsearchStore.ExactRetrievalStrategy(),
        )
        def assert_query(query_body: dict, query: str) -> dict:
            # With a filter the inner query becomes a bool/filter clause.
            expected_query = {
                "query": {
                    "script_score": {
                        "query": {"bool": {"filter": [{"term": {"metadata.page": 0}}]}},
                        "script": {
                            "source": "cosineSimilarity(params.query_vector, 'vector') + 1.0",  # noqa: E501
                            "params": {
                                "query_vector": [
                                    1.0,
                                    1.0,
                                    1.0,
                                    1.0,
                                    1.0,
                                    1.0,
                                    1.0,
                                    1.0,
                                    1.0,
                                    0.0,
                                ]
                            },
                        },
                    }
                }
            }
            assert query_body == expected_query
            return query_body
        output = docsearch.similarity_search(
            "foo",
            k=1,
            custom_query=assert_query,
            filter=[{"term": {"metadata.page": 0}}],
        )
        assert output == [Document(page_content="foo", metadata={"page": 0})]
    def test_similarity_search_exact_search_distance_dot_product(
        self, elasticsearch_connection: dict, index_name: str
    ) -> None:
        """Test end to end construction and search with metadata."""
        texts = ["foo", "bar", "baz"]
        docsearch = ElasticsearchStore.from_texts(
            texts,
            FakeEmbeddings(),
            **elasticsearch_connection,
            index_name=index_name,
            strategy=ElasticsearchStore.ExactRetrievalStrategy(),
            distance_strategy="DOT_PRODUCT",
        )
        def assert_query(query_body: dict, query: str) -> dict:
            assert query_body == {
                "query": {
                    "script_score": {
                        "query": {"match_all": {}},
                        "script": {
                            "source": """
            double value = dotProduct(params.query_vector, 'vector');
            return sigmoid(1, Math.E, -value);
            """,
                            "params": {
                                "query_vector": [
                                    1.0,
                                    1.0,
                                    1.0,
                                    1.0,
                                    1.0,
                                    1.0,
                                    1.0,
                                    1.0,
                                    1.0,
                                    0.0,
                                ]
                            },
                        },
                    }
                }
            }
            return query_body
        output = docsearch.similarity_search("foo", k=1, custom_query=assert_query)
        assert output == [Document(page_content="foo")]
    def test_similarity_search_exact_search_unknown_distance_strategy(
        self, elasticsearch_connection: dict, index_name: str
    ) -> None:
        """Test end to end construction and search with unknown distance strategy."""
        with pytest.raises(KeyError):
            texts = ["foo", "bar", "baz"]
            ElasticsearchStore.from_texts(
                texts,
                FakeEmbeddings(),
                **elasticsearch_connection,
                index_name=index_name,
                strategy=ElasticsearchStore.ExactRetrievalStrategy(),
                distance_strategy="NOT_A_STRATEGY",
            )
    def test_max_marginal_relevance_search(
        self, elasticsearch_connection: dict, index_name: str
    ) -> None:
        """Test max marginal relevance search."""
        texts = ["foo", "bar", "baz"]
        docsearch = ElasticsearchStore.from_texts(
            texts,
            FakeEmbeddings(),
            **elasticsearch_connection,
            index_name=index_name,
            strategy=ElasticsearchStore.ExactRetrievalStrategy(),
        )
        # With k == fetch_k, MMR degenerates to plain similarity search.
        mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=3, fetch_k=3)
        sim_output = docsearch.similarity_search(texts[0], k=3)
        assert mmr_output == sim_output
        mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=2, fetch_k=3)
        assert len(mmr_output) == 2
        assert mmr_output[0].page_content == texts[0]
        assert mmr_output[1].page_content == texts[1]
        mmr_output = docsearch.max_marginal_relevance_search(
            texts[0],
            k=2,
            fetch_k=3,
            lambda_mult=0.1,  # more diversity
        )
        assert len(mmr_output) == 2
        assert mmr_output[0].page_content == texts[0]
        assert mmr_output[1].page_content == texts[2]
        # if fetch_k < k, then the output will be less than k
        mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=3, fetch_k=2)
        assert len(mmr_output) == 2
    def test_similarity_search_approx_with_hybrid_search(
        self, elasticsearch_connection: dict, index_name: str
    ) -> None:
        """Test end to end construction and search with metadata."""
        texts = ["foo", "bar", "baz"]
        docsearch = ElasticsearchStore.from_texts(
            texts,
            FakeEmbeddings(),
            **elasticsearch_connection,
            index_name=index_name,
            strategy=ElasticsearchStore.ApproxRetrievalStrategy(hybrid=True),
        )
        def assert_query(query_body: dict, query: str) -> dict:
            # Hybrid search combines knn + BM25 match, fused via RRF.
            assert query_body == {
                "knn": {
                    "field": "vector",
                    "filter": [],
                    "k": 1,
                    "num_candidates": 50,
                    "query_vector": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
                },
                "query": {
                    "bool": {
                        "filter": [],
                        "must": [{"match": {"text": {"query": "foo"}}}],
                    }
                },
                "rank": {"rrf": {}},
            }
            return query_body
        output = docsearch.similarity_search("foo", k=1, custom_query=assert_query)
        assert output == [Document(page_content="foo")]
    def test_similarity_search_approx_with_hybrid_search_rrf(
        self, es_client: Any, elasticsearch_connection: dict, index_name: str
    ) -> None:
        """Test end to end construction and rrf hybrid search with metadata."""
        from functools import partial
        from typing import Optional
        # 1. check query_body is okay
        # Exercises rrf=True (default rrf), rrf=False (no rank clause), and an
        # explicit rrf options dict.
        rrf_test_cases: List[Optional[Union[dict, bool]]] = [
            True,
            False,
            {"rank_constant": 1, "window_size": 5},
        ]
        for rrf_test_case in rrf_test_cases:
            texts = ["foo", "bar", "baz"]
            docsearch = ElasticsearchStore.from_texts(
                texts,
                FakeEmbeddings(),
                **elasticsearch_connection,
                index_name=index_name,
                strategy=ElasticsearchStore.ApproxRetrievalStrategy(
                    hybrid=True, rrf=rrf_test_case
                ),
            )
            def assert_query(
                query_body: dict,
                query: str,
                rrf: Optional[Union[dict, bool]] = True,
            ) -> dict:
                cmp_query_body = {
                    "knn": {
                        "field": "vector",
                        "filter": [],
                        "k": 3,
                        "num_candidates": 50,
                        "query_vector": [
                            1.0,
                            1.0,
                            1.0,
                            1.0,
                            1.0,
                            1.0,
                            1.0,
                            1.0,
                            1.0,
                            0.0,
                        ],
                    },
                    "query": {
                        "bool": {
                            "filter": [],
                            "must": [{"match": {"text": {"query": "foo"}}}],
                        }
                    },
                }
                # The rank clause appears only when rrf is enabled.
                if isinstance(rrf, dict):
                    cmp_query_body["rank"] = {"rrf": rrf}
                elif isinstance(rrf, bool) and rrf is True:
                    cmp_query_body["rank"] = {"rrf": {}}
                assert query_body == cmp_query_body
                return query_body
            ## without fetch_k parameter
            output = docsearch.similarity_search(
                "foo", k=3, custom_query=partial(assert_query, rrf=rrf_test_case)
            )
        # 2. check query result is okay
        # Compare the store's ordering against a raw client-side rrf search.
        es_output = es_client.search(
            index=index_name,
            query={
                "bool": {
                    "filter": [],
                    "must": [{"match": {"text": {"query": "foo"}}}],
                }
            },
            knn={
                "field": "vector",
                "filter": [],
                "k": 3,
                "num_candidates": 50,
                "query_vector": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
            },
            size=3,
            rank={"rrf": {"rank_constant": 1, "window_size": 5}},
        )
        assert [o.page_content for o in output] == [
            e["_source"]["text"] for e in es_output["hits"]["hits"]
        ]
        # 3. check rrf default option is okay
        docsearch = ElasticsearchStore.from_texts(
            texts,
            FakeEmbeddings(),
            **elasticsearch_connection,
            index_name=index_name,
            strategy=ElasticsearchStore.ApproxRetrievalStrategy(hybrid=True),
        )
        ## with fetch_k parameter
        output = docsearch.similarity_search(
            "foo", k=3, fetch_k=50, custom_query=assert_query
        )
    def test_similarity_search_approx_with_custom_query_fn(
        self, elasticsearch_connection: dict, index_name: str
    ) -> None:
        """test that custom query function is called
        with the query string and query body"""
        def my_custom_query(query_body: dict, query: str) -> dict:
            assert query == "foo"
            assert query_body == {
                "knn": {
                    "field": "vector",
                    "filter": [],
                    "k": 1,
                    "num_candidates": 50,
                    "query_vector": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
                }
            }
            # Replace the generated query entirely: search for "bar" instead.
            return {"query": {"match": {"text": {"query": "bar"}}}}
        """Test end to end construction and search with metadata."""
        texts = ["foo", "bar", "baz"]
        docsearch = ElasticsearchStore.from_texts(
            texts, FakeEmbeddings(), **elasticsearch_connection, index_name=index_name
        )
        # Proof the custom query ran: asking for "foo" returns "bar".
        output = docsearch.similarity_search("foo", k=1, custom_query=my_custom_query)
        assert output == [Document(page_content="bar")]
    @pytest.mark.skipif(
        "sentence-transformers__all-minilm-l6-v2" not in modelsDeployed,
        reason="Sentence Transformers model not deployed in ML Node, skipping test",
    )
    def test_similarity_search_with_approx_infer_instack(
        self, elasticsearch_connection: dict, index_name: str
    ) -> None:
        """test end to end with approx retrieval strategy and inference in-stack"""
        docsearch = ElasticsearchStore(
            index_name=index_name,
            strategy=ElasticsearchStore.ApproxRetrievalStrategy(
                query_model_id="sentence-transformers__all-minilm-l6-v2"
            ),
            query_field="text_field",
            vector_query_field="vector_query_field.predicted_value",
            **elasticsearch_connection,
        )
        # setting up the pipeline for inference
        docsearch.client.ingest.put_pipeline(
            id="test_pipeline",
            processors=[
                {
                    "inference": {
                        "model_id": "sentence-transformers__all-minilm-l6-v2",
                        "field_map": {"query_field": "text_field"},
                        "target_field": "vector_query_field",
                    }
                }
            ],
        )
        # creating a new index with the pipeline,
        # not relying on langchain to create the index
        docsearch.client.indices.create(
            index=index_name,
            mappings={
                "properties": {
                    "text_field": {"type": "text"},
                    "vector_query_field": {
                        "properties": {
                            "predicted_value": {
                                "type": "dense_vector",
                                "dims": 384,
                                "index": True,
                                "similarity": "l2_norm",
                            }
                        }
                    },
                }
            },
            settings={"index": {"default_pipeline": "test_pipeline"}},
        )
        # adding documents to the index
        texts = ["foo", "bar", "baz"]
        for i, text in enumerate(texts):
            docsearch.client.create(
                index=index_name,
                id=str(i),
                document={"text_field": text, "metadata": {}},
            )
        docsearch.client.indices.refresh(index=index_name)
        def assert_query(query_body: dict, query: str) -> dict:
            # In-stack inference embeds via query_vector_builder, not a
            # client-side query_vector.
            assert query_body == {
                "knn": {
                    "filter": [],
                    "field": "vector_query_field.predicted_value",
                    "k": 1,
                    "num_candidates": 50,
                    "query_vector_builder": {
                        "text_embedding": {
                            "model_id": "sentence-transformers__all-minilm-l6-v2",
                            "model_text": "foo",
                        }
                    },
                }
            }
            return query_body
        output = docsearch.similarity_search("foo", k=1, custom_query=assert_query)
        assert output == [Document(page_content="foo")]
        output = docsearch.similarity_search("bar", k=1)
        assert output == [Document(page_content="bar")]
    @pytest.mark.skipif(
        "elser" not in modelsDeployed,
        reason="ELSER not deployed in ML Node, skipping test",
    )
    def test_similarity_search_with_sparse_infer_instack(
        self, elasticsearch_connection: dict, index_name: str
    ) -> None:
        """test end to end with sparse retrieval strategy and inference in-stack"""
        texts = ["foo", "bar", "baz"]
        docsearch = ElasticsearchStore.from_texts(
            texts,
            **elasticsearch_connection,
            index_name=index_name,
            strategy=ElasticsearchStore.SparseVectorRetrievalStrategy(),
        )
        output = docsearch.similarity_search("foo", k=1)
        assert output == [Document(page_content="foo")]
    def test_elasticsearch_with_relevance_score(
        self, elasticsearch_connection: dict, index_name: str
    ) -> None:
        """Test to make sure the relevance score is scaled to 0-1."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": str(i)} for i in range(len(texts))]
        embeddings = FakeEmbeddings()
        docsearch = ElasticsearchStore.from_texts(
            index_name=index_name,
            texts=texts,
            embedding=embeddings,
            metadatas=metadatas,
            **elasticsearch_connection,
        )
        embedded_query = embeddings.embed_query("foo")
        output = docsearch.similarity_search_by_vector_with_relevance_scores(
            embedding=embedded_query, k=1
        )
        assert output == [(Document(page_content="foo", metadata={"page": "0"}), 1.0)]
    def test_elasticsearch_with_relevance_threshold(
        self, elasticsearch_connection: dict, index_name: str
    ) -> None:
        """Test to make sure the relevance threshold is respected."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": str(i)} for i in range(len(texts))]
        embeddings = FakeEmbeddings()
        docsearch = ElasticsearchStore.from_texts(
            index_name=index_name,
            texts=texts,
            embedding=embeddings,
            metadatas=metadatas,
            **elasticsearch_connection,
        )
        # Find a good threshold for testing
        query_string = "foo"
        embedded_query = embeddings.embed_query(query_string)
        top3 = docsearch.similarity_search_by_vector_with_relevance_scores(
            embedding=embedded_query, k=3
        )
        similarity_of_second_ranked = top3[1][1]
        assert len(top3) == 3
        # Test threshold
        retriever = docsearch.as_retriever(
            search_type="similarity_score_threshold",
            search_kwargs={"score_threshold": similarity_of_second_ranked},
        )
        output = retriever.invoke(query_string)
        assert output == [
            top3[0][0],
            top3[1][0],
            # third ranked is out
        ]
    def test_elasticsearch_delete_ids(
        self, elasticsearch_connection: dict, index_name: str
    ) -> None:
        """Test delete methods from vector store."""
        texts = ["foo", "bar", "baz", "gni"]
        metadatas = [{"page": i} for i in range(len(texts))]
        docsearch = ElasticsearchStore(
            embedding=ConsistentFakeEmbeddings(),
            **elasticsearch_connection,
            index_name=index_name,
        )
        ids = docsearch.add_texts(texts, metadatas)
        output = docsearch.similarity_search("foo", k=10)
        assert len(output) == 4
        docsearch.delete(ids[1:3])
        output = docsearch.similarity_search("foo", k=10)
        assert len(output) == 2
        # Deleting an unknown id must not raise or remove anything.
        docsearch.delete(["not-existing"])
        output = docsearch.similarity_search("foo", k=10)
        assert len(output) == 2
        docsearch.delete([ids[0]])
        output = docsearch.similarity_search("foo", k=10)
        assert len(output) == 1
        docsearch.delete([ids[3]])
        output = docsearch.similarity_search("gni", k=10)
        assert len(output) == 0
    def test_elasticsearch_indexing_exception_error(
        self,
        elasticsearch_connection: dict,
        index_name: str,
        caplog: pytest.LogCaptureFixture,
    ) -> None:
        """Test bulk exception logging is giving better hints."""
        from elasticsearch.helpers import BulkIndexError
        docsearch = ElasticsearchStore(
            embedding=ConsistentFakeEmbeddings(),
            **elasticsearch_connection,
            index_name=index_name,
        )
        # Point the index at a pipeline that does not exist so bulk fails.
        docsearch.client.indices.create(
            index=index_name,
            mappings={"properties": {}},
            settings={"index": {"default_pipeline": "not-existing-pipeline"}},
        )
        texts = ["foo"]
        with pytest.raises(BulkIndexError):
            docsearch.add_texts(texts)
        error_reason = "pipeline with id [not-existing-pipeline] does not exist"
        log_message = f"First error reason: {error_reason}"
        assert log_message in caplog.text
    def test_elasticsearch_with_user_agent(
        self, es_client: Any, index_name: str
    ) -> None:
        """Test to make sure the user-agent is set correctly."""
        texts = ["foo", "bob", "baz"]
        ElasticsearchStore.from_texts(
            texts,
            FakeEmbeddings(),
            es_connection=es_client,
            index_name=index_name,
        )
        # The recording transport captured the headers of the first request.
        user_agent = es_client.transport.requests[0]["headers"]["User-Agent"]
        pattern = r"^langchain-py-vs/\d+\.\d+\.\d+$"
        match = re.match(pattern, user_agent)
        assert (
            match is not None
        ), f"The string '{user_agent}' does not match the expected pattern."
    def test_elasticsearch_with_internal_user_agent(
        self, elasticsearch_connection: Dict, index_name: str
    ) -> None:
        """Test to make sure the user-agent is set correctly."""
        texts = ["foo"]
        store = ElasticsearchStore.from_texts(
            texts,
            FakeEmbeddings(),
            **elasticsearch_connection,
            index_name=index_name,
        )
        user_agent = store.client._headers["User-Agent"]
        pattern = r"^langchain-py-vs/\d+\.\d+\.\d+$"
        match = re.match(pattern, user_agent)
        assert (
            match is not None
        ), f"The string '{user_agent}' does not match the expected pattern."
    def test_bulk_args(self, es_client: Any, index_name: str) -> None:
        """Test that bulk_kwargs (e.g. chunk_size) are passed through to the
        bulk indexing helper, by counting one request per document."""
        texts = ["foo", "bob", "baz"]
        ElasticsearchStore.from_texts(
            texts,
            FakeEmbeddings(),
            es_connection=es_client,
            index_name=index_name,
            bulk_kwargs={"chunk_size": 1},
        )
        # 1 for index exist, 1 for index create, 3 for index docs
        assert len(es_client.transport.requests) == 5  # type: ignore
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_documentdb.py | """Test DocumentDBVectorSearch functionality."""
import logging
import os
from time import sleep
from typing import Any, Optional
import pytest
from langchain_core.documents import Document
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores.documentdb import (
DocumentDBSimilarityType,
DocumentDBVectorSearch,
)
logging.basicConfig(level=logging.DEBUG)  # verbose logs aid debugging against a live cluster
# NOTE(review): "deployment" suggests an Azure OpenAI deployment name — confirm.
model_deployment = os.getenv(
    "OPENAI_EMBEDDINGS_DEPLOYMENT", "smart-agent-embedding-ada"
)
model_name = os.getenv("OPENAI_EMBEDDINGS_MODEL_NAME", "text-embedding-ada-002")
INDEX_NAME = "langchain-test-index"  # name of the vector index created by tests
NAMESPACE = "langchain_test_db.langchain_test_collection"  # "<db>.<collection>"
CONNECTION_STRING = os.getenv("DOCUMENTDB_URI", "")
DB_NAME, COLLECTION_NAME = NAMESPACE.split(".")
dimensions = 1536  # embedding dimensionality (matches text-embedding-ada-002)
similarity_algorithm = DocumentDBSimilarityType.COS  # cosine similarity
def prepare_collection() -> Any:
    """Return the MongoDB/DocumentDB collection used by these tests.

    Connects with the module-level CONNECTION_STRING and resolves the
    database/collection pair parsed from NAMESPACE.
    """
    from pymongo import MongoClient

    client: MongoClient = MongoClient(CONNECTION_STRING)
    return client[DB_NAME][COLLECTION_NAME]
@pytest.fixture()
def collection() -> Any:
    """Pytest fixture: a fresh handle to the shared test collection."""
    return prepare_collection()
@pytest.fixture()
def embedding_openai() -> Any:
    """Pytest fixture: OpenAI embeddings configured from the module env vars."""
    openai_embeddings: OpenAIEmbeddings = OpenAIEmbeddings(
        deployment=model_deployment, model=model_name, chunk_size=1
    )
    return openai_embeddings
"""
This is how to run the integration tests:
cd libs/community
make test TEST_FILE=tests/integration_tests/vectorstores/test_documentdb.py
NOTE: You will first need to follow the contributor setup steps:
https://python.langchain.com/docs/contributing/code. You will also need to install
`pymongo` via `poetry`. You can also run the test directly using `pytest`, but please
make sure to install all dependencies.
"""
class TestDocumentDBVectorSearch:
    """Integration tests for ``DocumentDBVectorSearch``.

    Each test builds a vector store over the shared test collection, creates
    an HNSW index, runs a similarity search, and drops the index afterwards.
    Requires a live DocumentDB instance (``DOCUMENTDB_URI``) and an
    ``OPENAI_API_KEY`` for embeddings.
    """

    @classmethod
    def setup_class(cls) -> None:
        """Fail fast on missing credentials and verify a clean collection."""
        if not os.getenv("OPENAI_API_KEY"):
            raise ValueError("OPENAI_API_KEY environment variable is not set")

        # ensure the test collection is empty
        collection = prepare_collection()
        assert collection.count_documents({}) == 0  # type: ignore[index]

    @classmethod
    def teardown_class(cls) -> None:
        """Remove all documents and indexes after the test class finishes."""
        collection = prepare_collection()
        # delete all the documents in the collection
        collection.delete_many({})  # type: ignore[index]
        collection.drop_indexes()

    @pytest.fixture(autouse=True)
    def setup(self) -> None:
        """Reset the collection before every test (runs automatically)."""
        collection = prepare_collection()
        # delete all the documents in the collection
        collection.delete_many({})  # type: ignore[index]
        collection.drop_indexes()

    def test_from_documents_cosine_distance(
        self, embedding_openai: OpenAIEmbeddings, collection: Any
    ) -> None:
        """Test end to end construction and search with cosine similarity."""
        documents = [
            Document(page_content="Dogs are tough.", metadata={"a": 1}),
            Document(page_content="Cats have fluff.", metadata={"b": 1}),
            Document(page_content="What is a sandwich?", metadata={"c": 1}),
            Document(page_content="That fence is purple.", metadata={"d": 1, "e": 2}),
        ]
        vectorstore = DocumentDBVectorSearch.from_documents(
            documents,
            embedding_openai,
            collection=collection,
            index_name=INDEX_NAME,
        )
        sleep(1)  # waits for DocumentDB to save contents to the collection

        # Create the HNSW index that will be leveraged later for vector search
        vectorstore.create_index(dimensions, similarity_algorithm)
        sleep(2)  # waits for the index to be set up

        output = vectorstore.similarity_search("Sandwich", k=1)

        assert output
        assert output[0].page_content == "What is a sandwich?"
        assert output[0].metadata["c"] == 1
        vectorstore.delete_index()

    def test_from_documents_inner_product(
        self, embedding_openai: OpenAIEmbeddings, collection: Any
    ) -> None:
        """Test end to end construction and search with dot-product similarity."""
        documents = [
            Document(page_content="Dogs are tough.", metadata={"a": 1}),
            Document(page_content="Cats have fluff.", metadata={"b": 1}),
            Document(page_content="What is a sandwich?", metadata={"c": 1}),
            Document(page_content="That fence is purple.", metadata={"d": 1, "e": 2}),
        ]
        vectorstore = DocumentDBVectorSearch.from_documents(
            documents,
            embedding_openai,
            collection=collection,
            index_name=INDEX_NAME,
        )
        sleep(1)  # waits for DocumentDB to save contents to the collection

        # Create the HNSW index that will be leveraged later for vector search
        vectorstore.create_index(dimensions, DocumentDBSimilarityType.DOT)
        sleep(2)  # waits for the index to be set up

        output = vectorstore.similarity_search("Sandwich", k=1, ef_search=100)

        assert output
        assert output[0].page_content == "What is a sandwich?"
        assert output[0].metadata["c"] == 1
        vectorstore.delete_index()

    def test_from_texts_cosine_distance(
        self, embedding_openai: OpenAIEmbeddings, collection: Any
    ) -> None:
        """Construct the store from raw texts and search with cosine similarity."""
        texts = [
            "Dogs are tough.",
            "Cats have fluff.",
            "What is a sandwich?",
            "That fence is purple.",
        ]
        vectorstore = DocumentDBVectorSearch.from_texts(
            texts,
            embedding_openai,
            collection=collection,
            index_name=INDEX_NAME,
        )

        # Create the HNSW index that will be leveraged later for vector search
        vectorstore.create_index(dimensions, similarity_algorithm)
        sleep(2)  # waits for the index to be set up

        output = vectorstore.similarity_search("Sandwich", k=1)

        assert output[0].page_content == "What is a sandwich?"
        vectorstore.delete_index()

    def test_from_texts_with_metadatas_cosine_distance(
        self, embedding_openai: OpenAIEmbeddings, collection: Any
    ) -> None:
        """Construct from texts plus metadata and verify metadata round-trips."""
        texts = [
            "Dogs are tough.",
            "Cats have fluff.",
            "What is a sandwich?",
            "The fence is purple.",
        ]
        metadatas = [{"a": 1}, {"b": 1}, {"c": 1}, {"d": 1, "e": 2}]
        vectorstore = DocumentDBVectorSearch.from_texts(
            texts,
            embedding_openai,
            metadatas=metadatas,
            collection=collection,
            index_name=INDEX_NAME,
        )

        # Create the HNSW index that will be leveraged later for vector search
        vectorstore.create_index(dimensions, similarity_algorithm)
        sleep(2)  # waits for the index to be set up

        output = vectorstore.similarity_search("Sandwich", k=1)

        assert output
        assert output[0].page_content == "What is a sandwich?"
        assert output[0].metadata["c"] == 1
        vectorstore.delete_index()

    def test_from_texts_with_metadatas_delete_one(
        self, embedding_openai: OpenAIEmbeddings, collection: Any
    ) -> None:
        """Delete a single document by id and verify it no longer ranks first."""
        texts = [
            "Dogs are tough.",
            "Cats have fluff.",
            "What is a sandwich?",
            "The fence is purple.",
        ]
        metadatas = [{"a": 1}, {"b": 1}, {"c": 1}, {"d": 1, "e": 2}]
        vectorstore = DocumentDBVectorSearch.from_texts(
            texts,
            embedding_openai,
            metadatas=metadatas,
            collection=collection,
            index_name=INDEX_NAME,
        )

        # Create the HNSW index that will be leveraged later for vector search
        vectorstore.create_index(dimensions, similarity_algorithm)
        sleep(2)  # waits for the index to be set up

        output = vectorstore.similarity_search("Sandwich", k=1)

        assert output
        assert output[0].page_content == "What is a sandwich?"
        assert output[0].metadata["c"] == 1

        # Delete the best match, then confirm the query no longer returns it.
        first_document_id_object = output[0].metadata["_id"]
        first_document_id = str(first_document_id_object)
        vectorstore.delete_document_by_id(first_document_id)
        sleep(2)  # waits for the index to be updated

        output2 = vectorstore.similarity_search("Sandwich", k=1, ef_search=10)
        assert output2
        assert output2[0].page_content != "What is a sandwich?"
        vectorstore.delete_index()

    def test_from_texts_with_metadatas_delete_multiple(
        self, embedding_openai: OpenAIEmbeddings, collection: Any
    ) -> None:
        """Delete several documents at once and verify only one remains."""
        texts = [
            "Dogs are tough.",
            "Cats have fluff.",
            "What is a sandwich?",
            "The fence is purple.",
        ]
        metadatas = [{"a": 1}, {"b": 1}, {"c": 1}, {"d": 1, "e": 2}]
        vectorstore = DocumentDBVectorSearch.from_texts(
            texts,
            embedding_openai,
            metadatas=metadatas,
            collection=collection,
            index_name=INDEX_NAME,
        )

        # Create the HNSW index that will be leveraged later for vector search
        vectorstore.create_index(dimensions, similarity_algorithm)
        sleep(2)  # waits for the index to be set up

        output = vectorstore.similarity_search("Sandwich", k=5)

        # Fix: removed two no-op bare expression statements that re-read
        # metadata["_id"] without using the result.
        first_document_id = str(output[0].metadata["_id"])
        second_document_id = output[1].metadata["_id"]
        third_document_id = output[2].metadata["_id"]

        document_ids = [first_document_id, second_document_id, third_document_id]
        vectorstore.delete(document_ids)
        sleep(2)  # waits for the index to be updated

        output_2 = vectorstore.similarity_search("Sandwich", k=5)
        assert output
        assert output_2

        assert len(output) == 4  # we should see all the four documents
        assert (
            len(output_2) == 1
        )  # we should see only one document left after three have been deleted
        vectorstore.delete_index()

    def test_from_texts_with_metadatas_inner_product(
        self, embedding_openai: OpenAIEmbeddings, collection: Any
    ) -> None:
        """Construct from texts plus metadata using dot-product similarity."""
        texts = [
            "Dogs are tough.",
            "Cats have fluff.",
            "What is a sandwich?",
            "The fence is purple.",
        ]
        metadatas = [{"a": 1}, {"b": 1}, {"c": 1}, {"d": 1, "e": 2}]
        vectorstore = DocumentDBVectorSearch.from_texts(
            texts,
            embedding_openai,
            metadatas=metadatas,
            collection=collection,
            index_name=INDEX_NAME,
        )

        # Create the HNSW index that will be leveraged later for vector search
        vectorstore.create_index(dimensions, DocumentDBSimilarityType.DOT)
        sleep(2)  # waits for the index to be set up

        output = vectorstore.similarity_search("Sandwich", k=1)

        assert output
        assert output[0].page_content == "What is a sandwich?"
        assert output[0].metadata["c"] == 1
        vectorstore.delete_index()

    def test_from_texts_with_metadatas_euclidean_distance(
        self, embedding_openai: OpenAIEmbeddings, collection: Any
    ) -> None:
        """Construct from texts plus metadata using Euclidean distance."""
        texts = [
            "Dogs are tough.",
            "Cats have fluff.",
            "What is a sandwich?",
            "The fence is purple.",
        ]
        metadatas = [{"a": 1}, {"b": 1}, {"c": 1}, {"d": 1, "e": 2}]
        vectorstore = DocumentDBVectorSearch.from_texts(
            texts,
            embedding_openai,
            metadatas=metadatas,
            collection=collection,
            index_name=INDEX_NAME,
        )

        # Create the HNSW index that will be leveraged later for vector search
        vectorstore.create_index(dimensions, DocumentDBSimilarityType.EUC)
        sleep(2)  # waits for the index to be set up

        output = vectorstore.similarity_search("Sandwich", k=1)

        assert output
        assert output[0].page_content == "What is a sandwich?"
        assert output[0].metadata["c"] == 1
        vectorstore.delete_index()

    def invoke_delete_with_no_args(
        self, embedding_openai: OpenAIEmbeddings, collection: Any
    ) -> Optional[bool]:
        """Helper: call ``delete`` with no arguments (expected to raise)."""
        vectorstore: DocumentDBVectorSearch = (
            DocumentDBVectorSearch.from_connection_string(
                CONNECTION_STRING,
                NAMESPACE,
                embedding_openai,
                index_name=INDEX_NAME,
            )
        )
        return vectorstore.delete()

    def invoke_delete_by_id_with_no_args(
        self, embedding_openai: OpenAIEmbeddings, collection: Any
    ) -> None:
        """Helper: call ``delete_document_by_id`` with no id (expected to raise)."""
        vectorstore: DocumentDBVectorSearch = (
            DocumentDBVectorSearch.from_connection_string(
                CONNECTION_STRING,
                NAMESPACE,
                embedding_openai,
                index_name=INDEX_NAME,
            )
        )
        vectorstore.delete_document_by_id()

    def test_invalid_arguments_to_delete(
        self, embedding_openai: OpenAIEmbeddings, collection: Any
    ) -> None:
        """``delete`` without ids must raise a descriptive ValueError."""
        with pytest.raises(ValueError) as exception_info:
            self.invoke_delete_with_no_args(embedding_openai, collection)
        assert str(exception_info.value) == "No document ids provided to delete."

    def test_no_arguments_to_delete_by_id(
        self, embedding_openai: OpenAIEmbeddings, collection: Any
    ) -> None:
        """``delete_document_by_id`` without an id must raise."""
        with pytest.raises(Exception) as exception_info:
            self.invoke_delete_by_id_with_no_args(embedding_openai, collection)
        assert str(exception_info.value) == "No document id provided to delete."
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_lantern.py | """Test Lantern functionality."""
import os
from typing import List, Tuple
from langchain_core.documents import Document
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import Lantern
# Connection parameters are overridable via TEST_LANTERN_* environment
# variables; defaults target a local stock Postgres instance.
CONNECTION_STRING = Lantern.connection_string_from_db_params(
    driver=os.environ.get("TEST_LANTERN_DRIVER", "psycopg2"),
    host=os.environ.get("TEST_LANTERN_HOST", "localhost"),
    port=int(os.environ.get("TEST_LANTERN_PORT", "5432")),
    database=os.environ.get("TEST_LANTERN_DATABASE", "postgres"),
    user=os.environ.get("TEST_LANTERN_USER", "postgres"),
    password=os.environ.get("TEST_LANTERN_PASSWORD", "postgres"),
)
# Dimensionality of OpenAI's text-embedding-ada-002 vectors, mimicked below.
ADA_TOKEN_COUNT = 1536
def fix_distance_precision(
    results: List[Tuple[Document, float]], precision: int = 2
) -> List[Tuple[Document, float]]:
    """Round each score to ``precision`` decimals (via string formatting) so
    scored results can be compared with literal expected values."""
    rounded = []
    for document, score in results:
        rounded.append((document, float(f"{score:.{precision}f}")))
    return rounded
class FakeEmbeddingsWithAdaDimension(FakeEmbeddings):
    """Fake embeddings functionality for testing.

    Produces deterministic ada-sized (1536-dim) vectors: every document embeds
    to ones except the last component, which is the document's index, so
    document ``i`` is "closest" to the all-ones query vector when ``i == 0``.
    """

    def __init__(self) -> None:
        # Fix: modern zero-argument super() and a return annotation replace the
        # legacy two-argument form that needed a `no-untyped-def` ignore.
        super().__init__(size=ADA_TOKEN_COUNT)

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Return simple embeddings: index i encoded in the last dimension."""
        return [
            [float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(i)] for i in range(len(texts))
        ]

    def embed_query(self, text: str) -> List[float]:
        """Return simple embeddings: the vector matching document index 0."""
        return [float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(0.0)]
def test_lantern() -> None:
    """Build a Lantern store from raw texts and run a basic similarity search."""
    corpus = ["foo", "bar", "baz"]
    store = Lantern.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        collection_name="test_collection",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo")]
def test_lantern_embeddings() -> None:
    """Build a Lantern store from precomputed (text, embedding) pairs and search."""
    corpus = ["foo", "bar", "baz"]
    vectors = FakeEmbeddingsWithAdaDimension().embed_documents(corpus)
    pairs = list(zip(corpus, vectors))
    store = Lantern.from_embeddings(
        text_embeddings=pairs,
        embedding=FakeEmbeddingsWithAdaDimension(),
        collection_name="test_collection",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo")]
def test_lantern_embeddings_distance_strategy() -> None:
    """Construct from embeddings with an explicit distance strategy and search."""
    corpus = ["foo", "bar", "baz"]
    vectors = FakeEmbeddingsWithAdaDimension().embed_documents(corpus)
    pairs = list(zip(corpus, vectors))
    store = Lantern.from_embeddings(
        text_embeddings=pairs,
        embedding=FakeEmbeddingsWithAdaDimension(),
        collection_name="test_collection",
        connection_string=CONNECTION_STRING,
        distance_strategy="hamming",  # type: ignore[arg-type]
        pre_delete_collection=True,
    )
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo")]
def test_lantern_with_metadatas() -> None:
    """Verify per-text metadata is stored and returned with search results."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = Lantern.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo", metadata={"page": "0"})]
def test_lantern_with_metadatas_with_scores() -> None:
    """Verify scored search returns metadata and a near-zero best distance."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = Lantern.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    scored = fix_distance_precision(store.similarity_search_with_score("foo", k=1))
    assert scored == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_lantern_with_filter_match() -> None:
    """A metadata filter matching the best document should not change results."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = Lantern.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection_filter",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    scored = fix_distance_precision(
        store.similarity_search_with_score("foo", k=1, filter={"page": "0"})
    )
    assert scored == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_lantern_with_filter_distant_match() -> None:
    """A filter excluding the best match should surface the filtered document."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = Lantern.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection_filter",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    scored = fix_distance_precision(
        store.similarity_search_with_score("foo", k=1, filter={"page": "2"})
    )
    assert scored == [(Document(page_content="baz", metadata={"page": "2"}), 0.0)]
def test_lantern_with_filter_no_match() -> None:
    """A filter matching no stored metadata must yield an empty result set."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = Lantern.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection_filter",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    scored = store.similarity_search_with_score("foo", k=1, filter={"page": "5"})
    assert scored == []
def test_lantern_with_filter_in_set() -> None:
    """An IN filter should restrict results to the listed metadata values."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = Lantern.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection_filter",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    scored = fix_distance_precision(
        store.similarity_search_with_score(
            "foo", k=2, filter={"page": {"IN": ["0", "2"]}}
        ),
        4,
    )
    expected = [
        (Document(page_content="foo", metadata={"page": "0"}), 0.0),
        (Document(page_content="baz", metadata={"page": "2"}), 0.0013),
    ]
    assert scored == expected
def test_lantern_delete_docs() -> None:
    """Add documents with explicit ids, delete them all, and expect no hits."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = Lantern.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        ids=["1", "2", "3"],
        collection_name="test_collection_filter",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    store.delete(["1", "2", "3"])
    hits = store.similarity_search("foo", k=3)
    assert hits == []
def test_lantern_relevance_score() -> None:
    """Relevance scores must be scaled into [0, 1], best match scoring 1.0."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = Lantern.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    scored = fix_distance_precision(
        store.similarity_search_with_relevance_scores("foo", k=3), 4
    )
    expected = [
        (Document(page_content="foo", metadata={"page": "0"}), 1.0),
        (Document(page_content="bar", metadata={"page": "1"}), 0.9997),
        (Document(page_content="baz", metadata={"page": "2"}), 0.9987),
    ]
    assert scored == expected
def test_lantern_retriever_search_threshold() -> None:
    """Retriever with a score threshold should drop low-relevance documents."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = Lantern.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    retriever = store.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"k": 3, "score_threshold": 0.999},
    )
    # "baz" falls below the 0.999 threshold and is excluded.
    hits = retriever.invoke("summer")
    assert hits == [
        Document(page_content="foo", metadata={"page": "0"}),
        Document(page_content="bar", metadata={"page": "1"}),
    ]
def test_lantern_retriever_search_threshold_custom_normalization_fn() -> None:
    """Thresholding must honor a caller-supplied relevance normalization fn."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = Lantern.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection",
        connection_string=CONNECTION_STRING,
        relevance_score_fn=lambda d: d * 0,
        pre_delete_collection=True,
    )
    retriever = store.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"k": 3, "score_threshold": 0.9999},
    )
    hits = retriever.invoke("foo")
    assert hits == [
        Document(page_content="foo", metadata={"page": "0"}),
    ]
def test_lantern_max_marginal_relevance_search() -> None:
    """MMR search should return the closest document for k=1."""
    corpus = ["foo", "bar", "baz"]
    store = Lantern.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        collection_name="test_collection",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    hits = store.max_marginal_relevance_search("foo", k=1, fetch_k=3)
    assert hits == [Document(page_content="foo")]
def test_lantern_max_marginal_relevance_search_with_score() -> None:
    """Scored MMR search should return the closest document with distance 0."""
    corpus = ["foo", "bar", "baz"]
    store = Lantern.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        collection_name="test_collection",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    scored = fix_distance_precision(
        store.max_marginal_relevance_search_with_score("foo", k=1, fetch_k=3)
    )
    assert scored == [(Document(page_content="foo"), 0.0)]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_pgvector.py | """Test PGVector functionality."""
import os
from typing import Any, Dict, Generator, List, Type, Union
import pytest
import sqlalchemy
from langchain_core.documents import Document
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm import Session
from langchain_community.vectorstores.pgvector import (
SUPPORTED_OPERATORS,
PGVector,
)
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
from tests.integration_tests.vectorstores.fixtures.filtering_test_cases import (
DOCUMENTS,
TYPE_1_FILTERING_TEST_CASES,
TYPE_2_FILTERING_TEST_CASES,
TYPE_3_FILTERING_TEST_CASES,
TYPE_4_FILTERING_TEST_CASES,
TYPE_5_FILTERING_TEST_CASES,
)
# The connection string matches the default settings in the docker-compose file
# located in the root of the repository: [root]/docker/docker-compose.yml
# Non-standard ports are used to avoid conflicts with other local postgres
# instances.
# To spin up postgres with the pgvector extension:
# cd [root]/docker/docker-compose.yml
# docker compose up pgvector
# Connection parameters are overridable via TEST_PGVECTOR_* environment
# variables; defaults match the repo's docker-compose pgvector service.
CONNECTION_STRING = PGVector.connection_string_from_db_params(
    driver=os.environ.get("TEST_PGVECTOR_DRIVER", "psycopg2"),
    host=os.environ.get("TEST_PGVECTOR_HOST", "localhost"),
    port=int(os.environ.get("TEST_PGVECTOR_PORT", "6024")),
    database=os.environ.get("TEST_PGVECTOR_DATABASE", "langchain"),
    user=os.environ.get("TEST_PGVECTOR_USER", "langchain"),
    password=os.environ.get("TEST_PGVECTOR_PASSWORD", "langchain"),
)
# Dimensionality of OpenAI's text-embedding-ada-002 vectors, mimicked below.
ADA_TOKEN_COUNT = 1536
class FakeEmbeddingsWithAdaDimension(FakeEmbeddings):
    """Fake embeddings functionality for testing.

    Emits deterministic ada-sized (1536-dim) vectors: all ones except the last
    component, which carries the document index; queries match index 0.
    """

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Return simple embeddings: index i encoded in the last dimension."""
        vectors = []
        for index, _ in enumerate(texts):
            vectors.append([1.0] * (ADA_TOKEN_COUNT - 1) + [float(index)])
        return vectors

    def embed_query(self, text: str) -> List[float]:
        """Return simple embeddings: the vector matching document index 0."""
        return [1.0] * (ADA_TOKEN_COUNT - 1) + [0.0]
def test_pgvector(pgvector: PGVector) -> None:
    """Build a PGVector store from raw texts and run a basic similarity search.

    NOTE(review): the ``pgvector`` fixture instance is not used — the test
    builds its own store; presumably the fixture only seeds the database.
    Confirm whether the parameter is intentional.
    """
    corpus = ["foo", "bar", "baz"]
    store = PGVector.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        collection_name="test_collection",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo")]
def test_pgvector_embeddings() -> None:
    """Build a PGVector store from precomputed (text, embedding) pairs and search."""
    corpus = ["foo", "bar", "baz"]
    vectors = FakeEmbeddingsWithAdaDimension().embed_documents(corpus)
    pairs = list(zip(corpus, vectors))
    store = PGVector.from_embeddings(
        text_embeddings=pairs,
        embedding=FakeEmbeddingsWithAdaDimension(),
        collection_name="test_collection",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo")]
def test_pgvector_with_metadatas() -> None:
    """Verify per-text metadata is stored and returned with search results."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = PGVector.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo", metadata={"page": "0"})]
def test_pgvector_with_metadatas_with_scores() -> None:
    """Verify scored search returns metadata and a zero best distance."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = PGVector.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    scored = store.similarity_search_with_score("foo", k=1)
    assert scored == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_pgvector_with_filter_match() -> None:
    """A metadata filter matching the best document should not change results."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = PGVector.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection_filter",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    scored = store.similarity_search_with_score("foo", k=1, filter={"page": "0"})
    assert scored == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_pgvector_with_filter_distant_match() -> None:
    """A filter excluding the best match should surface the filtered document."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = PGVector.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection_filter",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    scored = store.similarity_search_with_score("foo", k=1, filter={"page": "2"})
    assert scored == [
        (Document(page_content="baz", metadata={"page": "2"}), 0.0013003906671379406)
    ]
def test_pgvector_with_filter_no_match() -> None:
    """A filter matching no stored metadata must yield an empty result set."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = PGVector.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection_filter",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    scored = store.similarity_search_with_score("foo", k=1, filter={"page": "5"})
    assert scored == []
def test_pgvector_collection_with_metadata() -> None:
    """Verify collection-level metadata round-trips through construction.

    Builds a store with ``collection_metadata`` and reads the collection row
    back through a fresh session to confirm name and metadata were persisted.
    """
    pgvector = PGVector(
        collection_name="test_collection",
        collection_metadata={"foo": "bar"},
        embedding_function=FakeEmbeddingsWithAdaDimension(),
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    session = Session(pgvector._create_engine())
    collection = pgvector.get_collection(session)
    # Fix: replaced the `if collection is None: assert False` / `else` block
    # with direct asserts — same failure message, flatter control flow.
    assert collection is not None, "Expected a CollectionStore object but received None"
    assert collection.name == "test_collection"
    assert collection.cmetadata == {"foo": "bar"}
def test_pgvector_with_filter_in_set() -> None:
    """An IN filter should restrict results to the listed metadata values."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = PGVector.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection_filter",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    scored = store.similarity_search_with_score(
        "foo", k=2, filter={"page": {"IN": ["0", "2"]}}
    )
    expected = [
        (Document(page_content="foo", metadata={"page": "0"}), 0.0),
        (Document(page_content="baz", metadata={"page": "2"}), 0.0013003906671379406),
    ]
    assert scored == expected
def test_pgvector_with_filter_nin_set() -> None:
    """A NIN filter should exclude documents whose metadata is listed."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = PGVector.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection_filter",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    scored = store.similarity_search_with_score(
        "foo", k=2, filter={"page": {"NIN": ["1"]}}
    )
    expected = [
        (Document(page_content="foo", metadata={"page": "0"}), 0.0),
        (Document(page_content="baz", metadata={"page": "2"}), 0.0013003906671379406),
    ]
    assert scored == expected
def test_pg_vector_with_or_filter() -> None:
    """An OR of EQ clauses should match the union of both branches."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = PGVector.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection_filter",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    scored = store.similarity_search_with_score(
        "foo", k=3, filter={"page": {"OR": [{"EQ": "0"}, {"EQ": "2"}]}}
    )
    expected = [
        (Document(page_content="foo", metadata={"page": "0"}), 0.0),
        (Document(page_content="baz", metadata={"page": "2"}), 0.0013003906671379406),
    ]
    assert scored == expected
def test_pg_vector_with_and_filter() -> None:
    """An AND of IN and NIN clauses should match only their intersection."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = PGVector.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection_filter",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    # IN ["0", "1"] intersected with NOT IN ["1"] leaves only page "0".
    scored = store.similarity_search_with_score(
        "foo", k=3, filter={"page": {"AND": [{"IN": ["0", "1"]}, {"NIN": ["1"]}]}}
    )
    assert scored == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_pgvector_delete_docs() -> None:
    """Add documents with explicit ids, then delete in two batches.

    The second batch includes an already-deleted id to confirm deleting
    missing ids does not raise.
    """
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = PGVector.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        ids=["1", "2", "3"],
        collection_name="test_collection_filter",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    store.delete(["1", "2"])
    with store._make_session() as session:
        remaining = list(session.query(store.EmbeddingStore).all())
        # mypy cannot prove the ids are sortable, hence the ignore
        assert sorted(record.custom_id for record in remaining) == ["3"]  # type: ignore

    store.delete(["2", "3"])  # Should not raise on missing ids
    with store._make_session() as session:
        remaining = list(session.query(store.EmbeddingStore).all())
        # mypy cannot prove the ids are sortable, hence the ignore
        assert sorted(record.custom_id for record in remaining) == []  # type: ignore
def test_pgvector_relevance_score() -> None:
    """Relevance scores must be scaled into [0, 1], best match scoring 1.0."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = PGVector.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    scored = store.similarity_search_with_relevance_scores("foo", k=3)
    expected = [
        (Document(page_content="foo", metadata={"page": "0"}), 1.0),
        (Document(page_content="bar", metadata={"page": "1"}), 0.9996744261675065),
        (Document(page_content="baz", metadata={"page": "2"}), 0.9986996093328621),
    ]
    assert scored == expected
def test_pgvector_retriever_search_threshold() -> None:
    """Retriever with a score threshold should drop low-relevance documents."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = PGVector.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    retriever = store.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"k": 3, "score_threshold": 0.999},
    )
    # "baz" falls below the 0.999 threshold and is excluded.
    hits = retriever.invoke("summer")
    assert hits == [
        Document(page_content="foo", metadata={"page": "0"}),
        Document(page_content="bar", metadata={"page": "1"}),
    ]
def test_pgvector_retriever_search_threshold_custom_normalization_fn() -> None:
    """Thresholding must honor a caller-supplied relevance normalization fn."""
    corpus = ["foo", "bar", "baz"]
    pages = [{"page": str(n)} for n in range(len(corpus))]
    store = PGVector.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=pages,
        collection_name="test_collection",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
        relevance_score_fn=lambda d: d * 0,
    )
    retriever = store.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"k": 3, "score_threshold": 0.5},
    )
    # The normalization function zeroes every score, so nothing passes 0.5.
    hits = retriever.invoke("foo")
    assert hits == []
def test_pgvector_max_marginal_relevance_search() -> None:
    """MMR search should return the closest document for k=1."""
    corpus = ["foo", "bar", "baz"]
    store = PGVector.from_texts(
        texts=corpus,
        embedding=FakeEmbeddingsWithAdaDimension(),
        collection_name="test_collection",
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    hits = store.max_marginal_relevance_search("foo", k=1, fetch_k=3)
    assert hits == [Document(page_content="foo")]
def test_pgvector_max_marginal_relevance_search_with_score() -> None:
    """Test max marginal relevance search with relevance scores."""
    store = PGVector.from_texts(
        texts=["foo", "bar", "baz"],
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    # An exact match must come back with distance 0.0.
    scored = store.max_marginal_relevance_search_with_score("foo", k=1, fetch_k=3)
    assert scored == [(Document(page_content="foo"), 0.0)]
def test_pgvector_with_custom_connection() -> None:
    """Test construction using a custom connection."""
    engine = sqlalchemy.create_engine(CONNECTION_STRING)
    with engine.connect() as connection:
        # A caller-supplied connection should be used instead of an
        # internally created one.
        store = PGVector.from_texts(
            texts=["foo", "bar", "baz"],
            collection_name="test_collection",
            embedding=FakeEmbeddingsWithAdaDimension(),
            connection_string=CONNECTION_STRING,
            pre_delete_collection=True,
            connection=connection,
        )
        assert store.similarity_search("foo", k=1) == [Document(page_content="foo")]
def test_pgvector_with_custom_engine_args() -> None:
    """Test construction using custom engine arguments."""
    custom_engine_args = {
        "pool_size": 5,
        "max_overflow": 10,
        "pool_recycle": -1,
        "pool_use_lifo": False,
        "pool_pre_ping": False,
        "pool_timeout": 30,
    }
    # The store must accept SQLAlchemy engine tuning options and still work.
    store = PGVector.from_texts(
        texts=["foo", "bar", "baz"],
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
        engine_args=custom_engine_args,
    )
    assert store.similarity_search("foo", k=1) == [Document(page_content="foo")]
# We should reuse this test-case across other integrations
# Add database fixture using pytest
@pytest.fixture
def pgvector() -> Generator[PGVector, None, None]:
    """Yield a fresh PGVector store over DOCUMENTS; drop its tables on teardown."""
    store = PGVector.from_documents(
        documents=DOCUMENTS,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
        # Relevance scores are not exercised by the filtering tests below.
        relevance_score_fn=lambda d: d * 0,
        # Store metadata as JSONB so "$"-operator filters can be tested.
        use_jsonb=True,
    )
    try:
        yield store
        # Do clean up
    finally:
        # Ensure tables are removed even if the test body raised.
        store.drop_tables()
@pytest.mark.parametrize("test_filter, expected_ids", TYPE_1_FILTERING_TEST_CASES)
def test_pgvector_with_with_metadata_filters_1(
pgvector: PGVector,
test_filter: Dict[str, Any],
expected_ids: List[int],
) -> None:
"""Test end to end construction and search."""
docs = pgvector.similarity_search("meow", k=5, filter=test_filter)
assert [doc.metadata["id"] for doc in docs] == expected_ids, test_filter
@pytest.mark.parametrize("test_filter, expected_ids", TYPE_2_FILTERING_TEST_CASES)
def test_pgvector_with_with_metadata_filters_2(
pgvector: PGVector,
test_filter: Dict[str, Any],
expected_ids: List[int],
) -> None:
"""Test end to end construction and search."""
docs = pgvector.similarity_search("meow", k=5, filter=test_filter)
assert [doc.metadata["id"] for doc in docs] == expected_ids, test_filter
@pytest.mark.parametrize("test_filter, expected_ids", TYPE_3_FILTERING_TEST_CASES)
def test_pgvector_with_with_metadata_filters_3(
pgvector: PGVector,
test_filter: Dict[str, Any],
expected_ids: List[int],
) -> None:
"""Test end to end construction and search."""
docs = pgvector.similarity_search("meow", k=5, filter=test_filter)
assert [doc.metadata["id"] for doc in docs] == expected_ids, test_filter
@pytest.mark.parametrize("test_filter, expected_ids", TYPE_4_FILTERING_TEST_CASES)
def test_pgvector_with_with_metadata_filters_4(
pgvector: PGVector,
test_filter: Dict[str, Any],
expected_ids: List[int],
) -> None:
"""Test end to end construction and search."""
docs = pgvector.similarity_search("meow", k=5, filter=test_filter)
assert [doc.metadata["id"] for doc in docs] == expected_ids, test_filter
@pytest.mark.parametrize("test_filter, expected_ids", TYPE_5_FILTERING_TEST_CASES)
def test_pgvector_with_with_metadata_filters_5(
pgvector: PGVector,
test_filter: Dict[str, Any],
expected_ids: List[int],
) -> None:
"""Test end to end construction and search."""
docs = pgvector.similarity_search("meow", k=5, filter=test_filter)
assert [doc.metadata["id"] for doc in docs] == expected_ids, test_filter
@pytest.mark.parametrize(
    "invalid_filter",
    [
        ["hello"],  # not a dict at all
        {
            "id": 2,
            "$name": "foo",  # "$"-prefixed key that is not a known operator
        },
        {"$or": {}},  # operators with empty / wrong-typed operands
        {"$and": {}},
        {"$between": {}},
        {"$eq": {}},
    ],
)
def test_invalid_filters(pgvector: PGVector, invalid_filter: Any) -> None:
    """Verify that invalid filters raise an error."""
    # _create_filter_clause is the internal filter-IR -> SQLAlchemy translator;
    # malformed filter structures must be rejected with ValueError.
    with pytest.raises(ValueError):
        pgvector._create_filter_clause(invalid_filter)
@pytest.mark.parametrize(
    "filter,compiled",
    [
        # Injection attempt in a *key* must be rejected outright.
        ({"id 'evil code'": 2}, ValueError),
        # Injection attempts in *values* must be safely quoted in the SQL.
        (
            {"id": "'evil code' == 2"},
            (
                "jsonb_path_match(langchain_pg_embedding.cmetadata, "
                "'$.id == $value', "
                "'{\"value\": \"''evil code'' == 2\"}')"
            ),
        ),
        (
            {"name": 'a"b'},
            (
                "jsonb_path_match(langchain_pg_embedding.cmetadata, "
                "'$.name == $value', "
                '\'{"value": "a\\\\"b"}\')'
            ),
        ),
    ],
)
def test_evil_code(
    pgvector: PGVector, filter: Any, compiled: Union[Type[Exception], str]
) -> None:
    """Hostile filter input is either rejected or compiled to safely-quoted SQL."""
    if isinstance(compiled, str):
        clause = pgvector._create_filter_clause(filter)
        compiled_stmt = str(
            clause.compile(
                dialect=postgresql.dialect(),
                compile_kwargs={
                    # This substitutes the parameters with their actual values
                    "literal_binds": True
                },
            )
        )
        assert compiled_stmt == compiled
    else:
        with pytest.raises(compiled):
            pgvector._create_filter_clause(filter)
@pytest.mark.parametrize(
    "filter,compiled",
    [
        # Each case pairs a filter-IR dict with the exact SQL text it must
        # compile to against the langchain_pg_embedding.cmetadata JSONB column.
        (
            {"id": 2},
            "jsonb_path_match(langchain_pg_embedding.cmetadata, '$.id == $value', "
            "'{\"value\": 2}')",
        ),
        (
            {"id": {"$eq": 2}},
            (
                "jsonb_path_match(langchain_pg_embedding.cmetadata, '$.id == $value', "
                "'{\"value\": 2}')"
            ),
        ),
        (
            {"name": "foo"},
            (
                "jsonb_path_match(langchain_pg_embedding.cmetadata, "
                "'$.name == $value', "
                '\'{"value": "foo"}\')'
            ),
        ),
        (
            {"id": {"$ne": 2}},
            (
                "jsonb_path_match(langchain_pg_embedding.cmetadata, '$.id != $value', "
                "'{\"value\": 2}')"
            ),
        ),
        (
            {"id": {"$gt": 2}},
            (
                "jsonb_path_match(langchain_pg_embedding.cmetadata, '$.id > $value', "
                "'{\"value\": 2}')"
            ),
        ),
        (
            {"id": {"$gte": 2}},
            (
                "jsonb_path_match(langchain_pg_embedding.cmetadata, '$.id >= $value', "
                "'{\"value\": 2}')"
            ),
        ),
        (
            {"id": {"$lt": 2}},
            (
                "jsonb_path_match(langchain_pg_embedding.cmetadata, '$.id < $value', "
                "'{\"value\": 2}')"
            ),
        ),
        (
            {"id": {"$lte": 2}},
            (
                "jsonb_path_match(langchain_pg_embedding.cmetadata, '$.id <= $value', "
                "'{\"value\": 2}')"
            ),
        ),
        # $ilike / $like compile to text extraction (->>) rather than jsonpath.
        (
            {"name": {"$ilike": "foo"}},
            "langchain_pg_embedding.cmetadata ->> 'name' ILIKE 'foo'",
        ),
        (
            {"name": {"$like": "foo"}},
            "langchain_pg_embedding.cmetadata ->> 'name' LIKE 'foo'",
        ),
        (
            {"$or": [{"id": 1}, {"id": 2}]},
            # Please note that this might not be super optimized
            # Another way to phrase the query is as
            # langchain_pg_embedding.cmetadata @@ '($.id == 1 || $.id == 2)'
            "jsonb_path_match(langchain_pg_embedding.cmetadata, '$.id == $value', "
            "'{\"value\": 1}') OR jsonb_path_match(langchain_pg_embedding.cmetadata, "
            "'$.id == $value', '{\"value\": 2}')",
        ),
    ],
)
def test_pgvector_query_compilation(
    pgvector: PGVector, filter: Any, compiled: str
) -> None:
    """Test translation from IR to SQL"""
    clause = pgvector._create_filter_clause(filter)
    compiled_stmt = str(
        clause.compile(
            dialect=postgresql.dialect(),
            compile_kwargs={
                # This substitutes the parameters with their actual values
                "literal_binds": True
            },
        )
    )
    assert compiled_stmt == compiled
def test_validate_operators() -> None:
    """Verify that all operators have been categorized."""
    # Sorted snapshot of every operator the filter translator must support;
    # adding a new operator requires updating this list.
    expected_operators = [
        "$and",
        "$between",
        "$eq",
        "$gt",
        "$gte",
        "$ilike",
        "$in",
        "$like",
        "$lt",
        "$lte",
        "$ne",
        "$nin",
        "$or",
    ]
    assert sorted(SUPPORTED_OPERATORS) == expected_operators
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_vlite.py | """Test VLite functionality."""
from langchain_core.documents import Document
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import VLite
def test_vlite() -> None:
    """Test end to end construction and search."""
    store = VLite.from_texts(texts=["foo", "bar", "baz"], embedding=FakeEmbeddings())  # type: ignore[call-arg]
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo")]
def test_vlite_with_metadatas() -> None:
    """Test end to end construction and search with metadata."""
    corpus = ["foo", "bar", "baz"]
    store = VLite.from_texts(
        texts=corpus,
        embedding=FakeEmbeddings(),  # type: ignore[call-arg]
        metadatas=[{"page": str(idx)} for idx in range(len(corpus))],  # type: ignore[call-arg]
    )
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo", metadata={"page": "0"})]
def test_vlite_with_metadatas_with_scores() -> None:
    """Test end to end construction and search with metadata and scores."""
    corpus = ["foo", "bar", "baz"]
    store = VLite.from_texts(
        texts=corpus,
        embedding=FakeEmbeddings(),  # type: ignore[call-arg]
        metadatas=[{"page": str(idx)} for idx in range(len(corpus))],  # type: ignore[call-arg]
    )
    # An exact match must score 0.0 (distance-style score).
    scored = store.similarity_search_with_score("foo", k=1)
    assert scored == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_vlite_update_document() -> None:
    """Test updating a document."""
    store = VLite.from_texts(
        texts=["foo", "bar", "baz"],
        embedding=FakeEmbeddings(),  # type: ignore[call-arg]
        ids=["1", "2", "3"],  # type: ignore[call-arg]
    )
    # Replace document "1" and confirm the new content is retrievable.
    store.update_document("1", Document(page_content="updated_foo"))
    assert store.similarity_search("updated_foo", k=1) == [
        Document(page_content="updated_foo")
    ]
def test_vlite_delete_document() -> None:
    """Test deleting a document."""
    store = VLite.from_texts(
        texts=["foo", "bar", "baz"],
        embedding=FakeEmbeddings(),  # type: ignore[call-arg]
        ids=["1", "2", "3"],  # type: ignore[call-arg]
    )
    # Removing id "1" must drop "foo" from all subsequent results.
    store.delete(["1"])
    remaining = store.similarity_search("foo", k=3)
    assert Document(page_content="foo") not in remaining
def test_vlite_get_documents() -> None:
    """Test getting documents by IDs."""
    corpus = ["foo", "bar", "baz"]
    store = VLite.from_texts(
        texts=corpus,
        embedding=FakeEmbeddings(),  # type: ignore[call-arg]
        metadatas=[{"page": str(idx)} for idx in range(len(corpus))],
        ids=["1", "2", "3"],
    )
    fetched = store.get(ids=["1", "3"])
    assert fetched == [
        Document(page_content="foo", metadata={"page": "0"}),
        Document(page_content="baz", metadata={"page": "2"}),
    ]
def test_vlite_from_existing_index() -> None:
    """Test loading from an existing index."""
    VLite.from_texts(
        texts=["foo", "bar", "baz"],
        embedding=FakeEmbeddings(),  # type: ignore[call-arg]
        collection="test_collection",  # type: ignore[call-arg]
    )
    # Re-open the same collection and verify its contents are searchable.
    reopened = VLite.from_existing_index(
        collection="test_collection",
        embedding=FakeEmbeddings(),  # type: ignore[call-arg]
    )
    assert reopened.similarity_search("foo", k=1) == [Document(page_content="foo")]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_sqlitevss.py | from typing import List, Optional
import pytest
from langchain_core.documents import Document
from langchain_community.vectorstores import SQLiteVSS
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
def _sqlite_vss_from_texts(
    metadatas: Optional[List[dict]] = None, drop: bool = True
) -> SQLiteVSS:
    """Build an in-memory SQLiteVSS store over the shared fake corpus."""
    # NOTE(review): the ``drop`` parameter is accepted but never forwarded to
    # SQLiteVSS.from_texts below — confirm whether it should be passed through.
    return SQLiteVSS.from_texts(
        fake_texts,
        FakeEmbeddings(),
        metadatas=metadatas,
        table="test",
        db_file=":memory:",
    )
@pytest.mark.requires("sqlite-vss")
def test_sqlitevss() -> None:
"""Test end to end construction and search."""
docsearch = _sqlite_vss_from_texts()
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={})]
@pytest.mark.requires("sqlite-vss")
def test_sqlitevss_with_score() -> None:
"""Test end to end construction and search with scores and IDs."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _sqlite_vss_from_texts(metadatas=metadatas)
output = docsearch.similarity_search_with_score("foo", k=3)
docs = [o[0] for o in output]
distances = [o[1] for o in output]
assert docs == [
Document(page_content="foo", metadata={"page": 0}),
Document(page_content="bar", metadata={"page": 1}),
Document(page_content="baz", metadata={"page": 2}),
]
assert distances[0] < distances[1] < distances[2]
@pytest.mark.requires("sqlite-vss")
def test_sqlitevss_add_extra() -> None:
"""Test end to end construction and MRR search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _sqlite_vss_from_texts(metadatas=metadatas)
docsearch.add_texts(texts, metadatas)
output = docsearch.similarity_search("foo", k=10)
assert len(output) == 6
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_ecloud_vector_search.py | """Test EcloudESVectorStore functionality."""
from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional
from langchain_core.documents import Document
from langchain_community.vectorstores.ecloud_vector_search import EcloudESVectorStore
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
if TYPE_CHECKING:
from elasticsearch.client import Elasticsearch
user = "elastic"
password = "*****"
ES_URL = "http://localhost:9200"
def _ecloud_vector_db_from_texts(
    metadatas: Optional[List[dict]] = None, index_name: str = "testknn"
) -> EcloudESVectorStore:
    """Build an EcloudESVectorStore over the shared fake corpus."""
    return EcloudESVectorStore.from_texts(
        fake_texts,
        FakeEmbeddings(),
        metadatas=metadatas,
        es_url=ES_URL,
        user=user,
        password=password,
        index_name=index_name,
        # Refresh so the documents are immediately visible to searches.
        refresh_indices=True,
    )
def delete_index(es: Elasticsearch, index: str) -> None:
    """Delete the specific index"""
    try:
        es.indices.delete(index)
    except Exception:
        # Best-effort cleanup: ignore failures (e.g. the index does not exist).
        pass
def test_ecloud_vector_db() -> None:
    """Test end to end construction and search."""
    index = "testknn1"
    store = _ecloud_vector_db_from_texts(index_name=index)
    assert store.similarity_search("foo", k=1) == [Document(page_content="foo")]
    delete_index(store.client, index)
def test_ecloud_vector_index_settings() -> None:
    """Index settings default to 1 shard / 1 replica and honor overrides."""
    index_name = "testknn2"
    docsearch = EcloudESVectorStore.from_texts(
        fake_texts,
        FakeEmbeddings(),
        es_url=ES_URL,
        user=user,
        password=password,
        index_name=index_name,
        refresh_indices=True,
        vector_field="my_vector",
        text_field="custom_text",
        time_out=120,
    )
    # ES reports settings values as strings.
    res = docsearch.client.indices.get_settings(index=index_name)
    assert res[index_name]["settings"]["index"]["number_of_shards"] == "1"
    assert res[index_name]["settings"]["index"]["number_of_replicas"] == "1"
    delete_index(docsearch.client, index_name)
    # Explicit index_settings must take precedence over the defaults.
    index_name = "testknn3"
    docsearch = EcloudESVectorStore.from_texts(
        fake_texts,
        FakeEmbeddings(),
        es_url=ES_URL,
        user=user,
        password=password,
        index_name=index_name,
        refresh_indices=True,
        vector_field="my_vector",
        text_field="custom_text",
        index_settings={"index": {"number_of_shards": "3", "number_of_replicas": "0"}},
    )
    res = docsearch.client.indices.get_settings(index=index_name)
    assert res[index_name]["settings"]["index"]["number_of_shards"] == "3"
    assert res[index_name]["settings"]["index"]["number_of_replicas"] == "0"
    delete_index(docsearch.client, index_name)
def test_similarity_search_with_score() -> None:
    """Test similarity search with score using Approximate Search."""
    metadatas = [{"page": i} for i in range(len(fake_texts))]
    index_name = "testknn4"
    docsearch = _ecloud_vector_db_from_texts(metadatas=metadatas, index_name=index_name)
    output = docsearch.similarity_search_with_score("foo", k=2)
    # Exact score values assume the deterministic FakeEmbeddings test helper.
    assert output == [
        (Document(page_content="foo", metadata={"page": 0}), 2.0),
        (Document(page_content="bar", metadata={"page": 1}), 1.9486833),
    ]
    delete_index(docsearch.client, index_name)
def test_ecloud_with_custom_field_name() -> None:
    """Test indexing and search using custom vector field and text field name."""
    index_name = "testknn5"
    docsearch = EcloudESVectorStore.from_texts(
        fake_texts,
        FakeEmbeddings(),
        es_url=ES_URL,
        user=user,
        password=password,
        index_name=index_name,
        refresh_indices=True,
        vector_field="my_vector",
        text_field="custom_text",
    )
    # Searches must target the same custom field names used at indexing time.
    output = docsearch.similarity_search(
        "foo", k=1, vector_field="my_vector", text_field="custom_text"
    )
    assert output == [Document(page_content="foo")]
    text_input = ["test", "add", "text", "method"]
    EcloudESVectorStore.add_texts(
        docsearch, text_input, vector_field="my_vector", text_field="custom_text"
    )
    # NOTE(review): searching "add" is expected to return "foo" here —
    # presumably because FakeEmbeddings produces fixed query vectors; confirm
    # this is the intended expectation rather than a stale assert.
    output = docsearch.similarity_search(
        "add", k=1, vector_field="my_vector", text_field="custom_text"
    )
    assert output == [Document(page_content="foo")]
    delete_index(docsearch.client, index_name)
def test_ecloud_with_metadatas() -> None:
    """Test end to end indexing and search with metadata."""
    index = "testknn6"
    page_markers = [{"page": i} for i in range(len(fake_texts))]
    store = EcloudESVectorStore.from_texts(
        fake_texts,
        FakeEmbeddings(),
        index_name=index,
        refresh_indices=True,
        metadatas=page_markers,
        es_url=ES_URL,
        user=user,
        password=password,
    )
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo", metadata={"page": 0})]
    delete_index(store.client, index)
def test_add_text() -> None:
    """Test adding additional text elements to existing index."""
    index = "testknn7"
    extra_texts = ["test", "add", "text", "method"]
    extra_markers = [{"page": i} for i in range(len(extra_texts))]
    store = EcloudESVectorStore.from_texts(
        fake_texts,
        FakeEmbeddings(),
        index_name=index,
        refresh_indices=True,
        es_url=ES_URL,
        user=user,
        password=password,
    )
    # add_texts returns one document id per input text.
    new_ids = EcloudESVectorStore.add_texts(store, extra_texts, extra_markers)
    assert len(new_ids) == len(extra_texts)
    delete_index(store.client, index)
def test_dense_float_vector_lsh_cosine() -> None:
    """
    Test indexing with vector type knn_dense_float_vector and
    model-similarity of lsh-cosine
    this mapping is compatible with model of exact and similarity of l2/cosine
    this mapping is compatible with model of lsh and similarity of cosine
    """
    index_name = "testknn9"
    docsearch = EcloudESVectorStore.from_texts(
        fake_texts,
        FakeEmbeddings(),
        index_name=index_name,
        refresh_indices=True,
        es_url=ES_URL,
        user=user,
        password=password,
        text_field="my_text",
        vector_field="my_vec",
        vector_type="knn_dense_float_vector",
        vector_params={"model": "lsh", "similarity": "cosine", "L": 99, "k": 1},
    )
    # exact model with no explicit similarity.
    output = docsearch.similarity_search(
        "foo",
        k=1,
        search_params={
            "model": "exact",
            "vector_field": "my_vec",
            "text_field": "my_text",
        },
    )
    assert output == [Document(page_content="foo")]
    # exact model + l2 similarity.
    output = docsearch.similarity_search(
        "foo",
        k=1,
        search_params={
            "model": "exact",
            "similarity": "l2",
            "vector_field": "my_vec",
            "text_field": "my_text",
        },
    )
    assert output == [Document(page_content="foo")]
    # exact model + cosine similarity.
    output = docsearch.similarity_search(
        "foo",
        k=1,
        search_params={
            "model": "exact",
            "similarity": "cosine",
            "vector_field": "my_vec",
            "text_field": "my_text",
        },
    )
    assert output == [Document(page_content="foo")]
    # lsh + cosine, matching the model/similarity the index was built with.
    output = docsearch.similarity_search(
        "foo",
        k=1,
        search_params={
            "model": "lsh",
            "similarity": "cosine",
            "candidates": 1,
            "vector_field": "my_vec",
            "text_field": "my_text",
        },
    )
    assert output == [Document(page_content="foo")]
    delete_index(docsearch.client, index_name)
def test_dense_float_vector_exact_with_filter() -> None:
    """
    Test indexing with vector type knn_dense_float_vector and
    default model/similarity
    this mapping is compatible with model of exact and
    similarity of l2/cosine
    """
    index_name = "testknn15"
    docsearch = EcloudESVectorStore.from_texts(
        fake_texts,
        FakeEmbeddings(),
        index_name=index_name,
        refresh_indices=True,
        es_url=ES_URL,
        user=user,
        password=password,
        text_field="my_text",
        vector_field="my_vec",
        vector_type="knn_dense_float_vector",
    )
    # match_all filter behaves like an unfiltered exact search.
    output = docsearch.similarity_search(
        "foo",
        k=1,
        filter={"match_all": {}},
        search_params={
            "model": "exact",
            "vector_field": "my_vec",
            "text_field": "my_text",
        },
    )
    assert output == [Document(page_content="foo")]
    # A term filter restricts candidates to the matching document.
    output = docsearch.similarity_search(
        "bar",
        k=2,
        filter={"term": {"my_text.keyword": "bar"}},
        search_params={
            "model": "exact",
            "vector_field": "my_vec",
            "text_field": "my_text",
        },
    )
    assert output == [Document(page_content="bar")]
    # The filter wins even when the query text points elsewhere.
    output = docsearch.similarity_search(
        "bar",
        k=2,
        filter={"term": {"my_text.keyword": "foo"}},
        search_params={
            "model": "exact",
            "similarity": "l2",
            "vector_field": "my_vec",
            "text_field": "my_text",
        },
    )
    assert output == [Document(page_content="foo")]
    # bool/filter with a single term clause.
    output = docsearch.similarity_search(
        "foo",
        k=2,
        filter={"bool": {"filter": {"term": {"my_text.keyword": "bar"}}}},
        search_params={
            "model": "exact",
            "similarity": "cosine",
            "vector_field": "my_vec",
            "text_field": "my_text",
        },
    )
    assert output == [Document(page_content="bar")]
    # bool/filter with a list of clauses.
    output = docsearch.similarity_search(
        "foo",
        k=2,
        filter={"bool": {"filter": [{"term": {"my_text.keyword": "bar"}}]}},
        search_params={
            "model": "exact",
            "similarity": "cosine",
            "vector_field": "my_vec",
            "text_field": "my_text",
        },
    )
    assert output == [Document(page_content="bar")]
    delete_index(docsearch.client, index_name)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_vearch.py | # flake8: noqa
from langchain_core.documents import Document
from langchain_community.vectorstores.vearch import Vearch
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_vearch() -> None:
    """Test end to end: create a Vearch store, insert vectors, and search."""
    texts = [
        "Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量,可用于基于个人知识库的大模型应用",
        "Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
        "vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装",
    ]
    # All three documents carry the same source path in their metadata.
    metadatas = [
        {
            "source": (
                "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
            )
        },
        {
            "source": (
                "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
            )
        },
        {
            "source": (
                "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
            )
        },
    ]
    vearch_db = Vearch.from_texts(
        texts=texts,
        embedding=FakeEmbeddings(),
        metadatas=metadatas,
        table_name="test_vearch",
        metadata_path="./",
    )
    # Searching with a stored text verbatim must return that exact document.
    result = vearch_db.similarity_search(
        "Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库", 1
    )
    assert result == [
        Document(
            page_content="Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
            metadata={
                "source": (
                    "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/"
                    "three_body.txt"
                )
            },
        )
    ]
def test_vearch_add_texts() -> None:
    """Test end to end adding of texts."""
    texts = [
        (
            "Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量,"
            "可用于基于个人知识库的大模型应用"
        ),
        "Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
        "vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装",
    ]
    metadatas = [
        {
            "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/"
            "three_body.txt"
        },
        {
            "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/"
            "three_body.txt"
        },
        {
            "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/"
            "three_body.txt"
        },
    ]
    vearch_db = Vearch.from_texts(
        texts=texts,
        embedding=FakeEmbeddings(),
        metadatas=metadatas,
        table_name="test_vearch",
        metadata_path="./",
    )
    # Add a duplicate of the second text; the store should now hold two copies.
    vearch_db.add_texts(
        texts=["Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库"],
        metadatas=[
            {
                "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
            },
        ],
    )
    # k=2 must surface both copies of the duplicated document.
    result = vearch_db.similarity_search(
        "Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库", 2
    )
    assert result == [
        Document(
            page_content="Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
            metadata={
                "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
            },
        ),
        Document(
            page_content="Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
            metadata={
                "source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
            },
        ),
    ]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_marqo.py | """Test Marqo functionality."""
from typing import Dict
import pytest
from langchain_core.documents import Document
from langchain_community.vectorstores.marqo import Marqo
DEFAULT_MARQO_URL = "http://localhost:8882"
DEFAULT_MARQO_API_KEY = ""
INDEX_NAME = "langchain-integration-tests"
@pytest.fixture
def client() -> Marqo:
    """Return a raw marqo client with a freshly recreated test index."""
    import marqo

    # fixture for marqo client to be used throughout testing, resets the index
    # NOTE(review): annotated as returning ``Marqo`` but actually returns a
    # ``marqo.Client`` — tests pass it to the ``Marqo`` wrapper as ``client``.
    client = marqo.Client(url=DEFAULT_MARQO_URL, api_key=DEFAULT_MARQO_API_KEY)
    try:
        client.index(INDEX_NAME).delete()
    except Exception:
        # Best-effort: the index may not exist on the first run.
        pass
    client.create_index(INDEX_NAME)
    return client
def test_marqo(client: Marqo) -> None:
    """Test end to end construction and search."""
    store = Marqo.from_texts(
        texts=["foo", "bar", "baz"],
        index_name=INDEX_NAME,
        url=DEFAULT_MARQO_URL,
        api_key=DEFAULT_MARQO_API_KEY,
        verbose=False,
    )
    assert store.similarity_search("foo", k=1) == [Document(page_content="foo")]
def test_marqo_with_metadatas(client: Marqo) -> None:
    """Test end to end construction and search."""
    corpus = ["foo", "bar", "baz"]
    store = Marqo.from_texts(
        texts=corpus,
        metadatas=[{"page": i} for i in range(len(corpus))],
        index_name=INDEX_NAME,
        url=DEFAULT_MARQO_URL,
        api_key=DEFAULT_MARQO_API_KEY,
        verbose=False,
    )
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo", metadata={"page": 0})]
def test_marqo_with_scores(client: Marqo) -> None:
    """Test end to end construction and search with scores and IDs."""
    corpus = ["foo", "bar", "baz"]
    store = Marqo.from_texts(
        texts=corpus,
        metadatas=[{"page": i} for i in range(len(corpus))],
        index_name=INDEX_NAME,
        url=DEFAULT_MARQO_URL,
        api_key=DEFAULT_MARQO_API_KEY,
        verbose=False,
    )
    scored = store.similarity_search_with_score("foo", k=3)
    documents = [hit[0] for hit in scored]
    scores = [hit[1] for hit in scored]
    assert documents == [
        Document(page_content="foo", metadata={"page": 0}),
        Document(page_content="bar", metadata={"page": 1}),
        Document(page_content="baz", metadata={"page": 2}),
    ]
    # Marqo scores are similarities: they decrease with relevance rank.
    assert scores[0] > scores[1] > scores[2]
def test_marqo_add_texts(client: Marqo) -> None:
    """Each add_texts call returns one id per text; ids never collide."""
    store = Marqo(client=client, index_name=INDEX_NAME)
    first_batch = store.add_texts(["1", "2", "3"])
    assert len(first_batch) == 3
    second_batch = store.add_texts(["1", "2", "3"])
    assert len(second_batch) == 3
    # Identical texts still receive distinct ids across calls.
    assert len(set(first_batch) | set(second_batch)) == 6
def test_marqo_search(client: Marqo) -> None:
    """Raw marqo_similarity_search returns hits keyed by the stored ids."""
    store = Marqo(client=client, index_name=INDEX_NAME)
    docs_in = ["This is document 1", "2", "3"]
    doc_ids = store.add_texts(docs_in)
    raw = store.marqo_similarity_search("What is the first document?", k=3)
    assert len(doc_ids) == len(docs_in)
    # The top hit's _id must be the id of the first document added.
    assert doc_ids[0] == raw["hits"][0]["_id"]
def test_marqo_bulk(client: Marqo) -> None:
    """bulk_similarity_search answers each query with its matching document."""
    store = Marqo(client=client, index_name=INDEX_NAME)
    docs_in = ["This is document 1", "2", "3"]
    doc_ids = store.add_texts(docs_in)
    bulk = store.bulk_similarity_search(["What is the first document?", "2", "3"], k=3)
    assert len(doc_ids) == len(docs_in)
    # Each query's top hit matches the corresponding input document.
    for query_idx, expected in enumerate(docs_in):
        assert bulk[query_idx][0].page_content == expected
def test_marqo_weighted_query(client: Marqo) -> None:
    """Test end to end construction and search."""
    store = Marqo.from_texts(
        texts=["Smartphone", "Telephone"],
        index_name=INDEX_NAME,
        url=DEFAULT_MARQO_URL,
        api_key=DEFAULT_MARQO_API_KEY,
        verbose=False,
    )
    # A negative weight pushes results away from "Old technology".
    weighted_query = {"communications device": 1.0, "Old technology": -5.0}
    assert store.similarity_search(weighted_query, k=1) == [
        Document(page_content="Smartphone")
    ]
def test_marqo_multimodal() -> None:
    """End-to-end multimodal search over a pre-existing image index.

    Builds the index outside of langchain (as another system might), attaches
    a ``page_content_builder`` so image documents can be rendered as text, and
    verifies that adding raw texts to such an index raises ``ValueError``.
    """
    import marqo

    client = marqo.Client(url=DEFAULT_MARQO_URL, api_key=DEFAULT_MARQO_API_KEY)
    try:
        client.index(INDEX_NAME).delete()
    except Exception:
        # Best-effort: the index may not exist yet.
        pass

    # reset the index for this example
    client.delete_index(INDEX_NAME)

    # This index could have been created by another system
    settings = {"treat_urls_and_pointers_as_images": True, "model": "ViT-L/14"}
    client.create_index(INDEX_NAME, **settings)
    client.index(INDEX_NAME).add_documents(
        [
            # image of a bus
            {
                "caption": "Bus",
                "image": "https://raw.githubusercontent.com/marqo-ai/marqo/mainline/"
                "examples/ImageSearchGuide/data/image4.jpg",
            },
            # image of a plane
            {
                "caption": "Plane",
                "image": "https://raw.githubusercontent.com/marqo-ai/marqo/"
                "mainline/examples/ImageSearchGuide/data/image2.jpg",
            },
        ],
        tensor_fields=["caption", "image"],
    )

    def get_content(res: Dict[str, str]) -> str:
        """Render a Marqo hit as text, preferring a plain ``text`` field."""
        if "text" in res:
            return res["text"]
        return f"{res['caption']}: {res['image']}"

    marqo_search = Marqo(client, INDEX_NAME, page_content_builder=get_content)
    query = "vehicles that fly"
    docs = marqo_search.similarity_search(query)
    assert docs[0].page_content.split(":")[0] == "Plane"

    # Adding bare texts is unsupported on an externally built multimodal index;
    # pytest.raises replaces the previous manual boolean-flag pattern.
    with pytest.raises(ValueError):
        marqo_search.add_texts(["text"])
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_vectara.py | import tempfile
import urllib.request
from typing import Generator, Iterable
import pytest
from langchain_core.documents import Document
from langchain_community.vectorstores import Vectara
from langchain_community.vectorstores.vectara import (
MMRConfig,
RerankConfig,
SummaryConfig,
VectaraQueryConfig,
)
#
# For this test to run properly, please setup as follows:
# 1. Create a Vectara account: sign up at https://www.vectara.com/integrations/langchain
# 2. Create a corpus in your Vectara account, with a filter attribute called "test_num".
# 3. Create an API_KEY for this corpus with permissions for query and indexing
# 4. Setup environment variables:
# VECTARA_API_KEY, VECTARA_CORPUS_ID and VECTARA_CUSTOMER_ID
#
test_prompt_name = "vectara-summary-ext-24-05-med-omni"
def get_abbr(s: str) -> str:
    """Return the abbreviation of *s*: the first letter of each word.

    Words are produced by splitting on single spaces; empty tokens (caused by
    leading/trailing or repeated spaces, or an empty string) are skipped so
    they no longer raise ``IndexError`` when their first letter is taken.
    """
    return "".join(word[0] for word in s.split(" ") if word)
@pytest.fixture(scope="function")
def vectara1() -> Iterable[Vectara]:
# Set up code
# create a new Vectara instance
vectara1: Vectara = Vectara()
# start with some initial texts, added with add_texts
texts1 = ["grounded generation", "retrieval augmented generation", "data privacy"]
md = [{"abbr": get_abbr(t)} for t in texts1]
doc_id1 = vectara1.add_texts(
texts1,
metadatas=md,
doc_metadata={"test_num": "1"},
)
# then add some additional documents, now with add_documents
texts2 = ["large language model", "information retrieval", "question answering"]
doc_id2 = vectara1.add_documents(
[Document(page_content=t, metadata={"abbr": get_abbr(t)}) for t in texts2],
doc_metadata={"test_num": "2"},
)
doc_ids = doc_id1 + doc_id2
yield vectara1
# Tear down code
vectara1.delete(doc_ids)
def test_vectara_add_documents(vectara1: Vectara) -> None:
    """Test add_documents."""
    # test without filter
    output1 = vectara1.similarity_search(
        "large language model",
        k=2,
        n_sentence_before=0,
        n_sentence_after=0,
    )
    assert len(output1) == 2
    assert output1[0].page_content == "large language model"
    assert output1[0].metadata["abbr"] == "llm"
    # test with metadata filter (doc level)
    # restricting to test_num=1 excludes the texts added via add_documents
    output2 = vectara1.similarity_search(
        "large language model",
        k=1,
        n_sentence_before=0,
        n_sentence_after=0,
        filter="doc.test_num = 1",
    )
    assert len(output2) == 1
    assert output2[0].page_content == "retrieval augmented generation"
    assert output2[0].metadata["abbr"] == "rag"
    # test without filter but with similarity score
    # this is similar to the first test, but given the score threshold
    # we only get one result
    output3 = vectara1.similarity_search_with_score(
        "large language model",
        k=2,
        score_threshold=0.5,
        n_sentence_before=0,
        n_sentence_after=0,
    )
    assert len(output3) == 2
    assert output3[0][0].page_content == "large language model"
    assert output3[0][0].metadata["abbr"] == "llm"
@pytest.fixture(scope="function")
def vectara2() -> Generator[Vectara, None, None]:
    """Yield a Vectara instance with two documents uploaded via ``add_files``.

    Downloads the "Attention Is All You Need" paper (PDF) and a deep-learning
    book chapter (DOCX) into temp files, uploads them, and on teardown removes
    both the remote documents and the local temp files.
    """
    import contextlib
    import os

    vectara2: Vectara = Vectara()  # type: ignore
    urls = [
        (
            "https://papers.nips.cc/paper_files/paper/2017/"
            "file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf"
        ),
        (
            "https://www.microsoft.com/en-us/research/wp-content/uploads/"
            "2016/02/Final-DengYu-NOW-Book-DeepLearn2013-ForLecturesJuly2.docx"
        ),
    ]
    files_list = []
    for url in urls:
        # mkstemp keeps the file on disk.  The previous
        # tempfile.NamedTemporaryFile().name discarded the file object, whose
        # delete-on-close behavior races with urlretrieve (and fails outright
        # on Windows, where the open temp file cannot be rewritten).
        fd, name = tempfile.mkstemp()
        os.close(fd)
        urllib.request.urlretrieve(url, name)
        files_list.append(name)
    doc_ids = vectara2.add_files(
        files_list=files_list,
        metadatas=[{"url": url, "test_num": "2"} for url in urls],
    )
    yield vectara2
    # Tear down code: delete the uploaded docs and remove the local temp
    # files (previously leaked).
    vectara2.delete(doc_ids)
    for name in files_list:
        with contextlib.suppress(OSError):
            os.remove(name)
def test_vectara_from_files(vectara2: Vectara) -> None:
    """test uploading data from files

    Checks sentence-context windows: with n_sentence_before/after = 0 only
    the matched sentence comes back; with 1, neighboring sentences are
    included.  Also exercises the deprecated ``n_sentence_context`` spelling.
    """
    output = vectara2.similarity_search(
        "By the commonly adopted machine learning tradition",
        k=1,
        n_sentence_before=0,
        n_sentence_after=0,
        filter="doc.test_num = 2",
    )
    assert (
        "By the commonly adopted machine learning tradition" in output[0].page_content
    )
    # another similarity search, this time with n_sentences_before/after = 1
    output = vectara2.similarity_search(
        "By the commonly adopted machine learning tradition",
        k=1,
        n_sentence_before=1,
        n_sentence_after=1,
        filter="doc.test_num = 2",
    )
    # the neighboring sentence from the source doc must now be present
    assert "Note the use of" in output[0].page_content
    # Test the old n_sentence_context to ensure it's backward compatible
    output = vectara2.similarity_search(
        "By the commonly adopted machine learning tradition",
        k=1,
        n_sentence_context=1,
        filter="doc.test_num = 2",
    )
    assert "Note the use of" in output[0].page_content
def test_vectara_rag_with_reranking(vectara2: Vectara) -> None:
    """MMR reranking vs. no reranking must produce different retrieval order.

    Both runs must still answer the question; only the ranked contexts differ.
    Runs against a live Vectara account.
    """
    query_str = "What is a transformer model?"
    # Note: we don't test rerank_multilingual_v1 as it's for Scale only
    # Test MMR
    summary_config = SummaryConfig(
        is_enabled=True,
        max_results=7,
        response_lang="eng",
        prompt_name=test_prompt_name,
    )
    rerank_config = RerankConfig(reranker="mmr", rerank_k=50, mmr_diversity_bias=0.2)
    config = VectaraQueryConfig(
        k=10,
        lambda_val=0.005,
        rerank_config=rerank_config,
        summary_config=summary_config,
    )
    rag1 = vectara2.as_rag(config)
    response1 = rag1.invoke(query_str)
    assert "transformer model" in response1["answer"].lower()
    # Test No reranking
    summary_config = SummaryConfig(
        is_enabled=True,
        max_results=7,
        response_lang="eng",
        prompt_name=test_prompt_name,
    )
    # NOTE(review): "None" here vs. "none" in test_vectara_rerankers below —
    # confirm which spelling RerankConfig actually recognizes.
    rerank_config = RerankConfig(reranker="None")
    config = VectaraQueryConfig(
        k=10,
        lambda_val=0.005,
        rerank_config=rerank_config,
        summary_config=summary_config,
    )
    rag2 = vectara2.as_rag(config)
    response2 = rag2.invoke(query_str)
    assert "transformer model" in response2["answer"].lower()
    # assert that the page content differs between the reranked and
    # non-reranked runs for the top n_results (10) retrieved chunks
    n_results = 10
    response1_content = [x[0].page_content for x in response1["context"][:n_results]]
    response2_content = [x[0].page_content for x in response2["context"][:n_results]]
    assert response1_content != response2_content
@pytest.fixture(scope="function")
def vectara3() -> Iterable[Vectara]:
    """Yield a Vectara instance loaded with three multi-sentence paragraphs."""
    vectara3: Vectara = Vectara()
    texts = [
        """
    The way Grounded Generation with Vectara works is we only use valid responses
    from your data relative to the search query.
    This dramatically reduces hallucinations in Vectara's responses.
    You can try it out on your own on our newly launched AskNews demo to experience
    Grounded Generation, or register an account to ground generative summaries on
    your own data.
    """,
        """
    Generative AI promises to revolutionize how you can benefit from your data,
    but you need it to provide dependable information without the risk of data
    leakage. This is why today we're adding a fundamental capability to our
    platform to make generative AI safer to use. It enables you to ask your
    data questions and get reliable, accurate answers by retrieving and
    summarizing only the relevant information. We call it “Grounded Generation”.
    """,
        """
    We are incredibly excited to share another feature with this launch:
    Hybrid Search! Neural LLM systems are excellent at understanding the context
    and meaning of end-user queries, but they can still underperform when matching
    exact product SKUs, unusual names of people or companies, barcodes, and other
    text which identifies entities rather than conveying semantics. We're bridging
    this gap by introducing a lexical configuration that matches exact keywords,
    supports Boolean operators, and executes phrase searches, and incorporates
    the results into our neural search results.
    """,
    ]
    # Ingest each paragraph as its own document, remembering ids for teardown.
    doc_ids = []
    for paragraph in texts:
        doc_ids += vectara3.add_documents(
            [Document(page_content=paragraph, metadata={})]
        )
    yield vectara3
    # Remove everything this fixture ingested.
    vectara3.delete(doc_ids)
def test_vectara_with_langchain_mmr(vectara3: Vectara) -> None:  # type: ignore[no-untyped-def]
    """LangChain-side MMR: pure relevance vs. pure diversity give different
    second results over the ``vectara3`` corpus."""
    # test max marginal relevance
    output1 = vectara3.max_marginal_relevance_search(
        "generative AI",
        k=2,
        fetch_k=6,
        lambda_mult=1.0,  # no diversity bias
        n_sentence_before=0,
        n_sentence_after=0,
    )
    assert len(output1) == 2
    assert (
        "This is why today we're adding a fundamental capability"
        in output1[1].page_content
    )
    output2 = vectara3.max_marginal_relevance_search(
        "generative AI",
        k=2,
        fetch_k=6,
        lambda_mult=0.0,  # only diversity bias
        n_sentence_before=0,
        n_sentence_after=0,
    )
    assert len(output2) == 2
    # with full diversity bias the second hit comes from a different paragraph
    assert (
        "Neural LLM systems are excellent at understanding the context"
        in output2[1].page_content
    )
def test_vectara_rerankers(vectara3: Vectara) -> None:  # type: ignore[no-untyped-def]
    """Exercise every reranker configuration end to end against live Vectara.

    Covers the multilingual and UDF rerankers, MMR via both ``RerankConfig``
    and the legacy ``MMRConfig``, and both "reranking disabled" spellings.
    Each variant must yield a non-empty RAG answer.  The original repeated the
    identical summary/query setup six times; it now lives in one helper.
    """

    def _rag_answer(**rerank_kwargs):  # type: ignore[no-untyped-def]
        # Every variant shares the same summary and base query settings; only
        # the rerank_config / mmr_config argument differs per case.
        summary_config = SummaryConfig(
            is_enabled=True, max_results=7, response_lang="eng"
        )
        config = VectaraQueryConfig(
            k=10,
            lambda_val=0.005,
            summary_config=summary_config,
            **rerank_kwargs,
        )
        return vectara3.as_rag(config).invoke("what is generative AI?")["answer"]

    # test Vectara multi-lingual reranker
    answer = _rag_answer(
        rerank_config=RerankConfig(reranker="rerank_multilingual_v1", rerank_k=50)
    )
    assert len(answer) > 0
    # test Vectara udf reranker
    answer = _rag_answer(
        rerank_config=RerankConfig(
            reranker="udf", rerank_k=50, user_function="get('$.score')"
        )
    )
    assert len(answer) > 0
    # test Vectara MMR reranker
    answer = _rag_answer(
        rerank_config=RerankConfig(reranker="mmr", rerank_k=50, mmr_diversity_bias=0.2)
    )
    assert len(answer) > 0
    # test MMR directly with old mmr_config
    answer = _rag_answer(
        mmr_config=MMRConfig(is_enabled=True, mmr_k=50, diversity_bias=0.2)
    )
    assert len(answer) > 0
    # test reranking disabled - RerankConfig
    answer = _rag_answer(rerank_config=RerankConfig(reranker="none"))
    assert len(answer) > 0
    # test with reranking disabled - MMRConfig
    answer = _rag_answer(
        mmr_config=MMRConfig(is_enabled=False, mmr_k=50, diversity_bias=0.2)
    )
    assert len(answer) > 0
def test_vectara_with_summary(vectara3: Vectara) -> None:  # type: ignore[no-untyped-def]
    """Summarization returns the k hits plus one trailing summary document."""
    # test summarization
    num_results = 10
    output1 = vectara3.similarity_search(
        query="what is generative AI?",
        k=num_results,
        summary_config=SummaryConfig(
            is_enabled=True,
            max_results=5,
            response_lang="eng",
            prompt_name=test_prompt_name,
        ),
    )
    # the generated summary is appended as one extra Document after the k hits
    assert len(output1) == num_results + 1
    assert len(output1[num_results].page_content) > 0
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_tiledb.py | from pathlib import Path
import numpy as np
import pytest
from langchain_core.documents import Document
from langchain_community.vectorstores.tiledb import TileDB
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
FakeEmbeddings,
)
@pytest.mark.requires("tiledb-vector-search")
def test_tiledb(tmp_path: Path) -> None:
    """End-to-end construction and top-1 search for both index types."""
    corpus = ["foo", "bar", "baz"]
    expected = [Document(page_content="foo")]
    flat_store = TileDB.from_texts(
        texts=corpus,
        embedding=ConsistentFakeEmbeddings(),
        index_uri=f"{str(tmp_path)}/flat",
        index_type="FLAT",
    )
    assert flat_store.similarity_search("foo", k=1) == expected
    ivf_store = TileDB.from_texts(
        texts=corpus,
        embedding=ConsistentFakeEmbeddings(),
        index_uri=f"{str(tmp_path)}/ivf_flat",
        index_type="IVF_FLAT",
    )
    # probing every partition makes the IVF search exhaustive
    hits = ivf_store.similarity_search(
        "foo", k=1, nprobe=ivf_store.vector_index.partitions
    )
    assert hits == expected
@pytest.mark.requires("tiledb-vector-search")
def test_tiledb_vector_sim(tmp_path: Path) -> None:
    """Similarity search by raw query vector on FLAT and IVF_FLAT indexes."""
    texts = ["foo", "bar", "baz"]
    docsearch = TileDB.from_texts(
        texts=texts,
        embedding=ConsistentFakeEmbeddings(),
        index_uri=f"{str(tmp_path)}/flat",
        index_type="FLAT",
    )
    # NOTE(review): index embeds with ConsistentFakeEmbeddings but the query
    # uses FakeEmbeddings — presumably both map "foo" to compatible vectors;
    # confirm against the fake_embeddings fixtures.
    query_vec = FakeEmbeddings().embed_query(text="foo")
    output = docsearch.similarity_search_by_vector(query_vec, k=1)
    assert output == [Document(page_content="foo")]
    docsearch = TileDB.from_texts(
        texts=texts,
        embedding=ConsistentFakeEmbeddings(),
        index_uri=f"{str(tmp_path)}/ivf_flat",
        index_type="IVF_FLAT",
    )
    query_vec = FakeEmbeddings().embed_query(text="foo")
    # nprobe = all partitions makes the approximate IVF search exhaustive
    output = docsearch.similarity_search_by_vector(
        query_vec, k=1, nprobe=docsearch.vector_index.partitions
    )
    assert output == [Document(page_content="foo")]
@pytest.mark.requires("tiledb-vector-search")
def test_tiledb_vector_sim_with_score_threshold(tmp_path: Path) -> None:
    """A score threshold filters out distant matches from vector search."""
    texts = ["foo", "bar", "baz"]
    docsearch = TileDB.from_texts(
        texts=texts,
        embedding=ConsistentFakeEmbeddings(),
        index_uri=f"{str(tmp_path)}/flat",
        index_type="FLAT",
    )
    query_vec = FakeEmbeddings().embed_query(text="foo")
    # k=2 requested, but only "foo" is expected to clear the 0.2 threshold
    output = docsearch.similarity_search_by_vector(query_vec, k=2, score_threshold=0.2)
    assert output == [Document(page_content="foo")]
    docsearch = TileDB.from_texts(
        texts=texts,
        embedding=ConsistentFakeEmbeddings(),
        index_uri=f"{str(tmp_path)}/ivf_flat",
        index_type="IVF_FLAT",
    )
    query_vec = FakeEmbeddings().embed_query(text="foo")
    output = docsearch.similarity_search_by_vector(
        query_vec, k=2, score_threshold=0.2, nprobe=docsearch.vector_index.partitions
    )
    assert output == [Document(page_content="foo")]
@pytest.mark.requires("tiledb-vector-search")
def test_similarity_search_with_score_by_vector(tmp_path: Path) -> None:
    """``similarity_search_with_score_by_vector`` returns (Document, score) pairs."""
    texts = ["foo", "bar", "baz"]
    docsearch = TileDB.from_texts(
        texts=texts,
        embedding=ConsistentFakeEmbeddings(),
        index_uri=f"{str(tmp_path)}/flat",
        index_type="FLAT",
    )
    query_vec = FakeEmbeddings().embed_query(text="foo")
    output = docsearch.similarity_search_with_score_by_vector(query_vec, k=1)
    assert len(output) == 1
    # each result is a (Document, score) tuple
    assert output[0][0] == Document(page_content="foo")
    docsearch = TileDB.from_texts(
        texts=texts,
        embedding=ConsistentFakeEmbeddings(),
        index_uri=f"{str(tmp_path)}/ivf_flat",
        index_type="IVF_FLAT",
    )
    query_vec = FakeEmbeddings().embed_query(text="foo")
    # nprobe = all partitions makes the approximate IVF search exhaustive
    output = docsearch.similarity_search_with_score_by_vector(
        query_vec, k=1, nprobe=docsearch.vector_index.partitions
    )
    assert len(output) == 1
    assert output[0][0] == Document(page_content="foo")
@pytest.mark.requires("tiledb-vector-search")
def test_similarity_search_with_score_by_vector_with_score_threshold(
    tmp_path: Path,
) -> None:
    """Scored vector search keeps only hits whose score falls below the threshold."""
    texts = ["foo", "bar", "baz"]
    docsearch = TileDB.from_texts(
        texts=texts,
        embedding=ConsistentFakeEmbeddings(),
        index_uri=f"{str(tmp_path)}/flat",
        index_type="FLAT",
    )
    query_vec = FakeEmbeddings().embed_query(text="foo")
    output = docsearch.similarity_search_with_score_by_vector(
        query_vec,
        k=2,
        score_threshold=0.2,
    )
    # k=2 requested, but only "foo" clears the threshold; scores behave as
    # distances here (the kept score must be below 0.2)
    assert len(output) == 1
    assert output[0][0] == Document(page_content="foo")
    assert output[0][1] < 0.2
    docsearch = TileDB.from_texts(
        texts=texts,
        embedding=ConsistentFakeEmbeddings(),
        index_uri=f"{str(tmp_path)}/ivf_flat",
        index_type="IVF_FLAT",
    )
    query_vec = FakeEmbeddings().embed_query(text="foo")
    output = docsearch.similarity_search_with_score_by_vector(
        query_vec, k=2, score_threshold=0.2, nprobe=docsearch.vector_index.partitions
    )
    assert len(output) == 1
    assert output[0][0] == Document(page_content="foo")
    assert output[0][1] < 0.2
@pytest.mark.requires("tiledb-vector-search")
def test_tiledb_mmr(tmp_path: Path) -> None:
    """MMR with a strong diversity bias keeps the exact match first only.

    With lambda_mult=0.1 diversity dominates, so the duplicate "foo" must be
    displaced by the near-misses in positions 2 and 3.
    """
    texts = ["foo", "foo", "fou", "foy"]
    docsearch = TileDB.from_texts(
        texts=texts,
        embedding=ConsistentFakeEmbeddings(),
        index_uri=f"{str(tmp_path)}/flat",
        index_type="FLAT",
    )
    query_vec = ConsistentFakeEmbeddings().embed_query(text="foo")
    output = docsearch.max_marginal_relevance_search_with_score_by_vector(
        query_vec, k=3, lambda_mult=0.1
    )
    # exact match comes back with score 0.0
    assert output[0][0] == Document(page_content="foo")
    assert output[0][1] == 0.0
    assert output[1][0] != Document(page_content="foo")
    assert output[2][0] != Document(page_content="foo")
    docsearch = TileDB.from_texts(
        texts=texts,
        embedding=ConsistentFakeEmbeddings(),
        index_uri=f"{str(tmp_path)}/ivf_flat",
        index_type="IVF_FLAT",
    )
    query_vec = ConsistentFakeEmbeddings().embed_query(text="foo")
    output = docsearch.max_marginal_relevance_search_with_score_by_vector(
        query_vec, k=3, lambda_mult=0.1, nprobe=docsearch.vector_index.partitions
    )
    assert output[0][0] == Document(page_content="foo")
    assert output[0][1] == 0.0
    assert output[1][0] != Document(page_content="foo")
    assert output[2][0] != Document(page_content="foo")
@pytest.mark.requires("tiledb-vector-search")
def test_tiledb_mmr_with_metadatas_and_filter(tmp_path: Path) -> None:
    """MMR restricted by a scalar metadata filter returns only matching docs.

    Only one document has page == 1, so despite k=3 a single result is
    expected.
    """
    texts = ["foo", "foo", "fou", "foy"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = TileDB.from_texts(
        texts=texts,
        metadatas=metadatas,
        embedding=ConsistentFakeEmbeddings(),
        index_uri=f"{str(tmp_path)}/flat",
        index_type="FLAT",
    )
    query_vec = ConsistentFakeEmbeddings().embed_query(text="foo")
    output = docsearch.max_marginal_relevance_search_with_score_by_vector(
        query_vec, k=3, lambda_mult=0.1, filter={"page": 1}
    )
    assert len(output) == 1
    assert output[0][0] == Document(page_content="foo", metadata={"page": 1})
    assert output[0][1] == 0.0
    docsearch = TileDB.from_texts(
        texts=texts,
        metadatas=metadatas,
        embedding=ConsistentFakeEmbeddings(),
        index_uri=f"{str(tmp_path)}/ivf_flat",
        index_type="IVF_FLAT",
    )
    query_vec = ConsistentFakeEmbeddings().embed_query(text="foo")
    output = docsearch.max_marginal_relevance_search_with_score_by_vector(
        query_vec,
        k=3,
        lambda_mult=0.1,
        filter={"page": 1},
        nprobe=docsearch.vector_index.partitions,
    )
    assert len(output) == 1
    assert output[0][0] == Document(page_content="foo", metadata={"page": 1})
    assert output[0][1] == 0.0
@pytest.mark.requires("tiledb-vector-search")
def test_tiledb_mmr_with_metadatas_and_list_filter(tmp_path: Path) -> None:
    """MMR with a list-valued metadata filter matches any of the listed pages."""
    texts = ["foo", "fou", "foy", "foo"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = TileDB.from_texts(
        texts=texts,
        metadatas=metadatas,
        embedding=ConsistentFakeEmbeddings(),
        index_uri=f"{str(tmp_path)}/flat",
        index_type="FLAT",
    )
    query_vec = ConsistentFakeEmbeddings().embed_query(text="foo")
    # pages 0-2 are allowed; the second "foo" (page 3) is filtered out
    output = docsearch.max_marginal_relevance_search_with_score_by_vector(
        query_vec, k=3, lambda_mult=0.1, filter={"page": [0, 1, 2]}
    )
    assert len(output) == 3
    assert output[0][0] == Document(page_content="foo", metadata={"page": 0})
    assert output[0][1] == 0.0
    assert output[1][0] != Document(page_content="foo", metadata={"page": 0})
    assert output[2][0] != Document(page_content="foo", metadata={"page": 0})
    docsearch = TileDB.from_texts(
        texts=texts,
        metadatas=metadatas,
        embedding=ConsistentFakeEmbeddings(),
        index_uri=f"{str(tmp_path)}/ivf_flat",
        index_type="IVF_FLAT",
    )
    query_vec = ConsistentFakeEmbeddings().embed_query(text="foo")
    output = docsearch.max_marginal_relevance_search_with_score_by_vector(
        query_vec,
        k=3,
        lambda_mult=0.1,
        filter={"page": [0, 1, 2]},
        nprobe=docsearch.vector_index.partitions,
    )
    assert len(output) == 3
    assert output[0][0] == Document(page_content="foo", metadata={"page": 0})
    assert output[0][1] == 0.0
    assert output[1][0] != Document(page_content="foo", metadata={"page": 0})
    assert output[2][0] != Document(page_content="foo", metadata={"page": 0})
@pytest.mark.requires("tiledb-vector-search")
def test_tiledb_flat_updates(tmp_path: Path) -> None:
    """Add, delete, re-add, and consolidate documents on a FLAT index."""
    dimensions = 10
    index_uri = str(tmp_path)
    embedding = ConsistentFakeEmbeddings(dimensionality=dimensions)
    TileDB.create(
        index_uri=index_uri,
        index_type="FLAT",
        dimensions=dimensions,
        vector_type=np.dtype("float32"),
        metadatas=False,
    )
    docsearch = TileDB.load(
        index_uri=index_uri,
        embedding=embedding,
    )
    # a freshly created index is empty
    output = docsearch.similarity_search("foo", k=2)
    assert output == []
    docsearch.add_texts(texts=["foo", "bar", "baz"], ids=["1", "2", "3"])
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo")]
    # delete "foo" (id 1) and "baz" (id 3): "bar" becomes the only hit
    docsearch.delete(["1", "3"])
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="bar")]
    output = docsearch.similarity_search("baz", k=1)
    assert output == [Document(page_content="bar")]
    docsearch.add_texts(texts=["fooo", "bazz"], ids=["4", "5"])
    output = docsearch.similarity_search("fooo", k=1)
    assert output == [Document(page_content="fooo")]
    output = docsearch.similarity_search("bazz", k=1)
    assert output == [Document(page_content="bazz")]
    # consolidating pending updates must not change search results
    docsearch.consolidate_updates()
    output = docsearch.similarity_search("fooo", k=1)
    assert output == [Document(page_content="fooo")]
    output = docsearch.similarity_search("bazz", k=1)
    assert output == [Document(page_content="bazz")]
@pytest.mark.requires("tiledb-vector-search")
def test_tiledb_ivf_flat_updates(tmp_path: Path) -> None:
    """Add, delete, re-add, and consolidate documents on an IVF_FLAT index.

    Mirrors ``test_tiledb_flat_updates`` for the approximate index type.
    """
    dimensions = 10
    index_uri = str(tmp_path)
    embedding = ConsistentFakeEmbeddings(dimensionality=dimensions)
    TileDB.create(
        index_uri=index_uri,
        index_type="IVF_FLAT",
        dimensions=dimensions,
        vector_type=np.dtype("float32"),
        metadatas=False,
    )
    docsearch = TileDB.load(
        index_uri=index_uri,
        embedding=embedding,
    )
    # a freshly created index is empty
    output = docsearch.similarity_search("foo", k=2)
    assert output == []
    docsearch.add_texts(texts=["foo", "bar", "baz"], ids=["1", "2", "3"])
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo")]
    # delete "foo" (id 1) and "baz" (id 3): "bar" becomes the only hit
    docsearch.delete(["1", "3"])
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="bar")]
    output = docsearch.similarity_search("baz", k=1)
    assert output == [Document(page_content="bar")]
    docsearch.add_texts(texts=["fooo", "bazz"], ids=["4", "5"])
    output = docsearch.similarity_search("fooo", k=1)
    assert output == [Document(page_content="fooo")]
    output = docsearch.similarity_search("bazz", k=1)
    assert output == [Document(page_content="bazz")]
    # consolidating pending updates must not change search results
    docsearch.consolidate_updates()
    output = docsearch.similarity_search("fooo", k=1)
    assert output == [Document(page_content="fooo")]
    output = docsearch.similarity_search("bazz", k=1)
    assert output == [Document(page_content="bazz")]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_atlas.py | """Test Atlas functionality."""
import os
import time

from langchain_community.vectorstores import AtlasDB

from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings

# NOTE(review): an API-key literal was committed here.  Read from the
# environment first; the literal remains only as a fallback so existing runs
# behave identically, but it should be rotated and the fallback removed.
ATLAS_TEST_API_KEY = os.environ.get(
    "ATLAS_TEST_API_KEY", "7xDPkYXSYDc1_ErdTPIcoAR9RNd8YDlkS3nVNXcVoIMZ6"
)
def test_atlas() -> None:
    """Test end to end construction and search."""
    sample_texts = ["foo", "bar", "baz"]
    # timestamp suffix keeps the project name unique across runs
    store = AtlasDB.from_texts(
        name="langchain_test_project" + str(time.time()),
        texts=sample_texts,
        api_key=ATLAS_TEST_API_KEY,
        embedding=FakeEmbeddings(),
    )
    hits = store.similarity_search("foo", k=1)
    assert len(hits) == 1
    assert hits[0].page_content == "foo"
def test_atlas_with_metadatas() -> None:
    """End-to-end construction and search with per-text metadata."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    # timestamp suffix keeps the project name unique across runs
    docsearch = AtlasDB.from_texts(
        name="langchain_test_project" + str(time.time()),
        texts=texts,
        api_key=ATLAS_TEST_API_KEY,
        embedding=FakeEmbeddings(),
        metadatas=metadatas,
        reset_project_if_exists=True,
    )
    output = docsearch.similarity_search("foo", k=1)
    assert len(output) == 1
    assert output[0].page_content == "foo"
    assert output[0].metadata["page"] == "0"
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_hippo.py | """Test Hippo functionality."""
from typing import List, Optional
from langchain_core.documents import Document
from langchain_community.vectorstores.hippo import Hippo
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
def _hippo_from_texts(
    metadatas: Optional[List[dict]] = None, drop: bool = True
) -> Hippo:
    """Build a Hippo store over the shared ``fake_texts`` corpus.

    Args:
        metadatas: Optional per-text metadata dicts, parallel to ``fake_texts``.
        drop: When True, drop any pre-existing ``langchain_test`` table first.

    Returns:
        A ``Hippo`` vector store backed by a local server — assumes a Hippo
        instance is reachable at 127.0.0.1:7788.
    """
    return Hippo.from_texts(
        fake_texts,
        FakeEmbeddings(),
        metadatas=metadatas,
        table_name="langchain_test",
        connection_args={"host": "127.0.0.1", "port": 7788},
        drop_old=drop,
    )
def test_hippo_add_extra() -> None:
    """``add_texts`` after construction ingests extra searchable texts.

    The original docstring claimed this tested "MRR search" (it does not —
    it is a plain top-1 similarity search), and it left a debug ``print`` in.
    """
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = _hippo_from_texts(metadatas=metadatas)
    # add texts on top of the corpus loaded at construction time
    docsearch.add_texts(texts, metadatas)
    output = docsearch.similarity_search("foo", k=1)
    assert len(output) == 1
def test_hippo() -> None:
    """Basic end-to-end ingest and top-1 similarity search."""
    store = _hippo_from_texts()
    assert store.similarity_search("foo", k=1) == [Document(page_content="foo")]
def test_hippo_with_score() -> None:
    """Test end to end construction and search with scores and IDs."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = _hippo_from_texts(metadatas=metadatas)
    output = docsearch.similarity_search_with_score("foo", k=3)
    docs = [o[0] for o in output]
    scores = [o[1] for o in output]
    # NOTE(review): metadata "page" values come back as strings although ints
    # were ingested — presumably Hippo stringifies metadata; verify upstream.
    assert docs == [
        Document(page_content="foo", metadata={"page": "0"}),
        Document(page_content="bar", metadata={"page": "1"}),
        Document(page_content="baz", metadata={"page": "2"}),
    ]
    # scores increase with distance from the query
    assert scores[0] < scores[1] < scores[2]
# if __name__ == "__main__":
# test_hippo()
# test_hippo_with_score()
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_annoy.py | """Test Annoy functionality."""
import tempfile
import pytest
from langchain_core.documents import Document
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain_community.vectorstores.annoy import Annoy
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_annoy() -> None:
    """Test end to end construction and search."""
    corpus = ["foo", "bar", "baz"]
    store = Annoy.from_texts(corpus, FakeEmbeddings())
    # docstore entries are keyed by generated ids, mapped from Annoy positions
    idx_map = store.index_to_docstore_id
    expected = InMemoryDocstore(
        {idx_map[pos]: Document(page_content=text) for pos, text in enumerate(corpus)}
    )
    assert store.docstore.__dict__ == expected.__dict__
    assert store.similarity_search("foo", k=1) == [Document(page_content="foo")]
def test_annoy_vector_sim() -> None:
    """Similarity and MMR search by a raw query vector."""
    texts = ["foo", "bar", "baz"]
    docsearch = Annoy.from_texts(texts, FakeEmbeddings())
    # docstore entries are keyed by generated ids, mapped from Annoy positions
    index_to_id = docsearch.index_to_docstore_id
    expected_docstore = InMemoryDocstore(
        {
            index_to_id[0]: Document(page_content="foo"),
            index_to_id[1]: Document(page_content="bar"),
            index_to_id[2]: Document(page_content="baz"),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    query_vec = FakeEmbeddings().embed_query(text="foo")
    output = docsearch.similarity_search_by_vector(query_vec, k=1)
    assert output == [Document(page_content="foo")]
    # make sure we can have k > docstore size
    output = docsearch.max_marginal_relevance_search_by_vector(query_vec, k=10)
    assert len(output) == len(texts)
def test_annoy_vector_sim_by_index() -> None:
    """Search directly by Annoy index position."""
    texts = ["foo", "bar", "baz"]
    docsearch = Annoy.from_texts(texts, FakeEmbeddings())
    index_to_id = docsearch.index_to_docstore_id
    expected_docstore = InMemoryDocstore(
        {
            index_to_id[0]: Document(page_content="foo"),
            index_to_id[1]: Document(page_content="bar"),
            index_to_id[2]: Document(page_content="baz"),
        }
    )
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    # position 2 holds the third text, "baz"
    output = docsearch.similarity_search_by_index(2, k=1)
    assert output == [Document(page_content="baz")]
def test_annoy_with_metadatas() -> None:
    """Test end to end construction and search."""
    corpus = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(corpus))]
    store = Annoy.from_texts(corpus, FakeEmbeddings(), metadatas=metadatas)
    # metadata must survive the round-trip into the docstore
    expected = InMemoryDocstore(
        {
            store.index_to_docstore_id[pos]: Document(
                page_content=text, metadata={"page": pos}
            )
            for pos, text in enumerate(corpus)
        }
    )
    assert store.docstore.__dict__ == expected.__dict__
    assert store.similarity_search("foo", k=1) == [
        Document(page_content="foo", metadata={"page": 0})
    ]
def test_annoy_search_not_found() -> None:
    """Test what happens when document is not found."""
    store = Annoy.from_texts(["foo", "bar", "baz"], FakeEmbeddings())
    # Swap in an empty docstore so every lookup is guaranteed to fail.
    store.docstore = InMemoryDocstore({})
    with pytest.raises(ValueError):
        store.similarity_search("foo")
def test_annoy_add_texts() -> None:
    """Adding texts after construction is unsupported and must raise."""
    store = Annoy.from_texts(["foo", "bar", "baz"], FakeEmbeddings())
    with pytest.raises(NotImplementedError):
        store.add_texts(["foo"])
def test_annoy_local_save_load() -> None:
    """End-to-end save/load round-trip preserves index and docstore."""
    texts = ["foo", "bar", "baz"]
    docsearch = Annoy.from_texts(texts, FakeEmbeddings())
    # Use a context manager so the temp directory is removed even on failure;
    # the original created a TemporaryDirectory and never cleaned it up.
    with tempfile.TemporaryDirectory() as temp_dir:
        docsearch.save_local(temp_dir)
        loaded_docsearch = Annoy.load_local(
            temp_dir, FakeEmbeddings(), allow_dangerous_deserialization=True
        )
        assert docsearch.index_to_docstore_id == loaded_docsearch.index_to_docstore_id
        assert docsearch.docstore.__dict__ == loaded_docsearch.docstore.__dict__
        assert loaded_docsearch.index is not None
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_timescalevector.py | """Test TimescaleVector functionality."""
import os
from datetime import datetime, timedelta
from typing import List
from langchain_core.documents import Document
from langchain_community.vectorstores.timescalevector import TimescaleVector
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
# Connection URL assembled from TEST_TIMESCALE_* environment variables,
# defaulting to a local Postgres instance with standard credentials.
SERVICE_URL = TimescaleVector.service_url_from_db_params(
    host=os.environ.get("TEST_TIMESCALE_HOST", "localhost"),
    port=int(os.environ.get("TEST_TIMESCALE_PORT", "5432")),
    database=os.environ.get("TEST_TIMESCALE_DATABASE", "postgres"),
    user=os.environ.get("TEST_TIMESCALE_USER", "postgres"),
    password=os.environ.get("TEST_TIMESCALE_PASSWORD", "postgres"),
)
# Width of the fake embedding vectors below — presumably named after
# OpenAI ada-002's 1536-dim embeddings, which these fakes mimic.
ADA_TOKEN_COUNT = 1536
class FakeEmbeddingsWithAdaDimension(FakeEmbeddings):
    """Fake embeddings functionality for testing."""

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Return simple embeddings: a constant prefix plus the text's index."""
        prefix = [1.0] * (ADA_TOKEN_COUNT - 1)
        return [prefix + [float(index)] for index in range(len(texts))]

    def embed_query(self, text: str) -> List[float]:
        """Return simple embeddings: constant prefix plus a trailing 0.0."""
        return [1.0] * (ADA_TOKEN_COUNT - 1) + [0.0]
def test_timescalevector() -> None:
    """Test end to end construction and search."""
    texts = ["foo", "bar", "baz"]
    # pre_delete_collection=True isolates each run from leftover state
    docsearch = TimescaleVector.from_texts(
        texts=texts,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo")]
def test_timescalevector_from_documents() -> None:
    """Construction via ``from_documents`` preserves document metadata."""
    texts = ["foo", "bar", "baz"]
    docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts]
    docsearch = TimescaleVector.from_documents(
        documents=docs,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo", metadata={"a": "b"})]
async def test_timescalevector_afrom_documents() -> None:
    """Async variant of ``from_documents`` construction and search."""
    texts = ["foo", "bar", "baz"]
    docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts]
    docsearch = await TimescaleVector.afrom_documents(
        documents=docs,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    output = await docsearch.asimilarity_search("foo", k=1)
    assert output == [Document(page_content="foo", metadata={"a": "b"})]
def test_timescalevector_embeddings() -> None:
    """Construction from precomputed (text, embedding) pairs."""
    texts = ["foo", "bar", "baz"]
    text_embeddings = FakeEmbeddingsWithAdaDimension().embed_documents(texts)
    text_embedding_pairs = list(zip(texts, text_embeddings))
    docsearch = TimescaleVector.from_embeddings(
        text_embeddings=text_embedding_pairs,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo")]
async def test_timescalevector_aembeddings() -> None:
    """Async construction from precomputed (text, embedding) pairs."""
    texts = ["foo", "bar", "baz"]
    text_embeddings = FakeEmbeddingsWithAdaDimension().embed_documents(texts)
    text_embedding_pairs = list(zip(texts, text_embeddings))
    docsearch = await TimescaleVector.afrom_embeddings(
        text_embeddings=text_embedding_pairs,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    output = await docsearch.asimilarity_search("foo", k=1)
    assert output == [Document(page_content="foo")]
def test_timescalevector_with_metadatas() -> None:
    """Search returns documents with their ingested metadata attached."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = TimescaleVector.from_texts(
        texts=texts,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo", metadata={"page": "0"})]
def test_timescalevector_with_metadatas_with_scores() -> None:
    """Test search that also returns distance scores alongside documents."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = TimescaleVector.from_texts(
        texts=texts,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    output = docsearch.similarity_search_with_score("foo", k=1)
    # An identical text is at distance 0.0 from itself.
    assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
async def test_timescalevector_awith_metadatas_with_scores() -> None:
    """Async variant: search returning distance scores for metadata docs."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = await TimescaleVector.afrom_texts(
        texts=texts,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    output = await docsearch.asimilarity_search_with_score("foo", k=1)
    # Exact match scores distance 0.0.
    assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_timescalevector_with_filter_match() -> None:
    """Test similarity search restricted by an exact-match metadata filter."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = TimescaleVector.from_texts(
        texts=texts,
        collection_name="test_collection_filter",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    # The filter selects the same document the query is nearest to.
    output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "0"})
    assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_timescalevector_with_filter_distant_match() -> None:
    """Test that a filter can force selection of a non-nearest document."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = TimescaleVector.from_texts(
        texts=texts,
        collection_name="test_collection_filter",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    # "baz" is not the nearest match for "foo", so its distance is > 0.
    output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "2"})
    # The exact distance is pinned to the fake-embedding geometry.
    assert output == [
        (Document(page_content="baz", metadata={"page": "2"}), 0.0013003906671379406)
    ]
def test_timescalevector_with_filter_no_match() -> None:
    """Test that filtering on a value no document has returns nothing."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = TimescaleVector.from_texts(
        texts=texts,
        collection_name="test_collection_filter",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    # No document has page "5".
    output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "5"})
    assert output == []
def test_timescalevector_with_filter_in_set() -> None:
    """Test that a list of filters acts as an OR over the individual filters."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = TimescaleVector.from_texts(
        texts=texts,
        collection_name="test_collection_filter",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    output = docsearch.similarity_search_with_score(
        "foo", k=2, filter=[{"page": "0"}, {"page": "2"}]
    )
    # Results come back ordered by distance: exact match first.
    assert output == [
        (Document(page_content="foo", metadata={"page": "0"}), 0.0),
        (Document(page_content="baz", metadata={"page": "2"}), 0.0013003906671379406),
    ]
def test_timescalevector_relevance_score() -> None:
    """Test to make sure the relevance score is scaled to 0-1."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = TimescaleVector.from_texts(
        texts=texts,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    output = docsearch.similarity_search_with_relevance_scores("foo", k=3)
    # Exact match scores 1.0; the remaining scores are pinned to the
    # fake-embedding geometry and decrease with distance.
    assert output == [
        (Document(page_content="foo", metadata={"page": "0"}), 1.0),
        (Document(page_content="bar", metadata={"page": "1"}), 0.9996744261675065),
        (Document(page_content="baz", metadata={"page": "2"}), 0.9986996093328621),
    ]
async def test_timescalevector_relevance_score_async() -> None:
    """Async variant: relevance scores are scaled to 0-1."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = await TimescaleVector.afrom_texts(
        texts=texts,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    output = await docsearch.asimilarity_search_with_relevance_scores("foo", k=3)
    # Same expected values as the sync test above.
    assert output == [
        (Document(page_content="foo", metadata={"page": "0"}), 1.0),
        (Document(page_content="bar", metadata={"page": "1"}), 0.9996744261675065),
        (Document(page_content="baz", metadata={"page": "2"}), 0.9986996093328621),
    ]
def test_timescalevector_retriever_search_threshold() -> None:
    """Test using retriever for searching with threshold."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = TimescaleVector.from_texts(
        texts=texts,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    retriever = docsearch.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"k": 3, "score_threshold": 0.999},
    )
    output = retriever.invoke("summer")
    # Threshold 0.999 admits only the two highest-scoring documents,
    # even though k=3 would allow a third.
    assert output == [
        Document(page_content="foo", metadata={"page": "0"}),
        Document(page_content="bar", metadata={"page": "1"}),
    ]
def test_timescalevector_retriever_search_threshold_custom_normalization_fn() -> None:
    """Test searching with threshold and custom normalization function"""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = TimescaleVector.from_texts(
        texts=texts,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        service_url=SERVICE_URL,
        pre_delete_collection=True,
        # Map every distance to relevance 0 so nothing can clear a threshold.
        relevance_score_fn=lambda d: d * 0,
    )
    retriever = docsearch.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"k": 3, "score_threshold": 0.5},
    )
    output = retriever.invoke("foo")
    # All scores are 0 < 0.5, so no documents pass the threshold.
    assert output == []
def test_timescalevector_delete() -> None:
    """Test deleting documents by id and by metadata filter."""
    texts = ["bar", "baz"]
    docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts]
    docsearch = TimescaleVector.from_documents(
        documents=docs,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    texts = ["foo"]
    meta = [{"b": "c"}]
    ids = docsearch.add_texts(texts, meta)
    output = docsearch.similarity_search("bar", k=10)
    assert len(output) == 3
    # Delete by id removes only the newly added "foo".
    docsearch.delete(ids)
    output = docsearch.similarity_search("bar", k=10)
    assert len(output) == 2
    # Delete by metadata removes the two original documents.
    docsearch.delete_by_metadata({"a": "b"})
    output = docsearch.similarity_search("bar", k=10)
    assert len(output) == 0
def test_timescalevector_with_index() -> None:
    """Test creating and dropping indexes of each supported type."""
    texts = ["bar", "baz"]
    docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts]
    docsearch = TimescaleVector.from_documents(
        documents=docs,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    texts = ["foo"]
    meta = [{"b": "c"}]
    docsearch.add_texts(texts, meta)
    # Default index first; search must still return all three docs.
    docsearch.create_index()
    output = docsearch.similarity_search("bar", k=10)
    assert len(output) == 3
    # Cycle through the other index types: timescale-vector (enum and the
    # "tsv" string alias), ivfflat, and hnsw.
    docsearch.drop_index()
    docsearch.create_index(
        index_type=TimescaleVector.IndexType.TIMESCALE_VECTOR,
        max_alpha=1.0,
        num_neighbors=50,
    )
    docsearch.drop_index()
    docsearch.create_index("tsv", max_alpha=1.0, num_neighbors=50)
    docsearch.drop_index()
    docsearch.create_index("ivfflat", num_lists=20, num_records=1000)
    docsearch.drop_index()
    docsearch.create_index("hnsw", m=16, ef_construction=64)
def test_timescalevector_time_partitioning() -> None:
    """Test time-partitioned collections and time-bounded similarity search."""
    from timescale_vector import client
    texts = ["bar", "baz"]
    docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts]
    docsearch = TimescaleVector.from_documents(
        documents=docs,
        collection_name="test_collection_time_partitioning",
        embedding=FakeEmbeddingsWithAdaDimension(),
        service_url=SERVICE_URL,
        pre_delete_collection=True,
        time_partition_interval=timedelta(hours=1),
    )
    texts = ["foo"]
    meta = [{"b": "c"}]
    # Back-date "foo" by three hours via a time-based UUID so the time
    # filters below can include or exclude it.
    ids = [client.uuid_from_time(datetime.now() - timedelta(hours=3))]
    docsearch.add_texts(texts, meta, ids)
    output = docsearch.similarity_search("bar", k=10)
    assert len(output) == 3
    # Only the two freshly inserted documents fall within the last hour.
    output = docsearch.similarity_search(
        "bar", k=10, start_date=datetime.now() - timedelta(hours=1)
    )
    assert len(output) == 2
    # Only the back-dated document precedes the last hour.
    output = docsearch.similarity_search(
        "bar", k=10, end_date=datetime.now() - timedelta(hours=1)
    )
    assert len(output) == 1
    # A 200-minute lookback covers all three documents.
    output = docsearch.similarity_search(
        "bar", k=10, start_date=datetime.now() - timedelta(minutes=200)
    )
    assert len(output) == 3
    # start_date plus time_delta bounds a window containing only "foo".
    output = docsearch.similarity_search(
        "bar",
        k=10,
        start_date=datetime.now() - timedelta(minutes=200),
        time_delta=timedelta(hours=1),
    )
    assert len(output) == 1
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_jaguar.py | import json
from langchain_community.vectorstores.jaguar import Jaguar
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
)
#############################################################################################
##
## Requirement: fwww http server must be running at 127.0.0.1:8080 (or any end point)
## jaguardb server must be running accepting commands from the http server
##
## FakeEmbeddings is used to create text embeddings with dimension of 10.
##
#############################################################################################
class TestJaguar:
    """Ordered integration tests against a live Jaguar/fwww endpoint.

    The tests share a single class-level store and appear to rely on
    running in definition order: login -> create -> add -> search ->
    ... -> clear -> drop -> logout. NOTE(review): confirm the test
    runner preserves this ordering.
    """

    # Shared store handle plus the pod/store names used to construct it.
    vectorstore: Jaguar
    pod: str
    store: str
    @classmethod
    def setup_class(cls) -> None:
        # Build one Jaguar client for the whole class; dimension 10 matches
        # ConsistentFakeEmbeddings' output size.
        url = "http://127.0.0.1:8080/fwww/"
        cls.pod = "vdb"
        cls.store = "langchain_test_store"
        vector_index = "v"
        vector_type = "cosine_fraction_float"
        vector_dimension = 10
        embeddings = ConsistentFakeEmbeddings()
        cls.vectorstore = Jaguar(
            cls.pod,
            cls.store,
            vector_index,
            vector_type,
            vector_dimension,
            url,
            embeddings,
        )
    @classmethod
    def teardown_class(cls) -> None:
        # Cleanup happens in test_drop/test_logout; nothing to do here.
        pass
    def test_login(self) -> None:
        """
        Requires environment variable JAGUAR_API_KEY
        or $HOME/.jagrc storing the jaguar api key
        """
        self.vectorstore.login()
    def test_create(self) -> None:
        """
        Create a vector with vector index 'v' of dimension 10
        and 'v:text' to hold text and metadatas author and category
        """
        metadata_str = "author char(32), category char(16)"
        self.vectorstore.create(metadata_str, 1024)
        # Verify the store exists by describing it through the raw API.
        podstore = self.pod + "." + self.store
        js = self.vectorstore.run(f"desc {podstore}")
        jd = json.loads(js[0])
        assert podstore in jd["data"]
    def test_add_texts(self) -> None:
        """
        Add some texts
        """
        texts = ["foo", "bar", "baz"]
        metadatas = [
            {"author": "Adam", "category": "Music"},
            {"author": "Eve", "category": "Music"},
            {"author": "John", "category": "History"},
        ]
        ids = self.vectorstore.add_texts(texts=texts, metadatas=metadatas)
        # One id per inserted text.
        assert len(ids) == len(texts)
    def test_search(self) -> None:
        """
        Test that `foo` is closest to `foo`
        Here k is 1
        """
        output = self.vectorstore.similarity_search(
            query="foo",
            k=1,
            metadatas=["author", "category"],
        )
        assert output[0].page_content == "foo"
        assert output[0].metadata["author"] == "Adam"
        assert output[0].metadata["category"] == "Music"
        assert len(output) == 1
    def test_search_filter(self) -> None:
        """
        Test filter(where)
        """
        # The SQL-style where clause narrows results to Eve's document.
        where = "author='Eve'"
        output = self.vectorstore.similarity_search(
            query="foo",
            k=3,
            fetch_k=9,
            where=where,
            metadatas=["author", "category"],
        )
        assert output[0].page_content == "bar"
        assert output[0].metadata["author"] == "Eve"
        assert output[0].metadata["category"] == "Music"
        # Only one document matches the filter despite k=3.
        assert len(output) == 1
    def test_search_anomalous(self) -> None:
        """
        Test detection of anomalousness
        """
        result = self.vectorstore.is_anomalous(
            query="dogs can jump high",
        )
        assert result is False
    def test_clear(self) -> None:
        """
        Test cleanup of data in the store
        """
        self.vectorstore.clear()
        assert self.vectorstore.count() == 0
    def test_drop(self) -> None:
        """
        Destroy the vector store
        """
        self.vectorstore.drop()
    def test_logout(self) -> None:
        """
        Logout and free resources
        """
        self.vectorstore.logout()
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_baiducloud_vector_search.py | """Test BESVectorStore functionality."""
from typing import List, Optional
from langchain_core.documents import Document
from langchain_community.vectorstores import BESVectorStore
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
def _bes_vector_db_from_texts(
    metadatas: Optional[List[dict]] = None, drop: bool = True
) -> BESVectorStore:
    """Build a BESVectorStore over the shared fake texts.

    NOTE(review): the ``drop`` parameter is accepted but never forwarded
    to ``from_texts`` — confirm whether it should be passed through.
    """
    return BESVectorStore.from_texts(
        fake_texts,
        FakeEmbeddings(),
        metadatas=metadatas,
        bes_url="http://10.0.X.X",
    )
def test_bes_vector_db() -> None:
    """End to end: build the store and confirm 'foo' retrieves itself."""
    store = _bes_vector_db_from_texts()
    results = store.similarity_search("foo", k=1)
    assert [Document(page_content="foo")] == results
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_zilliz.py | """Test Zilliz functionality."""
from typing import List, Optional
from langchain_core.documents import Document
from langchain_community.vectorstores import Zilliz
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
def _zilliz_from_texts(
    metadatas: Optional[List[dict]] = None, drop: bool = True
) -> Zilliz:
    """Build a Zilliz store over the shared fake texts.

    ``drop`` controls whether any pre-existing collection is dropped first
    (forwarded as ``drop_old``). Connection credentials are intentionally
    blank placeholders for the integration environment.
    """
    return Zilliz.from_texts(
        fake_texts,
        FakeEmbeddings(),
        metadatas=metadatas,
        connection_args={
            "uri": "",
            "user": "",
            "password": "",
            "secure": True,
        },
        drop_old=drop,
    )
def test_zilliz() -> None:
    """End to end: build the store and confirm 'foo' retrieves itself."""
    store = _zilliz_from_texts()
    results = store.similarity_search("foo", k=1)
    assert [Document(page_content="foo")] == results
def test_zilliz_with_score() -> None:
    """Test end to end construction and search with scores and IDs."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = _zilliz_from_texts(metadatas=metadatas)
    output = docsearch.similarity_search_with_score("foo", k=3)
    docs = [o[0] for o in output]
    scores = [o[1] for o in output]
    assert docs == [
        Document(page_content="foo", metadata={"page": 0}),
        Document(page_content="bar", metadata={"page": 1}),
        Document(page_content="baz", metadata={"page": 2}),
    ]
    # Scores are distances here: strictly increasing as relevance drops.
    assert scores[0] < scores[1] < scores[2]
def test_zilliz_max_marginal_relevance_search() -> None:
    """Test end to end construction and MMR search."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = _zilliz_from_texts(metadatas=metadatas)
    output = docsearch.max_marginal_relevance_search("foo", k=2, fetch_k=3)
    # MMR trades relevance for diversity: "baz" is picked over "bar".
    assert output == [
        Document(page_content="foo", metadata={"page": 0}),
        Document(page_content="baz", metadata={"page": 2}),
    ]
def test_zilliz_add_extra() -> None:
    """Test that adding the same texts again doubles the stored documents."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = _zilliz_from_texts(metadatas=metadatas)
    docsearch.add_texts(texts, metadatas)
    output = docsearch.similarity_search("foo", k=10)
    # 3 from construction + 3 added explicitly.
    assert len(output) == 6
def test_zilliz_no_drop() -> None:
    """Test that reconnecting with drop=False preserves existing data."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = _zilliz_from_texts(metadatas=metadatas)
    del docsearch
    # Second construction keeps the old collection, so texts accumulate.
    docsearch = _zilliz_from_texts(metadatas=metadatas, drop=False)
    output = docsearch.similarity_search("foo", k=10)
    assert len(output) == 6
# if __name__ == "__main__":
# test_zilliz()
# test_zilliz_with_score()
# test_zilliz_max_marginal_relevance_search()
# test_zilliz_add_extra()
# test_zilliz_no_drop()
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_tidb_vector.py | """Test TiDB Vector functionality."""
import os
from typing import List
from langchain_core.documents import Document
from langchain_community.vectorstores import TiDBVectorStore
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
TiDB_CONNECT_URL = os.environ.get(
"TEST_TiDB_CONNECTION_URL", "mysql+pymysql://root@127.0.0.1:4000/test"
)
ADA_TOKEN_COUNT = 1536
class FakeEmbeddingsWithAdaDimension(FakeEmbeddings):
    """Deterministic fake embeddings sized like OpenAI's ada model."""

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed each text via its characters' ASCII code points."""
        embeddings = []
        for text in texts:
            embeddings.append(self._text_to_embedding(text))
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query string the same way as documents."""
        return self._text_to_embedding(text)

    def _text_to_embedding(self, text: str) -> List[float]:
        """Map *text* to a length-ADA_TOKEN_COUNT vector of ASCII values.

        Longer texts are truncated; shorter ones are zero-padded.
        """
        vector = [float(ord(ch)) for ch in text]
        # Negative pad counts collapse to an empty list, so truncation
        # and padding compose safely in one expression.
        padding = [0.0] * (ADA_TOKEN_COUNT - len(vector))
        return vector[:ADA_TOKEN_COUNT] + padding
def test_search() -> None:
    """Test end to end construction and search."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    ids = ["1", "2", "3"]
    docsearch = TiDBVectorStore.from_texts(
        texts=texts,
        table_name="test_tidb_vectorstore_langchain",
        embedding=FakeEmbeddingsWithAdaDimension(),
        connection_string=TiDB_CONNECT_URL,
        metadatas=metadatas,
        ids=ids,
        drop_existing_table=True,
        distance_strategy="cosine",
    )
    # Peek into the underlying table to confirm all three rows landed.
    with docsearch.tidb_vector_client._make_session() as session:
        records = list(session.query(docsearch.tidb_vector_client._table_model).all())
        assert len([record.id for record in records]) == 3  # type: ignore
        session.close()
    output = docsearch.similarity_search("foo", k=1)
    docsearch.drop_vectorstore()
    assert output == [Document(page_content="foo", metadata={"page": "0"})]
def test_search_with_filter() -> None:
    """Test similarity search with the supported metadata filter operators."""
    # no metadata
    texts = ["foo", "bar", "baz"]
    docsearch = TiDBVectorStore.from_texts(
        texts=texts,
        table_name="test_tidb_vectorstore_langchain",
        embedding=FakeEmbeddingsWithAdaDimension(),
        connection_string=TiDB_CONNECT_URL,
        drop_existing_table=True,
    )
    output = docsearch.similarity_search("foo", k=1)
    # A filter against documents with no metadata matches nothing.
    output_filtered = docsearch.similarity_search(
        "foo", k=1, filter={"filter_condition": "N/A"}
    )
    assert output == [Document(page_content="foo")]
    assert output_filtered == []
    # having metadata
    metadatas = [{"page": i + 1, "page_str": str(i + 1)} for i in range(len(texts))]
    docsearch = TiDBVectorStore.from_texts(
        texts=texts,
        table_name="test_tidb_vectorstore_langchain",
        embedding=FakeEmbeddingsWithAdaDimension(),
        connection_string=TiDB_CONNECT_URL,
        metadatas=metadatas,
        drop_existing_table=True,
    )
    output = docsearch.similarity_search("foo", k=1, filter={"page": 1})
    assert output == [
        Document(page_content="foo", metadata={"page": 1, "page_str": "1"})
    ]
    # test mismatched value: filters are type-sensitive (int vs str).
    output = docsearch.similarity_search("foo", k=1, filter={"page": "1"})
    assert output == []
    # test non-existing key
    output = docsearch.similarity_search("foo", k=1, filter={"filter_condition": "N/A"})
    assert output == []
    # test IN, NIN expression
    output = docsearch.similarity_search("foo", k=1, filter={"page": {"$in": [1, 2]}})
    assert output == [
        Document(page_content="foo", metadata={"page": 1, "page_str": "1"})
    ]
    output = docsearch.similarity_search("foo", k=1, filter={"page": {"$nin": [1, 2]}})
    assert output == [
        Document(page_content="baz", metadata={"page": 3, "page_str": "3"})
    ]
    output = docsearch.similarity_search(
        "foo", k=1, filter={"page": {"$in": ["1", "2"]}}
    )
    assert output == []
    output = docsearch.similarity_search(
        "foo", k=1, filter={"page_str": {"$in": ["1", "2"]}}
    )
    assert output == [
        Document(page_content="foo", metadata={"page": 1, "page_str": "1"})
    ]
    # test GT, GTE, LT, LTE expression
    output = docsearch.similarity_search("foo", k=1, filter={"page": {"$gt": 1}})
    assert output == [
        Document(page_content="bar", metadata={"page": 2, "page_str": "2"})
    ]
    output = docsearch.similarity_search("foo", k=1, filter={"page": {"$gte": 1}})
    assert output == [
        Document(page_content="foo", metadata={"page": 1, "page_str": "1"})
    ]
    output = docsearch.similarity_search("foo", k=1, filter={"page": {"$lt": 3}})
    assert output == [
        Document(page_content="foo", metadata={"page": 1, "page_str": "1"})
    ]
    output = docsearch.similarity_search("baz", k=1, filter={"page": {"$lte": 3}})
    assert output == [
        Document(page_content="baz", metadata={"page": 3, "page_str": "3"})
    ]
    output = docsearch.similarity_search("foo", k=1, filter={"page": {"$gt": 3}})
    assert output == []
    output = docsearch.similarity_search("foo", k=1, filter={"page": {"$lt": 1}})
    assert output == []
    # test eq, neq expression
    output = docsearch.similarity_search("foo", k=1, filter={"page": {"$eq": 3}})
    assert output == [
        Document(page_content="baz", metadata={"page": 3, "page_str": "3"})
    ]
    output = docsearch.similarity_search("bar", k=1, filter={"page": {"$ne": 2}})
    assert output == [
        Document(page_content="baz", metadata={"page": 3, "page_str": "3"})
    ]
    # test AND, OR expression
    output = docsearch.similarity_search(
        "bar", k=1, filter={"$and": [{"page": 1}, {"page_str": "1"}]}
    )
    assert output == [
        Document(page_content="foo", metadata={"page": 1, "page_str": "1"})
    ]
    output = docsearch.similarity_search(
        "bar", k=1, filter={"$or": [{"page": 1}, {"page_str": "2"}]}
    )
    assert output == [
        Document(page_content="bar", metadata={"page": 2, "page_str": "2"}),
    ]
    # Top-level $or and $and siblings are combined conjunctively.
    output = docsearch.similarity_search(
        "foo",
        k=1,
        filter={
            "$or": [{"page": 1}, {"page": 2}],
            "$and": [{"page": 2}],
        },
    )
    assert output == [
        Document(page_content="bar", metadata={"page": 2, "page_str": "2"})
    ]
    output = docsearch.similarity_search(
        "foo", k=1, filter={"$and": [{"$or": [{"page": 1}, {"page": 2}]}, {"page": 3}]}
    )
    assert output == []
    docsearch.drop_vectorstore()
def test_search_with_score() -> None:
    """Test end to end construction, search"""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = TiDBVectorStore.from_texts(
        texts=texts,
        table_name="test_tidb_vectorstore_langchain",
        embedding=FakeEmbeddingsWithAdaDimension(),
        connection_string=TiDB_CONNECT_URL,
        metadatas=metadatas,
        drop_existing_table=True,
        distance_strategy="cosine",
    )
    output = docsearch.similarity_search_with_score("foo", k=1)
    docsearch.drop_vectorstore()
    # Exact text match has cosine distance 0.0.
    assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_load_from_existing_vectorstore() -> None:
    """Test loading existing TiDB Vector Store."""
    # create tidb vector store and add documents
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = TiDBVectorStore.from_texts(
        texts=texts,
        table_name="test_tidb_vectorstore_langchain",
        embedding=FakeEmbeddingsWithAdaDimension(),
        connection_string=TiDB_CONNECT_URL,
        metadatas=metadatas,
        drop_existing_table=True,
        distance_strategy="cosine",
    )
    # load from existing tidb vector store
    docsearch_copy = TiDBVectorStore.from_existing_vector_table(
        table_name="test_tidb_vectorstore_langchain",
        embedding=FakeEmbeddingsWithAdaDimension(),
        connection_string=TiDB_CONNECT_URL,
    )
    output = docsearch_copy.similarity_search_with_score("foo", k=1)
    docsearch.drop_vectorstore()
    assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
    # load from non-existing tidb vector store: must raise ValueError
    try:
        _ = TiDBVectorStore.from_existing_vector_table(
            table_name="test_vectorstore_non_existing",
            embedding=FakeEmbeddingsWithAdaDimension(),
            connection_string=TiDB_CONNECT_URL,
        )
        assert False, "non-existing tidb vector store testing raised an error"
    except ValueError:
        pass
def test_delete_doc() -> None:
    """Test delete document from TiDB Vector."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    ids = ["1", "2", "3"]
    docsearch = TiDBVectorStore.from_texts(
        texts=texts,
        table_name="test_tidb_vectorstore_langchain",
        embedding=FakeEmbeddingsWithAdaDimension(),
        ids=ids,
        connection_string=TiDB_CONNECT_URL,
        metadatas=metadatas,
        drop_existing_table=True,
    )
    output = docsearch.similarity_search_with_score("foo", k=1)
    # Removing "foo" and "bar" leaves "baz" as the nearest neighbor.
    docsearch.delete(["1", "2"])
    output_after_deleted = docsearch.similarity_search_with_score("foo", k=1)
    docsearch.drop_vectorstore()
    assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0)]
    # Distance value is pinned to the fake-embedding geometry.
    assert output_after_deleted == [
        (Document(page_content="baz", metadata={"page": "2"}), 0.004691842206844599)
    ]
def test_relevance_score() -> None:
    """Test to make sure the relevance score is scaled to 0-1."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch_consine = TiDBVectorStore.from_texts(
        texts=texts,
        table_name="test_tidb_vectorstore_langchain_cosine",
        embedding=FakeEmbeddingsWithAdaDimension(),
        connection_string=TiDB_CONNECT_URL,
        metadatas=metadatas,
        distance_strategy="cosine",
        drop_existing_table=True,
    )
    # Cosine strategy yields properly normalized 0-1 scores.
    output_consine = docsearch_consine.similarity_search_with_relevance_scores(
        "foo", k=3
    )
    assert output_consine == [
        (Document(page_content="foo", metadata={"page": "0"}), 1.0),
        (Document(page_content="bar", metadata={"page": "1"}), 0.9977280385800326),
        (Document(page_content="baz", metadata={"page": "2"}), 0.9953081577931554),
    ]
    # l2 strategy is exercised too; note its relevance scores fall outside
    # 0-1 for these fixtures (the values below are what the store returns).
    docsearch_l2 = TiDBVectorStore.from_texts(
        texts=texts,
        table_name="test_tidb_vectorstore_langchain_cosine",
        embedding=FakeEmbeddingsWithAdaDimension(),
        connection_string=TiDB_CONNECT_URL,
        metadatas=metadatas,
        distance_strategy="l2",
        drop_existing_table=True,
    )
    output_l2 = docsearch_l2.similarity_search_with_relevance_scores("foo", k=3)
    assert output_l2 == [
        (Document(page_content="foo", metadata={"page": "0"}), 1.0),
        (Document(page_content="bar", metadata={"page": "1"}), -9.51189802081432),
        (Document(page_content="baz", metadata={"page": "2"}), -11.90348790056394),
    ]
    # An unsupported distance strategy must raise ValueError.
    try:
        _ = TiDBVectorStore.from_texts(
            texts=texts,
            table_name="test_tidb_vectorstore_langchain_inner",
            embedding=FakeEmbeddingsWithAdaDimension(),
            connection_string=TiDB_CONNECT_URL,
            metadatas=metadatas,
            distance_strategy="inner",
            drop_existing_table=True,
        )
        assert False, "inner product should raise error"
    except ValueError:
        pass
    docsearch_l2.drop_vectorstore()  # type: ignore[attr-defined]
def test_retriever_search_threshold() -> None:
    """Test using retriever for searching with threshold."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = TiDBVectorStore.from_texts(
        texts=texts,
        table_name="test_tidb_vectorstore_langchain",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        connection_string=TiDB_CONNECT_URL,
        drop_existing_table=True,
    )
    retriever = docsearch.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"k": 3, "score_threshold": 0.997},
    )
    output = retriever.invoke("foo")
    # Threshold 0.997 admits only the top two documents despite k=3.
    assert output == [
        Document(page_content="foo", metadata={"page": "0"}),
        Document(page_content="bar", metadata={"page": "1"}),
    ]
    docsearch.drop_vectorstore()
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_yellowbrick.py | import logging
from typing import List, Optional
import pytest
from langchain_community.docstore.document import Document
from langchain_community.vectorstores import Yellowbrick
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
fake_texts,
)
YELLOWBRICK_URL = "postgres://username:password@host:port/database"
YELLOWBRICK_TABLE = "test_table"
YELLOWBRICK_CONTENT = "test_table_content"
YELLOWBRICK_SCHEMA = "test_schema"
def _yellowbrick_vector_from_texts(
    metadatas: Optional[List[dict]] = None, drop: bool = True
) -> Yellowbrick:
    """Build a Yellowbrick store over the shared fake texts, with schema."""
    db = Yellowbrick.from_texts(
        fake_texts,
        ConsistentFakeEmbeddings(),
        metadatas,
        YELLOWBRICK_URL,
        table=YELLOWBRICK_TABLE,
        schema=YELLOWBRICK_SCHEMA,
        drop=drop,
    )
    # Verbose SQL logging helps diagnose integration failures.
    db.logger.setLevel(logging.DEBUG)
    return db
def _yellowbrick_vector_from_texts_no_schema(
    metadatas: Optional[List[dict]] = None, drop: bool = True
) -> Yellowbrick:
    """Same as `_yellowbrick_vector_from_texts` but without an explicit schema."""
    db = Yellowbrick.from_texts(
        fake_texts,
        ConsistentFakeEmbeddings(),
        metadatas,
        YELLOWBRICK_URL,
        table=YELLOWBRICK_TABLE,
        drop=drop,
    )
    db.logger.setLevel(logging.DEBUG)
    return db
@pytest.mark.requires("yb-vss")
def test_yellowbrick() -> None:
    """Test end to end construction and search."""
    # Every test runs against both the schema and no-schema variants.
    docsearches = [
        _yellowbrick_vector_from_texts(),
        _yellowbrick_vector_from_texts_no_schema(),
    ]
    for docsearch in docsearches:
        output = docsearch.similarity_search("foo", k=1)
        assert output == [Document(page_content="foo", metadata={})]
        docsearch.drop(table=YELLOWBRICK_TABLE, schema=docsearch._schema)
        docsearch.drop(table=YELLOWBRICK_CONTENT, schema=docsearch._schema)
@pytest.mark.requires("yb-vss")
def test_yellowbrick_add_text() -> None:
    """Test adding texts after construction and searching for them."""
    docsearches = [
        _yellowbrick_vector_from_texts(),
        _yellowbrick_vector_from_texts_no_schema(),
    ]
    for docsearch in docsearches:
        output = docsearch.similarity_search("foo", k=1)
        assert output == [Document(page_content="foo", metadata={})]
        texts = ["oof"]
        docsearch.add_texts(texts)
        output = docsearch.similarity_search("oof", k=1)
        assert output == [Document(page_content="oof", metadata={})]
        docsearch.drop(table=YELLOWBRICK_TABLE, schema=docsearch._schema)
        docsearch.drop(table=YELLOWBRICK_CONTENT, schema=docsearch._schema)
@pytest.mark.requires("yb-vss")
def test_yellowbrick_delete() -> None:
    """Test deleting added documents by id."""
    docsearches = [
        _yellowbrick_vector_from_texts(),
        _yellowbrick_vector_from_texts_no_schema(),
    ]
    for docsearch in docsearches:
        output = docsearch.similarity_search("foo", k=1)
        assert output == [Document(page_content="foo", metadata={})]
        texts = ["oof"]
        added_docs = docsearch.add_texts(texts)
        output = docsearch.similarity_search("oof", k=1)
        assert output == [Document(page_content="oof", metadata={})]
        # After deletion, "oof" must no longer be the top result.
        docsearch.delete(added_docs)
        output = docsearch.similarity_search("oof", k=1)
        assert output != [Document(page_content="oof", metadata={})]
        docsearch.drop(table=YELLOWBRICK_TABLE, schema=docsearch._schema)
        docsearch.drop(table=YELLOWBRICK_CONTENT, schema=docsearch._schema)
@pytest.mark.requires("yb-vss")
def test_yellowbrick_delete_all() -> None:
    """Test that delete(delete_all=True) removes every document."""
    docsearches = [
        _yellowbrick_vector_from_texts(),
        _yellowbrick_vector_from_texts_no_schema(),
    ]
    for docsearch in docsearches:
        output = docsearch.similarity_search("foo", k=1)
        assert output == [Document(page_content="foo", metadata={})]
        texts = ["oof"]
        docsearch.add_texts(texts)
        output = docsearch.similarity_search("oof", k=1)
        assert output == [Document(page_content="oof", metadata={})]
        docsearch.delete(delete_all=True)
        # Neither the added text nor the original corpus should match now.
        output = docsearch.similarity_search("oof", k=1)
        assert output != [Document(page_content="oof", metadata={})]
        output = docsearch.similarity_search("foo", k=1)
        assert output != [Document(page_content="foo", metadata={})]
        docsearch.drop(table=YELLOWBRICK_TABLE, schema=docsearch._schema)
        docsearch.drop(table=YELLOWBRICK_CONTENT, schema=docsearch._schema)
@pytest.mark.requires("yb-vss")
def test_yellowbrick_lsh_search() -> None:
    """Test similarity search through an LSH index."""
    docsearches = [
        _yellowbrick_vector_from_texts(),
        _yellowbrick_vector_from_texts_no_schema(),
    ]
    for docsearch in docsearches:
        index_params = Yellowbrick.IndexParams(
            Yellowbrick.IndexType.LSH, {"num_hyperplanes": 10, "hamming_distance": 0}
        )
        # Drop first so a leftover index from a prior run cannot interfere.
        docsearch.drop_index(index_params)
        docsearch.create_index(index_params)
        output = docsearch.similarity_search("foo", k=1, index_params=index_params)
        assert output == [Document(page_content="foo", metadata={})]
        docsearch.drop(table=YELLOWBRICK_TABLE, schema=docsearch._schema)
        docsearch.drop(table=YELLOWBRICK_CONTENT, schema=docsearch._schema)
        docsearch.drop_index(index_params=index_params)
@pytest.mark.requires("yb-vss")
def test_yellowbrick_lsh_search_update() -> None:
    """Texts added after LSH index creation are searchable through the index."""
    docsearches = [
        _yellowbrick_vector_from_texts(),
        _yellowbrick_vector_from_texts_no_schema(),
    ]
    for docsearch in docsearches:
        index_params = Yellowbrick.IndexParams(
            Yellowbrick.IndexType.LSH, {"num_hyperplanes": 10, "hamming_distance": 0}
        )
        docsearch.drop_index(index_params)
        docsearch.create_index(index_params)
        output = docsearch.similarity_search("foo", k=1, index_params=index_params)
        assert output == [Document(page_content="foo", metadata={})]
        # Add a document after the index exists and verify it is indexed too.
        texts = ["oof"]
        docsearch.add_texts(texts, index_params=index_params)
        output = docsearch.similarity_search("oof", k=1, index_params=index_params)
        assert output == [Document(page_content="oof", metadata={})]
        # Tear down tables and index.
        docsearch.drop(table=YELLOWBRICK_TABLE, schema=docsearch._schema)
        docsearch.drop(table=YELLOWBRICK_CONTENT, schema=docsearch._schema)
        docsearch.drop_index(index_params=index_params)
@pytest.mark.requires("yb-vss")
def test_yellowbrick_lsh_delete() -> None:
    """Deleting by id removes a document from LSH-indexed search results."""
    docsearches = [
        _yellowbrick_vector_from_texts(),
        _yellowbrick_vector_from_texts_no_schema(),
    ]
    for docsearch in docsearches:
        index_params = Yellowbrick.IndexParams(
            Yellowbrick.IndexType.LSH, {"num_hyperplanes": 10, "hamming_distance": 0}
        )
        docsearch.drop_index(index_params)
        docsearch.create_index(index_params)
        output = docsearch.similarity_search("foo", k=1, index_params=index_params)
        assert output == [Document(page_content="foo", metadata={})]
        texts = ["oof"]
        # add_texts returns the ids of the inserted documents.
        added_docs = docsearch.add_texts(texts, index_params=index_params)
        output = docsearch.similarity_search("oof", k=1, index_params=index_params)
        assert output == [Document(page_content="oof", metadata={})]
        docsearch.delete(added_docs)
        # After deletion "oof" must no longer be the top hit.
        output = docsearch.similarity_search("oof", k=1, index_params=index_params)
        assert output != [Document(page_content="oof", metadata={})]
        # Tear down tables and index.
        docsearch.drop(table=YELLOWBRICK_TABLE, schema=docsearch._schema)
        docsearch.drop(table=YELLOWBRICK_CONTENT, schema=docsearch._schema)
        docsearch.drop_index(index_params=index_params)
@pytest.mark.requires("yb-vss")
def test_yellowbrick_lsh_delete_all() -> None:
    """``delete(delete_all=True)`` empties an LSH-indexed store."""
    docsearches = [
        _yellowbrick_vector_from_texts(),
        _yellowbrick_vector_from_texts_no_schema(),
    ]
    for docsearch in docsearches:
        index_params = Yellowbrick.IndexParams(
            Yellowbrick.IndexType.LSH, {"num_hyperplanes": 10, "hamming_distance": 0}
        )
        docsearch.drop_index(index_params)
        docsearch.create_index(index_params)
        output = docsearch.similarity_search("foo", k=1, index_params=index_params)
        assert output == [Document(page_content="foo", metadata={})]
        texts = ["oof"]
        docsearch.add_texts(texts, index_params=index_params)
        output = docsearch.similarity_search("oof", k=1, index_params=index_params)
        assert output == [Document(page_content="oof", metadata={})]
        docsearch.delete(delete_all=True)
        # Neither the seeded nor the added documents should remain.
        output = docsearch.similarity_search("oof", k=1, index_params=index_params)
        assert output != [Document(page_content="oof", metadata={})]
        output = docsearch.similarity_search("foo", k=1, index_params=index_params)
        assert output != [Document(page_content="foo", metadata={})]
        # Tear down tables and index.
        docsearch.drop(table=YELLOWBRICK_TABLE, schema=docsearch._schema)
        docsearch.drop(table=YELLOWBRICK_CONTENT, schema=docsearch._schema)
        docsearch.drop_index(index_params=index_params)
@pytest.mark.requires("yb-vss")
def test_yellowbrick_with_score() -> None:
    """Test end to end construction and search with scores and IDs."""
    docsearches = [
        _yellowbrick_vector_from_texts(),
        _yellowbrick_vector_from_texts_no_schema(),
    ]
    for docsearch in docsearches:
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        # NOTE(review): this rebinding discards the loop variable, so the
        # no-schema variant is never actually exercised — confirm intent.
        docsearch = _yellowbrick_vector_from_texts(metadatas=metadatas)
        output = docsearch.similarity_search_with_score("foo", k=3)
        docs = [o[0] for o in output]
        distances = [o[1] for o in output]
        assert docs == [
            Document(page_content="foo", metadata={"page": 0}),
            Document(page_content="bar", metadata={"page": 1}),
            Document(page_content="baz", metadata={"page": 2}),
        ]
        # Scores are similarities: best match first, strictly decreasing.
        assert distances[0] > distances[1] > distances[2]
@pytest.mark.requires("yb-vss")
def test_yellowbrick_add_extra() -> None:
    """Re-adding the same texts yields duplicate documents in search results."""
    docsearches = [
        _yellowbrick_vector_from_texts(),
        _yellowbrick_vector_from_texts_no_schema(),
    ]
    for docsearch in docsearches:
        texts = ["foo", "bar", "baz"]
        metadatas = [{"page": i} for i in range(len(texts))]
        # NOTE(review): this rebinding discards the loop variable, so the
        # no-schema variant is never actually exercised — confirm intent.
        docsearch = _yellowbrick_vector_from_texts(metadatas=metadatas)
        docsearch.add_texts(texts, metadatas)
        output = docsearch.similarity_search("foo", k=10)
        # 3 seeded documents + the 3 just re-added.
        assert len(output) == 6
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_rocksetdb.py | import logging
import os
import uuid
from langchain_core.documents import Document
from langchain_community.vectorstores.rocksetdb import Rockset
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
fake_texts,
)
logger = logging.getLogger(__name__)
# To run these tests, make sure you have a collection with the name `langchain_demo`
# and the following ingest transformation:
#
# SELECT
# _input.* EXCEPT(_meta),
# VECTOR_ENFORCE(_input.description_embedding, 10, 'float') as
# description_embedding
# FROM
# _input
#
# We're using FakeEmbeddings utility to create text embeddings.
# It generates vector embeddings of length 10.
#
# Set env ROCKSET_DELETE_DOCS_ON_START=1 if you want to delete all docs from
# the collection before running any test. Be careful, this will delete any
# existing documents in your Rockset collection.
#
# See https://rockset.com/blog/introducing-vector-search-on-rockset/ for more details.
# Rockset workspace/collection under test and the field names holding the
# document text and its vector (see the ingest transformation notes above).
WORKSPACE = "morgana"
COLLECTION_NAME = "langchain_demo"
TEXT_KEY = "description"
EMBEDDING_KEY = "description_embedding"
class TestRockset:
    # Shared vectorstore, populated once per class by ``setup_class``.
    rockset_vectorstore: Rockset
    @classmethod
    def setup_class(cls) -> None:
        """Connect to Rockset, optionally purge the collection, and seed it.

        Requires ROCKSET_API_KEY and ROCKSET_REGION in the environment; set
        ROCKSET_DELETE_DOCS_ON_START=1 to wipe the collection first.
        """
        import rockset
        import rockset.models
        assert os.environ.get("ROCKSET_API_KEY") is not None
        assert os.environ.get("ROCKSET_REGION") is not None
        api_key = os.environ.get("ROCKSET_API_KEY")
        region = os.environ.get("ROCKSET_REGION")
        # Map the region shorthand to the SDK's host constants; anything else
        # is passed through verbatim as a host string.
        if region == "use1a1":
            host = rockset.Regions.use1a1
        elif region == "usw2a1":
            host = rockset.Regions.usw2a1
        elif region == "euc1a1":
            host = rockset.Regions.euc1a1
        elif region == "dev":
            host = rockset.DevRegions.usw2a1
        else:
            logger.warning(
                "Using ROCKSET_REGION:%s as it is.. \
You should know what you're doing...",
                region,
            )
            host = region
        client = rockset.RocksetClient(host, api_key)
        if os.environ.get("ROCKSET_DELETE_DOCS_ON_START") == "1":
            logger.info(
                "Deleting all existing documents from the Rockset collection %s",
                COLLECTION_NAME,
            )
            query = f"select _id from {WORKSPACE}.{COLLECTION_NAME}"
            query_response = client.Queries.query(sql={"query": query})
            ids = [
                str(r["_id"])
                for r in getattr(
                    query_response, query_response.attribute_map["results"]
                )
            ]
            logger.info("Existing ids in collection: %s", ids)
            client.Documents.delete_documents(
                collection=COLLECTION_NAME,
                data=[rockset.models.DeleteDocumentsRequestData(id=i) for i in ids],
                workspace=WORKSPACE,
            )
        embeddings = ConsistentFakeEmbeddings()
        embeddings.embed_documents(fake_texts)
        cls.rockset_vectorstore = Rockset(
            client, embeddings, COLLECTION_NAME, TEXT_KEY, EMBEDDING_KEY, WORKSPACE
        )
        # Seed the three documents used by the search tests below.
        texts = ["foo", "bar", "baz"]
        metadatas = [{"metadata_index": i} for i in range(len(texts))]
        ids = cls.rockset_vectorstore.add_texts(
            texts=texts,
            metadatas=metadatas,
        )
        assert len(ids) == len(texts)
    def test_rockset_search(self) -> None:
        """Test end-to-end vector search in Rockset."""
        # Test that `foo` is closest to `foo`
        output = self.rockset_vectorstore.similarity_search(
            query="foo", distance_func=Rockset.DistanceFunction.COSINE_SIM, k=1
        )
        assert output == [Document(page_content="foo", metadata={"metadata_index": 0})]
        # Find closest vector to `foo` which is not `foo`
        output = self.rockset_vectorstore.similarity_search(
            query="foo",
            distance_func=Rockset.DistanceFunction.COSINE_SIM,
            k=1,
            where_str="metadata_index != 0",
        )
        assert output == [Document(page_content="bar", metadata={"metadata_index": 1})]
    def test_rockset_mmr_search(self) -> None:
        """Test end-to-end MMR (max marginal relevance) search in Rockset."""
        output = self.rockset_vectorstore.max_marginal_relevance_search(
            query="foo",
            distance_func=Rockset.DistanceFunction.COSINE_SIM,
            fetch_k=1,
            k=1,
        )
        assert output == [Document(page_content="foo", metadata={"metadata_index": 0})]
        # Find closest vector to `foo` which is not `foo`
        output = self.rockset_vectorstore.max_marginal_relevance_search(
            query="foo",
            distance_func=Rockset.DistanceFunction.COSINE_SIM,
            fetch_k=3,
            k=1,
            where_str="metadata_index != 0",
        )
        assert output == [Document(page_content="bar", metadata={"metadata_index": 1})]
    def test_add_documents_and_delete(self) -> None:
        """``add_documents`` and ``delete`` are requirements to support use
        with RecordManager."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"metadata_index": i} for i in range(len(texts))]
        _docs = zip(texts, metadatas)
        docs = [Document(page_content=pc, metadata=i) for pc, i in _docs]
        ids = self.rockset_vectorstore.add_documents(docs)
        assert len(ids) == len(texts)
        deleted = self.rockset_vectorstore.delete(ids)
        assert deleted
    def test_add_texts_does_not_modify_metadata(self) -> None:
        """If metadata changes it will inhibit the langchain RecordManager
        functionality."""
        texts = ["kitty", "doggy"]
        metadatas = [{"source": "kitty.txt"}, {"source": "doggy.txt"}]
        ids = [str(uuid.uuid4()), str(uuid.uuid4())]
        self.rockset_vectorstore.add_texts(texts=texts, metadatas=metadatas, ids=ids)
        # add_texts must not mutate the caller's metadata dicts in place.
        for metadata in metadatas:
            assert len(metadata) == 1
            assert list(metadata.keys())[0] == "source"
    def test_build_query_sql(self) -> None:
        """_build_query_sql renders the expected SELECT without a WHERE clause."""
        vector = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]
        q_str = self.rockset_vectorstore._build_query_sql(
            vector,
            Rockset.DistanceFunction.COSINE_SIM,
            4,
        )
        vector_str = ",".join(map(str, vector))
        expected = f"""\
SELECT * EXCEPT({EMBEDDING_KEY}), \
COSINE_SIM({EMBEDDING_KEY}, [{vector_str}]) as dist
FROM {WORKSPACE}.{COLLECTION_NAME}
ORDER BY dist DESC
LIMIT 4
"""
        assert q_str == expected
    def test_build_query_sql_with_where(self) -> None:
        """_build_query_sql inserts the caller-provided WHERE clause."""
        vector = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]
        q_str = self.rockset_vectorstore._build_query_sql(
            vector,
            Rockset.DistanceFunction.COSINE_SIM,
            4,
            "age >= 10",
        )
        vector_str = ",".join(map(str, vector))
        expected = f"""\
SELECT * EXCEPT({EMBEDDING_KEY}), \
COSINE_SIM({EMBEDDING_KEY}, [{vector_str}]) as dist
FROM {WORKSPACE}.{COLLECTION_NAME}
WHERE age >= 10
ORDER BY dist DESC
LIMIT 4
"""
        assert q_str == expected
    def test_build_query_sql_with_select_embeddings(self) -> None:
        """Passing False as the last argument keeps the embedding column
        in the SELECT (no ``EXCEPT(...)`` exclusion)."""
        vector = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]
        q_str = self.rockset_vectorstore._build_query_sql(
            vector, Rockset.DistanceFunction.COSINE_SIM, 4, "age >= 10", False
        )
        vector_str = ",".join(map(str, vector))
        expected = f"""\
SELECT *, \
COSINE_SIM({EMBEDDING_KEY}, [{vector_str}]) as dist
FROM {WORKSPACE}.{COLLECTION_NAME}
WHERE age >= 10
ORDER BY dist DESC
LIMIT 4
"""
        assert q_str == expected
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_tair.py | """Test tair functionality."""
from langchain_core.documents import Document
from langchain_community.vectorstores.tair import Tair
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_tair() -> None:
    """Build a Tair store from three texts and check nearest-neighbour search."""
    corpus = ["foo", "bar", "baz"]
    store = Tair.from_texts(
        corpus, FakeEmbeddings(), tair_url="redis://localhost:6379"
    )
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo")]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_relyt.py | """Test Relyt functionality."""
import os
from typing import List
from langchain_core.documents import Document
from langchain_community.vectorstores.relyt import Relyt
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
# Connection string assembled from PG_* environment variables, with localhost
# Postgres defaults so the suite runs against a local instance out of the box.
CONNECTION_STRING = Relyt.connection_string_from_db_params(
    driver=os.environ.get("PG_DRIVER", "psycopg2cffi"),
    host=os.environ.get("PG_HOST", "localhost"),
    port=int(os.environ.get("PG_PORT", "5432")),
    database=os.environ.get("PG_DATABASE", "postgres"),
    user=os.environ.get("PG_USER", "postgres"),
    password=os.environ.get("PG_PASSWORD", "postgres"),
)
# Dimensionality of OpenAI's text-embedding-ada-002, mimicked by the fakes.
ADA_TOKEN_COUNT = 1536
class FakeEmbeddingsWithAdaDimension(FakeEmbeddings):
    """Deterministic fake embeddings padded to OpenAI ada's 1536 dimensions."""
    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed each text as 1535 ones followed by its list index."""
        prefix = [1.0] * (ADA_TOKEN_COUNT - 1)
        return [prefix + [float(index)] for index in range(len(texts))]
    def embed_query(self, text: str) -> List[float]:
        """Embed any query exactly like the document at index 0."""
        return [1.0] * (ADA_TOKEN_COUNT - 1) + [0.0]
def test_relyt() -> None:
    """End-to-end construction and nearest-neighbour search."""
    texts = ["foo", "bar", "baz"]
    docsearch = Relyt.from_texts(
        texts=texts,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        connection_string=CONNECTION_STRING,
        # Start from a clean collection each run.
        pre_delete_collection=True,
    )
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo")]
def test_relyt_with_engine_args() -> None:
    """End-to-end construction with custom SQLAlchemy engine args and search."""
    # The original placed the docstring after this assignment, which made it a
    # no-op string literal rather than a docstring; it is now first.
    engine_args = {"pool_recycle": 3600, "pool_size": 50}
    texts = ["foo", "bar", "baz"]
    docsearch = Relyt.from_texts(
        texts=texts,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
        engine_args=engine_args,
    )
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo")]
def test_relyt_with_metadatas() -> None:
    """Metadata attached at construction time is returned with search hits."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = Relyt.from_texts(
        texts=texts,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo", metadata={"page": "0"})]
def test_relyt_with_metadatas_with_scores() -> None:
    """Scored search returns (document, distance) pairs; exact match scores 0."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = Relyt.from_texts(
        texts=texts,
        collection_name="test_collection",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    output = docsearch.similarity_search_with_score("foo", k=1)
    # The fake embeds the query identically to document 0, hence distance 0.0.
    assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_relyt_with_filter_match() -> None:
    """A metadata filter matching the nearest document still returns it."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = Relyt.from_texts(
        texts=texts,
        collection_name="test_collection_filter",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "0"})
    assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_relyt_with_filter_distant_match() -> None:
    """A filter restricted to a non-nearest document returns that document.

    The filter only matches page "2" ("baz"), so "baz" must be the top hit
    even though "foo" is the nearest neighbour overall; with the fake
    embeddings its distance to the query is 4.0.
    """
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = Relyt.from_texts(
        texts=texts,
        collection_name="test_collection_filter",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "2"})
    # Leftover debug print removed; the assertion documents the expectation.
    assert output == [(Document(page_content="baz", metadata={"page": "2"}), 4.0)]
def test_relyt_with_filter_no_match() -> None:
    """A filter matching no stored metadata yields an empty result list."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = Relyt.from_texts(
        texts=texts,
        collection_name="test_collection_filter",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    # No document has page "5".
    output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "5"})
    assert output == []
def test_relyt_delete() -> None:
    """Deleting by explicit ids removes documents from search results."""
    texts = ["foo", "bar", "baz"]
    ids = ["fooid", "barid", "bazid"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = Relyt.from_texts(
        texts=texts,
        collection_name="test_collection_delete",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        connection_string=CONNECTION_STRING,
        ids=ids,
        pre_delete_collection=True,
    )
    # Sanity check: the filtered document exists before deletion.
    output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "2"})
    # Leftover debug print removed; the assertion documents the expectation.
    assert output == [(Document(page_content="baz", metadata={"page": "2"}), 4.0)]
    docsearch.delete(ids=ids)
    output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "2"})
    assert output == []
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_awadb.py | """Test AwaDB functionality."""
from langchain_core.documents import Document
from langchain_community.vectorstores import AwaDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_awadb() -> None:
    """Build an AwaDB table from texts and verify the closest match is found."""
    corpus = ["foo", "bar", "baz"]
    store = AwaDB.from_texts(
        table_name="test_awadb", texts=corpus, embedding=FakeEmbeddings()
    )
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo")]
def test_awadb_with_metadatas() -> None:
    """Metadata supplied at construction time is returned with search hits."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    docsearch = AwaDB.from_texts(
        table_name="test_awadb",
        texts=texts,
        embedding=FakeEmbeddings(),
        metadatas=metadatas,
    )
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo", metadata={"page": 0})]
def test_awadb_with_metadatas_with_scores() -> None:
    """Scored search returns (document, score) pairs; exact match scores 0."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = AwaDB.from_texts(
        table_name="test_awadb",
        texts=texts,
        embedding=FakeEmbeddings(),
        metadatas=metadatas,
    )
    output = docsearch.similarity_search_with_score("foo", k=1)
    assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_awadb_add_texts() -> None:
    """Texts added after construction appear in subsequent searches."""
    # Create initial doc store.
    texts = ["foo", "bar", "baz"]
    docsearch = AwaDB.from_texts(
        table_name="test_awadb", texts=texts, embedding=FakeEmbeddings()
    )
    # Test adding a similar document as before; both copies should be returned.
    docsearch.add_texts(["foo"])
    output = docsearch.similarity_search("foo", k=2)
    assert output == [Document(page_content="foo"), Document(page_content="foo")]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_chroma.py | """Test Chroma functionality."""
import uuid
import pytest
import requests
from langchain_core.documents import Document
from langchain_community.embeddings import FakeEmbeddings as Fak
from langchain_community.vectorstores import Chroma
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
FakeEmbeddings,
)
def test_chroma() -> None:
    """End-to-end construction and nearest-neighbour search."""
    texts = ["foo", "bar", "baz"]
    docsearch = Chroma.from_texts(
        collection_name="test_collection", texts=texts, embedding=FakeEmbeddings()
    )
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo")]
    # __len__ reports the number of stored documents.
    assert len(docsearch) == 3
async def test_chroma_async() -> None:
    """End-to-end construction and async similarity search."""
    texts = ["foo", "bar", "baz"]
    docsearch = Chroma.from_texts(
        collection_name="test_collection", texts=texts, embedding=FakeEmbeddings()
    )
    output = await docsearch.asimilarity_search("foo", k=1)
    assert output == [Document(page_content="foo")]
def test_chroma_with_metadatas() -> None:
    """Metadata supplied at construction time is returned with search hits."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = Chroma.from_texts(
        collection_name="test_collection",
        texts=texts,
        embedding=FakeEmbeddings(),
        metadatas=metadatas,
    )
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo", metadata={"page": "0"})]
def test_chroma_with_metadatas_with_scores() -> None:
    """Scored search returns (document, distance) pairs; exact match is 0.0."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = Chroma.from_texts(
        collection_name="test_collection",
        texts=texts,
        embedding=FakeEmbeddings(),
        metadatas=metadatas,
    )
    output = docsearch.similarity_search_with_score("foo", k=1)
    assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_chroma_with_metadatas_with_scores_using_vector() -> None:
    """Scored search by a precomputed embedding vector instead of raw text."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    embeddings = FakeEmbeddings()
    docsearch = Chroma.from_texts(
        collection_name="test_collection",
        texts=texts,
        embedding=embeddings,
        metadatas=metadatas,
    )
    # Embed the query ourselves and pass the vector directly.
    embedded_query = embeddings.embed_query("foo")
    output = docsearch.similarity_search_by_vector_with_relevance_scores(
        embedding=embedded_query, k=1
    )
    assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_chroma_search_filter() -> None:
    """Metadata filters restrict which documents a search may return."""
    texts = ["far", "bar", "baz"]
    metadatas = [{"first_letter": "{}".format(text[0])} for text in texts]
    docsearch = Chroma.from_texts(
        collection_name="test_collection",
        texts=texts,
        embedding=FakeEmbeddings(),
        metadatas=metadatas,
    )
    # Filter matching the nearest document returns it ...
    output = docsearch.similarity_search("far", k=1, filter={"first_letter": "f"})
    assert output == [Document(page_content="far", metadata={"first_letter": "f"})]
    # ... while a filter excluding it forces a different top hit.
    output = docsearch.similarity_search("far", k=1, filter={"first_letter": "b"})
    assert output == [Document(page_content="bar", metadata={"first_letter": "b"})]
def test_chroma_search_filter_with_scores() -> None:
    """Filtered scored search returns distances reflecting the forced match."""
    texts = ["far", "bar", "baz"]
    metadatas = [{"first_letter": "{}".format(text[0])} for text in texts]
    docsearch = Chroma.from_texts(
        collection_name="test_collection",
        texts=texts,
        embedding=FakeEmbeddings(),
        metadatas=metadatas,
    )
    output = docsearch.similarity_search_with_score(
        "far", k=1, filter={"first_letter": "f"}
    )
    assert output == [
        (Document(page_content="far", metadata={"first_letter": "f"}), 0.0)
    ]
    # Filtering out the exact match yields a non-zero distance.
    output = docsearch.similarity_search_with_score(
        "far", k=1, filter={"first_letter": "b"}
    )
    assert output == [
        (Document(page_content="bar", metadata={"first_letter": "b"}), 1.0)
    ]
def test_chroma_with_persistence() -> None:
    """End-to-end construction and search, with persistence to disk."""
    chroma_persist_dir = "./tests/persist_dir"
    collection_name = "test_collection"
    texts = ["foo", "bar", "baz"]
    docsearch = Chroma.from_texts(
        collection_name=collection_name,
        texts=texts,
        embedding=FakeEmbeddings(),
        persist_directory=chroma_persist_dir,
    )
    output = docsearch.similarity_search("foo", k=1)
    assert output == [Document(page_content="foo")]
    # Get a new VectorStore from the persisted directory
    docsearch = Chroma(
        collection_name=collection_name,
        embedding_function=FakeEmbeddings(),
        persist_directory=chroma_persist_dir,
    )
    output = docsearch.similarity_search("foo", k=1)
    # The reloaded store must serve the persisted documents; the original
    # test computed this result but never asserted on it.
    assert output == [Document(page_content="foo")]
    # Clean up
    docsearch.delete_collection()
    # Persist doesn't need to be called again
    # Data will be automatically persisted on object deletion
    # Or on program exit
def test_chroma_mmr() -> None:
    """Max marginal relevance search returns the best match for k=1."""
    texts = ["foo", "bar", "baz"]
    docsearch = Chroma.from_texts(
        collection_name="test_collection", texts=texts, embedding=FakeEmbeddings()
    )
    output = docsearch.max_marginal_relevance_search("foo", k=1)
    assert output == [Document(page_content="foo")]
def test_chroma_mmr_by_vector() -> None:
    """MMR search works when given a precomputed embedding vector."""
    texts = ["foo", "bar", "baz"]
    embeddings = FakeEmbeddings()
    docsearch = Chroma.from_texts(
        collection_name="test_collection", texts=texts, embedding=embeddings
    )
    embedded_query = embeddings.embed_query("foo")
    output = docsearch.max_marginal_relevance_search_by_vector(embedded_query, k=1)
    assert output == [Document(page_content="foo")]
def test_chroma_with_include_parameter() -> None:
    """get() returns embeddings only when explicitly included."""
    texts = ["foo", "bar", "baz"]
    docsearch = Chroma.from_texts(
        collection_name="test_collection", texts=texts, embedding=FakeEmbeddings()
    )
    output = docsearch.get(include=["embeddings"])
    assert output["embeddings"] is not None
    # Default get() omits the (potentially large) embeddings payload.
    output = docsearch.get()
    assert output["embeddings"] is None
def test_chroma_update_document() -> None:
    """update_document replaces both the stored text and its embedding."""
    # Make a consistent embedding
    embedding = ConsistentFakeEmbeddings()
    # Initial document content and id
    initial_content = "foo"
    document_id = "doc1"
    # Create an instance of Document with initial content and metadata
    original_doc = Document(page_content=initial_content, metadata={"page": "0"})
    # Initialize a Chroma instance with the original document
    docsearch = Chroma.from_documents(
        collection_name="test_collection",
        documents=[original_doc],
        embedding=embedding,
        ids=[document_id],
    )
    # Snapshot the embedding before the update for later comparison.
    old_embedding = docsearch._collection.peek()["embeddings"][  # type: ignore[index]
        docsearch._collection.peek()["ids"].index(document_id)
    ]
    # Define updated content for the document
    updated_content = "updated foo"
    # Create a new Document instance with the updated content and the same id
    updated_doc = Document(page_content=updated_content, metadata={"page": "0"})
    # Update the document in the Chroma instance
    docsearch.update_document(document_id=document_id, document=updated_doc)
    # Perform a similarity search with the updated content
    output = docsearch.similarity_search(updated_content, k=1)
    # Assert that the updated document is returned by the search
    assert output == [Document(page_content=updated_content, metadata={"page": "0"})]
    # Assert that the new embedding is correct
    new_embedding = docsearch._collection.peek()["embeddings"][  # type: ignore[index]
        docsearch._collection.peek()["ids"].index(document_id)
    ]
    assert new_embedding == embedding.embed_documents([updated_content])[0]
    assert new_embedding != old_embedding
def test_chroma_with_relevance_score() -> None:
    """Test to make sure the relevance score is scaled to 0-1."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = Chroma.from_texts(
        collection_name="test_collection",
        texts=texts,
        embedding=FakeEmbeddings(),
        metadatas=metadatas,
        # Force L2 distance so the default normalization applies.
        collection_metadata={"hnsw:space": "l2"},
    )
    output = docsearch.similarity_search_with_relevance_scores("foo", k=3)
    # Exact match normalizes to 1.0; farther documents score lower.
    assert output == [
        (Document(page_content="foo", metadata={"page": "0"}), 1.0),
        (Document(page_content="bar", metadata={"page": "1"}), 0.8),
        (Document(page_content="baz", metadata={"page": "2"}), 0.5),
    ]
def test_chroma_with_relevance_score_custom_normalization_fn() -> None:
    """Test searching with relevance score and custom normalization function."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": str(i)} for i in range(len(texts))]
    docsearch = Chroma.from_texts(
        collection_name="test_collection",
        texts=texts,
        embedding=FakeEmbeddings(),
        metadatas=metadatas,
        # Custom function multiplies every distance by 0, so all scores
        # collapse to (negative) zero.
        relevance_score_fn=lambda d: d * 0,
        collection_metadata={"hnsw:space": "l2"},
    )
    output = docsearch.similarity_search_with_relevance_scores("foo", k=3)
    assert output == [
        (Document(page_content="foo", metadata={"page": "0"}), -0.0),
        (Document(page_content="bar", metadata={"page": "1"}), -0.0),
        (Document(page_content="baz", metadata={"page": "2"}), -0.0),
    ]
def test_init_from_client() -> None:
    """Chroma can be constructed from a pre-built chromadb client."""
    import chromadb
    client = chromadb.Client(chromadb.config.Settings())
    Chroma(client=client)
def test_init_from_client_settings() -> None:
    """Chroma can be constructed from chromadb client settings alone."""
    import chromadb
    client_settings = chromadb.config.Settings()
    Chroma(client_settings=client_settings)
def test_chroma_add_documents_no_metadata() -> None:
    """Documents without metadata can be added without error."""
    db = Chroma(embedding_function=FakeEmbeddings())
    db.add_documents([Document(page_content="foo")])
def test_chroma_add_documents_mixed_metadata() -> None:
    """A mix of documents with and without metadata can be added together."""
    db = Chroma(embedding_function=FakeEmbeddings())
    docs = [
        Document(page_content="foo"),
        Document(page_content="bar", metadata={"baz": 1}),
    ]
    ids = ["0", "1"]
    actual_ids = db.add_documents(docs, ids=ids)
    assert actual_ids == ids
    search = db.similarity_search("foo bar")
    # Compare order-insensitively; retrieval order is not under test here.
    assert sorted(search, key=lambda d: d.page_content) == sorted(
        docs, key=lambda d: d.page_content
    )
def is_api_accessible(url: str) -> bool:
    """Probe *url* and report whether it answers 200 OK.

    Used in ``skipif`` guards, so any request failure (connection refused,
    DNS error, timeout) just means "not accessible" rather than a test error.
    """
    try:
        # Bounded timeout so an unresponsive host cannot hang collection.
        response = requests.get(url, timeout=5)
    except requests.RequestException:
        # Narrowed from a blanket ``except Exception`` — only network-level
        # failures should be treated as "API not accessible".
        return False
    return response.status_code == 200
def batch_support_chroma_version() -> bool:
try:
import chromadb
except Exception:
return False
major, minor, patch = chromadb.__version__.split(".")
if int(major) == 0 and int(minor) >= 4 and int(patch) >= 10:
return True
return False
@pytest.mark.requires("chromadb")
@pytest.mark.skipif(
    not is_api_accessible("http://localhost:8000/api/v1/heartbeat"),
    reason="API not accessible",
)
@pytest.mark.skipif(
    not batch_support_chroma_version(),
    reason="ChromaDB version does not support batching",
)
def test_chroma_large_batch() -> None:
    """Inserting more documents than max_batch_size succeeds via batching."""
    import chromadb
    client = chromadb.HttpClient()
    embedding_function = Fak(size=255)
    col = client.get_or_create_collection(
        "my_collection",
        embedding_function=embedding_function.embed_documents,  # type: ignore
    )
    # Deliberately exceed the server's maximum batch size by 100 documents.
    docs = ["This is a test document"] * (client.max_batch_size + 100)  # type: ignore[attr-defined]
    Chroma.from_texts(
        client=client,
        collection_name=col.name,
        texts=docs,
        embedding=embedding_function,
        ids=[str(uuid.uuid4()) for _ in range(len(docs))],
    )
@pytest.mark.requires("chromadb")
@pytest.mark.skipif(
    not is_api_accessible("http://localhost:8000/api/v1/heartbeat"),
    reason="API not accessible",
)
@pytest.mark.skipif(
    not batch_support_chroma_version(),
    reason="ChromaDB version does not support batching",
)
def test_chroma_large_batch_update() -> None:
    """Updating more documents than max_batch_size succeeds via batching."""
    import chromadb
    client = chromadb.HttpClient()
    embedding_function = Fak(size=255)
    col = client.get_or_create_collection(
        "my_collection",
        embedding_function=embedding_function.embed_documents,  # type: ignore
    )
    # Deliberately exceed the server's maximum batch size by 100 documents.
    docs = ["This is a test document"] * (client.max_batch_size + 100)  # type: ignore[attr-defined]
    ids = [str(uuid.uuid4()) for _ in range(len(docs))]
    db = Chroma.from_texts(
        client=client,
        collection_name=col.name,
        texts=docs,
        embedding=embedding_function,
        ids=ids,
    )
    # Update all but the last 10 documents, reusing their original ids.
    new_docs = [
        Document(
            page_content="This is a new test document", metadata={"doc_id": f"{i}"}
        )
        for i in range(len(docs) - 10)
    ]
    new_ids = [_id for _id in ids[: len(new_docs)]]
    db.update_documents(ids=new_ids, documents=new_docs)
@pytest.mark.requires("chromadb")
@pytest.mark.skipif(
    not is_api_accessible("http://localhost:8000/api/v1/heartbeat"),
    reason="API not accessible",
)
@pytest.mark.skipif(
    # Runs only on legacy (pre-0.4.10) chromadb; skipped when batching exists.
    # The original reason text ("does not support batching") described the
    # opposite of this condition and is corrected here.
    batch_support_chroma_version(),
    reason="ChromaDB version supports batching",
)
def test_chroma_legacy_batching() -> None:
    """Small unbatched inserts still work on legacy chromadb versions."""
    import chromadb
    client = chromadb.HttpClient()
    embedding_function = Fak(size=255)
    col = client.get_or_create_collection(
        "my_collection",
        embedding_function=embedding_function.embed_documents,  # type: ignore
    )
    docs = ["This is a test document"] * 100
    Chroma.from_texts(
        client=client,
        collection_name=col.name,
        texts=docs,
        embedding=embedding_function,
        ids=[str(uuid.uuid4()) for _ in range(len(docs))],
    )
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_bagel.py | from langchain_core.documents import Document
from langchain_community.vectorstores import Bagel
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
)
def test_similarity_search() -> None:
    """Adding texts to a cluster and querying returns the closest match."""
    from bagel.config import Settings

    cluster_settings = Settings(
        bagel_api_impl="rest",
        bagel_server_host="api.bageldb.ai",
    )
    store = Bagel(client_settings=cluster_settings)
    store.add_texts(texts=["hello bagel", "hello langchain"])
    hits = store.similarity_search(query="bagel", k=1)
    assert hits == [Document(page_content="hello bagel")]
    store.delete_cluster()
def test_bagel() -> None:
    """``from_texts`` builds a searchable cluster."""
    corpus = ["hello bagel", "hello langchain"]
    store = Bagel.from_texts(cluster_name="testing", texts=corpus)
    hits = store.similarity_search("hello bagel", k=1)
    assert hits == [Document(page_content="hello bagel")]
    store.delete_cluster()
def test_with_metadatas() -> None:
    """Metadata supplied at construction comes back with search hits."""
    corpus = ["hello bagel", "hello langchain"]
    metas = [{"metadata": str(idx)} for idx, _ in enumerate(corpus)]
    store = Bagel.from_texts(
        cluster_name="testing",
        texts=corpus,
        metadatas=metas,
    )
    hits = store.similarity_search("hello bagel", k=1)
    assert hits == [Document(page_content="hello bagel", metadata={"metadata": "0"})]
    store.delete_cluster()
def test_with_metadatas_with_scores() -> None:
    """Scored search pairs each hit with its distance."""
    corpus = ["hello bagel", "hello langchain"]
    metas = [{"page": str(idx)} for idx, _ in enumerate(corpus)]
    store = Bagel.from_texts(cluster_name="testing", texts=corpus, metadatas=metas)
    hits = store.similarity_search_with_score("hello bagel", k=1)
    expected = (Document(page_content="hello bagel", metadata={"page": "0"}), 0.0)
    assert hits == [expected]
    store.delete_cluster()
def test_with_metadatas_with_scores_using_vector() -> None:
    """Scored search by a raw embedding vector finds the exact match."""
    corpus = ["hello bagel", "hello langchain"]
    metas = [{"page": str(idx)} for idx, _ in enumerate(corpus)]
    vectors = [[1.1, 2.3, 3.2], [0.3, 0.3, 0.1]]
    store = Bagel.from_texts(
        cluster_name="testing_vector",
        texts=corpus,
        metadatas=metas,
        text_embeddings=vectors,
    )
    query_vector = [1.1, 2.3, 3.2]
    hits = store.similarity_search_by_vector_with_relevance_scores(
        query_embeddings=query_vector, k=1
    )
    expected = (Document(page_content="hello bagel", metadata={"page": "0"}), 0.0)
    assert hits == [expected]
    store.delete_cluster()
def test_with_metadatas_with_scores_using_vector_embe() -> None:
    """Scored search via an embedding function finds the exact match."""
    corpus = ["hello bagel", "hello langchain"]
    metas = [{"page": str(idx)} for idx, _ in enumerate(corpus)]
    embedder = FakeEmbeddings()
    store = Bagel.from_texts(
        cluster_name="testing_vector_embedding1",
        texts=corpus,
        metadatas=metas,
        embedding=embedder,
    )
    query_vector = embedder.embed_query("hello bagel")
    hits = store.similarity_search_by_vector_with_relevance_scores(
        query_embeddings=query_vector, k=1
    )
    expected = (Document(page_content="hello bagel", metadata={"page": "0"}), 0.0)
    assert hits == [expected]
    store.delete_cluster()
def test_search_filter() -> None:
    """Metadata ``where`` filters constrain search results."""
    corpus = ["hello bagel", "hello langchain"]
    metas = [{"first_letter": text[0]} for text in corpus]
    store = Bagel.from_texts(
        cluster_name="testing",
        texts=corpus,
        metadatas=metas,
    )
    hits = store.similarity_search("bagel", k=1, where={"first_letter": "h"})
    assert hits == [
        Document(page_content="hello bagel", metadata={"first_letter": "h"})
    ]
    hits = store.similarity_search("langchain", k=1, where={"first_letter": "h"})
    assert hits == [
        Document(page_content="hello langchain", metadata={"first_letter": "h"})
    ]
    store.delete_cluster()
def test_search_filter_with_scores() -> None:
    """Scored search honors metadata ``where`` filters."""
    corpus = ["hello bagel", "this is langchain"]
    metas = [{"source": "notion"}, {"source": "google"}]
    store = Bagel.from_texts(
        cluster_name="testing",
        texts=corpus,
        metadatas=metas,
    )
    hits = store.similarity_search_with_score(
        "hello bagel", k=1, where={"source": "notion"}
    )
    expected = (
        Document(page_content="hello bagel", metadata={"source": "notion"}),
        0.0,
    )
    assert hits == [expected]
    store.delete_cluster()
def test_with_include_parameter() -> None:
    """``get`` returns embeddings only when explicitly included."""
    corpus = ["hello bagel", "this is langchain"]
    store = Bagel.from_texts(cluster_name="testing", texts=corpus)
    with_embeddings = store.get(include=["embeddings"])
    assert with_embeddings["embeddings"] is not None
    without_embeddings = store.get()
    assert without_embeddings["embeddings"] is None
    store.delete_cluster()
def test_bagel_update_document() -> None:
    """``update_document`` replaces a stored document's content in place."""
    doc_id = "doc1"
    first_version = Document(page_content="bagel", metadata={"page": "0"})
    store = Bagel.from_documents(
        cluster_name="testing_docs",
        documents=[first_version],
        ids=[doc_id],
    )
    new_text = "updated bagel doc"
    second_version = Document(page_content=new_text, metadata={"page": "0"})
    store.update_document(document_id=doc_id, document=second_version)
    hits = store.similarity_search(new_text, k=1)
    assert hits == [Document(page_content=new_text, metadata={"page": "0"})]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_upstash.py | """Test Upstash Vector functionality."""
import os
from time import sleep
from typing import List, Tuple
# to fix the following error in test with vcr and asyncio
#
# RuntimeError: asyncio.run() cannot be called from a running event loop
import pytest
from langchain_core.documents import Document
from langchain_community.vectorstores.upstash import UpstashVectorStore
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
)
@pytest.fixture(scope="module")
def vcr_cassette_dir() -> str:
    """Store VCR cassettes under cassettes/test_upstash/{item}.yaml."""
    return os.path.join("cassettes", "test_upstash")
@pytest.fixture(scope="function", autouse=True)
def fixture() -> None:
    """Wipe both test indexes before every test and wait until they settle."""
    plain_store = UpstashVectorStore()
    embedding_store = UpstashVectorStore(
        index_url=os.environ["UPSTASH_VECTOR_URL_EMBEDDING"],
        index_token=os.environ["UPSTASH_VECTOR_TOKEN_EMBEDDING"],
    )
    # Delete everything first, then wait for both indexes to finish.
    for store in (plain_store, embedding_store):
        store.delete(delete_all=True)
    for store in (plain_store, embedding_store):
        wait_for_indexing(store)
def wait_for_indexing(store: UpstashVectorStore) -> None:
    """Block until the store reports no pending (unindexed) vectors."""
    while True:
        if store.info().pending_vector_count == 0:
            break
        # Poll at a coarse interval; indexing usually settles quickly.
        sleep(0.5)
def check_response_with_score(
    result: List[Tuple[Document, float]],
    expected: List[Tuple[Document, float]],
) -> None:
    """Assert ``result`` equals ``expected`` after rounding scores.

    Scores in ``result`` are rounded to two decimal places before
    comparing, so expectations can be written with coarse values.
    """
    rounded = [(doc, round(score, 2)) for doc, score in result]
    assert rounded == expected
@pytest.mark.vcr()
def test_upstash_simple_insert() -> None:
    """A freshly inserted text is the top similarity hit."""
    store = UpstashVectorStore.from_texts(
        texts=["foo", "bar", "baz"], embedding=FakeEmbeddings()
    )
    wait_for_indexing(store)
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo")]
@pytest.mark.vcr()
async def test_upstash_simple_insert_async() -> None:
    """Async variant: a freshly inserted text is the top similarity hit."""
    store = UpstashVectorStore.from_texts(
        texts=["foo", "bar", "baz"], embedding=FakeEmbeddings()
    )
    wait_for_indexing(store)
    hits = await store.asimilarity_search("foo", k=1)
    assert hits == [Document(page_content="foo")]
@pytest.mark.vcr()
def test_upstash_with_metadatas() -> None:
    """Metadata attached at insert time comes back with search hits."""
    corpus = ["foo", "bar", "baz"]
    metas = [{"page": str(idx)} for idx, _ in enumerate(corpus)]
    store = UpstashVectorStore.from_texts(
        texts=corpus,
        embedding=FakeEmbeddings(),
        metadatas=metas,
    )
    wait_for_indexing(store)
    hits = store.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo", metadata={"page": "0"})]
@pytest.mark.vcr()
async def test_upstash_with_metadatas_async() -> None:
    """Async variant: insert-time metadata comes back with search hits."""
    corpus = ["foo", "bar", "baz"]
    metas = [{"page": str(idx)} for idx, _ in enumerate(corpus)]
    store = UpstashVectorStore.from_texts(
        texts=corpus,
        embedding=FakeEmbeddings(),
        metadatas=metas,
    )
    wait_for_indexing(store)
    hits = await store.asimilarity_search("foo", k=1)
    assert hits == [Document(page_content="foo", metadata={"page": "0"})]
@pytest.mark.vcr()
def test_upstash_with_metadatas_with_scores() -> None:
    """Scored search pairs each hit with its similarity score."""
    corpus = ["foo", "bar", "baz"]
    metas = [{"page": str(idx)} for idx, _ in enumerate(corpus)]
    store = UpstashVectorStore.from_texts(
        texts=corpus,
        embedding=FakeEmbeddings(),
        metadatas=metas,
    )
    wait_for_indexing(store)
    hits = store.similarity_search_with_score("foo", k=1)
    assert hits == [(Document(page_content="foo", metadata={"page": "0"}), 1.0)]
@pytest.mark.vcr()
async def test_upstash_with_metadatas_with_scores_async() -> None:
    """Async variant: scored search pairs each hit with its score."""
    corpus = ["foo", "bar", "baz"]
    metas = [{"page": str(idx)} for idx, _ in enumerate(corpus)]
    store = UpstashVectorStore.from_texts(
        texts=corpus,
        embedding=FakeEmbeddings(),
        metadatas=metas,
    )
    wait_for_indexing(store)
    hits = await store.asimilarity_search_with_score("foo", k=1)
    assert hits == [(Document(page_content="foo", metadata={"page": "0"}), 1.0)]
@pytest.mark.vcr()
def test_upstash_with_metadatas_with_scores_using_vector() -> None:
    """Scored search by a raw embedding vector finds the exact match."""
    corpus = ["foo", "bar", "baz"]
    metas = [{"page": str(idx)} for idx, _ in enumerate(corpus)]
    embedder = FakeEmbeddings()
    store = UpstashVectorStore.from_texts(
        texts=corpus,
        embedding=embedder,
        metadatas=metas,
    )
    wait_for_indexing(store)
    query_vector = embedder.embed_query("foo")
    hits = store.similarity_search_by_vector_with_score(embedding=query_vector, k=1)
    assert hits == [(Document(page_content="foo", metadata={"page": "0"}), 1.0)]
@pytest.mark.vcr()
async def test_upstash_with_metadatas_with_scores_using_vector_async() -> None:
    """Async variant: scored search by raw embedding vector."""
    corpus = ["foo", "bar", "baz"]
    metas = [{"page": str(idx)} for idx, _ in enumerate(corpus)]
    embedder = FakeEmbeddings()
    store = UpstashVectorStore.from_texts(
        texts=corpus,
        embedding=embedder,
        metadatas=metas,
    )
    wait_for_indexing(store)
    query_vector = embedder.embed_query("foo")
    hits = await store.asimilarity_search_by_vector_with_score(
        embedding=query_vector, k=1
    )
    assert hits == [(Document(page_content="foo", metadata={"page": "0"}), 1.0)]
@pytest.mark.vcr()
def test_upstash_mmr() -> None:
    """MMR search returns the best match for the query."""
    store = UpstashVectorStore.from_texts(
        texts=["foo", "bar", "baz"], embedding=FakeEmbeddings()
    )
    wait_for_indexing(store)
    hits = store.max_marginal_relevance_search("foo", k=1)
    assert hits == [Document(page_content="foo")]
@pytest.mark.vcr()
async def test_upstash_mmr_async() -> None:
    """Async variant: MMR search returns the best match."""
    store = UpstashVectorStore.from_texts(
        texts=["foo", "bar", "baz"], embedding=FakeEmbeddings()
    )
    wait_for_indexing(store)
    hits = await store.amax_marginal_relevance_search("foo", k=1)
    assert hits == [Document(page_content="foo")]
@pytest.mark.vcr()
def test_upstash_mmr_by_vector() -> None:
    """MMR search by a raw embedding vector returns the best match."""
    embedder = FakeEmbeddings()
    store = UpstashVectorStore.from_texts(
        texts=["foo", "bar", "baz"], embedding=embedder
    )
    wait_for_indexing(store)
    query_vector = embedder.embed_query("foo")
    hits = store.max_marginal_relevance_search_by_vector(query_vector, k=1)
    assert hits == [Document(page_content="foo")]
@pytest.mark.vcr()
async def test_upstash_mmr_by_vector_async() -> None:
    """Async variant: MMR search by raw embedding vector."""
    embedder = FakeEmbeddings()
    store = UpstashVectorStore.from_texts(
        texts=["foo", "bar", "baz"], embedding=embedder
    )
    wait_for_indexing(store)
    query_vector = embedder.embed_query("foo")
    hits = await store.amax_marginal_relevance_search_by_vector(query_vector, k=1)
    assert hits == [Document(page_content="foo")]
@pytest.mark.vcr()
def test_init_from_index() -> None:
    """Constructing from an existing ``Index`` object works."""
    from upstash_vector import Index

    store = UpstashVectorStore(index=Index.from_env())
    assert store.info() is not None
@pytest.mark.vcr()
async def test_init_from_async_index() -> None:
    """Constructing from an existing ``AsyncIndex`` object works."""
    from upstash_vector import AsyncIndex

    store = UpstashVectorStore(async_index=AsyncIndex.from_env())
    assert await store.ainfo() is not None
@pytest.mark.vcr()
def test_init_from_credentials() -> None:
    """Constructing from URL/token credentials works."""
    url = os.environ["UPSTASH_VECTOR_REST_URL"]
    token = os.environ["UPSTASH_VECTOR_REST_TOKEN"]
    store = UpstashVectorStore(index_url=url, index_token=token)
    assert store.info() is not None
@pytest.mark.vcr()
async def test_init_from_credentials_async() -> None:
    """Async variant: constructing from URL/token credentials works."""
    url = os.environ["UPSTASH_VECTOR_REST_URL"]
    token = os.environ["UPSTASH_VECTOR_REST_TOKEN"]
    store = UpstashVectorStore(index_url=url, index_token=token)
    assert await store.ainfo() is not None
@pytest.mark.vcr()
def test_upstash_add_documents_no_metadata() -> None:
    """Documents without metadata round-trip through search."""
    store = UpstashVectorStore(embedding=FakeEmbeddings())
    store.add_documents([Document(page_content="foo")])
    wait_for_indexing(store)
    assert store.similarity_search("foo") == [Document(page_content="foo")]
@pytest.mark.vcr()
def test_upstash_add_documents_mixed_metadata() -> None:
    """A mix of documents with and without metadata round-trips."""
    store = UpstashVectorStore(embedding=FakeEmbeddings())
    docs = [
        Document(page_content="foo"),
        Document(page_content="bar", metadata={"baz": 1}),
    ]
    assigned_ids = store.add_documents(docs, ids=["0", "1"])
    wait_for_indexing(store)
    assert assigned_ids == ["0", "1"]

    def by_content(doc: Document) -> str:
        return doc.page_content

    hits = store.similarity_search("foo bar")
    assert sorted(hits, key=by_content) == sorted(docs, key=by_content)
@pytest.mark.vcr()
def test_upstash_similarity_search_with_metadata() -> None:
    """Similarity search honors Upstash metadata filter expressions."""
    store = UpstashVectorStore(embedding=FakeEmbeddings())
    docs = [
        Document(page_content="foo"),
        Document(page_content="bar", metadata={"waldo": 1}),
        Document(page_content="baz", metadata={"waldo": 1}),
        Document(page_content="fred", metadata={"waldo": 2}),
    ]
    store.add_documents(docs, ids=["0", "1", "3", "4"])
    wait_for_indexing(store)
    filtered = store.similarity_search(query="foo", k=5, filter="waldo = 1")
    assert filtered == [
        Document(page_content="bar", metadata={"waldo": 1}),
        Document(page_content="baz", metadata={"waldo": 1}),
    ]
    scored = store.similarity_search_with_score(query="foo", k=5, filter="waldo = 2")
    check_response_with_score(
        scored, [(Document(page_content="fred", metadata={"waldo": 2}), 0.85)]
    )
@pytest.mark.vcr()
async def test_upstash_similarity_search_with_metadata_async() -> None:
    """Async variant: similarity search honors metadata filters."""
    store = UpstashVectorStore(embedding=FakeEmbeddings())
    docs = [
        Document(page_content="foo"),
        Document(page_content="bar", metadata={"waldo": 1}),
        Document(page_content="baz", metadata={"waldo": 1}),
        Document(page_content="fred", metadata={"waldo": 2}),
    ]
    store.add_documents(docs, ids=["0", "1", "3", "4"])
    wait_for_indexing(store)
    filtered = await store.asimilarity_search(query="foo", k=5, filter="waldo = 1")
    assert filtered == [
        Document(page_content="bar", metadata={"waldo": 1}),
        Document(page_content="baz", metadata={"waldo": 1}),
    ]
    scored = await store.asimilarity_search_with_score(
        query="foo", k=5, filter="waldo = 2"
    )
    check_response_with_score(
        scored, [(Document(page_content="fred", metadata={"waldo": 2}), 0.85)]
    )
@pytest.mark.vcr()
def test_upstash_similarity_search_by_vector_with_metadata() -> None:
    """Vector search honors Upstash metadata filter expressions."""
    store = UpstashVectorStore(embedding=FakeEmbeddings())
    docs = [
        Document(page_content="foo"),
        Document(page_content="bar", metadata={"waldo": 1}),
        Document(page_content="baz", metadata={"waldo": 1}),
        Document(page_content="fred", metadata={"waldo": 2}),
    ]
    store.add_documents(docs, ids=["0", "1", "3", "4"])
    wait_for_indexing(store)
    scored = store.similarity_search_by_vector_with_score(
        embedding=[1.0] * 10,
        k=5,
        filter="waldo = 1",
    )
    check_response_with_score(
        scored,
        [
            (Document(page_content="bar", metadata={"waldo": 1}), 1.0),
            (Document(page_content="baz", metadata={"waldo": 1}), 0.98),
        ],
    )
@pytest.mark.vcr()
async def test_upstash_similarity_search_by_vector_with_metadata_async() -> None:
    """Async variant: vector search honors metadata filters."""
    store = UpstashVectorStore(embedding=FakeEmbeddings())
    docs = [
        Document(page_content="foo"),
        Document(page_content="bar", metadata={"waldo": 1}),
        Document(page_content="baz", metadata={"waldo": 1}),
        Document(page_content="fred", metadata={"waldo": 2}),
    ]
    store.add_documents(docs, ids=["0", "1", "3", "4"])
    wait_for_indexing(store)
    scored = await store.asimilarity_search_by_vector_with_score(
        embedding=[1.0] * 10,
        k=5,
        filter="waldo = 1",
    )
    check_response_with_score(
        scored,
        [
            (Document(page_content="bar", metadata={"waldo": 1}), 1.0),
            (Document(page_content="baz", metadata={"waldo": 1}), 0.98),
        ],
    )
@pytest.mark.vcr()
def test_upstash_max_marginal_relevance_search_with_metadata() -> None:
    """MMR search honors Upstash metadata filter expressions."""
    store = UpstashVectorStore(embedding=FakeEmbeddings())
    docs = [
        Document(page_content="foo"),
        Document(page_content="bar", metadata={"waldo": 1}),
        Document(page_content="baz", metadata={"waldo": 1}),
        Document(page_content="fred", metadata={"waldo": 2}),
    ]
    store.add_documents(docs, ids=["0", "1", "3", "4"])
    wait_for_indexing(store)
    hits = store.max_marginal_relevance_search(query="foo", k=3, filter="waldo = 1")
    assert hits == [
        Document(page_content="bar", metadata={"waldo": 1}),
        Document(page_content="baz", metadata={"waldo": 1}),
    ]
@pytest.mark.vcr()
async def test_upstash_max_marginal_relevance_search_with_metadata_async() -> None:
    """Async variant: MMR search honors metadata filters."""
    store = UpstashVectorStore(embedding=FakeEmbeddings())
    docs = [
        Document(page_content="foo"),
        Document(page_content="bar", metadata={"waldo": 1}),
        Document(page_content="baz", metadata={"waldo": 1}),
        Document(page_content="fred", metadata={"waldo": 2}),
    ]
    store.add_documents(docs, ids=["0", "1", "3", "4"])
    wait_for_indexing(store)
    hits = await store.amax_marginal_relevance_search(
        query="foo", k=3, filter="waldo = 1"
    )
    assert hits == [
        Document(page_content="bar", metadata={"waldo": 1}),
        Document(page_content="baz", metadata={"waldo": 1}),
    ]
@pytest.mark.vcr()
def test_embeddings_configurations() -> None:
    """Exercise the three accepted values of the ``embedding`` parameter."""
    # Case 1: a real Embeddings implementation embeds locally.
    store = UpstashVectorStore(embedding=FakeEmbeddings())
    assert store._embed_query("query") == [1, 1, 1, 1, 1, 1, 1, 1, 1, 0]
    assert store._embed_documents(["doc1", "doc2"]) == [
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    ]
    # Case 2: embedding=False means embedding is unsupported and must raise.
    store = UpstashVectorStore(embedding=False)
    with pytest.raises(ValueError):
        store._embed_query("query")
    with pytest.raises(ValueError):
        store._embed_documents(["doc1", "doc2"])
    # Case 3: embedding=True defers embedding to the Upstash index, so the
    # raw text passes through unchanged.
    store = UpstashVectorStore(
        index_url=os.environ["UPSTASH_VECTOR_URL_EMBEDDING"],
        index_token=os.environ["UPSTASH_VECTOR_TOKEN_EMBEDDING"],
        embedding=True,
    )
    assert store._embed_query("query") == "query"
    assert store._embed_documents(["doc1", "doc2"]) == ["doc1", "doc2"]
@pytest.mark.vcr()
def test_embedding_index() -> None:
    """An Upstash embedding index should embed and rank raw text queries."""
    store = UpstashVectorStore(
        index_url=os.environ["UPSTASH_VECTOR_URL_EMBEDDING"],
        index_token=os.environ["UPSTASH_VECTOR_TOKEN_EMBEDDING"],
        embedding=True,
    )
    store.add_documents(
        [
            Document(page_content="penguin", metadata={"topic": "bird"}),
            Document(page_content="albatros", metadata={"topic": "bird"}),
            Document(page_content="beethoven", metadata={"topic": "composer"}),
            Document(page_content="rachmaninoff", metadata={"topic": "composer"}),
        ]
    )
    wait_for_indexing(store)
    # Plain scored similarity search.
    check_response_with_score(
        store.similarity_search_with_score(query="eagle", k=2),
        [
            (Document(page_content="penguin", metadata={"topic": "bird"}), 0.82),
            (Document(page_content="albatros", metadata={"topic": "bird"}), 0.78),
        ],
    )
    # Relevance-score variant.
    check_response_with_score(
        store.similarity_search_with_relevance_scores(query="mozart", k=2),
        [
            (Document(page_content="beethoven", metadata={"topic": "composer"}), 0.88),
            (
                Document(page_content="rachmaninoff", metadata={"topic": "composer"}),
                0.84,
            ),
        ],
    )
@pytest.mark.vcr()
async def test_embedding_index_async() -> None:
    """Async variant: the Upstash embedding index embeds raw text queries."""
    store = UpstashVectorStore(
        index_url=os.environ["UPSTASH_VECTOR_URL_EMBEDDING"],
        index_token=os.environ["UPSTASH_VECTOR_TOKEN_EMBEDDING"],
        embedding=True,
    )
    await store.aadd_documents(
        [
            Document(page_content="penguin", metadata={"topic": "bird"}),
            Document(page_content="albatros", metadata={"topic": "bird"}),
            Document(page_content="beethoven", metadata={"topic": "composer"}),
            Document(page_content="rachmaninoff", metadata={"topic": "composer"}),
        ]
    )
    wait_for_indexing(store)
    # Plain scored similarity search.
    check_response_with_score(
        await store.asimilarity_search_with_score(query="eagle", k=2),
        [
            (Document(page_content="penguin", metadata={"topic": "bird"}), 0.82),
            (Document(page_content="albatros", metadata={"topic": "bird"}), 0.78),
        ],
    )
    # Relevance-score variant.
    check_response_with_score(
        await store.asimilarity_search_with_relevance_scores(query="mozart", k=2),
        [
            (Document(page_content="beethoven", metadata={"topic": "composer"}), 0.88),
            (
                Document(page_content="rachmaninoff", metadata={"topic": "composer"}),
                0.84,
            ),
        ],
    )
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_usearch.py | """Test USearch functionality."""
import pytest
from langchain_core.documents import Document
from langchain_community.vectorstores.usearch import USearch
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_usearch_from_texts() -> None:
    """``from_texts`` builds a searchable index."""
    index = USearch.from_texts(["foo", "bar", "baz"], FakeEmbeddings())
    hits = index.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo")]
def test_usearch_from_documents() -> None:
    """``from_documents`` preserves document metadata."""
    docs = [
        Document(page_content=text, metadata={"a": "b"})
        for text in ["foo", "bar", "baz"]
    ]
    index = USearch.from_documents(docs, FakeEmbeddings())
    hits = index.similarity_search("foo", k=1)
    assert hits == [Document(page_content="foo", metadata={"a": "b"})]
def test_usearch_add_texts() -> None:
    """``add_texts`` appends to an existing index."""
    index = USearch.from_texts(["foo", "bar", "baz"], FakeEmbeddings())
    index.add_texts(["foo"])
    hits = index.similarity_search("foo", k=2)
    assert hits == [Document(page_content="foo"), Document(page_content="foo")]
def test_ip() -> None:
    """Inner-product metric yields the expected raw score."""
    index = USearch.from_texts(["foo", "bar", "baz"], FakeEmbeddings(), metric="ip")
    results = index.similarity_search_with_score("far", k=2)
    second_score = results[1][1]
    assert second_score == -8.0
def test_l2() -> None:
    """Squared-L2 metric yields the expected raw score."""
    index = USearch.from_texts(["foo", "bar", "baz"], FakeEmbeddings(), metric="l2_sq")
    results = index.similarity_search_with_score("far", k=2)
    second_score = results[1][1]
    assert second_score == 1.0
def test_cos() -> None:
    """Cosine metric yields the expected raw score."""
    index = USearch.from_texts(["foo", "bar", "baz"], FakeEmbeddings(), metric="cos")
    results = index.similarity_search_with_score("far", k=2)
    second_score = results[1][1]
    assert second_score == pytest.approx(0.05, abs=0.002)
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_cassandra.py | """Test Cassandra functionality."""
import asyncio
import json
import math
import os
import time
from contextlib import asynccontextmanager, contextmanager
from typing import (
Any,
AsyncGenerator,
Generator,
Iterable,
List,
Optional,
Tuple,
Union,
)
import pytest
from langchain_core.documents import Document
from langchain_community.vectorstores import Cassandra
from tests.integration_tests.vectorstores.fake_embeddings import (
AngularTwoDimensionalEmbeddings,
ConsistentFakeEmbeddings,
Embeddings,
)
# Keyspace created (if needed) and reused by every test in this module.
TEST_KEYSPACE = "vector_test_keyspace"
# similarity threshold definitions
# NOTE(review): these thresholds are not referenced in this chunk — presumably
# used by similarity-score tests elsewhere in the file; confirm before removal.
EUCLIDEAN_MIN_SIM_UNIT_VECTORS = 0.2
MATCH_EPSILON = 0.0001
def _strip_docs(documents: List[Document]) -> List[Document]:
    """Strip every document so comparisons ignore ids and extra fields."""
    return [_strip_doc(document) for document in documents]
def _strip_doc(document: Document) -> Document:
    """Rebuild a document keeping only its page_content and metadata."""
    stripped = Document(
        page_content=document.page_content,
        metadata=document.metadata,
    )
    return stripped
class ParserEmbeddings(Embeddings):
    """Embeddings that parse each text as a JSON list of floats.

    Texts that are not valid JSON embed to the all-zero vector instead.
    """

    def __init__(self, dimension: int) -> None:
        self.dimension = dimension

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        return [self.embed_query(text) for text in texts]

    def embed_query(self, text: str) -> list[float]:
        try:
            parsed = json.loads(text)
        except json.JSONDecodeError:
            # Not JSON: fall back to a zero vector of the right size.
            return [0.0] * self.dimension
        assert len(parsed) == self.dimension
        return parsed
@pytest.fixture
def embedding_d2() -> Embeddings:
    """A two-dimensional JSON-parsing embedding function."""
    return ParserEmbeddings(dimension=2)
@pytest.fixture
def metadata_documents() -> list[Document]:
    """Documents for metadata and id tests."""
    # (letter used as id, phonetic group, 2-d vector as JSON page content)
    letters = [
        ("q", "consonant", "[1,2]"),
        ("w", "consonant", "[3,4]"),
        ("r", "consonant", "[5,6]"),
        ("e", "vowel", "[-1,2]"),
        ("i", "vowel", "[-3,4]"),
        ("o", "vowel", "[-5,6]"),
    ]
    return [
        Document(
            id=letter,
            page_content=content,
            metadata={"ord": str(ord(letter)), "group": group, "letter": letter},
        )
        for letter, group, content in letters
    ]
class CassandraSession:
    """Pairs a connected driver session with the table under test."""

    table_name: str
    session: Any

    def __init__(self, table_name: str, session: Any):
        self.session = session
        self.table_name = table_name
@contextmanager
def get_cassandra_session(
    table_name: str, drop: bool = True
) -> Generator[CassandraSession, None, None]:
    """Initialize the Cassandra cluster and session.

    Connects using CASSANDRA_CONTACT_POINTS (comma-separated) when set,
    otherwise the driver defaults. Creates the test keyspace if missing and,
    when ``drop`` is true, drops ``table_name`` so the test starts empty.
    Always shuts down the session and cluster on exit.
    """
    from cassandra.cluster import Cluster

    if "CASSANDRA_CONTACT_POINTS" in os.environ:
        # Split and strip; blank entries are discarded.
        contact_points = [
            cp.strip()
            for cp in os.environ["CASSANDRA_CONTACT_POINTS"].split(",")
            if cp.strip()
        ]
    else:
        contact_points = None
    cluster = Cluster(contact_points)
    session = cluster.connect()
    try:
        session.execute(
            (
                f"CREATE KEYSPACE IF NOT EXISTS {TEST_KEYSPACE}"
                " WITH replication = "
                "{'class': 'SimpleStrategy', 'replication_factor': 1}"
            )
        )
        if drop:
            # Start from an empty table unless the caller opted out.
            session.execute(f"DROP TABLE IF EXISTS {TEST_KEYSPACE}.{table_name}")
        # Yield the session for usage
        yield CassandraSession(table_name=table_name, session=session)
    finally:
        # Ensure proper shutdown/cleanup of resources
        session.shutdown()
        cluster.shutdown()
@pytest.fixture
def cassandra_session(
    request: pytest.FixtureRequest,
) -> Generator[CassandraSession, None, None]:
    """Session fixture; table name and drop behavior come from the param."""
    params = getattr(request, "param", {})
    with get_cassandra_session(
        params.get("table_name", "vector_test_table"),
        params.get("drop", True),
    ) as cass:
        yield cass
@contextmanager
def vector_store_from_texts(
    texts: List[str],
    metadatas: Optional[List[dict]] = None,
    embedding: Optional[Embeddings] = None,
    drop: bool = True,
    metadata_indexing: Union[Tuple[str, Iterable[str]], str] = "all",
    table_name: str = "vector_test_table",
) -> Generator[Cassandra, None, None]:
    """Yield a Cassandra store built from ``texts`` in a managed session."""
    embedder = ConsistentFakeEmbeddings() if embedding is None else embedding
    with get_cassandra_session(table_name=table_name, drop=drop) as cass:
        yield Cassandra.from_texts(
            texts,
            embedding=embedder,
            metadatas=metadatas,
            session=cass.session,
            keyspace=TEST_KEYSPACE,
            table_name=cass.table_name,
            metadata_indexing=metadata_indexing,
        )
@asynccontextmanager
async def vector_store_from_texts_async(
    texts: List[str],
    metadatas: Optional[List[dict]] = None,
    embedding: Optional[Embeddings] = None,
    drop: bool = True,
    metadata_indexing: Union[Tuple[str, Iterable[str]], str] = "all",
    table_name: str = "vector_test_table",
) -> AsyncGenerator[Cassandra, None]:
    """Async variant: yield a Cassandra store built via ``afrom_texts``."""
    embedder = ConsistentFakeEmbeddings() if embedding is None else embedding
    with get_cassandra_session(table_name=table_name, drop=drop) as cass:
        yield await Cassandra.afrom_texts(
            texts,
            embedding=embedder,
            metadatas=metadatas,
            session=cass.session,
            keyspace=TEST_KEYSPACE,
            table_name=cass.table_name,
            metadata_indexing=metadata_indexing,
        )
@pytest.fixture(scope="function")
def vector_store_d2(
    embedding_d2: Embeddings,
    table_name: str = "vector_test_table_d2",
) -> Generator[Cassandra, None, None]:
    """Yield an empty two-dimensional Cassandra store in a managed session."""
    with get_cassandra_session(table_name=table_name) as cass:
        yield Cassandra(
            embedding=embedding_d2,
            session=cass.session,
            keyspace=TEST_KEYSPACE,
            table_name=cass.table_name,
        )
async def test_cassandra() -> None:
    """End-to-end construction and search, sync and async."""
    with vector_store_from_texts(["foo", "bar", "baz"]) as vstore:
        expected = _strip_docs([Document(page_content="foo")])
        sync_hits = vstore.similarity_search("foo", k=1)
        assert _strip_docs(sync_hits) == expected
        async_hits = await vstore.asimilarity_search("foo", k=1)
        assert _strip_docs(async_hits) == expected
async def test_cassandra_with_score() -> None:
    """End-to-end scored search, sync and async."""
    texts = ["foo", "bar", "baz"]
    metadatas = [{"page": i} for i in range(len(texts))]
    with vector_store_from_texts(texts, metadatas=metadatas) as vstore:
        expected_docs = [
            Document(page_content="foo", metadata={"page": "0.0"}),
            Document(page_content="bar", metadata={"page": "1.0"}),
            Document(page_content="baz", metadata={"page": "2.0"}),
        ]

        def check(results: Any) -> None:
            # Hits must match the expected docs and be ordered by
            # strictly decreasing similarity.
            docs = [doc for doc, _ in results]
            scores = [score for _, score in results]
            assert _strip_docs(docs) == _strip_docs(expected_docs)
            assert scores[0] > scores[1] > scores[2]

        check(vstore.similarity_search_with_score("foo", k=3))
        check(await vstore.asimilarity_search_with_score("foo", k=3))
async def test_cassandra_max_marginal_relevance_search() -> None:
    """
    Test end to end construction and MMR search.

    The embedding function used here ensures `texts` become four vectors
    (v0 through v3) on the unit circle, while the query "0.0" embeds to
    the point (1, 0). With fetch_k==3 and k==2, one expects that v2
    ("+0.25") and v0 ("-0.124") are returned (in some order): v2 is
    close to the query, and v0 is picked over v1 for diversity.
    """
    texts = ["-0.124", "+0.127", "+0.25", "+1.0"]
    metadatas = [{"page": i} for i in range(len(texts))]
    with vector_store_from_texts(
        texts,
        metadatas=metadatas,
        embedding=AngularTwoDimensionalEmbeddings(),
    ) as vstore:
        # Metadata "page" values come back stringified as floats.
        expected_set = {
            ("+0.25", "2.0"),
            ("-0.124", "0.0"),
        }
        output = vstore.max_marginal_relevance_search("0.0", k=2, fetch_k=3)
        output_set = {
            (mmr_doc.page_content, mmr_doc.metadata["page"]) for mmr_doc in output
        }
        assert output_set == expected_set
        # Async path must agree with the sync path.
        output = await vstore.amax_marginal_relevance_search("0.0", k=2, fetch_k=3)
        output_set = {
            (mmr_doc.page_content, mmr_doc.metadata["page"]) for mmr_doc in output
        }
        assert output_set == expected_set
def test_cassandra_add_texts() -> None:
    """Test end to end construction with further insertions."""
    initial_texts = ["foo", "bar", "baz"]
    initial_metadatas = [{"page": n} for n in range(len(initial_texts))]
    with vector_store_from_texts(initial_texts, metadatas=initial_metadatas) as vstore:
        # A second batch, with "page" numbers continuing after the first batch.
        extra_texts = ["foo2", "bar2", "baz2"]
        extra_metadatas = [{"page": n + 3} for n in range(len(extra_texts))]
        vstore.add_texts(extra_texts, extra_metadatas)
        # All six entries (initial + added) must now be retrievable.
        assert len(vstore.similarity_search("foo", k=10)) == 6
async def test_cassandra_add_texts_async() -> None:
    """Test end to end construction with further insertions."""
    initial_texts = ["foo", "bar", "baz"]
    initial_metadatas = [{"page": n} for n in range(len(initial_texts))]
    async with vector_store_from_texts_async(
        initial_texts, metadatas=initial_metadatas
    ) as vstore:
        # A second batch, with "page" numbers continuing after the first batch.
        extra_texts = ["foo2", "bar2", "baz2"]
        extra_metadatas = [{"page": n + 3} for n in range(len(extra_texts))]
        await vstore.aadd_texts(extra_texts, extra_metadatas)
        # All six entries (initial + added) must now be retrievable.
        assert len(await vstore.asimilarity_search("foo", k=10)) == 6
def test_cassandra_no_drop() -> None:
    """Test end to end construction and re-opening the same index."""
    first_texts = ["foo", "bar", "baz"]
    metadatas = [{"page": n} for n in range(len(first_texts))]
    with vector_store_from_texts(first_texts, metadatas=metadatas) as vstore:
        assert len(vstore.similarity_search("foo", k=10)) == 3
    # Re-open without dropping: the previous rows must survive
    # alongside the newly inserted ones.
    second_texts = ["foo2", "bar2", "baz2"]
    with vector_store_from_texts(
        second_texts, metadatas=metadatas, drop=False
    ) as vstore:
        assert len(vstore.similarity_search("foo", k=10)) == 6
async def test_cassandra_no_drop_async() -> None:
    """Test end to end construction and re-opening the same index."""
    first_texts = ["foo", "bar", "baz"]
    metadatas = [{"page": n} for n in range(len(first_texts))]
    async with vector_store_from_texts_async(first_texts, metadatas=metadatas) as vstore:
        assert len(await vstore.asimilarity_search("foo", k=10)) == 3
    # Re-open without dropping: the previous rows must survive
    # alongside the newly inserted ones.
    second_texts = ["foo2", "bar2", "baz2"]
    async with vector_store_from_texts_async(
        second_texts, metadatas=metadatas, drop=False
    ) as vstore:
        assert len(await vstore.asimilarity_search("foo", k=10)) == 6
def test_cassandra_delete() -> None:
    """Test delete methods from vector store."""
    texts = ["foo", "bar", "baz", "gni"]
    metadatas = [{"page": i, "mod2": i % 2} for i in range(len(texts))]
    with vector_store_from_texts([], metadatas=metadatas) as vstore:
        ids = vstore.add_texts(texts, metadatas)
        output = vstore.similarity_search("foo", k=10)
        assert len(output) == 4
        # delete a single entry by its ID
        vstore.delete_by_document_id(ids[0])
        output = vstore.similarity_search("foo", k=10)
        assert len(output) == 3
        # bulk-delete two more entries by ID
        vstore.delete(ids[1:3])
        output = vstore.similarity_search("foo", k=10)
        assert len(output) == 1
        # deleting an unknown ID is a no-op
        vstore.delete(["not-existing"])
        output = vstore.similarity_search("foo", k=10)
        assert len(output) == 1
        vstore.clear()
        # brief pause to let the truncation settle before re-reading
        time.sleep(0.3)
        output = vstore.similarity_search("foo", k=10)
        assert len(output) == 0
        vstore.add_texts(texts, metadatas)
        # metadata-filtered deletion: two of the four rows have mod2 == 0
        num_deleted = vstore.delete_by_metadata_filter({"mod2": 0}, batch_size=1)
        assert num_deleted == 2
        output = vstore.similarity_search("foo", k=10)
        assert len(output) == 2
        vstore.clear()
        # an empty filter is rejected (it would otherwise delete everything)
        with pytest.raises(ValueError):
            vstore.delete_by_metadata_filter({})
async def test_cassandra_delete_async() -> None:
    """Test delete methods from vector store, async version."""
    texts = ["foo", "bar", "baz", "gni"]
    metadatas = [{"page": i, "mod2": i % 2} for i in range(len(texts))]
    async with vector_store_from_texts_async([], metadatas=metadatas) as vstore:
        ids = await vstore.aadd_texts(texts, metadatas)
        output = await vstore.asimilarity_search("foo", k=10)
        assert len(output) == 4
        # delete a single entry by its ID
        await vstore.adelete_by_document_id(ids[0])
        output = await vstore.asimilarity_search("foo", k=10)
        assert len(output) == 3
        # bulk-delete two more entries by ID
        await vstore.adelete(ids[1:3])
        output = await vstore.asimilarity_search("foo", k=10)
        assert len(output) == 1
        # deleting an unknown ID is a no-op
        await vstore.adelete(["not-existing"])
        output = await vstore.asimilarity_search("foo", k=10)
        assert len(output) == 1
        await vstore.aclear()
        # brief pause to let the truncation settle before re-reading
        await asyncio.sleep(0.3)
        # BUGFIX: this previously used the sync `similarity_search` in an
        # otherwise fully-async test; use the async API consistently (a store
        # created via the async setup path is not guaranteed to be usable
        # through the sync methods).
        output = await vstore.asimilarity_search("foo", k=10)
        assert len(output) == 0
        await vstore.aadd_texts(texts, metadatas)
        # metadata-filtered deletion: two of the four rows have mod2 == 0
        num_deleted = await vstore.adelete_by_metadata_filter({"mod2": 0}, batch_size=1)
        assert num_deleted == 2
        output = await vstore.asimilarity_search("foo", k=10)
        assert len(output) == 2
        await vstore.aclear()
        # an empty filter is rejected (it would otherwise delete everything)
        with pytest.raises(ValueError):
            await vstore.adelete_by_metadata_filter({})
def test_cassandra_metadata_indexing() -> None:
    """Test comparing metadata indexing policies."""
    texts = ["foo"]
    metadatas = [{"field1": "a", "field2": "b"}]
    with vector_store_from_texts(texts, metadatas=metadatas) as vstore_all:
        # vstore_all indexes every metadata field; vstore_f1 only "field1".
        with vector_store_from_texts(
            texts,
            metadatas=metadatas,
            metadata_indexing=("allowlist", ["field1"]),
            table_name="vector_test_table_indexing",
            embedding=ConsistentFakeEmbeddings(),
        ) as vstore_f1:
            output_all = vstore_all.similarity_search("bar", k=2)
            output_f1 = vstore_f1.similarity_search("bar", filter={"field1": "a"}, k=2)
            output_f1_no = vstore_f1.similarity_search(
                "bar", filter={"field1": "Z"}, k=2
            )
            # both stores return the doc (full metadata either way) ...
            assert len(output_all) == 1
            assert output_all[0].metadata == metadatas[0]
            assert len(output_f1) == 1
            assert output_f1[0].metadata == metadatas[0]
            # ... and a non-matching value on an indexed field finds nothing
            assert len(output_f1_no) == 0
            with pytest.raises(ValueError):
                # "Non-indexed metadata fields cannot be used in queries."
                vstore_f1.similarity_search("bar", filter={"field2": "b"}, k=2)
class TestCassandraVectorStore:
    @pytest.mark.parametrize(
        "page_contents",
        [
            [
                "[1,2]",
                "[3,4]",
                "[5,6]",
                "[7,8]",
                "[9,10]",
                "[11,12]",
            ],
        ],
    )
    def test_cassandra_vectorstore_from_texts_sync(
        self,
        *,
        cassandra_session: CassandraSession,
        embedding_d2: Embeddings,
        page_contents: list[str],
    ) -> None:
        """from_texts methods and the associated warnings."""
        # Create a store seeded with the first two texts.
        v_store = Cassandra.from_texts(
            texts=page_contents[0:2],
            metadatas=[{"m": 1}, {"m": 3}],
            ids=["ft1", "ft3"],
            table_name=cassandra_session.table_name,
            session=cassandra_session.session,
            keyspace=TEST_KEYSPACE,
            embedding=embedding_d2,
        )
        search_results_triples_0 = v_store.similarity_search_with_score_id(
            page_contents[1],
            k=1,
        )
        assert len(search_results_triples_0) == 1
        res_doc_0, _, res_id_0 = search_results_triples_0[0]
        assert res_doc_0.page_content == page_contents[1]
        # Numeric metadata values come back stringified as floats.
        assert res_doc_0.metadata == {"m": "3.0"}
        assert res_id_0 == "ft3"
        # from_texts on the same table appends: the new rows must be visible
        # through the original store handle.
        Cassandra.from_texts(
            texts=page_contents[2:4],
            metadatas=[{"m": 5}, {"m": 7}],
            ids=["ft5", "ft7"],
            table_name=cassandra_session.table_name,
            session=cassandra_session.session,
            keyspace=TEST_KEYSPACE,
            embedding=embedding_d2,
        )
        search_results_triples_1 = v_store.similarity_search_with_score_id(
            page_contents[3],
            k=1,
        )
        assert len(search_results_triples_1) == 1
        res_doc_1, _, res_id_1 = search_results_triples_1[0]
        assert res_doc_1.page_content == page_contents[3]
        assert res_doc_1.metadata == {"m": "7.0"}
        assert res_id_1 == "ft7"
        # A second store handle over the same table also sees its own inserts.
        v_store_2 = Cassandra.from_texts(
            texts=page_contents[4:6],
            metadatas=[{"m": 9}, {"m": 11}],
            ids=["ft9", "ft11"],
            table_name=cassandra_session.table_name,
            session=cassandra_session.session,
            keyspace=TEST_KEYSPACE,
            embedding=embedding_d2,
        )
        search_results_triples_2 = v_store_2.similarity_search_with_score_id(
            page_contents[5],
            k=1,
        )
        assert len(search_results_triples_2) == 1
        res_doc_2, _, res_id_2 = search_results_triples_2[0]
        assert res_doc_2.page_content == page_contents[5]
        assert res_doc_2.metadata == {"m": "11.0"}
        assert res_id_2 == "ft11"
        v_store_2.clear()
@pytest.mark.parametrize(
"page_contents",
[
["[1,2]", "[3,4]"],
],
)
def test_cassandra_vectorstore_from_documents_sync(
self,
*,
cassandra_session: CassandraSession,
embedding_d2: Embeddings,
page_contents: list[str],
) -> None:
"""from_documents, esp. the various handling of ID-in-doc vs external."""
pc1, pc2 = page_contents
# no IDs.
v_store = Cassandra.from_documents(
[
Document(page_content=pc1, metadata={"m": 1}),
Document(page_content=pc2, metadata={"m": 3}),
],
table_name=cassandra_session.table_name,
session=cassandra_session.session,
keyspace=TEST_KEYSPACE,
embedding=embedding_d2,
)
hits = v_store.similarity_search(pc2, k=1)
assert len(hits) == 1
assert hits[0].page_content == pc2
assert hits[0].metadata == {"m": "3.0"}
v_store.clear()
# IDs passed separately.
with pytest.warns(DeprecationWarning) as rec_warnings:
v_store_2 = Cassandra.from_documents(
[
Document(page_content=pc1, metadata={"m": 1}),
Document(page_content=pc2, metadata={"m": 3}),
],
ids=["idx1", "idx3"],
table_name=cassandra_session.table_name,
session=cassandra_session.session,
keyspace=TEST_KEYSPACE,
embedding=embedding_d2,
)
f_rec_warnings = [
wrn for wrn in rec_warnings if issubclass(wrn.category, DeprecationWarning)
]
assert len(f_rec_warnings) == 1
hits = v_store_2.similarity_search(pc2, k=1)
assert len(hits) == 1
assert hits[0].page_content == pc2
assert hits[0].metadata == {"m": "3.0"}
assert hits[0].id == "idx3"
v_store_2.clear()
# IDs in documents.
v_store_3 = Cassandra.from_documents(
[
Document(page_content=pc1, metadata={"m": 1}, id="idx1"),
Document(page_content=pc2, metadata={"m": 3}, id="idx3"),
],
table_name=cassandra_session.table_name,
session=cassandra_session.session,
keyspace=TEST_KEYSPACE,
embedding=embedding_d2,
)
hits = v_store_3.similarity_search(pc2, k=1)
assert len(hits) == 1
assert hits[0].page_content == pc2
assert hits[0].metadata == {"m": "3.0"}
assert hits[0].id == "idx3"
v_store_3.clear()
# IDs both in documents and aside.
with pytest.warns(DeprecationWarning) as rec_warnings:
v_store_4 = Cassandra.from_documents(
[
Document(page_content=pc1, metadata={"m": 1}),
Document(page_content=pc2, metadata={"m": 3}, id="idy3"),
],
ids=["idx1", "idx3"],
table_name=cassandra_session.table_name,
session=cassandra_session.session,
keyspace=TEST_KEYSPACE,
embedding=embedding_d2,
)
f_rec_warnings = [
wrn for wrn in rec_warnings if issubclass(wrn.category, DeprecationWarning)
]
hits = v_store_4.similarity_search(pc2, k=1)
assert len(hits) == 1
assert hits[0].page_content == pc2
assert hits[0].metadata == {"m": "3.0"}
assert hits[0].id == "idx3"
v_store_4.clear()
    @pytest.mark.parametrize(
        "page_contents",
        [
            [
                "[1,2]",
                "[3,4]",
                "[5,6]",
                "[7,8]",
                "[9,10]",
                "[11,12]",
            ],
        ],
    )
    async def test_cassandra_vectorstore_from_texts_async(
        self,
        *,
        cassandra_session: CassandraSession,
        embedding_d2: Embeddings,
        page_contents: list[str],
    ) -> None:
        """from_texts methods and the associated warnings, async version."""
        # Create a store seeded with the first two texts.
        v_store = await Cassandra.afrom_texts(
            texts=page_contents[0:2],
            metadatas=[{"m": 1}, {"m": 3}],
            ids=["ft1", "ft3"],
            table_name=cassandra_session.table_name,
            session=cassandra_session.session,
            keyspace=TEST_KEYSPACE,
            embedding=embedding_d2,
        )
        search_results_triples_0 = await v_store.asimilarity_search_with_score_id(
            page_contents[1],
            k=1,
        )
        assert len(search_results_triples_0) == 1
        res_doc_0, _, res_id_0 = search_results_triples_0[0]
        assert res_doc_0.page_content == page_contents[1]
        # Numeric metadata values come back stringified as floats.
        assert res_doc_0.metadata == {"m": "3.0"}
        assert res_id_0 == "ft3"
        # afrom_texts on the same table appends: the new rows must be visible
        # through the original store handle.
        await Cassandra.afrom_texts(
            texts=page_contents[2:4],
            metadatas=[{"m": 5}, {"m": 7}],
            ids=["ft5", "ft7"],
            table_name=cassandra_session.table_name,
            session=cassandra_session.session,
            keyspace=TEST_KEYSPACE,
            embedding=embedding_d2,
        )
        search_results_triples_1 = await v_store.asimilarity_search_with_score_id(
            page_contents[3],
            k=1,
        )
        assert len(search_results_triples_1) == 1
        res_doc_1, _, res_id_1 = search_results_triples_1[0]
        assert res_doc_1.page_content == page_contents[3]
        assert res_doc_1.metadata == {"m": "7.0"}
        assert res_id_1 == "ft7"
        # A second store handle over the same table also sees its own inserts.
        v_store_2 = await Cassandra.afrom_texts(
            texts=page_contents[4:6],
            metadatas=[{"m": 9}, {"m": 11}],
            ids=["ft9", "ft11"],
            table_name=cassandra_session.table_name,
            session=cassandra_session.session,
            keyspace=TEST_KEYSPACE,
            embedding=embedding_d2,
        )
        search_results_triples_2 = await v_store_2.asimilarity_search_with_score_id(
            page_contents[5],
            k=1,
        )
        assert len(search_results_triples_2) == 1
        res_doc_2, _, res_id_2 = search_results_triples_2[0]
        assert res_doc_2.page_content == page_contents[5]
        assert res_doc_2.metadata == {"m": "11.0"}
        assert res_id_2 == "ft11"
        await v_store_2.aclear()
    @pytest.mark.parametrize(
        "page_contents",
        [
            ["[1,2]", "[3,4]"],
        ],
    )
    async def test_cassandra_vectorstore_from_documents_async(
        self,
        *,
        cassandra_session: CassandraSession,
        embedding_d2: Embeddings,
        page_contents: list[str],
    ) -> None:
        """
        from_documents, esp. the various handling of ID-in-doc vs external.
        Async version.
        """
        pc1, pc2 = page_contents
        # no IDs.
        v_store = await Cassandra.afrom_documents(
            [
                Document(page_content=pc1, metadata={"m": 1}),
                Document(page_content=pc2, metadata={"m": 3}),
            ],
            table_name=cassandra_session.table_name,
            session=cassandra_session.session,
            keyspace=TEST_KEYSPACE,
            embedding=embedding_d2,
        )
        hits = await v_store.asimilarity_search(pc2, k=1)
        assert len(hits) == 1
        assert hits[0].page_content == pc2
        # Numeric metadata values come back stringified as floats.
        assert hits[0].metadata == {"m": "3.0"}
        await v_store.aclear()
        # IDs passed separately (deprecated calling convention).
        with pytest.warns(DeprecationWarning) as rec_warnings:
            v_store_2 = await Cassandra.afrom_documents(
                [
                    Document(page_content=pc1, metadata={"m": 1}),
                    Document(page_content=pc2, metadata={"m": 3}),
                ],
                ids=["idx1", "idx3"],
                table_name=cassandra_session.table_name,
                session=cassandra_session.session,
                keyspace=TEST_KEYSPACE,
                embedding=embedding_d2,
            )
        f_rec_warnings = [
            wrn for wrn in rec_warnings if issubclass(wrn.category, DeprecationWarning)
        ]
        assert len(f_rec_warnings) == 1
        hits = await v_store_2.asimilarity_search(pc2, k=1)
        assert len(hits) == 1
        assert hits[0].page_content == pc2
        assert hits[0].metadata == {"m": "3.0"}
        assert hits[0].id == "idx3"
        await v_store_2.aclear()
        # IDs in documents (the preferred way: no warning expected).
        v_store_3 = await Cassandra.afrom_documents(
            [
                Document(page_content=pc1, metadata={"m": 1}, id="idx1"),
                Document(page_content=pc2, metadata={"m": 3}, id="idx3"),
            ],
            table_name=cassandra_session.table_name,
            session=cassandra_session.session,
            keyspace=TEST_KEYSPACE,
            embedding=embedding_d2,
        )
        hits = await v_store_3.asimilarity_search(pc2, k=1)
        assert len(hits) == 1
        assert hits[0].page_content == pc2
        assert hits[0].metadata == {"m": "3.0"}
        assert hits[0].id == "idx3"
        await v_store_3.aclear()
        # IDs both in documents and aside: the separately-passed IDs win.
        with pytest.warns(DeprecationWarning) as rec_warnings:
            v_store_4 = await Cassandra.afrom_documents(
                [
                    Document(page_content=pc1, metadata={"m": 1}),
                    Document(page_content=pc2, metadata={"m": 3}, id="idy3"),
                ],
                ids=["idx1", "idx3"],
                table_name=cassandra_session.table_name,
                session=cassandra_session.session,
                keyspace=TEST_KEYSPACE,
                embedding=embedding_d2,
            )
        f_rec_warnings = [
            wrn for wrn in rec_warnings if issubclass(wrn.category, DeprecationWarning)
        ]
        assert len(f_rec_warnings) == 1
        hits = await v_store_4.asimilarity_search(pc2, k=1)
        assert len(hits) == 1
        assert hits[0].page_content == pc2
        assert hits[0].metadata == {"m": "3.0"}
        assert hits[0].id == "idx3"
        await v_store_4.aclear()
    def test_cassandra_vectorstore_crud_sync(
        self,
        vector_store_d2: Cassandra,
    ) -> None:
        """Add/delete/update behaviour."""
        vstore = vector_store_d2
        # the store starts empty
        res0 = vstore.similarity_search("[-1,-1]", k=2)
        assert res0 == []
        # write and check again
        added_ids = vstore.add_texts(
            texts=["[1,2]", "[3,4]", "[5,6]"],
            metadatas=[
                {"k": "a", "ord": 0},
                {"k": "b", "ord": 1},
                {"k": "c", "ord": 2},
            ],
            ids=["a", "b", "c"],
        )
        # not requiring ordered match (elsewhere it may be overwriting some)
        assert set(added_ids) == {"a", "b", "c"}
        res1 = vstore.similarity_search("[-1,-1]", k=5)
        assert {doc.page_content for doc in res1} == {"[1,2]", "[3,4]", "[5,6]"}
        res2 = vstore.similarity_search("[3,4]", k=1)
        assert len(res2) == 1
        assert res2[0].page_content == "[3,4]"
        # numeric metadata values come back stringified as floats
        assert res2[0].metadata == {"k": "b", "ord": "1.0"}
        assert res2[0].id == "b"
        # partial overwrite and count total entries
        added_ids_1 = vstore.add_texts(
            texts=["[5,6]", "[7,8]"],
            metadatas=[
                {"k": "c_new", "ord": 102},
                {"k": "d_new", "ord": 103},
            ],
            ids=["c", "d"],
        )
        # not requiring ordered match (elsewhere it may be overwriting some)
        assert set(added_ids_1) == {"c", "d"}
        res2 = vstore.similarity_search("[-1,-1]", k=10)
        assert len(res2) == 4
        # pick one that was just updated and check its metadata
        res3 = vstore.similarity_search_with_score_id(
            query="[5,6]", k=1, filter={"k": "c_new"}
        )
        doc3, _, id3 = res3[0]
        assert doc3.page_content == "[5,6]"
        assert doc3.metadata == {"k": "c_new", "ord": "102.0"}
        assert id3 == "c"
        # delete and count again
        del1_res = vstore.delete(["b"])
        assert del1_res is True
        del2_res = vstore.delete(["a", "c", "Z!"])
        assert del2_res is True  # a non-existing ID was supplied
        assert len(vstore.similarity_search("[-1,-1]", k=10)) == 1
        # clear store
        vstore.clear()
        assert vstore.similarity_search("[-1,-1]", k=2) == []
        # add_documents with "ids" arg passthrough
        vstore.add_documents(
            [
                Document(page_content="[9,10]", metadata={"k": "v", "ord": 204}),
                Document(page_content="[11,12]", metadata={"k": "w", "ord": 205}),
            ],
            ids=["v", "w"],
        )
        assert len(vstore.similarity_search("[-1,-1]", k=10)) == 2
        res4 = vstore.similarity_search("[11,12]", k=1, filter={"k": "w"})
        assert res4[0].metadata["ord"] == "205.0"
        assert res4[0].id == "w"
        # add_texts with "ids" arg passthrough
        vstore.add_texts(
            texts=["[13,14]", "[15,16]"],
            metadatas=[{"k": "r", "ord": 306}, {"k": "s", "ord": 307}],
            ids=["r", "s"],
        )
        assert len(vstore.similarity_search("[-1,-1]", k=10)) == 4
        res4 = vstore.similarity_search("[-1,-1]", k=1, filter={"k": "s"})
        assert res4[0].metadata["ord"] == "307.0"
        assert res4[0].id == "s"
        # delete_by_document_id
        vstore.delete_by_document_id("s")
        assert len(vstore.similarity_search("[-1,-1]", k=10)) == 3
    async def test_cassandra_vectorstore_crud_async(
        self,
        vector_store_d2: Cassandra,
    ) -> None:
        """Add/delete/update behaviour, async version."""
        vstore = vector_store_d2
        # the store starts empty
        res0 = await vstore.asimilarity_search("[-1,-1]", k=2)
        assert res0 == []
        # write and check again
        added_ids = await vstore.aadd_texts(
            texts=["[1,2]", "[3,4]", "[5,6]"],
            metadatas=[
                {"k": "a", "ord": 0},
                {"k": "b", "ord": 1},
                {"k": "c", "ord": 2},
            ],
            ids=["a", "b", "c"],
        )
        # not requiring ordered match (elsewhere it may be overwriting some)
        assert set(added_ids) == {"a", "b", "c"}
        res1 = await vstore.asimilarity_search("[-1,-1]", k=5)
        assert {doc.page_content for doc in res1} == {"[1,2]", "[3,4]", "[5,6]"}
        res2 = await vstore.asimilarity_search("[3,4]", k=1)
        assert len(res2) == 1
        assert res2[0].page_content == "[3,4]"
        # numeric metadata values come back stringified as floats
        assert res2[0].metadata == {"k": "b", "ord": "1.0"}
        assert res2[0].id == "b"
        # partial overwrite and count total entries
        added_ids_1 = await vstore.aadd_texts(
            texts=["[5,6]", "[7,8]"],
            metadatas=[
                {"k": "c_new", "ord": 102},
                {"k": "d_new", "ord": 103},
            ],
            ids=["c", "d"],
        )
        # not requiring ordered match (elsewhere it may be overwriting some)
        assert set(added_ids_1) == {"c", "d"}
        res2 = await vstore.asimilarity_search("[-1,-1]", k=10)
        assert len(res2) == 4
        # pick one that was just updated and check its metadata
        res3 = await vstore.asimilarity_search_with_score_id(
            query="[5,6]", k=1, filter={"k": "c_new"}
        )
        doc3, _, id3 = res3[0]
        assert doc3.page_content == "[5,6]"
        assert doc3.metadata == {"k": "c_new", "ord": "102.0"}
        assert id3 == "c"
        # delete and count again
        del1_res = await vstore.adelete(["b"])
        assert del1_res is True
        del2_res = await vstore.adelete(["a", "c", "Z!"])
        assert del2_res is True  # a non-existing ID was supplied
        assert len(await vstore.asimilarity_search("[-1,-1]", k=10)) == 1
        # clear store
        await vstore.aclear()
        assert await vstore.asimilarity_search("[-1,-1]", k=2) == []
        # add_documents with "ids" arg passthrough
        await vstore.aadd_documents(
            [
                Document(page_content="[9,10]", metadata={"k": "v", "ord": 204}),
                Document(page_content="[11,12]", metadata={"k": "w", "ord": 205}),
            ],
            ids=["v", "w"],
        )
        assert len(await vstore.asimilarity_search("[-1,-1]", k=10)) == 2
        res4 = await vstore.asimilarity_search("[11,12]", k=1, filter={"k": "w"})
        assert res4[0].metadata["ord"] == "205.0"
        assert res4[0].id == "w"
        # add_texts with "ids" arg passthrough
        await vstore.aadd_texts(
            texts=["[13,14]", "[15,16]"],
            metadatas=[{"k": "r", "ord": 306}, {"k": "s", "ord": 307}],
            ids=["r", "s"],
        )
        assert len(await vstore.asimilarity_search("[-1,-1]", k=10)) == 4
        res4 = await vstore.asimilarity_search("[-1,-1]", k=1, filter={"k": "s"})
        assert res4[0].metadata["ord"] == "307.0"
        assert res4[0].id == "s"
        # delete_by_document_id
        await vstore.adelete_by_document_id("s")
        assert len(await vstore.asimilarity_search("[-1,-1]", k=10)) == 3
def test_cassandra_vectorstore_massive_insert_replace_sync(
self,
vector_store_d2: Cassandra,
) -> None:
"""Testing the insert-many-and-replace-some patterns thoroughly."""
full_size = 300
first_group_size = 150
second_group_slicer = [30, 100, 2]
all_ids = [f"doc_{idx}" for idx in range(full_size)]
all_texts = [f"[0,{idx + 1}]" for idx in range(full_size)]
# massive insertion on empty
group0_ids = all_ids[0:first_group_size]
group0_texts = all_texts[0:first_group_size]
inserted_ids0 = vector_store_d2.add_texts(
texts=group0_texts,
ids=group0_ids,
)
assert set(inserted_ids0) == set(group0_ids)
# massive insertion with many overwrites scattered through
# (we change the text to later check on DB for successful update)
_s, _e, _st = second_group_slicer
group1_ids = all_ids[_s:_e:_st] + all_ids[first_group_size:full_size]
group1_texts = [
txt.upper()
for txt in (all_texts[_s:_e:_st] + all_texts[first_group_size:full_size])
]
inserted_ids1 = vector_store_d2.add_texts(
texts=group1_texts,
ids=group1_ids,
)
assert set(inserted_ids1) == set(group1_ids)
# final read (we want the IDs to do a full check)
expected_text_by_id = {
**dict(zip(group0_ids, group0_texts)),
**dict(zip(group1_ids, group1_texts)),
}
full_results = vector_store_d2.similarity_search_with_score_id_by_vector(
embedding=[1.0, 1.0],
k=full_size,
)
for doc, _, doc_id in full_results:
assert doc.page_content == expected_text_by_id[doc_id]
async def test_cassandra_vectorstore_massive_insert_replace_async(
self,
vector_store_d2: Cassandra,
) -> None:
"""
Testing the insert-many-and-replace-some patterns thoroughly.
Async version.
"""
full_size = 300
first_group_size = 150
second_group_slicer = [30, 100, 2]
all_ids = [f"doc_{idx}" for idx in range(full_size)]
all_texts = [f"[0,{idx + 1}]" for idx in range(full_size)]
all_embeddings = [[0, idx + 1] for idx in range(full_size)]
# massive insertion on empty
group0_ids = all_ids[0:first_group_size]
group0_texts = all_texts[0:first_group_size]
inserted_ids0 = await vector_store_d2.aadd_texts(
texts=group0_texts,
ids=group0_ids,
)
assert set(inserted_ids0) == set(group0_ids)
# massive insertion with many overwrites scattered through
# (we change the text to later check on DB for successful update)
_s, _e, _st = second_group_slicer
group1_ids = all_ids[_s:_e:_st] + all_ids[first_group_size:full_size]
group1_texts = [
txt.upper()
for txt in (all_texts[_s:_e:_st] + all_texts[first_group_size:full_size])
]
inserted_ids1 = await vector_store_d2.aadd_texts(
texts=group1_texts,
ids=group1_ids,
)
assert set(inserted_ids1) == set(group1_ids)
# final read (we want the IDs to do a full check)
expected_text_by_id = dict(zip(all_ids, all_texts))
full_results = await vector_store_d2.asimilarity_search_with_score_id_by_vector(
embedding=[1.0, 1.0],
k=full_size,
)
for doc, _, doc_id in full_results:
assert doc.page_content == expected_text_by_id[doc_id]
expected_embedding_by_id = dict(zip(all_ids, all_embeddings))
full_results_with_embeddings = (
await vector_store_d2.asimilarity_search_with_embedding_id_by_vector(
embedding=[1.0, 1.0],
k=full_size,
)
)
for doc, embedding, doc_id in full_results_with_embeddings:
assert doc.page_content == expected_text_by_id[doc_id]
assert embedding == expected_embedding_by_id[doc_id]
def test_cassandra_vectorstore_delete_by_metadata_sync(
self,
vector_store_d2: Cassandra,
) -> None:
"""Testing delete_by_metadata_filter."""
full_size = 400
# one in ... will be deleted
deletee_ratio = 3
documents = [
Document(
page_content="[1,1]", metadata={"deletee": doc_i % deletee_ratio == 0}
)
for doc_i in range(full_size)
]
num_deletees = len([doc for doc in documents if doc.metadata["deletee"]])
inserted_ids0 = vector_store_d2.add_documents(documents)
assert len(inserted_ids0) == len(documents)
d_result0 = vector_store_d2.delete_by_metadata_filter({"deletee": True})
assert d_result0 == num_deletees
count_on_store0 = len(
vector_store_d2.similarity_search("[1,1]", k=full_size + 1)
)
assert count_on_store0 == full_size - num_deletees
with pytest.raises(ValueError, match="does not accept an empty"):
vector_store_d2.delete_by_metadata_filter({})
count_on_store1 = len(
vector_store_d2.similarity_search("[1,1]", k=full_size + 1)
)
assert count_on_store1 == full_size - num_deletees
async def test_cassandra_vectorstore_delete_by_metadata_async(
self,
vector_store_d2: Cassandra,
) -> None:
"""Testing delete_by_metadata_filter, async version."""
full_size = 400
# one in ... will be deleted
deletee_ratio = 3
documents = [
Document(
page_content="[1,1]", metadata={"deletee": doc_i % deletee_ratio == 0}
)
for doc_i in range(full_size)
]
num_deletees = len([doc for doc in documents if doc.metadata["deletee"]])
inserted_ids0 = await vector_store_d2.aadd_documents(documents)
assert len(inserted_ids0) == len(documents)
d_result0 = await vector_store_d2.adelete_by_metadata_filter({"deletee": True})
assert d_result0 == num_deletees
count_on_store0 = len(
await vector_store_d2.asimilarity_search("[1,1]", k=full_size + 1)
)
assert count_on_store0 == full_size - num_deletees
with pytest.raises(ValueError, match="does not accept an empty"):
await vector_store_d2.adelete_by_metadata_filter({})
count_on_store1 = len(
await vector_store_d2.asimilarity_search("[1,1]", k=full_size + 1)
)
assert count_on_store1 == full_size - num_deletees
    def test_cassandra_replace_metadata(self) -> None:
        """Test of replacing metadata."""
        N_DOCS = 100
        REPLACE_RATIO = 2  # one in ... will have replaced metadata
        BATCH_SIZE = 3
        with vector_store_from_texts(
            texts=[],
            metadata_indexing=("allowlist", ["field1", "field2"]),
            table_name="vector_test_table_indexing",
        ) as vstore_f1:
            orig_documents = [
                Document(
                    page_content=f"doc_{doc_i}",
                    id=f"doc_id_{doc_i}",
                    metadata={"field1": f"f1_{doc_i}", "otherf": "pre"},
                )
                for doc_i in range(N_DOCS)
            ]
            vstore_f1.add_documents(orig_documents)
            ids_to_replace = [
                f"doc_id_{doc_i}"
                for doc_i in range(N_DOCS)
                if doc_i % REPLACE_RATIO == 0
            ]
            # various kinds of replacement at play here:
            # mode 0 erases all metadata; 1 sets one indexed field; 2 sets an
            # indexed and a non-indexed field; 3 sets only a non-indexed one.
            def _make_new_md(mode: int, doc_id: str) -> dict[str, str]:
                if mode == 0:
                    return {}
                elif mode == 1:
                    return {"field2": f"NEW_{doc_id}"}
                elif mode == 2:
                    return {"field2": f"NEW_{doc_id}", "ofherf2": "post"}
                else:
                    return {"ofherf2": "post"}
            ids_to_new_md = {
                doc_id: _make_new_md(rep_i % 4, doc_id)
                for rep_i, doc_id in enumerate(ids_to_replace)
            }
            vstore_f1.replace_metadata(ids_to_new_md, batch_size=BATCH_SIZE)
            # thorough check: replacement is total (not a merge), so replaced
            # IDs must carry exactly the new metadata, others the original.
            expected_id_to_metadata: dict[str, dict] = {
                **{
                    (document.id or ""): document.metadata
                    for document in orig_documents
                },
                **ids_to_new_md,
            }
            for hit in vstore_f1.similarity_search("doc", k=N_DOCS + 1):
                assert hit.id is not None
                assert hit.metadata == expected_id_to_metadata[hit.id]
    async def test_cassandra_replace_metadata_async(self) -> None:
        """Test of replacing metadata."""
        N_DOCS = 100
        REPLACE_RATIO = 2  # one in ... will have replaced metadata
        BATCH_SIZE = 3
        async with vector_store_from_texts_async(
            texts=[],
            metadata_indexing=("allowlist", ["field1", "field2"]),
            table_name="vector_test_table_indexing",
            embedding=ConsistentFakeEmbeddings(),
        ) as vstore_f1:
            orig_documents = [
                Document(
                    page_content=f"doc_{doc_i}",
                    id=f"doc_id_{doc_i}",
                    metadata={"field1": f"f1_{doc_i}", "otherf": "pre"},
                )
                for doc_i in range(N_DOCS)
            ]
            await vstore_f1.aadd_documents(orig_documents)
            ids_to_replace = [
                f"doc_id_{doc_i}"
                for doc_i in range(N_DOCS)
                if doc_i % REPLACE_RATIO == 0
            ]
            # various kinds of replacement at play here:
            # mode 0 erases all metadata; 1 sets one indexed field; 2 sets an
            # indexed and a non-indexed field; 3 sets only a non-indexed one.
            def _make_new_md(mode: int, doc_id: str) -> dict[str, str]:
                if mode == 0:
                    return {}
                elif mode == 1:
                    return {"field2": f"NEW_{doc_id}"}
                elif mode == 2:
                    return {"field2": f"NEW_{doc_id}", "ofherf2": "post"}
                else:
                    return {"ofherf2": "post"}
            ids_to_new_md = {
                doc_id: _make_new_md(rep_i % 4, doc_id)
                for rep_i, doc_id in enumerate(ids_to_replace)
            }
            # NOTE(review): the async API takes `concurrency` where the sync
            # test passes `batch_size` — confirm this asymmetry is intended.
            await vstore_f1.areplace_metadata(ids_to_new_md, concurrency=BATCH_SIZE)
            # thorough check: replacement is total (not a merge), so replaced
            # IDs must carry exactly the new metadata, others the original.
            expected_id_to_metadata: dict[str, dict] = {
                **{
                    (document.id or ""): document.metadata
                    for document in orig_documents
                },
                **ids_to_new_md,
            }
            for hit in await vstore_f1.asimilarity_search("doc", k=N_DOCS + 1):
                assert hit.id is not None
                assert hit.metadata == expected_id_to_metadata[hit.id]
def test_cassandra_vectorstore_mmr_sync(
self,
vector_store_d2: Cassandra,
) -> None:
"""MMR testing. We work on the unit circle with angle multiples
of 2*pi/20 and prepare a store with known vectors for a controlled
MMR outcome.
"""
def _v_from_i(i: int, n: int) -> str:
angle = 2 * math.pi * i / n
vector = [math.cos(angle), math.sin(angle)]
return json.dumps(vector)
i_vals = [0, 4, 5, 13]
n_val = 20
vector_store_d2.add_texts(
[_v_from_i(i, n_val) for i in i_vals], metadatas=[{"i": i} for i in i_vals]
)
res1 = vector_store_d2.max_marginal_relevance_search(
_v_from_i(3, n_val),
k=2,
fetch_k=3,
)
res_i_vals = {doc.metadata["i"] for doc in res1}
assert res_i_vals == {"0.0", "4.0"}
async def test_cassandra_vectorstore_mmr_async(
self,
vector_store_d2: Cassandra,
) -> None:
"""MMR testing. We work on the unit circle with angle multiples
of 2*pi/20 and prepare a store with known vectors for a controlled
MMR outcome.
Async version.
"""
def _v_from_i(i: int, n: int) -> str:
angle = 2 * math.pi * i / n
vector = [math.cos(angle), math.sin(angle)]
return json.dumps(vector)
i_vals = [0, 4, 5, 13]
n_val = 20
await vector_store_d2.aadd_texts(
[_v_from_i(i, n_val) for i in i_vals],
metadatas=[{"i": i} for i in i_vals],
)
res1 = await vector_store_d2.amax_marginal_relevance_search(
_v_from_i(3, n_val),
k=2,
fetch_k=3,
)
res_i_vals = {doc.metadata["i"] for doc in res1}
assert res_i_vals == {"0.0", "4.0"}
def test_cassandra_vectorstore_metadata_filter(
self,
vector_store_d2: Cassandra,
metadata_documents: list[Document],
) -> None:
"""Metadata filtering."""
vstore = vector_store_d2
vstore.add_documents(metadata_documents)
# no filters
res0 = vstore.similarity_search("[-1,-1]", k=10)
assert {doc.metadata["letter"] for doc in res0} == set("qwreio")
# single filter
res1 = vstore.similarity_search(
"[-1,-1]",
k=10,
filter={"group": "vowel"},
)
assert {doc.metadata["letter"] for doc in res1} == set("eio")
# multiple filters
res2 = vstore.similarity_search(
"[-1,-1]",
k=10,
filter={"group": "consonant", "ord": str(ord("q"))},
)
assert {doc.metadata["letter"] for doc in res2} == set("q")
# excessive filters
res3 = vstore.similarity_search(
"[-1,-1]",
k=10,
filter={"group": "consonant", "ord": str(ord("q")), "case": "upper"},
)
assert res3 == []
def test_cassandra_vectorstore_metadata_search_sync(
self,
vector_store_d2: Cassandra,
metadata_documents: list[Document],
) -> None:
"""Metadata Search"""
vstore = vector_store_d2
vstore.add_documents(metadata_documents)
# no filters
res0 = vstore.metadata_search(filter={}, n=10)
assert {doc.metadata["letter"] for doc in res0} == set("qwreio")
# single filter
res1 = vstore.metadata_search(
n=10,
filter={"group": "vowel"},
)
assert {doc.metadata["letter"] for doc in res1} == set("eio")
# multiple filters
res2 = vstore.metadata_search(
n=10,
filter={"group": "consonant", "ord": str(ord("q"))},
)
assert {doc.metadata["letter"] for doc in res2} == set("q")
# excessive filters
res3 = vstore.metadata_search(
n=10,
filter={"group": "consonant", "ord": str(ord("q")), "case": "upper"},
)
assert res3 == []
async def test_cassandra_vectorstore_metadata_search_async(
self,
vector_store_d2: Cassandra,
metadata_documents: list[Document],
) -> None:
"""Metadata Search"""
vstore = vector_store_d2
await vstore.aadd_documents(metadata_documents)
# no filters
res0 = await vstore.ametadata_search(filter={}, n=10)
assert {doc.metadata["letter"] for doc in res0} == set("qwreio")
# single filter
res1 = vstore.metadata_search(
n=10,
filter={"group": "vowel"},
)
assert {doc.metadata["letter"] for doc in res1} == set("eio")
# multiple filters
res2 = await vstore.ametadata_search(
n=10,
filter={"group": "consonant", "ord": str(ord("q"))},
)
assert {doc.metadata["letter"] for doc in res2} == set("q")
# excessive filters
res3 = await vstore.ametadata_search(
n=10,
filter={"group": "consonant", "ord": str(ord("q")), "case": "upper"},
)
assert res3 == []
def test_cassandra_vectorstore_get_by_document_id_sync(
    self,
    vector_store_d2: Cassandra,
    metadata_documents: list[Document],
) -> None:
    """Get by document_id: a miss returns None, a hit round-trips the doc."""
    store = vector_store_d2
    store.add_documents(metadata_documents)
    # An unknown id yields None rather than raising.
    assert store.get_by_document_id(document_id="z") is None
    # A known id returns the full Document with content and metadata intact.
    doc = store.get_by_document_id(document_id="q")
    assert isinstance(doc, Document)
    assert doc.id == "q"
    assert doc.page_content == "[1,2]"
    assert doc.metadata["group"] == "consonant"
    assert doc.metadata["letter"] == "q"
async def test_cassandra_vectorstore_get_by_document_id_async(
    self,
    vector_store_d2: Cassandra,
    metadata_documents: list[Document],
) -> None:
    """Get by document_id, async API: miss is None, hit round-trips."""
    store = vector_store_d2
    await store.aadd_documents(metadata_documents)
    # An unknown id yields None rather than raising.
    assert await store.aget_by_document_id(document_id="z") is None
    # A known id returns the full Document with content and metadata intact.
    doc = await store.aget_by_document_id(document_id="q")
    assert isinstance(doc, Document)
    assert doc.id == "q"
    assert doc.page_content == "[1,2]"
    assert doc.metadata["group"] == "consonant"
    assert doc.metadata["letter"] == "q"
@pytest.mark.parametrize(
    ("texts", "query"),
    [
        (
            ["[1,1]", "[-1,-1]"],
            "[0.99999,1.00001]",
        ),
    ],
)
def test_cassandra_vectorstore_similarity_scale_sync(
    self,
    *,
    vector_store_d2: Cassandra,
    texts: list[str],
    query: str,
) -> None:
    """Scale of the similarity scores."""
    store = vector_store_d2
    store.add_texts(
        texts=texts,
        ids=["near", "far"],
    )
    pairs = store.similarity_search_with_score(
        query,
        k=2,
    )
    sco_near, sco_far = [score for _, score in pairs]
    # The near-duplicate vector must score ~1; the opposite vector must
    # stay non-negative and below the euclidean unit-vector lower bound.
    assert abs(1 - sco_near) < MATCH_EPSILON
    assert 0 <= sco_far < EUCLIDEAN_MIN_SIM_UNIT_VECTORS + MATCH_EPSILON
@pytest.mark.parametrize(
    ("texts", "query"),
    [
        (
            ["[1,1]", "[-1,-1]"],
            "[0.99999,1.00001]",
        ),
    ],
)
async def test_cassandra_vectorstore_similarity_scale_async(
    self,
    *,
    vector_store_d2: Cassandra,
    texts: list[str],
    query: str,
) -> None:
    """Scale of the similarity scores, async version."""
    store = vector_store_d2
    await store.aadd_texts(
        texts=texts,
        ids=["near", "far"],
    )
    pairs = await store.asimilarity_search_with_score(
        query,
        k=2,
    )
    sco_near, sco_far = [score for _, score in pairs]
    # The near-duplicate vector must score ~1; the opposite vector must
    # stay non-negative and below the euclidean unit-vector lower bound.
    assert abs(1 - sco_near) < MATCH_EPSILON
    assert 0 <= sco_far < EUCLIDEAN_MIN_SIM_UNIT_VECTORS + MATCH_EPSILON
def test_cassandra_vectorstore_massive_delete(
    self,
    vector_store_d2: Cassandra,
) -> None:
    """Larger-scale bulk deletes."""
    store = vector_store_d2
    m = 150
    texts = [f"[0,{i + 1 / 7.0}]" for i in range(2 * m)]
    all_ids = [f"doc_{i}" for i in range(2 * m)]
    first_half, second_half = all_ids[:m], all_ids[m:]
    store.add_texts(texts=texts, ids=all_ids)
    # Deleting the first batch succeeds cleanly.
    assert store.delete(first_half) is True
    # Deleting the rest plus a nonexistent id must not raise either.
    assert store.delete([*second_half, "ghost!"]) is True
    # Nothing should remain in the store.
    assert store.similarity_search("[-1,-1]", k=2 * m) == []
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_oraclevs.py | """Test Oracle AI Vector Search functionality."""
# import required modules
import sys
import threading
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores.oraclevs import (
OracleVS,
_create_table,
_index_exists,
_table_exists,
create_index,
drop_index_if_exists,
drop_table_purge,
)
from langchain_community.vectorstores.utils import DistanceStrategy
username = ""
password = ""
dsn = ""
############################
####### table_exists #######
############################
def test_table_exists_test() -> None:
    """Exercise ``_table_exists`` with valid, invalid and unicode names.

    Returns silently when ``oracledb`` is not installed; exits the whole
    process when the configured Oracle instance cannot be reached.
    Cases expected to raise are wrapped in try/except so each numbered
    scenario runs regardless of the previous one.
    """
    try:
        import oracledb
    except ImportError:
        return
    try:
        connection = oracledb.connect(user=username, password=password, dsn=dsn)
    except Exception:
        # NOTE(review): sys.exit in a test aborts the run; presumably
        # intentional to flag a missing database early — confirm.
        sys.exit(1)
    # 1. Existing Table:(all capital letters)
    # expectation:True
    _table_exists(connection, "V$TRANSACTION")
    # 2. Existing Table:(all small letters)
    # expectation:True
    _table_exists(connection, "v$transaction")
    # 3. Non-Existing Table
    # expectation:false
    _table_exists(connection, "Hello")
    # 4. Invalid Table Name
    # Expectation:ORA-00903: invalid table name
    try:
        _table_exists(connection, "123")
    except Exception:
        pass
    # 5. Empty String
    # Expectation:ORA-00903: invalid table name
    try:
        _table_exists(connection, "")
    except Exception:
        pass
    # 6. Special Character
    # Expectation:ORA-00911: #: invalid character after FROM
    try:
        _table_exists(connection, "##4")
    except Exception:
        pass
    # 7. Table name length > 128
    # Expectation:ORA-00972: The identifier XXXXXXXXXX...XXXXXXXXXX...
    # exceeds the maximum length of 128 bytes.
    try:
        _table_exists(connection, "x" * 129)
    except Exception:
        pass
    # 8. <Schema_Name.Table_Name>
    # Expectation:True
    _create_table(connection, "TB1", 65535)
    # 9. Toggle Case (like TaBlE)
    # Expectation:True
    _table_exists(connection, "Tb1")
    drop_table_purge(connection, "TB1")
    # 10. Table_Name→ "हिन्दी"
    # Expectation:True
    _create_table(connection, '"हिन्दी"', 545)
    _table_exists(connection, '"हिन्दी"')
    drop_table_purge(connection, '"हिन्दी"')
############################
####### create_table #######
############################
def test_create_table_test() -> None:
    """Exercise ``_create_table`` with valid, invalid and edge-case inputs.

    Every table that is successfully created is removed again with
    ``drop_table_purge`` so the schema stays clean between cases.
    Cases expected to fail with an ORA-* error are wrapped in try/except.
    """
    try:
        import oracledb
    except ImportError:
        return
    try:
        connection = oracledb.connect(user=username, password=password, dsn=dsn)
    except Exception:
        sys.exit(1)
    # 1. New table - HELLO
    # Dimension - 100
    # Expectation:table is created
    _create_table(connection, "HELLO", 100)
    # 2. Existing table name
    # HELLO
    # Dimension - 110
    # Expectation:Nothing happens
    _create_table(connection, "HELLO", 110)
    drop_table_purge(connection, "HELLO")
    # 3. New Table - 123
    # Dimension - 100
    # Expectation:ORA-00903: invalid table name
    try:
        _create_table(connection, "123", 100)
        drop_table_purge(connection, "123")
    except Exception:
        pass
    # 4. New Table - Hello123
    # Dimension - 65535
    # Expectation:table is created
    _create_table(connection, "Hello123", 65535)
    drop_table_purge(connection, "Hello123")
    # 5. New Table - T1
    # Dimension - 65536
    # Expectation:ORA-51801: VECTOR column type specification
    # has an unsupported dimension count ('65536').
    try:
        _create_table(connection, "T1", 65536)
        drop_table_purge(connection, "T1")
    except Exception:
        pass
    # 6. New Table - T1
    # Dimension - 0
    # Expectation:ORA-51801: VECTOR column type specification has
    # an unsupported dimension count (0).
    try:
        _create_table(connection, "T1", 0)
        drop_table_purge(connection, "T1")
    except Exception:
        pass
    # 7. New Table - T1
    # Dimension - -1
    # Expectation:ORA-51801: VECTOR column type specification has
    # an unsupported dimension count ('-').
    try:
        _create_table(connection, "T1", -1)
        drop_table_purge(connection, "T1")
    except Exception:
        pass
    # 8. New Table - T2
    # Dimension - '1000'
    # Expectation:table is created
    _create_table(connection, "T2", int("1000"))
    drop_table_purge(connection, "T2")
    # 9. New Table - T3
    # Dimension - 100 passed as a variable
    # Expectation:table is created
    val = 100
    _create_table(connection, "T3", val)
    drop_table_purge(connection, "T3")
    # 10.
    # Expectation:ORA-00922: missing or invalid option
    # (table name containing an embedded newline)
    val2 = """H
    ello"""
    try:
        _create_table(connection, val2, 545)
        drop_table_purge(connection, val2)
    except Exception:
        pass
    # 11. New Table - हिन्दी
    # Dimension - 545
    # Expectation:table is created
    _create_table(connection, '"हिन्दी"', 545)
    drop_table_purge(connection, '"हिन्दी"')
    # 12. <schema_name.table_name>
    # Expectation:failure - user does not exist
    try:
        _create_table(connection, "U1.TB4", 128)
        drop_table_purge(connection, "U1.TB4")
    except Exception:
        pass
    # 13.
    # Expectation:table is created
    _create_table(connection, '"T5"', 128)
    drop_table_purge(connection, '"T5"')
    # 14. Toggle Case
    # Expectation:table creation fails
    try:
        _create_table(connection, "TaBlE", 128)
        drop_table_purge(connection, "TaBlE")
    except Exception:
        pass
    # 15. table_name as empty_string
    # Expectation: ORA-00903: invalid table name
    try:
        _create_table(connection, "", 128)
        drop_table_purge(connection, "")
        _create_table(connection, '""', 128)
        drop_table_purge(connection, '""')
    except Exception:
        pass
    # 16. Arithmetic Operations in dimension parameter
    # Expectation:table is created
    n = 1
    _create_table(connection, "T10", n + 500)
    drop_table_purge(connection, "T10")
    # 17. String Operations in table_name&dimension parameter
    # Expectation:table is created
    _create_table(connection, "YaSh".replace("aS", "ok"), 500)
    drop_table_purge(connection, "YaSh".replace("aS", "ok"))
##################################
####### create_hnsw_index #######
##################################
def test_create_hnsw_index_test() -> None:
    """Exercise HNSW index creation across names, parameters and bounds.

    Walks through numbered scenarios: valid/duplicate/unicode index names,
    empty identifiers, the documented limits of ``efConstruction``,
    ``neighbors``, ``parallel`` and ``accuracy``, plus schema-qualified and
    over-long identifiers. Cases expected to fail are wrapped in
    try/except so each scenario runs regardless of the previous one;
    every created index/table is dropped afterwards.
    """
    try:
        import oracledb
    except ImportError:
        return
    try:
        connection = oracledb.connect(user=username, password=password, dsn=dsn)
    except Exception:
        sys.exit(1)
    # 1. Table_name - TB1
    # New Index
    # distance_strategy - DistanceStrategy.Dot_product
    # Expectation:Index created
    model1 = HuggingFaceEmbeddings(
        model_name="sentence-transformers/paraphrase-mpnet-base-v2"
    )
    vs = OracleVS(connection, model1, "TB1", DistanceStrategy.EUCLIDEAN_DISTANCE)
    create_index(connection, vs)
    # 2. Creating same index again
    # Table_name - TB1
    # Expectation:Nothing happens
    try:
        create_index(connection, vs)
        drop_index_if_exists(connection, "HNSW")
    except Exception:
        pass
    drop_table_purge(connection, "TB1")
    # 3. Create index with following parameters:
    # idx_name - hnsw_idx2
    # idx_type - HNSW
    # Expectation:Index created
    vs = OracleVS(connection, model1, "TB2", DistanceStrategy.EUCLIDEAN_DISTANCE)
    create_index(connection, vs, params={"idx_name": "hnsw_idx2", "idx_type": "HNSW"})
    drop_index_if_exists(connection, "hnsw_idx2")
    drop_table_purge(connection, "TB2")
    # 4. Table Name - TB1
    # idx_name - "हिन्दी"
    # idx_type - HNSW
    # Expectation:Index created
    try:
        vs = OracleVS(connection, model1, "TB3", DistanceStrategy.EUCLIDEAN_DISTANCE)
        create_index(connection, vs, params={"idx_name": '"हिन्दी"', "idx_type": "HNSW"})
        drop_index_if_exists(connection, '"हिन्दी"')
    except Exception:
        pass
    drop_table_purge(connection, "TB3")
    # 5. idx_name passed empty
    # Expectation:ORA-01741: illegal zero-length identifier
    try:
        vs = OracleVS(connection, model1, "TB4", DistanceStrategy.EUCLIDEAN_DISTANCE)
        create_index(connection, vs, params={"idx_name": '""', "idx_type": "HNSW"})
        drop_index_if_exists(connection, '""')
    except Exception:
        pass
    drop_table_purge(connection, "TB4")
    # 6. idx_type left empty
    # Expectation:Index created
    try:
        vs = OracleVS(connection, model1, "TB5", DistanceStrategy.EUCLIDEAN_DISTANCE)
        create_index(connection, vs, params={"idx_name": "Hello", "idx_type": ""})
        drop_index_if_exists(connection, "Hello")
    except Exception:
        pass
    drop_table_purge(connection, "TB5")
    # 7. efconstruction passed as parameter but not neighbours
    # Expectation:Index created
    vs = OracleVS(connection, model1, "TB7", DistanceStrategy.EUCLIDEAN_DISTANCE)
    create_index(
        connection,
        vs,
        params={"idx_name": "idx11", "efConstruction": 100, "idx_type": "HNSW"},
    )
    drop_index_if_exists(connection, "idx11")
    drop_table_purge(connection, "TB7")
    # 8. efconstruction passed as parameter as well as neighbours
    # (for this idx_type parameter is also necessary)
    # Expectation:Index created
    vs = OracleVS(connection, model1, "TB8", DistanceStrategy.EUCLIDEAN_DISTANCE)
    create_index(
        connection,
        vs,
        params={
            "idx_name": "idx11",
            "efConstruction": 100,
            "neighbors": 80,
            "idx_type": "HNSW",
        },
    )
    drop_index_if_exists(connection, "idx11")
    drop_table_purge(connection, "TB8")
    # 9. Limit of Values for(integer values):
    # parallel
    # efConstruction
    # Neighbors
    # Accuracy
    # 0<Accuracy<=100
    # 0<Neighbour<=2048
    # 0<efConstruction<=65535
    # 0<parallel<=255
    # Expectation:Index created
    drop_table_purge(connection, "TB9")
    vs = OracleVS(connection, model1, "TB9", DistanceStrategy.EUCLIDEAN_DISTANCE)
    create_index(
        connection,
        vs,
        params={
            "idx_name": "idx11",
            "efConstruction": 65535,
            "neighbors": 2048,
            "idx_type": "HNSW",
            "parallel": 255,
        },
    )
    drop_index_if_exists(connection, "idx11")
    drop_table_purge(connection, "TB9")
    # index not created: efConstruction below the valid range (0)
    try:
        vs = OracleVS(connection, model1, "TB10", DistanceStrategy.EUCLIDEAN_DISTANCE)
        create_index(
            connection,
            vs,
            params={
                "idx_name": "idx11",
                "efConstruction": 0,
                "neighbors": 2048,
                "idx_type": "HNSW",
                "parallel": 255,
            },
        )
        drop_index_if_exists(connection, "idx11")
    except Exception:
        pass
    # index not created: neighbors below the valid range (0)
    try:
        vs = OracleVS(connection, model1, "TB11", DistanceStrategy.EUCLIDEAN_DISTANCE)
        create_index(
            connection,
            vs,
            params={
                "idx_name": "idx11",
                "efConstruction": 100,
                "neighbors": 0,
                "idx_type": "HNSW",
                "parallel": 255,
            },
        )
        drop_index_if_exists(connection, "idx11")
    except Exception:
        pass
    # index not created: parallel below the valid range (0)
    try:
        vs = OracleVS(connection, model1, "TB12", DistanceStrategy.EUCLIDEAN_DISTANCE)
        create_index(
            connection,
            vs,
            params={
                "idx_name": "idx11",
                "efConstruction": 100,
                "neighbors": 100,
                "idx_type": "HNSW",
                "parallel": 0,
            },
        )
        drop_index_if_exists(connection, "idx11")
    except Exception:
        pass
    # index not created: accuracy above the valid range (120 > 100)
    try:
        vs = OracleVS(connection, model1, "TB13", DistanceStrategy.EUCLIDEAN_DISTANCE)
        create_index(
            connection,
            vs,
            params={
                "idx_name": "idx11",
                "efConstruction": 10,
                "neighbors": 100,
                "idx_type": "HNSW",
                "parallel": 10,
                "accuracy": 120,
            },
        )
        drop_index_if_exists(connection, "idx11")
    except Exception:
        pass
    # with negative values/out-of-bound values for all 4 of them, we get the same errors
    # Expectation:Index not created
    try:
        vs = OracleVS(connection, model1, "TB14", DistanceStrategy.EUCLIDEAN_DISTANCE)
        create_index(
            connection,
            vs,
            params={
                "idx_name": "idx11",
                "efConstruction": 200,
                "neighbors": 100,
                "idx_type": "HNSW",
                "parallel": "hello",
                "accuracy": 10,
            },
        )
        drop_index_if_exists(connection, "idx11")
    except Exception:
        pass
    drop_table_purge(connection, "TB10")
    drop_table_purge(connection, "TB11")
    drop_table_purge(connection, "TB12")
    drop_table_purge(connection, "TB13")
    drop_table_purge(connection, "TB14")
    # 10. Table_name as <schema_name.table_name>
    # Expectation:Index created
    vs = OracleVS(connection, model1, "TB15", DistanceStrategy.EUCLIDEAN_DISTANCE)
    create_index(
        connection,
        vs,
        params={
            "idx_name": "idx11",
            "efConstruction": 200,
            "neighbors": 100,
            "idx_type": "HNSW",
            "parallel": 8,
            "accuracy": 10,
        },
    )
    drop_index_if_exists(connection, "idx11")
    drop_table_purge(connection, "TB15")
    # 11. index_name as <schema_name.index_name>
    # Expectation:U1 not present
    try:
        vs = OracleVS(
            connection, model1, "U1.TB16", DistanceStrategy.EUCLIDEAN_DISTANCE
        )
        create_index(
            connection,
            vs,
            params={
                "idx_name": "U1.idx11",
                "efConstruction": 200,
                "neighbors": 100,
                "idx_type": "HNSW",
                "parallel": 8,
                "accuracy": 10,
            },
        )
        drop_index_if_exists(connection, "U1.idx11")
        drop_table_purge(connection, "TB16")
    except Exception:
        pass
    # 12. Index_name size >129
    # Expectation:Index not created
    try:
        vs = OracleVS(connection, model1, "TB17", DistanceStrategy.EUCLIDEAN_DISTANCE)
        create_index(connection, vs, params={"idx_name": "x" * 129, "idx_type": "HNSW"})
        drop_index_if_exists(connection, "x" * 129)
    except Exception:
        pass
    drop_table_purge(connection, "TB17")
    # 13. Index_name size 128
    # Expectation:Index created
    vs = OracleVS(connection, model1, "TB18", DistanceStrategy.EUCLIDEAN_DISTANCE)
    create_index(connection, vs, params={"idx_name": "x" * 128, "idx_type": "HNSW"})
    drop_index_if_exists(connection, "x" * 128)
    drop_table_purge(connection, "TB18")
##################################
####### index_exists #############
##################################
def test_index_exists_test() -> None:
    """Exercise ``_index_exists`` with valid, invalid and unicode names.

    Creates one HNSW index ("idx11") up front and then probes it with
    different casings, schema qualification, bad identifiers and a unicode
    name. Cases expected to raise are wrapped in try/except. The previously
    duplicated empty-string check (the same try block appeared twice
    back-to-back) has been collapsed into one.
    """
    try:
        import oracledb
    except ImportError:
        return
    try:
        connection = oracledb.connect(user=username, password=password, dsn=dsn)
    except Exception:
        sys.exit(1)
    model1 = HuggingFaceEmbeddings(
        model_name="sentence-transformers/paraphrase-mpnet-base-v2"
    )
    # 1. Existing Index:(all capital letters)
    # Expectation:true
    vs = OracleVS(connection, model1, "TB1", DistanceStrategy.EUCLIDEAN_DISTANCE)
    create_index(connection, vs, params={"idx_name": "idx11", "idx_type": "HNSW"})
    _index_exists(connection, "IDX11")
    # 2. Existing Table:(all small letters)
    # Expectation:true
    _index_exists(connection, "idx11")
    # 3. Non-Existing Index
    # Expectation:False
    _index_exists(connection, "Hello")
    # 4. Invalid Index Name
    # Expectation:Error
    try:
        _index_exists(connection, "123")
    except Exception:
        pass
    # 5. Empty String
    # Expectation:Error
    try:
        _index_exists(connection, "")
    except Exception:
        pass
    # 6. Special Character
    # Expectation:Error
    try:
        _index_exists(connection, "##4")
    except Exception:
        pass
    # 7. Index name length > 128
    # Expectation:Error
    try:
        _index_exists(connection, "x" * 129)
    except Exception:
        pass
    # 8. <Schema_Name.Index_Name>
    # Expectation:true
    _index_exists(connection, "U1.IDX11")
    # 9. Toggle Case (like iDx11)
    # Expectation:true
    _index_exists(connection, "IdX11")
    # 10. Index_Name→ "हिन्दी"
    # Expectation:true
    drop_index_if_exists(connection, "idx11")
    try:
        create_index(connection, vs, params={"idx_name": '"हिन्दी"', "idx_type": "HNSW"})
        _index_exists(connection, '"हिन्दी"')
    except Exception:
        pass
    drop_table_purge(connection, "TB1")
##################################
####### add_texts ################
##################################
def test_add_texts_test() -> None:
    """Exercise ``add_texts`` with metadata, ids, bulk and concurrent loads.

    Covers: plain inserts, inserts without metadata, all id-string shapes
    (plain, empty, multi-line, quoted), ids+metadata together, a 10k bulk
    insert, concurrent inserts of distinct and duplicate keys, and a
    schema-qualified table name that should fail.
    """
    try:
        import oracledb
    except ImportError:
        return
    try:
        connection = oracledb.connect(user=username, password=password, dsn=dsn)
    except Exception:
        sys.exit(1)
    # 1. Add 2 records to table
    # Expectation:Successful
    texts = ["Rohan", "Shailendra"]
    metadata = [
        {"id": "100", "link": "Document Example Test 1"},
        {"id": "101", "link": "Document Example Test 2"},
    ]
    model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
    vs_obj = OracleVS(connection, model, "TB1", DistanceStrategy.EUCLIDEAN_DISTANCE)
    vs_obj.add_texts(texts, metadata)
    drop_table_purge(connection, "TB1")
    # 2. Add record but metadata is not there
    # Expectation:An exception occurred :: Either specify an 'ids' list or
    # 'metadatas' with an 'id' attribute for each element.
    model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
    vs_obj = OracleVS(connection, model, "TB2", DistanceStrategy.EUCLIDEAN_DISTANCE)
    texts2 = ["Sri Ram", "Krishna"]
    vs_obj.add_texts(texts2)
    drop_table_purge(connection, "TB2")
    # 3. Add record with ids option
    # ids are passed as string
    # ids are passed as empty string
    # ids are passed as multi-line string
    # ids are passed as "<string>"
    # Expectations:
    # Successful
    # Successful
    # Successful
    # Successful
    vs_obj = OracleVS(connection, model, "TB4", DistanceStrategy.EUCLIDEAN_DISTANCE)
    ids3 = ["114", "124"]
    vs_obj.add_texts(texts2, ids=ids3)
    drop_table_purge(connection, "TB4")
    vs_obj = OracleVS(connection, model, "TB5", DistanceStrategy.EUCLIDEAN_DISTANCE)
    ids4 = ["", "134"]
    vs_obj.add_texts(texts2, ids=ids4)
    drop_table_purge(connection, "TB5")
    vs_obj = OracleVS(connection, model, "TB6", DistanceStrategy.EUCLIDEAN_DISTANCE)
    ids5 = [
        """Good afternoon
    my friends""",
        "India",
    ]
    vs_obj.add_texts(texts2, ids=ids5)
    drop_table_purge(connection, "TB6")
    vs_obj = OracleVS(connection, model, "TB7", DistanceStrategy.EUCLIDEAN_DISTANCE)
    ids6 = ['"Good afternoon"', '"India"']
    vs_obj.add_texts(texts2, ids=ids6)
    drop_table_purge(connection, "TB7")
    # 4. Add records with ids and metadatas
    # Expectation:Successful
    vs_obj = OracleVS(connection, model, "TB8", DistanceStrategy.EUCLIDEAN_DISTANCE)
    texts3 = ["Sri Ram 6", "Krishna 6"]
    ids7 = ["1", "2"]
    metadata = [
        {"id": "102", "link": "Document Example", "stream": "Science"},
        {"id": "104", "link": "Document Example 45"},
    ]
    vs_obj.add_texts(texts3, metadata, ids=ids7)
    drop_table_purge(connection, "TB8")
    # 5. Add 10000 records
    # Expectation:Successful
    vs_obj = OracleVS(connection, model, "TB9", DistanceStrategy.EUCLIDEAN_DISTANCE)
    texts4 = ["Sri Ram{0}".format(i) for i in range(1, 10000)]
    ids8 = ["Hello{0}".format(i) for i in range(1, 10000)]
    vs_obj.add_texts(texts4, ids=ids8)
    drop_table_purge(connection, "TB9")
    # 6. Add 2 different record concurrently
    # Expectation:Successful
    def add(val: str) -> None:
        model = HuggingFaceEmbeddings(
            model_name="sentence-transformers/all-mpnet-base-v2"
        )
        vs_obj = OracleVS(
            connection, model, "TB10", DistanceStrategy.EUCLIDEAN_DISTANCE
        )
        texts5 = [val]
        ids9 = texts5
        vs_obj.add_texts(texts5, ids=ids9)

    # BUG FIX: ``args`` must be a tuple. ``args=("Sri Ram")`` is just a
    # parenthesized string, so the target was invoked with seven
    # single-character positional arguments and raised TypeError inside
    # the thread. The trailing comma makes it a 1-tuple.
    thread_1 = threading.Thread(target=add, args=("Sri Ram",))
    thread_2 = threading.Thread(target=add, args=("Sri Krishna",))
    thread_1.start()
    thread_2.start()
    thread_1.join()
    thread_2.join()
    drop_table_purge(connection, "TB10")
    # 7. Add 2 same record concurrently
    # Expectation:Successful, For one of the insert,get primary key violation error
    def add1(val: str) -> None:
        model = HuggingFaceEmbeddings(
            model_name="sentence-transformers/all-mpnet-base-v2"
        )
        vs_obj = OracleVS(
            connection, model, "TB11", DistanceStrategy.EUCLIDEAN_DISTANCE
        )
        texts = [val]
        ids10 = texts
        vs_obj.add_texts(texts, ids=ids10)

    try:
        # Same tuple fix as above.
        thread_1 = threading.Thread(target=add1, args=("Sri Ram",))
        thread_2 = threading.Thread(target=add1, args=("Sri Ram",))
        thread_1.start()
        thread_2.start()
        thread_1.join()
        thread_2.join()
    except Exception:
        pass
    drop_table_purge(connection, "TB11")
    # 8. create object with table name of type <schema_name.table_name>
    # Expectation:U1 does not exist
    try:
        vs_obj = OracleVS(connection, model, "U1.TB14", DistanceStrategy.DOT_PRODUCT)
        for i in range(1, 10):
            texts7 = ["Yash{0}".format(i)]
            ids13 = ["1234{0}".format(i)]
            vs_obj.add_texts(texts7, ids=ids13)
        drop_table_purge(connection, "TB14")
    except Exception:
        pass
##################################
####### embed_documents(text) ####
##################################
def test_embed_documents_test() -> None:
    """Smoke-test ``_embed_documents`` on a small list of strings."""
    try:
        import oracledb
    except ImportError:
        return
    try:
        connection = oracledb.connect(user=username, password=password, dsn=dsn)
    except Exception:
        sys.exit(1)
    # 1. String Example-'Sri Ram'
    # Expectation:Vector Printed
    embedder = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
    store = OracleVS(connection, embedder, "TB7", DistanceStrategy.EUCLIDEAN_DISTANCE)
    # 4. List
    # Expectation:Vector Printed
    store._embed_documents(["hello", "yash"])
    drop_table_purge(connection, "TB7")
##################################
####### embed_query(text) ########
##################################
def test_embed_query_test() -> None:
    """Smoke-test ``_embed_query`` with a normal and an empty string."""
    try:
        import oracledb
    except ImportError:
        return
    try:
        connection = oracledb.connect(user=username, password=password, dsn=dsn)
    except Exception:
        sys.exit(1)
    # 1. String
    # Expectation:Vector printed
    embedder = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
    store = OracleVS(connection, embedder, "TB8", DistanceStrategy.EUCLIDEAN_DISTANCE)
    store._embed_query("Sri Ram")
    drop_table_purge(connection, "TB8")
    # 3. Empty string
    # Expectation:[]
    store._embed_query("")
##################################
####### create_index #############
##################################
def test_create_index_test() -> None:
    """Exercise ``create_index`` with default, HNSW-default and IVF params.

    Each scenario creates a fresh table/store, builds an index, then drops
    both so the schema stays clean between cases.
    """
    try:
        import oracledb
    except ImportError:
        return
    try:
        connection = oracledb.connect(user=username, password=password, dsn=dsn)
    except Exception:
        sys.exit(1)
    # 1. No optional parameters passed
    # Expectation:Successful
    model1 = HuggingFaceEmbeddings(
        model_name="sentence-transformers/paraphrase-mpnet-base-v2"
    )
    vs = OracleVS(connection, model1, "TB1", DistanceStrategy.EUCLIDEAN_DISTANCE)
    create_index(connection, vs)
    drop_index_if_exists(connection, "HNSW")
    drop_table_purge(connection, "TB1")
    # 2. ivf index
    # Expectation:Successful
    vs = OracleVS(connection, model1, "TB2", DistanceStrategy.EUCLIDEAN_DISTANCE)
    create_index(connection, vs, {"idx_type": "IVF", "idx_name": "IVF"})
    drop_index_if_exists(connection, "IVF")
    drop_table_purge(connection, "TB2")
    # 3. ivf index with neighbour_part passed as parameter
    # Expectation:Successful
    vs = OracleVS(connection, model1, "TB3", DistanceStrategy.EUCLIDEAN_DISTANCE)
    create_index(connection, vs, {"idx_type": "IVF", "neighbor_part": 10})
    drop_index_if_exists(connection, "IVF")
    drop_table_purge(connection, "TB3")
    # 4. ivf index with neighbour_part and accuracy passed as parameter
    # Expectation:Successful
    vs = OracleVS(connection, model1, "TB4", DistanceStrategy.EUCLIDEAN_DISTANCE)
    create_index(
        connection, vs, {"idx_type": "IVF", "neighbor_part": 10, "accuracy": 90}
    )
    drop_index_if_exists(connection, "IVF")
    drop_table_purge(connection, "TB4")
    # 5. ivf index with neighbour_part and parallel passed as parameter
    # Expectation:Successful
    vs = OracleVS(connection, model1, "TB5", DistanceStrategy.EUCLIDEAN_DISTANCE)
    create_index(
        connection, vs, {"idx_type": "IVF", "neighbor_part": 10, "parallel": 90}
    )
    drop_index_if_exists(connection, "IVF")
    drop_table_purge(connection, "TB5")
    # 6. ivf index and then perform dml(insert)
    # Expectation:Successful
    vs = OracleVS(connection, model1, "TB6", DistanceStrategy.EUCLIDEAN_DISTANCE)
    create_index(connection, vs, {"idx_type": "IVF", "idx_name": "IVF"})
    texts = ["Sri Ram", "Krishna"]
    vs.add_texts(texts)
    # perform delete
    vs.delete(["hello"])
    drop_index_if_exists(connection, "IVF")
    drop_table_purge(connection, "TB6")
    # 7. ivf index with neighbour_part,parallel and accuracy passed as parameter
    # Expectation:Successful
    vs = OracleVS(connection, model1, "TB7", DistanceStrategy.EUCLIDEAN_DISTANCE)
    create_index(
        connection,
        vs,
        {"idx_type": "IVF", "neighbor_part": 10, "parallel": 90, "accuracy": 99},
    )
    drop_index_if_exists(connection, "IVF")
    drop_table_purge(connection, "TB7")
##################################
####### perform_search ###########
##################################
def test_perform_search_test() -> None:
    """Run the full search surface against every distance strategy.

    Builds six stores (euclidean / dot-product / cosine, the first three
    with HNSW indexes and the rest with IVF) and exercises similarity
    search, scored search and max-marginal-relevance search, each with
    and without a metadata filter.
    """
    try:
        import oracledb
    except ImportError:
        return
    try:
        connection = oracledb.connect(user=username, password=password, dsn=dsn)
    except Exception:
        sys.exit(1)
    model1 = HuggingFaceEmbeddings(
        model_name="sentence-transformers/paraphrase-mpnet-base-v2"
    )
    vs_1 = OracleVS(connection, model1, "TB10", DistanceStrategy.EUCLIDEAN_DISTANCE)
    vs_2 = OracleVS(connection, model1, "TB11", DistanceStrategy.DOT_PRODUCT)
    vs_3 = OracleVS(connection, model1, "TB12", DistanceStrategy.COSINE)
    vs_4 = OracleVS(connection, model1, "TB13", DistanceStrategy.EUCLIDEAN_DISTANCE)
    vs_5 = OracleVS(connection, model1, "TB14", DistanceStrategy.DOT_PRODUCT)
    vs_6 = OracleVS(connection, model1, "TB15", DistanceStrategy.COSINE)
    # vector store lists:
    vs_list = [vs_1, vs_2, vs_3, vs_4, vs_5, vs_6]
    for i, vs in enumerate(vs_list, start=1):
        # insert data
        texts = ["Yash", "Varanasi", "Yashaswi", "Mumbai", "BengaluruYash"]
        metadatas = [
            {"id": "hello"},
            {"id": "105"},
            {"id": "106"},
            {"id": "yash"},
            {"id": "108"},
        ]
        vs.add_texts(texts, metadatas)
        # create index: HNSW for the first three stores, IVF for the rest
        if i == 1 or i == 2 or i == 3:
            create_index(connection, vs, {"idx_type": "HNSW", "idx_name": f"IDX1{i}"})
        else:
            create_index(connection, vs, {"idx_type": "IVF", "idx_name": f"IDX1{i}"})
        # perform search
        query = "YashB"
        # Renamed from ``filter`` so the builtin is no longer shadowed.
        search_filter = {"id": ["106", "108", "yash"]}
        # similarity_search without filter
        vs.similarity_search(query, 2)
        # similarity_search with filter
        vs.similarity_search(query, 2, filter=search_filter)
        # Similarity search with relevance score
        vs.similarity_search_with_score(query, 2)
        # Similarity search with relevance score with filter
        vs.similarity_search_with_score(query, 2, filter=search_filter)
        # Max marginal relevance search
        vs.max_marginal_relevance_search(query, 2, fetch_k=20, lambda_mult=0.5)
        # Max marginal relevance search with filter
        vs.max_marginal_relevance_search(
            query, 2, fetch_k=20, lambda_mult=0.5, filter=search_filter
        )
    drop_table_purge(connection, "TB10")
    drop_table_purge(connection, "TB11")
    drop_table_purge(connection, "TB12")
    drop_table_purge(connection, "TB13")
    drop_table_purge(connection, "TB14")
    drop_table_purge(connection, "TB15")
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_momento_vector_index.py | import os
import time
import uuid
from typing import Generator, Iterator, List
import pytest
from langchain_core.documents import Document
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import MomentoVectorIndex
API_KEY_ENV_VAR = "MOMENTO_API_KEY"
def random_string() -> str:
    """Return a fresh UUID4 rendered as a string (36 chars)."""
    return f"{uuid.uuid4()}"
@pytest.fixture(scope="function")
def random_index_name() -> str:
    """Unique Momento index name so concurrent test runs never collide."""
    return "langchain-test-index-" + random_string()
def wait() -> None:
    """Pause one second so the remote index can settle after writes."""
    time.sleep(1)
@pytest.fixture(scope="module")
def embedding_openai() -> OpenAIEmbeddings:
    """Module-scoped OpenAI embedding client; fails fast without a key."""
    if os.environ.get("OPENAI_API_KEY"):
        return OpenAIEmbeddings()
    raise ValueError("OPENAI_API_KEY is not set")
@pytest.fixture(scope="function")
def texts() -> Generator[List[str], None, None]:
    """Yield the page contents of the bundled ``sharks.txt`` fixture file."""
    # Load the documents from a file located in the fixtures directory
    documents = TextLoader(
        os.path.join(os.path.dirname(__file__), "fixtures", "sharks.txt")
    ).load()
    yield [doc.page_content for doc in documents]
@pytest.fixture(scope="function")
def vector_store(
    embedding_openai: OpenAIEmbeddings, random_index_name: str
) -> Iterator[MomentoVectorIndex]:
    """Yield a MomentoVectorIndex wired to a throwaway index.

    The server-side index named ``random_index_name`` is deleted in the
    ``finally`` block even when the test body fails, so no indexes leak.
    """
    from momento import (
        CredentialProvider,
        PreviewVectorIndexClient,
        VectorIndexConfigurations,
    )

    # Pre-bind so the finally block is safe even if construction raises.
    vector_store = None
    try:
        client = PreviewVectorIndexClient(
            VectorIndexConfigurations.Default.latest(),
            credential_provider=CredentialProvider.from_environment_variable(
                API_KEY_ENV_VAR
            ),
        )
        vector_store = MomentoVectorIndex(
            embedding=embedding_openai,
            client=client,
            index_name=random_index_name,
        )
        yield vector_store
    finally:
        if vector_store is not None:
            vector_store._client.delete_index(random_index_name)
def test_from_texts(
    random_index_name: str, embedding_openai: OpenAIEmbeddings, texts: List[str]
) -> None:
    """End-to-end ``from_texts`` construction plus a similarity search."""
    from momento import (
        CredentialProvider,
        VectorIndexConfigurations,
    )

    # Prepend a document with a unique marker so the search has one
    # unambiguous best hit.
    random_text = random_string()
    random_document = f"Hello world {random_text} goodbye world!"
    texts.insert(0, random_document)

    vector_store = None
    try:
        vector_store = MomentoVectorIndex.from_texts(
            texts=texts,
            embedding=embedding_openai,
            index_name=random_index_name,
            configuration=VectorIndexConfigurations.Default.latest(),
            # Consistency fix: use the module-level constant instead of
            # repeating the raw "MOMENTO_API_KEY" string literal.
            credential_provider=CredentialProvider.from_environment_variable(
                API_KEY_ENV_VAR
            ),
        )
        wait()
        documents = vector_store.similarity_search(query=random_text, k=1)
        assert documents == [Document(page_content=random_document)]
    finally:
        # The index is created server-side; always clean it up.
        if vector_store is not None:
            vector_store._client.delete_index(random_index_name)
def test_from_texts_with_metadatas(
    random_index_name: str, embedding_openai: OpenAIEmbeddings, texts: List[str]
) -> None:
    """Test end to end construction and search.

    Same as ``test_from_texts`` but attaches per-document metadata and
    verifies the metadata round-trips on the top search hit.
    """
    from momento import (
        CredentialProvider,
        VectorIndexConfigurations,
    )

    # Prepend a document with a unique marker so the search has one
    # unambiguous best hit.
    random_text = random_string()
    random_document = f"Hello world {random_text} goodbye world!"
    texts.insert(0, random_document)
    metadatas = [{"page": f"{i}", "source": "user"} for i in range(len(texts))]

    vector_store = None
    try:
        vector_store = MomentoVectorIndex.from_texts(
            texts=texts,
            embedding=embedding_openai,
            index_name=random_index_name,
            metadatas=metadatas,
            configuration=VectorIndexConfigurations.Default.latest(),
            credential_provider=CredentialProvider.from_environment_variable(
                API_KEY_ENV_VAR
            ),
        )
        wait()
        documents = vector_store.similarity_search(query=random_text, k=1)
        assert documents == [
            Document(
                page_content=random_document, metadata={"page": "0", "source": "user"}
            )
        ]
    finally:
        # The index is created server-side; always clean it up.
        if vector_store is not None:
            vector_store._client.delete_index(random_index_name)
def test_from_texts_with_scores(vector_store: MomentoVectorIndex) -> None:
    """Test end to end construction and search with scores and IDs."""
    texts = ["apple", "orange", "hammer"]
    metadatas = [{"page": f"{i}"} for i in range(len(texts))]
    vector_store.add_texts(texts, metadatas)
    wait()
    results = vector_store.similarity_search_with_score("apple", k=3)
    docs = [doc for doc, _ in results]
    scores = [score for _, score in results]
    # All three documents come back, ordered by relevance to "apple".
    expected = [
        Document(page_content=text, metadata={"page": f"{i}"})
        for i, text in enumerate(texts)
    ]
    assert docs == expected
    # Scores must be strictly decreasing in that order.
    assert scores[0] > scores[1] > scores[2]
def test_add_documents_with_ids(vector_store: MomentoVectorIndex) -> None:
    """End-to-end: caller-supplied ids are stored and returned verbatim."""
    from momento.responses.vector_index import Search

    texts = ["apple", "orange", "hammer"]
    ids = [random_string() for _ in texts]
    metadatas = [{"page": str(idx)} for idx in range(len(texts))]

    # add_texts must echo back the ids we supplied.
    returned_ids = vector_store.add_texts(texts, metadatas, ids=ids)
    assert returned_ids == ids
    wait()

    # The raw index search should surface exactly those ids.
    response = vector_store._client.search(
        vector_store.index_name, vector_store.embeddings.embed_query("apple")
    )
    assert isinstance(response, Search.Success)
    assert ids == [hit.id for hit in response.hits]
def test_max_marginal_relevance_search(vector_store: MomentoVectorIndex) -> None:
    """MMR should trade a near-duplicate result for a more diverse one."""
    pepperoni_pizza = "pepperoni pizza"
    cheese_pizza = "cheese pizza"
    hot_dog = "hot dog"
    vector_store.add_texts([pepperoni_pizza, cheese_pizza, hot_dog])
    wait()

    # Plain similarity returns the two pizzas...
    assert vector_store.similarity_search("pizza", k=2) == [
        Document(page_content=pepperoni_pizza, metadata={}),
        Document(page_content=cheese_pizza, metadata={}),
    ]
    # ...while MMR swaps the redundant pizza for the dissimilar hot dog.
    assert vector_store.max_marginal_relevance_search(query="pizza", k=2) == [
        Document(page_content=pepperoni_pizza, metadata={}),
        Document(page_content=hot_dog, metadata={}),
    ]
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_couchbase.py | """Test Couchbase Vector Store functionality"""
import os
import time
from typing import Any
import pytest
from langchain_core.documents import Document
from langchain_community.vectorstores.couchbase import CouchbaseVectorStore
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
)
CONNECTION_STRING = os.getenv("COUCHBASE_CONNECTION_STRING", "")
BUCKET_NAME = os.getenv("COUCHBASE_BUCKET_NAME", "")
SCOPE_NAME = os.getenv("COUCHBASE_SCOPE_NAME", "")
COLLECTION_NAME = os.getenv("COUCHBASE_COLLECTION_NAME", "")
USERNAME = os.getenv("COUCHBASE_USERNAME", "")
PASSWORD = os.getenv("COUCHBASE_PASSWORD", "")
INDEX_NAME = os.getenv("COUCHBASE_INDEX_NAME", "")
SLEEP_DURATION = 1
def set_all_env_vars() -> bool:
    """Return True iff every required COUCHBASE_* environment variable is set.

    Used as the skip condition for the integration-test class below.
    """
    # Pass a tuple directly instead of building a throwaway list (C419).
    return all(
        (
            CONNECTION_STRING,
            BUCKET_NAME,
            SCOPE_NAME,
            COLLECTION_NAME,
            USERNAME,
            PASSWORD,
            INDEX_NAME,
        )
    )
def get_cluster() -> Any:
    """Get a couchbase cluster object.

    Authenticates with the COUCHBASE_USERNAME/PASSWORD environment values
    and blocks until the cluster reports ready (5 second timeout).
    """
    from datetime import timedelta
    from couchbase.auth import PasswordAuthenticator
    from couchbase.cluster import Cluster
    from couchbase.options import ClusterOptions
    auth = PasswordAuthenticator(USERNAME, PASSWORD)
    options = ClusterOptions(auth)
    connect_string = CONNECTION_STRING
    cluster = Cluster(connect_string, options)
    # Wait until the cluster is ready for use.
    cluster.wait_until_ready(timedelta(seconds=5))
    return cluster
@pytest.fixture()
def cluster() -> Any:
    """Get a couchbase cluster object"""
    # Thin pytest wrapper around get_cluster(); yields a fresh connection
    # per test (default function scope).
    return get_cluster()
def delete_documents(
    cluster: Any, bucket_name: str, scope_name: str, collection_name: str
) -> None:
    """Delete every document in the given collection via a N1QL DELETE."""
    keyspace = f"`{bucket_name}`.`{scope_name}`.`{collection_name}`"
    cluster.query(f"DELETE FROM {keyspace}").execute()
@pytest.mark.requires("couchbase")
@pytest.mark.skipif(
    not set_all_env_vars(), reason="Missing Couchbase environment variables"
)
class TestCouchbaseVectorStore:
    """End-to-end tests for CouchbaseVectorStore against a live cluster.

    Requires the COUCHBASE_* environment variables (see module top) and a
    pre-created search index named by COUCHBASE_INDEX_NAME on the target
    collection. Each test waits SLEEP_DURATION seconds for indexing.
    """

    @classmethod
    def setup_method(cls) -> None:
        # Fix: the first parameter of a @classmethod is the class, so it is
        # named ``cls`` (it was previously misnamed ``self``).
        # Start every test from an empty collection so searches only see
        # the documents added by that test.
        cluster = get_cluster()
        delete_documents(cluster, BUCKET_NAME, SCOPE_NAME, COLLECTION_NAME)

    def test_from_documents(self, cluster: Any) -> None:
        """Test end to end search using a list of documents."""
        documents = [
            Document(page_content="foo", metadata={"page": 1}),
            Document(page_content="bar", metadata={"page": 2}),
            Document(page_content="baz", metadata={"page": 3}),
        ]
        vectorstore = CouchbaseVectorStore.from_documents(
            documents,
            ConsistentFakeEmbeddings(),
            cluster=cluster,
            bucket_name=BUCKET_NAME,
            scope_name=SCOPE_NAME,
            collection_name=COLLECTION_NAME,
            index_name=INDEX_NAME,
        )
        # Wait for the documents to be indexed
        time.sleep(SLEEP_DURATION)
        output = vectorstore.similarity_search("baz", k=1)
        assert output[0].page_content == "baz"
        assert output[0].metadata["page"] == 3

    def test_from_texts(self, cluster: Any) -> None:
        """Test end to end search using a list of texts."""
        texts = [
            "foo",
            "bar",
            "baz",
        ]
        vectorstore = CouchbaseVectorStore.from_texts(
            texts,
            ConsistentFakeEmbeddings(),
            cluster=cluster,
            index_name=INDEX_NAME,
            bucket_name=BUCKET_NAME,
            scope_name=SCOPE_NAME,
            collection_name=COLLECTION_NAME,
        )
        # Wait for the documents to be indexed
        time.sleep(SLEEP_DURATION)
        output = vectorstore.similarity_search("foo", k=1)
        assert len(output) == 1
        assert output[0].page_content == "foo"

    def test_from_texts_with_metadatas(self, cluster: Any) -> None:
        """Test end to end search using a list of texts and metadatas."""
        texts = [
            "foo",
            "bar",
            "baz",
        ]
        metadatas = [{"a": 1}, {"b": 2}, {"c": 3}]
        vectorstore = CouchbaseVectorStore.from_texts(
            texts,
            ConsistentFakeEmbeddings(),
            metadatas=metadatas,
            cluster=cluster,
            index_name=INDEX_NAME,
            bucket_name=BUCKET_NAME,
            scope_name=SCOPE_NAME,
            collection_name=COLLECTION_NAME,
        )
        # Wait for the documents to be indexed
        time.sleep(SLEEP_DURATION)
        output = vectorstore.similarity_search("baz", k=1)
        assert output[0].page_content == "baz"
        assert output[0].metadata["c"] == 3

    def test_add_texts_with_ids_and_metadatas(self, cluster: Any) -> None:
        """Test end to end search by adding a list of texts, ids and metadatas."""
        texts = [
            "foo",
            "bar",
            "baz",
        ]
        ids = ["a", "b", "c"]
        metadatas = [{"a": 1}, {"b": 2}, {"c": 3}]
        vectorstore = CouchbaseVectorStore(
            cluster=cluster,
            embedding=ConsistentFakeEmbeddings(),
            index_name=INDEX_NAME,
            bucket_name=BUCKET_NAME,
            scope_name=SCOPE_NAME,
            collection_name=COLLECTION_NAME,
        )
        results = vectorstore.add_texts(
            texts,
            ids=ids,
            metadatas=metadatas,
        )
        # add_texts must echo back the caller-supplied ids.
        assert results == ids
        # Wait for the documents to be indexed
        time.sleep(SLEEP_DURATION)
        output = vectorstore.similarity_search("foo", k=1)
        assert output[0].page_content == "foo"
        assert output[0].metadata["a"] == 1

    def test_delete_texts_with_ids(self, cluster: Any) -> None:
        """Test deletion of documents by ids."""
        texts = [
            "foo",
            "bar",
            "baz",
        ]
        ids = ["a", "b", "c"]
        metadatas = [{"a": 1}, {"b": 2}, {"c": 3}]
        vectorstore = CouchbaseVectorStore(
            cluster=cluster,
            embedding=ConsistentFakeEmbeddings(),
            index_name=INDEX_NAME,
            bucket_name=BUCKET_NAME,
            scope_name=SCOPE_NAME,
            collection_name=COLLECTION_NAME,
        )
        results = vectorstore.add_texts(
            texts,
            ids=ids,
            metadatas=metadatas,
        )
        assert results == ids
        assert vectorstore.delete(ids)
        # Wait for the deletions to propagate to the index
        time.sleep(SLEEP_DURATION)
        output = vectorstore.similarity_search("foo", k=1)
        assert len(output) == 0

    def test_similarity_search_with_scores(self, cluster: Any) -> None:
        """Test similarity search with scores."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"a": 1}, {"b": 2}, {"c": 3}]
        vectorstore = CouchbaseVectorStore(
            cluster=cluster,
            embedding=ConsistentFakeEmbeddings(),
            index_name=INDEX_NAME,
            bucket_name=BUCKET_NAME,
            scope_name=SCOPE_NAME,
            collection_name=COLLECTION_NAME,
        )
        vectorstore.add_texts(texts, metadatas=metadatas)
        # Wait for the documents to be indexed
        time.sleep(SLEEP_DURATION)
        output = vectorstore.similarity_search_with_score("foo", k=2)
        assert len(output) == 2
        assert output[0][0].page_content == "foo"
        assert output[0][0].metadata["a"] == 1
        # check that the scores are sorted (best match first)
        assert output[0][1] > output[1][1]

    def test_similarity_search_by_vector(self, cluster: Any) -> None:
        """Test similarity search by vector."""
        texts = ["foo", "bar", "baz"]
        metadatas = [{"a": 1}, {"b": 2}, {"c": 3}]
        vectorstore = CouchbaseVectorStore(
            cluster=cluster,
            embedding=ConsistentFakeEmbeddings(),
            index_name=INDEX_NAME,
            bucket_name=BUCKET_NAME,
            scope_name=SCOPE_NAME,
            collection_name=COLLECTION_NAME,
        )
        vectorstore.add_texts(texts, metadatas=metadatas)
        # Wait for the documents to be indexed
        time.sleep(SLEEP_DURATION)
        vector = ConsistentFakeEmbeddings().embed_query("foo")
        vector_output = vectorstore.similarity_search_by_vector(vector, k=1)
        assert vector_output[0].page_content == "foo"
        # Searching by text or by its embedding must agree.
        similarity_output = vectorstore.similarity_search("foo", k=1)
        assert similarity_output == vector_output

    def test_output_fields(self, cluster: Any) -> None:
        """Test that output fields are set correctly."""
        texts = [
            "foo",
            "bar",
            "baz",
        ]
        metadatas = [{"page": 1, "a": 1}, {"page": 2, "b": 2}, {"page": 3, "c": 3}]
        vectorstore = CouchbaseVectorStore(
            cluster=cluster,
            embedding=ConsistentFakeEmbeddings(),
            index_name=INDEX_NAME,
            bucket_name=BUCKET_NAME,
            scope_name=SCOPE_NAME,
            collection_name=COLLECTION_NAME,
        )
        ids = vectorstore.add_texts(texts, metadatas)
        assert len(ids) == len(texts)
        # Wait for the documents to be indexed
        time.sleep(SLEEP_DURATION)
        # Restricting `fields` should drop metadata keys not requested.
        output = vectorstore.similarity_search("foo", k=1, fields=["metadata.page"])
        assert output[0].page_content == "foo"
        assert output[0].metadata["page"] == 1
        assert "a" not in output[0].metadata

    def test_hybrid_search(self, cluster: Any) -> None:
        """Test hybrid search."""
        texts = [
            "foo",
            "bar",
            "baz",
        ]
        metadatas = [
            {"section": "index"},
            {"section": "glossary"},
            {"section": "appendix"},
        ]
        vectorstore = CouchbaseVectorStore(
            cluster=cluster,
            embedding=ConsistentFakeEmbeddings(),
            index_name=INDEX_NAME,
            bucket_name=BUCKET_NAME,
            scope_name=SCOPE_NAME,
            collection_name=COLLECTION_NAME,
        )
        vectorstore.add_texts(texts, metadatas=metadatas)
        # Wait for the documents to be indexed
        time.sleep(SLEEP_DURATION)
        result, score = vectorstore.similarity_search_with_score("foo", k=1)[0]
        # Wait for the documents to be indexed for hybrid search
        time.sleep(SLEEP_DURATION)
        hybrid_result, hybrid_score = vectorstore.similarity_search_with_score(
            "foo",
            k=1,
            search_options={"query": {"match": "index", "field": "metadata.section"}},
        )[0]
        # The text-match clause should boost (never lower) the score.
        assert result == hybrid_result
        assert score <= hybrid_score
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_hologres.py | """Test Hologres functionality."""
import os
from typing import List
from langchain_core.documents import Document
from langchain_community.vectorstores.hologres import Hologres
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
CONNECTION_STRING = Hologres.connection_string_from_db_params(
host=os.environ.get("TEST_HOLOGRES_HOST", "localhost"),
port=int(os.environ.get("TEST_HOLOGRES_PORT", "80")),
database=os.environ.get("TEST_HOLOGRES_DATABASE", "postgres"),
user=os.environ.get("TEST_HOLOGRES_USER", "postgres"),
password=os.environ.get("TEST_HOLOGRES_PASSWORD", "postgres"),
)
ADA_TOKEN_COUNT = 1536
class FakeEmbeddingsWithAdaDimension(FakeEmbeddings):
    """Deterministic fake embeddings with OpenAI-ada dimensionality (1536)."""

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed each text as [1.0, ..., 1.0, index]."""
        prefix = [1.0] * (ADA_TOKEN_COUNT - 1)
        return [prefix + [float(idx)] for idx in range(len(texts))]

    def embed_query(self, text: str) -> List[float]:
        """Embed every query as [1.0, ..., 1.0, 0.0] (matches document 0)."""
        return [1.0] * (ADA_TOKEN_COUNT - 1) + [0.0]
def test_hologres() -> None:
    """End-to-end: build a table from texts and find an exact match."""
    docsearch = Hologres.from_texts(
        texts=["foo", "bar", "baz"],
        table_name="test_table",
        embedding=FakeEmbeddingsWithAdaDimension(),
        connection_string=CONNECTION_STRING,
        pre_delete_table=True,
    )
    assert docsearch.similarity_search("foo", k=1) == [Document(page_content="foo")]
def test_hologres_embeddings() -> None:
    """End-to-end: build from precomputed (text, embedding) pairs and search."""
    texts = ["foo", "bar", "baz"]
    embedder = FakeEmbeddingsWithAdaDimension()
    pairs = list(zip(texts, embedder.embed_documents(texts)))
    docsearch = Hologres.from_embeddings(
        text_embeddings=pairs,
        table_name="test_table",
        embedding=embedder,
        connection_string=CONNECTION_STRING,
        pre_delete_table=True,
    )
    assert docsearch.similarity_search("foo", k=1) == [Document(page_content="foo")]
def test_hologres_with_metadatas() -> None:
    """End-to-end: metadata round-trips through storage and search."""
    texts = ["foo", "bar", "baz"]
    docsearch = Hologres.from_texts(
        texts=texts,
        table_name="test_table",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=[{"page": str(idx)} for idx in range(len(texts))],
        connection_string=CONNECTION_STRING,
        pre_delete_table=True,
    )
    result = docsearch.similarity_search("foo", k=1)
    assert result == [Document(page_content="foo", metadata={"page": "0"})]
def test_hologres_with_metadatas_with_scores() -> None:
    """End-to-end: an exact match scores distance 0.0."""
    texts = ["foo", "bar", "baz"]
    docsearch = Hologres.from_texts(
        texts=texts,
        table_name="test_table",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=[{"page": str(idx)} for idx in range(len(texts))],
        connection_string=CONNECTION_STRING,
        pre_delete_table=True,
    )
    result = docsearch.similarity_search_with_score("foo", k=1)
    assert result == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_hologres_with_filter_match() -> None:
    """Filtering on the metadata of the best match leaves it in place."""
    texts = ["foo", "bar", "baz"]
    docsearch = Hologres.from_texts(
        texts=texts,
        table_name="test_table_filter",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=[{"page": str(idx)} for idx in range(len(texts))],
        connection_string=CONNECTION_STRING,
        pre_delete_table=True,
    )
    result = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "0"})
    assert result == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_hologres_with_filter_distant_match() -> None:
    """Filtering to a distant document returns it with its true distance."""
    texts = ["foo", "bar", "baz"]
    docsearch = Hologres.from_texts(
        texts=texts,
        table_name="test_table_filter",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=[{"page": str(idx)} for idx in range(len(texts))],
        connection_string=CONNECTION_STRING,
        pre_delete_table=True,
    )
    result = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "2"})
    # "baz" embeds as [..., 2.0] vs the query's [..., 0.0] -> squared distance 4.0.
    assert result == [(Document(page_content="baz", metadata={"page": "2"}), 4.0)]
def test_hologres_with_filter_no_match() -> None:
    """A filter that matches no metadata yields an empty result set."""
    texts = ["foo", "bar", "baz"]
    docsearch = Hologres.from_texts(
        texts=texts,
        table_name="test_table_filter",
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=[{"page": str(idx)} for idx in range(len(texts))],
        connection_string=CONNECTION_STRING,
        pre_delete_table=True,
    )
    result = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "5"})
    assert result == []
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/test_vdms.py | """Test VDMS functionality."""
from __future__ import annotations
import logging
import os
from typing import TYPE_CHECKING
import pytest
from langchain_core.documents import Document
from langchain_community.vectorstores import VDMS
from langchain_community.vectorstores.vdms import VDMS_Client, embedding2bytes
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
FakeEmbeddings,
)
if TYPE_CHECKING:
import vdms
logging.basicConfig(level=logging.DEBUG)
embedding_function = FakeEmbeddings()
# The connection string matches the default settings in the docker-compose file
# located in the root of the repository: [root]/docker/docker-compose.yml
# To spin up a detached VDMS server:
# cd [root]/docker
# docker compose up -d vdms
@pytest.fixture
def vdms_client() -> vdms.vdms:
    """Connect to the VDMS server configured via VDMS_DBHOST/VDMS_DBPORT.

    Note: the ``@pytest.mark.enable_socket`` decorator was removed here —
    marks applied to fixtures have no effect per the pytest documentation
    (and raise an error in newer pytest versions); every test function
    already carries the mark itself.
    """
    return VDMS_Client(
        host=os.getenv("VDMS_DBHOST", "localhost"),
        port=int(os.getenv("VDMS_DBPORT", "6025")),
    )
@pytest.mark.requires("vdms")
@pytest.mark.enable_socket
def test_init_from_client(vdms_client: vdms.vdms) -> None:
    """Smoke test: a VDMS store can be constructed from an existing client."""
    VDMS(  # type: ignore[call-arg]
        embedding=embedding_function,
        client=vdms_client,
    )
@pytest.mark.requires("vdms")
@pytest.mark.enable_socket
def test_from_texts_with_metadatas(vdms_client: vdms.vdms) -> None:
    """End-to-end: construction from texts + metadatas, then search."""
    collection_name = "test_from_texts_with_metadatas"
    texts = ["foo", "bar", "baz"]
    ids = [f"test_from_texts_with_metadatas_{i}" for i in range(len(texts))]
    docsearch = VDMS.from_texts(
        texts=texts,
        ids=ids,
        embedding=embedding_function,
        metadatas=[{"page": str(i + 1)} for i in range(len(texts))],
        collection_name=collection_name,
        client=vdms_client,
    )
    # The store annotates each hit with its stored id.
    assert docsearch.similarity_search("foo", k=1) == [
        Document(page_content="foo", metadata={"page": "1", "id": ids[0]})
    ]
@pytest.mark.requires("vdms")
@pytest.mark.enable_socket
def test_from_texts_with_metadatas_with_scores(vdms_client: vdms.vdms) -> None:
    """End-to-end: scored search; an exact match has distance 0.0."""
    collection_name = "test_from_texts_with_metadatas_with_scores"
    texts = ["foo", "bar", "baz"]
    ids = [
        f"test_from_texts_with_metadatas_with_scores_{i}" for i in range(len(texts))
    ]
    docsearch = VDMS.from_texts(
        texts=texts,
        ids=ids,
        embedding=embedding_function,
        metadatas=[{"page": str(i + 1)} for i in range(len(texts))],
        collection_name=collection_name,
        client=vdms_client,
    )
    scored = docsearch.similarity_search_with_score("foo", k=1, fetch_k=1)
    assert scored == [
        (Document(page_content="foo", metadata={"page": "1", "id": ids[0]}), 0.0)
    ]
@pytest.mark.requires("vdms")
@pytest.mark.enable_socket
def test_from_texts_with_metadatas_with_scores_using_vector(
    vdms_client: vdms.vdms,
) -> None:
    """End-to-end: relevance-scored search returns 0.0 for an exact match."""
    collection_name = "test_from_texts_with_metadatas_with_scores_using_vector"
    texts = ["foo", "bar", "baz"]
    ids = [f"test_from_texts_with_metadatas_{i}" for i in range(len(texts))]
    docsearch = VDMS.from_texts(
        texts=texts,
        ids=ids,
        embedding=embedding_function,
        metadatas=[{"page": str(i + 1)} for i in range(len(texts))],
        collection_name=collection_name,
        client=vdms_client,
    )
    # NOTE(review): despite this test's name, the internal relevance-score
    # helper is called with a text query, not a raw vector — confirm intent.
    scored = docsearch._similarity_search_with_relevance_scores("foo", k=1)
    assert scored == [
        (Document(page_content="foo", metadata={"page": "1", "id": ids[0]}), 0.0)
    ]
@pytest.mark.requires("vdms")
@pytest.mark.enable_socket
def test_search_filter(vdms_client: vdms.vdms) -> None:
    """End-to-end: metadata filters restrict similarity-search results."""
    collection_name = "test_search_filter"
    texts = ["far", "bar", "baz"]
    ids = [f"test_search_filter_{i}" for i in range(len(texts))]
    docsearch = VDMS.from_texts(
        texts=texts,
        ids=ids,
        embedding=embedding_function,
        metadatas=[{"first_letter": text[0]} for text in texts],
        collection_name=collection_name,
        client=vdms_client,
    )
    # Only documents starting with "f" may come back.
    hits = docsearch.similarity_search(
        "far", k=1, filter={"first_letter": ["==", "f"]}
    )
    assert hits == [
        Document(page_content="far", metadata={"first_letter": "f", "id": ids[0]})
    ]
    # Only documents starting with "b" may come back.
    hits = docsearch.similarity_search(
        "far", k=2, filter={"first_letter": ["==", "b"]}
    )
    assert hits == [
        Document(page_content="bar", metadata={"first_letter": "b", "id": ids[1]}),
        Document(page_content="baz", metadata={"first_letter": "b", "id": ids[2]}),
    ]
@pytest.mark.requires("vdms")
@pytest.mark.enable_socket
def test_search_filter_with_scores(vdms_client: vdms.vdms) -> None:
    """End-to-end: metadata filters restrict scored search results."""
    collection_name = "test_search_filter_with_scores"
    texts = ["far", "bar", "baz"]
    ids = [f"test_search_filter_with_scores_{i}" for i in range(len(texts))]
    docsearch = VDMS.from_texts(
        texts=texts,
        ids=ids,
        embedding=embedding_function,
        metadatas=[{"first_letter": text[0]} for text in texts],
        collection_name=collection_name,
        client=vdms_client,
    )
    # An exact match under the "f" filter scores 0.0.
    scored = docsearch.similarity_search_with_score(
        "far", k=1, filter={"first_letter": ["==", "f"]}
    )
    expected_f = [
        (
            Document(page_content="far", metadata={"first_letter": "f", "id": ids[0]}),
            0.0,
        )
    ]
    assert scored == expected_f
    # The "b" filter excludes "far" and ranks the remaining docs by distance.
    scored = docsearch.similarity_search_with_score(
        "far", k=2, filter={"first_letter": ["==", "b"]}
    )
    expected_b = [
        (
            Document(page_content="bar", metadata={"first_letter": "b", "id": ids[1]}),
            1.0,
        ),
        (
            Document(page_content="baz", metadata={"first_letter": "b", "id": ids[2]}),
            4.0,
        ),
    ]
    assert scored == expected_b
@pytest.mark.requires("vdms")
@pytest.mark.enable_socket
def test_mmr(vdms_client: vdms.vdms) -> None:
    """End-to-end: max-marginal-relevance search by text query."""
    collection_name = "test_mmr"
    texts = ["foo", "bar", "baz"]
    ids = [f"test_mmr_{i}" for i in range(len(texts))]
    docsearch = VDMS.from_texts(
        texts=texts,
        ids=ids,
        embedding=embedding_function,
        collection_name=collection_name,
        client=vdms_client,
    )
    result = docsearch.max_marginal_relevance_search("foo", k=1)
    assert result == [Document(page_content="foo", metadata={"id": ids[0]})]
@pytest.mark.requires("vdms")
@pytest.mark.enable_socket
def test_mmr_by_vector(vdms_client: vdms.vdms) -> None:
    """End-to-end: max-marginal-relevance search by precomputed vector."""
    collection_name = "test_mmr_by_vector"
    texts = ["foo", "bar", "baz"]
    ids = [f"test_mmr_by_vector_{i}" for i in range(len(texts))]
    docsearch = VDMS.from_texts(
        texts=texts,
        ids=ids,
        embedding=embedding_function,
        collection_name=collection_name,
        client=vdms_client,
    )
    query_vector = embedding_function.embed_query("foo")
    result = docsearch.max_marginal_relevance_search_by_vector(query_vector, k=1)
    assert result == [Document(page_content="foo", metadata={"id": ids[0]})]
@pytest.mark.requires("vdms")
@pytest.mark.enable_socket
def test_with_include_parameter(vdms_client: vdms.vdms) -> None:
    """Embeddings are returned only when explicitly included in a get()."""
    collection_name = "test_with_include_parameter"
    texts = ["foo", "bar", "baz"]
    docsearch = VDMS.from_texts(
        texts=texts,
        embedding=embedding_function,
        collection_name=collection_name,
        client=vdms_client,
    )
    # With include=["embeddings"], every stored embedding is present...
    _, response_array = docsearch.get(collection_name, include=["embeddings"])
    for emb in embedding_function.embed_documents(texts):
        assert embedding2bytes(emb) in response_array
    # ...and without it, no embedding bytes come back.
    _, response_array = docsearch.get(collection_name)
    assert response_array == []
@pytest.mark.requires("vdms")
@pytest.mark.enable_socket
def test_update_document(vdms_client: vdms.vdms) -> None:
    """Test the update_document function in the VDMS class.

    Stores one document, snapshots its embedding, updates the document
    in place (same id), and verifies both the search result and the
    stored embedding/content changed accordingly.
    """
    collection_name = "test_update_document"
    # Make a consistent embedding so re-embedding the same text is deterministic
    const_embedding_function = ConsistentFakeEmbeddings()
    # Initial document content and id
    initial_content = "foo"
    document_id = "doc1"
    # Create an instance of Document with initial content and metadata
    original_doc = Document(page_content=initial_content, metadata={"page": "1"})
    # Initialize a VDMS instance with the original document
    docsearch = VDMS.from_documents(
        client=vdms_client,
        collection_name=collection_name,
        documents=[original_doc],
        embedding=const_embedding_function,
        ids=[document_id],
    )
    # Snapshot the stored metadata and embedding before the update.
    old_response, old_embedding = docsearch.get(
        collection_name,
        constraints={"id": ["==", document_id]},
        include=["metadata", "embeddings"],
    )
    # Define updated content for the document
    updated_content = "updated foo"
    # Create a new Document instance with the updated content and the same id
    updated_doc = Document(page_content=updated_content, metadata={"page": "1"})
    # Update the document in the VDMS instance
    docsearch.update_document(
        collection_name, document_id=document_id, document=updated_doc
    )
    # Perform a similarity search with the updated content
    output = docsearch.similarity_search(updated_content, k=3)[0]
    # Assert that the updated document is returned by the search
    assert output == Document(
        page_content=updated_content, metadata={"page": "1", "id": document_id}
    )
    # Assert that the new embedding is correct
    new_response, new_embedding = docsearch.get(
        collection_name,
        constraints={"id": ["==", document_id]},
        include=["metadata", "embeddings"],
    )
    # The stored embedding must now equal the embedding of the new content...
    assert new_embedding[0] == embedding2bytes(
        const_embedding_function.embed_documents([updated_content])[0]
    )
    # ...and must differ from the pre-update snapshot, as must the content.
    assert new_embedding != old_embedding
    assert (
        new_response[0]["FindDescriptor"]["entities"][0]["content"]
        != old_response[0]["FindDescriptor"]["entities"][0]["content"]
    )
@pytest.mark.requires("vdms")
@pytest.mark.enable_socket
def test_with_relevance_score(vdms_client: vdms.vdms) -> None:
    """Relevance scores for the fake embedding come back scaled to [0, 1]."""
    collection_name = "test_with_relevance_score"
    texts = ["foo", "bar", "baz"]
    ids = [f"test_relevance_scores_{i}" for i in range(len(texts))]
    docsearch = VDMS.from_texts(
        texts=texts,
        ids=ids,
        embedding=embedding_function,
        metadatas=[{"page": str(i + 1)} for i in range(len(texts))],
        collection_name=collection_name,
        client=vdms_client,
    )
    expected = [
        (Document(page_content="foo", metadata={"page": "1", "id": ids[0]}), 0.0),
        (Document(page_content="bar", metadata={"page": "2", "id": ids[1]}), 0.25),
        (Document(page_content="baz", metadata={"page": "3", "id": ids[2]}), 1.0),
    ]
    assert docsearch._similarity_search_with_relevance_scores("foo", k=3) == expected
@pytest.mark.requires("vdms")
@pytest.mark.enable_socket
def test_add_documents_no_metadata(vdms_client: vdms.vdms) -> None:
    """A document without metadata can be added without error."""
    store = VDMS(  # type: ignore[call-arg]
        collection_name="test_add_documents_no_metadata",
        embedding=embedding_function,
        client=vdms_client,
    )
    store.add_documents([Document(page_content="foo")])
@pytest.mark.requires("vdms")
@pytest.mark.enable_socket
def test_add_documents_mixed_metadata(vdms_client: vdms.vdms) -> None:
    """Documents with and without metadata can be added in one batch."""
    store = VDMS(  # type: ignore[call-arg]
        collection_name="test_add_documents_mixed_metadata",
        embedding=embedding_function,
        client=vdms_client,
    )
    docs = [
        Document(page_content="foo"),
        Document(page_content="bar", metadata={"baz": 1}),
    ]
    ids = ["10", "11"]
    assert store.add_documents(docs, ids=ids) == ids

    found = store.similarity_search("foo bar", k=2)
    # The store annotates hits with their ids; mirror that on the originals
    # so an order-insensitive comparison succeeds.
    docs[0].metadata = {"id": ids[0]}
    docs[1].metadata["id"] = ids[1]
    assert sorted(found, key=lambda d: d.page_content) == sorted(
        docs, key=lambda d: d.page_content
    )
|
0 | lc_public_repos/langchain/libs/community/tests/integration_tests | lc_public_repos/langchain/libs/community/tests/integration_tests/vectorstores/__init__.py | """Test vectorstores."""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.